gsalloc.c

/* Copyright (C) 1995, 2000 Aladdin Enterprises.  All rights reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   For more information about licensing, please refer to
   http://www.ghostscript.com/licensing/. For information on
   commercial licensing, go to http://www.artifex.com/licensing/ or
   contact Artifex Software, Inc., 101 Lucas Valley Road #110,
   San Rafael, CA 94903, U.S.A., +1(415)492-9861.
*/

/* $Id: gsalloc.c,v 1.24 2005/10/12 10:45:21 leonardo Exp $ */
/* Standard memory allocator */
#include "gx.h"
#include "memory_.h"
#include "gserrors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gxalloc.h"
#include "stream.h"             /* for clearing stream list */

/*
 * Define whether to try consolidating space before adding a new chunk.
 * The default is not to do this, because it is computationally
 * expensive and doesn't seem to help much.  However, this is done for
 * "controlled" spaces whether or not the #define is in effect.
 */
/*#define CONSOLIDATE_BEFORE_ADDING_CHUNK */

/*
 * This allocator produces tracing messages of the form
 *      [aNMOTS]...
 * where
 *   N is the VM space number, +1 if we are allocating from stable memory.
 *   M is : for movable objects, | for immovable,
 *   O is {alloc = +, free = -, grow = >, shrink = <},
 *   T is {bytes = b, object = <, ref = $, string = >}, and
 *   S is {small freelist = f, large freelist = F, LIFO = space,
 *      own chunk = L, lost = #, lost own chunk = ~, other = .}.
 */
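/*
 * For example (an illustrative decoding, not an actual log line): a trace
 * beginning "[a1:+b.]" would mean VM space 1, movable (:), allocation (+),
 * byte block (b), satisfied by neither a freelist nor the LIFO area (.).
 */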
#ifdef DEBUG
private int
alloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
private void
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
{
    if_debug7('A', "[a%d%s]%s %s(%u) %s0x%lx\n",
              alloc_trace_space(imem), chars, client_name_string(cname),
              (ptr == 0 || stype == 0 ? "" :
               struct_type_name_string(stype)),
              size, (chars[1] == '+' ? "= " : ""), (ulong) ptr);
}
private bool
alloc_size_is_ok(gs_memory_type_ptr_t stype)
{
    return (stype->ssize > 0 && stype->ssize < 0x100000);
}
#  define ALLOC_CHECK_SIZE(stype)\
    BEGIN\
      if (!alloc_size_is_ok(stype)) {\
        lprintf2("size of struct type 0x%lx is 0x%lx!\n",\
                 (ulong)(stype), (ulong)((stype)->ssize));\
        return 0;\
      }\
    END
#else
#  define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
#  define ALLOC_CHECK_SIZE(stype) DO_NOTHING
#endif

/*
 * The structure descriptor for allocators.  Even though allocators
 * are allocated outside GC space, they reference objects within it.
 */
public_st_ref_memory();
private
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
ENUM_PTR(3, gs_ref_memory_t, saved);
ENUM_PTRS_END
private RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
{
    RELOC_PTR(gs_ref_memory_t, streams);
    RELOC_PTR(gs_ref_memory_t, names_array);
    RELOC_PTR(gs_ref_memory_t, changes);
    /* Don't relocate the saved pointer now -- see igc.c for details. */
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
}
RELOC_PTRS_END

/*
 * Define the flags for alloc_obj, which implements all but the fastest
 * case of allocation.
 */
typedef enum {
    ALLOC_IMMOVABLE = 1,
    ALLOC_DIRECT = 2            /* called directly, without fast-case checks */
} alloc_flags_t;

/* Forward references */
private void remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top);
private obj_header_t *large_freelist_alloc(gs_ref_memory_t *mem, uint size);
private obj_header_t *scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size);
private ulong compute_free_objects(gs_ref_memory_t *);
private obj_header_t *alloc_obj(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t);
private void consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem);
private void trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp);
private chunk_t *alloc_acquire_chunk(gs_ref_memory_t *, ulong, bool, client_name_t);
private chunk_t *alloc_add_chunk(gs_ref_memory_t *, ulong, client_name_t);
void alloc_close_chunk(gs_ref_memory_t *);

/*
 * Define the standard implementation (with garbage collection)
 * of Ghostscript's memory manager interface.
 */

/* Raw memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
private gs_memory_proc_resize_object(i_resize_object);
private gs_memory_proc_free_object(i_free_object);
private gs_memory_proc_stable(i_stable);
private gs_memory_proc_status(i_status);
private gs_memory_proc_free_all(i_free_all);
private gs_memory_proc_consolidate_free(i_consolidate_free);

/* Object memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes);
private gs_memory_proc_alloc_struct(i_alloc_struct);
private gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
private gs_memory_proc_object_size(i_object_size);
private gs_memory_proc_object_type(i_object_type);
private gs_memory_proc_alloc_string(i_alloc_string);
private gs_memory_proc_alloc_string(i_alloc_string_immovable);
private gs_memory_proc_resize_string(i_resize_string);
private gs_memory_proc_free_string(i_free_string);
private gs_memory_proc_register_root(i_register_root);
private gs_memory_proc_unregister_root(i_unregister_root);
private gs_memory_proc_enable_free(i_enable_free);

/* We export the procedures for subclasses. */
const gs_memory_procs_t gs_ref_memory_procs =
{
    /* Raw memory procedures */
    i_alloc_bytes_immovable,
    i_resize_object,
    i_free_object,
    i_stable,
    i_status,
    i_free_all,
    i_consolidate_free,
    /* Object memory procedures */
    i_alloc_bytes,
    i_alloc_struct,
    i_alloc_struct_immovable,
    i_alloc_byte_array,
    i_alloc_byte_array_immovable,
    i_alloc_struct_array,
    i_alloc_struct_array_immovable,
    i_object_size,
    i_object_type,
    i_alloc_string,
    i_alloc_string_immovable,
    i_resize_string,
    i_free_string,
    i_register_root,
    i_unregister_root,
    i_enable_free
};
  170. /*
  171. * Allocate and mostly initialize the state of an allocator (system, global,
  172. * or local). Does not initialize global or space.
  173. */
  174. private void *ialloc_solo(gs_memory_t *, gs_memory_type_ptr_t,
  175. chunk_t **);
  176. gs_ref_memory_t *
  177. ialloc_alloc_state(gs_memory_t * parent, uint chunk_size)
  178. {
  179. chunk_t *cp;
  180. gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);
  181. if (iimem == 0)
  182. return 0;
  183. iimem->stable_memory = (gs_memory_t *)iimem;
  184. iimem->procs = gs_ref_memory_procs;
  185. iimem->gs_lib_ctx = parent->gs_lib_ctx;
  186. iimem->non_gc_memory = parent;
  187. iimem->chunk_size = chunk_size;
  188. iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
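    /* Note: objects of large_size bytes or more get a chunk of their own
       (see alloc_obj below); the threshold is roughly a quarter of the
       chunk size, rounded down to the object alignment, plus 1. */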
    iimem->is_controlled = false;
    iimem->gc_status.vm_threshold = chunk_size * 3L;
    iimem->gc_status.max_vm = max_long;
    iimem->gc_status.psignal = NULL;
    iimem->gc_status.signal_value = 0;
    iimem->gc_status.enabled = false;
    iimem->gc_status.requested = 0;
    iimem->gc_allocated = 0;
    iimem->previous_status.allocated = 0;
    iimem->previous_status.used = 0;
    ialloc_reset(iimem);
    iimem->cfirst = iimem->clast = cp;
    ialloc_set_limit(iimem);
    iimem->cc.cbot = iimem->cc.ctop = 0;
    iimem->pcc = 0;
    iimem->save_level = 0;
    iimem->new_mask = 0;
    iimem->test_mask = ~0;
    iimem->streams = 0;
    iimem->names_array = 0;
    iimem->roots = 0;
    iimem->num_contexts = 0;
    iimem->saved = 0;
    return iimem;
}

/* Allocate a 'solo' object with its own chunk. */
private void *
ialloc_solo(gs_memory_t * parent, gs_memory_type_ptr_t pstype,
            chunk_t ** pcp)
{       /*
         * We can't assume that the parent uses the same object header
         * that we do, but the GC requires that allocators have
         * such a header.  Therefore, we prepend one explicitly.
         */
    chunk_t *cp =
        gs_raw_alloc_struct_immovable(parent, &st_chunk,
                                      "ialloc_solo(chunk)");
    uint csize =
        ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
                 pstype->ssize,
                 obj_align_mod);
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));

    if (cp == 0 || cdata == 0)
        return 0;
    alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
    cp->cbot = cp->ctop;
    cp->cprev = cp->cnext = 0;
    /* Construct the object header "by hand". */
    obj->o_alone = 1;
    obj->o_size = pstype->ssize;
    obj->o_type = pstype;
    *pcp = cp;
    return (void *)(obj + 1);
}

/*
 * Add a chunk to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 */
int
ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    chunk_t *cp;

    /* Allow acquisition of this chunk. */
    imem->is_controlled = false;
    imem->large_size = imem->chunk_size;
    imem->limit = max_long;
    imem->gc_status.max_vm = max_long;

    /* Acquire the chunk. */
    cp = alloc_add_chunk(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing chunks, whereas the "immovable" ones
     * allocate in new chunks, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional chunks. */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}

/* Prepare for a GC by clearing the stream list. */
/* This probably belongs somewhere else.... */
void
ialloc_gc_prepare(gs_ref_memory_t * mem)
{       /*
         * We have to unlink every stream from its neighbors,
         * so that referenced streams don't keep all streams around.
         */
    while (mem->streams != 0) {
        stream *s = mem->streams;

        mem->streams = s->next;
        s->prev = s->next = 0;
    }
}

/* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    mem->cfirst = 0;
    mem->clast = 0;
    mem->cc.rcur = 0;
    mem->cc.rtop = 0;
    mem->cc.has_refs = false;
    mem->allocated = 0;
    mem->inherited = 0;
    mem->changes = 0;
    ialloc_reset_free(mem);
}

/* Initialize after a save or GC. */
void
ialloc_reset_free(gs_ref_memory_t * mem)
{
    int i;
    obj_header_t **p;

    mem->lost.objects = 0;
    mem->lost.refs = 0;
    mem->lost.strings = 0;
    mem->cfreed.cp = 0;
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
        *p = 0;
    mem->largest_free_size = 0;
}

/*
 * Set an arbitrary limit so that the amount of allocated VM does not grow
 * indefinitely even when GC is disabled.  Benchmarks have shown that
 * the resulting GC's are infrequent enough not to degrade performance
 * significantly.
 */
#define FORCE_GC_LIMIT 8000000

/* Set the allocation limit after a change in one or more of */
/* vm_threshold, max_vm, or enabled, or after a GC. */
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{       /*
         * The following code is intended to set the limit so that
         * we stop allocating when allocated + previous_status.allocated
         * exceeds the lesser of max_vm or (if GC is enabled)
         * gc_allocated + vm_threshold.
         */
    ulong max_allocated =
        (mem->gc_status.max_vm > mem->previous_status.allocated ?
         mem->gc_status.max_vm - mem->previous_status.allocated :
         0);

    if (mem->gc_status.enabled) {
        ulong limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7('0', "[0]space=%d, max_vm=%ld, prev.alloc=%ld, enabled=%d,\n\
        gc_alloc=%ld, threshold=%ld => limit=%ld\n",
              mem->space, (long)mem->gc_status.max_vm,
              (long)mem->previous_status.allocated,
              mem->gc_status.enabled, (long)mem->gc_allocated,
              (long)mem->gc_status.vm_threshold, (long)mem->limit);
}

/*
 * Free all the memory owned by the allocator, except the allocator itself.
 * Note that this only frees memory at the current save level: the client
 * is responsible for restoring to the outermost level if desired.
 */
private void
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    chunk_t *cp;

    if (free_mask & FREE_ALL_DATA) {
        chunk_t *csucc;

        /*
         * Free the chunks in reverse order, to encourage LIFO behavior.
         * Don't free the chunk holding the allocator itself.
         */
        for (cp = imem->clast; cp != 0; cp = csucc) {
            csucc = cp->cprev;  /* save before freeing */
            if (cp->cbase + sizeof(obj_header_t) != (byte *)mem)
                alloc_free_chunk(cp, imem);
        }
    }
    if (free_mask & FREE_ALL_ALLOCATOR) {
        /* Free the chunk holding the allocator itself. */
        for (cp = imem->clast; cp != 0; cp = cp->cprev)
            if (cp->cbase + sizeof(obj_header_t) == (byte *)mem) {
                alloc_free_chunk(cp, imem);
                break;
            }
    }
}

/* ================ Accessors ================ */

/* Get the size of an object from the header. */
private uint
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
}

/* Get the type of a structure from the header. */
private gs_memory_type_ptr_t
i_object_type(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return ((const obj_header_t *)obj - 1)->o_type;
}

/* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    *pstat = mem->gc_status;
}

/* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    ialloc_set_limit(mem);
}

/* Set VM threshold. */
void
gs_memory_set_vm_threshold(gs_ref_memory_t * mem, long val)
{
    gs_memory_gc_status_t stat;
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;

    gs_memory_gc_status(mem, &stat);
    stat.vm_threshold = val;
    gs_memory_set_gc_status(mem, &stat);
    gs_memory_gc_status(stable, &stat);
    stat.vm_threshold = val;
    gs_memory_set_gc_status(stable, &stat);
}

/* Set VM reclaim. */
void
gs_memory_set_vm_reclaim(gs_ref_memory_t * mem, bool enabled)
{
    gs_memory_gc_status_t stat;
    gs_ref_memory_t * stable = (gs_ref_memory_t *)mem->stable_memory;

    gs_memory_gc_status(mem, &stat);
    stat.enabled = enabled;
    gs_memory_set_gc_status(mem, &stat);
    gs_memory_gc_status(stable, &stat);
    stat.enabled = enabled;
    gs_memory_set_gc_status(stable, &stat);
}

/* ================ Objects ================ */

/* Allocate a small object quickly if possible. */
/* The size must be substantially less than max_uint. */
/* ptr must be declared as obj_header_t *. */
/* pfl must be declared as obj_header_t **. */
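/* Note: the freelist index below rounds the request up to the alignment
   granule -- (size + obj_align_mask) >> log2_obj_align_mod -- so each
   small freelist holds blocks of exactly one aligned size. */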
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
  if ( size <= max_freelist_size &&\
       *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
     )\
  {  ptr = *pfl;\
     *pfl = *(obj_header_t **)ptr;\
     ptr[-1].o_size = size;\
     ptr[-1].o_type = pstype;\
     /* If debugging, clear the block in an attempt to */\
     /* track down uninitialized data errors. */\
     gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
  }\
  else if (size > max_freelist_size &&\
           (ptr = large_freelist_alloc(imem, size)) != 0)\
  {  ptr[-1].o_type = pstype;\
     /* If debugging, clear the block in an attempt to */\
     /* track down uninitialized data errors. */\
     gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
  }\
  else if ( (imem->cc.ctop - (byte *)(ptr = (obj_header_t *)imem->cc.cbot))\
            >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
            size < imem->large_size\
          )\
  {  imem->cc.cbot = (byte *)ptr + obj_size_round(size);\
     ptr->o_alone = 0;\
     ptr->o_size = size;\
     ptr->o_type = pstype;\
     ptr++;\
     /* If debugging, clear the block in an attempt to */\
     /* track down uninitialized data errors. */\
     gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSE_ALLOC\
  }\
  else
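/* The four macros above expand to a single if / else-if / else chain;
   callers must use them in this order and close the final branch with
   their own braces, as i_alloc_bytes below illustrates. */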
private byte *
i_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;

    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
    return (byte *) obj;
}
private byte *
i_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, size, &st_bytes,
                                  ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);

    if (obj == 0)
        return 0;
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
    return (byte *) obj;
}
private void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

    ALLOC_CHECK_SIZE(pstype);
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
    return obj;
}
private void *
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                         client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
    return obj;
}
private byte *
i_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
                   client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_DIRECT, cname);

    if_debug6('A', "[a%d:+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private byte *
i_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
                             uint elt_size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
                                  cname);

    if_debug6('A', "[a%d|+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}
private void *
i_alloc_struct_array(gs_memory_t * mem, uint num_elements,
                     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
#ifdef DEBUG
    if (pstype->enum_ptrs == basic_enum_ptrs) {
        dprintf2("  i_alloc_struct_array: called with incorrect structure type (not element), struct='%s', client='%s'\n",
                 pstype->sname, cname);
        return NULL;            /* fail */
    }
#endif
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d:+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
private void *
i_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
                               gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d|+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}
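
/*
 * i_resize_object below tries three in-place strategies in order -- same
 * rounded size, object at the end of the open chunk (just move cbot), and
 * shrinking via trim_obj -- before punting to allocate/copy/free.
 */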
private void *
i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    ulong old_size = pre_obj_contents_size(pp);
    ulong new_size = (ulong) pstype->ssize * new_num_elements;
    ulong old_size_rounded = obj_align_round(old_size);
    ulong new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

    if (old_size_rounded == new_size_rounded) {
        pp->o_size = new_size;
        new_obj = obj;
    } else
        if ((byte *)obj + old_size_rounded == imem->cc.cbot &&
            imem->cc.ctop - (byte *)obj >= new_size_rounded) {
            imem->cc.cbot = (byte *)obj + new_size_rounded;
            pp->o_size = new_size;
            new_obj = obj;
        } else  /* try to trim the object -- but only if room for a dummy header */
            if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
                trim_obj(imem, obj, new_size, (chunk_t *)0);
                new_obj = obj;
            }
    if (new_obj) {
        if_debug8('A', "[a%d:%c%c ]%s %s(%lu=>%lu) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_size > old_size ? '>' : '<'),
                  (pstype == &st_bytes ? 'b' : '<'),
                  client_name_string(cname),
                  struct_type_name_string(pstype),
                  old_size, new_size, (ulong) obj);
        return new_obj;
    }
    /* Punt. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return new_obj;
}
private void
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));
    uint size, rounded_size;

    if (ptr == 0)
        return;
    pp = (obj_header_t *) ptr - 1;
    pstype = pp->o_type;
#ifdef DEBUG
    if (gs_debug_c('?')) {
        chunk_locator_t cld;

        if (pstype == &st_free) {
            lprintf2("%s: object 0x%lx already free!\n",
                     client_name_string(cname), (ulong) ptr);
            return;             /*gs_abort(); */
        }
        /* Check that this allocator owns the object being freed. */
        cld.memory = imem;
        while ((cld.cp = cld.memory->clast),
               !chunk_locate_ptr(ptr, &cld)
            ) {
            if (!cld.memory->saved) {
                lprintf3("%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
                         client_name_string(cname), (ulong) ptr,
                         (ulong) mem);
                return;         /*gs_abort(); */
            }
            /****** HACK: we know the saved state is the first ******
             ****** member of an alloc_save_t. ******/
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
        }
        /* Check that the object is in the allocated region. */
        if (cld.memory == imem && cld.cp == imem->pcc)
            cld.cp = &imem->cc;
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
                          cld.cp->cbot))
            ) {
            lprintf5("%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
                     client_name_string(cname), (ulong) ptr,
                     (ulong) cld.cp, (ulong) cld.cp->cbase,
                     (ulong) cld.cp->cbot);
            return;             /*gs_abort(); */
        }
    }
#endif
    size = pre_obj_contents_size(pp);
    rounded_size = obj_align_round(size);
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    if ((byte *) ptr + rounded_size == imem->cc.cbot) {
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
        imem->cc.cbot = (byte *) pp;
        /* IFF this object is adjacent to (or below) the byte after the
         * highest free object, do the consolidation within this chunk. */
        if ((byte *)pp <= imem->cc.int_freed_top) {
            consolidate_chunk_free(&(imem->cc), imem);
        }
        return;
    }
    if (pp->o_alone) {
        /*
         * We gave this object its own chunk.  Free the entire chunk,
         * unless it belongs to an older save level, in which case
         * we mustn't overwrite it.
         */
        chunk_locator_t cl;

#ifdef DEBUG
        {
            chunk_locator_t cld;

            cld.memory = imem;
            cld.cp = 0;
            if (gs_debug_c('a'))
                alloc_trace(
                    (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
                    imem, cname, pstype, size, ptr);
        }
#endif
        cl.memory = imem;
        cl.cp = 0;
        if (chunk_locate_ptr(ptr, &cl)) {
            if (!imem->is_controlled)
                alloc_free_chunk(cl.cp, imem);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    }
    if (rounded_size >= sizeof(obj_header_t *)) {
        /*
         * Put the object on a freelist, unless it belongs to
         * an older save level, in which case we mustn't
         * overwrite it.
         */
        imem->cfreed.memory = imem;
        if (chunk_locate(ptr, &imem->cfreed)) {
            obj_header_t **pfl;

            if (size > max_freelist_size) {
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
                if (rounded_size > imem->largest_free_size)
                    imem->largest_free_size = rounded_size;
            } else {
                pfl = &imem->freelists[(size + obj_align_mask) >>
                                       log2_obj_align_mod];
            }
            /* keep track of highest object on a freelist */
            if ((byte *)pp >= imem->cc.int_freed_top)
                imem->cc.int_freed_top = (byte *)ptr + rounded_size;
            pp->o_type = &st_free;      /* don't confuse GC */
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
            *(obj_header_t **) ptr = *pfl;
            *pfl = (obj_header_t *) ptr;
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
                        imem, cname, pstype, size, ptr);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    } else {
        pp->o_type = &st_free;  /* don't confuse GC */
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
    }
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
    imem->lost.objects += obj_size_round(size);
}
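
/* Note: within a chunk, objects grow upward from cbot while strings are
   carved downward from ctop, so the two share the free gap in the middle
   (see the cbot/ctop tests above and in i_alloc_string below). */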
private byte *
i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /*
     * Cycle through the chunks at the current save level, starting
     * with the currently open one.
     */
    chunk_t *cp_orig = imem->pcc;

    if (cp_orig == 0) {
        /* Open an arbitrary chunk. */
        cp_orig = imem->pcc = imem->cfirst;
        alloc_open_chunk(imem);
    }
top:
    if (imem->cc.ctop - imem->cc.cbot > nbytes) {
        if_debug4('A', "[a%d:+> ]%s(%u) = 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) (imem->cc.ctop - nbytes));
        str = imem->cc.ctop -= nbytes;
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    /* Try the next chunk. */
    {
        chunk_t *cp = imem->cc.cnext;

        alloc_close_chunk(imem);
        if (cp == 0)
            cp = imem->cfirst;
        imem->pcc = cp;
        alloc_open_chunk(imem);
        if (cp != cp_orig)
            goto top;
    }
    if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
        string_data_quantum
        ) {                     /* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) {   /* Give it a chunk all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {                    /* Add another chunk. */
        chunk_t *cp =
            alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");

        if (cp == 0)
            return 0;
        alloc_close_chunk(imem);
        imem->pcc = cp;
        imem->cc = *imem->pcc;
        gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
                      imem->cc.climit - imem->cc.cbase);
        goto top;
    }
}
private byte *
i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /* Give it a chunk all its own. */
    uint asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
    chunk_t *cp = alloc_acquire_chunk(imem, (ulong) asize, true,
                                      "large string chunk");

    if (cp == 0)
        return 0;
    str = cp->ctop = cp->climit - nbytes;
    if_debug4('a', "[a%d|+>L]%s(%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname), nbytes,
              (ulong) str);
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
    return str;
}
private byte *
i_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)     /* same size returns the same string */
        return data;
    if (data == imem->cc.ctop &&        /* bottom-most string */
        (new_num < old_num ||
         imem->cc.ctop - imem->cc.cbot > new_num - old_num)
        ) {                     /* Resize in place. */
        ptr = data + old_num - new_num;
        if_debug6('A', "[a%d:%c> ]%s(%u->%u) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_num > old_num ? '>' : '<'),
                  client_name_string(cname), old_num, new_num,
                  (ulong) ptr);
        imem->cc.ctop = ptr;
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
        if (new_num < old_num) {
            /* trim the string and create a free space hole */
            ptr = data;
            imem->lost.strings += old_num - new_num;
            gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                          old_num - new_num);
            if_debug5('A', "[a%d:<> ]%s(%u->%u) 0x%lx\n",
                      alloc_trace_space(imem), client_name_string(cname),
                      old_num, new_num, (ulong)ptr);
        } else {                /* Punt. */
            ptr = gs_alloc_string(mem, new_num, cname);
            if (ptr == 0)
                return 0;
            memcpy(ptr, data, min(old_num, new_num));
            gs_free_string(mem, data, old_num, cname);
        }
    return ptr;
}
private void
i_free_string(gs_memory_t * mem, byte * data, uint nbytes,
              client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (data == imem->cc.ctop) {
        if_debug4('A', "[a%d:-> ]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->cc.ctop += nbytes;
    } else {
        if_debug4('A', "[a%d:->#]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->lost.strings += nbytes;
    }
    gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
}
private gs_memory_t *
i_stable(gs_memory_t *mem)
{
    return mem->stable_memory;
}

private void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    ulong unused = imem->lost.refs + imem->lost.strings;
    ulong inner = 0;

    alloc_close_chunk(imem);
    /* Add up unallocated space within each chunk. */
    /* Also keep track of space allocated to inner chunks, */
    /* which are included in previous_status.allocated. */
    {
        const chunk_t *cp = imem->cfirst;

        while (cp != 0) {
            unused += cp->ctop - cp->cbot;
            if (cp->outer)
                inner += cp->cend - (byte *) cp->chead;
            cp = cp->cnext;
        }
    }
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
        imem->previous_status.used;
    pstat->allocated = imem->allocated +
        imem->previous_status.allocated;
}

private void
i_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
        mem->procs.free_object = i_free_object,
            mem->procs.free_string = i_free_string;
    else
        mem->procs.free_object = gs_ignore_free_object,
            mem->procs.free_string = gs_ignore_free_string;
}

/* ------ Internal procedures ------ */

/* Compute the amount of free object space by scanning free lists. */
private ulong
compute_free_objects(gs_ref_memory_t * mem)
{
    ulong unused = mem->lost.objects;
    int i;

    /* Add up space on free lists. */
    for (i = 0; i < num_freelists; i++) {
        const obj_header_t *pfree;

        for (pfree = mem->freelists[i]; pfree != 0;
             pfree = *(const obj_header_t * const *)pfree
            )
            unused += obj_align_round(pfree[-1].o_size);
    }
    return unused;
}

/* Allocate an object from the large-block freelist. */
private obj_header_t *          /* rets obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, uint size)
{
    /* Scan large object freelist.  We'll grab an object up to 1/8 bigger */
    /* right away, else use best fit of entire scan. */
    uint aligned_size = obj_align_round(size);
    uint aligned_min_size = aligned_size + sizeof(obj_header_t);
    uint aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev = NULL;        /* Initialize against indeterminism. */
    uint best_fit_size = max_uint;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    uint largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;               /* definitely no block large enough */
    while ((pfree = *ppfprev) != 0) {
        uint free_size = obj_align_round(pfree[-1].o_size);

        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break;          /* good enough fit to spare scan of entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free chunk is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }
    /* Remove from freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);
    /* Pre-init block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;
    return best_fit;
}

/* Allocate an object.  This handles all but the fastest, simplest case. */
private obj_header_t *
alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
          alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
        /*
         * Give the object a chunk all its own.  Note that this case does
         * not occur if is_controlled is true.
         */
        ulong asize =
            ((lsize + obj_align_mask) & -obj_align_mod) +
            sizeof(obj_header_t);
        chunk_t *cp =
            alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
                                "large object chunk");

        if (
#if arch_sizeof_long > arch_sizeof_int
            asize > max_uint
#else
            asize < lsize
#endif
            )
            return 0;
        if (cp == 0)
            return 0;
        ptr = (obj_header_t *) cp->cbot;
        cp->cbot += asize;
        ptr->o_alone = 1;
        ptr->o_size = lsize;
    } else {
        /*
         * Cycle through the chunks at the current save level, starting
         * with the currently open one.
         */
        chunk_t *cp_orig = mem->pcc;
        uint asize = obj_size_round((uint) lsize);
        bool allocate_success = false;

        if (lsize > max_freelist_size && (flags & ALLOC_DIRECT)) {
            /* We haven't checked the large block freelist yet. */
            if ((ptr = large_freelist_alloc(mem, lsize)) != 0) {
                --ptr;          /* must point to header */
                goto done;
            }
        }
        if (cp_orig == 0) {
            /* Open an arbitrary chunk. */
            cp_orig = mem->pcc = mem->cfirst;
            alloc_open_chunk(mem);
        }
#define CAN_ALLOC_AT_END(cp)\
  ((cp)->ctop - (byte *) (ptr = (obj_header_t *) (cp)->cbot)\
   > asize + sizeof(obj_header_t))
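        /* Note: the test requires strictly more than asize plus one extra
           obj_header_t of free space between cbot and ctop, leaving at
           least a header's worth of slack between the object area and the
           string area. */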
        do {
            if (CAN_ALLOC_AT_END(&mem->cc)) {
                allocate_success = true;
                break;
            } else if (mem->is_controlled) {
                /* Try consolidating free space. */
                gs_consolidate_free((gs_memory_t *)mem);
                if (CAN_ALLOC_AT_END(&mem->cc)) {
                    allocate_success = true;
                    break;
                }
            }
            /* No luck, go on to the next chunk. */
            {
                chunk_t *cp = mem->cc.cnext;

                alloc_close_chunk(mem);
                if (cp == 0)
                    cp = mem->cfirst;
                mem->pcc = cp;
                alloc_open_chunk(mem);
            }
        } while (mem->pcc != cp_orig);
#ifdef CONSOLIDATE_BEFORE_ADDING_CHUNK
        if (!allocate_success) {
            /*
             * Try consolidating free space before giving up.
             * It's not clear this is a good idea, since it requires quite
             * a lot of computation and doesn't seem to improve things much.
             */
            if (!mem->is_controlled) {  /* already did this if controlled */
                chunk_t *cp = cp_orig;

                alloc_close_chunk(mem);
                do {
                    consolidate_chunk_free(cp, mem);
                    if (CAN_ALLOC_AT_END(cp)) {
                        mem->pcc = cp;
                        alloc_open_chunk(mem);
                        allocate_success = true;
                        break;
                    }
                    if ((cp = cp->cnext) == 0)
                        cp = mem->cfirst;
                } while (cp != cp_orig);
            }
        }
#endif
#undef CAN_ALLOC_AT_END
        if (!allocate_success) {
            /* Add another chunk. */
            chunk_t *cp =
                alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");

            if (cp) {
                /* mem->pcc == cp, mem->cc == *mem->pcc. */
                ptr = (obj_header_t *)cp->cbot;
                allocate_success = true;
            }
        }
        /*
         * If no success, try to scavenge from low free memory.  This is
         * only enabled for controlled memory (currently only async
         * renderer) because it's too much work to prevent it from
         * examining outer save levels in the general case.
         */
        if (allocate_success)
            mem->cc.cbot = (byte *) ptr + asize;
        else if (!mem->is_controlled ||
                 (ptr = scavenge_low_free(mem, (uint)lsize)) == 0)
            return 0;           /* allocation failed */
        ptr->o_alone = 0;
        ptr->o_size = (uint) lsize;
    }
done:
    ptr->o_type = pstype;
#   if IGC_PTR_STABILITY_CHECK
    ptr->d.o.space_id = mem->space_id;
#   endif
    ptr++;
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return ptr;
}

/*
 * Consolidate free objects contiguous to free space at cbot onto the cbot
 * area.  Also keep track of the end of the highest internal free object
 * (int_freed_top).
 */
private void
consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;      /* below all objects in chunk */
    SCAN_CHUNK_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0)
                begin_free = pre;
        } else {
            if (begin_free)
                cp->int_freed_top = (byte *)pre;  /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4('a', "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
                  (ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
                  (ulong) ((byte *) cp->cbot - (byte *) begin_free));
        cp->cbot = (byte *) begin_free;
    }
}

/* Consolidate free objects. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    chunk_t *cp;
    chunk_t *cprev;

    alloc_close_chunk(mem);
    /* Visit chunks in reverse order to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = cprev) {
        cprev = cp->cprev;
        consolidate_chunk_free(cp, mem);
        if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
            /* The entire chunk is free. */
            chunk_t *cnext = cp->cnext;

            if (!mem->is_controlled) {
                alloc_free_chunk(cp, mem);
                if (mem->pcc == cp)
                    mem->pcc =
                        (cnext == 0 ? cprev : cprev == 0 ? cnext :
                         cprev->cbot - cprev->ctop >
                         cnext->cbot - cnext->ctop ? cprev :
                         cnext);
            }
        }
    }
    alloc_open_chunk(mem);
}
private void
i_consolidate_free(gs_memory_t *mem)
{
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
}

/* Try to free up a given amount of space from free space below chunk base. */
private obj_header_t *  /* returns uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
    /* find 1st range of memory that can be glued back together to fill request */
    obj_header_t *found_pre = 0;

    /* Visit chunks in forward order */
    obj_header_t *begin_free = 0;
    uint found_free;
    uint request_size_rounded = obj_size_round(request_size);
    uint need_free = request_size_rounded + sizeof(obj_header_t);  /* room for GC's dummy hdr */
    chunk_t *cp;

    for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
        begin_free = 0;
        found_free = 0;
        SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
            if (pre->o_type == &st_free) {
                if (begin_free == 0) {
                    found_free = 0;
                    begin_free = pre;
                }
                found_free += pre_obj_rounded_size(pre);
                if (begin_free != 0 && found_free >= need_free)
                    break;
            } else
                begin_free = 0;
        END_OBJECTS_SCAN_NO_ABORT
        /* Found a sufficient range of empty memory */
        if (begin_free != 0 && found_free >= need_free) {
            /* Fish the found pieces out of the various freelists */
            remove_range_from_freelist(mem, (char*)begin_free,
                                       (char*)begin_free + found_free);
            /* Prepare the found object */
            found_pre = begin_free;
            found_pre->o_type = &st_free;  /* don't confuse GC if it gets lost */
            found_pre->o_size = found_free - sizeof(obj_header_t);
            /* Chop off the excess tail piece & toss it back into the free pool */
            trim_obj(mem, found_pre + 1, request_size, cp);
        }
    }
    return found_pre;
}

/* Remove a range of memory from a mem's freelists. */
private void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    uint size;
    int i;
    uint removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */
    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
             ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */
    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}
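
/*
 * Worked example (assuming obj_align_mod == 8, hence obj_align_mask == 7
 * and log2_obj_align_mod == 3): a free object with o_size == 20 maps to
 * freelist index (20 + 7) >> 3 == 3, the list of blocks whose rounded
 * size is 24 bytes; any o_size above max_freelist_size maps to
 * LARGE_FREELIST_INDEX instead.
 */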

/* Trim a memory object down to a given size. */
private void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* a trailing dummy obj_header. */
{
    uint rounded_size = obj_align_round(size);
    obj_header_t *pre_obj = obj - 1;
    obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
    uint old_rounded_size = obj_align_round(pre_obj->o_size);
    uint excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);

    /* Trim the object's size down to the desired size. */
    pre_obj->o_size = size;
    if (old_rounded_size == rounded_size)
        return;		/* nothing more to do here */
    /*
     * If the object is alone in its chunk, move cbot to point to the end
     * of the object.
     */
    if (pre_obj->o_alone) {
        if (!cp) {
            mem->cfreed.memory = mem;
            if (chunk_locate(obj, &mem->cfreed)) {
                cp = mem->cfreed.cp;
            }
        }
        if (cp) {
#ifdef DEBUG
            if (cp->cbot != (byte *)obj + old_rounded_size) {
                lprintf3("resizing 0x%lx, old size %u, new size %u, cbot wrong!\n",
                         (ulong)obj, old_rounded_size, size);
                /* gs_abort */
            } else
#endif
            {
                cp->cbot = (byte *)excess_pre;
                return;
            }
        }
        /*
         * Something very weird is going on. This probably shouldn't
         * ever happen, but if it does....
         */
        pre_obj->o_alone = 0;
    }
    /* Turn the excess into a free object. */
    excess_pre->o_type = &st_free;	/* don't confuse the GC */
    excess_pre->o_size = excess_size;
    excess_pre->o_alone = 0;
    if (excess_size >= obj_align_mod) {
        /* Put the excess object on a freelist. */
        obj_header_t **pfl;

        if ((byte *)excess_pre >= mem->cc.int_freed_top)
            mem->cc.int_freed_top = (byte *)excess_pre + excess_size;
        if (excess_size <= max_freelist_size)
            pfl = &mem->freelists[(excess_size + obj_align_mask) >>
                                  log2_obj_align_mod];
        else {
            uint rounded_size = obj_align_round(excess_size);

            pfl = &mem->freelists[LARGE_FREELIST_INDEX];
            if (rounded_size > mem->largest_free_size)
                mem->largest_free_size = rounded_size;
        }
        *(obj_header_t **) (excess_pre + 1) = *pfl;
        *pfl = excess_pre + 1;
        mem->cfreed.memory = mem;
    } else {
        /* The excess piece will be "lost" memory. */
        mem->lost.objects += excess_size + sizeof(obj_header_t);
    }
}
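
/*
 * Worked example (assuming obj_align_mod == 8 and a 16-byte
 * obj_header_t): trimming an object whose old rounded size is 64 bytes
 * down to size == 24 gives rounded_size == 24 and excess_size ==
 * 64 - 24 - 16 == 24.  A 24-byte free object (behind its own 16-byte
 * dummy header) is carved off at obj + 24 and threaded onto
 * freelists[(24 + 7) >> 3].
 */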

/* ================ Roots ================ */

/* Register a root. */
private int
i_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
                void **up, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (rp == NULL) {
        rp = gs_raw_alloc_struct_immovable(imem->non_gc_memory, &st_gc_root_t,
                                           "i_register_root");
        if (rp == 0)
            return_error(gs_error_VMerror);
        rp->free_on_unregister = true;
    } else
        rp->free_on_unregister = false;
    if_debug3('8', "[8]register root(%s) 0x%lx -> 0x%lx\n",
              client_name_string(cname), (ulong)rp, (ulong)up);
    rp->ptype = ptype;
    rp->p = up;
    rp->next = imem->roots;
    imem->roots = rp;
    return 0;
}

/* Unregister a root. */
private void
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    gs_gc_root_t **rpp = &imem->roots;

    if_debug2('8', "[8]unregister root(%s) 0x%lx\n",
              client_name_string(cname), (ulong) rp);
    while (*rpp != rp)
        rpp = &(*rpp)->next;
    *rpp = (*rpp)->next;
    if (rp->free_on_unregister)
        gs_free_object(imem->non_gc_memory, rp, "i_unregister_root");
}
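
/*
 * Usage sketch (illustrative; my_root and my_ptr are hypothetical names):
 * a client that wants the GC to trace and relocate a struct pointer it
 * holds might do
 *
 *	static gs_gc_root_t my_root;
 *	void *my_ptr = ...;
 *	i_register_root((gs_memory_t *)imem, &my_root, ptr_struct_type,
 *	                &my_ptr, "my client");
 *	...
 *	i_unregister_root((gs_memory_t *)imem, &my_root, "my client");
 *
 * Passing rp == NULL instead makes the allocator allocate the root
 * object itself and free it again on unregistration.
 */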

/* ================ Chunks ================ */

public_st_chunk();

/* Insert a chunk in the chain. This is exported for the GC and for */
/* the forget_save operation. */
void
alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
{
    byte *cdata = cp->cbase;
    chunk_t *icp;
    chunk_t *prev;

    /*
     * Allocators tend to allocate in either ascending or descending
     * address order. The loop will handle the latter well; check for
     * the former first.
     */
    if (imem->clast && PTR_GE(cdata, imem->clast->ctop))
        icp = 0;
    else
        for (icp = imem->cfirst; icp != 0 && PTR_GE(cdata, icp->ctop);
             icp = icp->cnext
            );
    cp->cnext = icp;
    if (icp == 0) {		/* add at end of chain */
        prev = imem->clast;
        imem->clast = cp;
    } else {			/* insert before icp */
        prev = icp->cprev;
        icp->cprev = cp;
    }
    cp->cprev = prev;
    if (prev == 0)
        imem->cfirst = cp;
    else
        prev->cnext = cp;
    if (imem->pcc != 0) {
        imem->cc.cnext = imem->pcc->cnext;
        imem->cc.cprev = imem->pcc->cprev;
    }
}

/* Add a chunk for ordinary allocation. */
private chunk_t *
alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
{
    chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);

    if (cp) {
        alloc_close_chunk(mem);
        mem->pcc = cp;
        mem->cc = *mem->pcc;
        gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
                      mem->cc.climit - mem->cc.cbase);
    }
    return cp;
}

/* Acquire a chunk. If we would exceed MaxLocalVM (if relevant), */
/* or if we would exceed the VMThreshold and psignal is NULL, */
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
/* just set the signal and return successfully. */
private chunk_t *
alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
                    client_name_t cname)
{
    gs_memory_t *parent = mem->non_gc_memory;
    chunk_t *cp;
    byte *cdata;

#if arch_sizeof_long > arch_sizeof_int
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);
    if (mem->gc_status.psignal != 0) {
        /* We have a garbage collector. */
        if ((ulong) (mem->allocated) >= mem->limit) {
            mem->gc_status.requested += csize;
            if (mem->limit >= mem->gc_status.max_vm) {
                gs_free_object(parent, cp, cname);
                return 0;
            }
            if_debug4('0', "[0]signaling space=%d, allocated=%ld, limit=%ld, requested=%ld\n",
                      mem->space, (long)mem->allocated,
                      (long)mem->limit, (long)mem->gc_status.requested);
            *mem->gc_status.psignal = mem->gc_status.signal_value;
        }
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
    alloc_link_chunk(cp, mem);
    mem->allocated += st_chunk.ssize + csize;
    return cp;
}
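
/*
 * Illustrative numbers (assumed, not from the source): with limit (the
 * VMThreshold) at 1 MB and gc_status.max_vm at 4 MB, a request that
 * finds mem->allocated already at or past 1 MB merely stores
 * signal_value through psignal so the interpreter will schedule a GC,
 * and the chunk is still acquired; only once limit has been raised to
 * max_vm or beyond does the request fail and return 0.
 */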

/* Initialize the pointers in a chunk. This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
                 chunk_t * outer)
{
    byte *cdata = bot;

    if (outer != 0)
        outer->inner_count++;
    cp->chead = (chunk_head_t *) cdata;
    cdata += sizeof(chunk_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * We allocate a large enough string marking and reloc table
         * to cover the entire chunk.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
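
/*
 * Resulting chunk layout, read off the assignments above (a sketch;
 * proportions are arbitrary).  For a chunk with string space:
 *
 *	bot: chead | cbase = sbase = cbot -> objects grow upward ...
 *	     ... strings grow downward <- ctop (initially == climit) |
 *	     smark table | sreloc table (aliased by sfree1) | cend = top
 *
 * Without strings, climit == cend and the smark/sreloc tables are absent.
 */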

/* Initialize the string freelists in a chunk. */
void
alloc_init_free_strings(chunk_t * cp)
{
    if (cp->sfree1)
        memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
    cp->sfree = 0;
}

/* Close up the current chunk. */
/* This is exported for save/restore and the GC. */
void
alloc_close_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
        *mem->pcc = mem->cc;
#ifdef DEBUG
        if (gs_debug_c('a')) {
            dlprintf1("[a%d]", alloc_trace_space(mem));
            dprintf_chunk("closing chunk", mem->pcc);
        }
#endif
    }
}

/* Reopen the current chunk after a GC or restore. */
void
alloc_open_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
        mem->cc = *mem->pcc;
#ifdef DEBUG
        if (gs_debug_c('a')) {
            dlprintf1("[a%d]", alloc_trace_space(mem));
            dprintf_chunk("opening chunk", mem->pcc);
        }
#endif
    }
}

/* Remove a chunk from the chain. This is exported for the GC. */
void
alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (gs_alloc_debug) {
        /* Check to make sure this chunk belongs to this allocator. */
        const chunk_t *ap = mem->cfirst;

        while (ap != 0 && ap != cp)
            ap = ap->cnext;
        if (ap != cp) {
            lprintf2("unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
                     (ulong) cp, (ulong) mem);
            return;		/*gs_abort(); */
        }
    }
#endif
    if (cp->cprev == 0)
        mem->cfirst = cp->cnext;
    else
        cp->cprev->cnext = cp->cnext;
    if (cp->cnext == 0)
        mem->clast = cp->cprev;
    else
        cp->cnext->cprev = cp->cprev;
    if (mem->pcc != 0) {
        mem->cc.cnext = mem->pcc->cnext;
        mem->cc.cprev = mem->pcc->cprev;
        if (mem->pcc == cp) {
            mem->pcc = 0;
            mem->cc.cbot = mem->cc.ctop = 0;
        }
    }
}

/*
 * Free a chunk. This is exported for the GC. Since we eventually use
 * this to free the chunk containing the allocator itself, we must be
 * careful not to reference anything in the allocator after freeing the
 * chunk data.
 */
void
alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
    gs_memory_t *parent = mem->non_gc_memory;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_chunk(cp, mem);
    mem->allocated -= st_chunk.ssize;
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;
    if (cp->outer == 0) {
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_chunk(data)");
    } else {
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
}

/* Find the chunk for a pointer. */
/* Note that this only searches the current save level. */
/* Since a given save level can't contain both a chunk and an inner chunk */
/* of that chunk, we can stop when is_within_chunk succeeds, and just test */
/* is_in_inner_chunk then. */
bool
chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
{
    register chunk_t *cp = clp->cp;

    if (cp == 0) {
        cp = clp->memory->cfirst;
        if (cp == 0)
            return false;
        /* ptr is in the last chunk often enough to be worth checking for. */
        if (PTR_GE(ptr, clp->memory->clast->cbase))
            cp = clp->memory->clast;
    }
    if (PTR_LT(ptr, cp->cbase)) {
        do {
            cp = cp->cprev;
            if (cp == 0)
                return false;
        }
        while (PTR_LT(ptr, cp->cbase));
        if (PTR_GE(ptr, cp->cend))
            return false;
    } else {
        while (PTR_GE(ptr, cp->cend)) {
            cp = cp->cnext;
            if (cp == 0)
                return false;
        }
        if (PTR_LT(ptr, cp->cbase))
            return false;
    }
    clp->cp = cp;
    return !ptr_is_in_inner_chunk(ptr, cp);
}
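
/*
 * Usage sketch (illustrative): callers zero the locator's cp field to
 * start a fresh search, then reuse the locator across calls to exploit
 * locality, as the allocator itself does with mem->cfreed:
 *
 *	chunk_locator_t loc;
 *	loc.memory = imem;
 *	loc.cp = 0;
 *	if (chunk_locate_ptr(ptr, &loc))
 *	    ... ptr lies in loc.cp, and not in an inner chunk ...
 */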

/* ------ Debugging ------ */

#ifdef DEBUG

#include "string_.h"

inline private bool
obj_in_control_region(const void *obot, const void *otop,
                      const dump_control_t *pdc)
{
    return
        ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
         (pdc->top == NULL || PTR_LT(obot, pdc->top)));
}

const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/*
 * Internal procedure to dump a block of memory, in hex and optionally
 * also as characters.
 */
private void
debug_indent(int indent)
{
    int i;

    for (i = indent; i > 0; --i)
        dputc(' ');
}

private void
debug_dump_contents(const byte * bot, const byte * top, int indent,
                    bool as_chars)
{
    const byte *block;

#define block_size 16
    if (bot >= top)
        return;
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
         block < top; block += block_size
        ) {
        int i;
        char label[12];

        /* Check for repeated blocks. */
        if (block >= bot + block_size &&
            block <= top - (block_size * 2) &&
            !memcmp(block, block - block_size, block_size) &&
            !memcmp(block, block + block_size, block_size)
            ) {
            if (block < bot + block_size * 2 ||
                memcmp(block, block - block_size * 2, block_size)
                ) {
                debug_indent(indent);
                dputs(" ...\n");
            }
            continue;
        }
        sprintf(label, "0x%lx:", (ulong) block);
        debug_indent(indent);
        dputs(label);
        for (i = 0; i < block_size; ++i) {
            /* Two spaces between groups of 4 bytes, one space otherwise. */
            const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

            dputs(sepr);
            if (block + i >= bot && block + i < top)
                dprintf1("%02x", block[i]);
            else
                dputs("  ");
        }
        dputc('\n');
        if (as_chars) {
            debug_indent(indent + strlen(label));
            for (i = 0; i < block_size; ++i) {
                byte ch;

                if ((i & 3) == 0 && i != 0)
                    dputc(' ');
                if (block + i >= bot && block + i < top &&
                    (ch = block[i]) >= 32 && ch <= 126
                    )
                    dprintf1("  %c", ch);
                else
                    dputs("   ");
            }
            dputc('\n');
        }
    }
#undef block_size
}
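
/*
 * Sample of the resulting format (illustrative values, as_chars set):
 *
 *	0x804b010: 47 68 6f 73  74 73 63 72  69 70 74 00  01 02 03 04
 *	              G  h  o  s   t  s  c  r   i  p  t
 *
 * The interior of a run of three or more identical 16-byte blocks is
 * elided as " ...".
 */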

/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void
debug_print_object(const gs_memory_t *mem, const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dprintf3(" pre=0x%lx(obj=0x%lx) size=%lu", (ulong) pre, (ulong) obj,
             size);
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
    case dump_do_type_addresses + dump_do_no_types:	/* addresses only */
        dprintf1(" type=0x%lx", (ulong) type);
        break;
    case dump_do_type_addresses:	/* addresses & names */
        dprintf2(" type=%s(0x%lx)", struct_type_name_string(type),
                 (ulong) type);
        break;
    case 0:		/* names only */
        dprintf1(" type=%s", struct_type_name_string(type));
        /* falls through */
    case dump_do_no_types:	/* nothing */
        ;
    }
    if (options & dump_do_marks) {
        dprintf2(" smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dputc('\n');
    if (type == &st_free)
        return;
    if (options & dump_do_pointers) {
        struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
        uint index = 0;
        enum_ptr_t eptr;
        gs_ptr_type_t ptype;

        if (proc != gs_no_struct_enum_ptrs)
            for (; (ptype = (*proc)(mem, pre + 1, size, index, &eptr, type, NULL)) != 0;
                 ++index
                ) {
                const void *ptr = eptr.ptr;

                dprintf1(" ptr %u: ", index);
                if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
                    const gs_const_string *str = (const gs_const_string *)ptr;

                    dprintf2("0x%lx(%u)", (ulong) str->data, str->size);
                    if (options & dump_do_pointed_strings) {
                        dputs(" =>\n");
                        debug_dump_contents(str->data, str->data + str->size, 6,
                                            true);
                    } else {
                        dputc('\n');
                    }
                } else {
                    dprintf1((PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
                              "(0x%lx)\n" : "0x%lx\n"), (ulong) ptr);
                }
            }
    }
    if (options & dump_do_contents) {
        debug_dump_contents((const byte *)obj, (const byte *)obj + size,
                            0, false);
    }
}

/* Print the contents of a chunk with the given options. */
/* Relevant options: all. */
void
debug_dump_chunk(const gs_memory_t *mem, const chunk_t * cp, const dump_control_t * control)
{
    dprintf1("chunk at 0x%lx:\n", (ulong) cp);
    dprintf3(" chead=0x%lx cbase=0x%lx sbase=0x%lx\n",
             (ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
    dprintf3(" rcur=0x%lx rtop=0x%lx cbot=0x%lx\n",
             (ulong) cp->rcur, (ulong) cp->rtop, (ulong) cp->cbot);
    dprintf4(" ctop=0x%lx climit=0x%lx smark=0x%lx, size=%u\n",
             (ulong) cp->ctop, (ulong) cp->climit, (ulong) cp->smark,
             cp->smark_size);
    dprintf2(" sreloc=0x%lx cend=0x%lx\n",
             (ulong) cp->sreloc, (ulong) cp->cend);
    dprintf5(" cprev=0x%lx cnext=0x%lx outer=0x%lx inner_count=%u has_refs=%s\n",
             (ulong) cp->cprev, (ulong) cp->cnext, (ulong) cp->outer,
             cp->inner_count, (cp->has_refs ? "true" : "false"));
    dprintf2(" sfree1=0x%lx sfree=0x%x\n",
             (ulong) cp->sfree1, cp->sfree);
    if (control->options & dump_do_strings) {
        debug_dump_contents((control->bottom == 0 ? cp->ctop :
                             max(control->bottom, cp->ctop)),
                            (control->top == 0 ? cp->climit :
                             min(control->top, cp->climit)),
                            0, true);
    }
    SCAN_CHUNK_OBJECTS(cp)
    DO_ALL
        if (obj_in_control_region(pre + 1,
                                  (const byte *)(pre + 1) + size,
                                  control)
            )
            debug_print_object(mem, pre + 1, control);
    END_OBJECTS_SCAN_NO_ABORT
}

void
debug_print_chunk(const gs_memory_t *mem, const chunk_t * cp)
{
    dump_control_t control;

    control = dump_control_default;
    debug_dump_chunk(mem, cp, &control);
}

/* Print the contents of all chunks managed by an allocator. */
/* Relevant options: all. */
void
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
{
    const chunk_t *mcp;

    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        if (obj_in_control_region(cp->cbase, cp->cend, control))
            debug_dump_chunk((const gs_memory_t *)mem, cp, control);
    }
}
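
/*
 * Usage sketch (illustrative): to dump everything an allocator owns,
 * e.g. from a debugger or a temporary diagnostic, one might write
 *
 *	debug_dump_memory(imem, &dump_control_all);
 *
 * or fill in a dump_control_t with dump_do_default and non-NULL
 * bottom/top pointers to restrict the dump to an address window.
 */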

/* Find all the objects that contain a given pointer. */
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    dump_control_t control;
    const chunk_t *mcp;

    control.options = 0;
    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
            struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
            uint index = 0;
            enum_ptr_t eptr;

            if (proc)		/* doesn't trace refs */
                for (; (*proc)((const gs_memory_t *)mem, pre + 1, size, index,
                               &eptr, pre->o_type, NULL);
                     ++index)
                    if (eptr.ptr == target) {
                        dprintf1("Index %d in", index);
                        debug_print_object((const gs_memory_t *)mem, pre + 1, &control);
                    }
        END_OBJECTS_SCAN_NO_ABORT
    }
}

#endif /* DEBUG */