/* Copyright (C) 1995, 2000 Aladdin Enterprises.  All rights reserved.

   This file is part of AFPL Ghostscript.

   AFPL Ghostscript is distributed with NO WARRANTY OF ANY KIND.  No author
   or distributor accepts any responsibility for the consequences of using
   it, or for whether it serves any particular purpose or works at all,
   unless he or she says so in writing.  Refer to the Aladdin Free Public
   License (the "License") for full details.

   Every copy of AFPL Ghostscript must include a copy of the License,
   normally in a plain ASCII text file named PUBLIC.  The License grants you
   the right to copy, modify and redistribute AFPL Ghostscript, but only
   under certain conditions described in the License.  Among other things,
   the License requires that the copyright notice and this notice be
   preserved on all copies.
 */

/*$Id: gsalloc.c,v 1.11 2001/09/05 18:03:39 lpd Exp $ */
/* Standard memory allocator */
#include "gx.h"
#include "memory_.h"
#include "gserrors.h"
#include "gsexit.h"
#include "gsmdebug.h"
#include "gsstruct.h"
#include "gxalloc.h"
#include "stream.h"             /* for clearing stream list */

/*
 * This allocator produces tracing messages of the form
 *      [aNMOTS]...
 * where
 *   N is the VM space number, +1 if we are allocating from stable memory.
 *   M is : for movable objects, | for immovable,
 *   O is {alloc = +, free = -, grow = >, shrink = <},
 *   T is {bytes = b, object = <, ref = $, string = >}, and
 *   S is {small freelist = f, large freelist = F, LIFO = space,
 *      own chunk = L, lost = #, lost own chunk = ~, other = .}.
 */
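/*
 * Illustrative decoding (not in the original source; the names and
 * numbers are invented): a trace line such as
 *      [a1:+<f]my_client gs_font(572) = 0x8a3f10
 * reads as: VM space 1, movable (:), alloc (+), struct object (<),
 * satisfied from a small freelist (f), for client "my_client", type
 * gs_font, 572 bytes, at address 0x8a3f10.
 */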
#ifdef DEBUG
private int
alloc_trace_space(const gs_ref_memory_t *imem)
{
    return imem->space + (imem->stable_memory == (const gs_memory_t *)imem);
}
private void
alloc_trace(const char *chars, gs_ref_memory_t * imem, client_name_t cname,
            gs_memory_type_ptr_t stype, uint size, const void *ptr)
{
    if_debug7('A', "[a%d%s]%s %s(%u) %s0x%lx\n",
              alloc_trace_space(imem), chars, client_name_string(cname),
              (ptr == 0 || stype == 0 ? "" :
               struct_type_name_string(stype)),
              size, (chars[1] == '+' ? "= " : ""), (ulong) ptr);
}
private bool
alloc_size_is_ok(gs_memory_type_ptr_t stype)
{
    return (stype->ssize > 0 && stype->ssize < 0x100000);
}
# define ALLOC_CHECK_SIZE(stype)\
    BEGIN\
      if (!alloc_size_is_ok(stype)) {\
        lprintf2("size of struct type 0x%lx is 0x%lx!\n",\
                 (ulong)(stype), (ulong)((stype)->ssize));\
        return 0;\
      }\
    END
#else
# define alloc_trace(chars, imem, cname, stype, size, ptr) DO_NOTHING
# define ALLOC_CHECK_SIZE(stype) DO_NOTHING
#endif

/*
 * The structure descriptor for allocators.  Even though allocators
 * are allocated outside GC space, they reference objects within it.
 */
public_st_ref_memory();
private
ENUM_PTRS_BEGIN(ref_memory_enum_ptrs) return 0;
ENUM_PTR3(0, gs_ref_memory_t, streams, names_array, changes);
ENUM_PTR(3, gs_ref_memory_t, saved);
ENUM_PTRS_END
private RELOC_PTRS_WITH(ref_memory_reloc_ptrs, gs_ref_memory_t *mptr)
{
    RELOC_PTR(gs_ref_memory_t, streams);
    RELOC_PTR(gs_ref_memory_t, names_array);
    RELOC_PTR(gs_ref_memory_t, changes);
    /* Don't relocate the saved pointer now -- see igc.c for details. */
    mptr->reloc_saved = RELOC_OBJ(mptr->saved);
}
RELOC_PTRS_END
/*
 * Define the flags for alloc_obj, which implements all but the fastest
 * case of allocation.
 */
typedef enum {
    ALLOC_IMMOVABLE = 1,
    ALLOC_DIRECT = 2            /* called directly, without fast-case checks */
} alloc_flags_t;

/* Forward references */
private void remove_range_from_freelist(P3(gs_ref_memory_t *mem, void *bottom, void *top));
private obj_header_t *large_freelist_alloc(P2(gs_ref_memory_t *mem, uint size));
private obj_header_t *scavenge_low_free(P2(gs_ref_memory_t *mem, unsigned request_size));
private ulong compute_free_objects(P1(gs_ref_memory_t *));
private obj_header_t *alloc_obj(P5(gs_ref_memory_t *, ulong, gs_memory_type_ptr_t, alloc_flags_t, client_name_t));
private void consolidate_chunk_free(P2(chunk_t *cp, gs_ref_memory_t *mem));
private void trim_obj(P4(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp));
private chunk_t *alloc_acquire_chunk(P4(gs_ref_memory_t *, ulong, bool, client_name_t));
private chunk_t *alloc_add_chunk(P3(gs_ref_memory_t *, ulong, client_name_t));
void alloc_close_chunk(P1(gs_ref_memory_t *));

/*
 * Define the standard implementation (with garbage collection)
 * of Ghostscript's memory manager interface.
 */

/* Raw memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes_immovable);
private gs_memory_proc_resize_object(i_resize_object);
private gs_memory_proc_free_object(i_free_object);
private gs_memory_proc_stable(i_stable);
private gs_memory_proc_status(i_status);
private gs_memory_proc_free_all(i_free_all);
private gs_memory_proc_consolidate_free(i_consolidate_free);

/* Object memory procedures */
private gs_memory_proc_alloc_bytes(i_alloc_bytes);
private gs_memory_proc_alloc_struct(i_alloc_struct);
private gs_memory_proc_alloc_struct(i_alloc_struct_immovable);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array);
private gs_memory_proc_alloc_byte_array(i_alloc_byte_array_immovable);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array);
private gs_memory_proc_alloc_struct_array(i_alloc_struct_array_immovable);
private gs_memory_proc_object_size(i_object_size);
private gs_memory_proc_object_type(i_object_type);
private gs_memory_proc_alloc_string(i_alloc_string);
private gs_memory_proc_alloc_string(i_alloc_string_immovable);
private gs_memory_proc_resize_string(i_resize_string);
private gs_memory_proc_free_string(i_free_string);
private gs_memory_proc_register_root(i_register_root);
private gs_memory_proc_unregister_root(i_unregister_root);
private gs_memory_proc_enable_free(i_enable_free);

/* We export the procedures for subclasses. */
const gs_memory_procs_t gs_ref_memory_procs =
{
    /* Raw memory procedures */
    i_alloc_bytes_immovable,
    i_resize_object,
    i_free_object,
    i_stable,
    i_status,
    i_free_all,
    i_consolidate_free,
    /* Object memory procedures */
    i_alloc_bytes,
    i_alloc_struct,
    i_alloc_struct_immovable,
    i_alloc_byte_array,
    i_alloc_byte_array_immovable,
    i_alloc_struct_array,
    i_alloc_struct_array_immovable,
    i_object_size,
    i_object_type,
    i_alloc_string,
    i_alloc_string_immovable,
    i_resize_string,
    i_free_string,
    i_register_root,
    i_unregister_root,
    i_enable_free
};

/*
 * Allocate and mostly initialize the state of an allocator (system, global,
 * or local).  Does not initialize global or space.
 */
private void *ialloc_solo(P3(gs_raw_memory_t *, gs_memory_type_ptr_t,
                             chunk_t **));
gs_ref_memory_t *
ialloc_alloc_state(gs_raw_memory_t * parent, uint chunk_size)
{
    chunk_t *cp;
    gs_ref_memory_t *iimem = ialloc_solo(parent, &st_ref_memory, &cp);

    if (iimem == 0)
        return 0;
    iimem->stable_memory = (gs_memory_t *)iimem;
    iimem->procs = gs_ref_memory_procs;
    iimem->parent = parent;
    iimem->chunk_size = chunk_size;
    iimem->large_size = ((chunk_size / 4) & -obj_align_mod) + 1;
    iimem->is_controlled = false;
    iimem->gc_status.vm_threshold = chunk_size * 3L;
    iimem->gc_status.max_vm = max_long;
    iimem->gc_status.psignal = NULL;
    iimem->gc_status.enabled = false;
    iimem->previous_status.allocated = 0;
    iimem->previous_status.used = 0;
    ialloc_reset(iimem);
    iimem->cfirst = iimem->clast = cp;
    ialloc_set_limit(iimem);
    iimem->cc.cbot = iimem->cc.ctop = 0;
    iimem->pcc = 0;
    iimem->save_level = 0;
    iimem->new_mask = 0;
    iimem->test_mask = ~0;
    iimem->streams = 0;
    iimem->names_array = 0;
    iimem->roots = 0;
    iimem->num_contexts = 0;
    iimem->saved = 0;
    return iimem;
}

/* Allocate a 'solo' object with its own chunk. */
private void *
ialloc_solo(gs_raw_memory_t * parent, gs_memory_type_ptr_t pstype,
            chunk_t ** pcp)
{   /*
     * We can't assume that the parent uses the same object header
     * that we do, but the GC requires that allocators have
     * such a header.  Therefore, we prepend one explicitly.
     */
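    /*
     * Illustrative sketch (not in the original source): the solo chunk
     * acquired below is laid out as
     *      cdata -> [chunk_head_t][obj_header_t][object data, ssize bytes]
     * rounded up to obj_align_mod, so (obj + 1) is the address returned
     * to the caller.
     */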
    chunk_t *cp =
        gs_raw_alloc_struct_immovable(parent, &st_chunk,
                                      "ialloc_solo(chunk)");
    uint csize =
        ROUND_UP(sizeof(chunk_head_t) + sizeof(obj_header_t) +
                 pstype->ssize,
                 obj_align_mod);
    byte *cdata = gs_alloc_bytes_immovable(parent, csize, "ialloc_solo");
    obj_header_t *obj = (obj_header_t *) (cdata + sizeof(chunk_head_t));

    if (cp == 0 || cdata == 0)
        return 0;
    alloc_init_chunk(cp, cdata, cdata + csize, false, (chunk_t *) NULL);
    cp->cbot = cp->ctop;
    cp->cprev = cp->cnext = 0;
    /* Construct the object header "by hand". */
    obj->o_alone = 1;
    obj->o_size = pstype->ssize;
    obj->o_type = pstype;
    *pcp = cp;
    return (void *)(obj + 1);
}

/*
 * Add a chunk to an externally controlled allocator.  Such allocators
 * allocate all objects as immovable, are not garbage-collected, and
 * don't attempt to acquire additional memory on their own.
 */
int
ialloc_add_chunk(gs_ref_memory_t *imem, ulong space, client_name_t cname)
{
    chunk_t *cp;

    /* Allow acquisition of this chunk. */
    imem->is_controlled = false;
    imem->large_size = imem->chunk_size;
    imem->limit = max_long;
    imem->gc_status.max_vm = max_long;

    /* Acquire the chunk. */
    cp = alloc_add_chunk(imem, space, cname);

    /*
     * Make all allocations immovable.  Since the "movable" allocators
     * allocate within existing chunks, whereas the "immovable" ones
     * allocate in new chunks, we equate the latter to the former, even
     * though this seems backwards.
     */
    imem->procs.alloc_bytes_immovable = imem->procs.alloc_bytes;
    imem->procs.alloc_struct_immovable = imem->procs.alloc_struct;
    imem->procs.alloc_byte_array_immovable = imem->procs.alloc_byte_array;
    imem->procs.alloc_struct_array_immovable = imem->procs.alloc_struct_array;
    imem->procs.alloc_string_immovable = imem->procs.alloc_string;

    /* Disable acquisition of additional chunks. */
    imem->is_controlled = true;
    imem->limit = 0;

    return (cp ? 0 : gs_note_error(gs_error_VMerror));
}

/* Prepare for a GC by clearing the stream list. */
/* This probably belongs somewhere else.... */
void
ialloc_gc_prepare(gs_ref_memory_t * mem)
{   /*
     * We have to unlink every stream from its neighbors,
     * so that referenced streams don't keep all streams around.
     */
    while (mem->streams != 0) {
        stream *s = mem->streams;

        mem->streams = s->next;
        s->prev = s->next = 0;
    }
}

/* Initialize after a save. */
void
ialloc_reset(gs_ref_memory_t * mem)
{
    mem->cfirst = 0;
    mem->clast = 0;
    mem->cc.rcur = 0;
    mem->cc.rtop = 0;
    mem->cc.has_refs = false;
    mem->allocated = 0;
    mem->inherited = 0;
    mem->changes = 0;
    ialloc_reset_free(mem);
}

/* Initialize after a save or GC. */
void
ialloc_reset_free(gs_ref_memory_t * mem)
{
    int i;
    obj_header_t **p;

    mem->lost.objects = 0;
    mem->lost.refs = 0;
    mem->lost.strings = 0;
    mem->cfreed.cp = 0;
    for (i = 0, p = &mem->freelists[0]; i < num_freelists; i++, p++)
        *p = 0;
    mem->largest_free_size = 0;
}

/*
 * Set an arbitrary limit so that the amount of allocated VM does not grow
 * indefinitely even when GC is disabled.  Benchmarks have shown that
 * the resulting GC's are infrequent enough not to degrade performance
 * significantly.
 */
#define FORCE_GC_LIMIT 8000000

/* Set the allocation limit after a change in one or more of */
/* vm_threshold, max_vm, or enabled, or after a GC. */
void
ialloc_set_limit(register gs_ref_memory_t * mem)
{   /*
     * The following code is intended to set the limit so that
     * we stop allocating when allocated + previous_status.allocated
     * exceeds the lesser of max_vm or (if GC is enabled)
     * gc_allocated + vm_threshold.
     */
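    /*
     * Worked example (illustrative figures, not from the original source):
     * with max_vm = 20M, previous_status.allocated = 4M, GC enabled,
     * gc_allocated = 6M and vm_threshold = 3M:
     *      max_allocated = 20M - 4M = 16M
     *      limit = (6M + 3M) - 4M = 5M
     * so mem->limit = min(5M, 16M) = 5M.
     */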
    ulong max_allocated =
        (mem->gc_status.max_vm > mem->previous_status.allocated ?
         mem->gc_status.max_vm - mem->previous_status.allocated :
         0);

    if (mem->gc_status.enabled) {
        ulong limit = mem->gc_allocated + mem->gc_status.vm_threshold;

        if (limit < mem->previous_status.allocated)
            mem->limit = 0;
        else {
            limit -= mem->previous_status.allocated;
            mem->limit = min(limit, max_allocated);
        }
    } else
        mem->limit = min(max_allocated, mem->gc_allocated + FORCE_GC_LIMIT);
    if_debug7('0', "[0]space=%d, max_vm=%ld, prev.alloc=%ld, enabled=%d,\n\
        gc_alloc=%ld, threshold=%ld => limit=%ld\n",
              mem->space, (long)mem->gc_status.max_vm,
              (long)mem->previous_status.allocated,
              mem->gc_status.enabled, (long)mem->gc_allocated,
              (long)mem->gc_status.vm_threshold, (long)mem->limit);
}

/*
 * Free all the memory owned by the allocator, except the allocator itself.
 * Note that this only frees memory at the current save level: the client
 * is responsible for restoring to the outermost level if desired.
 */
private void
i_free_all(gs_memory_t * mem, uint free_mask, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    chunk_t *cp;

    if (free_mask & FREE_ALL_DATA) {
        chunk_t *csucc;

        /*
         * Free the chunks in reverse order, to encourage LIFO behavior.
         * Don't free the chunk holding the allocator itself.
         */
        for (cp = imem->clast; cp != 0; cp = csucc) {
            csucc = cp->cprev;  /* save before freeing */
            if (cp->cbase + sizeof(obj_header_t) != (byte *)mem)
                alloc_free_chunk(cp, imem);
        }
    }
    if (free_mask & FREE_ALL_ALLOCATOR) {
        /* Free the chunk holding the allocator itself. */
        for (cp = imem->clast; cp != 0; cp = cp->cprev)
            if (cp->cbase + sizeof(obj_header_t) == (byte *)mem) {
                alloc_free_chunk(cp, imem);
                break;
            }
    }
}

/* ================ Accessors ================ */

/* Get the size of an object from the header. */
private uint
i_object_size(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return pre_obj_contents_size((const obj_header_t *)obj - 1);
}

/* Get the type of a structure from the header. */
private gs_memory_type_ptr_t
i_object_type(gs_memory_t * mem, const void /*obj_header_t */ *obj)
{
    return ((const obj_header_t *)obj - 1)->o_type;
}

/* Get the GC status of a memory. */
void
gs_memory_gc_status(const gs_ref_memory_t * mem, gs_memory_gc_status_t * pstat)
{
    *pstat = mem->gc_status;
}

/* Set the GC status of a memory. */
void
gs_memory_set_gc_status(gs_ref_memory_t * mem, const gs_memory_gc_status_t * pstat)
{
    mem->gc_status = *pstat;
    ialloc_set_limit(mem);
}

/* ================ Objects ================ */

/* Allocate a small object quickly if possible. */
/* The size must be substantially less than max_uint. */
/* ptr must be declared as obj_header_t *. */
/* pfl must be declared as obj_header_t **. */
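/*
 * Illustrative note (not in the original source; figures assume
 * obj_align_mod = 8): the freelist index is the request rounded up to the
 * next alignment quantum, (size + obj_align_mask) >> log2_obj_align_mod,
 * so a 20-byte request maps to index (20 + 7) >> 3 = 3, the list of
 * 24-byte blocks.
 */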
#define IF_FREELIST_ALLOC(ptr, imem, size, pstype, pfl)\
    if ( size <= max_freelist_size &&\
         *(pfl = &imem->freelists[(size + obj_align_mask) >> log2_obj_align_mod]) != 0\
       )\
    {   ptr = *pfl;\
        *pfl = *(obj_header_t **)ptr;\
        ptr[-1].o_size = size;\
        ptr[-1].o_type = pstype;\
        /* If debugging, clear the block in an attempt to */\
        /* track down uninitialized data errors. */\
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_BIG_FREELIST_ALLOC(ptr, imem, size, pstype)\
    }\
    else if (size > max_freelist_size &&\
             (ptr = large_freelist_alloc(imem, size)) != 0)\
    {   ptr[-1].o_type = pstype;\
        /* If debugging, clear the block in an attempt to */\
        /* track down uninitialized data errors. */\
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSEIF_LIFO_ALLOC(ptr, imem, size, pstype)\
    }\
    else if ( (imem->cc.ctop - (byte *)(ptr = (obj_header_t *)imem->cc.cbot))\
              >= size + (obj_align_mod + sizeof(obj_header_t) * 2) &&\
              size < imem->large_size\
            )\
    {   imem->cc.cbot = (byte *)ptr + obj_size_round(size);\
        ptr->o_alone = 0;\
        ptr->o_size = size;\
        ptr->o_type = pstype;\
        ptr++;\
        /* If debugging, clear the block in an attempt to */\
        /* track down uninitialized data errors. */\
        gs_alloc_fill(ptr, gs_alloc_fill_alloc, size);
#define ELSE_ALLOC\
    }\
    else

private byte *
i_alloc_bytes(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;
    obj_header_t **pfl;

    IF_FREELIST_ALLOC(obj, imem, size, &st_bytes, pfl)
        alloc_trace(":+bf", imem, cname, NULL, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+bF", imem, cname, NULL, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, &st_bytes)
        alloc_trace(":+b ", imem, cname, NULL, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, &st_bytes, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+b.", imem, cname, NULL, size, obj);
    }
    return (byte *) obj;
}

private byte *
i_alloc_bytes_immovable(gs_memory_t * mem, uint size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, size, &st_bytes,
                                  ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);

    if (obj == 0)
        return 0;
    alloc_trace("|+b.", imem, cname, NULL, size, obj);
    return (byte *) obj;
}

private void *
i_alloc_struct(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
               client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;
    obj_header_t **pfl;

    ALLOC_CHECK_SIZE(pstype);
    IF_FREELIST_ALLOC(obj, imem, size, pstype, pfl)
        alloc_trace(":+<f", imem, cname, pstype, size, obj);
    ELSEIF_BIG_FREELIST_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+<F", imem, cname, pstype, size, obj);
    ELSEIF_LIFO_ALLOC(obj, imem, size, pstype)
        alloc_trace(":+< ", imem, cname, pstype, size, obj);
    ELSE_ALLOC
    {
        obj = alloc_obj(imem, size, pstype, 0, cname);
        if (obj == 0)
            return 0;
        alloc_trace(":+<.", imem, cname, pstype, size, obj);
    }
    return obj;
}

private void *
i_alloc_struct_immovable(gs_memory_t * mem, gs_memory_type_ptr_t pstype,
                         client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    uint size = pstype->ssize;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem, size, pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    alloc_trace("|+<.", imem, cname, pstype, size, obj);
    return obj;
}

private byte *
i_alloc_byte_array(gs_memory_t * mem, uint num_elements, uint elt_size,
                   client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_DIRECT, cname);

    if_debug6('A', "[a%d:+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}

private byte *
i_alloc_byte_array_immovable(gs_memory_t * mem, uint num_elements,
                             uint elt_size, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj = alloc_obj(imem, (ulong) num_elements * elt_size,
                                  &st_bytes, ALLOC_IMMOVABLE | ALLOC_DIRECT,
                                  cname);

    if_debug6('A', "[a%d|+b.]%s -bytes-*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              (ulong) num_elements * elt_size,
              num_elements, elt_size, (ulong) obj);
    return (byte *) obj;
}

private void *
i_alloc_struct_array(gs_memory_t * mem, uint num_elements,
                     gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d:+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}

private void *
i_alloc_struct_array_immovable(gs_memory_t * mem, uint num_elements,
                               gs_memory_type_ptr_t pstype, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *obj;

    ALLOC_CHECK_SIZE(pstype);
    obj = alloc_obj(imem,
                    (ulong) num_elements * pstype->ssize,
                    pstype, ALLOC_IMMOVABLE | ALLOC_DIRECT, cname);
    if_debug7('A', "[a%d|+<.]%s %s*(%lu=%u*%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname),
              struct_type_name_string(pstype),
              (ulong) num_elements * pstype->ssize,
              num_elements, pstype->ssize, (ulong) obj);
    return (char *)obj;
}

private void *
i_resize_object(gs_memory_t * mem, void *obj, uint new_num_elements,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp = (obj_header_t *) obj - 1;
    gs_memory_type_ptr_t pstype = pp->o_type;
    ulong old_size = pre_obj_contents_size(pp);
    ulong new_size = (ulong) pstype->ssize * new_num_elements;
    ulong old_size_rounded = obj_align_round(old_size);
    ulong new_size_rounded = obj_align_round(new_size);
    void *new_obj = NULL;

    if (old_size_rounded == new_size_rounded) {
        pp->o_size = new_size;
        new_obj = obj;
    } else
        if ((byte *)obj + old_size_rounded == imem->cc.cbot &&
            imem->cc.ctop - (byte *)obj >= new_size_rounded ) {
            imem->cc.cbot = (byte *)obj + new_size_rounded;
            pp->o_size = new_size;
            new_obj = obj;
        } else  /* try to trim the object -- but only if there is room for a dummy header */
            if (new_size_rounded + sizeof(obj_header_t) <= old_size_rounded) {
                trim_obj(imem, obj, new_size, (chunk_t *)0);
                new_obj = obj;
            }
    if (new_obj) {
        if_debug8('A', "[a%d:%c%c ]%s %s(%lu=>%lu) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_size > old_size ? '>' : '<'),
                  (pstype == &st_bytes ? 'b' : '<'),
                  client_name_string(cname),
                  struct_type_name_string(pstype),
                  old_size, new_size, (ulong) obj);
        return new_obj;
    }
    /* Punt. */
    new_obj = gs_alloc_struct_array(mem, new_num_elements, void,
                                    pstype, cname);
    if (new_obj == 0)
        return 0;
    memcpy(new_obj, obj, min(old_size, new_size));
    gs_free_object(mem, obj, cname);
    return new_obj;
}

private void
i_free_object(gs_memory_t * mem, void *ptr, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    obj_header_t *pp;
    gs_memory_type_ptr_t pstype;
    struct_proc_finalize((*finalize));
    uint size, rounded_size;

    if (ptr == 0)
        return;
    pp = (obj_header_t *) ptr - 1;
    pstype = pp->o_type;
#ifdef DEBUG
    if (gs_debug_c('?')) {
        chunk_locator_t cld;

        if (pstype == &st_free) {
            lprintf2("%s: object 0x%lx already free!\n",
                     client_name_string(cname), (ulong) ptr);
            return;             /*gs_abort(); */
        }
        /* Check that this allocator owns the object being freed. */
        cld.memory = imem;
        while ((cld.cp = cld.memory->clast),
               !chunk_locate_ptr(ptr, &cld)
            ) {
            if (!cld.memory->saved) {
                lprintf3("%s: freeing 0x%lx, not owned by memory 0x%lx!\n",
                         client_name_string(cname), (ulong) ptr,
                         (ulong) mem);
                return;         /*gs_abort(); */
            }
            /****** HACK: we know the saved state is the first ******
             ****** member of an alloc_save_t. ******/
            cld.memory = (gs_ref_memory_t *) cld.memory->saved;
        }
        /* Check that the object is in the allocated region. */
        if (cld.memory == imem && cld.cp == imem->pcc)
            cld.cp = &imem->cc;
        if (!(PTR_BETWEEN((const byte *)pp, cld.cp->cbase,
                          cld.cp->cbot))
            ) {
            lprintf5("%s: freeing 0x%lx,\n\toutside chunk 0x%lx cbase=0x%lx, cbot=0x%lx!\n",
                     client_name_string(cname), (ulong) ptr,
                     (ulong) cld.cp, (ulong) cld.cp->cbase,
                     (ulong) cld.cp->cbot);
            return;             /*gs_abort(); */
        }
    }
#endif
    size = pre_obj_contents_size(pp);
    rounded_size = obj_align_round(size);
    finalize = pstype->finalize;
    if (finalize != 0) {
        if_debug3('u', "[u]finalizing %s 0x%lx (%s)\n",
                  struct_type_name_string(pstype),
                  (ulong) ptr, client_name_string(cname));
        (*finalize) (ptr);
    }
    if ((byte *) ptr + rounded_size == imem->cc.cbot) {
        alloc_trace(":-o ", imem, cname, pstype, size, ptr);
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
        imem->cc.cbot = (byte *) pp;
        /* IFF this object is adjacent to (or below) the byte after the
         * highest free object, do the consolidation within this chunk. */
        if ((byte *)pp <= imem->cc.int_freed_top) {
            consolidate_chunk_free(&(imem->cc), imem);
        }
        return;
    }
    if (pp->o_alone) {
        /*
         * We gave this object its own chunk.  Free the entire chunk,
         * unless it belongs to an older save level, in which case
         * we mustn't overwrite it.
         */
        chunk_locator_t cl;

#ifdef DEBUG
        {
            chunk_locator_t cld;

            cld.memory = imem;
            cld.cp = 0;
            if (gs_debug_c('a'))
                alloc_trace(
                            (chunk_locate_ptr(ptr, &cld) ? ":-oL" : ":-o~"),
                            imem, cname, pstype, size, ptr);
        }
#endif
        cl.memory = imem;
        cl.cp = 0;
        if (chunk_locate_ptr(ptr, &cl)) {
            if (!imem->is_controlled)
                alloc_free_chunk(cl.cp, imem);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    }
    if (rounded_size >= sizeof(obj_header_t *)) {
        /*
         * Put the object on a freelist, unless it belongs to
         * an older save level, in which case we mustn't
         * overwrite it.
         */
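        /*
         * Illustrative note (not in the original source): the freelists
         * are intrusive singly-linked lists -- the first word of the
         * freed object's data area holds the pointer to the next free
         * block, which is why only blocks of at least
         * sizeof(obj_header_t *) bytes can be kept at all.
         */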
        imem->cfreed.memory = imem;
        if (chunk_locate(ptr, &imem->cfreed)) {
            obj_header_t **pfl;

            if (size > max_freelist_size) {
                pfl = &imem->freelists[LARGE_FREELIST_INDEX];
                if (rounded_size > imem->largest_free_size)
                    imem->largest_free_size = rounded_size;
            } else {
                pfl = &imem->freelists[(size + obj_align_mask) >>
                                       log2_obj_align_mod];
            }
            /* keep track of highest object on a freelist */
            if ((byte *)pp >= imem->cc.int_freed_top)
                imem->cc.int_freed_top = (byte *)ptr + rounded_size;
            pp->o_type = &st_free;      /* don't confuse GC */
            gs_alloc_fill(ptr, gs_alloc_fill_free, size);
            *(obj_header_t **) ptr = *pfl;
            *pfl = (obj_header_t *) ptr;
            alloc_trace((size > max_freelist_size ? ":-oF" : ":-of"),
                        imem, cname, pstype, size, ptr);
            return;
        }
        /* Don't overwrite even if gs_alloc_debug is set. */
    } else {
        pp->o_type = &st_free;  /* don't confuse GC */
        gs_alloc_fill(ptr, gs_alloc_fill_free, size);
    }
    alloc_trace(":-o#", imem, cname, pstype, size, ptr);
    imem->lost.objects += obj_size_round(size);
}

private byte *
i_alloc_string(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;

top:
    if (imem->cc.ctop - imem->cc.cbot > nbytes) {
        if_debug4('A', "[a%d:+> ]%s(%u) = 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) (imem->cc.ctop - nbytes));
        str = imem->cc.ctop -= nbytes;
        gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
        return str;
    }
    if (nbytes > string_space_quanta(max_uint - sizeof(chunk_head_t)) *
        string_data_quantum
        ) {                     /* Can't represent the size in a uint! */
        return 0;
    }
    if (nbytes >= imem->large_size) {   /* Give it a chunk all its own. */
        return i_alloc_string_immovable(mem, nbytes, cname);
    } else {                    /* Add another chunk. */
        chunk_t *cp =
            alloc_acquire_chunk(imem, (ulong) imem->chunk_size, true, "chunk");

        if (cp == 0)
            return 0;
        alloc_close_chunk(imem);
        imem->pcc = cp;
        imem->cc = *imem->pcc;
        gs_alloc_fill(imem->cc.cbase, gs_alloc_fill_free,
                      imem->cc.climit - imem->cc.cbase);
        goto top;
    }
}

private byte *
i_alloc_string_immovable(gs_memory_t * mem, uint nbytes, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *str;
    /* Give it a chunk all its own. */
    uint asize = string_chunk_space(nbytes) + sizeof(chunk_head_t);
    chunk_t *cp = alloc_acquire_chunk(imem, (ulong) asize, true,
                                      "large string chunk");

    if (cp == 0)
        return 0;
    str = cp->ctop = cp->climit - nbytes;
    if_debug4('a', "[a%d|+>L]%s(%u) = 0x%lx\n",
              alloc_trace_space(imem), client_name_string(cname), nbytes,
              (ulong) str);
    gs_alloc_fill(str, gs_alloc_fill_alloc, nbytes);
    return str;
}

private byte *
i_resize_string(gs_memory_t * mem, byte * data, uint old_num, uint new_num,
                client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    byte *ptr;

    if (old_num == new_num)     /* same size returns the same string */
        return data;
    if (data == imem->cc.ctop &&        /* bottom-most string */
        (new_num < old_num ||
         imem->cc.ctop - imem->cc.cbot > new_num - old_num)
        ) {                     /* Resize in place. */
        ptr = data + old_num - new_num;
        if_debug6('A', "[a%d:%c> ]%s(%u->%u) 0x%lx\n",
                  alloc_trace_space(imem),
                  (new_num > old_num ? '>' : '<'),
                  client_name_string(cname), old_num, new_num,
                  (ulong) ptr);
        imem->cc.ctop = ptr;
        memmove(ptr, data, min(old_num, new_num));
#ifdef DEBUG
        if (new_num > old_num)
            gs_alloc_fill(ptr + old_num, gs_alloc_fill_alloc,
                          new_num - old_num);
        else
            gs_alloc_fill(data, gs_alloc_fill_free, old_num - new_num);
#endif
    } else
        if (new_num < old_num) {
            /* trim the string and create a free space hole */
            ptr = data;
            imem->lost.strings += old_num - new_num;
            gs_alloc_fill(data + new_num, gs_alloc_fill_free,
                          old_num - new_num);
            if_debug5('A', "[a%d:<> ]%s(%u->%u) 0x%lx\n",
                      alloc_trace_space(imem), client_name_string(cname),
                      old_num, new_num, (ulong)ptr);
        } else {                /* Punt. */
            ptr = gs_alloc_string(mem, new_num, cname);
            if (ptr == 0)
                return 0;
            memcpy(ptr, data, min(old_num, new_num));
            gs_free_string(mem, data, old_num, cname);
        }
    return ptr;
}

private void
i_free_string(gs_memory_t * mem, byte * data, uint nbytes,
              client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (data == imem->cc.ctop) {
        if_debug4('A', "[a%d:-> ]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->cc.ctop += nbytes;
    } else {
        if_debug4('A', "[a%d:->#]%s(%u) 0x%lx\n",
                  alloc_trace_space(imem), client_name_string(cname), nbytes,
                  (ulong) data);
        imem->lost.strings += nbytes;
    }
    gs_alloc_fill(data, gs_alloc_fill_free, nbytes);
}

private gs_memory_t *
i_stable(gs_memory_t *mem)
{
    return mem->stable_memory;
}

private void
i_status(gs_memory_t * mem, gs_memory_status_t * pstat)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    ulong unused = imem->lost.refs + imem->lost.strings;
    ulong inner = 0;

    alloc_close_chunk(imem);
    /* Add up unallocated space within each chunk. */
    /* Also keep track of space allocated to inner chunks, */
    /* which are included in previous_status.allocated. */
    {
        const chunk_t *cp = imem->cfirst;

        while (cp != 0) {
            unused += cp->ctop - cp->cbot;
            if (cp->outer)
                inner += cp->cend - (byte *) cp->chead;
            cp = cp->cnext;
        }
    }
    unused += compute_free_objects(imem);
    pstat->used = imem->allocated + inner - unused +
        imem->previous_status.used;
    pstat->allocated = imem->allocated +
        imem->previous_status.allocated;
}

private void
i_enable_free(gs_memory_t * mem, bool enable)
{
    if (enable)
        mem->procs.free_object = i_free_object,
            mem->procs.free_string = i_free_string;
    else
        mem->procs.free_object = gs_ignore_free_object,
            mem->procs.free_string = gs_ignore_free_string;
}

/* ------ Internal procedures ------ */

/* Compute the amount of free object space by scanning free lists. */
private ulong
compute_free_objects(gs_ref_memory_t * mem)
{
    ulong unused = mem->lost.objects;
    int i;

    /* Add up space on free lists. */
    for (i = 0; i < num_freelists; i++) {
        const obj_header_t *pfree;

        for (pfree = mem->freelists[i]; pfree != 0;
             pfree = *(const obj_header_t * const *)pfree
            )
            unused += obj_align_round(pfree[-1].o_size);
    }
    return unused;
}

/* Allocate an object from the large-block freelist. */
private obj_header_t *          /* rets obj if allocated, else 0 */
large_freelist_alloc(gs_ref_memory_t *mem, uint size)
{
    /* Scan large object freelist.  We'll grab an object up to 1/8 bigger */
    /* right away, else use best fit of entire scan. */
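    /*
     * Illustrative example (not in the original source; figures assume
     * 8-byte alignment): for size = 1000, aligned_size = 1000 and
     * aligned_min_size = 1000 + sizeof(obj_header_t).  An exact 1000-byte
     * block, or the first block no more than about 1/8 larger than
     * aligned_min_size, ends the scan early; otherwise the whole list is
     * scanned and the smallest block of at least aligned_min_size wins.
     */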
    uint aligned_size = obj_align_round(size);
    uint aligned_min_size = aligned_size + sizeof(obj_header_t);
    uint aligned_max_size =
        aligned_min_size + obj_align_round(aligned_min_size / 8);
    obj_header_t *best_fit = 0;
    obj_header_t **best_fit_prev;
    uint best_fit_size = max_uint;
    obj_header_t *pfree;
    obj_header_t **ppfprev = &mem->freelists[LARGE_FREELIST_INDEX];
    uint largest_size = 0;

    if (aligned_size > mem->largest_free_size)
        return 0;               /* definitely no block large enough */
    while ((pfree = *ppfprev) != 0) {
        uint free_size = obj_align_round(pfree[-1].o_size);

        if (free_size == aligned_size ||
            (free_size >= aligned_min_size && free_size < best_fit_size)
            ) {
            best_fit = pfree;
            best_fit_prev = ppfprev;
            best_fit_size = pfree[-1].o_size;
            if (best_fit_size <= aligned_max_size)
                break;          /* good enough fit to spare scan of entire list */
        }
        ppfprev = (obj_header_t **) pfree;
        if (free_size > largest_size)
            largest_size = free_size;
    }
    if (best_fit == 0) {
        /*
         * No single free chunk is large enough, but since we scanned the
         * entire list, we now have an accurate updated value for
         * largest_free_size.
         */
        mem->largest_free_size = largest_size;
        return 0;
    }

    /* Remove from freelist & return excess memory to free */
    *best_fit_prev = *(obj_header_t **)best_fit;
    trim_obj(mem, best_fit, aligned_size, (chunk_t *)0);

    /* Pre-init block header; o_alone & o_type are already init'd */
    best_fit[-1].o_size = size;

    return best_fit;
}

/* Allocate an object.  This handles all but the fastest, simplest case. */
private obj_header_t *
alloc_obj(gs_ref_memory_t *mem, ulong lsize, gs_memory_type_ptr_t pstype,
          alloc_flags_t flags, client_name_t cname)
{
    obj_header_t *ptr;

    if (lsize >= mem->large_size || (flags & ALLOC_IMMOVABLE)) {
        /*
         * Give the object a chunk all its own.  Note that this case does
         * not occur if is_controlled is true.
         */
        ulong asize =
            ((lsize + obj_align_mask) & -obj_align_mod) +
            sizeof(obj_header_t);
        chunk_t *cp =
            alloc_acquire_chunk(mem, asize + sizeof(chunk_head_t), false,
                                "large object chunk");

        if (
#if arch_sizeof_long > arch_sizeof_int
            asize > max_uint
#else
            asize < lsize
#endif
            )
            return 0;
        if (cp == 0)
            return 0;
        ptr = (obj_header_t *) cp->cbot;
        cp->cbot += asize;
        ptr->o_alone = 1;
        ptr->o_size = lsize;
    } else if (lsize > max_freelist_size && (flags & ALLOC_DIRECT) &&
               (ptr = large_freelist_alloc(mem, lsize)) != 0) {
        /* We hadn't checked the large block freelist yet. */
        --ptr;                  /* must point to header */
        goto done;
    } else {
        uint asize = obj_size_round((uint) lsize);
        bool consolidate = mem->is_controlled;
        bool allocate_success = true;

        while (mem->cc.ctop -
               (byte *) (ptr = (obj_header_t *) mem->cc.cbot)
               <= asize + sizeof(obj_header_t)) {
            if (consolidate) {
                /* Try consolidating free space. */
                gs_consolidate_free((gs_memory_t *)mem);
                consolidate = false;
                continue;
            } else {
                /* Add another chunk. */
                chunk_t *cp =
                    alloc_add_chunk(mem, (ulong)mem->chunk_size, "chunk");

                if (cp == 0) {
                    allocate_success = false;
                    break;
                }
            }
        }
        /*
         * If no success, try to scavenge from low free memory.  This is only
         * enabled for controlled memory (currently only async renderer)
         * because it's too much work to prevent it from examining outer
         * save levels in the general case.
         */
        if (allocate_success)
            mem->cc.cbot = (byte *) ptr + asize;
        else if (!mem->is_controlled ||
                 (ptr = scavenge_low_free(mem, (uint)lsize)) == 0)
            return 0;           /* allocation failed */
        ptr->o_alone = 0;
        ptr->o_size = (uint) lsize;
    }
done:
    ptr->o_type = pstype;
    ptr++;
    gs_alloc_fill(ptr, gs_alloc_fill_alloc, lsize);
    return ptr;
}

/*
 * Consolidate free objects contiguous to free space at cbot onto the cbot
 * area.  Also keep track of end of highest internal free object
 * (int_freed_top).
 */
private void
consolidate_chunk_free(chunk_t *cp, gs_ref_memory_t *mem)
{
    obj_header_t *begin_free = 0;

    cp->int_freed_top = cp->cbase;      /* below all objects in chunk */
    SCAN_CHUNK_OBJECTS(cp)
    DO_ALL
        if (pre->o_type == &st_free) {
            if (begin_free == 0)
                begin_free = pre;
        } else {
            if (begin_free)
                cp->int_freed_top = (byte *)pre;        /* first byte following internal free */
            begin_free = 0;
        }
    END_OBJECTS_SCAN
    if (begin_free) {
        /* We found free objects at the top of the object area. */
        /* Remove the free objects from the freelists. */
        remove_range_from_freelist(mem, begin_free, cp->cbot);
        if_debug4('a', "[a]resetting chunk 0x%lx cbot from 0x%lx to 0x%lx (%lu free)\n",
                  (ulong) cp, (ulong) cp->cbot, (ulong) begin_free,
                  (ulong) ((byte *) cp->cbot - (byte *) begin_free));
        cp->cbot = (byte *) begin_free;
    }
}

/* Consolidate free objects. */
void
ialloc_consolidate_free(gs_ref_memory_t *mem)
{
    chunk_t *cp;
    chunk_t *cprev;

    alloc_close_chunk(mem);

    /* Visit chunks in reverse order to encourage LIFO behavior. */
    for (cp = mem->clast; cp != 0; cp = cprev) {
        cprev = cp->cprev;
        consolidate_chunk_free(cp, mem);
        if (cp->cbot == cp->cbase && cp->ctop == cp->climit) {
            /* The entire chunk is free. */
            chunk_t *cnext = cp->cnext;

            if (!mem->is_controlled) {
                alloc_free_chunk(cp, mem);
                if (mem->pcc == cp)
                    mem->pcc =
                        (cnext == 0 ? cprev : cprev == 0 ? cnext :
                         cprev->cbot - cprev->ctop >
                         cnext->cbot - cnext->ctop ? cprev :
                         cnext);
            }
        }
    }
    alloc_open_chunk(mem);
}

private void
i_consolidate_free(gs_memory_t *mem)
{
    ialloc_consolidate_free((gs_ref_memory_t *)mem);
}
/* Try to free up the given amount of space from free space below the chunk base. */
private obj_header_t *          /* returns uninitialized object hdr, NULL if none found */
scavenge_low_free(gs_ref_memory_t *mem, unsigned request_size)
{
    /* find 1st range of memory that can be glued back together to fill request */
    obj_header_t *found_pre = 0;

    /* Visit chunks in forward order */
    obj_header_t *begin_free = 0;
    uint found_free;
    uint request_size_rounded = obj_size_round(request_size);
    uint need_free = request_size_rounded + sizeof(obj_header_t);      /* room for GC's dummy hdr */
    chunk_t *cp;

    for (cp = mem->cfirst; cp != 0; cp = cp->cnext) {
        begin_free = 0;
        found_free = 0;
        SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
            if (pre->o_type == &st_free) {
                if (begin_free == 0) {
                    found_free = 0;
                    begin_free = pre;
                }
                found_free += pre_obj_rounded_size(pre);
                if (begin_free != 0 && found_free >= need_free)
                    break;
            } else
                begin_free = 0;
        END_OBJECTS_SCAN_INCOMPLETE

        /* Found sufficient range of empty memory */
        if (begin_free != 0 && found_free >= need_free) {
            /* Fish found pieces out of various freelists */
            remove_range_from_freelist(mem, (char*)begin_free,
                                       (char*)begin_free + found_free);

            /* Prepare found object */
            found_pre = begin_free;
            found_pre->o_type = &st_free;       /* don't confuse GC if gets lost */
            found_pre->o_size = found_free - sizeof(obj_header_t);

            /* Chop off excess tail piece & toss it back into free pool */
            trim_obj(mem, found_pre + 1, request_size, cp);
        }
    }
    return found_pre;
}

/* Remove range of memory from a mem's freelists */
private void
remove_range_from_freelist(gs_ref_memory_t *mem, void* bottom, void* top)
{
    int num_free[num_freelists];
    int smallest = num_freelists, largest = -1;
    const obj_header_t *cur;
    uint size;
    int i;
    uint removed = 0;

    /*
     * Scan from bottom to top, a range containing only free objects,
     * counting the number of objects of each size.
     */
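    /*
     * Illustrative note (not in the original source): only the slice
     * num_free[smallest..largest] is ever initialized, via the two
     * memsets below, so a short run of free objects touches only a few
     * counters instead of clearing the whole num_freelists-entry array.
     */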
    for (cur = bottom; cur != top;
         cur = (const obj_header_t *)
         ((const byte *)cur + obj_size_round(size))
        ) {
        size = cur->o_size;
        i = (size > max_freelist_size ? LARGE_FREELIST_INDEX :
             (size + obj_align_mask) >> log2_obj_align_mod);
        if (i < smallest) {
            /*
             * 0-length free blocks aren't kept on any list, because
             * they don't have room for a pointer.
             */
            if (i == 0)
                continue;
            if (smallest < num_freelists)
                memset(&num_free[i], 0, (smallest - i) * sizeof(int));
            else
                num_free[i] = 0;
            smallest = i;
        }
        if (i > largest) {
            if (largest >= 0)
                memset(&num_free[largest + 1], 0, (i - largest) * sizeof(int));
            largest = i;
        }
        num_free[i]++;
    }

    /*
     * Remove free objects from the freelists, adjusting lost.objects by
     * subtracting the size of the region being processed minus the amount
     * of space reclaimed.
     */
    for (i = smallest; i <= largest; i++) {
        int count = num_free[i];
        obj_header_t *pfree;
        obj_header_t **ppfprev;

        if (!count)
            continue;
        ppfprev = &mem->freelists[i];
        for (;;) {
            pfree = *ppfprev;
            if (PTR_GE(pfree, bottom) && PTR_LT(pfree, top)) {
                /* We're removing an object. */
                *ppfprev = *(obj_header_t **) pfree;
                removed += obj_align_round(pfree[-1].o_size);
                if (!--count)
                    break;
            } else
                ppfprev = (obj_header_t **) pfree;
        }
    }
    mem->lost.objects -= (char*)top - (char*)bottom - removed;
}

/* Trim a memory object down to a given size */
private void
trim_obj(gs_ref_memory_t *mem, obj_header_t *obj, uint size, chunk_t *cp)
/* Obj must have rounded size == req'd size, or have enough room for */
/* trailing dummy obj_header */
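/*
 * Illustrative sketch (not in the original source): trimming turns
 *      [hdr | object, old_rounded_size bytes]
 * into
 *      [hdr | object, rounded_size bytes][excess hdr | excess_size bytes]
 * where the excess piece goes onto a freelist if it is at least
 * obj_align_mod bytes and is otherwise counted as lost.
 */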
  1228. {
  1229. uint rounded_size = obj_align_round(size);
  1230. obj_header_t *pre_obj = obj - 1;
  1231. obj_header_t *excess_pre = (obj_header_t*)((char*)obj + rounded_size);
  1232. uint old_rounded_size = obj_align_round(pre_obj->o_size);
  1233. uint excess_size = old_rounded_size - rounded_size - sizeof(obj_header_t);
  1234. /* trim object's size to desired */
  1235. pre_obj->o_size = size;
  1236. if (old_rounded_size == rounded_size)
  1237. return; /* nothing more to do here */
  1238. /*
  1239. * If the object is alone in its chunk, move cbot to point to the end
  1240. * of the object.
  1241. */
  1242. if (pre_obj->o_alone) {
  1243. if (!cp) {
  1244. mem->cfreed.memory = mem;
  1245. if (chunk_locate(obj, &mem->cfreed)) {
  1246. cp = mem->cfreed.cp;
  1247. }
  1248. }
  1249. if (cp) {
  1250. #ifdef DEBUG
  1251. if (cp->cbot != (byte *)obj + old_rounded_size) {
  1252. lprintf3("resizing 0x%lx, old size %u, new size %u, cbot wrong!\n",
  1253. (ulong)obj, old_rounded_size, size);
  1254. /* gs_abort */
  1255. } else
  1256. #endif
  1257. {
  1258. cp->cbot = (byte *)excess_pre;
  1259. return;
  1260. }
  1261. }
  1262. /*
  1263. * Something very weird is going on. This probably shouldn't
  1264. * ever happen, but if it does....
  1265. */
  1266. pre_obj->o_alone = 0;
  1267. }
  1268. /* make excess into free obj */
  1269. excess_pre->o_type = &st_free; /* don't confuse GC */
  1270. excess_pre->o_size = excess_size;
  1271. excess_pre->o_alone = 0;
  1272. if (excess_size >= obj_align_mod) {
  1273. /* Put excess object on a freelist */
  1274. obj_header_t **pfl;
  1275. if ((byte *)excess_pre >= mem->cc.int_freed_top)
  1276. mem->cc.int_freed_top = (byte *)excess_pre + excess_size;
  1277. if (excess_size <= max_freelist_size)
  1278. pfl = &mem->freelists[(excess_size + obj_align_mask) >>
  1279. log2_obj_align_mod];
  1280. else {
  1281. uint rounded_size = obj_align_round(excess_size);
  1282. pfl = &mem->freelists[LARGE_FREELIST_INDEX];
  1283. if (rounded_size > mem->largest_free_size)
  1284. mem->largest_free_size = rounded_size;
  1285. }
  1286. *(obj_header_t **) (excess_pre + 1) = *pfl;
  1287. *pfl = excess_pre + 1;
  1288. mem->cfreed.memory = mem;
  1289. } else {
  1290. /* excess piece will be "lost" memory */
  1291. mem->lost.objects += excess_size + sizeof(obj_header_t);
  1292. }
  1293. }
/* ================ Roots ================ */

/* Register a root. */
private int
i_register_root(gs_memory_t * mem, gs_gc_root_t * rp, gs_ptr_type_t ptype,
                void **up, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;

    if (rp == NULL) {
        rp = gs_raw_alloc_struct_immovable(imem->parent, &st_gc_root_t,
                                           "i_register_root");
        if (rp == 0)
            return_error(gs_error_VMerror);
        rp->free_on_unregister = true;
    } else
        rp->free_on_unregister = false;
    if_debug3('8', "[8]register root(%s) 0x%lx -> 0x%lx\n",
              client_name_string(cname), (ulong)rp, (ulong)up);
    rp->ptype = ptype;
    rp->p = up;
    rp->next = imem->roots;
    imem->roots = rp;
    return 0;
}
/* Unregister a root. */
private void
i_unregister_root(gs_memory_t * mem, gs_gc_root_t * rp, client_name_t cname)
{
    gs_ref_memory_t * const imem = (gs_ref_memory_t *)mem;
    gs_gc_root_t **rpp = &imem->roots;

    if_debug2('8', "[8]unregister root(%s) 0x%lx\n",
              client_name_string(cname), (ulong) rp);
    while (*rpp != rp)
        rpp = &(*rpp)->next;
    *rpp = (*rpp)->next;
    if (rp->free_on_unregister)
        gs_free_object(imem->parent, rp, "i_unregister_root");
}
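/*
 * Usage sketch (editorial addition; "myptr" and the client name are
 * hypothetical): a client pins a pointer against garbage collection by
 * registering it as a root, and releases it symmetrically.
 *
 *      gs_gc_root_t root;
 *      void *myptr = my_object;
 *
 *      gs_register_root(mem, &root, ptr_struct_type, &myptr, "my client");
 *      .. myptr is traced as a root by the GC ..
 *      gs_unregister_root(mem, &root, "my client");
 *
 * Passing rp == NULL to i_register_root makes the allocator allocate the
 * root record itself and free it again on unregistration (see
 * free_on_unregister above).
 */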
/* ================ Chunks ================ */

public_st_chunk();

/* Insert a chunk in the chain. This is exported for the GC and for */
/* the forget_save operation. */
void
alloc_link_chunk(chunk_t * cp, gs_ref_memory_t * imem)
{
    byte *cdata = cp->cbase;
    chunk_t *icp;
    chunk_t *prev;

    /*
     * Allocators tend to allocate in either ascending or descending
     * address order. The loop will handle the latter well; check for
     * the former first.
     */
    if (imem->clast && PTR_GE(cdata, imem->clast->ctop))
        icp = 0;
    else
        for (icp = imem->cfirst; icp != 0 && PTR_GE(cdata, icp->ctop);
             icp = icp->cnext
            );
    cp->cnext = icp;
    if (icp == 0) {             /* add at end of chain */
        prev = imem->clast;
        imem->clast = cp;
    } else {                    /* insert before icp */
        prev = icp->cprev;
        icp->cprev = cp;
    }
    cp->cprev = prev;
    if (prev == 0)
        imem->cfirst = cp;
    else
        prev->cnext = cp;
    if (imem->pcc != 0) {
        imem->cc.cnext = imem->pcc->cnext;
        imem->cc.cprev = imem->pcc->cprev;
    }
}
/* Add a chunk for ordinary allocation. */
private chunk_t *
alloc_add_chunk(gs_ref_memory_t * mem, ulong csize, client_name_t cname)
{
    chunk_t *cp = alloc_acquire_chunk(mem, csize, true, cname);

    if (cp) {
        alloc_close_chunk(mem);
        mem->pcc = cp;
        mem->cc = *mem->pcc;
        gs_alloc_fill(mem->cc.cbase, gs_alloc_fill_free,
                      mem->cc.climit - mem->cc.cbase);
    }
    return cp;
}
/* Acquire a chunk. If we would exceed MaxLocalVM (if relevant), */
/* or if we would exceed the VMThreshold and psignal is NULL, */
/* return 0; if we would exceed the VMThreshold but psignal is valid, */
/* just set the signal and return successfully. */
private chunk_t *
alloc_acquire_chunk(gs_ref_memory_t * mem, ulong csize, bool has_strings,
                    client_name_t cname)
{
    gs_raw_memory_t *parent = mem->parent;
    chunk_t *cp;
    byte *cdata;

#if arch_sizeof_long > arch_sizeof_int
    /* If csize is larger than max_uint, punt. */
    if (csize != (uint) csize)
        return 0;
#endif
    cp = gs_raw_alloc_struct_immovable(parent, &st_chunk, cname);
    if ((ulong) (mem->allocated + mem->inherited) >= mem->limit) {
        mem->gc_status.requested += csize;
        if (mem->limit >= mem->gc_status.max_vm ||
            mem->gc_status.psignal == 0
            ) {
            gs_free_object(parent, cp, cname);  /* don't leak the chunk struct */
            return 0;
        }
        if_debug4('0', "[0]signaling space=%d, allocated=%ld, limit=%ld, requested=%ld\n",
                  mem->space, (long)mem->allocated,
                  (long)mem->limit, (long)mem->gc_status.requested);
        *mem->gc_status.psignal = mem->gc_status.signal_value;
    }
    cdata = gs_alloc_bytes_immovable(parent, csize, cname);
    if (cp == 0 || cdata == 0) {
        gs_free_object(parent, cdata, cname);
        gs_free_object(parent, cp, cname);
        mem->gc_status.requested = csize;
        return 0;
    }
    alloc_init_chunk(cp, cdata, cdata + csize, has_strings, (chunk_t *) 0);
    alloc_link_chunk(cp, mem);
    mem->allocated += st_chunk.ssize + csize;
    return cp;
}
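/*
 * Editorial restatement (not original code) of the admission logic above:
 * once the allocator is at or over its limit, a new chunk is granted only
 * if a garbage collection can still be requested.
 *
 *      over_limit = (ulong)(mem->allocated + mem->inherited) >= mem->limit;
 *      if (over_limit) {
 *          if (mem->limit >= mem->gc_status.max_vm ||   .. hard VM cap ..
 *              mem->gc_status.psignal == 0)             .. no GC to signal ..
 *              return 0;
 *          *mem->gc_status.psignal = mem->gc_status.signal_value;
 *      }
 *      .. proceed with the raw allocation ..
 */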
/* Initialize the pointers in a chunk. This is exported for save/restore. */
/* The bottom pointer must be aligned, but the top pointer need not */
/* be aligned. */
void
alloc_init_chunk(chunk_t * cp, byte * bot, byte * top, bool has_strings,
                 chunk_t * outer)
{
    byte *cdata = bot;

    if (outer != 0)
        outer->inner_count++;
    cp->chead = (chunk_head_t *) cdata;
    cdata += sizeof(chunk_head_t);
    cp->cbot = cp->cbase = cp->int_freed_top = cdata;
    cp->cend = top;
    cp->rcur = 0;
    cp->rtop = 0;
    cp->outer = outer;
    cp->inner_count = 0;
    cp->has_refs = false;
    cp->sbase = cdata;
    if (has_strings && top - cdata >= string_space_quantum + sizeof(long) - 1) {
        /*
         * Allocate string marking and relocation tables large enough
         * to cover the entire chunk.
         */
        uint nquanta = string_space_quanta(top - cdata);

        cp->climit = cdata + nquanta * string_data_quantum;
        cp->smark = cp->climit;
        cp->smark_size = string_quanta_mark_size(nquanta);
        cp->sreloc =
            (string_reloc_offset *) (cp->smark + cp->smark_size);
        cp->sfree1 = (uint *) cp->sreloc;
    } else {
        /* No strings, so we don't need the string GC tables. */
        cp->climit = cp->cend;
        cp->sfree1 = 0;
        cp->smark = 0;
        cp->smark_size = 0;
        cp->sreloc = 0;
    }
    cp->ctop = cp->climit;
    alloc_init_free_strings(cp);
}
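/*
 * Editorial sketch of the resulting layout (the string GC tables exist
 * only when has_strings is true):
 *
 *      bot/chead                                                 cend
 *      | chunk_head_t | objects -->      <-- strings | smark | sreloc |
 *                     ^cbase/cbot    ctop^      climit^
 *
 * Initially cbot == cbase and ctop == climit, so the object area (growing
 * upward) and the string area (growing downward) start empty and grow
 * toward each other.
 */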
/* Initialize the string freelists in a chunk. */
void
alloc_init_free_strings(chunk_t * cp)
{
    if (cp->sfree1)
        memset(cp->sfree1, 0, STRING_FREELIST_SPACE(cp));
    cp->sfree = 0;
}
/* Close up the current chunk. */
/* This is exported for save/restore and the GC. */
void
alloc_close_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
        *mem->pcc = mem->cc;
#ifdef DEBUG
        if (gs_debug_c('a')) {
            dlprintf1("[a%d]", alloc_trace_space(mem));
            dprintf_chunk("closing chunk", mem->pcc);
        }
#endif
    }
}

/* Reopen the current chunk after a GC or restore. */
void
alloc_open_chunk(gs_ref_memory_t * mem)
{
    if (mem->pcc != 0) {
        mem->cc = *mem->pcc;
#ifdef DEBUG
        if (gs_debug_c('a')) {
            dlprintf1("[a%d]", alloc_trace_space(mem));
            dprintf_chunk("opening chunk", mem->pcc);
        }
#endif
    }
}
/* Remove a chunk from the chain. This is exported for the GC. */
void
alloc_unlink_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
#ifdef DEBUG
    if (gs_alloc_debug) {
        /* Check to make sure this chunk belongs to this allocator. */
        const chunk_t *ap = mem->cfirst;

        while (ap != 0 && ap != cp)
            ap = ap->cnext;
        if (ap != cp) {
            lprintf2("unlink_chunk 0x%lx not owned by memory 0x%lx!\n",
                     (ulong) cp, (ulong) mem);
            return;             /*gs_abort(); */
        }
    }
#endif
    if (cp->cprev == 0)
        mem->cfirst = cp->cnext;
    else
        cp->cprev->cnext = cp->cnext;
    if (cp->cnext == 0)
        mem->clast = cp->cprev;
    else
        cp->cnext->cprev = cp->cprev;
    if (mem->pcc != 0) {
        mem->cc.cnext = mem->pcc->cnext;
        mem->cc.cprev = mem->pcc->cprev;
        if (mem->pcc == cp) {
            mem->pcc = 0;
            mem->cc.cbot = mem->cc.ctop = 0;
        }
    }
}
/*
 * Free a chunk. This is exported for the GC. Since we eventually use
 * this to free the chunk containing the allocator itself, we must be
 * careful not to reference anything in the allocator after freeing the
 * chunk data.
 */
void
alloc_free_chunk(chunk_t * cp, gs_ref_memory_t * mem)
{
    gs_raw_memory_t *parent = mem->parent;
    byte *cdata = (byte *)cp->chead;
    ulong csize = (byte *)cp->cend - cdata;

    alloc_unlink_chunk(cp, mem);
    mem->allocated -= st_chunk.ssize;
    if (mem->cfreed.cp == cp)
        mem->cfreed.cp = 0;
    if (cp->outer == 0) {
        mem->allocated -= csize;
        gs_free_object(parent, cdata, "alloc_free_chunk(data)");
    } else {
        cp->outer->inner_count--;
        gs_alloc_fill(cdata, gs_alloc_fill_free, csize);
    }
    gs_free_object(parent, cp, "alloc_free_chunk(chunk struct)");
}
/* Find the chunk for a pointer. */
/* Note that this only searches the current save level. */
/* Since a given save level can't contain both a chunk and an inner chunk */
/* of that chunk, we can stop when is_within_chunk succeeds, and just test */
/* is_in_inner_chunk then. */
bool
chunk_locate_ptr(const void *ptr, chunk_locator_t * clp)
{
    register chunk_t *cp = clp->cp;

    if (cp == 0) {
        cp = clp->memory->cfirst;
        if (cp == 0)
            return false;
        /* ptr is in the last chunk often enough to be worth checking for. */
        if (PTR_GE(ptr, clp->memory->clast->cbase))
            cp = clp->memory->clast;
    }
    if (PTR_LT(ptr, cp->cbase)) {
        do {
            cp = cp->cprev;
            if (cp == 0)
                return false;
        }
        while (PTR_LT(ptr, cp->cbase));
        if (PTR_GE(ptr, cp->cend))
            return false;
    } else {
        while (PTR_GE(ptr, cp->cend)) {
            cp = cp->cnext;
            if (cp == 0)
                return false;
        }
        if (PTR_LT(ptr, cp->cbase))
            return false;
    }
    clp->cp = cp;
    return !ptr_is_in_inner_chunk(ptr, cp);
}
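/*
 * Usage sketch (editorial addition): callers keep a chunk_locator_t as a
 * cache, so repeated lookups near the same address usually hit the cached
 * chunk without walking the chain.
 *
 *      chunk_locator_t loc;
 *
 *      loc.memory = mem;
 *      loc.cp = 0;                     .. start with no cached chunk ..
 *      if (chunk_locate_ptr(ptr, &loc))
 *          .. ptr lies in loc.cp, and not in one of its inner chunks ..
 *
 * The chunk_locate() macro used earlier in this file (e.g. in trim_obj)
 * is the usual wrapper around this search.
 */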
/* ------ Debugging ------ */

#ifdef DEBUG

#include "string_.h"

inline private bool
obj_in_control_region(const void *obot, const void *otop,
                      const dump_control_t *pdc)
{
    return
        ((pdc->bottom == NULL || PTR_GT(otop, pdc->bottom)) &&
         (pdc->top == NULL || PTR_LT(obot, pdc->top)));
}

const dump_control_t dump_control_default =
{
    dump_do_default, NULL, NULL
};
const dump_control_t dump_control_all =
{
    dump_do_strings | dump_do_type_addresses | dump_do_pointers |
    dump_do_pointed_strings | dump_do_contents, NULL, NULL
};

/*
 * Internal procedure to dump a block of memory, in hex and optionally
 * also as characters.
 */
private void
debug_indent(int indent)
{
    int i;

    for (i = indent; i > 0; --i)
        dputc(' ');
}
private void
debug_dump_contents(const byte * bot, const byte * top, int indent,
                    bool as_chars)
{
    const byte *block;

#define block_size 16

    if (bot >= top)
        return;
    for (block = bot - ((bot - (byte *) 0) & (block_size - 1));
         block < top; block += block_size
        ) {
        int i;
        char label[24];         /* wide enough for a 64-bit "0x%lx:" */

        /* Check for repeated blocks. */
        if (block >= bot + block_size &&
            block <= top - (block_size * 2) &&
            !memcmp(block, block - block_size, block_size) &&
            !memcmp(block, block + block_size, block_size)
            ) {
            if (block < bot + block_size * 2 ||
                memcmp(block, block - block_size * 2, block_size)
                ) {
                debug_indent(indent);
                dputs(" ...\n");
            }
            continue;
        }
        sprintf(label, "0x%lx:", (ulong) block);
        debug_indent(indent);
        dputs(label);
        for (i = 0; i < block_size; ++i) {
            /* A wider separator every 4 bytes keeps the columns readable. */
            const char *sepr = ((i & 3) == 0 && i != 0 ? "  " : " ");

            dputs(sepr);
            if (block + i >= bot && block + i < top)
                dprintf1("%02x", block[i]);
            else
                dputs("  ");    /* two spaces, matching the %02x width */
        }
        dputc('\n');
        if (as_chars) {
            debug_indent(indent + strlen(label));
            for (i = 0; i < block_size; ++i) {
                byte ch;

                if ((i & 3) == 0 && i != 0)
                    dputc(' ');
                if (block + i >= bot && block + i < top &&
                    (ch = block[i]) >= 32 && ch <= 126
                    )
                    dprintf1("  %c", ch);
                else
                    dputs("   ");       /* three spaces: one column per byte */
            }
            dputc('\n');
        }
    }
#undef block_size
}
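/*
 * Editorial illustration of the output format (address and bytes are made
 * up): each line covers 16 bytes, with the wider gap every 4 bytes, and
 * interior runs of identical blocks are elided:
 *
 *      0x80e3210: 41 42 43 44  00 00 00 00  00 00 00 00  00 00 00 00
 *       ...
 *      0x80e32f0: 00 00 00 00  00 00 00 00  00 00 00 00  ff ff ff ff
 */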
/* Print one object with the given options. */
/* Relevant options: type_addresses, no_types, pointers, pointed_strings, */
/* contents. */
void
debug_print_object(const void *obj, const dump_control_t * control)
{
    const obj_header_t *pre = ((const obj_header_t *)obj) - 1;
    ulong size = pre_obj_contents_size(pre);
    const gs_memory_struct_type_t *type = pre->o_type;
    dump_options_t options = control->options;

    dprintf3(" pre=0x%lx(obj=0x%lx) size=%lu", (ulong) pre, (ulong) obj,
             size);
    switch (options & (dump_do_type_addresses | dump_do_no_types)) {
    case dump_do_type_addresses + dump_do_no_types:     /* addresses only */
        dprintf1(" type=0x%lx", (ulong) type);
        break;
    case dump_do_type_addresses:        /* addresses & names */
        dprintf2(" type=%s(0x%lx)", struct_type_name_string(type),
                 (ulong) type);
        break;
    case 0:                     /* names only */
        dprintf1(" type=%s", struct_type_name_string(type));
        /* falls through */
    case dump_do_no_types:      /* nothing */
        ;
    }
    if (options & dump_do_marks) {
        dprintf2(" smark/back=%u (0x%x)", pre->o_smark, pre->o_smark);
    }
    dputc('\n');
    if (type == &st_free)
        return;
    if (options & dump_do_pointers) {
        struct_proc_enum_ptrs((*proc)) = type->enum_ptrs;
        uint index = 0;
        enum_ptr_t eptr;
        gs_ptr_type_t ptype;

        if (proc != gs_no_struct_enum_ptrs)
            for (; (ptype = (*proc)(pre + 1, size, index, &eptr, type, NULL)) != 0;
                 ++index
                ) {
                const void *ptr = eptr.ptr;

                dprintf1(" ptr %u: ", index);
                if (ptype == ptr_string_type || ptype == ptr_const_string_type) {
                    const gs_const_string *str = (const gs_const_string *)ptr;

                    dprintf2("0x%lx(%u)", (ulong) str->data, str->size);
                    if (options & dump_do_pointed_strings) {
                        dputs(" =>\n");
                        debug_dump_contents(str->data, str->data + str->size, 6,
                                            true);
                    } else {
                        dputc('\n');
                    }
                } else {
                    dprintf1((PTR_BETWEEN(ptr, obj, (const byte *)obj + size) ?
                              "(0x%lx)\n" : "0x%lx\n"), (ulong) ptr);
                }
            }
    }
    if (options & dump_do_contents) {
        debug_dump_contents((const byte *)obj, (const byte *)obj + size,
                            0, false);
    }
}
/* Print the contents of a chunk with the given options. */
/* Relevant options: all. */
void
debug_dump_chunk(const chunk_t * cp, const dump_control_t * control)
{
    dprintf1("chunk at 0x%lx:\n", (ulong) cp);
    dprintf3(" chead=0x%lx cbase=0x%lx sbase=0x%lx\n",
             (ulong) cp->chead, (ulong) cp->cbase, (ulong) cp->sbase);
    dprintf3(" rcur=0x%lx rtop=0x%lx cbot=0x%lx\n",
             (ulong) cp->rcur, (ulong) cp->rtop, (ulong) cp->cbot);
    dprintf4(" ctop=0x%lx climit=0x%lx smark=0x%lx, size=%u\n",
             (ulong) cp->ctop, (ulong) cp->climit, (ulong) cp->smark,
             cp->smark_size);
    dprintf2(" sreloc=0x%lx cend=0x%lx\n",
             (ulong) cp->sreloc, (ulong) cp->cend);
    dprintf5("cprev=0x%lx cnext=0x%lx outer=0x%lx inner_count=%u has_refs=%s\n",
             (ulong) cp->cprev, (ulong) cp->cnext, (ulong) cp->outer,
             cp->inner_count, (cp->has_refs ? "true" : "false"));
    dprintf2(" sfree1=0x%lx sfree=0x%x\n",
             (ulong) cp->sfree1, cp->sfree);
    if (control->options & dump_do_strings) {
        debug_dump_contents((control->bottom == 0 ? cp->ctop :
                             max(control->bottom, cp->ctop)),
                            (control->top == 0 ? cp->climit :
                             min(control->top, cp->climit)),
                            0, true);
    }
    SCAN_CHUNK_OBJECTS(cp)
        DO_ALL
        if (obj_in_control_region(pre + 1,
                                  (const byte *)(pre + 1) + size,
                                  control)
            )
            debug_print_object(pre + 1, control);
    /* Temporarily redefine gs_exit so a chunk parsing error */
    /* won't actually exit. */
#define gs_exit(n) DO_NOTHING
    END_OBJECTS_SCAN
#undef gs_exit
}
void
debug_print_chunk(const chunk_t * cp)
{
    dump_control_t control;

    control = dump_control_default;
    debug_dump_chunk(cp, &control);
}
/* Print the contents of all chunks managed by an allocator. */
/* Relevant options: all. */
void
debug_dump_memory(const gs_ref_memory_t * mem, const dump_control_t * control)
{
    const chunk_t *mcp;

    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        if (obj_in_control_region(cp->cbase, cp->cend, control))
            debug_dump_chunk(cp, control);
    }
}
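/*
 * Usage sketch (editorial addition): dump everything an allocator owns,
 * including string bodies and object contents.
 *
 *      debug_dump_memory(imem, &dump_control_all);
 *
 * Pass &dump_control_default instead to select the default option set
 * (dump_do_default).
 */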
/* Find all the objects that contain a given pointer. */
void
debug_find_pointers(const gs_ref_memory_t *mem, const void *target)
{
    dump_control_t control;
    const chunk_t *mcp;

    control.options = 0;
    for (mcp = mem->cfirst; mcp != 0; mcp = mcp->cnext) {
        const chunk_t *cp = (mcp == mem->pcc ? &mem->cc : mcp);

        SCAN_CHUNK_OBJECTS(cp)
            DO_ALL
            struct_proc_enum_ptrs((*proc)) = pre->o_type->enum_ptrs;
            uint index = 0;
            enum_ptr_t eptr;

            if (proc)           /* doesn't trace refs */
                for (; (*proc)(pre + 1, size, index, &eptr, pre->o_type, NULL); ++index)
                    if (eptr.ptr == target) {
                        dprintf1("Index %d in", index);
                        debug_print_object(pre + 1, &control);
                    }
        /* Temporarily redefine gs_exit so a chunk parsing error */
        /* won't actually exit. */
#define gs_exit(n) DO_NOTHING
        END_OBJECTS_SCAN
#undef gs_exit
    }
}

#endif /* DEBUG */