/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */
/*
 * This allocator takes blocks from a coarser allocator (p->alloc) and
 * uses them as arenas.
 *
 * An arena is split into a sequence of blocks of variable size. The
 * blocks begin with a Bhdr that denotes the length (including the Bhdr)
 * of the block. An arena begins with an Arena header block (Arena,
 * ARENA_MAGIC) and ends with a Bhdr block with magic ARENATAIL_MAGIC and
 * size 0. Intermediate blocks are either allocated or free. At the end
 * of each intermediate block is a Btail, which contains information
 * about where the block starts. This is useful for walking backwards.
 *
 * Free blocks (Free*) have a magic value of FREE_MAGIC in their Bhdr
 * headers. They are kept in a binary tree (p->freeroot) traversable by
 * walking ->left and ->right. Each node of the binary tree is a pointer
 * to a circular doubly-linked list (next, prev) of blocks of identical
 * size. Blocks are added to this ``tree of lists'' by pooladd(), and
 * removed by pooldel().
 *
 * When freed, adjacent blocks are coalesced to create larger blocks when
 * possible.
 *
 * Allocated blocks (Alloc*) have one of two magic values: ALLOC_MAGIC or
 * UNALLOC_MAGIC. When blocks are released from the pool, they have
 * magic value UNALLOC_MAGIC. Once the block has been trimmed by trim()
 * and the amount of user-requested data has been recorded in the
 * datasize field of the tail, the magic value is changed to ALLOC_MAGIC.
 * All blocks returned to callers should be of type ALLOC_MAGIC, as
 * should all blocks passed to us by callers. The amount of data the user
 * asked us for can be found by subtracting the short in tail->datasize
 * from header->size. Further, up to four bytes just past the end of the
 * user-requested data are marked with a magic value, which is checked
 * to detect user overflow.
 *
 * The arenas returned by p->alloc are kept in a doubly-linked list
 * (p->arenalist) running through the arena headers, sorted by descending
 * base address (aup, down). When a new arena is allocated, we attempt
 * to merge it with its two neighbors via p->merge.
 */
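/*
 * Typical use (an illustrative sketch, not part of the original source):
 * a client such as a malloc implementation fills in a Pool with its
 * callbacks and tuning parameters, then calls poolalloc/poolfree.  The
 * field names are the ones this file relies on; the callback names
 * (sbrkalloc, plock, ...) are hypothetical.
 *
 *	static Pool example = {
 *		.name=		"example",
 *		.maxsize=	4*1024*1024,
 *		.minarena=	128*1024,
 *		.quantum=	32,
 *		.alloc=		sbrkalloc,
 *		.merge=		sbrkmerge,
 *		.lock=		plock,
 *		.unlock=	punlock,
 *		.print=		pprint,
 *		.panic=		ppanic,
 *		.flags=		0,
 *	};
 *
 *	void *v;
 *	v = poolalloc(&example, 100);
 *	poolfree(&example, v);
 */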
#include <u.h>
#include <libc.h>
#include <pool.h>

typedef struct Alloc Alloc;
typedef struct Arena Arena;
typedef struct Bhdr Bhdr;
typedef struct Btail Btail;
typedef struct Free Free;

struct Bhdr {
	uint32_t magic;
	uint32_t size;
};
enum {
	NOT_MAGIC = 0xdeadfa11,
	DEAD_MAGIC = 0xdeaddead,
};
#define B2NB(b) ((Bhdr*)((uint8_t*)(b)+(b)->size))

#define SHORT(x) (((x)[0] << 8) | (x)[1])
#define PSHORT(p, x) \
	(((uint8_t*)(p))[0] = ((x)>>8)&0xFF, \
	((uint8_t*)(p))[1] = (x)&0xFF)
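/*
 * SHORT and PSHORT read and write a 16-bit value one byte at a time,
 * most significant byte first, so the two-byte datasize field in Btail
 * needs no particular alignment.  For example, PSHORT(t->datasize, 27)
 * stores the bytes 0x00 0x1B and SHORT(t->datasize) reads back 27.
 */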
enum {
	TAIL_MAGIC0 = 0xBE,
	TAIL_MAGIC1 = 0xEF
};
struct Btail {
	uint8_t magic0;
	uint8_t datasize[2];
	uint8_t magic1;
	uint32_t size;	/* same as Bhdr->size */
};
#define B2T(b) ((Btail*)((uint8_t*)(b)+(b)->size-sizeof(Btail)))
#define B2PT(b) ((Btail*)((uint8_t*)(b)-sizeof(Btail)))
#define T2HDR(t) ((Bhdr*)((uint8_t*)(t)+sizeof(Btail)-(t)->size))

struct Free {
	Bhdr;
	Free* left;
	Free* right;
	Free* next;
	Free* prev;
};
enum {
	FREE_MAGIC = 0xBA5EBA11,
};

/*
 * the point of the notused fields is to make 8c differentiate
 * between Bhdr and Allocblk, and between Kempt and Unkempt.
 */
struct Alloc {
	Bhdr;
};
enum {
	ALLOC_MAGIC = 0x0A110C09,
	UNALLOC_MAGIC = 0xCAB00D1E + 1,
};

struct Arena {
	Bhdr;
	Arena* aup;
	Arena* down;
	uint32_t asize;
	uint32_t pad;	/* to a multiple of 8 bytes */
};
enum {
	ARENA_MAGIC = 0xC0A1E5CE + 1,
	ARENATAIL_MAGIC = 0xEC5E1A0C + 1,
};
#define A2TB(a) ((Bhdr*)((uint8_t*)(a)+(a)->asize-sizeof(Bhdr)))
#define A2B(a) B2NB(a)

enum {
	ALIGN_MAGIC = 0xA1F1D1C1,
};

enum {
	MINBLOCKSIZE = sizeof(Free)+sizeof(Btail)
};

static uint8_t datamagic[] = { 0xFE, 0xF1, 0xF0, 0xFA };

#define Poison (void*)0xCafeBabe

#define _B2D(a) ((void*)((uint8_t*)a+sizeof(Bhdr)))
#define _D2B(v) ((Alloc*)((uint8_t*)v-sizeof(Bhdr)))

// static void* _B2D(void*);
// static void* _D2B(void*);
static void* B2D(Pool*, Alloc*);
static Alloc* D2B(Pool*, void*);
static Arena* arenamerge(Pool*, Arena*, Arena*);
static void blockcheck(Pool*, Bhdr*);
static Alloc* blockmerge(Pool*, Bhdr*, Bhdr*);
static Alloc* blocksetdsize(Pool*, Alloc*, uint32_t);
static Bhdr* blocksetsize(Bhdr*, uint32_t);
static uint32_t bsize2asize(Pool*, uint32_t);
static uint32_t dsize2bsize(Pool*, uint32_t);
static uint32_t getdsize(Alloc*);
static Alloc* trim(Pool*, Alloc*, uint32_t);
static Free* listadd(Free*, Free*);
static void logstack(Pool*);
static Free** ltreewalk(Free**, uint32_t);
static void memmark(void*, int, uint32_t);
static Free* pooladd(Pool*, Alloc*);
static void* poolallocl(Pool*, uint32_t);
static void poolcheckl(Pool*);
static void poolcheckarena(Pool*, Arena*);
static int poolcompactl(Pool*);
static Alloc* pooldel(Pool*, Free*);
static void pooldumpl(Pool*);
static void pooldumparena(Pool*, Arena*);
static void poolfreel(Pool*, void*);
static void poolnewarena(Pool*, uint32_t);
static void* poolreallocl(Pool*, void*, uint32_t);
static Free* treedelete(Free*, Free*);
static Free* treeinsert(Free*, Free*);
static Free* treelookup(Free*, uint32_t);
static Free* treelookupgt(Free*, uint32_t);

/*
 * Debugging
 *
 * Antagonism causes blocks to always be filled with garbage if their
 * contents are undefined. This tickles both programs and the library.
 * It's a linear time hit but not so noticeable during nondegenerate use.
 * It would be worth leaving in except that it negates the benefits of the
 * kernel's demand-paging. The tail magic and end-of-data magic
 * provide most of the user-visible benefit that antagonism does anyway.
 *
 * Paranoia causes the library to recheck the entire pool on each lock
 * or unlock. A failed check on unlock means we tripped over ourselves,
 * while a failed check on lock tends to implicate the user. Paranoia has
 * the potential to slow things down a fair amount for pools with large
 * numbers of allocated blocks. It completely negates all benefits won
 * by the binary tree. Turning on paranoia in the kernel makes it painfully
 * slow.
 *
 * Verbosity induces the dumping of the pool via p->print at each lock operation.
 * By default, only one line is logged for each alloc, free, and realloc.
 */

/* the if(!x);else avoids ``dangling else'' problems */
#define antagonism if(!(p->flags & POOL_ANTAGONISM)){}else
#define paranoia if(!(p->flags & POOL_PARANOIA)){}else
#define verbosity if(!(p->flags & POOL_VERBOSITY)){}else

#define DPRINT if(!(p->flags & POOL_DEBUGGING)){}else p->print
#define LOG if(!(p->flags & POOL_LOGGING)){}else p->print
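/*
 * Illustrative note (not in the original source): the flags above are
 * plain bits in p->flags, so a client that owns the Pool can turn on
 * checking before handing out blocks, e.g.
 *
 *	pool->flags |= POOL_PARANOIA|POOL_LOGGING;
 *
 * POOL_PARANOIA revalidates the whole pool around each locked operation
 * and POOL_LOGGING emits the one-line LOG records below via p->print.
 */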
/*
 * Tree walking
 */
static void
checklist(Free *t)
{
	Free *q;

	for(q=t->next; q!=t; q=q->next){
		assert(q->size == t->size);
		assert(q->next==nil || q->next->prev==q);
		assert(q->prev==nil || q->prev->next==q);
	//	assert(q->left==nil);
	//	assert(q->right==nil);
		assert(q->magic==FREE_MAGIC);
	}
}

static void
checktree(Free *t, int a, int b)
{
	assert(t->magic==FREE_MAGIC);
	assert(a < t->size && t->size < b);
	assert(t->next==nil || t->next->prev==t);
	assert(t->prev==nil || t->prev->next==t);
	checklist(t);
	if(t->left)
		checktree(t->left, a, t->size);
	if(t->right)
		checktree(t->right, t->size, b);
}

/* ltreewalk: return address of pointer to node of size == size */
static Free**
ltreewalk(Free **t, uint32_t size)
{
	assert(t != nil /* ltreewalk */);

	for(;;) {
		if(*t == nil)
			return t;

		assert((*t)->magic == FREE_MAGIC);

		if(size == (*t)->size)
			return t;
		if(size < (*t)->size)
			t = &(*t)->left;
		else
			t = &(*t)->right;
	}
}

/* treelookup: find node in tree with size == size */
static Free*
treelookup(Free *t, uint32_t size)
{
	return *ltreewalk(&t, size);
}

/* treeinsert: insert node into tree */
static Free*
treeinsert(Free *tree, Free *node)
{
	Free **loc, *repl;

	assert(node != nil /* treeinsert */);
	loc = ltreewalk(&tree, node->size);
	if(*loc == nil) {
		node->left = nil;
		node->right = nil;
	} else {	/* replace existing node */
		repl = *loc;
		node->left = repl->left;
		node->right = repl->right;
	}
	*loc = node;
	return tree;
}

/* treedelete: remove node from tree */
static Free*
treedelete(Free *tree, Free *node)
{
	Free **loc, **lsucc, *succ;

	assert(node != nil /* treedelete */);
	loc = ltreewalk(&tree, node->size);
	assert(*loc == node);

	if(node->left == nil)
		*loc = node->right;
	else if(node->right == nil)
		*loc = node->left;
	else {
		/* have two children, use inorder successor as replacement */
		for(lsucc = &node->right; (*lsucc)->left; lsucc = &(*lsucc)->left)
			;
		succ = *lsucc;
		*lsucc = succ->right;
		succ->left = node->left;
		succ->right = node->right;
		*loc = succ;
	}

	node->left = node->right = Poison;
	return tree;
}

/* treelookupgt: find smallest node in tree with size >= size */
static Free*
treelookupgt(Free *t, uint32_t size)
{
	Free *lastgood;	/* last node we saw that was big enough */

	lastgood = nil;
	for(;;) {
		if(t == nil)
			return lastgood;
		if(size == t->size)
			return t;
		if(size < t->size) {
			lastgood = t;
			t = t->left;
		} else
			t = t->right;
	}
}

/*
 * List maintenance
 */

/* listadd: add a node to a doubly linked list */
static Free*
listadd(Free *list, Free *node)
{
	if(list == nil) {
		node->next = node;
		node->prev = node;
		return node;
	}

	node->prev = list->prev;
	node->next = list;

	node->prev->next = node;
	node->next->prev = node;

	return list;
}

/* listdelete: remove node from a doubly linked list */
static Free*
listdelete(Pool *p, Free *list, Free *node)
{
	if(node->next == node) {	/* singular list */
		node->prev = node->next = Poison;
		return nil;
	}

	if(node->next == nil)
		p->panic(p, "pool->next");
	if(node->prev == nil)
		p->panic(p, "pool->prev");

	node->next->prev = node->prev;
	node->prev->next = node->next;

	if(list == node)
		list = node->next;

	node->prev = node->next = Poison;
	return list;
}

/*
 * Pool maintenance
 */

/* pooladd: add anode to the free pool */
static Free*
pooladd(Pool *p, Alloc *anode)
{
	Free *lst, *olst;
	Free *node;
	Free **parent;

	antagonism {
		memmark(_B2D(anode), 0xF7, anode->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node = (Free*)anode;
	node->magic = FREE_MAGIC;
	parent = ltreewalk(&p->freeroot, node->size);
	olst = *parent;
	lst = listadd(olst, node);
	if(olst != lst)	/* need to update tree */
		*parent = treeinsert(*parent, lst);
	p->curfree += node->size;
	return node;
}

/* pooldel: remove node from the free pool */
static Alloc*
pooldel(Pool *p, Free *node)
{
	Free *lst, *olst;
	Free **parent;

	parent = ltreewalk(&p->freeroot, node->size);
	olst = *parent;
	assert(olst != nil /* pooldel */);

	lst = listdelete(p, olst, node);
	if(lst == nil)
		*parent = treedelete(*parent, olst);
	else if(lst != olst)
		*parent = treeinsert(*parent, lst);

	node->left = node->right = Poison;
	p->curfree -= node->size;

	antagonism {
		memmark(_B2D(node), 0xF9, node->size-sizeof(Bhdr)-sizeof(Btail));
	}

	node->magic = UNALLOC_MAGIC;
	return (Alloc*)node;
}

/*
 * Block maintenance
 */
/* block allocation */
static uint32_t
dsize2bsize(Pool *p, uint32_t sz)
{
	sz += sizeof(Bhdr)+sizeof(Btail);
	if(sz < p->minblock)
		sz = p->minblock;
	if(sz < MINBLOCKSIZE)
		sz = MINBLOCKSIZE;
	sz = (sz+p->quantum-1)&~(p->quantum-1);
	return sz;
}
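/*
 * Worked example (illustrative; assumes p->quantum = 32 and a small
 * p->minblock): a request for 100 user bytes becomes 100 + sizeof(Bhdr)
 * + sizeof(Btail) = 116 bytes, which rounds up to the next quantum
 * multiple, 128.  The 28 spare bytes are what blocksetdsize() later
 * records in the tail as b->size - dsize.
 */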
static uint32_t
bsize2asize(Pool *p, uint32_t sz)
{
	sz += sizeof(Arena)+sizeof(Btail);
	if(sz < p->minarena)
		sz = p->minarena;
	sz = (sz+p->quantum)&~(p->quantum-1);
	return sz;
}

/* blockmerge: merge a and b, known to be adjacent */
/* both are removed from pool if necessary. */
static Alloc*
blockmerge(Pool *pool, Bhdr *a, Bhdr *b)
{
	Btail *t;

	assert(B2NB(a) == b);

	if(a->magic == FREE_MAGIC)
		pooldel(pool, (Free*)a);
	if(b->magic == FREE_MAGIC)
		pooldel(pool, (Free*)b);

	t = B2T(a);
	t->size = (uint32_t)Poison;
	t->magic0 = NOT_MAGIC;
	t->magic1 = NOT_MAGIC;
	PSHORT(t->datasize, NOT_MAGIC);

	a->size += b->size;
	t = B2T(a);
	t->size = a->size;
	PSHORT(t->datasize, 0xFFFF);

	b->size = NOT_MAGIC;
	b->magic = NOT_MAGIC;

	a->magic = UNALLOC_MAGIC;
	return (Alloc*)a;
}

/* blocksetsize: set the total size of a block, fixing tail pointers */
static Bhdr*
blocksetsize(Bhdr *b, uint32_t bsize)
{
	Btail *t;

	assert(b->magic != FREE_MAGIC /* blocksetsize */);

	b->size = bsize;
	t = B2T(b);
	t->size = b->size;
	t->magic0 = TAIL_MAGIC0;
	t->magic1 = TAIL_MAGIC1;
	return b;
}

/* getdsize: return the requested data size for an allocated block */
static uint32_t
getdsize(Alloc *b)
{
	Btail *t;

	t = B2T(b);
	return b->size - SHORT(t->datasize);
}

/* blocksetdsize: set the user data size of a block */
static Alloc*
blocksetdsize(Pool *p, Alloc *b, uint32_t dsize)
{
	Btail *t;
	uint8_t *q, *eq;

	assert(b->size >= dsize2bsize(p, dsize));
	assert(b->size - dsize < 0x10000);

	t = B2T(b);
	PSHORT(t->datasize, b->size - dsize);

	q = (uint8_t*)_B2D(b)+dsize;
	eq = (uint8_t*)t;
	if(eq > q+4)
		eq = q+4;
	for(; q<eq; q++)
		*q = datamagic[((uint32_t)(uintptr)q)%nelem(datamagic)];

	return b;
}
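/*
 * Illustrative layout for the 128-byte block from the example above,
 * holding a 100-byte user request:
 *
 *	[Bhdr 8 | 100 bytes user data | 4 datamagic bytes | 8 unused | Btail 8]
 *
 * getdsize() recovers 100 as b->size - SHORT(t->datasize) = 128 - 28,
 * and blockcheck() later verifies the datamagic bytes to catch writes
 * just past the end of the user data.
 */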
/* trim: trim a block down to what is needed to hold dsize bytes of user data */
static Alloc*
trim(Pool *p, Alloc *b, uint32_t dsize)
{
	uint32_t extra, bsize;
	Alloc *frag;

	bsize = dsize2bsize(p, dsize);
	extra = b->size - bsize;
	if(b->size - dsize >= 0x10000 ||
	  (extra >= bsize>>2 && extra >= MINBLOCKSIZE && extra >= p->minblock)) {
		blocksetsize(b, bsize);
		frag = (Alloc*) B2NB(b);

		antagonism {
			memmark(frag, 0xF1, extra);
		}

		frag->magic = UNALLOC_MAGIC;
		blocksetsize(frag, extra);
		pooladd(p, frag);
	}

	b->magic = ALLOC_MAGIC;
	blocksetdsize(p, b, dsize);
	return b;
}
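/*
 * Note (illustrative numbers): trim() only splits when the leftover is
 * worth keeping as its own free block.  With a 64-byte bsize and a
 * 256-byte candidate block, extra = 192, which is >= bsize>>2,
 * >= MINBLOCKSIZE and (assuming a small p->minblock) >= p->minblock, so
 * the trailing 192 bytes go back into the free tree.  Otherwise the
 * whole block is handed out and the slack is simply recorded in the
 * tail's datasize (the 0x10000 test keeps that slack within 16 bits).
 */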
static Alloc*
freefromfront(Pool *p, Alloc *b, uint32_t skip)
{
	Alloc *bb;

	skip = skip&~(p->quantum-1);
	if(skip >= 0x1000 || (skip >= b->size>>2 && skip >= MINBLOCKSIZE && skip >= p->minblock)){
		bb = (Alloc*)((uint8_t*)b+skip);
		blocksetsize(bb, b->size-skip);
		bb->magic = UNALLOC_MAGIC;
		blocksetsize(b, skip);
		b->magic = UNALLOC_MAGIC;
		pooladd(p, b);
		return bb;
	}
	return b;
}

/*
 * Arena maintenance
 */

/* arenasetsize: set arena size, updating tail */
static void
arenasetsize(Arena *a, uint32_t asize)
{
	Bhdr *atail;

	a->asize = asize;
	atail = A2TB(a);
	atail->magic = ARENATAIL_MAGIC;
	atail->size = 0;
}

/* poolnewarena: allocate new arena */
static void
poolnewarena(Pool *p, uint32_t asize)
{
	Arena *a;
	Arena *ap, *lastap;
	Alloc *b;

	LOG(p, "newarena %lud\n", asize);
	if(p->cursize+asize > p->maxsize) {
		if(poolcompactl(p) == 0){
			LOG(p, "pool too big: %lud+%lud > %lud\n",
				p->cursize, asize, p->maxsize);
			werrstr("memory pool too large");
		}
		return;
	}

	if((a = p->alloc(asize)) == nil) {
		/* assume errstr set by p->alloc */
		return;
	}

	p->cursize += asize;

	/* arena hdr */
	a->magic = ARENA_MAGIC;
	blocksetsize(a, sizeof(Arena));
	arenasetsize(a, asize);
	blockcheck(p, a);

	/* create one large block in arena */
	b = (Alloc*)A2B(a);
	b->magic = UNALLOC_MAGIC;
	blocksetsize(b, (uint8_t*)A2TB(a)-(uint8_t*)b);
	blockcheck(p, b);
	pooladd(p, b);
	blockcheck(p, b);

	/* sort arena into descending sorted arena list */
	for(lastap=nil, ap=p->arenalist; ap > a; lastap=ap, ap=ap->down)
		;

	if(a->down = ap)	/* assign = */
		a->down->aup = a;

	if(a->aup = lastap)	/* assign = */
		a->aup->down = a;
	else
		p->arenalist = a;

	/* merge with surrounding arenas if possible */
	/* must do a with up before down with a (think about it) */
	if(a->aup)
		arenamerge(p, a, a->aup);
	if(a->down)
		arenamerge(p, a->down, a);
}
/* blockgrow: grow a block to encompass space past its end, possibly by */
/* trimming it into two different blocks. */
static void
blockgrow(Pool *p, Bhdr *b, uint32_t nsize)
{
	if(b->magic == FREE_MAGIC) {
		Alloc *a;
		Bhdr *bnxt;

		a = pooldel(p, (Free*)b);
		blockcheck(p, a);
		blocksetsize(a, nsize);
		blockcheck(p, a);
		bnxt = B2NB(a);
		if(bnxt->magic == FREE_MAGIC)
			a = blockmerge(p, a, bnxt);
		blockcheck(p, a);
		pooladd(p, a);
	} else {
		Alloc *a;
		uint32_t dsize;

		a = (Alloc*)b;
		dsize = getdsize(a);
		blocksetsize(a, nsize);
		trim(p, a, dsize);
	}
}
/* arenamerge: attempt to coalesce two arenas that might be adjacent */
static Arena*
arenamerge(Pool *p, Arena *bot, Arena *top)
{
	Bhdr *bbot, *btop;
	Btail *t;

	blockcheck(p, bot);
	blockcheck(p, top);
	assert(bot->aup == top && top > bot);

	if(p->merge == nil || p->merge(bot, top) == 0)
		return nil;

	/* remove top from list */
	if(bot->aup = top->aup)	/* assign = */
		bot->aup->down = bot;
	else
		p->arenalist = bot;

	/* save ptrs to last block in bot, first block in top */
	t = B2PT(A2TB(bot));
	bbot = T2HDR(t);
	btop = A2B(top);
	blockcheck(p, bbot);
	blockcheck(p, btop);

	/* grow bottom arena to encompass top */
	arenasetsize(bot, top->asize + ((uint8_t*)top - (uint8_t*)bot));

	/* grow bottom block to encompass space between arenas */
	blockgrow(p, bbot, (uint8_t*)btop-(uint8_t*)bbot);
	blockcheck(p, bbot);
	return bot;
}
/* dumpblock: print block's vital stats */
static void
dumpblock(Pool *p, Bhdr *b)
{
	uint32_t *dp;
	uint32_t dsize;
	uint8_t *cp;

	dp = (uint32_t*)b;
	p->print(p, "pool %s block %p\nhdr %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux\n",
		p->name, b, dp[0], dp[1], dp[2], dp[3], dp[4], dp[5]);
	dp = (uint32_t*)B2T(b);
	p->print(p, "tail %.8lux %.8lux %.8lux %.8lux %.8lux %.8lux | %.8lux %.8lux\n",
		dp[-6], dp[-5], dp[-4], dp[-3], dp[-2], dp[-1], dp[0], dp[1]);

	if(b->magic == ALLOC_MAGIC){
		dsize = getdsize((Alloc*)b);
		if(dsize >= b->size)	/* user data size corrupt */
			return;

		cp = (uint8_t*)_B2D(b)+dsize;
		p->print(p, "user data ");
		p->print(p, "%.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux",
			cp[-8], cp[-7], cp[-6], cp[-5], cp[-4], cp[-3], cp[-2], cp[-1]);
		p->print(p, " | %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux %.2ux\n",
			cp[0], cp[1], cp[2], cp[3], cp[4], cp[5], cp[6], cp[7]);
	}
}

static void
printblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
}

static void
panicblock(Pool *p, Bhdr *b, char *msg)
{
	p->print(p, "%s\n", msg);
	dumpblock(p, b);
	p->panic(p, "pool panic");
}
/* blockcheck: ensure a block is consistent with our expectations */
/* should only be called when holding pool lock */
static void
blockcheck(Pool *p, Bhdr *b)
{
	Alloc *a;
	Btail *t;
	int i, n;
	uint8_t *q, *bq, *eq;
	uint32_t dsize;

	switch(b->magic) {
	default:
		panicblock(p, b, "bad magic");
	case FREE_MAGIC:
	case UNALLOC_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		break;
	case DEAD_MAGIC:
		t = B2T(b);
		if(t->magic0 != TAIL_MAGIC0 || t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");
		n = getdsize((Alloc*)b);
		q = _B2D(b);
		q += 8;
		for(i=8; i<n; i++)
			if(*q++ != 0xDA)
				panicblock(p, b, "dangling pointer write");
		break;
	case ARENA_MAGIC:
		b = A2TB((Arena*)b);
		if(b->magic != ARENATAIL_MAGIC)
			panicblock(p, b, "bad arena size");
		/* fall through */
	case ARENATAIL_MAGIC:
		if(b->size != 0)
			panicblock(p, b, "bad arena tail size");
		break;
	case ALLOC_MAGIC:
		a = (Alloc*)b;
		t = B2T(b);
		dsize = getdsize(a);
		bq = (uint8_t*)_B2D(a)+dsize;
		eq = (uint8_t*)t;

		if(t->magic0 != TAIL_MAGIC0){
			/* if someone wrote exactly one byte over and it was a NUL, we sometimes only complain. */
			if((p->flags & POOL_TOLERANCE) && bq == eq && t->magic0 == 0)
				printblock(p, b, "mem user overflow (magic0)");
			else
				panicblock(p, b, "corrupt tail magic0");
		}

		if(t->magic1 != TAIL_MAGIC1)
			panicblock(p, b, "corrupt tail magic1");
		if(T2HDR(t) != b)
			panicblock(p, b, "corrupt tail ptr");

		if(dsize2bsize(p, dsize) > a->size)
			panicblock(p, b, "too much block data");

		if(eq > bq+4)
			eq = bq+4;
		for(q=bq; q<eq; q++){
			if(*q != datamagic[((uintptr)q)%nelem(datamagic)]){
				if(q == bq && *q == 0 && (p->flags & POOL_TOLERANCE)){
					printblock(p, b, "mem user overflow");
					continue;
				}
				panicblock(p, b, "mem user overflow");
			}
		}
		break;
	}
}

/*
 * compact an arena by shifting all the free blocks to the end.
 * assumes pool lock is held.
 */
enum {
	FLOATING_MAGIC = 0xCBCBCBCB,	/* temporarily neither allocated nor in the free tree */
};

static int
arenacompact(Pool *p, Arena *a)
{
	Bhdr *b, *wb, *eb, *nxt;
	int compacted;

	if(p->move == nil)
		p->panic(p, "don't call me when pool->move is nil\n");

	poolcheckarena(p, a);
	eb = A2TB(a);
	compacted = 0;
	for(b=wb=A2B(a); b && b < eb; b=nxt) {
		nxt = B2NB(b);
		switch(b->magic) {
		case FREE_MAGIC:
			pooldel(p, (Free*)b);
			b->magic = FLOATING_MAGIC;
			break;
		case ALLOC_MAGIC:
			if(wb != b) {
				memmove(wb, b, b->size);
				p->move(_B2D(b), _B2D(wb));
				compacted = 1;
			}
			wb = B2NB(wb);
			break;
		}
	}

	/*
	 * the only free data is now at the end of the arena, pointed
	 * at by wb. all we need to do is set its size and get out.
	 */
	if(wb < eb) {
		wb->magic = UNALLOC_MAGIC;
		blocksetsize(wb, (uint8_t*)eb-(uint8_t*)wb);
		pooladd(p, (Alloc*)wb);
	}

	return compacted;
}
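/*
 * The p->move callback is what makes compaction possible at all: for each
 * allocated block that slides down, arenacompact() first memmoves the
 * block and then calls p->move(old, new) with the block's previous and
 * current user-data addresses, so the client can fix up any pointers it
 * holds into that block.  Pools without such a callback leave p->move
 * nil, and poolcompactl() below simply refuses to compact.
 */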
/*
 * compact a pool by compacting each individual arena.
 * 'twould be nice to shift blocks from one arena to the
 * next but it's a pain to code.
 */
static int
poolcompactl(Pool *pool)
{
	Arena *a;
	int compacted;

	if(pool->move == nil || pool->lastcompact == pool->nfree)
		return 0;

	pool->lastcompact = pool->nfree;
	compacted = 0;
	for(a=pool->arenalist; a; a=a->down)
		compacted |= arenacompact(pool, a);
	return compacted;
}

/*
static int
poolcompactl(Pool*)
{
	return 0;
}
*/

/*
 * Actual allocators
 */

/*
static void*
_B2D(void *a)
{
	return (uint8_t*)a+sizeof(Bhdr);
}
*/

static void*
B2D(Pool *p, Alloc *a)
{
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "B2D called on unworthy block");
	return _B2D(a);
}

/*
static void*
_D2B(void *v)
{
	Alloc *a;

	a = (Alloc*)((uint8_t*)v-sizeof(Bhdr));
	return a;
}
*/

static Alloc*
D2B(Pool *p, void *v)
{
	Alloc *a;
	uint32_t *u;

	if((uintptr)v&(sizeof(uint32_t)-1))
		v = (char*)v - ((uintptr)v&(sizeof(uint32_t)-1));
	u = v;
	while(u[-1] == ALIGN_MAGIC)
		u--;
	a = _D2B(u);
	if(a->magic != ALLOC_MAGIC)
		p->panic(p, "D2B called on non-block %p (double-free?)", v);
	return a;
}

/* poolallocl: attempt to allocate block to hold dsize user bytes; assumes lock held */
static void*
poolallocl(Pool *p, uint32_t dsize)
{
	uint32_t bsize;
	Free *fb;
	Alloc *ab;

	if(dsize >= 0x80000000UL){	/* for sanity, overflow */
		werrstr("invalid allocation size");
		return nil;
	}

	bsize = dsize2bsize(p, dsize);

	fb = treelookupgt(p->freeroot, bsize);
	if(fb == nil) {
		poolnewarena(p, bsize2asize(p, bsize));
		if((fb = treelookupgt(p->freeroot, bsize)) == nil) {
			/* assume poolnewarena failed and set %r */
			return nil;
		}
	}

	ab = trim(p, pooldel(p, fb), dsize);
	p->curalloc += ab->size;
	antagonism {
		memset(B2D(p, ab), 0xDF, dsize);
	}
	return B2D(p, ab);
}

/* poolreallocl: attempt to grow v to ndsize bytes; assumes lock held */
static void*
poolreallocl(Pool *p, void *v, uint32_t ndsize)
{
	Alloc *a;
	Bhdr *left, *right, *newb;
	Btail *t;
	uint32_t nbsize;
	uint32_t odsize;
	uint32_t obsize;
	void *nv;

	if(v == nil)	/* for ANSI */
		return poolallocl(p, ndsize);
	if(ndsize == 0) {
		poolfreel(p, v);
		return nil;
	}
	a = D2B(p, v);
	blockcheck(p, a);
	odsize = getdsize(a);
	obsize = a->size;

	/* can reuse the same block? */
	nbsize = dsize2bsize(p, ndsize);
	if(nbsize <= a->size) {
	Returnblock:
		if(v != _B2D(a))
			memmove(_B2D(a), v, odsize);
		a = trim(p, a, ndsize);
		p->curalloc -= obsize;
		p->curalloc += a->size;
		v = B2D(p, a);
		return v;
	}

	/* can merge with surrounding blocks? */
	right = B2NB(a);
	if(right->magic == FREE_MAGIC && a->size+right->size >= nbsize) {
		a = blockmerge(p, a, right);
		goto Returnblock;
	}

	t = B2PT(a);
	left = T2HDR(t);
	if(left->magic == FREE_MAGIC && left->size+a->size >= nbsize) {
		a = blockmerge(p, left, a);
		goto Returnblock;
	}

	if(left->magic == FREE_MAGIC && right->magic == FREE_MAGIC
	&& left->size+a->size+right->size >= nbsize) {
		a = blockmerge(p, blockmerge(p, left, a), right);
		goto Returnblock;
	}

	if((nv = poolallocl(p, ndsize)) == nil)
		return nil;

	/* maybe the new block is next to us; if so, merge */
	left = T2HDR(B2PT(a));
	right = B2NB(a);
	newb = D2B(p, nv);
	if(left == newb || right == newb) {
		if(left == newb || left->magic == FREE_MAGIC)
			a = blockmerge(p, left, a);
		if(right == newb || right->magic == FREE_MAGIC)
			a = blockmerge(p, a, right);
		assert(a->size >= nbsize);
		goto Returnblock;
	}

	/* enough cleverness */
	memmove(nv, v, odsize);
	antagonism {
		memset((char*)nv+odsize, 0xDE, ndsize-odsize);
	}
	poolfreel(p, v);
	return nv;
}

static void*
alignptr(void *v, uint32_t align, long offset)
{
	char *c;
	uint32_t off;

	c = v;
	if(align){
		off = (uintptr)c%align;
		if(off != offset){
			c += offset - off;
			if(off > offset)
				c += align;
		}
	}
	return c;
}
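/*
 * Worked example (hypothetical addresses): with align = 64 and
 * offset = 16, a pointer at 0x1008 has off = 8, so alignptr advances it
 * by offset - off = 8 bytes to 0x1010, which is 16 mod 64.  Had off been
 * larger than offset (say 0x1028, off = 40), the adjustment would go
 * backwards, so an extra align is added: 0x1028 - 24 + 64 = 0x1050,
 * again 16 mod 64.
 */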
/* poolallocalignl: allocate as described below; assumes pool locked */
static void*
poolallocalignl(Pool *p, uint32_t dsize, uint32_t align, long offset,
		uint32_t span)
{
	uint32_t asize;
	void *v;
	char *c;
	uint32_t *u;
	int skip;
	Alloc *b;

	/*
	 * allocate block
	 *	dsize bytes
	 *	addr == offset (modulo align)
	 *	does not cross span-byte block boundary
	 *
	 * to satisfy alignment, just allocate an extra
	 * align bytes and then shift appropriately.
	 *
	 * to satisfy span, try once and see if we're
	 * lucky. the second time, allocate 2x asize
	 * so that we definitely get one not crossing
	 * the boundary.
	 */
	if(align){
		if(offset < 0)
			offset = align - ((-offset)%align);
		else
			offset %= align;
	}
	asize = dsize+align;
	v = poolallocl(p, asize);
	if(v == nil)
		return nil;
	if(span && (uintptr)v/span != ((uintptr)v+asize)/span){
		/* try again */
		poolfreel(p, v);
		v = poolallocl(p, 2*asize);
		if(v == nil)
			return nil;
	}

	/*
	 * figure out what pointer we want to return
	 */
	c = alignptr(v, align, offset);
	if(span && (uintptr)c/span != (uintptr)(c+dsize-1)/span){
		c += span - (uintptr)c%span;
		c = alignptr(c, align, offset);
		if((uintptr)c/span != (uintptr)(c+dsize-1)/span){
			poolfreel(p, v);
			werrstr("cannot satisfy dsize %lud span %lud with align %lud+%ld", dsize, span, align, offset);
			return nil;
		}
	}
	skip = c - (char*)v;

	/*
	 * free up the skip bytes before that pointer
	 * or mark it as unavailable.
	 */
	b = _D2B(v);
	b = freefromfront(p, b, skip);
	v = _B2D(b);
	skip = c - (char*)v;
	if(c > (char*)v){
		u = v;
		while(c >= (char*)u+sizeof(uint32_t))
			*u++ = ALIGN_MAGIC;
	}
	trim(p, b, skip+dsize);
	assert(D2B(p, c) == b);

	antagonism {
		memset(c, 0xDD, dsize);
	}
	return c;
}

/* poolfree: free block obtained from poolalloc; assumes lock held */
static void
poolfreel(Pool *p, void *v)
{
	Alloc *ab;
	Bhdr *back, *fwd;

	if(v == nil)	/* for ANSI */
		return;

	ab = D2B(p, v);
	blockcheck(p, ab);

	if(p->flags&POOL_NOREUSE){
		int n;

		ab->magic = DEAD_MAGIC;
		n = getdsize(ab)-8;
		if(n > 0)
			memset((uint8_t*)v+8, 0xDA, n);
		return;
	}

	p->nfree++;
	p->curalloc -= ab->size;
	back = T2HDR(B2PT(ab));
	if(back->magic == FREE_MAGIC)
		ab = blockmerge(p, back, ab);

	fwd = B2NB(ab);
	if(fwd->magic == FREE_MAGIC)
		ab = blockmerge(p, ab, fwd);

	pooladd(p, ab);
}

void*
poolalloc(Pool *p, uint32_t n)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocl(p, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolalloc %p %lud = %p\n", p, n, v);
	p->unlock(p);
	return v;
}

void*
poolallocalign(Pool *p, uint32_t n, uint32_t align, int32_t offset,
	       uint32_t span)
{
	void *v;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	v = poolallocalignl(p, n, align, offset, span);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolalignspanalloc %p %lud %lud %lud %ld = %p\n", p, n, align, span, offset, v);
	p->unlock(p);
	return v;
}

int
poolcompact(Pool *p)
{
	int rv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	rv = poolcompactl(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	LOG(p, "poolcompact %p\n", p);
	p->unlock(p);
	return rv;
}

void*
poolrealloc(Pool *p, void *v, uint32_t n)
{
	void *nv;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	nv = poolreallocl(p, v, n);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolrealloc %p %p %ld = %p\n", p, v, n, nv);
	p->unlock(p);
	return nv;
}

void
poolfree(Pool *p, void *v)
{
	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	poolfreel(p, v);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolfree %p %p\n", p, v);
	p->unlock(p);
}

/*
 * Return the real size of a block, and let the user use it.
 */
uint32_t
poolmsize(Pool *p, void *v)
{
	Alloc *b;
	uint32_t dsize;

	p->lock(p);
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(v == nil)	/* consistency with other braindead ANSI-ness */
		dsize = 0;
	else {
		b = D2B(p, v);
		dsize = (b->size&~(p->quantum-1)) - sizeof(Bhdr) - sizeof(Btail);
		assert(dsize >= getdsize(b));
		blocksetdsize(p, b, dsize);
	}
	paranoia {
		poolcheckl(p);
	}
	verbosity {
		pooldumpl(p);
	}
	if(p->logstack && (p->flags & POOL_LOGGING)) p->logstack(p);
	LOG(p, "poolmsize %p %p = %ld\n", p, v, dsize);
	p->unlock(p);
	return dsize;
}

/*
 * Debugging
 */
static void
poolcheckarena(Pool *p, Arena *a)
{
	Bhdr *b;
	Bhdr *atail;

	atail = A2TB(a);
	for(b=a; b->magic != ARENATAIL_MAGIC && b<atail; b=B2NB(b))
		blockcheck(p, b);
	blockcheck(p, b);
	if(b != atail)
		p->panic(p, "found wrong tail");
}

static void
poolcheckl(Pool *p)
{
	Arena *a;

	for(a=p->arenalist; a; a=a->down)
		poolcheckarena(p, a);
	if(p->freeroot)
		checktree(p->freeroot, 0, 1<<30);
}

void
poolcheck(Pool *p)
{
	p->lock(p);
	poolcheckl(p);
	p->unlock(p);
}

void
poolblockcheck(Pool *p, void *v)
{
	if(v == nil)
		return;

	p->lock(p);
	blockcheck(p, D2B(p, v));
	p->unlock(p);
}

static void
pooldumpl(Pool *p)
{
	Arena *a;

	p->print(p, "pool %p %s\n", p, p->name);
	for(a=p->arenalist; a; a=a->down)
		pooldumparena(p, a);
}

void
pooldump(Pool *p)
{
	p->lock(p);
	pooldumpl(p);
	p->unlock(p);
}

static void
pooldumparena(Pool *p, Arena *a)
{
	Bhdr *b;

	for(b=a; b->magic != ARENATAIL_MAGIC; b=B2NB(b))
		p->print(p, "(%p %.8lux %lud)", b, b->magic, b->size);
	p->print(p, "\n");
}

/*
 * mark the memory in such a way that we know who marked it
 * (via the signature) and we know where the marking started.
 */
static void
memmark(void *v, int sig, uint32_t size)
{
	uint8_t *p, *ep;
	uint32_t *lp, *elp;

	lp = v;
	elp = lp+size/4;
	while(lp < elp){
		*lp = (sig<<24) ^ ((uintptr)lp-(uintptr)v);
		lp++;
	}
	p = (uint8_t*)lp;
	ep = (uint8_t*)v+size;
	while(p<ep)
		*p++ = sig;
}