/* lumpcache.c */
#include "stdinc.h"
#include "dat.h"
#include "fns.h"

/*
 * CHECK(x) expands to a full cache consistency check (checklumpcache)
 * when the first definition is enabled; it compiles to nothing by default.
 */
/* #define CHECK(x) x */
#define CHECK(x)
typedef struct LumpCache LumpCache;

enum
{
	HashLog		= 9,			/* log2 of the score hash table size */
	HashSize	= 1<<HashLog,
	HashMask	= HashSize - 1,
};
/*
 * in-memory cache of lumps, indexed by (score, type).
 * eviction victims are chosen via a heap ordered by
 * second-to-last use time (used2).
 */
struct LumpCache
{
	QLock	lock;
	Rendez	full;		/* signaled when blocks/space may be available */
	Lump	*free;		/* list of available lumps */
	u32int	allowed;	/* total allowable space for packets */
	u32int	avail;		/* remaining space for packets */
	u32int	now;		/* ticks for usage timestamps */
	Lump	**heads;	/* hash table for finding address */
	int	nheap;		/* number of available victims */
	Lump	**heap;		/* heap for locating victims */
	int	nblocks;	/* number of blocks allocated */
	Lump	*blocks;	/* array of block descriptors */
};
/* the single, file-global lump cache */
static LumpCache lumpcache;

static void	delheap(Lump *db);
static int	downheap(int i, Lump *b);
static void	fixheap(int i, Lump *b);
static int	upheap(int i, Lump *b);
static Lump	*bumplump(void);
  33. void
  34. initlumpcache(u32int size, u32int nblocks)
  35. {
  36. Lump *last, *b;
  37. int i;
  38. lumpcache.full.l = &lumpcache.lock;
  39. lumpcache.nblocks = nblocks;
  40. lumpcache.allowed = size;
  41. lumpcache.avail = size;
  42. lumpcache.heads = MKNZ(Lump*, HashSize);
  43. lumpcache.heap = MKNZ(Lump*, nblocks);
  44. lumpcache.blocks = MKNZ(Lump, nblocks);
  45. setstat(StatLcacheSize, lumpcache.nblocks);
  46. last = nil;
  47. for(i = 0; i < nblocks; i++){
  48. b = &lumpcache.blocks[i];
  49. b->type = TWID8;
  50. b->heap = TWID32;
  51. b->next = last;
  52. last = b;
  53. }
  54. lumpcache.free = last;
  55. lumpcache.nheap = 0;
  56. }
/*
 * find or create the cache entry for the lump with the given
 * score and type.  returns with the lump's ref count incremented
 * and its lock held; release with putlump.  sleeps while every
 * cache block is in use.
 */
Lump*
lookuplump(u8int *score, int type)
{
	uint ms;
	Lump *b;
	u32int h;

	ms = msec();
	trace(TraceLump, "lookuplump enter");

	h = hashbits(score, HashLog);

	/*
	 * look for the block in the cache
	 */
	qlock(&lumpcache.lock);
	CHECK(checklumpcache());
again:
	for(b = lumpcache.heads[h]; b != nil; b = b->next){
		if(scorecmp(score, b->score)==0 && type == b->type){
			addstat(StatLcacheHit, 1);
			trace(TraceLump, "lookuplump hit");
			goto found;
		}
	}
	trace(TraceLump, "lookuplump miss");

	/*
	 * missed: locate the block with the oldest second to last use.
	 * remove it from the heap, and fix up the heap.
	 */
	while(lumpcache.free == nil){
		trace(TraceLump, "lookuplump bump");
		CHECK(checklumpcache());
		if(bumplump() == nil){
			CHECK(checklumpcache());
			logerr(EAdmin, "all lump cache blocks in use");
			addstat(StatLcacheStall, 1);
			CHECK(checklumpcache());
			rsleep(&lumpcache.full);
			CHECK(checklumpcache());
			addstat(StatLcacheStall, -1);
			/* lock was released during rsleep: rescan the hash chain */
			goto again;
		}
		CHECK(checklumpcache());
	}
	addstat(StatLcacheMiss, 1);
	b = lumpcache.free;
	lumpcache.free = b->next;

	/*
	 * the new block has no last use, so assume it happens sometime in the middle
	ZZZ this is not reasonable
	 */
	b->used = (b->used2 + lumpcache.now) / 2;

	/*
	 * rechain the block on the correct hash chain
	 */
	b->next = lumpcache.heads[h];
	lumpcache.heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	scorecp(b->score, score);
	b->type = type;
	b->size = 0;
	b->data = nil;

found:
	b->ref++;
	b->used2 = b->used;
	b->used = lumpcache.now++;
	/* still in the eviction heap (referenced blocks may remain); re-sort it */
	if(b->heap != TWID32)
		fixheap(b->heap, b);

	CHECK(checklumpcache());
	qunlock(&lumpcache.lock);

	/* may block here waiting for another thread to finish with b */
	addstat(StatLumpStall, 1);
	qlock(&b->lock);
	addstat(StatLumpStall, -1);

	trace(TraceLump, "lookuplump exit");
	addstat2(StatLcacheRead, 1, StatLcacheReadTime, msec()-ms);
	return b;
}
  134. void
  135. insertlump(Lump *b, Packet *p)
  136. {
  137. u32int size;
  138. /*
  139. * look for the block in the cache
  140. */
  141. trace(TraceLump, "insertlump enter");
  142. qlock(&lumpcache.lock);
  143. CHECK(checklumpcache());
  144. again:
  145. addstat(StatLcacheWrite, 1);
  146. /*
  147. * missed: locate the block with the oldest second to last use.
  148. * remove it from the heap, and fix up the heap.
  149. */
  150. size = packetasize(p);
  151. while(lumpcache.avail < size){
  152. trace(TraceLump, "insertlump bump");
  153. CHECK(checklumpcache());
  154. if(bumplump() == nil){
  155. logerr(EAdmin, "all lump cache blocks in use");
  156. addstat(StatLcacheStall, 1);
  157. CHECK(checklumpcache());
  158. rsleep(&lumpcache.full);
  159. CHECK(checklumpcache());
  160. addstat(StatLcacheStall, -1);
  161. goto again;
  162. }
  163. CHECK(checklumpcache());
  164. }
  165. b->data = p;
  166. b->size = size;
  167. lumpcache.avail -= size;
  168. CHECK(checklumpcache());
  169. qunlock(&lumpcache.lock);
  170. trace(TraceLump, "insertlump exit");
  171. }
/*
 * release a lump obtained from lookuplump: drop its lock,
 * decrement its reference count, and when the count reaches
 * zero put it back in the eviction heap and wake any threads
 * waiting for a free block.
 */
void
putlump(Lump *b)
{
	if(b == nil)
		return;

	trace(TraceLump, "putlump");
	qunlock(&b->lock);
	qlock(&lumpcache.lock);
	CHECK(checklumpcache());
	if(--b->ref == 0){
		/* not currently heaped: make it an eviction candidate again */
		if(b->heap == TWID32)
			upheap(lumpcache.nheap++, b);
		trace(TraceLump, "putlump wakeup");
		rwakeupall(&lumpcache.full);
	}
	CHECK(checklumpcache());
	qunlock(&lumpcache.lock);
}
/*
 * remove some lump from use and update the free list and counters.
 * returns the freed lump, or nil if the heap is empty.
 * must be called with lumpcache.lock held.
 */
static Lump*
bumplump(void)
{
	Lump *b;
	u32int h;

	/*
	 * remove blocks until we find one that is unused
	 * referenced blocks are left in the heap even though
	 * they can't be scavenged; this is simply a speed optimization
	 */
	CHECK(checklumpcache());
	for(;;){
		if(lumpcache.nheap == 0){
			trace(TraceLump, "bumplump emptyheap");
			return nil;
		}
		/* heap[0] has the oldest second-to-last use */
		b = lumpcache.heap[0];
		delheap(b);
		if(!b->ref){
			trace(TraceLump, "bumplump wakeup");
			rwakeupall(&lumpcache.full);
			break;
		}
	}

	/*
	 * unchain the block
	 */
	trace(TraceLump, "bumplump unchain");
	if(b->prev == nil){
		/* head of its hash chain */
		h = hashbits(b->score, HashLog);
		if(lumpcache.heads[h] != b)
			sysfatal("bad hash chains in lump cache");
		lumpcache.heads[h] = b->next;
	}else
		b->prev->next = b->next;
	if(b->next != nil)
		b->next->prev = b->prev;

	/* release the packet and return its space to the budget */
	if(b->data != nil){
		packetfree(b->data);
		b->data = nil;
		lumpcache.avail += b->size;
		b->size = 0;
	}
	b->type = TWID8;

	b->next = lumpcache.free;
	lumpcache.free = b;

	CHECK(checklumpcache());
	trace(TraceLump, "bumplump exit");
	return b;
}
  243. void
  244. emptylumpcache(void)
  245. {
  246. qlock(&lumpcache.lock);
  247. while(bumplump())
  248. ;
  249. qunlock(&lumpcache.lock);
  250. }
  251. /*
  252. * delete an arbitrary block from the heap
  253. */
  254. static void
  255. delheap(Lump *db)
  256. {
  257. fixheap(db->heap, lumpcache.heap[--lumpcache.nheap]);
  258. db->heap = TWID32;
  259. }
  260. /*
  261. * push an element up or down to it's correct new location
  262. */
  263. static void
  264. fixheap(int i, Lump *b)
  265. {
  266. if(upheap(i, b) == i)
  267. downheap(i, b);
  268. }
  269. static int
  270. upheap(int i, Lump *b)
  271. {
  272. Lump *bb;
  273. u32int now;
  274. int p;
  275. now = lumpcache.now;
  276. for(; i != 0; i = p){
  277. p = (i - 1) >> 1;
  278. bb = lumpcache.heap[p];
  279. if(b->used2 - now >= bb->used2 - now)
  280. break;
  281. lumpcache.heap[i] = bb;
  282. bb->heap = i;
  283. }
  284. lumpcache.heap[i] = b;
  285. b->heap = i;
  286. return i;
  287. }
  288. static int
  289. downheap(int i, Lump *b)
  290. {
  291. Lump *bb;
  292. u32int now;
  293. int k;
  294. now = lumpcache.now;
  295. for(; ; i = k){
  296. k = (i << 1) + 1;
  297. if(k >= lumpcache.nheap)
  298. break;
  299. if(k + 1 < lumpcache.nheap && lumpcache.heap[k]->used2 - now > lumpcache.heap[k + 1]->used2 - now)
  300. k++;
  301. bb = lumpcache.heap[k];
  302. if(b->used2 - now <= bb->used2 - now)
  303. break;
  304. lumpcache.heap[i] = bb;
  305. bb->heap = i;
  306. }
  307. lumpcache.heap[i] = b;
  308. b->heap = i;
  309. return i;
  310. }
  311. static void
  312. findblock(Lump *bb)
  313. {
  314. Lump *b, *last;
  315. int h;
  316. last = nil;
  317. h = hashbits(bb->score, HashLog);
  318. for(b = lumpcache.heads[h]; b != nil; b = b->next){
  319. if(last != b->prev)
  320. sysfatal("bad prev link");
  321. if(b == bb)
  322. return;
  323. last = b;
  324. }
  325. sysfatal("block score=%V type=%#x missing from hash table", bb->score, bb->type);
  326. }
/*
 * consistency check over the whole lump cache; only invoked
 * when the CHECK macro is enabled.  dies on any violation.
 */
void
checklumpcache(void)
{
	Lump *b;
	u32int size, now, nfree;
	int i, k, refed;

	now = lumpcache.now;
	/* heap invariants: back-pointers and parent/child age ordering */
	for(i = 0; i < lumpcache.nheap; i++){
		if(lumpcache.heap[i]->heap != i)
			sysfatal("lc: mis-heaped at %d: %d", i, lumpcache.heap[i]->heap);
		if(i > 0 && lumpcache.heap[(i - 1) >> 1]->used2 - now > lumpcache.heap[i]->used2 - now)
			sysfatal("lc: bad heap ordering");
		k = (i << 1) + 1;
		if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
			sysfatal("lc: bad heap ordering");
		k++;
		if(k < lumpcache.nheap && lumpcache.heap[i]->used2 - now > lumpcache.heap[k]->used2 - now)
			sysfatal("lc: bad heap ordering");
	}

	/* per-block checks: size accounting, hash membership, heap back-links */
	refed = 0;
	size = 0;
	for(i = 0; i < lumpcache.nblocks; i++){
		b = &lumpcache.blocks[i];
		if(b->data == nil && b->size != 0)
			sysfatal("bad size: %d data=%p", b->size, b->data);
		/* referenced blocks evicted from the heap are tracked separately */
		if(b->ref && b->heap == TWID32)
			refed++;
		if(b->type != TWID8){
			findblock(b);
			size += b->size;
		}
		if(b->heap != TWID32
		&& lumpcache.heap[b->heap] != b)
			sysfatal("lc: spurious heap value");
	}
	if(lumpcache.avail != lumpcache.allowed - size){
		fprint(2, "mismatched available=%d and allowed=%d - used=%d space", lumpcache.avail, lumpcache.allowed, size);
		*(int*)0=0;	/* deliberate null store: crash for a stack trace */
	}

	/* free-list entries must be unused and unheaped */
	nfree = 0;
	for(b = lumpcache.free; b != nil; b = b->next){
		if(b->type != TWID8 || b->heap != TWID32)
			sysfatal("lc: bad free list");
		nfree++;
	}
	/* heaped + free + referenced-unheaped must account for every block */
	if(lumpcache.nheap + nfree + refed != lumpcache.nblocks)
		sysfatal("lc: missing blocks: %d %d %d %d", lumpcache.nheap, refed, nfree, lumpcache.nblocks);
}