cache.c 15 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876
  1. #include "stdinc.h"
  2. #include "vac.h"
  3. #include "dat.h"
  4. #include "fns.h"
typedef struct Label Label;

enum {
	BadHeap = ~0,	/* sentinel heap index: lump is not currently in the heap */
};
/*
 * The plan is to store data to the cache in c->size blocks,
 * with each block zero-extended to fill it out.  When writing to
 * venti, the block will be zero-truncated.  The walker will also check
 * that the block fits within psize or dsize, as the case may be.
 */
struct Cache
{
	VtLock *lk;		/* serializes access to the cache structures */
	VtSession *z;		/* venti session used to read missing blocks */
	u32int now;		/* ticks for usage timestamps */
	int size;		/* max. size of any block; allocated to each block */
	Lump **heads;		/* hash table for finding address */
	int nheap;		/* number of available victims */
	Lump **heap;		/* heap for locating victims */
	long nblocks;		/* number of blocks allocated */
	Lump *blocks;		/* array of block descriptors */
	u8int *mem;		/* memory for all block descriptors */
	Lump *free;		/* free list of lumps */
	long hashSize;		/* number of buckets in heads */
};
/*
 * the tag for a block is hash(index, parent tag)
 */
struct Label {
	uchar gen[4];	/* presumably a generation number — TODO confirm against writer */
	uchar state;	/* lump state (Lump* enum) */
	uchar type;	/* top bit indicates it is part of a directory */
	uchar tag[4];	/* tag of file it is in */
};
/* shared error message for lookups into unallocated directory entries */
static char ENoDir[] = "directory entry is not allocated";

static void	fixHeap(int si, Lump *b);
static int	upHeap(int i, Lump *b);
static int	downHeap(int i, Lump *b);
static char	*lumpState(int);
static void	lumpSetState(Lump *u, int state);
  45. Cache *
  46. cacheAlloc(VtSession *z, int blockSize, long nblocks)
  47. {
  48. int i;
  49. Cache *c;
  50. Lump *b;
  51. c = vtMemAllocZ(sizeof(Cache));
  52. c->lk = vtLockAlloc();
  53. c->z = z;
  54. c->size = blockSize;
  55. c->nblocks = nblocks;
  56. c->hashSize = nblocks;
  57. c->heads = vtMemAllocZ(c->hashSize*sizeof(Lump*));
  58. c->heap = vtMemAllocZ(nblocks*sizeof(Lump*));
  59. c->blocks = vtMemAllocZ(nblocks*sizeof(Lump));
  60. c->mem = vtMemAllocZ(nblocks * blockSize);
  61. for(i = 0; i < nblocks; i++){
  62. b = &c->blocks[i];
  63. b->lk = vtLockAlloc();
  64. b->c = c;
  65. b->data = &c->mem[i * blockSize];
  66. b->addr = i+1;
  67. b->state = LumpFree;
  68. b->heap = BadHeap;
  69. b->next = c->free;
  70. c->free = b;
  71. }
  72. c->nheap = 0;
  73. return c;
  74. }
  75. long
  76. cacheGetSize(Cache *c)
  77. {
  78. return c->nblocks;
  79. }
  80. int
  81. cacheGetBlockSize(Cache *c)
  82. {
  83. return c->size;
  84. }
/*
 * Resizing the cache is not implemented; always fails (returns 0).
 */
int
cacheSetSize(Cache *c, long nblocks)
{
	USED(c);
	USED(nblocks);
	return 0;
}
  92. void
  93. cacheFree(Cache *c)
  94. {
  95. int i;
  96. for(i = 0; i < c->nblocks; i++){
  97. assert(c->blocks[i].ref == 0);
  98. vtLockFree(c->blocks[i].lk);
  99. }
  100. vtMemFree(c->heads);
  101. vtMemFree(c->blocks);
  102. vtMemFree(c->mem);
  103. vtMemFree(c);
  104. }
  105. static u32int
  106. hash(Cache *c, uchar score[VtScoreSize], int type)
  107. {
  108. u32int h;
  109. uchar *p = score + VtScoreSize-4;
  110. h = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
  111. h += type;
  112. return h % c->hashSize;
  113. }
  114. static void
  115. findLump(Cache *c, Lump *bb)
  116. {
  117. Lump *b, *last;
  118. int h;
  119. last = nil;
  120. h = hash(c, bb->score, bb->type);
  121. for(b = c->heads[h]; b != nil; b = b->next){
  122. if(last != b->prev)
  123. vtFatal("bad prev link");
  124. if(b == bb)
  125. return;
  126. last = b;
  127. }
  128. vtFatal("block missing from hash table");
  129. }
/*
 * Consistency check over the whole cache: heap shape and ordering,
 * block-to-arena correspondence, hash-chain membership, and the
 * accounting invariant nheap + refed + free == nblocks.
 * Fatal on structural corruption; merely prints for suspected leaks.
 */
void
cacheCheck(Cache *c)
{
	u32int size, now;
	int i, k, refed, free;
	static uchar zero[VtScoreSize];
	Lump *p;

	size = c->size;
	now = c->now;

	/* count the free list */
	free = 0;
	for(p=c->free; p; p=p->next)
		free++;

	/* verify min-heap ordering on the key (used2 - now); the unsigned
	 * subtraction keeps comparisons valid across c->now wraparound */
	for(i = 0; i < c->nheap; i++){
		if(c->heap[i]->heap != i)
			vtFatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
		if(i > 0 && c->heap[(i - 1) >> 1]->used2 - now > c->heap[i]->used2 - now)
			vtFatal("bad heap ordering");
		k = (i << 1) + 1;	/* left child */
		if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
			vtFatal("bad heap ordering");
		k++;			/* right child */
		if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
			vtFatal("bad heap ordering");
	}

	refed = 0;
	for(i = 0; i < c->nblocks; i++){
		/* each block's data must sit at its fixed slot in the arena */
		if(c->blocks[i].data != &c->mem[i * size])
			vtFatal("mis-blocked at %d", i);
		/* referenced and off the heap: counts against the invariant */
		if(c->blocks[i].ref && c->blocks[i].heap == BadHeap){
			refed++;
		}
		/* any block with a nonzero score must be on its hash chain */
		if(memcmp(zero, c->blocks[i].score, VtScoreSize))
			findLump(c, &c->blocks[i]);
	}
	if(refed > 0)fprint(2, "cacheCheck: nheap %d refed %d free %d\n", c->nheap, refed, free);
	assert(c->nheap + refed + free == c->nblocks);

	/* report every lump still holding a reference */
	refed = 0;
	for(i = 0; i < c->nblocks; i++){
		if(c->blocks[i].ref) {
			if(1)fprint(2, "%d %V %d %s\n", c->blocks[i].type, c->blocks[i].score, c->blocks[i].ref, lumpState(c->blocks[i].state));
			refed++;
		}
	}
	if(refed > 0)fprint(2, "cacheCheck: in used %d\n", refed);
}
/*
 * delete an arbitrary block from the heap
 */
static void
delHeap(Lump *db)
{
	/* move the last heap element into db's slot, then re-sift it */
	fixHeap(db->heap, db->c->heap[--db->c->nheap]);
	db->heap = BadHeap;
}
/*
 * Restore the heap invariant after placing block b at slot si:
 * try sifting up first; only if b did not move up, sift it down.
 */
static void
fixHeap(int si, Lump *b)
{
	int i;

	i = upHeap(si, b);
	if(i == si)
		downHeap(i, b);
}
/*
 * Sift block b up from slot i toward the heap root.
 * The ordering key is (used2 - now): the unsigned subtraction keeps
 * the comparison meaningful when the tick counter c->now wraps.
 * Returns the slot where b finally landed.
 */
static int
upHeap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int p;
	Cache *c;

	c = b->c;
	now = c->now;
	for(; i != 0; i = p){
		p = (i - 1) >> 1;	/* parent slot */
		bb = c->heap[p];
		if(b->used2 - now >= bb->used2 - now)
			break;		/* parent is at least as old; done */
		/* pull the parent down into slot i */
		c->heap[i] = bb;
		bb->heap = i;
	}
	c->heap[i] = b;
	b->heap = i;
	return i;
}
/*
 * Sift block b down from slot i toward the leaves, swapping with the
 * older (smaller used2 - now) child each step.  Same wrap-safe key
 * as upHeap.  Returns the slot where b finally landed.
 */
static int
downHeap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int k;
	Cache *c;

	c = b->c;
	now = c->now;
	for(; ; i = k){
		k = (i << 1) + 1;	/* left child */
		if(k >= c->nheap)
			break;		/* no children: i is a leaf */
		/* pick the older of the two children */
		if(k + 1 < c->nheap && c->heap[k]->used2 - now > c->heap[k + 1]->used2 - now)
			k++;
		bb = c->heap[k];
		if(b->used2 - now <= bb->used2 - now)
			break;		/* b is older than both children */
		/* pull the child up into slot i */
		c->heap[i] = bb;
		bb->heap = i;
	}
	c->heap[i] = b;
	b->heap = i;
	return i;
}
/* called with c->lk held */
Lump *
cacheBumpLump(Cache *c)
{
	Lump *b;

	/*
	 * missed: locate the block with the oldest second to last use.
	 * remove it from the heap, and fix up the heap.
	 */
	if(c->free) {
		/* cheap case: take a never-used lump off the free list */
		b = c->free;
		c->free = b->next;
	} else {
		/* evict: keep popping the heap root until we find an
		 * unreferenced victim */
		for(;;){
			if(c->nheap == 0) {
				/* every lump is referenced; caller retries */
				cacheCheck(c);
				assert(0);
				return nil;
			}
			b = c->heap[0];
			delHeap(b);
			if(b->ref == 0)
				break;
		}

		/*
		 * unchain the block from hash chain
		 */
		if(b->prev == nil)
			c->heads[hash(c, b->score, b->type)] = b->next;
		else
			b->prev->next = b->next;
		if(b->next != nil)
			b->next->prev = b->prev;
	}

	/*
	 * the new block has no last use, so assume it happens sometime in the middle
	 */
	b->used = (b->used2 + c->now) / 2;

	b->asize = 0;
	return b;
}
/*
 * Allocate a fresh local (cache-only) lump of the given type and size.
 * The returned lump is locked, referenced, zero-filled, in state
 * LumpActive, and its score encodes its cache address (scoreIsLocal).
 * Spins (with sleep) while the cache is completely full.
 */
Lump *
cacheAllocLump(Cache *c, int type, int size, int dir)
{
	Lump *b;
	ulong h;

	assert(size <= c->size);

again:
	vtLock(c->lk);
	b = cacheBumpLump(c);
	if(b == nil) {
		vtUnlock(c->lk);
		fprint(2, "cache is full\n");
		/* XXX should be better */
		sleep(100);
		goto again;
	}

	vtLock(b->lk);
	assert(b->ref == 0);
	b->ref++;
	b->used2 = b->used;
	b->used = c->now++;

	/* convert addr into score */
	memset(b->score, 0, VtScoreSize-4);
	b->score[VtScoreSize-4] = b->addr>>24;
	b->score[VtScoreSize-3] = b->addr>>16;
	b->score[VtScoreSize-2] = b->addr>>8;
	b->score[VtScoreSize-1] = b->addr;

	b->dir = dir;
	b->type = type;
	b->gen = 0;
	b->asize = size;
	b->state = LumpFree;	/* provisional; set Active below, outside c->lk */

	h = hash(c, b->score, b->type);

	/* chain onto correct hash */
	b->next = c->heads[h];
	c->heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;
	vtUnlock(c->lk);

	vtZeroExtend(type, b->data, 0, size);
	lumpSetState(b, LumpActive);

	return b;
}
  323. int
  324. scoreIsLocal(uchar score[VtScoreSize])
  325. {
  326. static uchar zero[VtScoreSize];
  327. return memcmp(score, zero, VtScoreSize-4) == 0;
  328. }
/*
 * Look up the lump with the given score and type, reading it from
 * venti if it is not already cached.  On success the returned lump is
 * locked and referenced; on failure returns nil with vtSetError set.
 * Local scores must already be present in the cache.
 */
Lump *
cacheGetLump(Cache *c, uchar score[VtScoreSize], int type, int size)
{
	Lump *b;
	ulong h;
	int n;
	static uchar zero[VtScoreSize];

	assert(size <= c->size);

	h = hash(c, score, type);

again:
	/*
	 * look for the block in the cache
	 */
	vtLock(c->lk);
	for(b = c->heads[h]; b != nil; b = b->next){
		if(memcmp(b->score, score, VtScoreSize) == 0 && b->type == type)
			goto found;
	}

	/* should not be looking for a temp block */
	if(scoreIsLocal(score)) {
		if(memcmp(score, zero, VtScoreSize) == 0)
			vtSetError("looking for zero score");
		else
			vtSetError("missing local block");
		vtUnlock(c->lk);
		return nil;
	}

	b = cacheBumpLump(c);
	if(b == nil) {
		/* cache temporarily full of referenced lumps; retry */
		vtUnlock(c->lk);
		sleep(100);
		goto again;
	}

	/* chain onto correct hash */
	b->next = c->heads[h];
	c->heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	memmove(b->score, score, VtScoreSize);
	b->type = type;
	b->state = LumpFree;	/* no data yet: triggers the venti read below */

found:
	b->ref++;
	b->used2 = b->used;
	b->used = c->now++;
	if(b->heap != BadHeap)
		fixHeap(b->heap, b);	/* usage timestamp changed; re-sift */
	vtUnlock(c->lk);

	vtLock(b->lk);
	if(b->state != LumpFree)
		return b;	/* cache hit with valid data */

	/* miss: fetch from venti and verify the content hash */
	n = vtRead(c->z, score, type, b->data, size);
	if(n < 0) {
		lumpDecRef(b, 1);
		return nil;
	}
	if(!vtSha1Check(score, b->data, n)) {
		vtSetError("vtSha1Check failed");
		lumpDecRef(b, 1);
		return nil;
	}
	vtZeroExtend(type, b->data, n, size);
	b->asize = size;
	lumpSetState(b, LumpVenti);

	return b;
}
  396. static char *
  397. lumpState(int state)
  398. {
  399. switch(state) {
  400. default:
  401. return "Unknown!!";
  402. case LumpFree:
  403. return "Free";
  404. case LumpActive:
  405. return "Active";
  406. case LumpSnap:
  407. return "Snap";
  408. case LumpZombie:
  409. return "Zombie";
  410. case LumpVenti:
  411. return "Venti";
  412. }
  413. }
/*
 * Change a lump's state.  Kept as a function so state transitions can
 * be traced by re-enabling the commented-out fprint below.
 */
static void
lumpSetState(Lump *u, int state)
{
//	if(u->state != LumpFree)
//	fprint(2, "%V: %s -> %s\n", u->score, lumpState(u->state), lumpState(state));
	u->state = state;
}
  421. int
  422. lumpGetScore(Lump *u, int offset, uchar score[VtScoreSize])
  423. {
  424. uchar *sp;
  425. VtRoot root;
  426. VtEntry dir;
  427. vtLock(u->lk);
  428. switch(u->type) {
  429. default:
  430. vtSetError("bad type");
  431. goto Err;
  432. case VtPointerType0:
  433. case VtPointerType1:
  434. case VtPointerType2:
  435. case VtPointerType3:
  436. case VtPointerType4:
  437. case VtPointerType5:
  438. case VtPointerType6:
  439. if((offset+1)*VtScoreSize > u->asize)
  440. sp = nil;
  441. else
  442. sp = u->data + offset*VtScoreSize;
  443. break;
  444. case VtRootType:
  445. if(u->asize < VtRootSize) {
  446. vtSetError("runt root block");
  447. goto Err;
  448. }
  449. if(!vtRootUnpack(&root, u->data))
  450. goto Err;
  451. sp = root.score;
  452. break;
  453. case VtDirType:
  454. if((offset+1)*VtEntrySize > u->asize) {
  455. vtSetError(ENoDir);
  456. goto Err;
  457. }
  458. if(!vtEntryUnpack(&dir, u->data, offset))
  459. goto Err;
  460. if(!dir.flags & VtEntryActive) {
  461. vtSetError(ENoDir);
  462. goto Err;
  463. }
  464. sp = dir.score;
  465. break;
  466. }
  467. if(sp == nil)
  468. memmove(score, vtZeroScore, VtScoreSize);
  469. else
  470. memmove(score, sp, VtScoreSize);
  471. vtUnlock(u->lk);
  472. return !scoreIsLocal(score);
  473. Err:
  474. vtUnlock(u->lk);
  475. return 0;
  476. }
/*
 * Walk from lump u to the child block at the given entry/offset,
 * performing copy-on-write when the child must be modified.
 *
 * If readOnly, the child is returned as-is.  Otherwise, a child in
 * state LumpSnap/LumpVenti (or LumpActive with an older generation)
 * is split: a fresh local copy is allocated, the parent's stored
 * score is updated to point at it, and the copy is returned.
 * The returned lump is referenced; it is locked iff lock is set.
 * Returns nil on error with vtSetError set.
 */
Lump *
lumpWalk(Lump *u, int offset, int type, int size, int readOnly, int lock)
{
	Lump *v, *vv;
	Cache *c;
	uchar score[VtScoreSize], *sp;
	VtRoot root;
	VtEntry dir;
	int split, isdir;

	c = u->c;
	vtLock(u->lk);

Again:
	v = nil;
	vv = nil;
	isdir = u->dir;
	/* locate the stored score for the requested child */
	switch(u->type) {
	default:
		vtSetError("bad type");
		goto Err;
	case VtPointerType0:
	case VtPointerType1:
	case VtPointerType2:
	case VtPointerType3:
	case VtPointerType4:
	case VtPointerType5:
	case VtPointerType6:
		if((offset+1)*VtScoreSize > u->asize)
			sp = nil;	/* beyond the data: zero score */
		else
			sp = u->data + offset*VtScoreSize;
		break;
	case VtRootType:
		if(u->asize < VtRootSize) {
			vtSetError("runt root block");
			goto Err;
		}
		if(!vtRootUnpack(&root, u->data))
			goto Err;
		sp = root.score;
		break;
	case VtDirType:
		if((offset+1)*VtEntrySize > u->asize) {
			vtSetError(ENoDir);
			goto Err;
		}
		if(!vtEntryUnpack(&dir, u->data, offset))
			goto Err;
		if(!(dir.flags & VtEntryActive)) {
			vtSetError(ENoDir);
			goto Err;
		}
		isdir = (dir.flags & VtEntryDir) != 0;
		/* point sp into u->data (not at the unpacked copy) so the
		 * copy-on-write below can update the parent in place */
//		sp = dir.score;
		sp = u->data + offset*VtEntrySize + 20;
		break;
	}

	if(sp == nil)
		memmove(score, vtZeroScore, VtScoreSize);
	else
		memmove(score, sp, VtScoreSize);
	vtUnlock(u->lk);

if(0)fprint(2, "lumpWalk: %V:%s %d:%d-> %V:%d\n", u->score, lumpState(u->state), u->type, offset, score, type);

	v = cacheGetLump(c, score, type, size);
	if(v == nil)
		return nil;

	/* decide whether the child must be copied before modification */
	split = 1;
	if(readOnly)
		split = 0;
	switch(v->state) {
	default:
		assert(0);
	case LumpFree:
		fprint(2, "block is free %V!\n", v->score);
		vtSetError("phase error");
		goto Err2;
	case LumpActive:
		if(v->gen < u->gen) {
			/* active child from an older snapshot generation */
print("LumpActive gen\n");
			lumpSetState(v, LumpSnap);
			v->gen = u->gen;
		} else
			split = 0;	/* already writable in this generation */
		break;
	case LumpSnap:
	case LumpVenti:
		break;		/* immutable: must split to write */
	}

	/* easy case */
	if(!split) {
		if(!lock)
			vtUnlock(v->lk);
		return v;
	}

	if(sp == nil) {
		/* cannot record the new child: parent has no slot here */
		vtSetError("bad offset");
		goto Err2;
	}

	vv = cacheAllocLump(c, v->type, size, isdir);
	/* vv is locked */
	vv->gen = u->gen;
	memmove(vv->data, v->data, v->asize);
if(0)fprint(2, "split %V into %V\n", v->score, vv->score);

	lumpDecRef(v, 1);
	v = nil;
	vtLock(u->lk);
	if(u->state != LumpActive) {
		vtSetError("bad parent state: can not happen");
		goto Err;
	}

	/* check that nothing changed underfoot */
	if(memcmp(sp, score, VtScoreSize) != 0) {
		lumpDecRef(vv, 1);
		fprint(2, "lumpWalk: parent changed under foot\n");
		goto Again;
	}

	/* XXX - hold Active blocks up - will go eventually */
	lumpIncRef(vv);

	/* change the parent */
	memmove(sp, vv->score, VtScoreSize);
	vtUnlock(u->lk);

	if(!lock)
		vtUnlock(vv->lk);
	return vv;

Err:
	vtUnlock(u->lk);
	lumpDecRef(v, 0);
	lumpDecRef(vv, 1);
	return nil;

Err2:
	lumpDecRef(v, 1);
	return nil;
}
  609. void
  610. lumpFreeEntry(Lump *u, int entry)
  611. {
  612. uchar score[VtScoreSize];
  613. int type;
  614. ulong gen;
  615. VtEntry dir;
  616. Cache *c;
  617. c = u->c;
  618. vtLock(u->lk);
  619. if(u->state == LumpVenti)
  620. goto Exit;
  621. switch(u->type) {
  622. default:
  623. fprint(2, "freeing bad lump type: %d\n", u->type);
  624. return;
  625. case VtPointerType0:
  626. if((entry+1)*VtScoreSize > u->asize)
  627. goto Exit;
  628. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  629. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  630. type = u->dir?VtDirType:VtDataType;
  631. break;
  632. case VtPointerType1:
  633. case VtPointerType2:
  634. case VtPointerType3:
  635. case VtPointerType4:
  636. case VtPointerType5:
  637. case VtPointerType6:
  638. if((entry+1)*VtScoreSize > u->asize)
  639. goto Exit;
  640. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  641. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  642. type = u->type-1;
  643. break;
  644. case VtDirType:
  645. if((entry+1)*VtEntrySize > u->asize)
  646. goto Exit;
  647. if(!vtEntryUnpack(&dir, u->data, entry))
  648. goto Exit;
  649. if(!dir.flags & VtEntryActive)
  650. goto Exit;
  651. gen = dir.gen;
  652. if(gen != ~0)
  653. gen++;
  654. if(dir.depth == 0)
  655. type = (dir.flags&VtEntryDir)?VtDirType:VtDataType;
  656. else
  657. type = VtPointerType0 + dir.depth - 1;
  658. memmove(score, dir.score, VtScoreSize);
  659. memset(&dir, 0, sizeof(dir));
  660. dir.gen = gen;
  661. vtEntryPack(&dir, u->data, entry);
  662. break;
  663. case VtDataType:
  664. type = VtErrType;
  665. break;
  666. }
  667. vtUnlock(u->lk);
  668. if(type == VtErrType || !scoreIsLocal(score))
  669. return;
  670. u = cacheGetLump(c, score, type, c->size);
  671. if(u == nil)
  672. return;
  673. lumpDecRef(u, 1);
  674. /* XXX remove extra reference */
  675. lumpDecRef(u, 0);
  676. return;
  677. Exit:
  678. vtUnlock(u->lk);
  679. return;
  680. }
  681. void
  682. lumpCleanup(Lump *u)
  683. {
  684. int i, n;
  685. switch(u->type) {
  686. default:
  687. return;
  688. case VtPointerType0:
  689. case VtPointerType1:
  690. case VtPointerType2:
  691. case VtPointerType3:
  692. case VtPointerType4:
  693. case VtPointerType5:
  694. case VtPointerType6:
  695. n = u->asize/VtScoreSize;
  696. break;
  697. case VtDirType:
  698. n = u->asize/VtEntrySize;
  699. break;
  700. }
  701. for(i=0; i<n; i++)
  702. lumpFreeEntry(u, i);
  703. }
/*
 * Drop a reference to lump b (nil is a no-op), unlocking b->lk first
 * if unlock is set.  When the last reference goes away, Active lumps
 * are cleaned up (their children released) and moved to LumpFree,
 * Zombies become Free, and the lump is reinserted into the victim
 * heap so it can be evicted.
 */
void
lumpDecRef(Lump *b, int unlock)
{
	int i;
	Cache *c;

	if(b == nil)
		return;

	if(unlock)
		vtUnlock(b->lk);

	c = b->c;
	vtLock(c->lk);
	if(--b->ref > 0) {
		vtUnlock(c->lk);
		return;
	}
	assert(b->ref == 0);

	switch(b->state) {
	default:
		fprint(2, "bad state: %s\n", lumpState(b->state));
		assert(0);
	case LumpActive:
		/* hack - but will do for now */
		/* hold a temporary ref and drop c->lk so lumpCleanup can
		 * recurse into cacheGetLump/lumpDecRef without deadlock */
		b->ref++;
		vtUnlock(c->lk);
		lumpCleanup(b);
		vtLock(c->lk);
		b->ref--;
		lumpSetState(b, LumpFree);
		break;
	case LumpZombie:
		lumpSetState(b, LumpFree);
		break;
	case LumpFree:
	case LumpVenti:
		break;
	}

	/*
	 * reinsert in the free heap
	 */
	if(b->heap == BadHeap) {
		i = upHeap(c->nheap++, b);
		c->heap[i] = b;
		b->heap = i;
	}

	vtUnlock(c->lk);
}
  750. Lump *
  751. lumpIncRef(Lump *b)
  752. {
  753. Cache *c;
  754. c = b->c;
  755. vtLock(c->lk);
  756. assert(b->ref > 0);
  757. b->ref++;
  758. vtUnlock(c->lk);
  759. return b;
  760. }