/*
 * cache.c — in-memory lump cache for vac: blocks keyed by (venti score, type),
 * with an LRU-ish victim heap and a free list.
 */
#include "stdinc.h"
#include "vac.h"
#include "dat.h"
#include "fns.h"
  5. typedef struct Label Label;
  6. enum {
  7. BadHeap = ~0,
  8. };
/*
 * the plan is to store data to the cache in c->size blocks
 * with the block zero extended to fill it out.  When writing to
 * venti, the block will be zero truncated.  The walker will also check
 * that the block fits within psize or dsize as the case may be.
 */
  15. struct Cache
  16. {
  17. VtLock *lk;
  18. VtSession *z;
  19. u32int now; /* ticks for usage timestamps */
  20. int size; /* max. size of any block; allocated to each block */
  21. Lump **heads; /* hash table for finding address */
  22. int nheap; /* number of available victims */
  23. Lump **heap; /* heap for locating victims */
  24. long nblocks; /* number of blocks allocated */
  25. Lump *blocks; /* array of block descriptors */
  26. u8int *mem; /* memory for all block descriptors */
  27. Lump *free; /* free list of lumps */
  28. long hashSize;
  29. };
  30. /*
  31. * the tag for a block is hash(index, parent tag)
  32. */
  33. struct Label {
  34. uchar gen[4];
  35. uchar state;
  36. uchar type; /* top bit indicates it is part of a directory */
  37. uchar tag[4]; /* tag of file it is in */
  38. };
  39. static char ENoDir[] = "directory entry is not allocated";
  40. static void fixHeap(int si, Lump *b);
  41. static int upHeap(int i, Lump *b);
  42. static int downHeap(int i, Lump *b);
  43. static char *lumpState(int);
  44. static void lumpSetState(Lump *u, int state);
  45. Cache *
  46. cacheAlloc(VtSession *z, int blockSize, long nblocks)
  47. {
  48. int i;
  49. Cache *c;
  50. Lump *b;
  51. c = vtMemAllocZ(sizeof(Cache));
  52. c->lk = vtLockAlloc();
  53. c->z = z;
  54. c->size = blockSize;
  55. c->nblocks = nblocks;
  56. c->hashSize = nblocks;
  57. c->heads = vtMemAllocZ(c->hashSize*sizeof(Lump*));
  58. c->heap = vtMemAllocZ(nblocks*sizeof(Lump*));
  59. c->blocks = vtMemAllocZ(nblocks*sizeof(Lump));
  60. c->mem = vtMemAllocZ(nblocks * blockSize);
  61. for(i = 0; i < nblocks; i++){
  62. b = &c->blocks[i];
  63. b->lk = vtLockAlloc();
  64. b->c = c;
  65. b->data = &c->mem[i * blockSize];
  66. b->addr = i+1;
  67. b->state = LumpFree;
  68. b->heap = BadHeap;
  69. b->next = c->free;
  70. c->free = b;
  71. }
  72. c->nheap = 0;
  73. return c;
  74. }
  75. long
  76. cacheGetSize(Cache *c)
  77. {
  78. return c->nblocks;
  79. }
  80. int
  81. cacheGetBlockSize(Cache *c)
  82. {
  83. return c->size;
  84. }
  85. int
  86. cacheSetSize(Cache *c, long nblocks)
  87. {
  88. USED(c);
  89. USED(nblocks);
  90. return 0;
  91. }
  92. void
  93. cacheFree(Cache *c)
  94. {
  95. int i;
  96. for(i = 0; i < c->nblocks; i++){
  97. assert(c->blocks[i].ref == 0);
  98. vtLockFree(c->blocks[i].lk);
  99. }
  100. vtMemFree(c->heads);
  101. vtMemFree(c->blocks);
  102. vtMemFree(c->mem);
  103. vtMemFree(c);
  104. }
  105. static u32int
  106. hash(Cache *c, uchar score[VtScoreSize], int type)
  107. {
  108. u32int h;
  109. uchar *p = score + VtScoreSize-4;
  110. h = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
  111. h += type;
  112. return h % c->hashSize;
  113. }
  114. static void
  115. findLump(Cache *c, Lump *bb)
  116. {
  117. Lump *b, *last;
  118. int h;
  119. last = nil;
  120. h = hash(c, bb->score, bb->type);
  121. for(b = c->heads[h]; b != nil; b = b->next){
  122. if(last != b->prev)
  123. vtFatal("bad prev link");
  124. if(b == bb)
  125. return;
  126. last = b;
  127. }
  128. vtFatal("block missing from hash table");
  129. }
  130. void
  131. cacheCheck(Cache *c)
  132. {
  133. u32int size, now;
  134. int i, k, refed, free;
  135. static uchar zero[VtScoreSize];
  136. Lump *p;
  137. size = c->size;
  138. now = c->now;
  139. free = 0;
  140. for(p=c->free; p; p=p->next)
  141. free++;
  142. for(i = 0; i < c->nheap; i++){
  143. if(c->heap[i]->heap != i)
  144. vtFatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
  145. if(i > 0 && c->heap[(i - 1) >> 1]->used2 - now > c->heap[i]->used2 - now)
  146. vtFatal("bad heap ordering");
  147. k = (i << 1) + 1;
  148. if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
  149. vtFatal("bad heap ordering");
  150. k++;
  151. if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
  152. vtFatal("bad heap ordering");
  153. }
  154. refed = 0;
  155. for(i = 0; i < c->nblocks; i++){
  156. if(c->blocks[i].data != &c->mem[i * size])
  157. vtFatal("mis-blocked at %d", i);
  158. if(c->blocks[i].ref && c->blocks[i].heap == BadHeap){
  159. refed++;
  160. }
  161. if(memcmp(zero, c->blocks[i].score, VtScoreSize))
  162. findLump(c, &c->blocks[i]);
  163. }
  164. if(refed > 0)fprint(2, "cacheCheck: nheap %d refed %d free %d\n", c->nheap, refed, free);
  165. assert(c->nheap + refed + free == c->nblocks);
  166. refed = 0;
  167. for(i = 0; i < c->nblocks; i++){
  168. if(c->blocks[i].ref) {
  169. if(1)fprint(2, "%d %V %d %s\n", c->blocks[i].type, c->blocks[i].score, c->blocks[i].ref, lumpState(c->blocks[i].state));
  170. refed++;
  171. }
  172. }
  173. if(refed > 0)fprint(2, "cacheCheck: in used %d\n", refed);
  174. }
  175. /*
  176. * delete an arbitrary block from the heap
  177. */
  178. static void
  179. delHeap(Lump *db)
  180. {
  181. fixHeap(db->heap, db->c->heap[--db->c->nheap]);
  182. db->heap = BadHeap;
  183. }
  184. static void
  185. fixHeap(int si, Lump *b)
  186. {
  187. int i;
  188. i = upHeap(si, b);
  189. if(i == si)
  190. downHeap(i, b);
  191. }
  192. static int
  193. upHeap(int i, Lump *b)
  194. {
  195. Lump *bb;
  196. u32int now;
  197. int p;
  198. Cache *c;
  199. c = b->c;
  200. now = c->now;
  201. for(; i != 0; i = p){
  202. p = (i - 1) >> 1;
  203. bb = c->heap[p];
  204. if(b->used2 - now >= bb->used2 - now)
  205. break;
  206. c->heap[i] = bb;
  207. bb->heap = i;
  208. }
  209. c->heap[i] = b;
  210. b->heap = i;
  211. return i;
  212. }
  213. static int
  214. downHeap(int i, Lump *b)
  215. {
  216. Lump *bb;
  217. u32int now;
  218. int k;
  219. Cache *c;
  220. c = b->c;
  221. now = c->now;
  222. for(; ; i = k){
  223. k = (i << 1) + 1;
  224. if(k >= c->nheap)
  225. break;
  226. if(k + 1 < c->nheap && c->heap[k]->used2 - now > c->heap[k + 1]->used2 - now)
  227. k++;
  228. bb = c->heap[k];
  229. if(b->used2 - now <= bb->used2 - now)
  230. break;
  231. c->heap[i] = bb;
  232. bb->heap = i;
  233. }
  234. c->heap[i] = b;
  235. b->heap = i;
  236. return i;
  237. }
  238. /* called with c->lk held */
  239. Lump *
  240. cacheBumpLump(Cache *c)
  241. {
  242. Lump *b;
  243. /*
  244. * missed: locate the block with the oldest second to last use.
  245. * remove it from the heap, and fix up the heap.
  246. */
  247. if(c->free) {
  248. b = c->free;
  249. c->free = b->next;
  250. } else {
  251. for(;;){
  252. if(c->nheap == 0) {
  253. cacheCheck(c);
  254. assert(0);
  255. return nil;
  256. }
  257. b = c->heap[0];
  258. delHeap(b);
  259. if(b->ref == 0)
  260. break;
  261. }
  262. /*
  263. * unchain the block from hash chain
  264. */
  265. if(b->prev == nil)
  266. c->heads[hash(c, b->score, b->type)] = b->next;
  267. else
  268. b->prev->next = b->next;
  269. if(b->next != nil)
  270. b->next->prev = b->prev;
  271. }
  272. /*
  273. * the new block has no last use, so assume it happens sometime in the middle
  274. */
  275. b->used = (b->used2 + c->now) / 2;
  276. b->asize = 0;
  277. return b;
  278. }
  279. Lump *
  280. cacheAllocLump(Cache *c, int type, int size, int dir)
  281. {
  282. Lump *b;
  283. ulong h;
  284. assert(size <= c->size);
  285. again:
  286. vtLock(c->lk);
  287. b = cacheBumpLump(c);
  288. if(b == nil) {
  289. vtUnlock(c->lk);
  290. fprint(2, "cache is full\n");
  291. /* XXX should be better */
  292. sleep(100);
  293. goto again;
  294. }
  295. vtLock(b->lk);
  296. assert(b->ref == 0);
  297. b->ref++;
  298. b->used2 = b->used;
  299. b->used = c->now++;
  300. /* convert addr into score */
  301. memset(b->score, 0, VtScoreSize-4);
  302. b->score[VtScoreSize-4] = b->addr>>24;
  303. b->score[VtScoreSize-3] = b->addr>>16;
  304. b->score[VtScoreSize-2] = b->addr>>8;
  305. b->score[VtScoreSize-1] = b->addr;
  306. b->dir = dir;
  307. b->type = type;
  308. b->gen = 0;
  309. b->asize = size;
  310. b->state = LumpFree;
  311. h = hash(c, b->score, b->type);
  312. /* chain onto correct hash */
  313. b->next = c->heads[h];
  314. c->heads[h] = b;
  315. if(b->next != nil)
  316. b->next->prev = b;
  317. b->prev = nil;
  318. vtUnlock(c->lk);
  319. vtZeroExtend(type, b->data, 0, size);
  320. lumpSetState(b, LumpActive);
  321. return b;
  322. }
  323. int
  324. scoreIsLocal(uchar score[VtScoreSize])
  325. {
  326. static uchar zero[VtScoreSize];
  327. return memcmp(score, zero, VtScoreSize-4) == 0;
  328. }
  329. Lump *
  330. cacheGetLump(Cache *c, uchar score[VtScoreSize], int type, int size)
  331. {
  332. Lump *b;
  333. ulong h;
  334. int n;
  335. static uchar zero[VtScoreSize];
  336. assert(size <= c->size);
  337. h = hash(c, score, type);
  338. again:
  339. /*
  340. * look for the block in the cache
  341. */
  342. vtLock(c->lk);
  343. for(b = c->heads[h]; b != nil; b = b->next){
  344. if(memcmp(b->score, score, VtScoreSize) == 0 && b->type == type)
  345. goto found;
  346. }
  347. /* should not be looking for a temp block */
  348. if(scoreIsLocal(score)) {
  349. if(memcmp(score, zero, VtScoreSize) == 0)
  350. vtSetError("looking for zero score");
  351. else
  352. vtSetError("missing local block");
  353. vtUnlock(c->lk);
  354. return nil;
  355. }
  356. b = cacheBumpLump(c);
  357. if(b == nil) {
  358. vtUnlock(c->lk);
  359. sleep(100);
  360. goto again;
  361. }
  362. /* chain onto correct hash */
  363. b->next = c->heads[h];
  364. c->heads[h] = b;
  365. if(b->next != nil)
  366. b->next->prev = b;
  367. b->prev = nil;
  368. memmove(b->score, score, VtScoreSize);
  369. b->type = type;
  370. b->state = LumpFree;
  371. found:
  372. b->ref++;
  373. b->used2 = b->used;
  374. b->used = c->now++;
  375. if(b->heap != BadHeap)
  376. fixHeap(b->heap, b);
  377. vtUnlock(c->lk);
  378. vtLock(b->lk);
  379. if(b->state != LumpFree)
  380. return b;
  381. n = vtRead(c->z, score, type, b->data, size);
  382. if(n < 0) {
  383. fprint(2, "vtRead failed: %V %d %d: %R\n", score, type, size);
  384. abort();
  385. lumpDecRef(b, 1);
  386. return nil;
  387. }
  388. if(!vtSha1Check(score, b->data, n)) {
  389. vtSetError("vtSha1Check failed");
  390. lumpDecRef(b, 1);
  391. return nil;
  392. }
  393. vtZeroExtend(type, b->data, n, size);
  394. b->asize = size;
  395. lumpSetState(b, LumpVenti);
  396. return b;
  397. }
  398. static char *
  399. lumpState(int state)
  400. {
  401. switch(state) {
  402. default:
  403. return "Unknown!!";
  404. case LumpFree:
  405. return "Free";
  406. case LumpActive:
  407. return "Active";
  408. case LumpSnap:
  409. return "Snap";
  410. case LumpZombie:
  411. return "Zombie";
  412. case LumpVenti:
  413. return "Venti";
  414. }
  415. }
  416. static void
  417. lumpSetState(Lump *u, int state)
  418. {
  419. // if(u->state != LumpFree)
  420. // fprint(2, "%V: %s -> %s\n", u->score, lumpState(u->state), lumpState(state));
  421. u->state = state;
  422. }
  423. int
  424. lumpGetScore(Lump *u, int offset, uchar score[VtScoreSize])
  425. {
  426. uchar *sp;
  427. VtRoot root;
  428. VtEntry dir;
  429. vtLock(u->lk);
  430. switch(u->type) {
  431. default:
  432. vtSetError("bad type");
  433. goto Err;
  434. case VtPointerType0:
  435. case VtPointerType1:
  436. case VtPointerType2:
  437. case VtPointerType3:
  438. case VtPointerType4:
  439. case VtPointerType5:
  440. case VtPointerType6:
  441. if((offset+1)*VtScoreSize > u->asize)
  442. sp = nil;
  443. else
  444. sp = u->data + offset*VtScoreSize;
  445. break;
  446. case VtRootType:
  447. if(u->asize < VtRootSize) {
  448. vtSetError("runt root block");
  449. goto Err;
  450. }
  451. if(!vtRootUnpack(&root, u->data))
  452. goto Err;
  453. sp = root.score;
  454. break;
  455. case VtDirType:
  456. if((offset+1)*VtEntrySize > u->asize) {
  457. vtSetError(ENoDir);
  458. goto Err;
  459. }
  460. if(!vtEntryUnpack(&dir, u->data, offset))
  461. goto Err;
  462. if(!dir.flags & VtEntryActive) {
  463. vtSetError(ENoDir);
  464. goto Err;
  465. }
  466. sp = dir.score;
  467. break;
  468. }
  469. if(sp == nil)
  470. memmove(score, vtZeroScore, VtScoreSize);
  471. else
  472. memmove(score, sp, VtScoreSize);
  473. vtUnlock(u->lk);
  474. return !scoreIsLocal(score);
  475. Err:
  476. vtUnlock(u->lk);
  477. return 0;
  478. }
  479. Lump *
  480. lumpWalk(Lump *u, int offset, int type, int size, int readOnly, int lock)
  481. {
  482. Lump *v, *vv;
  483. Cache *c;
  484. uchar score[VtScoreSize], *sp;
  485. VtRoot root;
  486. VtEntry dir;
  487. int split, isdir;
  488. c = u->c;
  489. vtLock(u->lk);
  490. Again:
  491. v = nil;
  492. vv = nil;
  493. isdir = u->dir;
  494. switch(u->type) {
  495. default:
  496. vtSetError("bad type");
  497. goto Err;
  498. case VtPointerType0:
  499. case VtPointerType1:
  500. case VtPointerType2:
  501. case VtPointerType3:
  502. case VtPointerType4:
  503. case VtPointerType5:
  504. case VtPointerType6:
  505. if((offset+1)*VtScoreSize > u->asize)
  506. sp = nil;
  507. else
  508. sp = u->data + offset*VtScoreSize;
  509. break;
  510. case VtRootType:
  511. if(u->asize < VtRootSize) {
  512. vtSetError("runt root block");
  513. goto Err;
  514. }
  515. if(!vtRootUnpack(&root, u->data))
  516. goto Err;
  517. sp = root.score;
  518. break;
  519. case VtDirType:
  520. if((offset+1)*VtEntrySize > u->asize) {
  521. vtSetError(ENoDir);
  522. goto Err;
  523. }
  524. if(!vtEntryUnpack(&dir, u->data, offset))
  525. goto Err;
  526. if(!(dir.flags & VtEntryActive)) {
  527. vtSetError(ENoDir);
  528. goto Err;
  529. }
  530. isdir = (dir.flags & VtEntryDir) != 0;
  531. // sp = dir.score;
  532. sp = u->data + offset*VtEntrySize + 20;
  533. break;
  534. }
  535. if(sp == nil)
  536. memmove(score, vtZeroScore, VtScoreSize);
  537. else
  538. memmove(score, sp, VtScoreSize);
  539. vtUnlock(u->lk);
  540. if(0)fprint(2, "lumpWalk: %V:%s %d:%d-> %V:%d\n", u->score, lumpState(u->state), u->type, offset, score, type);
  541. v = cacheGetLump(c, score, type, size);
  542. if(v == nil)
  543. return nil;
  544. split = 1;
  545. if(readOnly)
  546. split = 0;
  547. switch(v->state) {
  548. default:
  549. assert(0);
  550. case LumpFree:
  551. fprint(2, "block is free %V!\n", v->score);
  552. vtSetError("phase error");
  553. goto Err2;
  554. case LumpActive:
  555. if(v->gen < u->gen) {
  556. print("LumpActive gen\n");
  557. lumpSetState(v, LumpSnap);
  558. v->gen = u->gen;
  559. } else
  560. split = 0;
  561. break;
  562. case LumpSnap:
  563. case LumpVenti:
  564. break;
  565. }
  566. /* easy case */
  567. if(!split) {
  568. if(!lock)
  569. vtUnlock(v->lk);
  570. return v;
  571. }
  572. if(sp == nil) {
  573. vtSetError("bad offset");
  574. goto Err2;
  575. }
  576. vv = cacheAllocLump(c, v->type, size, isdir);
  577. /* vv is locked */
  578. vv->gen = u->gen;
  579. memmove(vv->data, v->data, v->asize);
  580. if(0)fprint(2, "split %V into %V\n", v->score, vv->score);
  581. lumpDecRef(v, 1);
  582. v = nil;
  583. vtLock(u->lk);
  584. if(u->state != LumpActive) {
  585. vtSetError("bad parent state: can not happen");
  586. goto Err;
  587. }
  588. /* check that nothing changed underfoot */
  589. if(memcmp(sp, score, VtScoreSize) != 0) {
  590. lumpDecRef(vv, 1);
  591. fprint(2, "lumpWalk: parent changed under foot\n");
  592. goto Again;
  593. }
  594. /* XXX - hold Active blocks up - will go eventually */
  595. lumpIncRef(vv);
  596. /* change the parent */
  597. memmove(sp, vv->score, VtScoreSize);
  598. vtUnlock(u->lk);
  599. if(!lock)
  600. vtUnlock(vv->lk);
  601. return vv;
  602. Err:
  603. vtUnlock(u->lk);
  604. lumpDecRef(v, 0);
  605. lumpDecRef(vv, 1);
  606. return nil;
  607. Err2:
  608. lumpDecRef(v, 1);
  609. return nil;
  610. }
  611. void
  612. lumpFreeEntry(Lump *u, int entry)
  613. {
  614. uchar score[VtScoreSize];
  615. int type;
  616. ulong gen;
  617. VtEntry dir;
  618. Cache *c;
  619. c = u->c;
  620. vtLock(u->lk);
  621. if(u->state == LumpVenti)
  622. goto Exit;
  623. switch(u->type) {
  624. default:
  625. fprint(2, "freeing bad lump type: %d\n", u->type);
  626. return;
  627. case VtPointerType0:
  628. if((entry+1)*VtScoreSize > u->asize)
  629. goto Exit;
  630. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  631. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  632. type = u->dir?VtDirType:VtDataType;
  633. break;
  634. case VtPointerType1:
  635. case VtPointerType2:
  636. case VtPointerType3:
  637. case VtPointerType4:
  638. case VtPointerType5:
  639. case VtPointerType6:
  640. if((entry+1)*VtScoreSize > u->asize)
  641. goto Exit;
  642. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  643. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  644. type = u->type-1;
  645. break;
  646. case VtDirType:
  647. if((entry+1)*VtEntrySize > u->asize)
  648. goto Exit;
  649. if(!vtEntryUnpack(&dir, u->data, entry))
  650. goto Exit;
  651. if(!dir.flags & VtEntryActive)
  652. goto Exit;
  653. gen = dir.gen;
  654. if(gen != ~0)
  655. gen++;
  656. if(dir.depth == 0)
  657. type = (dir.flags&VtEntryDir)?VtDirType:VtDataType;
  658. else
  659. type = VtPointerType0 + dir.depth - 1;
  660. memmove(score, dir.score, VtScoreSize);
  661. memset(&dir, 0, sizeof(dir));
  662. dir.gen = gen;
  663. vtEntryPack(&dir, u->data, entry);
  664. break;
  665. case VtDataType:
  666. type = VtErrType;
  667. break;
  668. }
  669. vtUnlock(u->lk);
  670. if(type == VtErrType || !scoreIsLocal(score))
  671. return;
  672. u = cacheGetLump(c, score, type, c->size);
  673. if(u == nil)
  674. return;
  675. lumpDecRef(u, 1);
  676. /* XXX remove extra reference */
  677. lumpDecRef(u, 0);
  678. return;
  679. Exit:
  680. vtUnlock(u->lk);
  681. return;
  682. }
  683. void
  684. lumpCleanup(Lump *u)
  685. {
  686. int i, n;
  687. switch(u->type) {
  688. default:
  689. return;
  690. case VtPointerType0:
  691. case VtPointerType1:
  692. case VtPointerType2:
  693. case VtPointerType3:
  694. case VtPointerType4:
  695. case VtPointerType5:
  696. case VtPointerType6:
  697. n = u->asize/VtScoreSize;
  698. break;
  699. case VtDirType:
  700. n = u->asize/VtEntrySize;
  701. break;
  702. }
  703. for(i=0; i<n; i++)
  704. lumpFreeEntry(u, i);
  705. }
  706. void
  707. lumpDecRef(Lump *b, int unlock)
  708. {
  709. int i;
  710. Cache *c;
  711. if(b == nil)
  712. return;
  713. if(unlock)
  714. vtUnlock(b->lk);
  715. c = b->c;
  716. vtLock(c->lk);
  717. if(--b->ref > 0) {
  718. vtUnlock(c->lk);
  719. return;
  720. }
  721. assert(b->ref == 0);
  722. switch(b->state) {
  723. default:
  724. fprint(2, "bad state: %s\n", lumpState(b->state));
  725. assert(0);
  726. case LumpActive:
  727. /* hack - but will do for now */
  728. b->ref++;
  729. vtUnlock(c->lk);
  730. lumpCleanup(b);
  731. vtLock(c->lk);
  732. b->ref--;
  733. lumpSetState(b, LumpFree);
  734. break;
  735. case LumpZombie:
  736. lumpSetState(b, LumpFree);
  737. break;
  738. case LumpFree:
  739. case LumpVenti:
  740. break;
  741. }
  742. /*
  743. * reinsert in the free heap
  744. */
  745. if(b->heap == BadHeap) {
  746. i = upHeap(c->nheap++, b);
  747. c->heap[i] = b;
  748. b->heap = i;
  749. }
  750. vtUnlock(c->lk);
  751. }
  752. Lump *
  753. lumpIncRef(Lump *b)
  754. {
  755. Cache *c;
  756. c = b->c;
  757. vtLock(c->lk);
  758. assert(b->ref > 0);
  759. b->ref++;
  760. vtUnlock(c->lk);
  761. return b;
  762. }