cache.c 15 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877
  1. #include "stdinc.h"
  2. #include "vac.h"
  3. #include "dat.h"
  4. #include "fns.h"
typedef struct Label Label;

enum {
	BadHeap = ~0,	/* sentinel heap index: lump is not in the victim heap */
};
  9. /*
  10. * the plan is to store data to the cache in c->size blocks
  11. * with the block zero extended to fill it out. When writing to
  12. * venti, the block will be zero truncated. The walker will also check
  13. * that the block fits within psize or dsize as the case may be.
  14. */
/*
 * In-memory lump cache in front of a venti session.  Lumps are found
 * via a hash table keyed on (score, type); eviction victims are taken
 * from a heap ordered by second-to-last use time (used2).
 */
struct Cache
{
	VtLock *lk;		/* protects all fields below, and lump ref counts */
	VtSession *z;		/* venti session blocks are read from */
	u32int now;		/* ticks for usage timestamps */
	int size;		/* max. size of any block; allocated to each block */
	Lump **heads;		/* hash table for finding address */
	int nheap;		/* number of available victims */
	Lump **heap;		/* heap for locating victims */
	long nblocks;		/* number of blocks allocated */
	Lump *blocks;		/* array of block descriptors */
	u8int *mem;		/* memory for all block descriptors */
	Lump *free;		/* free list of lumps */
	long hashSize;		/* number of buckets in heads */
};
/*
 * the tag for a block is hash(index, parent tag)
 */
/* NOTE(review): Label is declared but never referenced in this file chunk. */
struct Label {
	uchar gen[4];		/* generation number */
	uchar state;		/* lump state */
	uchar type;		/* top bit indicates it is part of a directory */
	uchar tag[4];		/* tag of file it is in */
};
  39. static char ENoDir[] = "directory entry is not allocated";
  40. static void fixHeap(int si, Lump *b);
  41. static int upHeap(int i, Lump *b);
  42. static int downHeap(int i, Lump *b);
  43. static char *lumpState(int);
  44. static void lumpSetState(Lump *u, int state);
  45. Cache *
  46. cacheAlloc(VtSession *z, int blockSize, long nblocks)
  47. {
  48. int i;
  49. Cache *c;
  50. Lump *b;
  51. c = vtMemAllocZ(sizeof(Cache));
  52. c->lk = vtLockAlloc();
  53. c->z = z;
  54. c->size = blockSize;
  55. c->nblocks = nblocks;
  56. c->hashSize = nblocks;
  57. c->heads = vtMemAllocZ(c->hashSize*sizeof(Lump*));
  58. c->heap = vtMemAllocZ(nblocks*sizeof(Lump*));
  59. c->blocks = vtMemAllocZ(nblocks*sizeof(Lump));
  60. c->mem = vtMemAllocZ(nblocks * blockSize);
  61. for(i = 0; i < nblocks; i++){
  62. b = &c->blocks[i];
  63. b->lk = vtLockAlloc();
  64. b->c = c;
  65. b->data = &c->mem[i * blockSize];
  66. b->addr = i+1;
  67. b->state = LumpFree;
  68. b->heap = BadHeap;
  69. b->next = c->free;
  70. c->free = b;
  71. }
  72. c->nheap = 0;
  73. return c;
  74. }
  75. long
  76. cacheGetSize(Cache *c)
  77. {
  78. return c->nblocks;
  79. }
  80. int
  81. cacheGetBlockSize(Cache *c)
  82. {
  83. return c->size;
  84. }
/*
 * Resizing the cache is not supported: the arguments are ignored
 * and 0 (failure) is always returned.
 */
int
cacheSetSize(Cache *c, long nblocks)
{
	USED(c);
	USED(nblocks);
	return 0;
}
  92. void
  93. cacheFree(Cache *c)
  94. {
  95. int i;
  96. for(i = 0; i < c->nblocks; i++){
  97. assert(c->blocks[i].ref == 0);
  98. vtLockFree(c->blocks[i].lk);
  99. }
  100. vtMemFree(c->heads);
  101. vtMemFree(c->blocks);
  102. vtMemFree(c->mem);
  103. vtMemFree(c);
  104. }
  105. static u32int
  106. hash(Cache *c, uchar score[VtScoreSize], int type)
  107. {
  108. u32int h;
  109. uchar *p = score + VtScoreSize-4;
  110. h = (p[0] << 24) | (p[1] << 16) | (p[2] << 8) | p[3];
  111. h += type;
  112. return h % c->hashSize;
  113. }
  114. static void
  115. findLump(Cache *c, Lump *bb)
  116. {
  117. Lump *b, *last;
  118. int h;
  119. last = nil;
  120. h = hash(c, bb->score, bb->type);
  121. for(b = c->heads[h]; b != nil; b = b->next){
  122. if(last != b->prev)
  123. vtFatal("bad prev link");
  124. if(b == bb)
  125. return;
  126. last = b;
  127. }
  128. vtFatal("block missing from hash table");
  129. }
/*
 * Verify cache invariants: the heap is well-formed (indices match,
 * parent age <= child age), every block's data pointer is in place,
 * every block with a score is findable in the hash table, and the
 * free + heaped + referenced counts add up to nblocks.
 * Fatal on structural corruption; prints referenced blocks to fd 2.
 */
void
cacheCheck(Cache *c)
{
	u32int size, now;
	int i, k, refed, free;
	static uchar zero[VtScoreSize];	/* all-zero score: block has no score yet */
	Lump *p;

	size = c->size;
	now = c->now;

	/* count the free list */
	free = 0;
	for(p=c->free; p; p=p->next)
		free++;

	/* check heap indices and the parent/child ordering on used2 age */
	for(i = 0; i < c->nheap; i++){
		if(c->heap[i]->heap != i)
			vtFatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
		/* ages compared as (used2 - now) so u32int wraparound is handled */
		if(i > 0 && c->heap[(i - 1) >> 1]->used2 - now > c->heap[i]->used2 - now)
			vtFatal("bad heap ordering");
		k = (i << 1) + 1;	/* left child */
		if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
			vtFatal("bad heap ordering");
		k++;			/* right child */
		if(k < c->nheap && c->heap[i]->used2 - now > c->heap[k]->used2 - now)
			vtFatal("bad heap ordering");
	}

	/* a referenced block is exactly one that is off the heap */
	refed = 0;
	for(i = 0; i < c->nblocks; i++){
		if(c->blocks[i].data != &c->mem[i * size])
			vtFatal("mis-blocked at %d", i);
		if(c->blocks[i].ref && c->blocks[i].heap == BadHeap){
			refed++;
		}
		if(memcmp(zero, c->blocks[i].score, VtScoreSize))
			findLump(c, &c->blocks[i]);
	}
	if(refed > 0)fprint(2, "cacheCheck: nheap %d refed %d free %d\n", c->nheap, refed, free);
	assert(c->nheap + refed + free == c->nblocks);

	/* report all blocks still holding references */
	refed = 0;
	for(i = 0; i < c->nblocks; i++){
		if(c->blocks[i].ref) {
			if(1)fprint(2, "%d %V %d %s\n", c->blocks[i].type, c->blocks[i].score, c->blocks[i].ref, lumpState(c->blocks[i].state));
			refed++;
		}
	}
	if(refed > 0)fprint(2, "cacheCheck: in used %d\n", refed);
}
  175. /*
  176. * delete an arbitrary block from the heap
  177. */
  178. static void
  179. delHeap(Lump *db)
  180. {
  181. fixHeap(db->heap, db->c->heap[--db->c->nheap]);
  182. db->heap = BadHeap;
  183. }
  184. static void
  185. fixHeap(int si, Lump *b)
  186. {
  187. int i;
  188. i = upHeap(si, b);
  189. if(i == si)
  190. downHeap(i, b);
  191. }
/*
 * Sift b up from heap slot i toward the root until its age is no
 * younger than its parent's.  Ages are compared as (used2 - now) in
 * u32int arithmetic so that timestamp wraparound sorts correctly.
 * Returns the slot where b ends up; also records it in b->heap.
 */
static int
upHeap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int p;
	Cache *c;

	c = b->c;
	now = c->now;
	for(; i != 0; i = p){
		p = (i - 1) >> 1;	/* parent slot */
		bb = c->heap[p];
		/* stop once b is at least as old as its parent */
		if(b->used2 - now >= bb->used2 - now)
			break;
		/* pull the parent down into b's current slot */
		c->heap[i] = bb;
		bb->heap = i;
	}
	c->heap[i] = b;
	b->heap = i;
	return i;
}
/*
 * Sift b down from heap slot i toward the leaves until its age is no
 * older than either child's.  Ages are compared as (used2 - now) in
 * u32int arithmetic so that timestamp wraparound sorts correctly.
 * Returns the slot where b ends up; also records it in b->heap.
 */
static int
downHeap(int i, Lump *b)
{
	Lump *bb;
	u32int now;
	int k;
	Cache *c;

	c = b->c;
	now = c->now;
	for(; ; i = k){
		k = (i << 1) + 1;	/* left child */
		if(k >= c->nheap)
			break;		/* no children: done */
		/* pick the older of the two children */
		if(k + 1 < c->nheap && c->heap[k]->used2 - now > c->heap[k + 1]->used2 - now)
			k++;
		bb = c->heap[k];
		/* stop once b is no older than the chosen child */
		if(b->used2 - now <= bb->used2 - now)
			break;
		/* pull the child up into b's current slot */
		c->heap[i] = bb;
		bb->heap = i;
	}
	c->heap[i] = b;
	b->heap = i;
	return i;
}
/* called with c->lk held */
/*
 * Obtain an unused lump: take one off the free list if possible,
 * otherwise evict the least-recently-used unreferenced victim from
 * the heap and unchain it from its hash bucket.  Returns nil only if
 * the heap empties without yielding a victim (after asserting).
 * The returned lump is NOT locked and has ref == 0.
 */
Lump *
cacheBumpLump(Cache *c)
{
	Lump *b;

	/*
	 * missed: locate the block with the oldest second to last use.
	 * remove it from the heap, and fix up the heap.
	 */
	if(c->free) {
		b = c->free;
		c->free = b->next;
	} else {
		for(;;){
			if(c->nheap == 0) {
				cacheCheck(c);
				assert(0);
				return nil;
			}
			b = c->heap[0];	/* oldest victim is at the root */
			delHeap(b);
			/* referenced blocks can sit in the heap; skip them */
			if(b->ref == 0)
				break;
		}

		/*
		 * unchain the block from hash chain
		 */
		if(b->prev == nil)
			c->heads[hash(c, b->score, b->type)] = b->next;
		else
			b->prev->next = b->next;
		if(b->next != nil)
			b->next->prev = b->prev;
	}

	/*
	 * the new block has no last use, so assume it happens sometime in the middle
	 */
	b->used = (b->used2 + c->now) / 2;

	b->asize = 0;
	return b;
}
/*
 * Allocate a fresh local lump of the given type, with its data
 * zero-extended to size bytes.  The lump's score is synthesized from
 * its cache address (zero except the last 4 bytes), so it is "local"
 * in the sense of scoreIsLocal.  Retries every 100ms while the cache
 * is full.  Returns the lump locked, with ref == 1, in LumpActive state.
 */
Lump *
cacheAllocLump(Cache *c, int type, int size, int dir)
{
	Lump *b;
	ulong h;

	assert(size <= c->size);

again:
	vtLock(c->lk);
	b = cacheBumpLump(c);
	if(b == nil) {
		vtUnlock(c->lk);
		fprint(2, "cache is full\n");
		/* XXX should be better */
		sleep(100);
		goto again;
	}
	vtLock(b->lk);	/* safe: b has no other references yet */

	assert(b->ref == 0);
	b->ref++;
	b->used2 = b->used;
	b->used = c->now++;

	/* convert addr into score */
	memset(b->score, 0, VtScoreSize-4);
	b->score[VtScoreSize-4] = b->addr>>24;
	b->score[VtScoreSize-3] = b->addr>>16;
	b->score[VtScoreSize-2] = b->addr>>8;
	b->score[VtScoreSize-1] = b->addr;

	b->dir = dir;
	b->type = type;
	b->gen = 0;
	b->asize = size;
	b->state = LumpFree;	/* set to LumpActive below, after unlock */

	h = hash(c, b->score, b->type);

	/* chain onto correct hash */
	b->next = c->heads[h];
	c->heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	vtUnlock(c->lk);

	vtZeroExtend(type, b->data, 0, size);
	lumpSetState(b, LumpActive);

	return b;
}
  323. int
  324. scoreIsLocal(uchar score[VtScoreSize])
  325. {
  326. static uchar zero[VtScoreSize];
  327. return memcmp(score, zero, VtScoreSize-4) == 0;
  328. }
/*
 * Look up (score, type) in the cache; on a miss, fetch the block from
 * venti, verify its SHA1 against the score, and zero-extend it to
 * size bytes.  Local scores must already be cached — a miss on a
 * local score is an error (nil return).  Retries every 100ms while
 * the cache is full.  Returns the lump locked with an added reference,
 * or nil with the error string set.
 */
Lump *
cacheGetLump(Cache *c, uchar score[VtScoreSize], int type, int size)
{
	Lump *b;
	ulong h;
	int n;
	static uchar zero[VtScoreSize];

	assert(size <= c->size);

	h = hash(c, score, type);

again:
	/*
	 * look for the block in the cache
	 */
	vtLock(c->lk);
	for(b = c->heads[h]; b != nil; b = b->next){
		if(memcmp(b->score, score, VtScoreSize) == 0 && b->type == type)
			goto found;
	}

	/* should not be looking for a temp block */
	if(scoreIsLocal(score)) {
		if(memcmp(score, zero, VtScoreSize) == 0)
			vtSetError("looking for zero score");
		else
			vtSetError("missing local block");
		vtUnlock(c->lk);
		return nil;
	}

	b = cacheBumpLump(c);
	if(b == nil) {
		vtUnlock(c->lk);
		sleep(100);
		goto again;
	}

	/* chain onto correct hash */
	b->next = c->heads[h];
	c->heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	memmove(b->score, score, VtScoreSize);
	b->type = type;
	b->state = LumpFree;	/* LumpFree marks "data not loaded yet" */

found:
	b->ref++;
	b->used2 = b->used;
	b->used = c->now++;
	if(b->heap != BadHeap)
		fixHeap(b->heap, b);	/* usage changed: restore heap order */

	vtUnlock(c->lk);

	vtLock(b->lk);
	/* a non-free state means another thread already loaded the data */
	if(b->state != LumpFree)
		return b;

	n = vtRead(c->z, score, type, b->data, size);
	if(n < 0) {
		fprint(2, "read %V: %r\n", score);
		lumpDecRef(b, 1);
		return nil;
	}
	if(!vtSha1Check(score, b->data, n)) {
		vtSetError("vtSha1Check failed");
		lumpDecRef(b, 1);
		return nil;
	}
	vtZeroExtend(type, b->data, n, size);
	b->asize = size;
	lumpSetState(b, LumpVenti);

	return b;
}
  397. static char *
  398. lumpState(int state)
  399. {
  400. switch(state) {
  401. default:
  402. return "Unknown!!";
  403. case LumpFree:
  404. return "Free";
  405. case LumpActive:
  406. return "Active";
  407. case LumpSnap:
  408. return "Snap";
  409. case LumpZombie:
  410. return "Zombie";
  411. case LumpVenti:
  412. return "Venti";
  413. }
  414. }
/*
 * Record a lump state transition.  The commented-out print is a
 * debugging aid for tracing transitions; re-enable it when needed.
 */
static void
lumpSetState(Lump *u, int state)
{
//	if(u->state != LumpFree)
//	fprint(2, "%V: %s -> %s\n", u->score, lumpState(u->state), lumpState(state));
	u->state = state;
}
  422. int
  423. lumpGetScore(Lump *u, int offset, uchar score[VtScoreSize])
  424. {
  425. uchar *sp;
  426. VtRoot root;
  427. VtEntry dir;
  428. vtLock(u->lk);
  429. switch(u->type) {
  430. default:
  431. vtSetError("bad type");
  432. goto Err;
  433. case VtPointerType0:
  434. case VtPointerType1:
  435. case VtPointerType2:
  436. case VtPointerType3:
  437. case VtPointerType4:
  438. case VtPointerType5:
  439. case VtPointerType6:
  440. if((offset+1)*VtScoreSize > u->asize)
  441. sp = nil;
  442. else
  443. sp = u->data + offset*VtScoreSize;
  444. break;
  445. case VtRootType:
  446. if(u->asize < VtRootSize) {
  447. vtSetError("runt root block");
  448. goto Err;
  449. }
  450. if(!vtRootUnpack(&root, u->data))
  451. goto Err;
  452. sp = root.score;
  453. break;
  454. case VtDirType:
  455. if((offset+1)*VtEntrySize > u->asize) {
  456. vtSetError(ENoDir);
  457. goto Err;
  458. }
  459. if(!vtEntryUnpack(&dir, u->data, offset))
  460. goto Err;
  461. if(!dir.flags & VtEntryActive) {
  462. vtSetError(ENoDir);
  463. goto Err;
  464. }
  465. sp = dir.score;
  466. break;
  467. }
  468. if(sp == nil)
  469. memmove(score, vtZeroScore, VtScoreSize);
  470. else
  471. memmove(score, sp, VtScoreSize);
  472. vtUnlock(u->lk);
  473. return !scoreIsLocal(score);
  474. Err:
  475. vtUnlock(u->lk);
  476. return 0;
  477. }
/*
 * Walk from lump u to the child block referenced at position offset,
 * returning the child as a lump of the given type/size.  For writable
 * walks (readOnly == 0) a shared (Snap/Venti) child is split: a fresh
 * local copy is allocated, and the parent's stored score is updated
 * to point at the copy.  If the parent changes under us during the
 * split, the whole walk restarts (Again).  The returned lump carries
 * a reference and is locked iff lock is set.  Returns nil on error
 * with the error string set.
 */
Lump *
lumpWalk(Lump *u, int offset, int type, int size, int readOnly, int lock)
{
	Lump *v, *vv;
	Cache *c;
	uchar score[VtScoreSize], *sp;
	VtRoot root;
	VtEntry dir;
	int split, isdir;

	c = u->c;
	vtLock(u->lk);
Again:
	v = nil;
	vv = nil;
	isdir = u->dir;

	/* locate the score stored at offset inside the parent */
	switch(u->type) {
	default:
		vtSetError("bad type");
		goto Err;
	case VtPointerType0:
	case VtPointerType1:
	case VtPointerType2:
	case VtPointerType3:
	case VtPointerType4:
	case VtPointerType5:
	case VtPointerType6:
		if((offset+1)*VtScoreSize > u->asize)
			sp = nil;	/* past the end: use the zero score */
		else
			sp = u->data + offset*VtScoreSize;
		break;
	case VtRootType:
		if(u->asize < VtRootSize) {
			vtSetError("runt root block");
			goto Err;
		}
		if(!vtRootUnpack(&root, u->data))
			goto Err;
		sp = root.score;
		break;
	case VtDirType:
		if((offset+1)*VtEntrySize > u->asize) {
			vtSetError(ENoDir);
			goto Err;
		}
		if(!vtEntryUnpack(&dir, u->data, offset))
			goto Err;
		if(!(dir.flags & VtEntryActive)) {
			vtSetError(ENoDir);
			goto Err;
		}
		isdir = (dir.flags & VtEntryDir) != 0;
		/* point sp into the packed entry so the split below can
		 * rewrite the score in place (dir.score is only a copy) */
//		sp = dir.score;
		sp = u->data + offset*VtEntrySize + 20;
		break;
	}

	if(sp == nil)
		memmove(score, vtZeroScore, VtScoreSize);
	else
		memmove(score, sp, VtScoreSize);
	vtUnlock(u->lk);

if(0)fprint(2, "lumpWalk: %V:%s %d:%d-> %V:%d\n", u->score, lumpState(u->state), u->type, offset, score, type);

	v = cacheGetLump(c, score, type, size);
	if(v == nil)
		return nil;

	/* decide whether the child must be copied before writing */
	split = 1;
	if(readOnly)
		split = 0;

	switch(v->state) {
	default:
		assert(0);
	case LumpFree:
		fprint(2, "block is free %V!\n", v->score);
		vtSetError("phase error");
		goto Err2;
	case LumpActive:
		if(v->gen < u->gen) {
print("LumpActive gen\n");
			lumpSetState(v, LumpSnap);
			v->gen = u->gen;
		} else
			split = 0;	/* already private to this generation */
		break;
	case LumpSnap:
	case LumpVenti:
		break;
	}

	/* easy case */
	if(!split) {
		if(!lock)
			vtUnlock(v->lk);
		return v;
	}
	if(sp == nil) {
		vtSetError("bad offset");
		goto Err2;
	}

	/* copy-on-write: clone v into a new local lump vv */
	vv = cacheAllocLump(c, v->type, size, isdir);
	/* vv is locked */
	vv->gen = u->gen;
	memmove(vv->data, v->data, v->asize);
if(0)fprint(2, "split %V into %V\n", v->score, vv->score);
	lumpDecRef(v, 1);
	v = nil;
	vtLock(u->lk);
	if(u->state != LumpActive) {
		vtSetError("bad parent state: can not happen");
		goto Err;
	}

	/* check that nothing changed underfoot */
	if(memcmp(sp, score, VtScoreSize) != 0) {
		lumpDecRef(vv, 1);
		fprint(2, "lumpWalk: parent changed under foot\n");
		goto Again;
	}

	/* XXX - hold Active blocks up - will go eventually */
	lumpIncRef(vv);

	/* change the parent */
	memmove(sp, vv->score, VtScoreSize);
	vtUnlock(u->lk);

	if(!lock)
		vtUnlock(vv->lk);
	return vv;

Err:
	vtUnlock(u->lk);
	lumpDecRef(v, 0);
	lumpDecRef(vv, 1);
	return nil;

Err2:
	lumpDecRef(v, 1);
	return nil;
}
  610. void
  611. lumpFreeEntry(Lump *u, int entry)
  612. {
  613. uchar score[VtScoreSize];
  614. int type;
  615. ulong gen;
  616. VtEntry dir;
  617. Cache *c;
  618. c = u->c;
  619. vtLock(u->lk);
  620. if(u->state == LumpVenti)
  621. goto Exit;
  622. switch(u->type) {
  623. default:
  624. fprint(2, "freeing bad lump type: %d\n", u->type);
  625. return;
  626. case VtPointerType0:
  627. if((entry+1)*VtScoreSize > u->asize)
  628. goto Exit;
  629. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  630. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  631. type = u->dir?VtDirType:VtDataType;
  632. break;
  633. case VtPointerType1:
  634. case VtPointerType2:
  635. case VtPointerType3:
  636. case VtPointerType4:
  637. case VtPointerType5:
  638. case VtPointerType6:
  639. if((entry+1)*VtScoreSize > u->asize)
  640. goto Exit;
  641. memmove(score, u->data + entry*VtScoreSize, VtScoreSize);
  642. memmove(u->data + entry*VtScoreSize, vtZeroScore, VtScoreSize);
  643. type = u->type-1;
  644. break;
  645. case VtDirType:
  646. if((entry+1)*VtEntrySize > u->asize)
  647. goto Exit;
  648. if(!vtEntryUnpack(&dir, u->data, entry))
  649. goto Exit;
  650. if(!dir.flags & VtEntryActive)
  651. goto Exit;
  652. gen = dir.gen;
  653. if(gen != ~0)
  654. gen++;
  655. if(dir.depth == 0)
  656. type = (dir.flags&VtEntryDir)?VtDirType:VtDataType;
  657. else
  658. type = VtPointerType0 + dir.depth - 1;
  659. memmove(score, dir.score, VtScoreSize);
  660. memset(&dir, 0, sizeof(dir));
  661. dir.gen = gen;
  662. vtEntryPack(&dir, u->data, entry);
  663. break;
  664. case VtDataType:
  665. type = VtErrType;
  666. break;
  667. }
  668. vtUnlock(u->lk);
  669. if(type == VtErrType || !scoreIsLocal(score))
  670. return;
  671. u = cacheGetLump(c, score, type, c->size);
  672. if(u == nil)
  673. return;
  674. lumpDecRef(u, 1);
  675. /* XXX remove extra reference */
  676. lumpDecRef(u, 0);
  677. return;
  678. Exit:
  679. vtUnlock(u->lk);
  680. return;
  681. }
  682. void
  683. lumpCleanup(Lump *u)
  684. {
  685. int i, n;
  686. switch(u->type) {
  687. default:
  688. return;
  689. case VtPointerType0:
  690. case VtPointerType1:
  691. case VtPointerType2:
  692. case VtPointerType3:
  693. case VtPointerType4:
  694. case VtPointerType5:
  695. case VtPointerType6:
  696. n = u->asize/VtScoreSize;
  697. break;
  698. case VtDirType:
  699. n = u->asize/VtEntrySize;
  700. break;
  701. }
  702. for(i=0; i<n; i++)
  703. lumpFreeEntry(u, i);
  704. }
/*
 * Drop a reference to b (nil is a no-op).  If unlock is set, b->lk is
 * released first.  When the last reference goes away, an Active lump
 * has its children freed via lumpCleanup and becomes Free, a Zombie
 * becomes Free, and the lump is (re)inserted into the victim heap so
 * it can be evicted.
 */
void
lumpDecRef(Lump *b, int unlock)
{
	int i;
	Cache *c;

	if(b == nil)
		return;

	if(unlock)
		vtUnlock(b->lk);

	c = b->c;
	vtLock(c->lk);
	if(--b->ref > 0) {
		vtUnlock(c->lk);
		return;
	}
	assert(b->ref == 0);

	switch(b->state) {
	default:
		fprint(2, "bad state: %s\n", lumpState(b->state));
		assert(0);
	case LumpActive:
		/* hack - but will do for now */
		/* hold a temporary ref and drop c->lk while lumpCleanup
		 * recursively calls lumpFreeEntry/lumpDecRef */
		b->ref++;
		vtUnlock(c->lk);
		lumpCleanup(b);
		vtLock(c->lk);
		b->ref--;
		lumpSetState(b, LumpFree);
		break;
	case LumpZombie:
		lumpSetState(b, LumpFree);
		break;
	case LumpFree:
	case LumpVenti:
		break;
	}

	/*
	 * reinsert in the free heap
	 */
	if(b->heap == BadHeap) {
		i = upHeap(c->nheap++, b);
		c->heap[i] = b;
		b->heap = i;
	}

	vtUnlock(c->lk);
}
  751. Lump *
  752. lumpIncRef(Lump *b)
  753. {
  754. Cache *c;
  755. c = b->c;
  756. vtLock(c->lk);
  757. assert(b->ref > 0);
  758. b->ref++;
  759. vtUnlock(c->lk);
  760. return b;
  761. }