cache.c

  1. #include "stdinc.h"
  2. #include "dat.h"
  3. #include "fns.h"
  4. #include "error.h"
  5. #include "9.h" /* for cacheFlush */
  6. typedef struct FreeList FreeList;
  7. typedef struct BAddr BAddr;
  8. enum {
  9. BadHeap = ~0,
  10. };
  11. /*
  12. * Store data to the memory cache in c->size blocks
  13. * with the block zero extended to fill it out. When writing to
  14. * Venti, the block will be zero truncated. The walker will also check
  15. * that the block fits within psize or dsize as the case may be.
  16. */
  17. struct Cache
  18. {
  19. VtLock *lk;
  20. VtLock *dirtylk;
  21. int ref;
  22. int mode;
  23. Disk *disk;
  24. int size; /* block size */
  25. int ndmap; /* size of per-block dirty pointer map used in blockWrite */
  26. VtSession *z;
  27. u32int now; /* ticks for usage timestamps */
  28. Block **heads; /* hash table for finding address */
  29. int nheap; /* number of available victims */
  30. Block **heap; /* heap for locating victims */
  31. long nblocks; /* number of blocks allocated */
  32. Block *blocks; /* array of block descriptors */
  33. u8int *mem; /* memory for all block data & blists */
  34. BList *blfree;
  35. VtRendez *blrend;
  36. int ndirty; /* number of dirty blocks in the cache */
  37. int maxdirty; /* max number of dirty blocks */
  38. u32int vers;
  39. long hashSize;
  40. FreeList *fl;
  41. VtRendez *die; /* daemon threads should die when != nil */
  42. VtRendez *flush;
  43. VtRendez *flushwait;
  44. VtRendez *heapwait;
  45. BAddr *baddr;
  46. int bw, br, be;
  47. int nflush;
  48. Periodic *sync;
  49. /* unlink daemon */
  50. BList *uhead;
  51. BList *utail;
  52. VtRendez *unlink;
  53. /* block counts */
  54. int nused;
  55. int ndisk;
  56. };
  57. struct BList {
  58. int part;
  59. u32int addr;
  60. uchar type;
  61. u32int tag;
  62. u32int epoch;
  63. u32int vers;
  64. int recurse; /* for block unlink */
  65. /* for roll back */
  66. int index; /* -1 indicates not valid */
  67. union {
  68. uchar score[VtScoreSize];
  69. uchar entry[VtEntrySize];
  70. } old;
  71. BList *next;
  72. };
  73. struct BAddr {
  74. int part;
  75. u32int addr;
  76. u32int vers;
  77. };
  78. struct FreeList {
  79. VtLock *lk;
  80. u32int last; /* last block allocated */
  81. u32int end; /* end of data partition */
  82. u32int nfree; /* number of free blocks */
  83. u32int nused; /* number of used blocks */
  84. u32int epochLow; /* low epoch when last updated nfree and nused */
  85. };
  86. static FreeList *flAlloc(u32int end);
  87. static void flFree(FreeList *fl);
  88. static Block *cacheBumpBlock(Cache *c);
  89. static void heapDel(Block*);
  90. static void heapIns(Block*);
  91. static void cacheCheck(Cache*);
  92. static void unlinkThread(void *a);
  93. static void flushThread(void *a);
  94. static void flushBody(Cache *c);
  95. static void unlinkBody(Cache *c);
  96. static int cacheFlushBlock(Cache *c);
  97. static void cacheSync(void*);
  98. static BList *blistAlloc(Block*);
  99. static void blistFree(Cache*, BList*);
  100. static void doRemoveLink(Cache*, BList*);
  101. static void doRemoveLinkList(Cache*, BList*);
  102. /*
  103. * Mapping from local block type to Venti type
  104. */
  105. int vtType[BtMax] = {
  106. VtDataType, /* BtData | 0 */
  107. VtPointerType0, /* BtData | 1 */
  108. VtPointerType1, /* BtData | 2 */
  109. VtPointerType2, /* BtData | 3 */
  110. VtPointerType3, /* BtData | 4 */
  111. VtPointerType4, /* BtData | 5 */
  112. VtPointerType5, /* BtData | 6 */
  113. VtPointerType6, /* BtData | 7 */
  114. VtDirType, /* BtDir | 0 */
  115. VtPointerType0, /* BtDir | 1 */
  116. VtPointerType1, /* BtDir | 2 */
  117. VtPointerType2, /* BtDir | 3 */
  118. VtPointerType3, /* BtDir | 4 */
  119. VtPointerType4, /* BtDir | 5 */
  120. VtPointerType5, /* BtDir | 6 */
  121. VtPointerType6, /* BtDir | 7 */
  122. };
  123. /*
  124. * Allocate the memory cache.
  125. */
  126. Cache *
  127. cacheAlloc(Disk *disk, VtSession *z, ulong nblocks, int mode)
  128. {
  129. int i;
  130. Cache *c;
  131. Block *b;
  132. BList *bl;
  133. u8int *p;
  134. int nbl;
  135. c = vtMemAllocZ(sizeof(Cache));
  136. /* reasonable number of BList elements */
  137. nbl = nblocks * 4;
  138. c->lk = vtLockAlloc();
  139. c->dirtylk = vtLockAlloc(); /* allowed to dirty blocks */
  140. c->ref = 1;
  141. c->disk = disk;
  142. c->z = z;
  143. c->size = diskBlockSize(disk);
  144. bwatchSetBlockSize(c->size);
  145. /* round c->size up to be a nice multiple */
  146. c->size = (c->size + 127) & ~127;
  147. c->ndmap = (c->size/20 + 7) / 8;
  148. c->nblocks = nblocks;
  149. c->hashSize = nblocks;
  150. c->heads = vtMemAllocZ(c->hashSize*sizeof(Block*));
  151. c->heap = vtMemAllocZ(nblocks*sizeof(Block*));
  152. c->blocks = vtMemAllocZ(nblocks*sizeof(Block));
  153. c->mem = vtMemAllocZ(nblocks * (c->size + c->ndmap) + nbl * sizeof(BList));
  154. c->baddr = vtMemAllocZ(nblocks * sizeof(BAddr));
  155. c->mode = mode;
  156. c->vers++;
  157. p = c->mem;
  158. for(i = 0; i < nblocks; i++){
  159. b = &c->blocks[i];
  160. b->lk = vtLockAlloc();
  161. b->c = c;
  162. b->data = p;
  163. b->heap = i;
  164. b->ioready = vtRendezAlloc(b->lk);
  165. c->heap[i] = b;
  166. p += c->size;
  167. }
  168. c->nheap = nblocks;
  169. for(i = 0; i < nbl; i++){
  170. bl = (BList*)p;
  171. bl->next = c->blfree;
  172. c->blfree = bl;
  173. p += sizeof(BList);
  174. }
  175. /* separate loop to keep blocks and blists reasonably aligned */
  176. for(i = 0; i < nblocks; i++){
  177. b = &c->blocks[i];
  178. b->dmap = p;
  179. p += c->ndmap;
  180. }
  181. c->blrend = vtRendezAlloc(c->lk);
  182. c->maxdirty = nblocks*(DirtyPercentage*0.01);
  183. c->fl = flAlloc(diskSize(disk, PartData));
  184. c->unlink = vtRendezAlloc(c->lk);
  185. c->flush = vtRendezAlloc(c->lk);
  186. c->flushwait = vtRendezAlloc(c->lk);
  187. c->heapwait = vtRendezAlloc(c->lk);
  188. c->sync = periodicAlloc(cacheSync, c, 30*1000);
  189. if(mode == OReadWrite){
  190. c->ref += 2;
  191. vtThread(unlinkThread, c);
  192. vtThread(flushThread, c);
  193. }
  194. cacheCheck(c);
  195. return c;
  196. }
  197. /*
  198. * Free the whole memory cache, flushing all dirty blocks to the disk.
  199. */
  200. void
  201. cacheFree(Cache *c)
  202. {
  203. int i;
  204. /* kill off daemon threads */
  205. vtLock(c->lk);
  206. c->die = vtRendezAlloc(c->lk);
  207. periodicKill(c->sync);
  208. vtWakeup(c->flush);
  209. vtWakeup(c->unlink);
  210. while(c->ref > 1)
  211. vtSleep(c->die);
  212. /* flush everything out */
  213. do {
  214. unlinkBody(c);
  215. vtUnlock(c->lk);
  216. while(cacheFlushBlock(c))
  217. ;
  218. diskFlush(c->disk);
  219. vtLock(c->lk);
  220. } while(c->uhead || c->ndirty);
  221. vtUnlock(c->lk);
  222. cacheCheck(c);
  223. for(i = 0; i < c->nblocks; i++){
  224. assert(c->blocks[i].ref == 0);
  225. vtRendezFree(c->blocks[i].ioready);
  226. vtLockFree(c->blocks[i].lk);
  227. }
  228. flFree(c->fl);
  229. vtMemFree(c->baddr);
  230. vtMemFree(c->heads);
  231. vtMemFree(c->blocks);
  232. vtMemFree(c->mem);
  233. vtLockFree(c->lk);
  234. diskFree(c->disk);
  235. vtRendezFree(c->blrend);
  236. /* don't close vtSession */
  237. vtMemFree(c);
  238. }
  239. static void
  240. cacheDump(Cache *c)
  241. {
  242. int i;
  243. Block *b;
  244. for(i = 0; i < c->nblocks; i++){
  245. b = &c->blocks[i];
  246. fprint(2, "%d. p=%d a=%ud %V t=%d ref=%d state=%s io=%s pc=%#p\n",
  247. i, b->part, b->addr, b->score, b->l.type, b->ref,
  248. bsStr(b->l.state), bioStr(b->iostate), b->pc);
  249. }
  250. }
  251. static void
  252. cacheCheck(Cache *c)
  253. {
  254. u32int size, now;
  255. int i, k, refed;
  256. static uchar zero[VtScoreSize];
  257. Block *b;
  258. size = c->size;
  259. now = c->now;
  260. for(i = 0; i < c->nheap; i++){
  261. if(c->heap[i]->heap != i)
  262. vtFatal("mis-heaped at %d: %d", i, c->heap[i]->heap);
  263. if(i > 0 && c->heap[(i - 1) >> 1]->used - now > c->heap[i]->used - now)
  264. vtFatal("bad heap ordering");
  265. k = (i << 1) + 1;
  266. if(k < c->nheap && c->heap[i]->used - now > c->heap[k]->used - now)
  267. vtFatal("bad heap ordering");
  268. k++;
  269. if(k < c->nheap && c->heap[i]->used - now > c->heap[k]->used - now)
  270. vtFatal("bad heap ordering");
  271. }
  272. refed = 0;
  273. for(i = 0; i < c->nblocks; i++){
  274. b = &c->blocks[i];
  275. if(b->data != &c->mem[i * size])
  276. vtFatal("mis-blocked at %d", i);
  277. if(b->ref && b->heap == BadHeap){
  278. refed++;
  279. }
  280. }
  281. if(c->nheap + refed != c->nblocks){
  282. fprint(2, "cacheCheck: nheap %d refed %d nblocks %ld\n", c->nheap, refed, c->nblocks);
  283. cacheDump(c);
  284. }
  285. assert(c->nheap + refed == c->nblocks);
  286. refed = 0;
  287. for(i = 0; i < c->nblocks; i++){
  288. b = &c->blocks[i];
  289. if(b->ref){
  290. if(1)fprint(2, "p=%d a=%ud %V ref=%d %L\n", b->part, b->addr, b->score, b->ref, &b->l);
  291. refed++;
  292. }
  293. }
  294. if(refed > 0)fprint(2, "cacheCheck: in use %d\n", refed);
  295. }
  296. /*
  297. * locate the block with the oldest second to last use.
  298. * remove it from the heap, and fix up the heap.
  299. */
  300. /* called with c->lk held */
  301. static Block *
  302. cacheBumpBlock(Cache *c)
  303. {
  304. int printed;
  305. Block *b;
  306. /*
  307. * locate the block with the oldest second to last use.
  308. * remove it from the heap, and fix up the heap.
  309. */
  310. printed = 0;
  311. if(c->nheap == 0){
  312. while(c->nheap == 0){
  313. vtWakeup(c->flush);
  314. vtSleep(c->heapwait);
  315. if(c->nheap == 0){
  316. printed = 1;
  317. fprint(2, "entire cache is busy, %d dirty -- waking flush thread\n", c->ndirty);
  318. }
  319. }
  320. if(printed)
  321. fprint(2, "cache is okay again, %d dirty\n", c->ndirty);
  322. }
  323. b = c->heap[0];
  324. heapDel(b);
  325. assert(b->heap == BadHeap);
  326. assert(b->ref == 0);
  327. assert(b->iostate != BioDirty && b->iostate != BioReading && b->iostate != BioWriting);
  328. assert(b->prior == nil);
  329. assert(b->uhead == nil);
  330. /*
  331. * unchain the block from hash chain
  332. */
  333. if(b->prev){
  334. *(b->prev) = b->next;
  335. if(b->next)
  336. b->next->prev = b->prev;
  337. b->prev = nil;
  338. }
  339. if(0)fprint(2, "dropping %d:%x:%V\n", b->part, b->addr, b->score);
  340. /* set block to a reasonable state */
  341. b->ref = 1;
  342. b->part = PartError;
  343. memset(&b->l, 0, sizeof(b->l));
  344. b->iostate = BioEmpty;
  345. return b;
  346. }
  347. /*
  348. * look for a particular version of the block in the memory cache.
  349. */
  350. static Block *
  351. _cacheLocalLookup(Cache *c, int part, u32int addr, u32int vers,
  352. int waitlock, int *lockfailure)
  353. {
  354. Block *b;
  355. ulong h;
  356. h = addr % c->hashSize;
  357. if(lockfailure)
  358. *lockfailure = 0;
  359. /*
  360. * look for the block in the cache
  361. */
  362. vtLock(c->lk);
  363. for(b = c->heads[h]; b != nil; b = b->next){
  364. if(b->part == part && b->addr == addr)
  365. break;
  366. }
  367. if(b == nil || b->vers != vers){
  368. vtUnlock(c->lk);
  369. return nil;
  370. }
  371. if(!waitlock && !vtCanLock(b->lk)){
  372. *lockfailure = 1;
  373. vtUnlock(c->lk);
  374. return nil;
  375. }
  376. heapDel(b);
  377. b->ref++;
  378. vtUnlock(c->lk);
  379. bwatchLock(b);
  380. if(waitlock)
  381. vtLock(b->lk);
  382. b->nlock = 1;
  383. for(;;){
  384. switch(b->iostate){
  385. default:
  386. abort();
  387. case BioEmpty:
  388. case BioLabel:
  389. case BioClean:
  390. case BioDirty:
  391. if(b->vers != vers){
  392. blockPut(b);
  393. return nil;
  394. }
  395. return b;
  396. case BioReading:
  397. case BioWriting:
  398. vtSleep(b->ioready);
  399. break;
  400. case BioVentiError:
  401. blockPut(b);
  402. vtSetError("venti i/o error block 0x%.8ux", addr);
  403. return nil;
  404. case BioReadError:
  405. blockPut(b);
  406. vtSetError("error reading block 0x%.8ux", addr);
  407. return nil;
  408. }
  409. }
  410. /* NOT REACHED */
  411. }
  412. static Block*
  413. cacheLocalLookup(Cache *c, int part, u32int addr, u32int vers)
  414. {
  415. return _cacheLocalLookup(c, part, addr, vers, 1, 0);
  416. }
  417. /*
  418. * fetch a local (on-disk) block from the memory cache.
  419. * if it's not there, load it, bumping some other block.
  420. */
  421. Block *
  422. _cacheLocal(Cache *c, int part, u32int addr, int mode, u32int epoch)
  423. {
  424. Block *b;
  425. ulong h;
  426. assert(part != PartVenti);
  427. h = addr % c->hashSize;
  428. /*
  429. * look for the block in the cache
  430. */
  431. vtLock(c->lk);
  432. for(b = c->heads[h]; b != nil; b = b->next){
  433. if(b->part != part || b->addr != addr)
  434. continue;
  435. if(epoch && b->l.epoch != epoch){
  436. fprint(2, "_cacheLocal want epoch %ud got %ud\n", epoch, b->l.epoch);
  437. vtUnlock(c->lk);
  438. vtSetError(ELabelMismatch);
  439. return nil;
  440. }
  441. heapDel(b);
  442. b->ref++;
  443. break;
  444. }
  445. if(b == nil){
  446. b = cacheBumpBlock(c);
  447. b->part = part;
  448. b->addr = addr;
  449. localToGlobal(addr, b->score);
  450. /* chain onto correct hash */
  451. b->next = c->heads[h];
  452. c->heads[h] = b;
  453. if(b->next != nil)
  454. b->next->prev = &b->next;
  455. b->prev = &c->heads[h];
  456. }
  457. vtUnlock(c->lk);
  458. /*
  459. * BUG: what if the epoch changes right here?
  460. * In the worst case, we could end up in some weird
  461. * lock loop, because the block we want no longer exists,
  462. * and instead we're trying to lock a block we have no
  463. * business grabbing.
  464. *
  465. * For now, I'm not going to worry about it.
  466. */
  467. if(0)fprint(2, "cacheLocal: %d: %d %x\n", getpid(), b->part, b->addr);
  468. bwatchLock(b);
  469. vtLock(b->lk);
  470. b->nlock = 1;
  471. if(part == PartData && b->iostate == BioEmpty){
  472. if(!readLabel(c, &b->l, addr)){
  473. blockPut(b);
  474. return nil;
  475. }
  476. blockSetIOState(b, BioLabel);
  477. }
  478. if(epoch && b->l.epoch != epoch){
  479. blockPut(b);
  480. fprint(2, "_cacheLocal want epoch %ud got %ud\n", epoch, b->l.epoch);
  481. vtSetError(ELabelMismatch);
  482. return nil;
  483. }
  484. b->pc = getcallerpc(&c);
  485. for(;;){
  486. switch(b->iostate){
  487. default:
  488. abort();
  489. case BioEmpty:
  490. case BioLabel:
  491. if(mode == OOverWrite){
  492. blockSetIOState(b, BioClean);
  493. return b;
  494. }
  495. diskRead(c->disk, b);
  496. vtSleep(b->ioready);
  497. break;
  498. case BioClean:
  499. case BioDirty:
  500. return b;
  501. case BioReading:
  502. case BioWriting:
  503. vtSleep(b->ioready);
  504. break;
  505. case BioReadError:
  506. blockSetIOState(b, BioEmpty);
  507. blockPut(b);
  508. vtSetError("error reading block 0x%.8ux", addr);
  509. return nil;
  510. }
  511. }
  512. /* NOT REACHED */
  513. }
  514. Block *
  515. cacheLocal(Cache *c, int part, u32int addr, int mode)
  516. {
  517. return _cacheLocal(c, part, addr, mode, 0);
  518. }
  519. /*
  520. * fetch a local (on-disk) block from the memory cache.
  521. * if it's not there, load it, bumping some other block.
  522. * check tag and type.
  523. */
  524. Block *
  525. cacheLocalData(Cache *c, u32int addr, int type, u32int tag, int mode, u32int epoch)
  526. {
  527. Block *b;
  528. b = _cacheLocal(c, PartData, addr, mode, epoch);
  529. if(b == nil)
  530. return nil;
  531. if(b->l.type != type || b->l.tag != tag){
  532. fprint(2, "cacheLocalData: addr=%d type got %d exp %d: tag got %ux exp %ux\n",
  533. addr, b->l.type, type, b->l.tag, tag);
  534. vtSetError(ELabelMismatch);
  535. blockPut(b);
  536. return nil;
  537. }
  538. b->pc = getcallerpc(&c);
  539. return b;
  540. }
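/*
 * Illustrative sketch, not part of the original cache.c: a typical
 * read-only use of cacheLocalData.  A real caller takes addr, type and
 * tag from the parent block's Entry; the values here are hypothetical.
 * Every successful fetch must be balanced by blockPut.
 */
static void
exampleReadDataBlock(Cache *c, u32int addr, u32int tag)
{
	Block *b;

	b = cacheLocalData(c, addr, BtData, tag, OReadOnly, 0);
	if(b == nil)
		return;		/* vtSetError has already been called */
	/* ... examine b->data (c->size bytes) while holding the block ... */
	blockPut(b);
}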
  541. /*
  542. * fetch a global (Venti) block from the memory cache.
  543. * if it's not there, load it, bumping some other block.
  544. * check tag and type if it's really a local block in disguise.
  545. */
  546. Block *
  547. cacheGlobal(Cache *c, uchar score[VtScoreSize], int type, u32int tag, int mode)
  548. {
  549. int n;
  550. Block *b;
  551. ulong h;
  552. u32int addr;
  553. addr = globalToLocal(score);
  554. if(addr != NilBlock){
  555. b = cacheLocalData(c, addr, type, tag, mode, 0);
  556. if(b)
  557. b->pc = getcallerpc(&c);
  558. return b;
  559. }
  560. h = (u32int)(score[0]|(score[1]<<8)|(score[2]<<16)|(score[3]<<24)) % c->hashSize;
  561. /*
  562. * look for the block in the cache
  563. */
  564. vtLock(c->lk);
  565. for(b = c->heads[h]; b != nil; b = b->next){
  566. if(b->part != PartVenti || memcmp(b->score, score, VtScoreSize) != 0 || b->l.type != type)
  567. continue;
  568. heapDel(b);
  569. b->ref++;
  570. break;
  571. }
  572. if(b == nil){
  573. if(0)fprint(2, "cacheGlobal %V %d\n", score, type);
  574. b = cacheBumpBlock(c);
  575. b->part = PartVenti;
  576. b->addr = NilBlock;
  577. b->l.type = type;
  578. memmove(b->score, score, VtScoreSize);
  579. /* chain onto correct hash */
  580. b->next = c->heads[h];
  581. c->heads[h] = b;
  582. if(b->next != nil)
  583. b->next->prev = &b->next;
  584. b->prev = &c->heads[h];
  585. }
  586. vtUnlock(c->lk);
  587. bwatchLock(b);
  588. vtLock(b->lk);
  589. b->nlock = 1;
  590. b->pc = getcallerpc(&c);
  591. switch(b->iostate){
  592. default:
  593. abort();
  594. case BioEmpty:
  595. n = vtRead(c->z, score, vtType[type], b->data, c->size);
  596. if(n < 0 || !vtSha1Check(score, b->data, n)){
  597. blockSetIOState(b, BioVentiError);
  598. blockPut(b);
  599. vtSetError(
  600. "venti error reading block %V or wrong score: %r",
  601. score);
  602. return nil;
  603. }
  604. vtZeroExtend(vtType[type], b->data, n, c->size);
  605. blockSetIOState(b, BioClean);
  606. return b;
  607. case BioClean:
  608. return b;
  609. case BioVentiError:
  610. blockPut(b);
  611. vtSetError("venti i/o error or wrong score, block %V", score);
  612. return nil;
  613. case BioReadError:
  614. blockPut(b);
  615. vtSetError("error reading block %V", b->score);
  616. return nil;
  617. }
  618. /* NOT REACHED */
  619. }
  620. /*
  621. * allocate a new on-disk block and load it into the memory cache.
  622. * BUG: if the disk is full, should we flush some of it to Venti?
  623. */
  624. static u32int lastAlloc;
  625. Block *
  626. cacheAllocBlock(Cache *c, int type, u32int tag, u32int epoch, u32int epochLow)
  627. {
  628. FreeList *fl;
  629. u32int addr;
  630. Block *b;
  631. int n, nwrap;
  632. Label lab;
  633. n = c->size / LabelSize;
  634. fl = c->fl;
  635. vtLock(fl->lk);
  636. addr = fl->last;
  637. b = cacheLocal(c, PartLabel, addr/n, OReadOnly);
  638. if(b == nil){
  639. fprint(2, "cacheAllocBlock: xxx %R\n");
  640. vtUnlock(fl->lk);
  641. return nil;
  642. }
  643. nwrap = 0;
  644. for(;;){
  645. if(++addr >= fl->end){
  646. addr = 0;
  647. if(++nwrap >= 2){
  648. blockPut(b);
  649. fl->last = 0;
  650. vtSetError("disk is full");
  651. fprint(2, "cacheAllocBlock: xxx1 %R\n");
  652. vtUnlock(fl->lk);
  653. return nil;
  654. }
  655. }
  656. if(addr%n == 0){
  657. blockPut(b);
  658. b = cacheLocal(c, PartLabel, addr/n, OReadOnly);
  659. if(b == nil){
  660. fl->last = addr;
  661. fprint(2, "cacheAllocBlock: xxx2 %R\n");
  662. vtUnlock(fl->lk);
  663. return nil;
  664. }
  665. }
  666. if(!labelUnpack(&lab, b->data, addr%n))
  667. continue;
  668. if(lab.state == BsFree)
  669. goto Found;
  670. if(lab.state&BsClosed)
  671. if(lab.epochClose <= epochLow || lab.epoch==lab.epochClose)
  672. goto Found;
  673. }
  674. Found:
  675. blockPut(b);
  676. b = cacheLocal(c, PartData, addr, OOverWrite);
  677. if(b == nil){
  678. fprint(2, "cacheAllocBlock: xxx3 %R\n");
  679. return nil;
  680. }
  681. assert(b->iostate == BioLabel || b->iostate == BioClean);
  682. fl->last = addr;
  683. lab.type = type;
  684. lab.tag = tag;
  685. lab.state = BsAlloc;
  686. lab.epoch = epoch;
  687. lab.epochClose = ~(u32int)0;
  688. if(!blockSetLabel(b, &lab, 1)){
  689. fprint(2, "cacheAllocBlock: xxx4 %R\n");
  690. blockPut(b);
  691. return nil;
  692. }
  693. vtZeroExtend(vtType[type], b->data, 0, c->size);
  694. if(0)diskWrite(c->disk, b);
  695. if(0)fprint(2, "fsAlloc %ud type=%d tag = %ux\n", addr, type, tag);
  696. lastAlloc = addr;
  697. fl->nused++;
  698. vtUnlock(fl->lk);
  699. b->pc = getcallerpc(&c);
  700. return b;
  701. }
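/*
 * Illustrative sketch, not part of the original cache.c: allocating a
 * fresh data block.  cacheAllocBlock returns the block locked, zeroed
 * and already labeled (the label is ordered to disk before the data,
 * via blockSetLabel); the caller fills it in, dirties it and releases
 * it.  The tag and epoch arguments are hypothetical.
 */
static void
exampleAllocDataBlock(Cache *c, u32int tag, u32int epoch, u32int epochLow)
{
	Block *b;

	b = cacheAllocBlock(c, BtData, tag, epoch, epochLow);
	if(b == nil)
		return;		/* e.g. disk is full */
	/* ... fill b->data ... */
	blockDirty(b);
	blockPut(b);
}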
  702. int
  703. cacheDirty(Cache *c)
  704. {
  705. return c->ndirty;
  706. }
  707. void
  708. cacheCountUsed(Cache *c, u32int epochLow, u32int *used, u32int *total, u32int *bsize)
  709. {
  710. int n;
  711. u32int addr, nused;
  712. Block *b;
  713. Label lab;
  714. FreeList *fl;
  715. fl = c->fl;
  716. n = c->size / LabelSize;
  717. *bsize = c->size;
  718. vtLock(fl->lk);
  719. if(fl->epochLow == epochLow){
  720. *used = fl->nused;
  721. *total = fl->end;
  722. vtUnlock(fl->lk);
  723. return;
  724. }
  725. b = nil;
  726. nused = 0;
  727. for(addr=0; addr<fl->end; addr++){
  728. if(addr%n == 0){
  729. blockPut(b);
  730. b = cacheLocal(c, PartLabel, addr/n, OReadOnly);
  731. if(b == nil){
  732. fprint(2, "flCountUsed: loading %ux: %R\n", addr/n);
  733. break;
  734. }
  735. }
  736. if(!labelUnpack(&lab, b->data, addr%n))
  737. continue;
  738. if(lab.state == BsFree)
  739. continue;
  740. if(lab.state&BsClosed)
  741. if(lab.epochClose <= epochLow || lab.epoch==lab.epochClose)
  742. continue;
  743. nused++;
  744. }
  745. blockPut(b);
  746. if(addr == fl->end){
  747. fl->nused = nused;
  748. fl->epochLow = epochLow;
  749. }
  750. *used = nused;
  751. *total = fl->end;
  752. vtUnlock(fl->lk);
  753. return;
  754. }
  755. static FreeList *
  756. flAlloc(u32int end)
  757. {
  758. FreeList *fl;
  759. fl = vtMemAllocZ(sizeof(*fl));
  760. fl->lk = vtLockAlloc();
  761. fl->last = 0;
  762. fl->end = end;
  763. return fl;
  764. }
  765. static void
  766. flFree(FreeList *fl)
  767. {
  768. vtLockFree(fl->lk);
  769. vtMemFree(fl);
  770. }
  771. u32int
  772. cacheLocalSize(Cache *c, int part)
  773. {
  774. return diskSize(c->disk, part);
  775. }
  776. /*
  777. * The thread that has locked b may refer to it by
  778. * multiple names. Nlock counts the number of
  779. * references the locking thread holds. It will call
  780. * blockPut once per reference.
  781. */
  782. void
  783. blockDupLock(Block *b)
  784. {
  785. assert(b->nlock > 0);
  786. b->nlock++;
  787. }
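/*
 * Illustrative sketch, not part of the original cache.c: a thread that
 * refers to one locked block under two names takes an extra reference
 * with blockDupLock and releases each name separately.
 */
static void
exampleDupLock(Block *b)
{
	Block *alias;

	alias = b;
	blockDupLock(b);	/* nlock: 1 -> 2 */
	/* ... use b and alias independently ... */
	blockPut(alias);	/* nlock: 2 -> 1; block stays locked */
	blockPut(b);		/* nlock: 1 -> 0; block is unlocked */
}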
  788. /*
  789. * we're done with the block.
  790. * unlock it. can't use it after calling this.
  791. */
  792. void
  793. blockPut(Block* b)
  794. {
  795. Cache *c;
  796. if(b == nil)
  797. return;
  798. if(0)fprint(2, "blockPut: %d: %d %x %d %s\n", getpid(), b->part, b->addr, b->c->nheap, bioStr(b->iostate));
  799. if(b->iostate == BioDirty)
  800. bwatchDependency(b);
  801. if(--b->nlock > 0)
  802. return;
  803. /*
  804. * b->nlock should probably stay at zero while
  805. * the block is unlocked, but diskThread and vtSleep
  806. * conspire to assume that they can just vtLock(b->lk); blockPut(b),
  807. * so we have to keep b->nlock set to 1 even
  808. * when the block is unlocked.
  809. */
  810. assert(b->nlock == 0);
  811. b->nlock = 1;
  812. // b->pc = 0;
  813. bwatchUnlock(b);
  814. vtUnlock(b->lk);
  815. c = b->c;
  816. vtLock(c->lk);
  817. if(--b->ref > 0){
  818. vtUnlock(c->lk);
  819. return;
  820. }
  821. assert(b->ref == 0);
  822. switch(b->iostate){
  823. default:
  824. b->used = c->now++;
  825. heapIns(b);
  826. break;
  827. case BioEmpty:
  828. case BioLabel:
  829. if(c->nheap == 0)
  830. b->used = c->now++;
  831. else
  832. b->used = c->heap[0]->used;
  833. heapIns(b);
  834. break;
  835. case BioDirty:
  836. break;
  837. }
  838. vtUnlock(c->lk);
  839. }
  840. /*
  841. * set the label associated with a block.
  842. */
  843. Block*
  844. _blockSetLabel(Block *b, Label *l)
  845. {
  846. int lpb;
  847. Block *bb;
  848. u32int a;
  849. Cache *c;
  850. c = b->c;
  851. assert(b->part == PartData);
  852. assert(b->iostate == BioLabel || b->iostate == BioClean || b->iostate == BioDirty);
  853. lpb = c->size / LabelSize;
  854. a = b->addr / lpb;
  855. bb = cacheLocal(c, PartLabel, a, OReadWrite);
  856. if(bb == nil){
  857. blockPut(b);
  858. return nil;
  859. }
  860. b->l = *l;
  861. labelPack(l, bb->data, b->addr%lpb);
  862. blockDirty(bb);
  863. return bb;
  864. }
  865. int
  866. blockSetLabel(Block *b, Label *l, int allocating)
  867. {
  868. Block *lb;
  869. Label oldl;
  870. oldl = b->l;
  871. lb = _blockSetLabel(b, l);
  872. if(lb == nil)
  873. return 0;
  874. /*
  875. * If we're allocating the block, make sure the label (lb)
  876. * goes to disk before the data block (b) itself. This is to help
  877. * the blocks that in turn depend on b.
  878. *
  879. * Suppose bx depends on (must be written out after) b.
  880. * Once we write b we'll think it's safe to write bx.
  881. * Bx can't get at b unless it has a valid label, though.
  882. *
  883. * Allocation is the only case in which having a current label
  884. * is vital because:
  885. *
  886. * - l.type is set at allocation and never changes.
  887. * - l.tag is set at allocation and never changes.
  888. * - l.state is not checked when we load blocks.
  889. * - the archiver cares deeply about l.state being
  890. * BaActive vs. BaCopied, but that's handled
  891. * by direct calls to _blockSetLabel.
  892. */
  893. if(allocating)
  894. blockDependency(b, lb, -1, nil, nil);
  895. blockPut(lb);
  896. return 1;
  897. }
  898. /*
  899. * Record that bb must be written out before b.
  900. * If index is given, we're about to overwrite the score/entry
  901. * at that index in the block. Save the old value so we
  902. * can write a safer ``old'' version of the block if pressed.
  903. */
  904. void
  905. blockDependency(Block *b, Block *bb, int index, uchar *score, Entry *e)
  906. {
  907. BList *p;
  908. if(bb->iostate == BioClean)
  909. return;
  910. /*
  911. * Dependencies for blocks containing Entry structures
  912. * or scores must always be explained. The problem with
  913. * only explaining some of them is this. Suppose we have two
  914. * dependencies for the same field, the first explained
  915. * and the second not. We try to write the block when the first
  916. * dependency is not written but the second is. We will roll back
  917. * the first change even though the second trumps it.
  918. */
  919. if(index == -1 && bb->part == PartData)
  920. assert(b->l.type == BtData);
  921. if(bb->iostate != BioDirty){
  922. fprint(2, "%d:%x:%d iostate is %d in blockDependency\n",
  923. bb->part, bb->addr, bb->l.type, bb->iostate);
  924. abort();
  925. }
  926. p = blistAlloc(bb);
  927. if(p == nil)
  928. return;
  929. assert(bb->iostate == BioDirty);
  930. if(0)fprint(2, "%d:%x:%d depends on %d:%x:%d\n", b->part, b->addr, b->l.type, bb->part, bb->addr, bb->l.type);
  931. p->part = bb->part;
  932. p->addr = bb->addr;
  933. p->type = bb->l.type;
  934. p->vers = bb->vers;
  935. p->index = index;
  936. if(p->index >= 0){
  937. /*
  938. * This test would just be b->l.type==BtDir except
  939. * we need to exclude the super block.
  940. */
  941. if(b->l.type == BtDir && b->part == PartData)
  942. entryPack(e, p->old.entry, 0);
  943. else
  944. memmove(p->old.score, score, VtScoreSize);
  945. }
  946. p->next = b->prior;
  947. b->prior = p;
  948. }
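/*
 * Illustrative sketch, not part of the original cache.c: the usual
 * sequence when a pointer block is rewritten to point at a dirty child.
 * The old score is handed to blockDependency before it is overwritten,
 * so blockRollback can reconstruct a safe on-disk image of the parent.
 * This assumes parent is a pointer block (BtData+n or BtDir+n), so a
 * score rather than an Entry is saved; the index arithmetic is
 * illustrative.
 */
static void
exampleSetPointer(Block *parent, Block *child, int index)
{
	uchar *old;

	old = parent->data + index*VtScoreSize;
	blockDependency(parent, child, index, old, nil);	/* child must reach disk first */
	memmove(old, child->score, VtScoreSize);
	blockDirty(parent);
}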
  949. /*
  950. * Mark an in-memory block as dirty. If there are too many
  951. * dirty blocks, start writing some out to disk.
  952. *
  953. * If there were way too many dirty blocks, we used to
  954. * try to do some flushing ourselves, but it's just too dangerous --
  955. * it implies that the callers cannot have any of our priors locked,
  956. * but this is hard to avoid in some cases.
  957. */
  958. int
  959. blockDirty(Block *b)
  960. {
  961. Cache *c;
  962. c = b->c;
  963. assert(b->part != PartVenti);
  964. if(b->iostate == BioDirty)
  965. return 1;
  966. assert(b->iostate == BioClean);
  967. vtLock(c->dirtylk);
  968. vtLock(c->lk);
  969. b->iostate = BioDirty;
  970. c->ndirty++;
  971. if(c->ndirty > (c->maxdirty>>1))
  972. vtWakeup(c->flush);
  973. vtUnlock(c->lk);
  974. vtUnlock(c->dirtylk);
  975. return 1;
  976. }
  977. /*
  978. * We've decided to write out b. Maybe b has some pointers to blocks
  979. * that haven't yet been written to disk. If so, construct a slightly out-of-date
  980. * copy of b that is safe to write out. (diskThread will make sure the block
  981. * remains marked as dirty.)
  982. */
  983. uchar *
  984. blockRollback(Block *b, uchar *buf)
  985. {
  986. u32int addr;
  987. BList *p;
  988. Super super;
  989. /* easy case */
  990. if(b->prior == nil)
  991. return b->data;
  992. memmove(buf, b->data, b->c->size);
  993. for(p=b->prior; p; p=p->next){
  994. /*
  995. * we know p->index >= 0 because blockWrite has vetted this block for us.
  996. */
  997. assert(p->index >= 0);
  998. assert(b->part == PartSuper || (b->part == PartData && b->l.type != BtData));
  999. if(b->part == PartSuper){
  1000. assert(p->index == 0);
  1001. superUnpack(&super, buf);
  1002. addr = globalToLocal(p->old.score);
  1003. if(addr == NilBlock){
  1004. fprint(2, "rolling back super block: bad replacement addr %V\n", p->old.score);
  1005. abort();
  1006. }
  1007. super.active = addr;
  1008. superPack(&super, buf);
  1009. continue;
  1010. }
  1011. if(b->l.type == BtDir)
  1012. memmove(buf+p->index*VtEntrySize, p->old.entry, VtEntrySize);
  1013. else
  1014. memmove(buf+p->index*VtScoreSize, p->old.score, VtScoreSize);
  1015. }
  1016. return buf;
  1017. }
  1018. /*
  1019. * Try to write block b.
  1020. * If b depends on other blocks:
  1021. *
  1022. * If the block has been written out, remove the dependency.
  1023. * If the dependency is replaced by a more recent dependency,
  1024. * throw it out.
  1025. * If we know how to write out an old version of b that doesn't
  1026. * depend on it, do that.
  1027. *
  1028. * Otherwise, bail.
  1029. */
  1030. int
  1031. blockWrite(Block *b)
  1032. {
  1033. uchar *dmap;
  1034. Cache *c;
  1035. BList *p, **pp;
  1036. Block *bb;
  1037. int lockfail;
  1038. c = b->c;
  1039. if(b->iostate != BioDirty)
  1040. return 1;
  1041. dmap = b->dmap;
  1042. memset(dmap, 0, c->ndmap);
  1043. pp = &b->prior;
  1044. for(p=*pp; p; p=*pp){
  1045. if(p->index >= 0){
  1046. /* more recent dependency has succeeded; this one can go */
  1047. if(dmap[p->index/8] & (1<<(p->index%8)))
  1048. goto ignblock;
  1049. }
  1050. lockfail = 0;
  1051. bb = _cacheLocalLookup(c, p->part, p->addr, p->vers, 0, &lockfail);
  1052. if(bb == nil){
  1053. if(lockfail)
  1054. return 0;
  1055. /* block not in cache => was written already */
  1056. dmap[p->index/8] |= 1<<(p->index%8);
  1057. goto ignblock;
  1058. }
  1059. /*
  1060. * same version of block is still in cache.
  1061. *
  1062. * the assertion is true because the block still has version p->vers,
  1063. * which means it hasn't been written out since we last saw it.
  1064. */
  1065. if(bb->iostate != BioDirty){
  1066. fprint(2, "%d:%x:%d iostate is %d in blockWrite\n",
  1067. bb->part, bb->addr, bb->l.type, bb->iostate);
  1068. /* probably BioWriting if it happens? */
  1069. if(bb->iostate == BioClean)
  1070. goto ignblock;
  1071. }
  1072. blockPut(bb);
  1073. if(p->index < 0){
  1074. /*
  1075. * We don't know how to temporarily undo
  1076. * b's dependency on bb, so just don't write b yet.
  1077. */
  1078. if(0) fprint(2, "blockWrite skipping %d %x %d %d; need to write %d %x %d\n",
  1079. b->part, b->addr, b->vers, b->l.type, p->part, p->addr, bb->vers);
  1080. return 0;
  1081. }
  1082. /* keep walking down the list */
  1083. pp = &p->next;
  1084. continue;
  1085. ignblock:
  1086. *pp = p->next;
  1087. blistFree(c, p);
  1088. continue;
  1089. }
  1090. /*
  1091. * DiskWrite must never be called with a double-locked block.
  1092. * This call to diskWrite is okay because blockWrite is only called
  1093. * from the cache flush thread, which never double-locks a block.
  1094. */
  1095. diskWrite(c->disk, b);
  1096. return 1;
  1097. }
  1098. /*
  1099. * Change the I/O state of block b.
  1100. * Just an assignment except for magic in
  1101. * switch statement (read comments there).
  1102. */
  1103. void
  1104. blockSetIOState(Block *b, int iostate)
  1105. {
  1106. int dowakeup;
  1107. Cache *c;
  1108. BList *p, *q;
  1109. if(0) fprint(2, "iostate part=%d addr=%x %s->%s\n", b->part, b->addr, bioStr(b->iostate), bioStr(iostate));
  1110. c = b->c;
  1111. dowakeup = 0;
  1112. switch(iostate){
  1113. default:
  1114. abort();
  1115. case BioEmpty:
  1116. assert(!b->uhead);
  1117. break;
  1118. case BioLabel:
  1119. assert(!b->uhead);
  1120. break;
  1121. case BioClean:
  1122. bwatchDependency(b);
  1123. /*
  1124. * If b->prior is set, it means a write just finished.
  1125. * The prior list isn't needed anymore.
  1126. */
  1127. for(p=b->prior; p; p=q){
  1128. q = p->next;
  1129. blistFree(c, p);
  1130. }
  1131. b->prior = nil;
  1132. /*
  1133. * Freeing a block or just finished a write.
  1134. * Move the blocks from the per-block unlink
  1135. * queue to the cache unlink queue.
  1136. */
  1137. if(b->iostate == BioDirty || b->iostate == BioWriting){
  1138. vtLock(c->lk);
  1139. c->ndirty--;
  1140. b->iostate = iostate; /* change here to keep in sync with ndirty */
  1141. b->vers = c->vers++;
  1142. if(b->uhead){
  1143. /* add unlink blocks to unlink queue */
  1144. if(c->uhead == nil){
  1145. c->uhead = b->uhead;
  1146. vtWakeup(c->unlink);
  1147. }else
  1148. c->utail->next = b->uhead;
  1149. c->utail = b->utail;
  1150. b->uhead = nil;
  1151. }
  1152. vtUnlock(c->lk);
  1153. }
  1154. assert(!b->uhead);
  1155. dowakeup = 1;
  1156. break;
  1157. case BioDirty:
  1158. /*
  1159. * Wrote out an old version of the block (see blockRollback).
  1160. * Bump a version count, leave it dirty.
  1161. */
  1162. if(b->iostate == BioWriting){
  1163. vtLock(c->lk);
  1164. b->vers = c->vers++;
  1165. vtUnlock(c->lk);
  1166. dowakeup = 1;
  1167. }
  1168. break;
  1169. case BioReading:
  1170. case BioWriting:
  1171. /*
  1172. * Adding block to disk queue. Bump reference count.
  1173. * diskThread decs the count later by calling blockPut.
  1174. * This is here because we need to lock c->lk to
  1175. * manipulate the ref count.
  1176. */
  1177. vtLock(c->lk);
  1178. b->ref++;
  1179. vtUnlock(c->lk);
  1180. break;
  1181. case BioReadError:
  1182. case BioVentiError:
  1183. /*
  1184. * Oops.
  1185. */
  1186. dowakeup = 1;
  1187. break;
  1188. }
  1189. b->iostate = iostate;
  1190. /*
  1191. * Now that the state has changed, we can wake the waiters.
  1192. */
  1193. if(dowakeup)
  1194. vtWakeupAll(b->ioready);
  1195. }
  1196. /*
  1197. * The active file system is a tree of blocks.
  1198. * When we add snapshots to the mix, the entire file system
  1199. * becomes a dag and thus requires a bit more care.
  1200. *
  1201. * The life of the file system is divided into epochs. A snapshot
  1202. * ends one epoch and begins the next. Each file system block
  1203. * is marked with the epoch in which it was created (b.epoch).
  1204. * When the block is unlinked from the file system (closed), it is marked
  1205. * with the epoch in which it was removed (b.epochClose).
  1206. * Once we have discarded or archived all snapshots up to
  1207. * b.epochClose, we can reclaim the block.
  1208. *
  1209. * If a block was created in a past epoch but is not yet closed,
  1210. * it is treated as copy-on-write. Of course, in order to insert the
  1211. * new pointer into the tree, the parent must be made writable,
  1212. * and so on up the tree. The recursion stops because the root
  1213. * block is always writable.
  1214. *
  1215. * If blocks are never closed, they will never be reused, and
  1216. * we will run out of disk space. But marking a block as closed
  1217. * requires some care about dependencies and write orderings.
  1218. *
  1219. * (1) If a block p points at a copy-on-write block b and we
  1220. * copy b to create bb, then p must be written out after bb and
  1221. * lbb (bb's label block).
  1222. *
  1223. * (2) We have to mark b as closed, but only after we switch
  1224. * the pointer, so lb must be written out after p. In fact, we
  1225. * can't even update the in-memory copy, or the cache might
  1226. * mistakenly give out b for reuse before p gets written.
  1227. *
  1228. * CacheAllocBlock's call to blockSetLabel records a "bb after lbb" dependency.
  1229. * The caller is expected to record a "p after bb" dependency
  1230. * to finish (1), and also expected to call blockRemoveLink
  1231. * to arrange for (2) to happen once p is written.
  1232. *
  1233. * Until (2) happens, some pieces of the code (e.g., the archiver)
  1234. * still need to know whether a block has been copied, so we
  1235. * set the BsCopied bit in the label and force that to disk *before*
  1236. * the copy gets written out.
  1237. */
  1238. Block*
  1239. blockCopy(Block *b, u32int tag, u32int ehi, u32int elo)
  1240. {
  1241. Block *bb, *lb;
  1242. Label l;
  1243. if((b->l.state&BsClosed) || b->l.epoch >= ehi)
  1244. fprint(2, "blockCopy %#ux %L but fs is [%ud,%ud]\n",
  1245. b->addr, &b->l, elo, ehi);
  1246. bb = cacheAllocBlock(b->c, b->l.type, tag, ehi, elo);
  1247. if(bb == nil){
  1248. blockPut(b);
  1249. return nil;
  1250. }
  1251. /*
  1252. * Update label so we know the block has been copied.
  1253. * (It will be marked closed once it has been unlinked from
  1254. * the tree.) This must follow cacheAllocBlock since we
  1255. * can't be holding onto lb when we call cacheAllocBlock.
  1256. */
  1257. if((b->l.state&BsCopied)==0)
  1258. if(b->part == PartData){ /* not the superblock */
  1259. l = b->l;
  1260. l.state |= BsCopied;
  1261. lb = _blockSetLabel(b, &l);
  1262. if(lb == nil){
  1263. /* can't set label => can't copy block */
  1264. blockPut(b);
  1265. l.type = BtMax;
  1266. l.state = BsFree;
  1267. l.epoch = 0;
  1268. l.epochClose = 0;
  1269. l.tag = 0;
  1270. blockSetLabel(bb, &l, 0);
  1271. blockPut(bb);
  1272. return nil;
  1273. }
  1274. blockDependency(bb, lb, -1, nil, nil);
  1275. blockPut(lb);
  1276. }
  1277. memmove(bb->data, b->data, b->c->size);
  1278. blockDirty(bb);
  1279. blockPut(b);
  1280. return bb;
  1281. }
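/*
 * Illustrative sketch, not part of the original cache.c: what a caller
 * of blockCopy is expected to do, following the comment above.  It
 * records the "p after bb" dependency (1) and queues the old block to
 * be closed once p reaches disk (2).  This assumes b is a local
 * PartData block and p is a pointer block; the names and index
 * handling are illustrative.
 */
static void
exampleCopyOnWrite(Block *p, Block *b, int index, u32int tag, u32int ehi, u32int elo)
{
	Block *bb;
	u32int oaddr, otag;
	int otype;

	oaddr = b->addr;
	otype = b->l.type;
	otag = b->l.tag;
	bb = blockCopy(b, tag, ehi, elo);	/* consumes b, even on failure */
	if(bb == nil)
		return;
	/* (1) p must be written after bb; save the old score first */
	blockDependency(p, bb, index, p->data + index*VtScoreSize, nil);
	memmove(p->data + index*VtScoreSize, bb->score, VtScoreSize);
	blockDirty(p);
	/* (2) close the old block in this epoch once p has been written */
	blockRemoveLink(p, oaddr, otype, otag, 0);
	blockPut(bb);
}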
  1282. /*
  1283. * Block b once pointed at the block bb at addr/type/tag, but no longer does.
  1284. * If recurse is set, we are unlinking all of bb's children as well.
  1285. *
  1286. * We can't reclaim bb (or its kids) until the block b gets written to disk. We add
  1287. * the relevant information to b's list of unlinked blocks. Once b is written,
  1288. * the list will be queued for processing.
  1289. *
  1290. * If b depends on bb, it doesn't anymore, so we remove bb from the prior list.
  1291. */
  1292. void
  1293. blockRemoveLink(Block *b, u32int addr, int type, u32int tag, int recurse)
  1294. {
  1295. BList *p, **pp, bl;
  1296. /* remove bb from prior list */
  1297. for(pp=&b->prior; (p=*pp)!=nil; ){
  1298. if(p->part == PartData && p->addr == addr){
  1299. *pp = p->next;
  1300. blistFree(b->c, p);
  1301. }else
  1302. pp = &p->next;
  1303. }
  1304. bl.part = PartData;
  1305. bl.addr = addr;
  1306. bl.type = type;
  1307. bl.tag = tag;
  1308. if(b->l.epoch == 0)
  1309. assert(b->part == PartSuper);
  1310. bl.epoch = b->l.epoch;
  1311. bl.next = nil;
  1312. bl.recurse = recurse;
  1313. p = blistAlloc(b);
  1314. if(p == nil){
  1315. /*
  1316. * We were out of blists so blistAlloc wrote b to disk.
  1317. */
  1318. doRemoveLink(b->c, &bl);
  1319. return;
  1320. }
  1321. /* Uhead is only processed when the block goes from Dirty -> Clean */
  1322. assert(b->iostate == BioDirty);
  1323. *p = bl;
  1324. if(b->uhead == nil)
  1325. b->uhead = p;
  1326. else
  1327. b->utail->next = p;
  1328. b->utail = p;
  1329. }
  1330. /*
  1331. * Process removal of a single block and perhaps its children.
  1332. */
  1333. static void
  1334. doRemoveLink(Cache *c, BList *p)
  1335. {
  1336. int i, n, recurse;
  1337. u32int a;
  1338. Block *b;
  1339. Label l;
  1340. BList bl;
  1341. recurse = (p->recurse && p->type != BtData && p->type != BtDir);
  1342. /*
  1343. * We're not really going to overwrite b, but if we're not
  1344. * going to look at its contents, there is no point in reading
  1345. * them from the disk.
  1346. */
  1347. b = cacheLocalData(c, p->addr, p->type, p->tag, recurse ? OReadOnly : OOverWrite, 0);
  1348. if(b == nil)
  1349. return;
  1350. /*
  1351. * When we're unlinking from the superblock, close with the next epoch.
  1352. */
  1353. if(p->epoch == 0)
  1354. p->epoch = b->l.epoch+1;
  1355. /* sanity check */
  1356. if(b->l.epoch > p->epoch){
  1357. fprint(2, "doRemoveLink: strange epoch %ud > %ud\n", b->l.epoch, p->epoch);
  1358. blockPut(b);
  1359. return;
  1360. }
  1361. if(recurse){
  1362. n = c->size / VtScoreSize;
  1363. for(i=0; i<n; i++){
  1364. a = globalToLocal(b->data + i*VtScoreSize);
  1365. if(a == NilBlock || !readLabel(c, &l, a))
  1366. continue;
  1367. if(l.state&BsClosed)
  1368. continue;
  1369. /*
  1370. * If stack space becomes an issue...
  1371. p->addr = a;
  1372. p->type = l.type;
  1373. p->tag = l.tag;
  1374. doRemoveLink(c, p);
  1375. */
  1376. bl.part = PartData;
  1377. bl.addr = a;
  1378. bl.type = l.type;
  1379. bl.tag = l.tag;
  1380. bl.epoch = p->epoch;
  1381. bl.next = nil;
  1382. bl.recurse = 1;
  1383. /* give up the block lock - share with others */
  1384. blockPut(b);
  1385. doRemoveLink(c, &bl);
  1386. b = cacheLocalData(c, p->addr, p->type, p->tag, OReadOnly, 0);
  1387. if(b == nil){
  1388. fprint(2, "warning: lost block in doRemoveLink\n");
  1389. return;
  1390. }
  1391. }
  1392. }
  1393. l = b->l;
  1394. l.state |= BsClosed;
  1395. l.epochClose = p->epoch;
  1396. blockSetLabel(b, &l, 0);
  1397. blockPut(b);
  1398. }
  1399. /*
  1400. * Allocate a BList so that we can record a dependency
  1401. * or queue a removal related to block b.
  1402. * If we can't find a BList, we write out b and return nil.
  1403. */
  1404. static BList *
  1405. blistAlloc(Block *b)
  1406. {
  1407. Cache *c;
  1408. BList *p;
  1409. if(b->iostate != BioDirty){
  1410. /*
  1411. * should not happen anymore -
  1412. * blockDirty used to flush but no longer does.
  1413. */
  1414. assert(b->iostate == BioClean);
  1415. fprint(2, "blistAlloc: called on clean block\n");
  1416. return nil;
  1417. }
  1418. c = b->c;
  1419. vtLock(c->lk);
  1420. if(c->blfree == nil){
  1421. /*
  1422. * No free BLists. What are our options?
  1423. */
  1424. /* Block has no priors? Just write it. */
  1425. if(b->prior == nil){
  1426. vtUnlock(c->lk);
  1427. diskWriteAndWait(c->disk, b);
  1428. return nil;
  1429. }
  1430. /*
  1431. * Wake the flush thread, which will hopefully free up
  1432. * some BLists for us. We used to flush a block from
  1433. * our own prior list and reclaim that BList, but this is
  1434. * a no-no: some of the blocks on our prior list may
  1435. * be locked by our caller. Or maybe their label blocks
  1436. * are locked by our caller. In any event, it's too hard
  1437. * to make sure we can do I/O for ourselves. Instead,
  1438. * we assume the flush thread will find something.
  1439. * (The flush thread never blocks waiting for a block,
  1440. * so it can't deadlock like we can.)
  1441. */
  1442. vtLock(c->lk);
  1443. while(c->blfree == nil){
  1444. vtWakeup(c->flush);
  1445. vtSleep(c->blrend);
  1446. if(c->blfree == nil)
  1447. fprint(2, "flushing for blists\n");
  1448. }
  1449. }
  1450. p = c->blfree;
  1451. c->blfree = p->next;
  1452. vtUnlock(c->lk);
  1453. return p;
  1454. }
  1455. static void
  1456. blistFree(Cache *c, BList *bl)
  1457. {
  1458. vtLock(c->lk);
  1459. bl->next = c->blfree;
  1460. c->blfree = bl;
  1461. vtWakeup(c->blrend);
  1462. vtUnlock(c->lk);
  1463. }
  1464. char*
  1465. bsStr(int state)
  1466. {
  1467. static char s[100];
  1468. if(state == BsFree)
  1469. return "Free";
  1470. if(state == BsBad)
  1471. return "Bad";
  1472. sprint(s, "%x", state);
  1473. if(!(state&BsAlloc))
  1474. strcat(s, ",Free"); /* should not happen */
  1475. if(state&BsCopied)
  1476. strcat(s, ",Copied");
  1477. if(state&BsVenti)
  1478. strcat(s, ",Venti");
  1479. if(state&BsClosed)
  1480. strcat(s, ",Closed");
  1481. return s;
  1482. }
  1483. char *
  1484. bioStr(int iostate)
  1485. {
  1486. switch(iostate){
  1487. default:
  1488. return "Unknown!!";
  1489. case BioEmpty:
  1490. return "Empty";
  1491. case BioLabel:
  1492. return "Label";
  1493. case BioClean:
  1494. return "Clean";
  1495. case BioDirty:
  1496. return "Dirty";
  1497. case BioReading:
  1498. return "Reading";
  1499. case BioWriting:
  1500. return "Writing";
  1501. case BioReadError:
  1502. return "ReadError";
  1503. case BioVentiError:
  1504. return "VentiError";
  1505. case BioMax:
  1506. return "Max";
  1507. }
  1508. }
  1509. static char *bttab[] = {
  1510. "BtData",
  1511. "BtData+1",
  1512. "BtData+2",
  1513. "BtData+3",
  1514. "BtData+4",
  1515. "BtData+5",
  1516. "BtData+6",
  1517. "BtData+7",
  1518. "BtDir",
  1519. "BtDir+1",
  1520. "BtDir+2",
  1521. "BtDir+3",
  1522. "BtDir+4",
  1523. "BtDir+5",
  1524. "BtDir+6",
  1525. "BtDir+7",
  1526. };
  1527. char*
  1528. btStr(int type)
  1529. {
  1530. if(type < nelem(bttab))
  1531. return bttab[type];
  1532. return "unknown";
  1533. }
  1534. int
  1535. labelFmt(Fmt *f)
  1536. {
  1537. Label *l;
  1538. l = va_arg(f->args, Label*);
  1539. return fmtprint(f, "%s,%s,e=%ud,%d,tag=%#ux",
  1540. btStr(l->type), bsStr(l->state), l->epoch, (int)l->epochClose, l->tag);
  1541. }
  1542. int
  1543. scoreFmt(Fmt *f)
  1544. {
  1545. uchar *v;
  1546. int i;
  1547. u32int addr;
  1548. v = va_arg(f->args, uchar*);
  1549. if(v == nil){
  1550. fmtprint(f, "*");
  1551. }else if((addr = globalToLocal(v)) != NilBlock)
  1552. fmtprint(f, "0x%.8ux", addr);
  1553. else{
  1554. for(i = 0; i < VtScoreSize; i++)
  1555. fmtprint(f, "%2.2ux", v[i]);
  1556. }
  1557. return 0;
  1558. }
  1559. static int
  1560. upHeap(int i, Block *b)
  1561. {
  1562. Block *bb;
  1563. u32int now;
  1564. int p;
  1565. Cache *c;
  1566. c = b->c;
  1567. now = c->now;
  1568. for(; i != 0; i = p){
  1569. p = (i - 1) >> 1;
  1570. bb = c->heap[p];
  1571. if(b->used - now >= bb->used - now)
  1572. break;
  1573. c->heap[i] = bb;
  1574. bb->heap = i;
  1575. }
  1576. c->heap[i] = b;
  1577. b->heap = i;
  1578. return i;
  1579. }
  1580. static int
  1581. downHeap(int i, Block *b)
  1582. {
  1583. Block *bb;
  1584. u32int now;
  1585. int k;
  1586. Cache *c;
  1587. c = b->c;
  1588. now = c->now;
  1589. for(; ; i = k){
  1590. k = (i << 1) + 1;
  1591. if(k >= c->nheap)
  1592. break;
  1593. if(k + 1 < c->nheap && c->heap[k]->used - now > c->heap[k + 1]->used - now)
  1594. k++;
  1595. bb = c->heap[k];
  1596. if(b->used - now <= bb->used - now)
  1597. break;
  1598. c->heap[i] = bb;
  1599. bb->heap = i;
  1600. }
  1601. c->heap[i] = b;
  1602. b->heap = i;
  1603. return i;
  1604. }
  1605. /*
  1606. * Delete a block from the heap.
  1607. * Called with c->lk held.
  1608. */
  1609. static void
  1610. heapDel(Block *b)
  1611. {
  1612. int i, si;
  1613. Cache *c;
  1614. c = b->c;
  1615. si = b->heap;
  1616. if(si == BadHeap)
  1617. return;
  1618. b->heap = BadHeap;
  1619. c->nheap--;
  1620. if(si == c->nheap)
  1621. return;
  1622. b = c->heap[c->nheap];
  1623. i = upHeap(si, b);
  1624. if(i == si)
  1625. downHeap(i, b);
  1626. }
  1627. /*
  1628. * Insert a block into the heap.
  1629. * Called with c->lk held.
  1630. */
  1631. static void
  1632. heapIns(Block *b)
  1633. {
  1634. assert(b->heap == BadHeap);
  1635. upHeap(b->c->nheap++, b);
  1636. vtWakeup(b->c->heapwait);
  1637. }
  1638. /*
  1639. * Get just the label for a block.
  1640. */
  1641. int
  1642. readLabel(Cache *c, Label *l, u32int addr)
  1643. {
  1644. int lpb;
  1645. Block *b;
  1646. u32int a;
  1647. lpb = c->size / LabelSize;
  1648. a = addr / lpb;
  1649. b = cacheLocal(c, PartLabel, a, OReadOnly);
  1650. if(b == nil){
  1651. blockPut(b);
  1652. return 0;
  1653. }
  1654. if(!labelUnpack(l, b->data, addr%lpb)){
  1655. blockPut(b);
  1656. return 0;
  1657. }
  1658. blockPut(b);
  1659. return 1;
  1660. }
  1661. /*
  1662. * Process unlink queue.
  1663. * Called with c->lk held.
  1664. */
  1665. static void
  1666. unlinkBody(Cache *c)
  1667. {
  1668. BList *p;
  1669. while(c->uhead != nil){
  1670. p = c->uhead;
  1671. c->uhead = p->next;
  1672. vtUnlock(c->lk);
  1673. doRemoveLink(c, p);
  1674. vtLock(c->lk);
  1675. p->next = c->blfree;
  1676. c->blfree = p;
  1677. }
  1678. }
  1679. /*
  1680. * Occasionally unlink the blocks on the cache unlink queue.
  1681. */
  1682. static void
  1683. unlinkThread(void *a)
  1684. {
  1685. Cache *c = a;
  1686. vtThreadSetName("unlink");
  1687. vtLock(c->lk);
  1688. for(;;){
  1689. while(c->uhead == nil && c->die == nil)
  1690. vtSleep(c->unlink);
  1691. if(c->die != nil)
  1692. break;
  1693. unlinkBody(c);
  1694. }
  1695. c->ref--;
  1696. vtWakeup(c->die);
  1697. vtUnlock(c->lk);
  1698. }
  1699. static int
  1700. baddrCmp(void *a0, void *a1)
  1701. {
  1702. BAddr *b0, *b1;
  1703. b0 = a0;
  1704. b1 = a1;
  1705. if(b0->part < b1->part)
  1706. return -1;
  1707. if(b0->part > b1->part)
  1708. return 1;
  1709. if(b0->addr < b1->addr)
  1710. return -1;
  1711. if(b0->addr > b1->addr)
  1712. return 1;
  1713. return 0;
  1714. }
  1715. /*
  1716. * Scan the block list for dirty blocks; add them to the list c->baddr.
  1717. */
  1718. static void
  1719. flushFill(Cache *c)
  1720. {
  1721. int i, ndirty;
  1722. BAddr *p;
  1723. Block *b;
  1724. vtLock(c->lk);
  1725. if(c->ndirty == 0){
  1726. vtUnlock(c->lk);
  1727. return;
  1728. }
  1729. p = c->baddr;
  1730. ndirty = 0;
  1731. for(i=0; i<c->nblocks; i++){
  1732. b = c->blocks + i;
  1733. if(b->part == PartError)
  1734. continue;
  1735. if(b->iostate == BioDirty || b->iostate == BioWriting)
  1736. ndirty++;
  1737. if(b->iostate != BioDirty)
  1738. continue;
  1739. p->part = b->part;
  1740. p->addr = b->addr;
  1741. p->vers = b->vers;
  1742. p++;
  1743. }
  1744. if(ndirty != c->ndirty){
  1745. fprint(2, "ndirty mismatch expected %d found %d\n",
  1746. c->ndirty, ndirty);
  1747. c->ndirty = ndirty;
  1748. }
  1749. vtUnlock(c->lk);
  1750. c->bw = p - c->baddr;
  1751. qsort(c->baddr, c->bw, sizeof(BAddr), baddrCmp);
  1752. }
  1753. /*
  1754. * This is not thread safe, i.e. it can't be called from multiple threads.
  1755. *
  1756. * It's okay how we use it, because it only gets called in
  1757. * the flushThread. And cacheFree, but only after
  1758. * cacheFree has killed off the flushThread.
  1759. */
  1760. static int
  1761. cacheFlushBlock(Cache *c)
  1762. {
  1763. Block *b;
  1764. BAddr *p;
  1765. int lockfail, nfail;
  1766. nfail = 0;
  1767. for(;;){
  1768. if(c->br == c->be){
  1769. if(c->bw == 0 || c->bw == c->be)
  1770. flushFill(c);
  1771. c->br = 0;
  1772. c->be = c->bw;
  1773. c->bw = 0;
  1774. c->nflush = 0;
  1775. }
  1776. if(c->br == c->be)
  1777. return 0;
  1778. p = c->baddr + c->br;
  1779. c->br++;
  1780. b = _cacheLocalLookup(c, p->part, p->addr, p->vers, 0, &lockfail);
  1781. if(b && blockWrite(b)){
  1782. c->nflush++;
  1783. blockPut(b);
  1784. return 1;
  1785. }
  1786. if(b)
  1787. blockPut(b);
  1788. /*
  1789. * Why didn't we write the block?
  1790. */
  1791. /* Block already written out */
  1792. if(b == nil && !lockfail)
  1793. continue;
  1794. /* Failed to acquire lock; sleep if happens a lot. */
  1795. if(lockfail && ++nfail > 100){
  1796. sleep(500);
  1797. nfail = 0;
  1798. }
  1799. /* Requeue block. */
  1800. if(c->bw < c->be)
  1801. c->baddr[c->bw++] = *p;
  1802. }
  1803. }
  1804. /*
  1805. * Occasionally flush dirty blocks from memory to the disk.
  1806. */
  1807. static void
  1808. flushThread(void *a)
  1809. {
  1810. Cache *c = a;
  1811. int i;
  1812. vtThreadSetName("flush");
  1813. vtLock(c->lk);
  1814. while(c->die == nil){
  1815. vtSleep(c->flush);
  1816. vtUnlock(c->lk);
  1817. for(i=0; i<FlushSize; i++)
  1818. if(!cacheFlushBlock(c)){
  1819. /*
  1820. * If i==0, could be someone is waking us repeatedly
  1821. * to flush the cache but there's no work to do.
  1822. * Pause a little.
  1823. */
  1824. if(i==0){
  1825. // fprint(2, "flushthread found nothing to flush - %d dirty\n", c->ndirty);
  1826. sleep(250);
  1827. }
  1828. break;
  1829. }
  1830. if(i==0 && c->ndirty){
  1831. /*
  1832. * All the blocks are being written right now -- there's nothing to do.
  1833. * We might be spinning with cacheFlush though -- he'll just keep
  1834. * kicking us until c->ndirty goes down. Probably we should sleep
  1835. * on something that the diskThread can kick, but for now we'll
  1836. * just pause for a little while waiting for disks to finish.
  1837. */
  1838. sleep(100);
  1839. }
  1840. vtLock(c->lk);
  1841. vtWakeupAll(c->flushwait);
  1842. }
  1843. c->ref--;
  1844. vtWakeup(c->die);
  1845. vtUnlock(c->lk);
  1846. }
  1847. /*
  1848. * Flush the cache.
  1849. */
  1850. void
  1851. cacheFlush(Cache *c, int wait)
  1852. {
  1853. /*
  1854. * Lock c->dirtylk so that more blocks aren't being dirtied
  1855. * while we try to write out what's already here.
  1856. * Otherwise we might not ever finish!
  1857. */
  1858. vtLock(c->dirtylk);
  1859. vtLock(c->lk);
  1860. if(wait){
  1861. while(c->ndirty){
  1862. // consPrint("cacheFlush: %d dirty blocks, uhead %p\n",
  1863. // c->ndirty, c->uhead);
  1864. vtWakeup(c->flush);
  1865. vtSleep(c->flushwait);
  1866. }
  1867. // consPrint("cacheFlush: done (uhead %p)\n", c->ndirty, c->uhead);
  1868. }else if(c->ndirty)
  1869. vtWakeup(c->flush);
  1870. vtUnlock(c->lk);
  1871. vtUnlock(c->dirtylk);
  1872. }
  1873. /*
  1874. * Kick the flushThread every 30 seconds.
  1875. */
  1876. static void
  1877. cacheSync(void *v)
  1878. {
  1879. Cache *c;
  1880. c = v;
  1881. cacheFlush(c, 0);
  1882. }