dcache.c

/*
 * Disk cache.
 *
 * Caches raw disk blocks. Getdblock() gets a block, putdblock puts it back.
 * Getdblock has a mode parameter that determines i/o and access to a block:
 * if mode is OREAD or ORDWR, it is read from disk if not already in memory.
 * If mode is ORDWR or OWRITE, it is locked for exclusive use before being returned.
 * It is *not* marked dirty -- once changes have been made, they should be noted
 * by using dirtydblock() before putdblock().
 *
 * There is a global cache lock as well as a lock on each block.
 * Within a thread, the cache lock can be acquired while holding a block lock,
 * but not vice versa; and a block cannot be locked if you already hold the lock
 * on another block.
 *
 * The flush proc writes out dirty blocks in batches, one batch per dirty tag.
 * For example, the DirtyArena blocks are all written to disk before any of the
 * DirtyArenaCib blocks.
 *
 * This code used to be in charge of flushing the dirty index blocks out to
 * disk, but updating the index turned out to benefit from extra care.
 * Now cached index blocks are never marked dirty. The index.c code takes
 * care of updating them behind our back, and uses _getdblock to update any
 * cached copies of the blocks as it changes them on disk.
 */
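/*
 * A minimal usage sketch (illustrative only; the partition, address,
 * offset, buffer, and length below are assumed to come from the caller,
 * and error handling beyond the nil check is omitted):
 *
 *	b = getdblock(part, addr, ORDWR);
 *	if(b == nil)
 *		return -1;
 *	memmove(&b->data[off], buf, n);
 *	dirtydblock(b, DirtyArena);
 *	putdblock(b);
 */
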
#include "stdinc.h"
#include "dat.h"
#include "fns.h"

typedef struct DCache DCache;

enum
{
	HashLog = 9,
	HashSize = 1<<HashLog,
	HashMask = HashSize - 1,
};

struct DCache
{
	QLock	lock;
	RWLock	dirtylock;	/* must be held to inspect or set b->dirty */
	Rendez	full;
	Round	round;
	DBlock	*free;		/* list of available lumps */
	u32int	now;		/* ticks for usage timestamps */
	int	size;		/* max. size of any block; allocated to each block */
	DBlock	**heads;	/* hash table for finding address */
	int	nheap;		/* number of available victims */
	DBlock	**heap;		/* heap for locating victims */
	int	nblocks;	/* number of blocks allocated */
	DBlock	*blocks;	/* array of block descriptors */
	DBlock	**write;	/* array of block pointers to be written */
	u8int	*mem;		/* memory for all block descriptors */
	int	ndirty;		/* number of dirty blocks */
	int	maxdirty;	/* max. number of dirty blocks */
};

typedef struct Ra Ra;
struct Ra
{
	Part *part;
	u64int addr;
};

static DCache	dcache;

static int	downheap(int i, DBlock *b);
static int	upheap(int i, DBlock *b);
static DBlock	*bumpdblock(void);
static void	delheap(DBlock *db);
static void	fixheap(int i, DBlock *b);
static void	flushproc(void*);
static void	writeproc(void*);

void
initdcache(u32int mem)
{
	DBlock *b, *last;
	u32int nblocks, blocksize;
	int i;
	u8int *p;

	if(mem < maxblocksize * 2)
		sysfatal("need at least %d bytes for the disk cache", maxblocksize * 2);
	if(maxblocksize == 0)
		sysfatal("no max. block size given for disk cache");
	blocksize = maxblocksize;
	nblocks = mem / blocksize;
	dcache.full.l = &dcache.lock;
	dcache.nblocks = nblocks;
	dcache.maxdirty = (nblocks * 2) / 3;
	trace(TraceProc, "initialize disk cache with %d blocks of %d bytes, maximum %d dirty blocks\n",
		nblocks, blocksize, dcache.maxdirty);
	dcache.size = blocksize;
	dcache.heads = MKNZ(DBlock*, HashSize);
	dcache.heap = MKNZ(DBlock*, nblocks);
	dcache.blocks = MKNZ(DBlock, nblocks);
	dcache.write = MKNZ(DBlock*, nblocks);
	dcache.mem = MKNZ(u8int, (nblocks+1+128) * blocksize);

	last = nil;
	p = (u8int*)(((ulong)dcache.mem+blocksize-1)&~(ulong)(blocksize-1));
	for(i = 0; i < nblocks; i++){
		b = &dcache.blocks[i];
		b->data = &p[i * blocksize];
		b->heap = TWID32;
		b->writedonechan = chancreate(sizeof(void*), 1);
		b->next = last;
		last = b;
	}
	dcache.free = last;
	dcache.nheap = 0;
	setstat(StatDcacheSize, nblocks);
	initround(&dcache.round, "dcache", 120*1000);

	vtproc(flushproc, nil);
	vtproc(delaykickroundproc, &dcache.round);
}

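/*
 * Hash a partition block address into the chain table: fold the 64-bit
 * address down to 32 bits, then keep the top HashLog bits of a
 * multiplicative hash.
 */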
static u32int
pbhash(u64int addr)
{
	u32int h;

#define hashit(c) ((((c) * 0x6b43a9b5) >> (32 - HashLog)) & HashMask)
	h = (addr >> 32) ^ addr;
	return hashit(h);
}

DBlock*
getdblock(Part *part, u64int addr, int mode)
{
	DBlock *b;
	uint ms;

	ms = msec();
	b = _getdblock(part, addr, mode, 1);
	if(mode == OREAD || mode == ORDWR)
		addstat(StatDcacheRead, 1);
	if(mode == OWRITE || mode == ORDWR)
		addstat(StatDcacheWrite, 1);
	ms = msec() - ms;
	addstat2(StatDcacheLookup, 1, StatDcacheLookupTime, ms);
	return b;
}

DBlock*
_getdblock(Part *part, u64int addr, int mode, int load)
{
	DBlock *b;
	u32int h, size;

	trace(TraceBlock, "getdblock enter %s 0x%llux", part->name, addr);
	size = part->blocksize;
	if(size > dcache.size){
		seterr(EAdmin, "block size %d too big for cache with size %d", size, dcache.size);
		return nil;
	}
	h = pbhash(addr);

	/*
	 * look for the block in the cache
	 */
	qlock(&dcache.lock);
again:
	for(b = dcache.heads[h]; b != nil; b = b->next){
		if(b->part == part && b->addr == addr){
			if(load)
				addstat(StatDcacheHit, 1);
			goto found;
		}
	}

	/*
	 * missed: locate the block with the oldest second to last use.
	 * remove it from the heap, and fix up the heap.
	 */
	if(!load){
		qunlock(&dcache.lock);
		return nil;
	}
	addstat(StatDcacheMiss, 1);

	b = bumpdblock();
	if(b == nil){
		trace(TraceBlock, "all disk cache blocks in use");
		addstat(StatDcacheStall, 1);
		rsleep(&dcache.full);
		addstat(StatDcacheStall, -1);
		goto again;
	}
	assert(!b->dirty);

	/*
	 * the new block has no last use, so assume it happens sometime in the middle
	 * ZZZ this is not reasonable
	 */
	b->used = (b->used2 + dcache.now) / 2;

	/*
	 * rechain the block on the correct hash chain
	 */
	b->next = dcache.heads[h];
	dcache.heads[h] = b;
	if(b->next != nil)
		b->next->prev = b;
	b->prev = nil;

	b->addr = addr;
	b->part = part;
	b->size = 0;

found:
	b->ref++;
	b->used2 = b->used;
	b->used = dcache.now++;
	if(b->heap != TWID32)
		fixheap(b->heap, b);

	if((mode == ORDWR || mode == OWRITE) && part->writechan == nil){
		trace(TraceBlock, "getdblock allocwriteproc %s", part->name);
		part->writechan = chancreate(sizeof(DBlock*), dcache.nblocks);
		vtproc(writeproc, part);
	}
	qunlock(&dcache.lock);

	trace(TraceBlock, "getdblock lock");
	addstat(StatDblockStall, 1);
	if(mode == OREAD)
		rlock(&b->lock);
	else
		wlock(&b->lock);
	addstat(StatDblockStall, -1);
	trace(TraceBlock, "getdblock locked");

	if(b->size != size){
		if(mode == OREAD){
			addstat(StatDblockStall, 1);
			runlock(&b->lock);
			wlock(&b->lock);
			addstat(StatDblockStall, -1);
		}
		if(b->size < size){
			if(mode == OWRITE)
				memset(&b->data[b->size], 0, size - b->size);
			else{
				trace(TraceBlock, "getdblock readpart %s 0x%llux", part->name, addr);
				diskaccess(0);
				if(readpart(part, addr + b->size, &b->data[b->size], size - b->size) < 0){
					b->mode = ORDWR;	/* so putdblock wunlocks */
					putdblock(b);
					return nil;
				}
				trace(TraceBlock, "getdblock readpartdone");
				addstat(StatApartRead, 1);
				addstat(StatApartReadBytes, size-b->size);
			}
		}
		b->size = size;
		if(mode == OREAD){
			addstat(StatDblockStall, 1);
			wunlock(&b->lock);
			rlock(&b->lock);
			addstat(StatDblockStall, -1);
		}
	}

	b->mode = mode;
	trace(TraceBlock, "getdblock exit");
	return b;
}

void
putdblock(DBlock *b)
{
	if(b == nil)
		return;

	trace(TraceBlock, "putdblock %s 0x%llux", b->part->name, b->addr);

	if(b->mode == OREAD)
		runlock(&b->lock);
	else
		wunlock(&b->lock);

	qlock(&dcache.lock);
	if(--b->ref == 0 && !b->dirty){
		if(b->heap == TWID32)
			upheap(dcache.nheap++, b);
		rwakeupall(&dcache.full);
	}
	qunlock(&dcache.lock);
}

void
dirtydblock(DBlock *b, int dirty)
{
	int odirty;

	trace(TraceBlock, "dirtydblock enter %s 0x%llux %d from 0x%lux",
		b->part->name, b->addr, dirty, getcallerpc(&b));
	assert(b->ref != 0);
	assert(b->mode==ORDWR || b->mode==OWRITE);

	odirty = b->dirty;
	if(b->dirty)
		assert(b->dirty == dirty);
	else
		b->dirty = dirty;

	qlock(&dcache.lock);
	if(!odirty){
		dcache.ndirty++;
		setstat(StatDcacheDirty, dcache.ndirty);
		if(dcache.ndirty >= dcache.maxdirty)
			kickround(&dcache.round, 0);
		else
			delaykickround(&dcache.round);
	}
	qunlock(&dcache.lock);
}

static void
unchain(DBlock *b)
{
	ulong h;

	/*
	 * unchain the block
	 */
	if(b->prev == nil){
		h = pbhash(b->addr);
		if(dcache.heads[h] != b)
			sysfatal("bad hash chains in disk cache");
		dcache.heads[h] = b->next;
	}else
		b->prev->next = b->next;
	if(b->next != nil)
		b->next->prev = b->prev;
}

/*
 * remove some block from use and update the free list and counters
 */
static DBlock*
bumpdblock(void)
{
	DBlock *b;

	trace(TraceBlock, "bumpdblock enter");
	b = dcache.free;
	if(b != nil){
		dcache.free = b->next;
		return b;
	}

	if(dcache.ndirty >= dcache.maxdirty)
		kickdcache();

	/*
	 * remove blocks until we find one that is unused;
	 * referenced blocks are left in the heap even though
	 * they can't be scavenged; this is simply a speed optimization
	 */
	for(;;){
		if(dcache.nheap == 0){
			kickdcache();
			trace(TraceBlock, "bumpdblock gotnothing");
			return nil;
		}
		b = dcache.heap[0];
		delheap(b);
		if(!b->ref && !b->dirty)
			break;
	}

	trace(TraceBlock, "bumpdblock bumping %s 0x%llux", b->part->name, b->addr);
	unchain(b);
	return b;
}

void
emptydcache(void)
{
	DBlock *b;

	qlock(&dcache.lock);
	while(dcache.nheap > 0){
		b = dcache.heap[0];
		delheap(b);
		if(!b->ref && !b->dirty){
			unchain(b);
			b->next = dcache.free;
			dcache.free = b;
		}
	}
	qunlock(&dcache.lock);
}

/*
 * delete an arbitrary block from the heap
 */
static void
delheap(DBlock *db)
{
	if(db->heap == TWID32)
		return;
	fixheap(db->heap, dcache.heap[--dcache.nheap]);
	db->heap = TWID32;
}

/*
 * push an element up or down to its correct new location
 */
static void
fixheap(int i, DBlock *b)
{
	if(upheap(i, b) == i)
		downheap(i, b);
}

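/*
 * The victim heap is a min-heap keyed on second-to-last use time.
 * Comparisons are done as (used2 - now) in unsigned arithmetic so the
 * ordering stays correct even after dcache.now wraps around.
 */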
static int
upheap(int i, DBlock *b)
{
	DBlock *bb;
	u32int now;
	int p;

	now = dcache.now;
	for(; i != 0; i = p){
		p = (i - 1) >> 1;
		bb = dcache.heap[p];
		if(b->used2 - now >= bb->used2 - now)
			break;
		dcache.heap[i] = bb;
		bb->heap = i;
	}

	dcache.heap[i] = b;
	b->heap = i;
	return i;
}

static int
downheap(int i, DBlock *b)
{
	DBlock *bb;
	u32int now;
	int k;

	now = dcache.now;
	for(; ; i = k){
		k = (i << 1) + 1;
		if(k >= dcache.nheap)
			break;
		if(k + 1 < dcache.nheap && dcache.heap[k]->used2 - now > dcache.heap[k + 1]->used2 - now)
			k++;
		bb = dcache.heap[k];
		if(b->used2 - now <= bb->used2 - now)
			break;
		dcache.heap[i] = bb;
		bb->heap = i;
	}

	dcache.heap[i] = b;
	b->heap = i;
	return i;
}

static void
findblock(DBlock *bb)
{
	DBlock *b, *last;
	int h;

	last = nil;
	h = pbhash(bb->addr);
	for(b = dcache.heads[h]; b != nil; b = b->next){
		if(last != b->prev)
			sysfatal("bad prev link");
		if(b == bb)
			return;
		last = b;
	}
	sysfatal("block missing from hash table");
}

void
checkdcache(void)
{
	DBlock *b;
	u32int size, now;
	int i, k, refed, nfree;

	qlock(&dcache.lock);
	size = dcache.size;
	now = dcache.now;
	for(i = 0; i < dcache.nheap; i++){
		if(dcache.heap[i]->heap != i)
			sysfatal("dc: mis-heaped at %d: %d", i, dcache.heap[i]->heap);
		if(i > 0 && dcache.heap[(i - 1) >> 1]->used2 - now > dcache.heap[i]->used2 - now)
			sysfatal("dc: bad heap ordering");
		k = (i << 1) + 1;
		if(k < dcache.nheap && dcache.heap[i]->used2 - now > dcache.heap[k]->used2 - now)
			sysfatal("dc: bad heap ordering");
		k++;
		if(k < dcache.nheap && dcache.heap[i]->used2 - now > dcache.heap[k]->used2 - now)
			sysfatal("dc: bad heap ordering");
	}

	refed = 0;
	for(i = 0; i < dcache.nblocks; i++){
		b = &dcache.blocks[i];
		if(b->data != &dcache.mem[i * size])
			sysfatal("dc: mis-blocked at %d", i);
		if(b->ref && b->heap == TWID32)
			refed++;
		if(b->addr)
			findblock(b);
		if(b->heap != TWID32
		&& dcache.heap[b->heap] != b)
			sysfatal("dc: spurious heap value");
	}

	nfree = 0;
	for(b = dcache.free; b != nil; b = b->next){
		if(b->addr != 0 || b->heap != TWID32)
			sysfatal("dc: bad free list");
		nfree++;
	}

	if(dcache.nheap + nfree + refed != dcache.nblocks)
		sysfatal("dc: missing blocks: %d %d %d", dcache.nheap, refed, dcache.nblocks);
	qunlock(&dcache.lock);
}

void
flushdcache(void)
{
	trace(TraceProc, "flushdcache enter");
	kickround(&dcache.round, 1);
	trace(TraceProc, "flushdcache exit");
}

void
kickdcache(void)
{
	kickround(&dcache.round, 0);
}

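/*
 * Write out, in parallel, the leading run of blocks in b..eb whose dirty
 * tag equals dirty: send each block to its partition's write proc, wait
 * for all of them to finish, flush the affected partitions, and return
 * the number of blocks handled.
 */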
static int
parallelwrites(DBlock **b, DBlock **eb, int dirty)
{
	DBlock **p, **q;
	Part *part;

	for(p=b; p<eb && (*p)->dirty == dirty; p++){
		assert(b<=p && p<eb);
		sendp((*p)->part->writechan, *p);
	}
	q = p;
	for(p=b; p<q; p++){
		assert(b<=p && p<eb);
		recvp((*p)->writedonechan);
	}

	/*
	 * Flush the partitions that have been written to.
	 */
	part = nil;
	for(p=b; p<q; p++){
		if(part != (*p)->part){
			part = (*p)->part;
			flushpart(part);	/* what if it fails? */
		}
	}

	return p-b;
}

/*
 * Sort first by dirty flag, then by partition, then by address in partition.
 */
static int
writeblockcmp(const void *va, const void *vb)
{
	DBlock *a, *b;

	a = *(DBlock**)va;
	b = *(DBlock**)vb;

	if(a->dirty != b->dirty)
		return a->dirty - b->dirty;
	if(a->part != b->part){
		if(a->part < b->part)
			return -1;
		if(a->part > b->part)
			return 1;
	}
	if(a->addr < b->addr)
		return -1;
	return 1;
}

static void
flushproc(void *v)
{
	int i, j, n;
	ulong t0;
	DBlock *b, **write;

	USED(v);
	threadsetname("flushproc");
	for(;;){
		waitforkick(&dcache.round);

		trace(TraceWork, "start");
		t0 = nsec()/1000;

		trace(TraceProc, "build t=%lud", (ulong)(nsec()/1000)-t0);
		write = dcache.write;
		n = 0;
		for(i=0; i<dcache.nblocks; i++){
			b = &dcache.blocks[i];
			if(b->dirty)
				write[n++] = b;
		}

		qsort(write, n, sizeof(write[0]), writeblockcmp);

		/* Write each stage of blocks out. */
		trace(TraceProc, "writeblocks t=%lud", (ulong)(nsec()/1000)-t0);
		i = 0;
		for(j=1; j<DirtyMax; j++){
			trace(TraceProc, "writeblocks.%d t=%lud",
				j, (ulong)(nsec()/1000)-t0);
			i += parallelwrites(write+i, write+n, j);
		}
		if(i != n){
			fprint(2, "in flushproc i=%d n=%d\n", i, n);
			for(i=0; i<n; i++)
				fprint(2, "\tblock %d: dirty=%d\n",
					i, write[i]->dirty);
			abort();
		}

		/*
		 * b->dirty is protected by b->lock while ndirty is protected
		 * by dcache.lock, so the --ndirty below is the delayed one
		 * from clearing b->dirty in the write proc. It may happen
		 * that some other proc has come along and redirtied b since
		 * the write. That's okay, it just means that ndirty may be
		 * one too high until we catch up and do the decrement.
		 */
		trace(TraceProc, "undirty.%d t=%lud", j, (ulong)(nsec()/1000)-t0);
		qlock(&dcache.lock);
		for(i=0; i<n; i++){
			b = write[i];
			--dcache.ndirty;
			if(b->ref == 0 && b->heap == TWID32){
				upheap(dcache.nheap++, b);
				rwakeupall(&dcache.full);
			}
		}
		setstat(StatDcacheDirty, dcache.ndirty);
		qunlock(&dcache.lock);

		addstat(StatDcacheFlush, 1);
		trace(TraceWork, "finish");
	}
}

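/*
 * One writeproc runs per partition, started lazily by _getdblock the
 * first time the partition is opened for writing. It takes blocks off
 * the partition's writechan, writes them to disk, clears their dirty
 * flags, and acknowledges each one on its writedonechan.
 */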
static void
writeproc(void *v)
{
	DBlock *b;
	Part *p;

	p = v;

	threadsetname("writeproc:%s", p->name);
	for(;;){
		b = recvp(p->writechan);
		trace(TraceWork, "start");
		assert(b->part == p);
		trace(TraceProc, "wlock %s 0x%llux", p->name, b->addr);
		wlock(&b->lock);
		trace(TraceProc, "writepart %s 0x%llux", p->name, b->addr);
		diskaccess(0);
		if(writepart(p, b->addr, b->data, b->size) < 0)
			fprint(2, "%s: writeproc: part %s addr 0x%llux: write error: %r\n",
				argv0, p->name, b->addr);
		addstat(StatApartWrite, 1);
		addstat(StatApartWriteBytes, b->size);
		b->dirty = 0;
		wunlock(&b->lock);
		trace(TraceProc, "finish %s 0x%llux", p->name, b->addr);
		trace(TraceWork, "finish");
		sendp(b->writedonechan, b);
	}
}