/* cache.c — mount-point data cache */
  1. /*
  2. * This file is part of the UCB release of Plan 9. It is subject to the license
  3. * terms in the LICENSE file found in the top-level directory of this
  4. * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
  5. * part of the UCB release of Plan 9, including this file, may be copied,
  6. * modified, propagated, or distributed except according to the terms contained
  7. * in the LICENSE file.
  8. */
  9. #include "u.h"
  10. #include "../port/lib.h"
  11. #include "mem.h"
  12. #include "dat.h"
  13. #include "fns.h"
/*
 * Tuning constants for the mount cache.
 */
enum
{
	NHASH = 128,		/* number of Mntcache hash chains */
	MAXCACHE = 1024*1024,	/* don't cache data at file offsets beyond this */
	NFILE = 4096,		/* Mntcache headers allocated once at cinit */
	NEXTENT = 200, /* extent allocation size */
};
typedef struct Extent Extent;
/*
 * One contiguous run of cached file bytes, backed by a single page
 * in the fscache image.  Extents on an Mntcache list are kept in
 * ascending `start` order and do not overlap.
 */
struct Extent
{
	int bid;		/* block id: page daddr used with lookpage(&fscache, ...) */
	uint32_t start;		/* file offset of the first byte in this extent */
	int len;		/* valid bytes in the extent (at most one big page) */
	Page *cache;		/* hint to the backing page; revalidated by cpage() */
	Extent *next;		/* next extent in offset order */
};
typedef struct Mntcache Mntcache;
/*
 * Per-file cache header.  (qid, devno, dev) identify the cached
 * file; the QLock serializes all access to the extent list.
 */
struct Mntcache
{
	Qid qid;		/* identity and version of the cached file */
	uint devno;		/* device instance the file lives on */
	Dev* dev;		/* device type */
	QLock QLock;		/* guards `list` and the extents on it */
	Extent *list;		/* cached data runs, sorted by start offset */
	Mntcache *hash;		/* next entry on the same hash chain */
	Mntcache *prev;		/* LRU list: toward least recently used */
	Mntcache *next;		/* LRU list: toward most recently used */
};
typedef struct Cache Cache;
/*
 * Global table of Mntcache headers: an LRU list (head = next
 * victim, tail = most recently used) plus a hash on qid.path.
 * The Lock protects the LRU list, the hash chains and pgno.
 */
struct Cache
{
	Lock Lock;
	int pgno;		/* next block id handed out by cchain() */
	Mntcache *head;		/* least recently used header */
	Mntcache *tail;		/* most recently used header */
	Mntcache *hash[NHASH];
};
typedef struct Ecache Ecache;
/*
 * Free list of Extent structures, grown in batches of NEXTENT
 * and never returned to the kernel allocator.
 */
struct Ecache
{
	Lock Lock;
	int total;		/* extents ever allocated */
	int free;		/* extents currently on the free list */
	Extent* head;		/* free list */
};
static Image fscache;			/* pseudo-image that owns all cached pages */
static Cache cache;			/* global header table (LRU + hash) */
static Ecache ecache;			/* global extent free list */
static int maxcache = MAXCACHE;		/* offset limit; see commented tuning in cinit */
  63. static void
  64. extentfree(Extent* e)
  65. {
  66. lock(&ecache.Lock);
  67. e->next = ecache.head;
  68. ecache.head = e;
  69. ecache.free++;
  70. unlock(&ecache.Lock);
  71. }
  72. static Extent*
  73. extentalloc(void)
  74. {
  75. Extent *e;
  76. int i;
  77. lock(&ecache.Lock);
  78. if(ecache.head == nil){
  79. e = malloc(NEXTENT*sizeof(Extent));
  80. if(e == nil){
  81. unlock(&ecache.Lock);
  82. return nil;
  83. }
  84. for(i = 0; i < NEXTENT; i++){
  85. e->next = ecache.head;
  86. ecache.head = e;
  87. e++;
  88. }
  89. ecache.free += NEXTENT;
  90. ecache.total += NEXTENT;
  91. }
  92. e = ecache.head;
  93. ecache.head = e->next;
  94. memset(e, 0, sizeof(Extent));
  95. ecache.free--;
  96. unlock(&ecache.Lock);
  97. return e;
  98. }
  99. void
  100. cinit(void)
  101. {
  102. int i;
  103. Mntcache *mc;
  104. if((cache.head = malloc(sizeof(Mntcache)*NFILE)) == nil)
  105. panic("cinit: no memory");
  106. mc = cache.head;
  107. /* a better algorithm would be nice */
  108. // if(conf.npage*PGSZ > 200*MB)
  109. // maxcache = 10*MAXCACHE;
  110. // if(conf.npage*PGSZ > 400*MB)
  111. // maxcache = 50*MAXCACHE;
  112. for(i = 0; i < NFILE-1; i++) {
  113. mc->next = mc+1;
  114. mc->prev = mc-1;
  115. mc++;
  116. }
  117. cache.tail = mc;
  118. cache.tail->next = 0;
  119. cache.head->prev = 0;
  120. fscache.notext = 1;
  121. }
  122. Page*
  123. cpage(Extent *e)
  124. {
  125. /* Easy consistency check */
  126. if(e->cache->daddr != e->bid)
  127. return 0;
  128. return lookpage(&fscache, e->bid);
  129. }
  130. void
  131. cnodata(Mntcache *mc)
  132. {
  133. Extent *e, *n;
  134. /*
  135. * Invalidate all extent data
  136. * Image lru will waste the pages
  137. */
  138. for(e = mc->list; e; e = n) {
  139. n = e->next;
  140. extentfree(e);
  141. }
  142. mc->list = 0;
  143. }
  144. void
  145. ctail(Mntcache *mc)
  146. {
  147. /* Unlink and send to the tail */
  148. if(mc->prev)
  149. mc->prev->next = mc->next;
  150. else
  151. cache.head = mc->next;
  152. if(mc->next)
  153. mc->next->prev = mc->prev;
  154. else
  155. cache.tail = mc->prev;
  156. if(cache.tail) {
  157. mc->prev = cache.tail;
  158. cache.tail->next = mc;
  159. mc->next = 0;
  160. cache.tail = mc;
  161. }
  162. else {
  163. cache.head = mc;
  164. cache.tail = mc;
  165. mc->prev = 0;
  166. mc->next = 0;
  167. }
  168. }
/*
 * Bind a cache entry to chan c at open time.  On a hash hit the
 * entry is refreshed (a version change flushes its data); on a
 * miss the LRU victim is stripped, rebound to this file and
 * rehashed.  Directories and append-only files are not cached.
 */
void
copen(Chan *c)
{
	int h;
	Extent *e, *next;
	Mntcache *mc, *f, **l;

	/* directories aren't cacheable and append-only files confuse us */
	if(c->qid.type&(QTDIR|QTAPPEND))
		return;

	h = c->qid.path%NHASH;
	lock(&cache.Lock);
	for(mc = cache.hash[h]; mc != nil; mc = mc->hash) {
		if(mc->qid.path == c->qid.path)
		if(mc->qid.type == c->qid.type)
		if(mc->devno == c->devno && mc->dev == c->dev) {
			c->mc = mc;
			ctail(mc);
			unlock(&cache.Lock);

			/* File was updated, invalidate cache */
			if(mc->qid.vers != c->qid.vers) {
				mc->qid.vers = c->qid.vers;
				qlock(&mc->QLock);
				cnodata(mc);
				qunlock(&mc->QLock);
			}
			return;
		}
	}

	/* LRU the cache headers */
	mc = cache.head;

	/* unchain the victim from its old hash bucket */
	l = &cache.hash[mc->qid.path%NHASH];
	for(f = *l; f; f = f->hash) {
		if(f == mc) {
			*l = mc->hash;
			break;
		}
		l = &f->hash;
	}

	/* rebind the header to this file and rehash it */
	mc->qid = c->qid;
	mc->devno = c->devno;
	mc->dev = c->dev;

	l = &cache.hash[h];
	mc->hash = *l;
	*l = mc;
	ctail(mc);

	/*
	 * qlock is acquired while still holding cache.Lock so no
	 * reader can see the rebound header with the old extents;
	 * the hit path above drops cache.Lock before qlock, so the
	 * two locks are never nested in the opposite order.
	 */
	qlock(&mc->QLock);
	c->mc = mc;
	e = mc->list;
	mc->list = 0;
	unlock(&cache.Lock);

	/* free the victim's old extents outside the cache lock */
	while(e) {
		next = e->next;
		extentfree(e);
		e = next;
	}
	qunlock(&mc->QLock);
}
  226. static int
  227. cdev(Mntcache *mc, Chan *c)
  228. {
  229. if(mc->qid.path != c->qid.path)
  230. return 0;
  231. if(mc->qid.type != c->qid.type)
  232. return 0;
  233. if(mc->devno != c->devno)
  234. return 0;
  235. if(mc->dev != c->dev)
  236. return 0;
  237. if(mc->qid.vers != c->qid.vers)
  238. return 0;
  239. return 1;
  240. }
/*
 * Satisfy as much as possible of a read of len bytes at offset off
 * from the cache.  Returns the number of bytes copied from
 * consecutive extents starting exactly at off; 0 means the caller
 * must go to the server.  Copying stops at the first gap in the
 * extent list or at a reclaimed page.
 */
int
cread(Chan *c, uint8_t *buf, int len, int64_t off)
{
	Proc *up = externup();
	KMap *k;
	Page *p;
	Mntcache *mc;
	Extent *e, **t;
	int o, l, total;
	uint32_t offset;

	/* reads past the cacheable window are never cached */
	if(off+len > maxcache)
		return 0;

	mc = c->mc;
	if(mc == nil)
		return 0;

	qlock(&mc->QLock);
	if(cdev(mc, c) == 0) {
		/* stale entry: header was recycled or version changed */
		qunlock(&mc->QLock);
		return 0;
	}

	/* find the extent containing off; t tracks the link to patch */
	offset = off;
	t = &mc->list;
	for(e = *t; e; e = e->next) {
		if(offset >= e->start && offset < e->start+e->len)
			break;
		t = &e->next;
	}

	if(e == 0) {
		qunlock(&mc->QLock);
		return 0;
	}

	total = 0;
	while(len) {
		p = cpage(e);
		if(p == 0) {
			/* page reclaimed: unlink the dead extent and stop */
			*t = e->next;
			extentfree(e);
			qunlock(&mc->QLock);
			return total;
		}

		/* copy the overlap of [offset, offset+len) with this extent */
		o = offset - e->start;
		l = len;
		if(l > e->len-o)
			l = e->len-o;

		k = kmap(p);
		if(waserror()) {
			/* buf may be a faulting user address */
			kunmap(k);
			putpage(p);
			qunlock(&mc->QLock);
			nexterror();
		}

		memmove(buf, (uint8_t*)VA(k) + o, l);

		poperror();
		kunmap(k);

		putpage(p);

		buf += l;
		len -= l;
		offset += l;
		total += l;
		t = &e->next;
		e = e->next;
		/* continue only while the next extent is contiguous */
		if(e == 0 || e->start != offset)
			break;
	}

	qunlock(&mc->QLock);
	return total;
}
  308. Extent*
  309. cchain(uint8_t *buf, uint32_t offset, int len, Extent **tail)
  310. {
  311. Proc *up = externup();
  312. int l;
  313. Page *p;
  314. KMap *k;
  315. Extent *e, *start, **t;
  316. start = 0;
  317. *tail = 0;
  318. t = &start;
  319. while(len) {
  320. e = extentalloc();
  321. if(e == 0)
  322. break;
  323. p = auxpage(BIGPGSZ);
  324. if(p == 0) {
  325. extentfree(e);
  326. break;
  327. }
  328. l = len;
  329. if(l > BIGPGSZ)
  330. l = BIGPGSZ;
  331. e->cache = p;
  332. e->start = offset;
  333. e->len = l;
  334. lock(&cache.Lock);
  335. e->bid = cache.pgno;
  336. cache.pgno += BIGPGSZ;
  337. /* wrap the counter; low bits are unused by pghash but checked by lookpage */
  338. if((cache.pgno & ~(BIGPGSZ-1)) == 0){
  339. if(cache.pgno == BIGPGSZ-1){
  340. print("cache wrapped\n");
  341. cache.pgno = 0;
  342. }else
  343. cache.pgno++;
  344. }
  345. unlock(&cache.Lock);
  346. p->daddr = e->bid;
  347. k = kmap(p);
  348. if(waserror()) { /* buf may be virtual */
  349. kunmap(k);
  350. nexterror();
  351. }
  352. memmove((void*)VA(k), buf, l);
  353. poperror();
  354. kunmap(k);
  355. cachepage(p, &fscache);
  356. putpage(p);
  357. buf += l;
  358. offset += l;
  359. len -= l;
  360. *t = e;
  361. *tail = e;
  362. t = &e->next;
  363. }
  364. return start;
  365. }
  366. int
  367. cpgmove(Extent *e, uint8_t *buf, int boff, int len)
  368. {
  369. Proc *up = externup();
  370. Page *p;
  371. KMap *k;
  372. p = cpage(e);
  373. if(p == 0)
  374. return 0;
  375. k = kmap(p);
  376. if(waserror()) { /* Since buf may be virtual */
  377. kunmap(k);
  378. nexterror();
  379. }
  380. memmove((uint8_t*)VA(k)+boff, buf, len);
  381. poperror();
  382. kunmap(k);
  383. putpage(p);
  384. return 1;
  385. }
/*
 * Fold data just read from the server into the cache for chan c.
 * Unlike cwrite this does not bump qid.vers: the file did not
 * change.  Incoming data is trimmed against both the preceding
 * and following extents so existing cached bytes are never
 * overwritten; what remains is packed into the predecessor when
 * possible, otherwise chained in as new extents.
 */
void
cupdate(Chan *c, uint8_t *buf, int len, int64_t off)
{
	Mntcache *mc;
	Extent *tail;
	Extent *e, *f, *p;
	int o, ee, eblock;
	uint32_t offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;
	qlock(&mc->QLock);
	if(cdev(mc, c) == 0) {
		/* stale entry: header recycled or version changed */
		qunlock(&mc->QLock);
		return;
	}

	/*
	 * Find the insertion point
	 */
	offset = off;
	p = 0;		/* predecessor extent, nil if inserting at head */
	for(f = mc->list; f; f = f->next) {
		if(f->start > offset)
			break;
		p = f;
	}

	/* trim if there is a successor */
	eblock = offset+len;
	if(f != 0 && eblock > f->start) {
		len -= (eblock - f->start);
		if(len <= 0) {
			/* everything is already cached by the successor */
			qunlock(&mc->QLock);
			return;
		}
	}

	if(p == 0) {		/* at the head */
		e = cchain(buf, offset, len, &tail);
		if(e != 0) {
			mc->list = e;
			tail->next = f;
		}
		qunlock(&mc->QLock);
		return;
	}

	/* trim to the predecessor */
	ee = p->start+p->len;
	if(offset < ee) {
		o = ee - offset;
		len -= o;
		if(len <= 0) {
			qunlock(&mc->QLock);
			return;
		}
		buf += o;
		offset += o;
	}

	/* try and pack data into the predecessor */
	if(offset == ee && p->len < BIGPGSZ) {
		o = len;
		if(o > BIGPGSZ - p->len)
			o = BIGPGSZ - p->len;
		if(cpgmove(p, buf, p->len, o)) {
			p->len += o;
			buf += o;
			len -= o;
			offset += o;
			if(len <= 0) {
				/* sanity check: packing must not overlap the successor */
				if(f && p->start + p->len > f->start)
					print("CACHE: p->start=%lu p->len=%d f->start=%lu\n", p->start, p->len, f->start);
				qunlock(&mc->QLock);
				return;
			}
		}
	}

	/* chain whatever is left between predecessor and successor */
	e = cchain(buf, offset, len, &tail);
	if(e != 0) {
		p->next = e;
		tail->next = f;
	}
	qunlock(&mc->QLock);
}
/*
 * Record a write of len bytes at offset off into the cache for
 * chan c.  The file version is advanced on both the cache entry
 * and the chan.  Unlike cupdate, new data takes precedence:
 * overlapping extents after the write point are freed, and data
 * is packed into or over the predecessor where it fits.
 */
void
cwrite(Chan* c, uint8_t *buf, int len, int64_t off)
{
	int o, eo;
	Mntcache *mc;
	uint32_t eblock, ee;
	Extent *p, *f, *e, *tail;
	uint32_t offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;
	qlock(&mc->QLock);
	if(cdev(mc, c) == 0) {
		/* stale entry: header recycled or version changed */
		qunlock(&mc->QLock);
		return;
	}

	offset = off;
	/* the write makes a new file version */
	mc->qid.vers++;
	c->qid.vers++;

	/* find predecessor p and first extent f at or after offset */
	p = 0;
	for(f = mc->list; f; f = f->next) {
		if(f->start >= offset)
			break;
		p = f;
	}

	if(p != 0) {
		ee = p->start+p->len;
		eo = offset - p->start;
		/* pack in predecessor if there is space */
		if(offset <= ee && eo < BIGPGSZ) {
			o = len;
			if(o > BIGPGSZ - eo)
				o = BIGPGSZ - eo;
			if(cpgmove(p, buf, eo, o)) {
				/* the write may extend the predecessor */
				if(eo+o > p->len)
					p->len = eo+o;
				buf += o;
				len -= o;
				offset += o;
			}
		}
	}

	/* free the overlap -- it's a rare case */
	eblock = offset+len;
	while(f && f->start < eblock) {
		e = f->next;
		extentfree(f);
		f = e;
	}

	/* link the block (if any) into the middle */
	e = cchain(buf, offset, len, &tail);
	if(e != 0) {
		tail->next = f;
		f = e;
	}

	if(p == 0)
		mc->list = f;
	else
		p->next = f;
	qunlock(&mc->QLock);
}