/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */
#include	"u.h"
#include	"../port/lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"

enum
{
	NHASH		= 128,		/* buckets in the Mntcache hash table */
	MAXCACHE	= 1024*1024,	/* default limit: cache only the first MAXCACHE bytes of a file */
	NFILE		= 4096,		/* number of Mntcache headers allocated at cinit */
	NEXTENT		= 200,		/* extent allocation size */
};
typedef struct Extent Extent;
/*
 * One contiguous run of cached file data, backed by a single page.
 */
struct Extent
{
	int	bid;		/* block id: page daddr used as the fscache lookup key */
	uint32_t start;		/* file offset of the first byte of this extent */
	int	len;		/* bytes of valid data (at most BIGPGSZ, see cchain) */
	Page	*cache;		/* page hint; must be revalidated via cpage/lookpage */
	Extent	*next;		/* next extent on the file, sorted by start */
};
typedef struct Mntcache Mntcache;
/*
 * Per-file cache header: identifies a file by qid and device and
 * anchors its list of cached extents.
 */
struct Mntcache
{
	Qid	qid;		/* identity/version of the cached file */
	uint	devno;
	Dev*	dev;
	QLock;			/* serializes access to the extent list */
	Extent	*list;		/* cached extents, sorted by file offset */
	Mntcache *hash;		/* next header in the same hash bucket */
	Mntcache *prev;		/* LRU list links (head = least recently used) */
	Mntcache *next;
};
typedef struct Cache Cache;
/*
 * Global cache state: the LRU list of headers, the hash table used
 * to find a header by qid path, and the page-id counter.
 */
struct Cache
{
	Lock;			/* guards LRU list, hash chains and pgno */
	int	pgno;		/* next block id to hand out (see cchain) */
	Mntcache *head;		/* least recently used header */
	Mntcache *tail;		/* most recently used header */
	Mntcache *hash[NHASH];
};
typedef struct Ecache Ecache;
/*
 * Free list of Extent structures, grown in batches of NEXTENT.
 */
struct Ecache
{
	Lock;			/* guards the free list and counters */
	int	total;		/* extents ever allocated */
	int	free;		/* extents currently on the free list */
	Extent*	head;		/* free list */
};
static Image fscache;			/* image that owns every cached page */
static Cache cache;			/* global cache state */
static Ecache ecache;			/* extent free list */
static int maxcache = MAXCACHE;		/* only offsets below maxcache are cached */
/*
 * extentfree: return extent e to the head of the global free list.
 */
static void
extentfree(Extent* e)
{
	lock(&ecache);
	e->next = ecache.head;
	ecache.head = e;
	ecache.free++;
	unlock(&ecache);
}
  72. static Extent*
  73. extentalloc(void)
  74. {
  75. Extent *e;
  76. int i;
  77. lock(&ecache);
  78. if(ecache.head == nil){
  79. e = malloc(NEXTENT*sizeof(Extent));
  80. if(e == nil){
  81. unlock(&ecache);
  82. return nil;
  83. }
  84. for(i = 0; i < NEXTENT; i++){
  85. e->next = ecache.head;
  86. ecache.head = e;
  87. e++;
  88. }
  89. ecache.free += NEXTENT;
  90. ecache.total += NEXTENT;
  91. }
  92. e = ecache.head;
  93. ecache.head = e->next;
  94. memset(e, 0, sizeof(Extent));
  95. ecache.free--;
  96. unlock(&ecache);
  97. return e;
  98. }
  99. void
  100. cinit(void)
  101. {
  102. int i;
  103. Mntcache *mc;
  104. if((cache.head = malloc(sizeof(Mntcache)*NFILE)) == nil)
  105. panic("cinit: no memory");
  106. mc = cache.head;
  107. /* a better algorithm would be nice */
  108. // if(conf.npage*PGSZ > 200*MB)
  109. // maxcache = 10*MAXCACHE;
  110. // if(conf.npage*PGSZ > 400*MB)
  111. // maxcache = 50*MAXCACHE;
  112. for(i = 0; i < NFILE-1; i++) {
  113. mc->next = mc+1;
  114. mc->prev = mc-1;
  115. mc++;
  116. }
  117. cache.tail = mc;
  118. cache.tail->next = 0;
  119. cache.head->prev = 0;
  120. fscache.notext = 1;
  121. }
  122. Page*
  123. cpage(Extent *e)
  124. {
  125. /* Easy consistency check */
  126. if(e->cache->daddr != e->bid)
  127. return 0;
  128. return lookpage(&fscache, e->bid);
  129. }
  130. void
  131. cnodata(Mntcache *mc)
  132. {
  133. Extent *e, *n;
  134. /*
  135. * Invalidate all extent data
  136. * Image lru will waste the pages
  137. */
  138. for(e = mc->list; e; e = n) {
  139. n = e->next;
  140. extentfree(e);
  141. }
  142. mc->list = 0;
  143. }
  144. void
  145. ctail(Mntcache *mc)
  146. {
  147. /* Unlink and send to the tail */
  148. if(mc->prev)
  149. mc->prev->next = mc->next;
  150. else
  151. cache.head = mc->next;
  152. if(mc->next)
  153. mc->next->prev = mc->prev;
  154. else
  155. cache.tail = mc->prev;
  156. if(cache.tail) {
  157. mc->prev = cache.tail;
  158. cache.tail->next = mc;
  159. mc->next = 0;
  160. cache.tail = mc;
  161. }
  162. else {
  163. cache.head = mc;
  164. cache.tail = mc;
  165. mc->prev = 0;
  166. mc->next = 0;
  167. }
  168. }
/*
 * copen: attach channel c to a cache entry.  An existing entry with
 * the same (path, type, devno, dev) is reused, invalidating its data
 * if the file version changed; otherwise the least recently used
 * header is recycled, rehashed and emptied.
 */
void
copen(Chan *c)
{
	int h;
	Extent *e, *next;
	Mntcache *mc, *f, **l;

	/* directories aren't cacheable and append-only files confuse us */
	if(c->qid.type&(QTDIR|QTAPPEND))
		return;

	h = c->qid.path%NHASH;
	lock(&cache);
	for(mc = cache.hash[h]; mc != nil; mc = mc->hash) {
		if(mc->qid.path == c->qid.path)
		if(mc->qid.type == c->qid.type)
		if(mc->devno == c->devno && mc->dev == c->dev) {
			c->mc = mc;
			ctail(mc);
			unlock(&cache);

			/* File was updated, invalidate cache */
			if(mc->qid.vers != c->qid.vers) {
				mc->qid.vers = c->qid.vers;
				qlock(mc);
				cnodata(mc);
				qunlock(mc);
			}
			return;
		}
	}

	/* LRU the cache headers */
	mc = cache.head;

	/* unlink the recycled header from its old hash chain */
	l = &cache.hash[mc->qid.path%NHASH];
	for(f = *l; f; f = f->hash) {
		if(f == mc) {
			*l = mc->hash;
			break;
		}
		l = &f->hash;
	}

	mc->qid = c->qid;
	mc->devno = c->devno;
	mc->dev = c->dev;

	/* insert at the head of the new bucket and mark most recently used */
	l = &cache.hash[h];
	mc->hash = *l;
	*l = mc;
	ctail(mc);

	qlock(mc);
	c->mc = mc;
	/* take the old extent list private; free it after dropping the cache lock */
	e = mc->list;
	mc->list = 0;
	unlock(&cache);

	while(e) {
		next = e->next;
		extentfree(e);
		e = next;
	}
	qunlock(mc);
}
  226. static int
  227. cdev(Mntcache *mc, Chan *c)
  228. {
  229. if(mc->qid.path != c->qid.path)
  230. return 0;
  231. if(mc->qid.type != c->qid.type)
  232. return 0;
  233. if(mc->devno != c->devno)
  234. return 0;
  235. if(mc->dev != c->dev)
  236. return 0;
  237. if(mc->qid.vers != c->qid.vers)
  238. return 0;
  239. return 1;
  240. }
/*
 * cread: try to satisfy a read of len bytes at offset off on channel
 * c from the cache.  Returns the number of bytes copied into buf;
 * 0 means the caller must go to the server (nothing cached at off,
 * or the cache entry is stale).
 */
int
cread(Chan *c, uint8_t *buf, int len, int64_t off)
{
	KMap *k;
	Page *p;
	Mntcache *mc;
	Extent *e, **t;
	int o, l, total;
	uint32_t offset;

	/* only the first maxcache bytes of a file are cached */
	if(off+len > maxcache)
		return 0;

	mc = c->mc;
	if(mc == nil)
		return 0;

	qlock(mc);
	/* give up if the entry no longer matches this channel's qid/device */
	if(cdev(mc, c) == 0) {
		qunlock(mc);
		return 0;
	}

	offset = off;
	t = &mc->list;
	/* find the extent containing offset; t trails as the unlink point */
	for(e = *t; e; e = e->next) {
		if(offset >= e->start && offset < e->start+e->len)
			break;
		t = &e->next;
	}

	if(e == 0) {
		qunlock(mc);
		return 0;
	}

	total = 0;
	while(len) {
		p = cpage(e);
		if(p == 0) {
			/* page was reclaimed: drop the stale extent and stop */
			*t = e->next;
			extentfree(e);
			qunlock(mc);
			return total;
		}

		/* copy the part of this extent that overlaps the request */
		o = offset - e->start;
		l = len;
		if(l > e->len-o)
			l = e->len-o;

		k = kmap(p);
		if(waserror()) {
			kunmap(k);
			putpage(p);
			qunlock(mc);
			nexterror();
		}

		memmove(buf, (uint8_t*)VA(k) + o, l);

		poperror();
		kunmap(k);

		putpage(p);

		buf += l;
		len -= l;
		offset += l;
		total += l;
		t = &e->next;
		e = e->next;
		/* stop at a gap in the cached data */
		if(e == 0 || e->start != offset)
			break;
	}
	qunlock(mc);
	return total;
}
/*
 * cchain: build a chain of extents holding the len bytes at buf,
 * starting at file offset.  Returns the head of the chain (nil if no
 * resources were available) and sets *tail to the last extent so the
 * caller can splice the chain into an extent list.  May stop short
 * of len bytes if extents or pages run out.
 */
Extent*
cchain(uint8_t *buf, uint32_t offset, int len, Extent **tail)
{
	int l;
	Page *p;
	KMap *k;
	Extent *e, *start, **t;

	start = 0;
	*tail = 0;
	t = &start;
	while(len) {
		e = extentalloc();
		if(e == 0)
			break;

		p = auxpage(BIGPGSZ);
		if(p == 0) {
			extentfree(e);
			break;
		}
		/* each extent covers at most one page of data */
		l = len;
		if(l > BIGPGSZ)
			l = BIGPGSZ;

		e->cache = p;
		e->start = offset;
		e->len = l;

		/* assign a fresh cache-wide block id under the cache lock */
		lock(&cache);
		e->bid = cache.pgno;
		cache.pgno += BIGPGSZ;
		/* wrap the counter; low bits are unused by pghash but checked by lookpage */
		if((cache.pgno & ~(BIGPGSZ-1)) == 0){
			if(cache.pgno == BIGPGSZ-1){
				print("cache wrapped\n");
				cache.pgno = 0;
			}else
				cache.pgno++;
		}
		unlock(&cache);

		p->daddr = e->bid;
		k = kmap(p);
		if(waserror()) {		/* buf may be virtual */
			kunmap(k);
			nexterror();
		}

		memmove((void*)VA(k), buf, l);

		poperror();
		kunmap(k);

		/* hand the page to the fscache image; the extent keeps only a hint */
		cachepage(p, &fscache);
		putpage(p);

		buf += l;
		offset += l;
		len -= l;
		*t = e;
		*tail = e;
		t = &e->next;
	}
	return start;
}
  364. int
  365. cpgmove(Extent *e, uint8_t *buf, int boff, int len)
  366. {
  367. Page *p;
  368. KMap *k;
  369. p = cpage(e);
  370. if(p == 0)
  371. return 0;
  372. k = kmap(p);
  373. if(waserror()) { /* Since buf may be virtual */
  374. kunmap(k);
  375. nexterror();
  376. }
  377. memmove((uint8_t*)VA(k)+boff, buf, len);
  378. poperror();
  379. kunmap(k);
  380. putpage(p);
  381. return 1;
  382. }
/*
 * cupdate: insert data read from the server into the cache at offset
 * off.  Existing extents take precedence: the new data is trimmed
 * against both its successor and predecessor, packed into the tail
 * of the predecessor's page when possible, and the remainder chained
 * in between.
 */
void
cupdate(Chan *c, uint8_t *buf, int len, int64_t off)
{
	Mntcache *mc;
	Extent *tail;
	Extent *e, *f, *p;
	int o, ee, eblock;
	uint32_t offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;
	qlock(mc);
	if(cdev(mc, c) == 0) {
		qunlock(mc);
		return;
	}

	/*
	 * Find the insertion point: p = last extent starting at or
	 * before offset, f = first extent starting after it.
	 */
	offset = off;
	p = 0;
	for(f = mc->list; f; f = f->next) {
		if(f->start > offset)
			break;
		p = f;
	}

	/* trim if there is a successor */
	eblock = offset+len;
	if(f != 0 && eblock > f->start) {
		len -= (eblock - f->start);
		if(len <= 0) {
			qunlock(mc);
			return;
		}
	}

	if(p == 0) {		/* at the head */
		e = cchain(buf, offset, len, &tail);
		if(e != 0) {
			mc->list = e;
			tail->next = f;
		}
		qunlock(mc);
		return;
	}

	/* trim to the predecessor */
	ee = p->start+p->len;
	if(offset < ee) {
		o = ee - offset;
		len -= o;
		if(len <= 0) {
			qunlock(mc);
			return;
		}
		buf += o;
		offset += o;
	}

	/* try and pack data into the predecessor */
	if(offset == ee && p->len < BIGPGSZ) {
		o = len;
		if(o > BIGPGSZ - p->len)
			o = BIGPGSZ - p->len;
		if(cpgmove(p, buf, p->len, o)) {
			p->len += o;
			buf += o;
			len -= o;
			offset += o;
			if(len <= 0) {
				/* consistency check: packing must not overlap the successor */
				if(f && p->start + p->len > f->start)
					print("CACHE: p->start=%uld p->len=%d f->start=%uld\n", p->start, p->len, f->start);
				qunlock(mc);
				return;
			}
		}
	}

	/* chain whatever is left between p and f */
	e = cchain(buf, offset, len, &tail);
	if(e != 0) {
		p->next = e;
		tail->next = f;
	}
	qunlock(mc);
}
/*
 * cwrite: update the cache to reflect a write of len bytes at offset
 * off on channel c.  Unlike cupdate, new data wins: it is packed into
 * the predecessor's page when possible, extents it overlaps are
 * discarded, and the remainder is chained in.  The cached qid version
 * is advanced to match the write.
 */
void
cwrite(Chan* c, uint8_t *buf, int len, int64_t off)
{
	int o, eo;
	Mntcache *mc;
	uint32_t eblock, ee;
	Extent *p, *f, *e, *tail;
	uint32_t offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;

	qlock(mc);
	if(cdev(mc, c) == 0) {
		qunlock(mc);
		return;
	}

	offset = off;
	/* a write advances the file version on both cache entry and channel */
	mc->qid.vers++;
	c->qid.vers++;

	/* p = last extent starting before offset, f = first at or after */
	p = 0;
	for(f = mc->list; f; f = f->next) {
		if(f->start >= offset)
			break;
		p = f;
	}

	if(p != 0) {
		ee = p->start+p->len;
		eo = offset - p->start;
		/* pack in predecessor if there is space */
		if(offset <= ee && eo < BIGPGSZ) {
			o = len;
			if(o > BIGPGSZ - eo)
				o = BIGPGSZ - eo;
			if(cpgmove(p, buf, eo, o)) {
				if(eo+o > p->len)
					p->len = eo+o;
				buf += o;
				len -= o;
				offset += o;
			}
		}
	}

	/* free the overlap -- it's a rare case */
	eblock = offset+len;
	while(f && f->start < eblock) {
		e = f->next;
		extentfree(f);
		f = e;
	}

	/* link the block (if any) into the middle */
	e = cchain(buf, offset, len, &tail);
	if(e != 0) {
		tail->next = f;
		f = e;
	}

	if(p == 0)
		mc->list = f;
	else
		p->next = f;
	qunlock(mc);
}