cache.c

/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */

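/*
 * Cache of file data for mounted channels: each cached file is
 * described by a Mntcache header holding a sorted list of Extents,
 * each extent backed by a page in the fscache Image.
 */
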
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

enum {
	NHASH = 128,
	MAXCACHE = 1024 * 1024,
	NFILE = 4096,
	NEXTENT = 200, /* extent allocation size */
};

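/*
 * Extent: a contiguous run of cached file data starting at file
 * offset start, backed by a page in the fscache Image and keyed
 * by a cache-wide block id (bid).
 */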
typedef struct Extent Extent;
struct Extent {
	int bid;
	u32 start;
	int len;
	Page *cache;
	Extent *next;
};

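/*
 * Mntcache: per-file cache state, identified by (qid, devno, dev).
 * The extent list is kept sorted by file offset.
 */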
typedef struct Mntcache Mntcache;
struct Mntcache {
	Qid qid;
	u32 devno;
	Dev *dev;
	QLock QLock;
	Extent *list;
	Mntcache *hash;
	Mntcache *prev;
	Mntcache *next;
};

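/*
 * Cache: the pool of Mntcache headers, kept on an LRU list
 * (head..tail) and hashed by qid.path; pgno is the next
 * backing-page block id to hand out.
 */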
typedef struct Cache Cache;
struct Cache {
	Lock Lock;
	int pgno;
	Mntcache *head;
	Mntcache *tail;
	Mntcache *hash[NHASH];
};

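/* Ecache: free list of Extent structures, grown NEXTENT at a time */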
typedef struct Ecache Ecache;
struct Ecache {
	Lock Lock;
	int total;
	int free;
	Extent *head;
};

static Image fscache;
static Cache cache;
static Ecache ecache;
static int maxcache = MAXCACHE;

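/* return an extent to the free list */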
static void
extentfree(Extent *e)
{
	lock(&ecache.Lock);
	e->next = ecache.head;
	ecache.head = e;
	ecache.free++;
	unlock(&ecache.Lock);
}

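/*
 * pop an extent off the free list, growing the pool by NEXTENT
 * when it is empty; returns nil if memory is exhausted
 */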
static Extent *
extentalloc(void)
{
	Extent *e;
	int i;

	lock(&ecache.Lock);
	if(ecache.head == nil){
		e = malloc(NEXTENT * sizeof(Extent));
		if(e == nil){
			unlock(&ecache.Lock);
			return nil;
		}
		for(i = 0; i < NEXTENT; i++){
			e->next = ecache.head;
			ecache.head = e;
			e++;
		}
		ecache.free += NEXTENT;
		ecache.total += NEXTENT;
	}

	e = ecache.head;
	ecache.head = e->next;
	memset(e, 0, sizeof(Extent));
	ecache.free--;
	unlock(&ecache.Lock);

	return e;
}

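/* allocate the Mntcache headers and string them into the LRU list */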
void
cinit(void)
{
	int i;
	Mntcache *mc;

	if((cache.head = malloc(sizeof(Mntcache) * NFILE)) == nil)
		panic("cinit: no memory");
	mc = cache.head;

	/* a better algorithm would be nice */
	// if(conf.npage*PGSZ > 200*MB)
	//	maxcache = 10*MAXCACHE;
	// if(conf.npage*PGSZ > 400*MB)
	//	maxcache = 50*MAXCACHE;

	for(i = 0; i < NFILE - 1; i++){
		mc->next = mc + 1;
		mc->prev = mc - 1;
		mc++;
	}

	cache.tail = mc;
	cache.tail->next = 0;
	cache.head->prev = 0;

	fscache.notext = 1;
}

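/* find the cached page backing an extent; 0 if it has been reclaimed or reused */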
Page *
cpage(Extent *e)
{
	/* Easy consistency check */
	if(e->cache->daddr != e->bid)
		return 0;

	return lookpage(&fscache, e->bid);
}

void
cnodata(Mntcache *mc)
{
	Extent *e, *n;

	/*
	 * Invalidate all extent data
	 * Image lru will waste the pages
	 */
	for(e = mc->list; e; e = n){
		n = e->next;
		extentfree(e);
	}
	mc->list = 0;
}

void
ctail(Mntcache *mc)
{
	/* Unlink and send to the tail */
	if(mc->prev)
		mc->prev->next = mc->next;
	else
		cache.head = mc->next;
	if(mc->next)
		mc->next->prev = mc->prev;
	else
		cache.tail = mc->prev;

	if(cache.tail){
		mc->prev = cache.tail;
		cache.tail->next = mc;
		mc->next = 0;
		cache.tail = mc;
	} else {
		cache.head = mc;
		cache.tail = mc;
		mc->prev = 0;
		mc->next = 0;
	}
}

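/*
 * Attach a cache header to a channel: reuse an existing entry for the
 * same (qid, dev), invalidating it if the version changed; otherwise
 * recycle the least recently used header and free any extents it still holds.
 */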
void
copen(Chan *c)
{
	int h;
	Extent *e, *next;
	Mntcache *mc, *f, **l;

	/* directories aren't cacheable and append-only files confuse us */
	if(c->qid.type & (QTDIR | QTAPPEND))
		return;

	h = c->qid.path % NHASH;
	lock(&cache.Lock);
	for(mc = cache.hash[h]; mc != nil; mc = mc->hash){
		if(mc->qid.path == c->qid.path)
		if(mc->qid.type == c->qid.type)
		if(mc->devno == c->devno && mc->dev == c->dev){
			c->mc = mc;
			ctail(mc);
			unlock(&cache.Lock);

			/* File was updated, invalidate cache */
			if(mc->qid.vers != c->qid.vers){
				mc->qid.vers = c->qid.vers;
				qlock(&mc->QLock);
				cnodata(mc);
				qunlock(&mc->QLock);
			}
			return;
		}
	}

	/* LRU the cache headers */
	mc = cache.head;
	l = &cache.hash[mc->qid.path % NHASH];
	for(f = *l; f; f = f->hash){
		if(f == mc){
			*l = mc->hash;
			break;
		}
		l = &f->hash;
	}

	mc->qid = c->qid;
	mc->devno = c->devno;
	mc->dev = c->dev;

	l = &cache.hash[h];
	mc->hash = *l;
	*l = mc;
	ctail(mc);

	qlock(&mc->QLock);
	c->mc = mc;
	e = mc->list;
	mc->list = 0;
	unlock(&cache.Lock);

	while(e){
		next = e->next;
		extentfree(e);
		e = next;
	}
	qunlock(&mc->QLock);
}

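/* does the cache entry still describe the file (and version) behind the channel? */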
static int
cdev(Mntcache *mc, Chan *c)
{
	if(mc->qid.path != c->qid.path)
		return 0;
	if(mc->qid.type != c->qid.type)
		return 0;
	if(mc->devno != c->devno)
		return 0;
	if(mc->dev != c->dev)
		return 0;
	if(mc->qid.vers != c->qid.vers)
		return 0;
	return 1;
}

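/*
 * Copy cached data for c into buf.  Returns the number of bytes
 * satisfied from the cache, stopping at the first gap or at an
 * extent whose backing page has been reclaimed.
 */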
int
cread(Chan *c, u8 *buf, int len, i64 off)
{
	Proc *up = externup();
	KMap *k;
	Page *p;
	Mntcache *mc;
	Extent *e, **t;
	int o, l, total;
	u32 offset;

	if(off + len > maxcache)
		return 0;

	mc = c->mc;
	if(mc == nil)
		return 0;

	qlock(&mc->QLock);
	if(cdev(mc, c) == 0){
		qunlock(&mc->QLock);
		return 0;
	}

	offset = off;
	t = &mc->list;
	for(e = *t; e; e = e->next){
		if(offset >= e->start && offset < e->start + e->len)
			break;
		t = &e->next;
	}

	if(e == 0){
		qunlock(&mc->QLock);
		return 0;
	}

	total = 0;
	while(len){
		p = cpage(e);
		if(p == 0){
			*t = e->next;
			extentfree(e);
			qunlock(&mc->QLock);
			return total;
		}

		o = offset - e->start;
		l = len;
		if(l > e->len - o)
			l = e->len - o;

		k = kmap(p);
		if(waserror()){
			kunmap(k);
			putpage(p);
			qunlock(&mc->QLock);
			nexterror();
		}

		memmove(buf, (u8 *)VA(k) + o, l);

		poperror();
		kunmap(k);
		putpage(p);

		buf += l;
		len -= l;
		offset += l;
		total += l;

		t = &e->next;
		e = e->next;
		if(e == 0 || e->start != offset)
			break;
	}

	qunlock(&mc->QLock);
	return total;
}

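/*
 * Build a chain of extents covering len bytes of data starting at
 * offset, copying the data into freshly cached pages.  Returns the
 * head of the chain and sets *tail; may stop short if extents or
 * pages run out.
 */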
Extent *
cchain(u8 *buf, u32 offset, int len, Extent **tail)
{
	Proc *up = externup();
	int l;
	Page *p;
	KMap *k;
	Extent *e, *start, **t;

	start = 0;
	*tail = 0;
	t = &start;
	while(len){
		e = extentalloc();
		if(e == 0)
			break;

		p = auxpage(BIGPGSZ);
		if(p == 0){
			extentfree(e);
			break;
		}
		l = len;
		if(l > BIGPGSZ)
			l = BIGPGSZ;

		e->cache = p;
		e->start = offset;
		e->len = l;

		lock(&cache.Lock);
		e->bid = cache.pgno;
		cache.pgno += BIGPGSZ;
		/* wrap the counter; low bits are unused by pghash but checked by lookpage */
		if((cache.pgno & ~(BIGPGSZ - 1)) == 0){
			if(cache.pgno == BIGPGSZ - 1){
				print("cache wrapped\n");
				cache.pgno = 0;
			} else
				cache.pgno++;
		}
		unlock(&cache.Lock);

		p->daddr = e->bid;
		k = kmap(p);
		if(waserror()) { /* buf may be virtual */
			kunmap(k);
			nexterror();
		}
		memmove((void *)VA(k), buf, l);
		poperror();
		kunmap(k);

		cachepage(p, &fscache);
		putpage(p);

		buf += l;
		offset += l;
		len -= l;
		*t = e;
		*tail = e;
		t = &e->next;
	}
	return start;
}

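/* copy len bytes of buf into the page backing e, boff bytes into the extent */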
int
cpgmove(Extent *e, u8 *buf, int boff, int len)
{
	Proc *up = externup();
	Page *p;
	KMap *k;

	p = cpage(e);
	if(p == 0)
		return 0;

	k = kmap(p);
	if(waserror()) { /* Since buf may be virtual */
		kunmap(k);
		nexterror();
	}
	memmove((u8 *)VA(k) + boff, buf, len);
	poperror();
	kunmap(k);
	putpage(p);

	return 1;
}

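/*
 * Merge new data into the cache without bumping the version:
 * trim against the neighbouring extents, pack into the predecessor
 * when possible, and chain new extents for the rest.
 */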
void
cupdate(Chan *c, u8 *buf, int len, i64 off)
{
	Mntcache *mc;
	Extent *tail;
	Extent *e, *f, *p;
	int o, ee, eblock;
	u32 offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;
	qlock(&mc->QLock);
	if(cdev(mc, c) == 0){
		qunlock(&mc->QLock);
		return;
	}

	/*
	 * Find the insertion point
	 */
	offset = off;
	p = 0;
	for(f = mc->list; f; f = f->next){
		if(f->start > offset)
			break;
		p = f;
	}

	/* trim if there is a successor */
	eblock = offset + len;
	if(f != 0 && eblock > f->start){
		len -= (eblock - f->start);
		if(len <= 0){
			qunlock(&mc->QLock);
			return;
		}
	}

	if(p == 0) { /* at the head */
		e = cchain(buf, offset, len, &tail);
		if(e != 0){
			mc->list = e;
			tail->next = f;
		}
		qunlock(&mc->QLock);
		return;
	}

	/* trim to the predecessor */
	ee = p->start + p->len;
	if(offset < ee){
		o = ee - offset;
		len -= o;
		if(len <= 0){
			qunlock(&mc->QLock);
			return;
		}
		buf += o;
		offset += o;
	}

	/* try and pack data into the predecessor */
	if(offset == ee && p->len < BIGPGSZ){
		o = len;
		if(o > BIGPGSZ - p->len)
			o = BIGPGSZ - p->len;
		if(cpgmove(p, buf, p->len, o)){
			p->len += o;
			buf += o;
			len -= o;
			offset += o;
			if(len <= 0){
				if(f && p->start + p->len > f->start)
					print("CACHE: p->start=%lu p->len=%d f->start=%lu\n",
						p->start, p->len, f->start);
				qunlock(&mc->QLock);
				return;
			}
		}
	}

	e = cchain(buf, offset, len, &tail);
	if(e != 0){
		p->next = e;
		tail->next = f;
	}
	qunlock(&mc->QLock);
}

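/*
 * Cache data written through the channel, bumping the cached and
 * channel qid versions.  Overlapping extents are freed and replaced
 * by a fresh chain.
 */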
void
cwrite(Chan *c, u8 *buf, int len, i64 off)
{
	int o, eo;
	Mntcache *mc;
	u32 eblock, ee;
	Extent *p, *f, *e, *tail;
	u32 offset;

	if(off > maxcache || len == 0)
		return;

	mc = c->mc;
	if(mc == nil)
		return;
	qlock(&mc->QLock);
	if(cdev(mc, c) == 0){
		qunlock(&mc->QLock);
		return;
	}

	offset = off;
	mc->qid.vers++;
	c->qid.vers++;

	p = 0;
	for(f = mc->list; f; f = f->next){
		if(f->start >= offset)
			break;
		p = f;
	}

	if(p != 0){
		ee = p->start + p->len;
		eo = offset - p->start;
		/* pack in predecessor if there is space */
		if(offset <= ee && eo < BIGPGSZ){
			o = len;
			if(o > BIGPGSZ - eo)
				o = BIGPGSZ - eo;
			if(cpgmove(p, buf, eo, o)){
				if(eo + o > p->len)
					p->len = eo + o;
				buf += o;
				len -= o;
				offset += o;
			}
		}
	}

	/* free the overlap -- it's a rare case */
	eblock = offset + len;
	while(f && f->start < eblock){
		e = f->next;
		extentfree(f);
		f = e;
	}

	/* link the block (if any) into the middle */
	e = cchain(buf, offset, len, &tail);
	if(e != 0){
		tail->next = f;
		f = e;
	}

	if(p == 0)
		mc->list = f;
	else
		p->next = f;

	qunlock(&mc->QLock);
}