bwatch.c

#include "stdinc.h"
#include "dat.h"
#include "fns.h"
#include "error.h"
/*
 * Lock watcher.  Check that blocks are always locked in tree order:
 * parents before children and, for blocks in different subtrees,
 * left before right.
 *
 * This is REALLY slow, and it won't work when the blocks aren't
 * arranged in a tree (e.g., after the first snapshot).  But it's great
 * for debugging.
 */
enum
{
	MaxLock = 16,
	HashSize = 1009,
};

/*
 * Thread-specific watch state.
 */
typedef struct WThread WThread;
struct WThread
{
	Block *b[MaxLock];	/* blocks currently held */
	uint nb;
	uint pid;
};

typedef struct WMap WMap;
typedef struct WEntry WEntry;
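
/*
 * A WEntry records one dependency edge: the block with score c
 * is the child stored at index off inside the parent block with
 * score p.  Each entry is linked into two hash chains, one keyed
 * by the child score and one by the parent score.
 */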
struct WEntry
{
	uchar c[VtScoreSize];
	uchar p[VtScoreSize];
	int off;

	WEntry *cprev;
	WEntry *cnext;
	WEntry *pprev;
	WEntry *pnext;
};
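
/*
 * Global dependency map: every known parent/child edge, hashed
 * both by child score and by parent score, protected by lk.
 */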
struct WMap
{
	VtLock *lk;

	WEntry *hchild[HashSize];
	WEntry *hparent[HashSize];
};

static WMap map;
static void **wp;
static uint blockSize;
static WEntry *pool;
uint bwatchDisabled;
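
/*
 * Hash a Venti score into a bucket index for the maps above.
 */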
static uint
hash(uchar score[VtScoreSize])
{
	uint i, h;

	h = 0;
	for(i=0; i<VtScoreSize; i++)
		h = h*37 + score[i];
	return h%HashSize;
}

#include <pool.h>
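
/*
 * Return a WEntry to the free list.
 */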
static void
freeWEntry(WEntry *e)
{
	memset(e, 0, sizeof(WEntry));
	e->pnext = pool;
	pool = e;
}
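
/*
 * Take a WEntry from the free list, refilling the list
 * 1024 entries at a time when it runs dry.
 */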
static WEntry*
allocWEntry(void)
{
	int i;
	WEntry *w;

	w = pool;
	if(w == nil){
		w = vtMemAllocZ(1024*sizeof(WEntry));
		for(i=0; i<1024; i++)
			freeWEntry(&w[i]);
		w = pool;
	}
	pool = w->pnext;
	memset(w, 0, sizeof(WEntry));
	return w;
}
/*
 * remove all dependencies with score as a parent
 */
static void
_bwatchResetParent(uchar *score)
{
	WEntry *w, *next;
	uint h;

	h = hash(score);
	for(w=map.hparent[h]; w; w=next){
		next = w->pnext;
		if(memcmp(w->p, score, VtScoreSize) == 0){
			if(w->pnext)
				w->pnext->pprev = w->pprev;
			if(w->pprev)
				w->pprev->pnext = w->pnext;
			else
				map.hparent[h] = w->pnext;
			if(w->cnext)
				w->cnext->cprev = w->cprev;
			if(w->cprev)
				w->cprev->cnext = w->cnext;
			else
				map.hchild[hash(w->c)] = w->cnext;
			freeWEntry(w);
		}
	}
}
/*
 * remove all dependencies with score as a child
 */
static void
_bwatchResetChild(uchar *score)
{
	WEntry *w, *next;
	uint h;

	h = hash(score);
	for(w=map.hchild[h]; w; w=next){
		next = w->cnext;
		if(memcmp(w->c, score, VtScoreSize) == 0){
			if(w->pnext)
				w->pnext->pprev = w->pprev;
			if(w->pprev)
				w->pprev->pnext = w->pnext;
			else
				map.hparent[hash(w->p)] = w->pnext;
			if(w->cnext)
				w->cnext->cprev = w->cprev;
			if(w->cprev)
				w->cprev->cnext = w->cnext;
			else
				map.hchild[h] = w->cnext;
			freeWEntry(w);
		}
	}
}
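
/*
 * Look up the recorded parent of the block with score c.
 * On success, set *off to the child's index within the parent
 * and return the parent's score; return nil if c is unknown.
 */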
static uchar*
parent(uchar c[VtScoreSize], int *off)
{
	WEntry *w;
	uint h;

	h = hash(c);
	for(w=map.hchild[h]; w; w=w->cnext)
		if(memcmp(w->c, c, VtScoreSize) == 0){
			*off = w->off;
			return w->p;
		}
	return nil;
}
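
/*
 * Record that the block with score c is stored at index off
 * inside the parent block with score p, linking the new entry
 * into both hash chains.
 */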
static void
addChild(uchar p[VtScoreSize], uchar c[VtScoreSize], int off)
{
	uint h;
	WEntry *w;

	w = allocWEntry();
	memmove(w->p, p, VtScoreSize);
	memmove(w->c, c, VtScoreSize);
	w->off = off;

	h = hash(p);
	w->pnext = map.hparent[h];
	if(w->pnext)
		w->pnext->pprev = w;
	map.hparent[h] = w;

	h = hash(c);
	w->cnext = map.hchild[h];
	if(w->cnext)
		w->cnext->cprev = w;
	map.hchild[h] = w;
}
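
/*
 * Forget all recorded dependencies involving score,
 * whether as parent or as child.
 */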
void
bwatchReset(uchar score[VtScoreSize])
{
	vtLock(map.lk);
	_bwatchResetParent(score);
	_bwatchResetChild(score);
	vtUnlock(map.lk);
}
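
/*
 * Initialize the map lock and the per-process slot
 * that holds each process's WThread.
 */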
void
bwatchInit(void)
{
	map.lk = vtLockAlloc();
	wp = privalloc();
	*wp = nil;
}
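
/*
 * Record the file system block size; bwatchDependency uses it
 * to know how many entries or pointers fit in a block.
 */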
void
bwatchSetBlockSize(uint bs)
{
	blockSize = bs;
}
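
/*
 * Return the calling process's WThread, allocating a fresh one
 * if there is none yet or if the stored pid no longer matches
 * (e.g., the slot was inherited across a fork).
 */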
static WThread*
getWThread(void)
{
	WThread *w;

	w = *wp;
	if(w == nil || w->pid != getpid()){
		w = vtMemAllocZ(sizeof(WThread));
		*wp = w;
		w->pid = getpid();
	}
	return w;
}
/*
 * Derive dependencies from the contents of b.
 */
void
bwatchDependency(Block *b)
{
	int i, epb, ppb;
	Entry e;

	if(bwatchDisabled)
		return;

	vtLock(map.lk);
	_bwatchResetParent(b->score);

	switch(b->l.type){
	case BtData:
		break;

	case BtDir:
		epb = blockSize / VtEntrySize;
		for(i=0; i<epb; i++){
			entryUnpack(&e, b->data, i);
			if(!(e.flags & VtEntryActive))
				continue;
			addChild(b->score, e.score, i);
		}
		break;

	default:
		ppb = blockSize / VtScoreSize;
		for(i=0; i<ppb; i++)
			addChild(b->score, b->data+i*VtScoreSize, i);
		break;
	}
	vtUnlock(map.lk);
}
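
/*
 * Number of recorded ancestors of s: 0 for a block with no
 * known parent, 1 for a child of such a block, and so on.
 */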
static int
depth(uchar *s)
{
	int d, x;

	d = -1;
	while(s){
		d++;
		s = parent(s, &x);
	}
	return d;
}
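
/*
 * Return non-zero if a thread already holding the lock on the
 * block with score xhave must not also lock the block with
 * score xwant, according to the recorded tree order.
 */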
static int
lockConflicts(uchar xhave[VtScoreSize], uchar xwant[VtScoreSize])
{
	uchar *have, *want;
	int havedepth, wantdepth, havepos, wantpos;

	have = xhave;
	want = xwant;

	havedepth = depth(have);
	wantdepth = depth(want);

	/*
	 * walk one or the other up until they're both
	 * at the same level.
	 */
	havepos = -1;
	wantpos = -1;
	have = xhave;
	want = xwant;
	while(wantdepth > havedepth){
		wantdepth--;
		want = parent(want, &wantpos);
	}
	while(havedepth > wantdepth){
		havedepth--;
		have = parent(have, &havepos);
	}

	/*
	 * walk them up simultaneously until we reach
	 * a common ancestor.
	 */
	while(have && want && memcmp(have, want, VtScoreSize) != 0){
		have = parent(have, &havepos);
		want = parent(want, &wantpos);
	}

	/*
	 * not part of same tree.  happens mainly with
	 * newly allocated blocks.
	 */
	if(!have || !want)
		return 0;

	/*
	 * never walked want: means we want to lock
	 * an ancestor of have.  no no.
	 */
	if(wantpos == -1)
		return 1;

	/*
	 * never walked have: means we want to lock a
	 * child of have.  that's okay.
	 */
	if(havepos == -1)
		return 0;

	/*
	 * walked both: they're from different places in the tree.
	 * require that the left one be locked before the right one.
	 * (this is questionable, but it puts a total order on the block tree).
	 */
	return havepos < wantpos;
}
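
/*
 * Suspend the offending process by writing "stop" to its
 * /proc ctl file (#p is the Plan 9 proc device), so that
 * it can be inspected with a debugger.
 */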
static void
stop(void)
{
	int fd;
	char buf[32];

	snprint(buf, sizeof buf, "#p/%d/ctl", getpid());
	fd = open(buf, OWRITE);
	write(fd, "stop", 4);
	close(fd);
}
/*
 * Check whether the calling thread can validly lock b.
 * That is, check that the calling thread doesn't hold
 * locks for any of b's children.
 */
void
bwatchLock(Block *b)
{
	int i;
	WThread *w;

	if(bwatchDisabled)
		return;
	if(b->part != PartData)
		return;

	vtLock(map.lk);
	w = getWThread();
	for(i=0; i<w->nb; i++){
		if(lockConflicts(w->b[i]->score, b->score)){
			fprint(2, "%d: have block %V; shouldn't lock %V\n",
				w->pid, w->b[i]->score, b->score);
			stop();
		}
	}
	vtUnlock(map.lk);
	if(w->nb >= MaxLock){
		fprint(2, "%d: too many blocks held\n", w->pid);
		stop();
	}else
		w->b[w->nb++] = b;
}
/*
 * Note that the calling thread is about to unlock b.
 */
void
bwatchUnlock(Block *b)
{
	int i;
	WThread *w;

	if(bwatchDisabled)
		return;
	if(b->part != PartData)
		return;

	w = getWThread();
	for(i=0; i<w->nb; i++)
		if(w->b[i] == b)
			break;
	if(i>=w->nb){
		fprint(2, "%d: unlock of unlocked block %V\n", w->pid, b->score);
		stop();
	}else
		w->b[i] = w->b[--w->nb];
}