tcklock.c

/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */
/*
 * Ticket locks.
 * D. P. Reed and R. K. Kanodia.
 * Synchronization with Eventcounts and Sequencers.
 * Communications of the ACM, 22(2):115–123, Feb. 1979.
 *
 * A variant is used in Linux.
 *
 * These are here to measure them and compare them with taslocks.
 * If there's no difference, these should go
 * (because taslocks are known to be robust and don't have bugs).
 */
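
/*
 * Each Lock carries a 32-bit key holding two 16-bit counters: the low
 * half ("user") is the ticket dispenser, bumped once per acquire
 * attempt, and the high half ("ticket") is the now-serving counter,
 * bumped once per release.  lock() takes the next user value and
 * spins until the ticket half catches up with it, so waiters are
 * served strictly in FIFO order; unlock() simply advances the ticket
 * half.
 */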
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "../port/edf.h"
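
/*
 * Lock/wait statistics.  waitstats holds parallel arrays indexed
 * together: pcs[] is the caller PC of each recorded lock site, ns[]
 * the number of samples taken there, wait[] the longest single wait
 * seen (in cycles), total[] the accumulated wait and type[] the kind
 * of wait (only WSlock is used in this file).  waitstatslk protects
 * only the insertion of new entries; see addwaitstat.
 */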
Lockstats lockstats;
Waitstats waitstats;
Lock waitstatslk;

static void
newwaitstats(void)
{
	if(waitstats.pcs != nil)
		return;
	waitstats.pcs = malloc(NWstats * sizeof waitstats.pcs[0]);
	waitstats.ns = malloc(NWstats * sizeof waitstats.ns[0]);
	waitstats.wait = malloc(NWstats * sizeof waitstats.wait[0]);
	waitstats.total = malloc(NWstats * sizeof waitstats.total[0]);
	waitstats.type = malloc(NWstats * sizeof waitstats.type[0]);
}

void
startwaitstats(int on)
{
	newwaitstats();
	mfence();
	waitstats.on = on;
	print("lockstats %s\n", on ? "on" : "off");
}

void
clearwaitstats(void)
{
	newwaitstats();
	memset(waitstats.ns, 0, NWstats * sizeof(int));
	memset(waitstats.wait, 0, NWstats * sizeof(uint64_t));
	memset(waitstats.total, 0, NWstats * sizeof(uint64_t));
}
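
/*
 * Record the time since t0 as one wait sample for the lock acquired
 * at pc.  The common case scans the existing entries without taking
 * waitstatslk; the updates of wait[] and total[] can race, which is
 * tolerated (see the "race but ok" comments).  waitstatslk is taken
 * only to install a new pc, and only with canlock so this never
 * blocks; the new entry is published by writing pcs[i] last, after an
 * mfence.
 */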
void
addwaitstat(uintptr_t pc, uint64_t t0, int type)
{
	uint i;
	uint64_t w;

	if(waitstats.on == 0)
		return;

	cycles(&w);
	w -= t0;
	mfence();
	for(i = 0; i < NWstats; i++)
		if(waitstats.pcs[i] == pc){
			ainc(&waitstats.ns[i]);
			if(w > waitstats.wait[i])
				waitstats.wait[i] = w;	/* race but ok */
			waitstats.total[i] += w;	/* race but ok */
			return;
		}
	if(!canlock(&waitstatslk))
		return;
	for(i = 0; i < NWstats; i++)
		if(waitstats.pcs[i] == pc){
			ainc(&waitstats.ns[i]);
			if(w > waitstats.wait[i])
				waitstats.wait[i] = w;	/* race but ok */
			waitstats.total[i] += w;
			unlock(&waitstatslk);
			return;
		}
	for(i = 0; i < NWstats; i++)
		if(waitstats.pcs[i] == 0){
			waitstats.ns[i] = 1;
			waitstats.type[i] = type;
			waitstats.wait[i] = w;
			waitstats.total[i] = w;
			mfence();
			waitstats.pcs[i] = pc;
			waitstats.npcs++;
			break;
		}
	unlock(&waitstatslk);
}
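
/*
 * Called when a lock() spin has gone on for a very long time: print
 * the lock, its key and the PCs and processes involved so a stuck
 * lock can be diagnosed.
 */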
void
lockloop(Lock *l, uintptr_t pc)
{
	Proc *p;

	p = l->p;
	print("lock %#p loop key %#ux pc %#p held by pc %#p proc %d\n",
		l, l->key, pc, l->pc, p ? p->pid : 0);
	dumpaproc(up);
	if(p != nil)
		dumpaproc(p);
}
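
/*
 * Helpers for the two halves of the key and the CAS-based increment
 * of each half.  A worked example, starting from a fresh lock: after
 * three lock() calls and one unlock(), key == 0x00010003; the low
 * half records that tickets 0, 1 and 2 have been handed out, the high
 * half that ticket 1 is now being served, so the second caller holds
 * the lock and the third is still spinning.
 */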
static uint32_t
getuser(uint32_t key)
{
	return key & 0xFFFF;
}

static uint32_t
getticket(uint32_t key)
{
	return (key>>16) & 0xFFFF;
}

static uint32_t
incuser(uint32_t *key)
{
	uint32_t old, new;

	do{
		old = *key;
		new = (old & 0xFFFF0000) | ((old+1) & 0xFFFF);
	}while(!cas32(key, old, new));
	return getuser(new);
}

static uint32_t
incticket(uint32_t *key)
{
	uint32_t old, new;

	do{
		old = *key;
		new = ((old+0x10000) & 0xFFFF0000) | (old & 0xFFFF);
	}while(!cas32(key, old, new));
	return getticket(new);
}

static uint32_t
myticket(uint32_t user)
{
	return (user-1) & 0xFFFF;
}
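
/*
 * Acquire l, spinning until our ticket comes up.  up->nlocks is
 * raised before taking a ticket so the process is not rescheduled
 * while holding (or about to hold) the lock, and dropped again while
 * spinning.  On a uniprocessor, a contending EDF-admitted process has
 * inverted priority with the holder, so its deadline is set to now to
 * make it yield to the process with the lock.  Owner bookkeeping
 * (l->pc, l->p, l->m, up->lastlock) is kept for the diagnostics in
 * unlock and lockloop.
 */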
int
lock(Lock *l)
{
	int i;
	uintptr_t pc;
	uint32_t user;
	uint64_t t0;

	pc = getcallerpc(&l);
	lockstats.locks++;
	if(up)
		ainc(&up->nlocks);	/* prevent being scheded */
	cycles(&t0);
	user = incuser(&l->key);
	if(getticket(l->key) != myticket(user)){
		if(up)
			adec(&up->nlocks);
		lockstats.glare++;
		i = 0;
		while(getticket(l->key) != myticket(user)){
			if(conf.nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#p proc %d held by pc %#p proc %d\n",
					l, pc, up ? up->pid : 0, l->pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			if(i++ > 100000000){
				i = 0;
				lockloop(l, pc);
			}
		}
		if(up)
			ainc(&up->nlocks);
	}
	l->pc = pc;
	l->p = up;
	l->m = m;
	l->isilock = 0;
	if(up)
		up->lastlock = l;
	if(l != &waitstatslk)
		addwaitstat(pc, t0, WSlock);
	return 0;
}
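
/*
 * Interrupt-safe acquire: raise the priority level before taking a
 * ticket, but drop back to the caller's level while spinning so that
 * interrupts are not held off during a long wait, then raise it again
 * once our turn comes.  The level in force at entry is saved in l->pl
 * for iunlock to restore.
 */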
void
ilock(Lock *l)
{
	Mpl pl;
	uintptr_t pc;
	uint64_t t0;
	uint32_t user;

	pc = getcallerpc(&l);
	lockstats.locks++;

	pl = splhi();
	cycles(&t0);
	user = incuser(&l->key);
	if(getticket(l->key) != myticket(user)){
		splx(pl);
		while(getticket(l->key) != myticket(user))
			;
		pl = splhi();
	}

	m->ilockdepth++;
	if(up)
		up->lastilock = l;
	l->pl = pl;
	l->pc = pc;
	l->p = up;
	l->isilock = 1;
	l->m = m;
	if(l != &waitstatslk)
		addwaitstat(pc, t0, WSlock);
}
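
/*
 * Non-blocking acquire: succeed only if the lock looks free (user and
 * ticket halves equal) and the cas32 that installs the incremented
 * user half wins; otherwise give up immediately and return 0.
 */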
int
canlock(Lock *l)
{
	Lock try, new;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc(&l);
	lockstats.locks++;
	if(up)
		ainc(&up->nlocks);	/* prevent being scheded */
	cycles(&t0);
	try = *l;
	if(getuser(try.key) != getticket(try.key)){
Cant:
		if(up)
			adec(&up->nlocks);
		return 0;
	}
	new = try;
	incuser(&new.key);
	if(!cas32(&l->key, try.key, new.key))
		goto Cant;

	l->pc = pc;
	l->p = up;
	l->m = m;
	if(up)
		up->lastlock = l;
	l->isilock = 0;
	return 1;
}
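
/*
 * Release l: complain if the lock does not look held, if it is really
 * an ilock, or if the releasing process is not the acquiring one,
 * then advance the ticket half to pass the lock to the next waiter.
 * A sched() deferred while locks were held is run on the way out (see
 * below).
 */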
void
unlock(Lock *l)
{
	if(getticket(l->key) == getuser(l->key))
		print("unlock: not locked: pc %#p\n", getcallerpc(&l));
	if(l->isilock)
		print("unlock of ilock: pc %#p, held by %#p\n", getcallerpc(&l), l->pc);
	if(l->p != up)
		print("unlock: up changed: pc %#p, acquired at pc %#p,"
			" lock p %#p, unlock up %#p\n", getcallerpc(&l), l->pc, l->p, up);
	l->m = nil;
	incticket(&l->key);

	if(up && adec(&up->nlocks) == 0 && up->delaysched && islo()){
		/*
		 * Call sched if the need arose while locks were held
		 * But, don't do it from interrupt routines, hence the islo() test
		 */
		sched();
	}
}
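
/*
 * Release an ilock: the sanity checks also verify that interrupts are
 * still off and that we are on the CPU that took the lock; then the
 * ticket half is advanced and the priority level saved by ilock is
 * restored.
 */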
void
iunlock(Lock *l)
{
	Mpl pl;

	if(getticket(l->key) == getuser(l->key))
		print("iunlock: not locked: pc %#p\n", getcallerpc(&l));
	if(!l->isilock)
		print("iunlock of lock: pc %#p, held by %#p\n", getcallerpc(&l), l->pc);
	if(islo())
		print("iunlock while lo: pc %#p, held by %#p\n", getcallerpc(&l), l->pc);
	if(l->m != m){
		print("iunlock by cpu%d, locked by cpu%d: pc %#p, held by %#p\n",
			machp()->machno, l->m->machno, getcallerpc(&l), l->pc);
	}

	pl = l->pl;
	l->m = nil;
	incticket(&l->key);
	m->ilockdepth--;
	if(up)
		up->lastilock = nil;
	splx(pl);
}
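
/*
 * Portable fallback for mwait: spin until the word pointed to becomes
 * non-nil.  mwait is a function pointer, presumably so architecture
 * code can replace this with a cheaper wait primitive.
 */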
void
portmwait(void *value)
{
	while(*(void**)value == nil)
		;
}

void (*mwait)(void *) = portmwait;