/*
 * proc.c — Plan 9 kernel: process allocation, scheduling,
 * sleep/wakeup rendezvous, notes, and process exit/wait.
 */
  1. #include <u.h>
  2. #include "../port/lib.h"
  3. #include "mem.h"
  4. #include "dat.h"
  5. #include "fns.h"
  6. #include "../port/error.h"
  7. #include "edf.h"
  8. #include <trace.h>
int	schedgain = 30;	/* units in seconds; decay horizon for cpu averaging in updatecpu */
int	nrdy;		/* count of Ready procs across all run queues */
Ref	noteidalloc;	/* allocator for note-group ids */

void updatecpu(Proc*);
int reprioritize(Proc*);

/* scheduler statistics */
ulong	delayedscheds;	/* scheds deferred because the proc held locks */
long skipscheds;	/* cooperative handoffs taken via m->readied */
long preempts;		/* times we fell through to the full runproc search */
ulong load;

static Ref	pidalloc;	/* allocator for pids */

/* pool of all Proc structures; ht is the pid hash used by pidhash/pidunhash */
static struct Procalloc
{
	Lock;
	Proc*	ht[128];
	Proc*	arena;	/* base of the conf.nproc-sized Proc array (see procinit0) */
	Proc*	free;	/* free list linked through Proc.qnext */
} procalloc;

enum
{
	Q=10,
	DQ=4,
	Scaling=2,	/* fixed-point scale applied to tick counts in updatecpu */
};

Schedq	runq[Nrq];	/* one queue per priority level */
ulong	runvec;		/* bitmap: bit i set when runq[i] is non-empty */

char *statename[] =
{	/* BUG: generate automatically */
	"Dead",
	"Moribund",
	"Ready",
	"Scheding",
	"Running",
	"Queueing",
	"QueueingR",
	"QueueingW",
	"Wakeme",
	"Broken",
	"Stopped",
	"Rendez",
	"Waitrelease",
};

static void pidhash(Proc*);
static void pidunhash(Proc*);
static void rebalance(void);
/*
 * Always splhi()'ed.
 *
 * Per-processor scheduling loop entry: control arrives here (via
 * gotolabel(&m->sched)) every time a process gives up the cpu.
 * Dispose of the outgoing process — requeue it if still Running,
 * or finish tearing it down if Moribund — then pick a new one.
 */
void
schedinit(void)		/* never returns */
{
	Edf *e;

	setlabel(&m->sched);
	if(up) {
		if((e = up->edf) && (e->flags & Admitted))
			edfrecord(up);
		m->proc = 0;
		switch(up->state) {
		case Running:
			ready(up);
			break;
		case Moribund:
			up->state = Dead;
			edfstop(up);
			if (up->edf)
				free(up->edf);
			up->edf = nil;

			/*
			 * Holding locks from pexit:
			 *	procalloc
			 *	palloc
			 * (pexit took them before its final sched();
			 * we release them only after the address space
			 * and Proc slot are safely recycled.)
			 */
			mmurelease(up);

			up->qnext = procalloc.free;
			procalloc.free = up;

			unlock(&palloc);
			unlock(&procalloc);
			break;
		}
		up->mach = nil;
		/* close out the outgoing proc's cpu accounting before up changes */
		updatecpu(up);
		up = nil;
	}
	sched();
}
/*
 * If changing this routine, look also at sleep(). It
 * contains a copy of the guts of sched().
 *
 * Give up the cpu: save our context in up->sched, jump to the
 * per-processor scheduler loop (m->sched), and eventually resume
 * here when rescheduled. When called with up == nil (from
 * schedinit) this half runs instead: pick and dispatch a process.
 */
void
sched(void)
{
	Proc *p;

	/* rescheduling while inside an ilock would deadlock the kernel */
	if(m->ilockdepth)
		panic("ilockdepth %d, last lock %#p at %#lux, sched called from %#p",
			m->ilockdepth, up?up->lastilock:nil,
			(up && up->lastilock)?up->lastilock->pc:0,
			getcallerpc(&p+2));
	if(up){
		/*
		 * Delay the sched until the process gives up the locks
		 * it is holding. This avoids dumb lock loops.
		 * Don't delay if the process is Moribund.
		 * It called sched to die.
		 * But do sched eventually. This avoids a missing unlock
		 * from hanging the entire kernel.
		 * But don't reschedule procs holding palloc or procalloc.
		 * Those are far too important to be holding while asleep.
		 *
		 * This test is not exact. There can still be a few instructions
		 * in the middle of taslock when a process holds a lock
		 * but Lock.p has not yet been initialized.
		 */
		if(up->nlocks.ref)
		if(up->state != Moribund)
		if(up->delaysched < 20
		|| palloc.Lock.p == up
		|| procalloc.Lock.p == up){
			up->delaysched++;
			delayedscheds++;
			return;
		}

		up->delaysched = 0;

		splhi();

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)){
			/* resumed: we were rescheduled onto some cpu */
			procrestore(up);
			spllo();
			return;
		}
		gotolabel(&m->sched);
	}
	/* dispatch half: runs with up == nil, at splhi */
	p = runproc();
	if(!p->edf){
		updatecpu(p);
		p->priority = reprioritize(p);
	}
	/* a cooperatively handed-off proc keeps the donor's time slice */
	if(p != m->readied)
		m->schedticks = m->ticks + HZ/10;
	m->readied = 0;
	up = p;
	up->state = Running;
	up->mach = MACHP(m->machno);
	m->proc = up;
	mmuswitch(up);
	gotolabel(&up->sched);
}
/*
 * Nonzero when any run queue is non-empty (runvec is the
 * per-priority occupancy bitmap maintained by queueproc/dequeueproc).
 */
int
anyready(void)
{
	return runvec;
}
  162. int
  163. anyhigher(void)
  164. {
  165. return runvec & ~((1<<(up->priority+1))-1);
  166. }
/*
 * here once per clock tick to see if we should resched
 */
void
hzsched(void)
{
	/* once a second, rebalance will reprioritize ready procs */
	if(m->machno == 0)
		rebalance();

	/* unless preempted, get to run for at least 100ms */
	if(anyhigher()
	|| (!up->fixedpri && m->ticks > m->schedticks && anyready())){
		m->readied = nil;	/* avoid cooperative scheduling */
		/* flag, don't sched here: the trap return path does the switch */
		up->delaysched++;
	}
}
/*
 * here at the end of non-clock interrupts to see if we should preempt the
 * current process. Returns 1 if preempted, 0 otherwise.
 *
 * up->preempted guards against recursive preemption while the
 * first sched() is still in progress.
 */
int
preempted(void)
{
	if(up && up->state == Running)
	if(up->preempted == 0)
	if(anyhigher())
	if(!active.exiting){
		m->readied = nil;	/* avoid cooperative scheduling */
		up->preempted = 1;
		sched();
		splhi();
		up->preempted = 0;
		return 1;
	}
	return 0;
}
/*
 * Update the cpu time average for this particular process,
 * which is about to change from up -> not up or vice versa.
 * p->lastupdate is the last time an updatecpu happened.
 *
 * The cpu time average is a decaying average that lasts
 * about D clock ticks. D is chosen to be approximately
 * the cpu time of a cpu-intensive "quick job". A job has to run
 * for approximately D clock ticks before we home in on its
 * actual cpu usage. Thus if you manage to get in and get out
 * quickly, you won't be penalized during your burst. Once you
 * start using your share of the cpu for more than about D
 * clock ticks though, your p->cpu hits 1000 (1.0) and you end up
 * below all the other quick jobs. Interactive tasks, because
 * they basically always use less than their fair share of cpu,
 * will be rewarded.
 *
 * If the process has not been running, then we want to
 * apply the filter
 *
 * cpu = cpu * (D-1)/D
 *
 * n times, yielding
 *
 * cpu = cpu * ((D-1)/D)^n
 *
 * but D is big enough that this is approximately
 *
 * cpu = cpu * (D-n)/D
 *
 * so we use that instead.
 *
 * If the process has been running, we apply the filter to
 * 1 - cpu, yielding a similar equation. Note that cpu is
 * stored in fixed point (* 1000).
 *
 * Updatecpu must be called before changing up, in order
 * to maintain accurate cpu usage statistics. It can be called
 * at any time to bring the stats for a given proc up-to-date.
 */
void
updatecpu(Proc *p)
{
	int n, t, ocpu;
	int D = schedgain*HZ*Scaling;

	/* real-time (edf) procs are not subject to decay scheduling */
	if(p->edf)
		return;

	t = MACHP(0)->ticks*Scaling + Scaling/2;
	n = t - p->lastupdate;
	p->lastupdate = t;

	if(n == 0)
		return;
	if(n > D)
		n = D;	/* beyond D ticks the average has fully converged */

	ocpu = p->cpu;
	if(p != up)
		p->cpu = (ocpu*(D-n))/D;	/* idle: decay toward 0 */
	else{
		/* running: decay (1 - cpu) toward 0, i.e. cpu toward 1000 */
		t = 1000 - ocpu;
		t = (t*(D-n))/D;
		p->cpu = 1000 - t;
	}

//iprint("pid %d %s for %d cpu %d -> %d\n", p->pid,p==up?"active":"inactive",n, ocpu,p->cpu);
}
/*
 * On average, p has used p->cpu of a cpu recently.
 * Its fair share is conf.nmach/m->load of a cpu. If it has been getting
 * too much, penalize it. If it has been getting not enough, reward it.
 * I don't think you can get much more than your fair share that
 * often, so most of the queues are for using less. Having a priority
 * of 3 means you're just right. Having a higher priority (up to p->basepri)
 * means you're not using as much as you could.
 *
 * Returns the new priority; does not store it.
 */
int
reprioritize(Proc *p)
{
	int fairshare, n, load, ratio;

	load = MACHP(0)->load;
	if(load == 0)
		return p->basepri;

	/*
	 * fairshare = 1.000 * conf.nproc * 1.000/load,
	 * except the decimal point is moved three places
	 * on both load and fairshare.
	 */
	fairshare = (conf.nmach*1000*1000)/load;
	n = p->cpu;
	if(n == 0)
		n = 1;
	/* rounded ratio of fair share to actual usage, capped at basepri */
	ratio = (fairshare+n/2) / n;
	if(ratio > p->basepri)
		ratio = p->basepri;
	if(ratio < 0)
		panic("reprioritize");
//iprint("pid %d cpu %d load %d fair %d pri %d\n", p->pid, p->cpu, load, fairshare, ratio);
	return ratio;
}
  300. /*
  301. * add a process to a scheduling queue
  302. */
  303. void
  304. queueproc(Schedq *rq, Proc *p)
  305. {
  306. int pri;
  307. pri = rq - runq;
  308. lock(runq);
  309. p->priority = pri;
  310. p->rnext = 0;
  311. if(rq->tail)
  312. rq->tail->rnext = p;
  313. else
  314. rq->head = p;
  315. rq->tail = p;
  316. rq->n++;
  317. nrdy++;
  318. runvec |= 1<<pri;
  319. unlock(runq);
  320. }
/*
 * try to remove a process from a scheduling queue (called splhi)
 *
 * Returns tp on success; nil if the runq lock was contended or
 * tp is no longer dequeueable (gone from the queue, or its saved
 * state is not yet complete). Callers must be prepared to retry.
 */
Proc*
dequeueproc(Schedq *rq, Proc *tp)
{
	Proc *l, *p;

	/* non-blocking: we are at splhi, so never spin on runq */
	if(!canlock(runq))
		return nil;

	/*
	 * the queue may have changed before we locked runq,
	 * refind the target process.
	 */
	l = 0;
	for(p = rq->head; p; p = p->rnext){
		if(p == tp)
			break;
		l = p;
	}

	/*
	 * p->mach==0 only when process state is saved
	 */
	if(p == 0 || p->mach){
		unlock(runq);
		return nil;
	}
	if(p->rnext == 0)
		rq->tail = l;
	if(l)
		l->rnext = p->rnext;
	else
		rq->head = p->rnext;
	if(rq->head == nil)
		runvec &= ~(1<<(rq-runq));	/* queue emptied: clear its bit */
	rq->n--;
	nrdy--;
	if(p->state != Ready)
		print("dequeueproc %s %lud %s\n", p->text, p->pid, statename[p->state]);

	unlock(runq);
	return p;
}
/*
 * ready(p) picks a new priority for a process and sticks it in the
 * runq for that priority.
 */
void
ready(Proc *p)
{
	int s, pri;
	Schedq *rq;
	void (*pt)(Proc*, int, vlong);

	s = splhi();
	/* real-time procs are queued by the edf machinery instead */
	if(edfready(p)){
		splx(s);
		return;
	}

	if(up != p)
		m->readied = p;	/* group scheduling */

	updatecpu(p);
	pri = reprioritize(p);
	p->priority = pri;
	rq = &runq[pri];
	p->state = Ready;
	queueproc(rq, p);
	pt = proctrace;
	if(pt)
		pt(p, SReady, 0);
	splx(s);
}
  390. /*
  391. * yield the processor and drop our priority
  392. */
  393. void
  394. yield(void)
  395. {
  396. if(anyready()){
  397. /* pretend we just used 1/2 tick */
  398. up->lastupdate -= Scaling/2;
  399. sched();
  400. }
  401. }
/*
 * recalculate priorities once a second. We need to do this
 * since priorities will otherwise only be recalculated when
 * the running process blocks.
 */
ulong balancetime;	/* tick stamp of the last rebalance */

static void
rebalance(void)
{
	int pri, npri, t, x;
	Schedq *rq;
	Proc *p;

	/* throttle to once per second */
	t = m->ticks;
	if(t - balancetime < HZ)
		return;
	balancetime = t;

	for(pri=0, rq=runq; pri<Npriq; pri++, rq++){
another:
		p = rq->head;
		if(p == nil)
			continue;
		/* only touch procs whose affinity is this processor */
		if(p->mp != MACHP(m->machno))
			continue;
		if(pri == p->basepri)
			continue;
		updatecpu(p);
		npri = reprioritize(p);
		if(npri != pri){
			x = splhi();
			p = dequeueproc(rq, p);
			if(p)
				queueproc(&runq[npri], p);
			splx(x);
			/* head changed; re-examine this queue from the top */
			goto another;
		}
	}
}
/*
 * pick a process to run
 *
 * Called from sched() with up == nil. May spin (at spllo, so
 * interrupts still land) until something becomes runnable.
 */
Proc*
runproc(void)
{
	Schedq *rq;
	Proc *p;
	ulong start, now;
	int i;
	void (*pt)(Proc*, int, vlong);

	start = perfticks();

	/* cooperative scheduling until the clock ticks */
	/* (take the proc readied on this cpu directly, unless a
	 * real-time queue has work) */
	if((p=m->readied) && p->mach==0 && p->state==Ready
	&& runq[Nrq-1].head == nil && runq[Nrq-2].head == nil){
		skipscheds++;
		rq = &runq[p->priority];
		goto found;
	}

	preempts++;

loop:
	/*
	 * find a process that last ran on this processor (affinity),
	 * or one that hasn't moved in a while (load balancing). Every
	 * time around the loop affinity goes down.
	 */
	spllo();
	for(i = 0;; i++){
		/*
		 * find the highest priority target process that this
		 * processor can run given affinity constraints.
		 *
		 */
		for(rq = &runq[Nrq-1]; rq >= runq; rq--){
			for(p = rq->head; p; p = p->rnext){
				if(p->mp == nil || p->mp == MACHP(m->machno)
				|| (!p->wired && i > 0))
					goto found;
			}
		}

		/* waste time or halt the CPU */
		idlehands();

		/* remember how much time we're here */
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	/* dequeue may fail (lock contention or proc taken); start over */
	p = dequeueproc(rq, p);
	if(p == nil)
		goto loop;

	p->state = Scheding;
	p->mp = MACHP(m->machno);

	if(edflock(p)){
		edfrun(p, rq == &runq[PriEdf]);	/* start deadline timer and do admin */
		edfunlock();
	}
	pt = proctrace;
	if(pt)
		pt(p, SRun, 0);
	return p;
}
  502. int
  503. canpage(Proc *p)
  504. {
  505. int ok = 0;
  506. splhi();
  507. lock(runq);
  508. /* Only reliable way to see if we are Running */
  509. if(p->mach == 0) {
  510. p->newtlb = 1;
  511. ok = 1;
  512. }
  513. unlock(runq);
  514. spllo();
  515. return ok;
  516. }
/*
 * Allocate and initialize a fresh Proc from the free list,
 * blocking in resrcwait until one is available. The new proc
 * is returned in Scheding state with default ("normal")
 * scheduling parameters and no address-space segments.
 */
Proc*
newproc(void)
{
	char msg[64];
	Proc *p;

	lock(&procalloc);
	for(;;) {
		if(p = procalloc.free)
			break;
		/* none free: drop the lock and wait for pexit to recycle one */
		snprint(msg, sizeof msg, "no procs; %s forking",
			up? up->text: "kernel");
		unlock(&procalloc);
		resrcwait(msg);
		lock(&procalloc);
	}
	procalloc.free = p->qnext;
	unlock(&procalloc);

	p->state = Scheding;
	p->psstate = "New";
	p->mach = 0;
	p->qnext = 0;
	p->nchild = 0;
	p->nwait = 0;
	p->waitq = 0;
	p->parent = 0;
	p->pgrp = 0;
	p->egrp = 0;
	p->fgrp = 0;
	p->rgrp = 0;
	p->pdbg = 0;
	p->fpstate = FPinit;
	p->kp = 0;
	p->procctl = 0;
	p->notepending = 0;
	p->ureg = 0;
	p->privatemem = 0;
	p->noswap = 0;
	p->errstr = p->errbuf0;
	p->syserrstr = p->errbuf1;
	p->errbuf0[0] = '\0';
	p->errbuf1[0] = '\0';
	p->nlocks.ref = 0;
	p->delaysched = 0;
	p->trace = 0;
	kstrdup(&p->user, "*nouser");
	kstrdup(&p->text, "*notext");
	kstrdup(&p->args, "");
	p->nargs = 0;
	p->setargs = 0;
	memset(p->seg, 0, sizeof p->seg);
	p->pid = incref(&pidalloc);
	pidhash(p);
	p->noteid = incref(&noteidalloc);
	if(p->pid==0 || p->noteid==0)
		panic("pidalloc");
	/* kernel stacks are retained across reuse of the Proc slot */
	if(p->kstack == 0)
		p->kstack = smalloc(KSTACK);

	/* sched params */
	p->mp = 0;
	p->wired = 0;
	procpriority(p, PriNormal, 0);
	p->cpu = 0;
	p->lastupdate = MACHP(0)->ticks*Scaling;
	p->edf = nil;

	return p;
}
/*
 * wire this proc to a machine
 *
 * bm < 0 asks for an automatic choice: the machine with the
 * fewest procs already wired to it. Otherwise bm is taken
 * modulo the number of machines.
 */
void
procwired(Proc *p, int bm)
{
	Proc *pp;
	int i;
	char nwired[MAXMACH];
	Mach *wm;

	if(bm < 0){
		/* pick a machine to wire to */
		memset(nwired, 0, sizeof(nwired));
		p->wired = 0;
		pp = proctab(0);
		/* census of existing wirings (live procs only) */
		for(i=0; i<conf.nproc; i++, pp++){
			wm = pp->wired;
			if(wm && pp->pid)
				nwired[wm->machno]++;
		}
		bm = 0;
		for(i=0; i<conf.nmach; i++)
			if(nwired[i] < nwired[bm])
				bm = i;
	} else {
		/* use the virtual machine requested */
		bm = bm % conf.nmach;
	}

	p->wired = MACHP(bm);
	p->mp = p->wired;	/* affinity follows the wiring immediately */
}
  614. void
  615. procpriority(Proc *p, int pri, int fixed)
  616. {
  617. if(pri >= Npriq)
  618. pri = Npriq - 1;
  619. else if(pri < 0)
  620. pri = 0;
  621. p->basepri = pri;
  622. p->priority = pri;
  623. if(fixed){
  624. p->fixedpri = 1;
  625. } else {
  626. p->fixedpri = 0;
  627. }
  628. }
void
procinit0(void)		/* bad planning - clashes with devproc.c */
{
	Proc *p;
	int i;

	/* one contiguous arena of conf.nproc Procs, threaded into a free list */
	procalloc.free = xalloc(conf.nproc*sizeof(Proc));
	if(procalloc.free == nil){
		xsummary();
		panic("cannot allocate %lud procs (%ludMB)\n", conf.nproc, conf.nproc*sizeof(Proc)/(1024*1024));
	}
	procalloc.arena = procalloc.free;

	p = procalloc.free;
	for(i=0; i<conf.nproc-1; i++,p++)
		p->qnext = p+1;
	p->qnext = 0;	/* last entry terminates the list */
}
/*
 * sleep if a condition is not true. Another process will
 * awaken us after it sets the condition. When we awaken
 * the condition may no longer be true.
 *
 * we lock both the process and the rendezvous to keep r->p
 * and p->r synchronized.
 *
 * Raises Eintr if a note arrived before or during the sleep.
 */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	int s;
	void (*pt)(Proc*, int, vlong);

	s = splhi();

	/* sleeping while holding locks is a deadlock hazard; complain */
	if(up->nlocks.ref)
		print("process %lud sleeps with %lud locks held, last lock %#p locked at pc %#lux, sleep called from %#p\n",
			up->pid, up->nlocks.ref, up->lastlock, up->lastlock->pc, getcallerpc(&r));
	lock(r);
	lock(&up->rlock);
	if(r->p){
		print("double sleep called from %#p, %lud %lud\n", getcallerpc(&r), r->p->pid, up->pid);
		dumpstack();
	}

	/*
	 * Wakeup only knows there may be something to do by testing
	 * r->p in order to get something to lock on.
	 * Flush that information out to memory in case the sleep is
	 * committed.
	 */
	r->p = up;

	if((*f)(arg) || up->notepending){
		/*
		 * if condition happened or a note is pending
		 * never mind
		 */
		r->p = nil;
		unlock(&up->rlock);
		unlock(r);
	} else {
		/*
		 * now we are committed to
		 * change state and call scheduler
		 */
		pt = proctrace;
		if(pt)
			pt(up, SSleep, 0);
		up->state = Wakeme;
		up->r = r;

		/* statistics */
		m->cs++;

		/* the guts of sched(), inlined: see the comment there */
		procsave(up);
		if(setlabel(&up->sched)) {
			/*
			 * here when the process is awakened
			 */
			procrestore(up);
			spllo();
		} else {
			/*
			 * here to go to sleep (i.e. stop Running)
			 */
			unlock(&up->rlock);
			unlock(r);
			gotolabel(&m->sched);
		}
	}

	if(up->notepending) {
		up->notepending = 0;
		splx(s);
		if(up->procctl == Proc_exitme && up->closingfgrp)
			forceclosefgrp();
		error(Eintr);
	}

	splx(s);
}
  720. static int
  721. tfn(void *arg)
  722. {
  723. return up->trend == nil || up->tfn(arg);
  724. }
/*
 * Timer handler for tsleep: clear the proc's pending-timeout
 * rendezvous and wake it. Clearing p->trend first is what makes
 * tfn() return true for the sleeper.
 */
void
twakeup(Ureg*, Timer *t)
{
	Proc *p;
	Rendez *trend;

	p = t->ta;
	trend = p->trend;
	p->trend = 0;
	if(trend)
		wakeup(trend);
}
/*
 * sleep(r, fn, arg) with a timeout of ms milliseconds: a
 * relative kernel timer (twakeup) wakes us if fn never
 * becomes true. The timer is torn down on every exit path,
 * including error propagation out of sleep.
 */
void
tsleep(Rendez *r, int (*fn)(void*), void *arg, ulong ms)
{
	if (up->tt){
		/* a stale timer should not be active here */
		print("tsleep: timer active: mode %d, tf %#p\n", up->tmode, up->tf);
		timerdel(up);
	}
	up->tns = MS2NS(ms);
	up->tf = twakeup;
	up->tmode = Trelative;
	up->ta = up;
	up->trend = r;
	up->tfn = fn;
	timeradd(up);

	if(waserror()){
		timerdel(up);
		nexterror();
	}
	sleep(r, tfn, arg);
	if (up->tt)
		timerdel(up);
	up->twhen = 0;
	poperror();
}
/*
 * Expects that only one process can call wakeup for any given Rendez.
 * We hold both locks to ensure that r->p and p->r remain consistent.
 * Richard Miller has a better solution that doesn't require both to
 * be held simultaneously, but I'm a paranoid - presotto.
 *
 * Returns the proc that was awakened, or nil if nobody was waiting.
 */
Proc*
wakeup(Rendez *r)
{
	Proc *p;
	int s;

	s = splhi();

	lock(r);
	p = r->p;

	if(p != nil){
		lock(&p->rlock);
		/* a sleeper on r must be in Wakeme and point back at r */
		if(p->state != Wakeme || p->r != r){
			iprint("%p %p %d\n", p->r, r, p->state);
			panic("wakeup: state");
		}
		r->p = nil;
		p->r = nil;
		ready(p);
		unlock(&p->rlock);
	}
	unlock(r);

	splx(s);

	return p;
}
/*
 * if waking a sleeping process, this routine must hold both
 * p->rlock and r->lock. However, it can't know them in
 * the same order as wakeup causing a possible lock ordering
 * deadlock. We break the deadlock by giving up the p->rlock
 * lock if we can't get the r->lock and retrying.
 *
 * Returns 1 if the note text was recorded, 0 if the note
 * buffer was full (the proc is still marked notepending).
 */
int
postnote(Proc *p, int dolock, char *n, int flag)
{
	int s, ret;
	Rendez *r;
	Proc *d, **l;

	if(dolock)
		qlock(&p->debug);

	/* a non-user note to an unnotified proc supersedes queued notes */
	if(flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;

	ret = 0;
	if(p->nnote < NNOTE) {
		strcpy(p->note[p->nnote].msg, n);
		p->note[p->nnote++].flag = flag;
		ret = 1;
	}
	p->notepending = 1;
	if(dolock)
		qunlock(&p->debug);

	/* this loop is to avoid lock ordering problems. */
	for(;;){
		s = splhi();
		lock(&p->rlock);
		r = p->r;

		/* waiting for a wakeup? */
		if(r == nil)
			break;	/* no */

		/* try for the second lock */
		if(canlock(r)){
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);
			p->r = nil;
			r->p = nil;
			ready(p);
			unlock(r);
			break;
		}

		/* give other process time to get out of critical section and try again */
		unlock(&p->rlock);
		splx(s);
		sched();
	}
	unlock(&p->rlock);
	splx(s);

	if(p->state != Rendezvous)
		return ret;

	/* Try and pull out of a rendezvous */
	lock(p->rgrp);
	if(p->state == Rendezvous) {
		p->rendval = ~0;	/* rendezvous fails with ~0 */
		/* unlink p from its rendezvous hash chain */
		l = &REND(p->rgrp, p->rendtag);
		for(d = *l; d; d = d->rendhash) {
			if(d == p) {
				*l = p->rendhash;
				break;
			}
			l = &d->rendhash;
		}
		ready(p);
	}
	unlock(p->rgrp);
	return ret;
}
/*
 * weird thing: keep at most NBROKEN around
 *
 * FIFO of Broken procs kept for post-mortem debugging; the
 * oldest is released (made ready to die) when a new one arrives.
 */
#define NBROKEN 4
struct
{
	QLock;
	int	n;		/* number of entries in p */
	Proc	*p[NBROKEN];	/* oldest first */
}broken;
/*
 * Park p in the broken-process list and give up the cpu.
 * NOTE(review): the edfstop call below acts on up, not p —
 * looks like callers always pass p == up; confirm before reuse.
 */
void
addbroken(Proc *p)
{
	qlock(&broken);
	if(broken.n == NBROKEN) {
		/* list full: evict the oldest broken proc */
		ready(broken.p[0]);
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
		--broken.n;
	}
	broken.p[broken.n++] = p;
	qunlock(&broken);

	edfstop(up);
	p->state = Broken;
	p->psstate = 0;
	sched();
}
  885. void
  886. unbreak(Proc *p)
  887. {
  888. int b;
  889. qlock(&broken);
  890. for(b=0; b < broken.n; b++)
  891. if(broken.p[b] == p) {
  892. broken.n--;
  893. memmove(&broken.p[b], &broken.p[b+1],
  894. sizeof(Proc*)*(NBROKEN-(b+1)));
  895. ready(p);
  896. break;
  897. }
  898. qunlock(&broken);
  899. }
  900. int
  901. freebroken(void)
  902. {
  903. int i, n;
  904. qlock(&broken);
  905. n = broken.n;
  906. for(i=0; i<n; i++) {
  907. ready(broken.p[i]);
  908. broken.p[i] = 0;
  909. }
  910. broken.n = 0;
  911. qunlock(&broken);
  912. return n;
  913. }
/*
 * Terminate the current process: release its resources, leave a
 * wait record for the parent (unless freemem is 0, in which case
 * the proc is parked Broken for debugging), orphan any children's
 * pending waits, and finally sched() away as Moribund — schedinit
 * completes the teardown. Never returns.
 */
void
pexit(char *exitstr, int freemem)
{
	Proc *p;
	Segment **s, **es;
	long utime, stime;
	Waitq *wq, *f, *next;
	Fgrp *fgrp;
	Egrp *egrp;
	Rgrp *rgrp;
	Pgrp *pgrp;
	Chan *dot;
	void (*pt)(Proc*, int, vlong);

	up->alarm = 0;
	if (up->tt)
		timerdel(up);
	pt = proctrace;
	if(pt)
		pt(up, SDead, 0);

	/* nil out all the resources under lock (free later) */
	qlock(&up->debug);
	fgrp = up->fgrp;
	up->fgrp = nil;
	egrp = up->egrp;
	up->egrp = nil;
	rgrp = up->rgrp;
	up->rgrp = nil;
	pgrp = up->pgrp;
	up->pgrp = nil;
	dot = up->dot;
	up->dot = nil;
	qunlock(&up->debug);

	/* drop the references outside the debug lock */
	if(fgrp)
		closefgrp(fgrp);
	if(egrp)
		closeegrp(egrp);
	if(rgrp)
		closergrp(rgrp);
	if(dot)
		cclose(dot);
	if(pgrp)
		closepgrp(pgrp);

	/*
	 * if not a kernel process and have a parent,
	 * do some housekeeping.
	 */
	if(up->kp == 0) {
		p = up->parent;
		if(p == 0) {
			if(exitstr == 0)
				exitstr = "unknown";
			panic("boot process died: %s", exitstr);
		}

		/* smalloc may error; we must not unwind past this point */
		while(waserror())
			;

		wq = smalloc(sizeof(Waitq));
		poperror();

		wq->w.pid = up->pid;
		utime = up->time[TUser] + up->time[TCUser];
		stime = up->time[TSys] + up->time[TCSys];
		wq->w.time[TUser] = tk2ms(utime);
		wq->w.time[TSys] = tk2ms(stime);
		wq->w.time[TReal] = tk2ms(MACHP(0)->ticks - up->time[TReal]);
		if(exitstr && exitstr[0])
			snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
		else
			wq->w.msg[0] = '\0';

		lock(&p->exl);
		/*
		 * Check that parent is still alive.
		 */
		if(p->pid == up->parentpid && p->state != Broken) {
			p->nchild--;
			p->time[TCUser] += utime;
			p->time[TCSys] += stime;
			/*
			 * If there would be more than 128 wait records
			 * processes for my parent, then don't leave a wait
			 * record behind. This helps prevent badly written
			 * daemon processes from accumulating lots of wait
			 * records.
			 */
			if(p->nwait < 128) {
				wq->next = p->waitq;
				p->waitq = wq;
				p->nwait++;
				wq = nil;
				wakeup(&p->waitr);
			}
		}
		unlock(&p->exl);
		if(wq)
			free(wq);	/* record was not handed to the parent */
	}

	if(!freemem)
		addbroken(up);	/* park for debugging; resumes here when freed */

	qlock(&up->seglock);
	es = &up->seg[NSEG];
	for(s = up->seg; s < es; s++) {
		if(*s) {
			putseg(*s);
			*s = 0;
		}
	}
	qunlock(&up->seglock);

	lock(&up->exl);		/* Prevent my children from leaving waits */
	pidunhash(up);
	up->pid = 0;
	wakeup(&up->waitr);
	unlock(&up->exl);

	/* discard wait records our own children left us */
	for(f = up->waitq; f; f = next) {
		next = f->next;
		free(f);
	}

	/* release debuggers */
	qlock(&up->debug);
	if(up->pdbg) {
		wakeup(&up->pdbg->sleep);
		up->pdbg = 0;
	}
	qunlock(&up->debug);

	/* Sched must not loop for these locks */
	/* (held across the final sched; schedinit releases them) */
	lock(&procalloc);
	lock(&palloc);

	edfstop(up);
	up->state = Moribund;
	sched();
	panic("pexit");
}
  1043. int
  1044. haswaitq(void *x)
  1045. {
  1046. Proc *p;
  1047. p = (Proc *)x;
  1048. return p->waitq != 0;
  1049. }
/*
 * Wait for a child to exit: block until a wait record is available,
 * copy it into w (if non-nil) and return the child's pid.
 * Only one pwait per process may be in progress at a time (qwaitr);
 * a second concurrent caller gets Einuse.
 */
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	if(!canqlock(&up->qwaitr))
		error(Einuse);

	/* make sure qwaitr is released if anything below errors */
	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	/* no living children and no queued wait records: nothing to wait for */
	if(up->nchild == 0 && up->waitq == 0) {
		unlock(&up->exl);
		error(Enochild);
	}
	unlock(&up->exl);

	/* exiting children wakeup(&up->waitr) after queueing their record */
	sleep(&up->waitr, haswaitq, up);

	/* dequeue the first wait record under exl */
	lock(&up->exl);
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
  1081. Proc*
  1082. proctab(int i)
  1083. {
  1084. return &procalloc.arena[i];
  1085. }
  1086. void
  1087. dumpaproc(Proc *p)
  1088. {
  1089. ulong bss;
  1090. char *s;
  1091. if(p == 0)
  1092. return;
  1093. bss = 0;
  1094. if(p->seg[BSEG])
  1095. bss = p->seg[BSEG]->top;
  1096. s = p->psstate;
  1097. if(s == 0)
  1098. s = statename[p->state];
  1099. print("%3lud:%10s pc %8lux dbgpc %8lux %8s (%s) ut %ld st %ld bss %lux qpc %lux nl %lud nd %lud lpc %lux pri %lud\n",
  1100. p->pid, p->text, p->pc, dbgpc(p), s, statename[p->state],
  1101. p->time[0], p->time[1], bss, p->qpc, p->nlocks.ref, p->delaysched, p->lastlock ? p->lastlock->pc : 0, p->priority);
  1102. }
  1103. void
  1104. procdump(void)
  1105. {
  1106. int i;
  1107. Proc *p;
  1108. if(up)
  1109. print("up %lud\n", up->pid);
  1110. else
  1111. print("no current process\n");
  1112. for(i=0; i<conf.nproc; i++) {
  1113. p = &procalloc.arena[i];
  1114. if(p->state == Dead)
  1115. continue;
  1116. dumpaproc(p);
  1117. }
  1118. }
  1119. /*
  1120. * wait till all processes have flushed their mmu
  1121. * state about segement s
  1122. */
  1123. void
  1124. procflushseg(Segment *s)
  1125. {
  1126. int i, ns, nm, nwait;
  1127. Proc *p;
  1128. /*
  1129. * tell all processes with this
  1130. * segment to flush their mmu's
  1131. */
  1132. nwait = 0;
  1133. for(i=0; i<conf.nproc; i++) {
  1134. p = &procalloc.arena[i];
  1135. if(p->state == Dead)
  1136. continue;
  1137. for(ns = 0; ns < NSEG; ns++)
  1138. if(p->seg[ns] == s){
  1139. p->newtlb = 1;
  1140. for(nm = 0; nm < conf.nmach; nm++){
  1141. if(MACHP(nm)->proc == p){
  1142. MACHP(nm)->flushmmu = 1;
  1143. nwait++;
  1144. }
  1145. }
  1146. break;
  1147. }
  1148. }
  1149. if(nwait == 0)
  1150. return;
  1151. /*
  1152. * wait for all processors to take a clock interrupt
  1153. * and flush their mmu's
  1154. */
  1155. for(nm = 0; nm < conf.nmach; nm++)
  1156. if(MACHP(nm) != m)
  1157. while(MACHP(nm)->flushmmu)
  1158. sched();
  1159. }
  1160. void
  1161. scheddump(void)
  1162. {
  1163. Proc *p;
  1164. Schedq *rq;
  1165. for(rq = &runq[Nrq-1]; rq >= runq; rq--){
  1166. if(rq->head == 0)
  1167. continue;
  1168. print("rq%ld:", rq-runq);
  1169. for(p = rq->head; p; p = p->rnext)
  1170. print(" %lud(%lud)", p->pid, m->ticks - p->readytime);
  1171. print("\n");
  1172. delay(150);
  1173. }
  1174. print("nrdy %d\n", nrdy);
  1175. }
/*
 * Create and start a kernel process running func(arg).
 * The child inherits the caller's name space roots (slash, dot)
 * and note state, runs as eve, never swaps, and joins a single
 * lazily-created process group shared by all kernel processes.
 */
void
kproc(char *name, void (*func)(void *), void *arg)
{
	Proc *p;
	static Pgrp *kpgrp;

	p = newproc();
	p->psstate = 0;
	p->procmode = 0640;
	p->kp = 1;		/* mark as kernel process: no parent housekeeping at exit */
	p->noswap = 1;

	p->fpsave = up->fpsave;
	p->scallnr = up->scallnr;
	p->s = up->s;
	p->nerrlab = 0;
	p->slash = up->slash;
	p->dot = up->dot;
	if(p->dot)
		incref(p->dot);

	memmove(p->note, up->note, sizeof(p->note));
	p->nnote = up->nnote;
	p->notified = 0;
	p->lastnote = up->lastnote;
	p->notify = up->notify;
	p->ureg = 0;
	p->dbgreg = 0;

	procpriority(p, PriKproc, 0);

	kprocchild(p, func, arg);

	kstrdup(&p->user, eve);
	kstrdup(&p->text, name);
	/* all kernel processes share one process group, created on first use */
	if(kpgrp == 0)
		kpgrp = newpgrp();
	p->pgrp = kpgrp;
	incref(kpgrp);

	memset(p->time, 0, sizeof(p->time));
	p->time[TReal] = MACHP(0)->ticks;
	ready(p);

	/*
	 * since the bss/data segments are now shareable,
	 * any mmu info about this process is now stale
	 * and has to be discarded.
	 */
	p->newtlb = 1;
	flushmmu();
}
  1220. /*
  1221. * called splhi() by notify(). See comment in notify for the
  1222. * reasoning.
  1223. */
  1224. void
  1225. procctl(Proc *p)
  1226. {
  1227. char *state;
  1228. ulong s;
  1229. switch(p->procctl) {
  1230. case Proc_exitbig:
  1231. spllo();
  1232. pexit("Killed: Insufficient physical memory", 1);
  1233. case Proc_exitme:
  1234. spllo(); /* pexit has locks in it */
  1235. pexit("Killed", 1);
  1236. case Proc_traceme:
  1237. if(p->nnote == 0)
  1238. return;
  1239. /* No break */
  1240. case Proc_stopme:
  1241. p->procctl = 0;
  1242. state = p->psstate;
  1243. p->psstate = "Stopped";
  1244. /* free a waiting debugger */
  1245. s = spllo();
  1246. qlock(&p->debug);
  1247. if(p->pdbg) {
  1248. wakeup(&p->pdbg->sleep);
  1249. p->pdbg = 0;
  1250. }
  1251. qunlock(&p->debug);
  1252. splhi();
  1253. p->state = Stopped;
  1254. sched();
  1255. p->psstate = state;
  1256. splx(s);
  1257. return;
  1258. }
  1259. }
  1260. #include "errstr.h"
/*
 * Raise an error: record err in up->errstr and unwind to the most
 * recent waserror().  Must not be called with more than NERR
 * nested error labels outstanding.
 */
void
error(char *err)
{
	spllo();

	assert(up->nerrlab < NERR);
	kstrcpy(up->errstr, err, ERRMAX);
	/* record the raise point in the spare top slot, then unwind */
	setlabel(&up->errlab[NERR-1]);
	nexterror();
}
  1270. void
  1271. nexterror(void)
  1272. {
  1273. gotolabel(&up->errlab[--up->nerrlab]);
  1274. }
  1275. void
  1276. exhausted(char *resource)
  1277. {
  1278. char buf[ERRMAX];
  1279. sprint(buf, "no free %s", resource);
  1280. iprint("%s\n", buf);
  1281. error(buf);
  1282. }
  1283. void
  1284. killbig(char *why)
  1285. {
  1286. int i;
  1287. Segment *s;
  1288. ulong l, max;
  1289. Proc *p, *ep, *kp;
  1290. max = 0;
  1291. kp = 0;
  1292. ep = procalloc.arena+conf.nproc;
  1293. for(p = procalloc.arena; p < ep; p++) {
  1294. if(p->state == Dead || p->kp)
  1295. continue;
  1296. l = 0;
  1297. for(i=1; i<NSEG; i++) {
  1298. s = p->seg[i];
  1299. if(s != 0)
  1300. l += s->top - s->base;
  1301. }
  1302. if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
  1303. kp = p;
  1304. max = l;
  1305. }
  1306. }
  1307. print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
  1308. for(p = procalloc.arena; p < ep; p++) {
  1309. if(p->state == Dead || p->kp)
  1310. continue;
  1311. if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
  1312. p->procctl = Proc_exitbig;
  1313. }
  1314. kp->procctl = Proc_exitbig;
  1315. for(i = 0; i < NSEG; i++) {
  1316. s = kp->seg[i];
  1317. if(s != 0 && canqlock(&s->lk)) {
  1318. mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
  1319. qunlock(&s->lk);
  1320. }
  1321. }
  1322. }
  1323. /*
  1324. * change ownership to 'new' of all processes owned by 'old'. Used when
  1325. * eve changes.
  1326. */
  1327. void
  1328. renameuser(char *old, char *new)
  1329. {
  1330. Proc *p, *ep;
  1331. ep = procalloc.arena+conf.nproc;
  1332. for(p = procalloc.arena; p < ep; p++)
  1333. if(p->user!=nil && strcmp(old, p->user)==0)
  1334. kstrdup(&p->user, new);
  1335. }
  1336. /*
  1337. * time accounting called by clock() splhi'd
  1338. */
  1339. void
  1340. accounttime(void)
  1341. {
  1342. Proc *p;
  1343. ulong n, per;
  1344. static ulong nrun;
  1345. p = m->proc;
  1346. if(p) {
  1347. nrun++;
  1348. p->time[p->insyscall]++;
  1349. }
  1350. /* calculate decaying duty cycles */
  1351. n = perfticks();
  1352. per = n - m->perf.last;
  1353. m->perf.last = n;
  1354. per = (m->perf.period*(HZ-1) + per)/HZ;
  1355. if(per != 0)
  1356. m->perf.period = per;
  1357. m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;
  1358. m->perf.inidle = 0;
  1359. m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;
  1360. m->perf.inintr = 0;
  1361. /* only one processor gets to compute system load averages */
  1362. if(m->machno != 0)
  1363. return;
  1364. /*
  1365. * calculate decaying load average.
  1366. * if we decay by (n-1)/n then it takes
  1367. * n clock ticks to go from load L to .36 L once
  1368. * things quiet down. it takes about 5 n clock
  1369. * ticks to go to zero. so using HZ means this is
  1370. * approximately the load over the last second,
  1371. * with a tail lasting about 5 seconds.
  1372. */
  1373. n = nrun;
  1374. nrun = 0;
  1375. n = (nrdy+n)*1000;
  1376. m->load = (m->load*(HZ-1)+n)/HZ;
  1377. }
  1378. static void
  1379. pidhash(Proc *p)
  1380. {
  1381. int h;
  1382. h = p->pid % nelem(procalloc.ht);
  1383. lock(&procalloc);
  1384. p->pidhash = procalloc.ht[h];
  1385. procalloc.ht[h] = p;
  1386. unlock(&procalloc);
  1387. }
  1388. static void
  1389. pidunhash(Proc *p)
  1390. {
  1391. int h;
  1392. Proc **l;
  1393. h = p->pid % nelem(procalloc.ht);
  1394. lock(&procalloc);
  1395. for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
  1396. if(*l == p){
  1397. *l = p->pidhash;
  1398. break;
  1399. }
  1400. unlock(&procalloc);
  1401. }
  1402. int
  1403. procindex(ulong pid)
  1404. {
  1405. Proc *p;
  1406. int h;
  1407. int s;
  1408. s = -1;
  1409. h = pid % nelem(procalloc.ht);
  1410. lock(&procalloc);
  1411. for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
  1412. if(p->pid == pid){
  1413. s = p - procalloc.arena;
  1414. break;
  1415. }
  1416. unlock(&procalloc);
  1417. return s;
  1418. }