/*
 * proc.c — process allocation, scheduling, sleep/wakeup, notes, and exit.
 */
  1. #include <u.h>
  2. #include "../port/lib.h"
  3. #include "mem.h"
  4. #include "dat.h"
  5. #include "fns.h"
  6. #include "../port/error.h"
  7. #include "../port/edf.h"
  8. #include <trace.h>
int	schedgain = 30;	/* units in seconds */
int	nrdy;			/* number of processes on the run queues */
Ref	noteidalloc;		/* note-group id allocator */

void updatecpu(Proc*);
int reprioritize(Proc*);

/* scheduler statistics */
ulong	delayedscheds;	/* statistics */
long	skipscheds;
long	preempts;
ulong	load;

static Ref	pidalloc;	/* pid allocator */

/* process table: pid hash, backing arena, and free list */
static struct Procalloc
{
	Lock;
	Proc*	ht[128];	/* pid hash chains */
	Proc*	arena;		/* conf.nproc contiguous Procs */
	Proc*	free;		/* linked through Proc.qnext */
} procalloc;

enum
{
	Q=10,
	DQ=4,
	Scaling=2,	/* fixed-point scale for tick arithmetic in updatecpu */
};

Schedq	runq[Nrq];	/* one queue per priority */
ulong	runvec;		/* bit i set iff runq[i] is non-empty */

/* indexed by Proc.state */
char *statename[] =
{	/* BUG: generate automatically */
	"Dead",
	"Moribund",
	"Ready",
	"Scheding",
	"Running",
	"Queueing",
	"QueueingR",
	"QueueingW",
	"Wakeme",
	"Broken",
	"Stopped",
	"Rendez",
	"Waitrelease",
};

static void pidhash(Proc*);
static void pidunhash(Proc*);
static void rebalance(void);
/*
 *  Always splhi()'ed.
 *
 *  Per-cpu scheduler entry point: sets the label that sched()
 *  jumps back to, retires the outgoing process (requeueing a
 *  Running one, recycling a Moribund one), then picks the next
 *  process via sched().  Re-entered by gotolabel(&m->sched).
 */
void
schedinit(void)		/* never returns */
{
	Edf *e;

	setlabel(&m->sched);
	if(up) {
		/* account real-time deadline bookkeeping for an admitted EDF proc */
		if((e = up->edf) && (e->flags & Admitted))
			edfrecord(up);
		m->proc = 0;
		switch(up->state) {
		case Running:
			ready(up);
			break;
		case Moribund:
			up->state = Dead;
			edfstop(up);
			if (up->edf)
				free(up->edf);
			up->edf = nil;

			/*
			 * Holding locks from pexit:
			 *	procalloc
			 *	palloc
			 */
			mmurelease(up);

			/* return the Proc to the free list */
			up->qnext = procalloc.free;
			procalloc.free = up;

			unlock(&palloc);
			unlock(&procalloc);
			break;
		}
		up->mach = nil;
		updatecpu(up);	/* close out cpu accounting before up changes */
		up = nil;
	}
	sched();
}
/*
 *  If changing this routine, look also at sleep().  It
 *  contains a copy of the guts of sched().
 *
 *  Give up the cpu: save the current process's context (if any),
 *  jump to the per-cpu scheduler loop, and on return from
 *  runproc() install the chosen process as up and resume it.
 */
void
sched(void)
{
	Proc *p;

	/* scheduling while holding an ilock would deadlock the kernel */
	if(m->ilockdepth)
		panic("cpu%d: ilockdepth %d, last lock %#p at %#p, sched called from %#p",
			m->machno,
			m->ilockdepth,
			up? up->lastilock: nil,
			(up && up->lastilock)? up->lastilock->pc: 0,
			getcallerpc(&p+2));
	if(up){
		/*
		 * Delay the sched until the process gives up the locks
		 * it is holding.  This avoids dumb lock loops.
		 * Don't delay if the process is Moribund.
		 * It called sched to die.
		 * But do sched eventually.  This avoids a missing unlock
		 * from hanging the entire kernel.
		 * But don't reschedule procs holding palloc or procalloc.
		 * Those are far too important to be holding while asleep.
		 *
		 * This test is not exact.  There can still be a few instructions
		 * in the middle of taslock when a process holds a lock
		 * but Lock.p has not yet been initialized.
		 */
		if(up->nlocks.ref)
		if(up->state != Moribund)
		if(up->delaysched < 20
		|| palloc.Lock.p == up
		|| procalloc.Lock.p == up){
			up->delaysched++;
			delayedscheds++;
			return;
		}

		up->delaysched = 0;

		splhi();

		/* statistics */
		m->cs++;

		procsave(up);
		/* setlabel returns 0 now; nonzero when this proc is resumed below */
		if(setlabel(&up->sched)){
			procrestore(up);
			spllo();
			return;
		}
		gotolabel(&m->sched);
	}
	p = runproc();
	if(!p->edf){
		updatecpu(p);
		p->priority = reprioritize(p);
	}
	/* a freshly readied proc keeps the old quantum; otherwise grant 100ms */
	if(p != m->readied)
		m->schedticks = m->ticks + HZ/10;
	m->readied = 0;
	up = p;
	up->state = Running;
	up->mach = MACHP(m->machno);
	m->proc = up;
	mmuswitch(up);
	gotolabel(&up->sched);	/* resume p inside its own saved context */
}
/*
 *  non-zero iff any run queue is non-empty (returns the runvec bitmask)
 */
int
anyready(void)
{
	return runvec;
}
  164. int
  165. anyhigher(void)
  166. {
  167. return runvec & ~((1<<(up->priority+1))-1);
  168. }
/*
 *  here once per clock tick to see if we should resched
 */
void
hzsched(void)
{
	/* once a second, rebalance will reprioritize ready procs */
	if(m->machno == 0)
		rebalance();

	/* unless preempted, get to run for at least 100ms */
	if(anyhigher()
	|| (!up->fixedpri && m->ticks > m->schedticks && anyready())){
		m->readied = nil;	/* avoid cooperative scheduling */
		up->delaysched++;	/* request a sched at the next opportunity */
	}
}
/*
 *  here at the end of non-clock interrupts to see if we should preempt the
 *  current process.  Returns 1 if preempted, 0 otherwise.
 */
int
preempted(void)
{
	/* preempt only a Running proc, once (no nested preemption), and
	 * only when a strictly higher-priority proc is ready */
	if(up && up->state == Running)
	if(up->preempted == 0)
	if(anyhigher())
	if(!active.exiting){
		m->readied = nil;	/* avoid cooperative scheduling */
		up->preempted = 1;
		sched();
		splhi();
		up->preempted = 0;
		return 1;
	}
	return 0;
}
/*
 *  Update the cpu time average for this particular process,
 *  which is about to change from up -> not up or vice versa.
 *  p->lastupdate is the last time an updatecpu happened.
 *
 *  The cpu time average is a decaying average that lasts
 *  about D clock ticks.  D is chosen to be approximately
 *  the cpu time of a cpu-intensive "quick job".  A job has to run
 *  for approximately D clock ticks before we home in on its
 *  actual cpu usage.  Thus if you manage to get in and get out
 *  quickly, you won't be penalized during your burst.  Once you
 *  start using your share of the cpu for more than about D
 *  clock ticks though, your p->cpu hits 1000 (1.0) and you end up
 *  below all the other quick jobs.  Interactive tasks, because
 *  they basically always use less than their fair share of cpu,
 *  will be rewarded.
 *
 *  If the process has not been running, then we want to
 *  apply the filter
 *
 *  cpu = cpu * (D-1)/D
 *
 *  n times, yielding
 *
 *  cpu = cpu * ((D-1)/D)^n
 *
 *  but D is big enough that this is approximately
 *
 *  cpu = cpu * (D-n)/D
 *
 *  so we use that instead.
 *
 *  If the process has been running, we apply the filter to
 *  1 - cpu, yielding a similar equation.  Note that cpu is
 *  stored in fixed point (* 1000).
 *
 *  Updatecpu must be called before changing up, in order
 *  to maintain accurate cpu usage statistics.  It can be called
 *  at any time to bring the stats for a given proc up-to-date.
 */
void
updatecpu(Proc *p)
{
	int n, t, ocpu;
	int D = schedgain*HZ*Scaling;

	/* EDF procs are scheduled by deadline, not decayed cpu usage */
	if(p->edf)
		return;

	/* current time in Scaling-fixed-point ticks, rounded */
	t = MACHP(0)->ticks*Scaling + Scaling/2;
	n = t - p->lastupdate;
	p->lastupdate = t;

	if(n == 0)
		return;
	if(n > D)
		n = D;	/* filter fully converges after D ticks */

	ocpu = p->cpu;
	if(p != up)
		p->cpu = (ocpu*(D-n))/D;	/* idle: decay toward 0 */
	else{
		/* running: decay (1 - cpu) toward 0, i.e. cpu toward 1000 */
		t = 1000 - ocpu;
		t = (t*(D-n))/D;
		p->cpu = 1000 - t;
	}

//iprint("pid %d %s for %d cpu %d -> %d\n", p->pid,p==up?"active":"inactive",n, ocpu,p->cpu);
}
/*
 *  On average, p has used p->cpu of a cpu recently.
 *  Its fair share is conf.nmach/m->load of a cpu.  If it has been getting
 *  too much, penalize it.  If it has been getting not enough, reward it.
 *  I don't think you can get much more than your fair share that
 *  often, so most of the queues are for using less.  Having a priority
 *  of 3 means you're just right.  Having a higher priority (up to p->basepri)
 *  means you're not using as much as you could.
 */
int
reprioritize(Proc *p)
{
	int fairshare, n, load, ratio;

	load = MACHP(0)->load;
	if(load == 0)
		return p->basepri;

	/*
	 *  fairshare = 1.000 * conf.nmach * 1.000/load,
	 *  except the decimal point is moved three places
	 *  on both load and fairshare.
	 */
	fairshare = (conf.nmach*1000*1000)/load;
	n = p->cpu;
	if(n == 0)
		n = 1;	/* avoid division by zero */
	ratio = (fairshare+n/2) / n;	/* rounded fairshare/usage */
	if(ratio > p->basepri)
		ratio = p->basepri;	/* never above the base priority */
	if(ratio < 0)
		panic("reprioritize");
//iprint("pid %d cpu %d load %d fair %d pri %d\n", p->pid, p->cpu, load, fairshare, ratio);
	return ratio;
}
  302. /*
  303. * add a process to a scheduling queue
  304. */
  305. void
  306. queueproc(Schedq *rq, Proc *p)
  307. {
  308. int pri;
  309. pri = rq - runq;
  310. lock(runq);
  311. p->priority = pri;
  312. p->rnext = 0;
  313. if(rq->tail)
  314. rq->tail->rnext = p;
  315. else
  316. rq->head = p;
  317. rq->tail = p;
  318. rq->n++;
  319. nrdy++;
  320. runvec |= 1<<pri;
  321. unlock(runq);
  322. }
/*
 *  try to remove a process from a scheduling queue (called splhi)
 *
 *  Returns tp on success, nil if the runq lock is contended, tp is
 *  no longer on the queue, or tp's state is still live on another cpu.
 */
Proc*
dequeueproc(Schedq *rq, Proc *tp)
{
	Proc *l, *p;

	if(!canlock(runq))
		return nil;

	/*
	 *  the queue may have changed before we locked runq,
	 *  refind the target process.
	 */
	l = 0;
	for(p = rq->head; p; p = p->rnext){
		if(p == tp)
			break;
		l = p;
	}

	/*
	 *  p->mach==0 only when process state is saved
	 */
	if(p == 0 || p->mach){
		unlock(runq);
		return nil;
	}
	/* unlink p; l is its predecessor (nil if p is the head) */
	if(p->rnext == 0)
		rq->tail = l;
	if(l)
		l->rnext = p->rnext;
	else
		rq->head = p->rnext;
	if(rq->head == nil)
		runvec &= ~(1<<(rq-runq));	/* queue became empty */
	rq->n--;
	nrdy--;
	if(p->state != Ready)
		print("dequeueproc %s %lud %s\n", p->text, p->pid, statename[p->state]);

	unlock(runq);
	return p;
}
/*
 *  ready(p) picks a new priority for a process and sticks it in the
 *  runq for that priority.
 */
void
ready(Proc *p)
{
	int s, pri;
	Schedq *rq;
	void (*pt)(Proc*, int, vlong);

	s = splhi();
	/* real-time procs are queued by the EDF machinery instead */
	if(edfready(p)){
		splx(s);
		return;
	}

	/* hint for cooperative scheduling in runproc() */
	if(up != p && (p->wired == nil || p->wired == m))
		m->readied = p;	/* group scheduling */

	updatecpu(p);
	pri = reprioritize(p);
	p->priority = pri;
	rq = &runq[pri];
	p->state = Ready;
	queueproc(rq, p);
	pt = proctrace;
	if(pt)
		pt(p, SReady, 0);
	splx(s);
}
  392. /*
  393. * yield the processor and drop our priority
  394. */
  395. void
  396. yield(void)
  397. {
  398. if(anyready()){
  399. /* pretend we just used 1/2 tick */
  400. up->lastupdate -= Scaling/2;
  401. sched();
  402. }
  403. }
  404. /*
  405. * recalculate priorities once a second. We need to do this
  406. * since priorities will otherwise only be recalculated when
  407. * the running process blocks.
  408. */
  409. ulong balancetime;
  410. static void
  411. rebalance(void)
  412. {
  413. int pri, npri, t, x;
  414. Schedq *rq;
  415. Proc *p;
  416. t = m->ticks;
  417. if(t - balancetime < HZ)
  418. return;
  419. balancetime = t;
  420. for(pri=0, rq=runq; pri<Npriq; pri++, rq++){
  421. another:
  422. p = rq->head;
  423. if(p == nil)
  424. continue;
  425. if(p->mp != MACHP(m->machno))
  426. continue;
  427. if(pri == p->basepri)
  428. continue;
  429. updatecpu(p);
  430. npri = reprioritize(p);
  431. if(npri != pri){
  432. x = splhi();
  433. p = dequeueproc(rq, p);
  434. if(p)
  435. queueproc(&runq[npri], p);
  436. splx(x);
  437. goto another;
  438. }
  439. }
  440. }
/*
 *  pick a process to run
 */
Proc*
runproc(void)
{
	Schedq *rq;
	Proc *p;
	ulong start, now;
	int i;
	void (*pt)(Proc*, int, vlong);

	start = perfticks();

	/* cooperative scheduling until the clock ticks:
	 * take the proc that ready() hinted at, provided its state is
	 * saved (p->mach==0), it may run here, and no real-time queues
	 * (the two highest) have work */
	if((p=m->readied) && p->mach==0 && p->state==Ready
	&& (p->wired == nil || p->wired == m)
	&& runq[Nrq-1].head == nil && runq[Nrq-2].head == nil){
		skipscheds++;
		rq = &runq[p->priority];
		goto found;
	}

	preempts++;

loop:
	/*
	 *  find a process that last ran on this processor (affinity),
	 *  or one that hasn't moved in a while (load balancing).  Every
	 *  time around the loop affinity goes down.
	 */
	spllo();
	for(i = 0;; i++){
		/*
		 *  find the highest priority target process that this
		 *  processor can run given affinity constraints.
		 *
		 */
		for(rq = &runq[Nrq-1]; rq >= runq; rq--){
			for(p = rq->head; p; p = p->rnext){
				/* i > 0: affinity relaxed on later passes */
				if(p->mp == nil || p->mp == MACHP(m->machno)
				|| (!p->wired && i > 0))
					goto found;
			}
		}

		/* waste time or halt the CPU */
		idlehands();

		/* remember how much time we're here */
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	p = dequeueproc(rq, p);
	if(p == nil)
		goto loop;	/* lost the race for p; look again */

	p->state = Scheding;
	p->mp = MACHP(m->machno);	/* record affinity with this cpu */

	if(edflock(p)){
		edfrun(p, rq == &runq[PriEdf]);	/* start deadline timer and do admin */
		edfunlock();
	}
	pt = proctrace;
	if(pt)
		pt(p, SRun, 0);
	return p;
}
  505. int
  506. canpage(Proc *p)
  507. {
  508. int ok = 0;
  509. splhi();
  510. lock(runq);
  511. /* Only reliable way to see if we are Running */
  512. if(p->mach == 0) {
  513. p->newtlb = 1;
  514. ok = 1;
  515. }
  516. unlock(runq);
  517. spllo();
  518. return ok;
  519. }
/*
 *  allocate and initialize a fresh Proc from the free list,
 *  blocking in resrcwait() until one is available.
 */
Proc*
newproc(void)
{
	char msg[64];
	Proc *p;

	lock(&procalloc);
	for(;;) {
		if(p = procalloc.free)
			break;
		/* no free Procs: wait for one and retry */
		snprint(msg, sizeof msg, "no procs; %s forking",
			up? up->text: "kernel");
		unlock(&procalloc);
		resrcwait(msg);
		lock(&procalloc);
	}
	procalloc.free = p->qnext;
	unlock(&procalloc);

	p->state = Scheding;
	p->psstate = "New";
	p->mach = 0;
	p->qnext = 0;
	p->nchild = 0;
	p->nwait = 0;
	p->waitq = 0;
	p->parent = 0;
	p->pgrp = 0;
	p->egrp = 0;
	p->fgrp = 0;
	p->rgrp = 0;
	p->pdbg = 0;
	p->fpstate = FPinit;
	p->kp = 0;
	/* syscall tracing is inherited from the parent */
	if(up && up->procctl == Proc_tracesyscall)
		p->procctl = Proc_tracesyscall;
	else
		p->procctl = 0;
	p->syscalltrace = 0;
	p->notepending = 0;
	p->ureg = 0;
	p->privatemem = 0;
	p->noswap = 0;
	p->errstr = p->errbuf0;
	p->syserrstr = p->errbuf1;
	p->errbuf0[0] = '\0';
	p->errbuf1[0] = '\0';
	p->nlocks.ref = 0;
	p->delaysched = 0;
	p->trace = 0;
	kstrdup(&p->user, "*nouser");
	kstrdup(&p->text, "*notext");
	kstrdup(&p->args, "");
	p->nargs = 0;
	p->setargs = 0;
	memset(p->seg, 0, sizeof p->seg);
	p->pid = incref(&pidalloc);
	pidhash(p);
	p->noteid = incref(&noteidalloc);
	if(p->pid==0 || p->noteid==0)
		panic("pidalloc");
	/* kernel stacks are kept across recycling */
	if(p->kstack == 0)
		p->kstack = smalloc(KSTACK);

	/* sched params */
	p->mp = 0;
	p->wired = 0;
	procpriority(p, PriNormal, 0);
	p->cpu = 0;
	p->lastupdate = MACHP(0)->ticks*Scaling;
	p->edf = nil;

	return p;
}
/*
 *  wire this proc to a machine
 *
 *  bm < 0 means pick the machine with the fewest procs already
 *  wired to it; otherwise wire to cpu bm mod conf.nmach.
 */
void
procwired(Proc *p, int bm)
{
	Proc *pp;
	int i;
	char nwired[MAXMACH];
	Mach *wm;

	if(bm < 0){
		/* pick a machine to wire to */
		memset(nwired, 0, sizeof(nwired));
		p->wired = 0;
		pp = proctab(0);
		/* count wired procs per cpu over the whole proc table */
		for(i=0; i<conf.nproc; i++, pp++){
			wm = pp->wired;
			if(wm && pp->pid)
				nwired[wm->machno]++;
		}
		bm = 0;
		for(i=0; i<conf.nmach; i++)
			if(nwired[i] < nwired[bm])
				bm = i;
	} else {
		/* use the virtual machine requested */
		bm = bm % conf.nmach;
	}

	p->wired = MACHP(bm);
	p->mp = p->wired;	/* affinity follows the wiring */
}
  621. void
  622. procpriority(Proc *p, int pri, int fixed)
  623. {
  624. if(pri >= Npriq)
  625. pri = Npriq - 1;
  626. else if(pri < 0)
  627. pri = 0;
  628. p->basepri = pri;
  629. p->priority = pri;
  630. if(fixed){
  631. p->fixedpri = 1;
  632. } else {
  633. p->fixedpri = 0;
  634. }
  635. }
/*
 *  allocate the process table arena and thread every Proc
 *  onto the free list.
 */
void
procinit0(void)		/* bad planning - clashes with devproc.c */
{
	Proc *p;
	int i;

	procalloc.free = xalloc(conf.nproc*sizeof(Proc));
	if(procalloc.free == nil){
		xsummary();
		panic("cannot allocate %lud procs (%ludMB)\n", conf.nproc, conf.nproc*sizeof(Proc)/(1024*1024));
	}
	procalloc.arena = procalloc.free;

	/* chain all procs through qnext; last one terminates the list */
	p = procalloc.free;
	for(i=0; i<conf.nproc-1; i++,p++)
		p->qnext = p+1;
	p->qnext = 0;
}
/*
 *  sleep if a condition is not true.  Another process will
 *  awaken us after it sets the condition.  When we awaken
 *  the condition may no longer be true.
 *
 *  we lock both the process and the rendezvous to keep r->p
 *  and p->r synchronized.
 */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	int s;
	void (*pt)(Proc*, int, vlong);

	s = splhi();

	/* sleeping with locks held risks deadlock; complain loudly */
	if(up->nlocks.ref)
		print("process %lud sleeps with %lud locks held, last lock %#p locked at pc %#lux, sleep called from %#p\n",
			up->pid, up->nlocks.ref, up->lastlock, up->lastlock->pc, getcallerpc(&r));
	lock(r);
	lock(&up->rlock);
	if(r->p){
		print("double sleep called from %#p, %lud %lud\n", getcallerpc(&r), r->p->pid, up->pid);
		dumpstack();
	}

	/*
	 *  Wakeup only knows there may be something to do by testing
	 *  r->p in order to get something to lock on.
	 *  Flush that information out to memory in case the sleep is
	 *  committed.
	 */
	r->p = up;

	if((*f)(arg) || up->notepending){
		/*
		 *  if condition happened or a note is pending
		 *  never mind
		 */
		r->p = nil;
		unlock(&up->rlock);
		unlock(r);
	} else {
		/*
		 *  now we are committed to
		 *  change state and call scheduler
		 */
		pt = proctrace;
		if(pt)
			pt(up, SSleep, 0);
		up->state = Wakeme;
		up->r = r;

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)) {
			/*
			 *  here when the process is awakened
			 */
			procrestore(up);
			spllo();
		} else {
			/*
			 *  here to go to sleep (i.e. stop Running)
			 */
			unlock(&up->rlock);
			unlock(r);
			gotolabel(&m->sched);
		}
	}

	/* a pending note interrupts the sleep */
	if(up->notepending) {
		up->notepending = 0;
		splx(s);
		if(up->procctl == Proc_exitme && up->closingfgrp)
			forceclosefgrp();
		error(Eintr);
	}

	splx(s);
}
/*
 *  sleep condition for tsleep: true when the timer fired
 *  (trend cleared by twakeup) or the caller's condition holds.
 */
static int
tfn(void *arg)
{
	return up->trend == nil || up->tfn(arg);
}
/*
 *  timer callback for tsleep: clear the proc's timer rendezvous
 *  and wake it if it is still waiting.
 *
 *  NOTE(review): p->trend is read and cleared here without holding
 *  a lock against a concurrent tsleep()/sleep() on the same proc —
 *  confirm the timer framework serializes this with timerdel.
 */
void
twakeup(Ureg*, Timer *t)
{
	Proc *p;
	Rendez *trend;

	p = t->ta;
	trend = p->trend;
	p->trend = 0;
	if(trend)
		wakeup(trend);
}
/*
 *  sleep on r until fn(arg) is true or ms milliseconds elapse,
 *  driven by a one-shot relative timer that calls twakeup.
 */
void
tsleep(Rendez *r, int (*fn)(void*), void *arg, ulong ms)
{
	if (up->tt){
		print("tsleep: timer active: mode %d, tf %#p\n", up->tmode, up->tf);
		timerdel(up);
	}
	/* arm a relative one-shot timer on this proc */
	up->tns = MS2NS(ms);
	up->tf = twakeup;
	up->tmode = Trelative;
	up->ta = up;
	up->trend = r;
	up->tfn = fn;
	timeradd(up);

	/* make sure the timer is disarmed if sleep errors out */
	if(waserror()){
		timerdel(up);
		nexterror();
	}
	sleep(r, tfn, arg);
	if(up->tt)
		timerdel(up);
	up->twhen = 0;
	poperror();
}
/*
 *  Expects that only one process can call wakeup for any given Rendez.
 *  We hold both locks to ensure that r->p and p->r remain consistent.
 *  Richard Miller has a better solution that doesn't require both to
 *  be held simultaneously, but I'm a paranoid - presotto.
 *
 *  Returns the awakened proc, or nil if no one was sleeping on r.
 */
Proc*
wakeup(Rendez *r)
{
	Proc *p;
	int s;

	s = splhi();

	lock(r);
	p = r->p;

	if(p != nil){
		lock(&p->rlock);
		/* both sides must still agree on the rendezvous */
		if(p->state != Wakeme || p->r != r){
			iprint("%p %p %d\n", p->r, r, p->state);
			panic("wakeup: state");
		}
		r->p = nil;
		p->r = nil;
		ready(p);
		unlock(&p->rlock);
	}
	unlock(r);

	splx(s);

	return p;
}
/*
 *  if waking a sleeping process, this routine must hold both
 *  p->rlock and r->lock.  However, it can't know them in
 *  the same order as wakeup causing a possible lock ordering
 *  deadlock.  We break the deadlock by giving up the p->rlock
 *  lock if we can't get the r->lock and retrying.
 *
 *  Returns 1 if the note was posted, 0 if p's note buffer was full.
 */
int
postnote(Proc *p, int dolock, char *n, int flag)
{
	int s, ret;
	Rendez *r;
	Proc *d, **l;

	if(dolock)
		qlock(&p->debug);

	/* a kernel note to an unnotified proc flushes pending user notes */
	if(flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;

	ret = 0;
	if(p->nnote < NNOTE) {
		strcpy(p->note[p->nnote].msg, n);
		p->note[p->nnote++].flag = flag;
		ret = 1;
	}
	p->notepending = 1;
	if(dolock)
		qunlock(&p->debug);

	/* this loop is to avoid lock ordering problems. */
	for(;;){
		s = splhi();
		lock(&p->rlock);
		r = p->r;

		/* waiting for a wakeup? */
		if(r == nil)
			break;	/* no */

		/* try for the second lock */
		if(canlock(r)){
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);
			p->r = nil;
			r->p = nil;
			ready(p);
			unlock(r);
			break;
		}

		/* give other process time to get out of critical section and try again */
		unlock(&p->rlock);
		splx(s);
		sched();
	}
	unlock(&p->rlock);
	splx(s);

	if(p->state != Rendezvous)
		return ret;

	/* Try and pull out of a rendezvous */
	lock(p->rgrp);
	if(p->state == Rendezvous) {
		p->rendval = ~0;	/* signal the interrupted rendezvous */
		l = &REND(p->rgrp, p->rendtag);
		/* unlink p from its rendezvous hash chain */
		for(d = *l; d; d = d->rendhash) {
			if(d == p) {
				*l = p->rendhash;
				break;
			}
			l = &d->rendhash;
		}
		ready(p);
	}
	unlock(p->rgrp);
	return ret;
}
/*
 *  weird thing: keep at most NBROKEN around
 */
#define NBROKEN 4
/* FIFO of Broken procs; oldest is released when a fifth arrives */
struct
{
	QLock;
	int	n;
	Proc	*p[NBROKEN];
}broken;
/*
 *  park the current process in the Broken state (for debugging),
 *  evicting the oldest broken proc if the table is full.
 *  Does not return until someone unbreaks/frees this proc.
 */
void
addbroken(Proc *p)
{
	qlock(&broken);
	if(broken.n == NBROKEN) {
		/* table full: release the oldest and shift the rest down */
		ready(broken.p[0]);
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
		--broken.n;
	}
	broken.p[broken.n++] = p;
	qunlock(&broken);

	edfstop(up);
	p->state = Broken;
	p->psstate = 0;
	sched();
}
/*
 *  release p from the broken table and let it run again.
 */
void
unbreak(Proc *p)
{
	int b;

	qlock(&broken);
	for(b=0; b < broken.n; b++)
		if(broken.p[b] == p) {
			/* close the gap left by p in the FIFO */
			broken.n--;
			memmove(&broken.p[b], &broken.p[b+1],
					sizeof(Proc*)*(NBROKEN-(b+1)));
			ready(p);
			break;
		}
	qunlock(&broken);
}
  907. int
  908. freebroken(void)
  909. {
  910. int i, n;
  911. qlock(&broken);
  912. n = broken.n;
  913. for(i=0; i<n; i++) {
  914. ready(broken.p[i]);
  915. broken.p[i] = 0;
  916. }
  917. broken.n = 0;
  918. qunlock(&broken);
  919. return n;
  920. }
  921. void
  922. pexit(char *exitstr, int freemem)
  923. {
  924. Proc *p;
  925. Segment **s, **es;
  926. long utime, stime;
  927. Waitq *wq, *f, *next;
  928. Fgrp *fgrp;
  929. Egrp *egrp;
  930. Rgrp *rgrp;
  931. Pgrp *pgrp;
  932. Chan *dot;
  933. void (*pt)(Proc*, int, vlong);
  934. if(up->syscalltrace)
  935. free(up->syscalltrace);
  936. up->alarm = 0;
  937. if (up->tt)
  938. timerdel(up);
  939. pt = proctrace;
  940. if(pt)
  941. pt(up, SDead, 0);
  942. /* nil out all the resources under lock (free later) */
  943. qlock(&up->debug);
  944. fgrp = up->fgrp;
  945. up->fgrp = nil;
  946. egrp = up->egrp;
  947. up->egrp = nil;
  948. rgrp = up->rgrp;
  949. up->rgrp = nil;
  950. pgrp = up->pgrp;
  951. up->pgrp = nil;
  952. dot = up->dot;
  953. up->dot = nil;
  954. qunlock(&up->debug);
  955. if(fgrp)
  956. closefgrp(fgrp);
  957. if(egrp)
  958. closeegrp(egrp);
  959. if(rgrp)
  960. closergrp(rgrp);
  961. if(dot)
  962. cclose(dot);
  963. if(pgrp)
  964. closepgrp(pgrp);
  965. /*
  966. * if not a kernel process and have a parent,
  967. * do some housekeeping.
  968. */
  969. if(up->kp == 0) {
  970. p = up->parent;
  971. if(p == 0) {
  972. if(exitstr == 0)
  973. exitstr = "unknown";
  974. panic("boot process died: %s", exitstr);
  975. }
  976. while(waserror())
  977. ;
  978. wq = smalloc(sizeof(Waitq));
  979. poperror();
  980. wq->w.pid = up->pid;
  981. utime = up->time[TUser] + up->time[TCUser];
  982. stime = up->time[TSys] + up->time[TCSys];
  983. wq->w.time[TUser] = tk2ms(utime);
  984. wq->w.time[TSys] = tk2ms(stime);
  985. wq->w.time[TReal] = tk2ms(MACHP(0)->ticks - up->time[TReal]);
  986. if(exitstr && exitstr[0])
  987. snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
  988. else
  989. wq->w.msg[0] = '\0';
  990. lock(&p->exl);
  991. /*
  992. * Check that parent is still alive.
  993. */
  994. if(p->pid == up->parentpid && p->state != Broken) {
  995. p->nchild--;
  996. p->time[TCUser] += utime;
  997. p->time[TCSys] += stime;
  998. /*
  999. * If there would be more than 128 wait records
  1000. * processes for my parent, then don't leave a wait
  1001. * record behind. This helps prevent badly written
  1002. * daemon processes from accumulating lots of wait
  1003. * records.
  1004. */
  1005. if(p->nwait < 128) {
  1006. wq->next = p->waitq;
  1007. p->waitq = wq;
  1008. p->nwait++;
  1009. wq = nil;
  1010. wakeup(&p->waitr);
  1011. }
  1012. }
  1013. unlock(&p->exl);
  1014. if(wq)
  1015. free(wq);
  1016. }
  1017. if(!freemem)
  1018. addbroken(up);
  1019. qlock(&up->seglock);
  1020. es = &up->seg[NSEG];
  1021. for(s = up->seg; s < es; s++) {
  1022. if(*s) {
  1023. putseg(*s);
  1024. *s = 0;
  1025. }
  1026. }
  1027. qunlock(&up->seglock);
  1028. lock(&up->exl); /* Prevent my children from leaving waits */
  1029. pidunhash(up);
  1030. up->pid = 0;
  1031. wakeup(&up->waitr);
  1032. unlock(&up->exl);
  1033. for(f = up->waitq; f; f = next) {
  1034. next = f->next;
  1035. free(f);
  1036. }
  1037. /* release debuggers */
  1038. qlock(&up->debug);
  1039. if(up->pdbg) {
  1040. wakeup(&up->pdbg->sleep);
  1041. up->pdbg = 0;
  1042. }
  1043. qunlock(&up->debug);
  1044. /* Sched must not loop for these locks */
  1045. lock(&procalloc);
  1046. lock(&palloc);
  1047. edfstop(up);
  1048. up->state = Moribund;
  1049. sched();
  1050. panic("pexit");
  1051. }
  1052. int
  1053. haswaitq(void *x)
  1054. {
  1055. Proc *p;
  1056. p = (Proc *)x;
  1057. return p->waitq != 0;
  1058. }
/*
 * Wait for a child process to exit.  If w is non-nil the child's
 * Waitmsg is copied into it.  Returns the exited child's pid.
 * Errors: Einuse if another pwait is already in progress for this
 * process; Enochild if there are no children and no queued records.
 */
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	/* only one waiter at a time per process */
	if(!canqlock(&up->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	/* nothing to wait for: no live children and no pending records */
	if(up->nchild == 0 && up->waitq == 0) {
		unlock(&up->exl);
		error(Enochild);
	}
	unlock(&up->exl);

	/* block until an exiting child queues a wait record (see pexit) */
	sleep(&up->waitr, haswaitq, up);

	/* dequeue one record under exl */
	lock(&up->exl);
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
  1090. Proc*
  1091. proctab(int i)
  1092. {
  1093. return &procalloc.arena[i];
  1094. }
  1095. void
  1096. dumpaproc(Proc *p)
  1097. {
  1098. ulong bss;
  1099. char *s;
  1100. if(p == 0)
  1101. return;
  1102. bss = 0;
  1103. if(p->seg[BSEG])
  1104. bss = p->seg[BSEG]->top;
  1105. s = p->psstate;
  1106. if(s == 0)
  1107. s = statename[p->state];
  1108. print("%3lud:%10s pc %8lux dbgpc %8lux %8s (%s) ut %ld st %ld bss %lux qpc %lux nl %lud nd %lud lpc %lux pri %lud\n",
  1109. p->pid, p->text, p->pc, dbgpc(p), s, statename[p->state],
  1110. p->time[0], p->time[1], bss, p->qpc, p->nlocks.ref, p->delaysched, p->lastlock ? p->lastlock->pc : 0, p->priority);
  1111. }
  1112. void
  1113. procdump(void)
  1114. {
  1115. int i;
  1116. Proc *p;
  1117. if(up)
  1118. print("up %lud\n", up->pid);
  1119. else
  1120. print("no current process\n");
  1121. for(i=0; i<conf.nproc; i++) {
  1122. p = &procalloc.arena[i];
  1123. if(p->state == Dead)
  1124. continue;
  1125. dumpaproc(p);
  1126. }
  1127. }
  1128. /*
  1129. * wait till all processes have flushed their mmu
 * state about segment s
  1131. */
void
procflushseg(Segment *s)
{
	int i, ns, nm, nwait;
	Proc *p;

	/*
	 * tell all processes with this
	 * segment to flush their mmu's
	 */
	nwait = 0;
	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;
		for(ns = 0; ns < NSEG; ns++)
			if(p->seg[ns] == s){
				p->newtlb = 1;
				/* flag every processor currently running a proc that maps s */
				for(nm = 0; nm < conf.nmach; nm++){
					if(MACHP(nm)->proc == p){
						MACHP(nm)->flushmmu = 1;
						nwait++;
					}
				}
				break;	/* one hit per proc is enough */
			}
	}

	/* no processor is running a user of s; nothing to wait for */
	if(nwait == 0)
		return;

	/*
	 * wait for all processors to take a clock interrupt
	 * and flush their mmu's
	 */
	for(nm = 0; nm < conf.nmach; nm++)
		if(MACHP(nm) != m)
			/* yield while the other processor's flag is still set */
			while(MACHP(nm)->flushmmu)
				sched();
}
  1169. void
  1170. scheddump(void)
  1171. {
  1172. Proc *p;
  1173. Schedq *rq;
  1174. for(rq = &runq[Nrq-1]; rq >= runq; rq--){
  1175. if(rq->head == 0)
  1176. continue;
  1177. print("rq%ld:", rq-runq);
  1178. for(p = rq->head; p; p = p->rnext)
  1179. print(" %lud(%lud)", p->pid, m->ticks - p->readytime);
  1180. print("\n");
  1181. delay(150);
  1182. }
  1183. print("nrdy %d\n", nrdy);
  1184. }
  1185. void
  1186. kproc(char *name, void (*func)(void *), void *arg)
  1187. {
  1188. Proc *p;
  1189. static Pgrp *kpgrp;
  1190. p = newproc();
  1191. p->psstate = 0;
  1192. p->procmode = 0640;
  1193. p->kp = 1;
  1194. p->noswap = 1;
  1195. p->fpsave = up->fpsave;
  1196. p->scallnr = up->scallnr;
  1197. p->s = up->s;
  1198. p->nerrlab = 0;
  1199. p->slash = up->slash;
  1200. p->dot = up->dot;
  1201. if(p->dot)
  1202. incref(p->dot);
  1203. memmove(p->note, up->note, sizeof(p->note));
  1204. p->nnote = up->nnote;
  1205. p->notified = 0;
  1206. p->lastnote = up->lastnote;
  1207. p->notify = up->notify;
  1208. p->ureg = 0;
  1209. p->dbgreg = 0;
  1210. procpriority(p, PriKproc, 0);
  1211. kprocchild(p, func, arg);
  1212. kstrdup(&p->user, eve);
  1213. kstrdup(&p->text, name);
  1214. if(kpgrp == 0)
  1215. kpgrp = newpgrp();
  1216. p->pgrp = kpgrp;
  1217. incref(kpgrp);
  1218. memset(p->time, 0, sizeof(p->time));
  1219. p->time[TReal] = MACHP(0)->ticks;
  1220. ready(p);
  1221. }
  1222. /*
  1223. * called splhi() by notify(). See comment in notify for the
  1224. * reasoning.
  1225. */
/*
 * Carry out the pending procctl action for p: exit the process,
 * or stop it (releasing any waiting debugger first).
 * Entered splhi (see the comment above and in notify).
 */
void
procctl(Proc *p)
{
	char *state;
	ulong s;

	switch(p->procctl) {
	case Proc_exitbig:
		spllo();	/* pexit takes locks; can't hold splhi */
		pexit("Killed: Insufficient physical memory", 1);

	case Proc_exitme:
		spllo();		/* pexit has locks in it */
		pexit("Killed", 1);

	case Proc_traceme:
		/* only stop a traced process when it has a note pending */
		if(p->nnote == 0)
			return;
		/* No break */

	case Proc_stopme:
		p->procctl = 0;
		state = p->psstate;
		p->psstate = "Stopped";
		/* free a waiting debugger */
		s = spllo();
		qlock(&p->debug);
		if(p->pdbg) {
			wakeup(&p->pdbg->sleep);
			p->pdbg = 0;
		}
		qunlock(&p->debug);
		splhi();
		p->state = Stopped;
		sched();	/* returns when somebody readies us again */
		p->psstate = state;
		splx(s);
		return;
	}
}
  1262. #include "errstr.h"
/*
 * Post err as the current process's error string and jump to the
 * most recent waserror() label.  Does not return to the caller.
 */
void
error(char *err)
{
	spllo();

	assert(up->nerrlab < NERR);
	kstrcpy(up->errstr, err, ERRMAX);
	/* NOTE(review): records the caller's context in the spare top label,
	 * presumably for debugging/stack traces — confirm against port code */
	setlabel(&up->errlab[NERR-1]);
	nexterror();
}
/*
 * Pop the most recently pushed error label and resume there
 * (execution continues inside that waserror()).  Does not return.
 */
void
nexterror(void)
{
	gotolabel(&up->errlab[--up->nerrlab]);
}
  1277. void
  1278. exhausted(char *resource)
  1279. {
  1280. char buf[ERRMAX];
  1281. snprint(buf, sizeof buf, "no free %s", resource);
  1282. iprint("%s\n", buf);
  1283. error(buf);
  1284. }
  1285. void
  1286. killbig(char *why)
  1287. {
  1288. int i;
  1289. Segment *s;
  1290. ulong l, max;
  1291. Proc *p, *ep, *kp;
  1292. max = 0;
  1293. kp = 0;
  1294. ep = procalloc.arena+conf.nproc;
  1295. for(p = procalloc.arena; p < ep; p++) {
  1296. if(p->state == Dead || p->kp)
  1297. continue;
  1298. l = 0;
  1299. for(i=1; i<NSEG; i++) {
  1300. s = p->seg[i];
  1301. if(s != 0)
  1302. l += s->top - s->base;
  1303. }
  1304. if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
  1305. kp = p;
  1306. max = l;
  1307. }
  1308. }
  1309. print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
  1310. for(p = procalloc.arena; p < ep; p++) {
  1311. if(p->state == Dead || p->kp)
  1312. continue;
  1313. if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
  1314. p->procctl = Proc_exitbig;
  1315. }
  1316. kp->procctl = Proc_exitbig;
  1317. for(i = 0; i < NSEG; i++) {
  1318. s = kp->seg[i];
  1319. if(s != 0 && canqlock(&s->lk)) {
  1320. mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
  1321. qunlock(&s->lk);
  1322. }
  1323. }
  1324. }
  1325. /*
  1326. * change ownership to 'new' of all processes owned by 'old'. Used when
  1327. * eve changes.
  1328. */
  1329. void
  1330. renameuser(char *old, char *new)
  1331. {
  1332. Proc *p, *ep;
  1333. ep = procalloc.arena+conf.nproc;
  1334. for(p = procalloc.arena; p < ep; p++)
  1335. if(p->user!=nil && strcmp(old, p->user)==0)
  1336. kstrdup(&p->user, new);
  1337. }
  1338. /*
  1339. * time accounting called by clock() splhi'd
  1340. */
void
accounttime(void)
{
	Proc *p;
	ulong n, per;
	static ulong nrun;

	p = m->proc;
	if(p) {
		/* charge one tick to the running proc; insyscall selects
		 * which time[] slot — presumably TUser vs TSys, confirm */
		nrun++;
		p->time[p->insyscall]++;
	}

	/* calculate decaying duty cycles */
	n = perfticks();
	per = n - m->perf.last;
	m->perf.last = n;
	per = (m->perf.period*(HZ-1) + per)/HZ;
	if(per != 0)
		m->perf.period = per;

	m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;
	m->perf.inidle = 0;

	m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;
	m->perf.inintr = 0;

	/* only one processor gets to compute system load averages */
	if(m->machno != 0)
		return;

	/*
	 * calculate decaying load average.
	 * if we decay by (n-1)/n then it takes
	 * n clock ticks to go from load L to .36 L once
	 * things quiet down. it takes about 5 n clock
	 * ticks to go to zero. so using HZ means this is
	 * approximately the load over the last second,
	 * with a tail lasting about 5 seconds.
	 */
	n = nrun;
	nrun = 0;
	n = (nrdy+n)*1000;	/* load is scaled by 1000 */
	m->load = (m->load*(HZ-1)+n)/HZ;
}
  1380. static void
  1381. pidhash(Proc *p)
  1382. {
  1383. int h;
  1384. h = p->pid % nelem(procalloc.ht);
  1385. lock(&procalloc);
  1386. p->pidhash = procalloc.ht[h];
  1387. procalloc.ht[h] = p;
  1388. unlock(&procalloc);
  1389. }
  1390. static void
  1391. pidunhash(Proc *p)
  1392. {
  1393. int h;
  1394. Proc **l;
  1395. h = p->pid % nelem(procalloc.ht);
  1396. lock(&procalloc);
  1397. for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
  1398. if(*l == p){
  1399. *l = p->pidhash;
  1400. break;
  1401. }
  1402. unlock(&procalloc);
  1403. }
  1404. int
  1405. procindex(ulong pid)
  1406. {
  1407. Proc *p;
  1408. int h;
  1409. int s;
  1410. s = -1;
  1411. h = pid % nelem(procalloc.ht);
  1412. lock(&procalloc);
  1413. for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
  1414. if(p->pid == pid){
  1415. s = p - procalloc.arena;
  1416. break;
  1417. }
  1418. unlock(&procalloc);
  1419. return s;
  1420. }