proc.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634
  1. #include <u.h>
  2. #include "../port/lib.h"
  3. #include "mem.h"
  4. #include "dat.h"
  5. #include "fns.h"
  6. #include "../port/error.h"
  7. #include "../port/edf.h"
  8. #include <trace.h>
int	schedgain = 30;	/* units in seconds */
int	nrdy;		/* number of procs currently on the run queues */
Ref	noteidalloc;	/* allocator for Proc.noteid (note group ids) */

void updatecpu(Proc*);
int reprioritize(Proc*);

/* scheduler statistics */
ulong	delayedscheds;	/* statistics */
long skipscheds;
long preempts;
ulong load;

static Ref	pidalloc;	/* allocator for Proc.pid */

/*
 * The process table: a pid hash, a contiguous arena of Procs,
 * and a free list threaded through Proc.qnext.
 */
static struct Procalloc
{
	Lock;
	Proc*	ht[128];	/* pid hash chains (pidhash/pidunhash) */
	Proc*	arena;		/* conf.nproc Procs, allocated in procinit0 */
	Proc*	free;		/* free list, linked via qnext */
} procalloc;

enum
{
	Q=10,
	DQ=4,
	Scaling=2,	/* fixed-point scale applied to tick counts in updatecpu */
};

Schedq	runq[Nrq];	/* one FIFO queue per priority; lock(runq) guards them all */
ulong	runvec;		/* bit i set iff runq[i] is non-empty */

/* indexed by Proc.state */
char *statename[] =
{	/* BUG: generate automatically */
	"Dead",
	"Moribund",
	"Ready",
	"Scheding",
	"Running",
	"Queueing",
	"QueueingR",
	"QueueingW",
	"Wakeme",
	"Broken",
	"Stopped",
	"Rendez",
	"Waitrelease",
};

static void pidhash(Proc*);
static void pidunhash(Proc*);
static void rebalance(void);
/*
 *  Always splhi()'ed.
 */
void
schedinit(void)		/* never returns */
{
	Edf *e;

	/* the scheduler loop resumes here each time a proc stops running */
	setlabel(&m->sched);
	if(up) {
		/* finish accounting for the outgoing process */
		if((e = up->edf) && (e->flags & Admitted))
			edfrecord(up);
		m->proc = 0;
		switch(up->state) {
		case Running:
			/* still runnable: put it back on a run queue */
			ready(up);
			break;
		case Moribund:
			/* final exit: free scheduling state and the Proc itself */
			up->state = Dead;
			edfstop(up);
			if (up->edf)
				free(up->edf);
			up->edf = nil;

			/*
			 * Holding locks from pexit:
			 * 	procalloc
			 *	palloc
			 */
			mmurelease(up);
			up->qnext = procalloc.free;
			procalloc.free = up;
			unlock(&palloc);
			unlock(&procalloc);
			break;
		}
		up->mach = nil;
		updatecpu(up);
		up = nil;
	}
	sched();
}
/*
 *  If changing this routine, look also at sleep().  It
 *  contains a copy of the guts of sched().
 */
void
sched(void)
{
	Proc *p;

	/* rescheduling while holding an ilock would deadlock: fatal */
	if(m->ilockdepth)
		panic("cpu%d: ilockdepth %d, last lock %#p at %#p, sched called from %#p",
			m->machno,
			m->ilockdepth,
			up? up->lastilock: nil,
			(up && up->lastilock)? up->lastilock->pc: 0,
			getcallerpc(&p+2));
	if(up){
		/*
		 *  Delay the sched until the process gives up the locks
		 *  it is holding.  This avoids dumb lock loops.
		 *  Don't delay if the process is Moribund.
		 *  It called sched to die.
		 *  But do sched eventually.  This avoids a missing unlock
		 *  from hanging the entire kernel.
		 *  But don't reschedule procs holding palloc or procalloc.
		 *  Those are far too important to be holding while asleep.
		 *
		 *  This test is not exact.  There can still be a few instructions
		 *  in the middle of taslock when a process holds a lock
		 *  but Lock.p has not yet been initialized.
		 */
		if(up->nlocks.ref)
		if(up->state != Moribund)
		if(up->delaysched < 20
		|| palloc.Lock.p == up
		|| procalloc.Lock.p == up){
			up->delaysched++;
			delayedscheds++;
			return;
		}

		up->delaysched = 0;

		splhi();

		/* statistics */
		m->cs++;

		/* save context; a later gotolabel(&up->sched) resumes just below */
		procsave(up);
		if(setlabel(&up->sched)){
			/* back again: restore state and return to the proc */
			procrestore(up);
			spllo();
			return;
		}
		gotolabel(&m->sched);	/* jump onto the scheduler stack (schedinit) */
	}
	/* on the scheduler stack: pick the next proc and switch to it */
	p = runproc();
	if(!p->edf){
		updatecpu(p);
		p->priority = reprioritize(p);
	}
	/* unless it was readied cooperatively, give it a fresh 100ms quantum */
	if(p != m->readied)
		m->schedticks = m->ticks + HZ/10;
	m->readied = 0;
	up = p;
	up->state = Running;
	up->mach = MACHP(m->machno);
	m->proc = up;
	mmuswitch(up);
	gotolabel(&up->sched);	/* resume the chosen proc */
}
  159. int
  160. anyready(void)
  161. {
  162. return runvec;
  163. }
  164. int
  165. anyhigher(void)
  166. {
  167. return runvec & ~((1<<(up->priority+1))-1);
  168. }
  169. /*
  170. * here once per clock tick to see if we should resched
  171. */
  172. void
  173. hzsched(void)
  174. {
  175. /* once a second, rebalance will reprioritize ready procs */
  176. if(m->machno == 0)
  177. rebalance();
  178. /* unless preempted, get to run for at least 100ms */
  179. if(anyhigher()
  180. || (!up->fixedpri && m->ticks > m->schedticks && anyready())){
  181. m->readied = nil; /* avoid cooperative scheduling */
  182. up->delaysched++;
  183. }
  184. }
/*
 *  here at the end of non-clock interrupts to see if we should preempt the
 *  current process.  Returns 1 if preempted, 0 otherwise.
 */
int
preempted(void)
{
	if(up && up->state == Running)
	if(up->preempted == 0)
	if(anyhigher())
	if(!active.exiting){
		m->readied = nil;	/* avoid cooperative scheduling */
		/* the flag keeps nested interrupts from preempting recursively */
		up->preempted = 1;
		sched();
		splhi();
		up->preempted = 0;
		return 1;
	}
	return 0;
}
  205. /*
  206. * Update the cpu time average for this particular process,
  207. * which is about to change from up -> not up or vice versa.
  208. * p->lastupdate is the last time an updatecpu happened.
  209. *
  210. * The cpu time average is a decaying average that lasts
  211. * about D clock ticks. D is chosen to be approximately
  212. * the cpu time of a cpu-intensive "quick job". A job has to run
  213. * for approximately D clock ticks before we home in on its
  214. * actual cpu usage. Thus if you manage to get in and get out
  215. * quickly, you won't be penalized during your burst. Once you
  216. * start using your share of the cpu for more than about D
  217. * clock ticks though, your p->cpu hits 1000 (1.0) and you end up
  218. * below all the other quick jobs. Interactive tasks, because
  219. * they basically always use less than their fair share of cpu,
  220. * will be rewarded.
  221. *
  222. * If the process has not been running, then we want to
  223. * apply the filter
  224. *
  225. * cpu = cpu * (D-1)/D
  226. *
  227. * n times, yielding
  228. *
  229. * cpu = cpu * ((D-1)/D)^n
  230. *
  231. * but D is big enough that this is approximately
  232. *
  233. * cpu = cpu * (D-n)/D
  234. *
  235. * so we use that instead.
  236. *
  237. * If the process has been running, we apply the filter to
  238. * 1 - cpu, yielding a similar equation. Note that cpu is
  239. * stored in fixed point (* 1000).
  240. *
  241. * Updatecpu must be called before changing up, in order
  242. * to maintain accurate cpu usage statistics. It can be called
  243. * at any time to bring the stats for a given proc up-to-date.
  244. */
void
updatecpu(Proc *p)
{
	int n, t, ocpu;
	int D = schedgain*HZ*Scaling;	/* decay period, in scaled ticks */

	/* edf procs are scheduled by deadline, not by cpu average */
	if(p->edf)
		return;

	/* elapsed scaled ticks since the last update (rounded) */
	t = MACHP(0)->ticks*Scaling + Scaling/2;
	n = t - p->lastupdate;
	p->lastupdate = t;

	if(n == 0)
		return;
	if(n > D)
		n = D;

	ocpu = p->cpu;
	if(p != up)
		p->cpu = (ocpu*(D-n))/D;	/* not running: decay toward 0 */
	else{
		/* running: decay (1 - cpu), so cpu climbs toward 1000 */
		t = 1000 - ocpu;
		t = (t*(D-n))/D;
		p->cpu = 1000 - t;
	}

//	iprint("pid %d %s for %d cpu %d -> %d\n", p->pid,p==up?"active":"inactive",n, ocpu,p->cpu);
}
  269. /*
  270. * On average, p has used p->cpu of a cpu recently.
  271. * Its fair share is conf.nmach/m->load of a cpu. If it has been getting
  272. * too much, penalize it. If it has been getting not enough, reward it.
  273. * I don't think you can get much more than your fair share that
  274. * often, so most of the queues are for using less. Having a priority
  275. * of 3 means you're just right. Having a higher priority (up to p->basepri)
  276. * means you're not using as much as you could.
  277. */
  278. int
  279. reprioritize(Proc *p)
  280. {
  281. int fairshare, n, load, ratio;
  282. load = MACHP(0)->load;
  283. if(load == 0)
  284. return p->basepri;
  285. /*
  286. * fairshare = 1.000 * conf.nproc * 1.000/load,
  287. * except the decimal point is moved three places
  288. * on both load and fairshare.
  289. */
  290. fairshare = (conf.nmach*1000*1000)/load;
  291. n = p->cpu;
  292. if(n == 0)
  293. n = 1;
  294. ratio = (fairshare+n/2) / n;
  295. if(ratio > p->basepri)
  296. ratio = p->basepri;
  297. if(ratio < 0)
  298. panic("reprioritize");
  299. //iprint("pid %d cpu %d load %d fair %d pri %d\n", p->pid, p->cpu, load, fairshare, ratio);
  300. return ratio;
  301. }
/*
 *  add a process to a scheduling queue
 */
void
queueproc(Schedq *rq, Proc *p)
{
	int pri;

	pri = rq - runq;	/* queue index is the priority */
	lock(runq);		/* one lock (on runq[0]) guards every queue */
	p->priority = pri;
	p->rnext = 0;
	/* append at the tail: FIFO within a priority level */
	if(rq->tail)
		rq->tail->rnext = p;
	else
		rq->head = p;
	rq->tail = p;
	rq->n++;
	nrdy++;
	runvec |= 1<<pri;	/* advertise a runnable proc at this priority */
	unlock(runq);
}
/*
 *  try to remove a process from a scheduling queue (called splhi)
 */
Proc*
dequeueproc(Schedq *rq, Proc *tp)
{
	Proc *l, *p;

	/* non-blocking: at splhi we must not spin on the runq lock */
	if(!canlock(runq))
		return nil;

	/*
	 *  the queue may have changed before we locked runq,
	 *  refind the target process.
	 */
	l = 0;
	for(p = rq->head; p; p = p->rnext){
		if(p == tp)
			break;
		l = p;
	}

	/*
	 *  p->mach==0 only when process state is saved
	 */
	if(p == 0 || p->mach){
		unlock(runq);
		return nil;
	}
	/* unlink p; l is its predecessor (nil if p was the head) */
	if(p->rnext == 0)
		rq->tail = l;
	if(l)
		l->rnext = p->rnext;
	else
		rq->head = p->rnext;
	if(rq->head == nil)
		runvec &= ~(1<<(rq-runq));	/* queue now empty */
	rq->n--;
	nrdy--;
	if(p->state != Ready)
		print("dequeueproc %s %lud %s\n", p->text, p->pid, statename[p->state]);

	unlock(runq);
	return p;
}
/*
 *  ready(p) picks a new priority for a process and sticks it in the
 *  runq for that priority.
 */
void
ready(Proc *p)
{
	int s, pri;
	Schedq *rq;
	void (*pt)(Proc*, int, vlong);

	s = splhi();
	/* edf procs are queued by the edf scheduler instead */
	if(edfready(p)){
		splx(s);
		return;
	}

	if(up != p && (p->wired == nil || p->wired == m))
		m->readied = p;	/* group scheduling */

	updatecpu(p);
	pri = reprioritize(p);
	p->priority = pri;
	rq = &runq[pri];
	p->state = Ready;
	queueproc(rq, p);
	pt = proctrace;
	if(pt)
		pt(p, SReady, 0);
	splx(s);
}
  392. /*
  393. * yield the processor and drop our priority
  394. */
  395. void
  396. yield(void)
  397. {
  398. if(anyready()){
  399. /* pretend we just used 1/2 tick */
  400. up->lastupdate -= Scaling/2;
  401. sched();
  402. }
  403. }
/*
 *  recalculate priorities once a second. We need to do this
 *  since priorities will otherwise only be recalculated when
 *  the running process blocks.
 */
ulong balancetime;	/* tick stamp of the last rebalance */

static void
rebalance(void)
{
	int pri, npri, t, x;
	Schedq *rq;
	Proc *p;

	/* run at most once per second */
	t = m->ticks;
	if(t - balancetime < HZ)
		return;
	balancetime = t;

	for(pri=0, rq=runq; pri<Npriq; pri++, rq++){
another:
		p = rq->head;
		if(p == nil)
			continue;
		/* only touch procs with affinity to this cpu */
		if(p->mp != MACHP(m->machno))
			continue;
		if(pri == p->basepri)
			continue;
		updatecpu(p);
		npri = reprioritize(p);
		if(npri != pri){
			/* move the head to its new queue, then rescan this one */
			x = splhi();
			p = dequeueproc(rq, p);
			if(p)
				queueproc(&runq[npri], p);
			splx(x);
			goto another;
		}
	}
}
/*
 *  pick a process to run
 */
Proc*
runproc(void)
{
	Schedq *rq;
	Proc *p;
	ulong start, now;
	int i;
	void (*pt)(Proc*, int, vlong);

	start = perfticks();

	/* cooperative scheduling until the clock ticks */
	if((p=m->readied) && p->mach==0 && p->state==Ready
	&& (p->wired == nil || p->wired == m)
	&& runq[Nrq-1].head == nil && runq[Nrq-2].head == nil){
		skipscheds++;
		rq = &runq[p->priority];
		goto found;
	}

	preempts++;

loop:
	/*
	 *  find a process that last ran on this processor (affinity),
	 *  or one that hasn't moved in a while (load balancing).  Every
	 *  time around the loop affinity goes down.
	 */
	spllo();
	for(i = 0;; i++){
		/*
		 *  find the highest priority target process that this
		 *  processor can run given affinity constraints.
		 *
		 */
		for(rq = &runq[Nrq-1]; rq >= runq; rq--){
			for(p = rq->head; p; p = p->rnext){
				if(p->mp == nil || p->mp == MACHP(m->machno)
				|| (!p->wired && i > 0))
					goto found;
			}
		}

		/* waste time or halt the CPU */
		idlehands();

		/* remember how much time we're here */
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	/* may fail if another cpu grabbed p or its state changed; retry */
	p = dequeueproc(rq, p);
	if(p == nil)
		goto loop;

	p->state = Scheding;
	p->mp = MACHP(m->machno);	/* record affinity with this cpu */

	if(edflock(p)){
		edfrun(p, rq == &runq[PriEdf]);	/* start deadline timer and do admin */
		edfunlock();
	}
	pt = proctrace;
	if(pt)
		pt(p, SRun, 0);
	return p;
}
/*
 *  report whether p's pages may be stolen: true only if p is not
 *  currently running on any processor.
 */
int
canpage(Proc *p)
{
	int ok = 0;

	splhi();
	lock(runq);
	/* Only reliable way to see if we are Running */
	if(p->mach == 0) {
		p->newtlb = 1;
		ok = 1;
	}
	unlock(runq);
	spllo();

	return ok;
}
/*
 *  out of Procs: dump state and reboot via panic.
 */
void
noprocpanic(char *msg)
{
	/*
	 *  setting exiting will make hzclock() on each processor call exit(0).
	 *  clearing our bit in machs avoids calling exit(0) from hzclock()
	 *  on this processor.
	 */
	lock(&active);
	active.machs &= ~(1<<m->machno);
	active.exiting = 1;
	unlock(&active);

	procdump();
	delay(1000);	/* let the other cpus exit and the dump drain */
	panic(msg);
}
/*
 *  allocate and initialize a new process.  Blocks (or panics,
 *  by default) when the table is empty.
 */
Proc*
newproc(void)
{
	char msg[64];
	Proc *p;

	/* take a Proc from the free list; wait or panic if there are none */
	lock(&procalloc);
	while((p = procalloc.free) == nil) {
		unlock(&procalloc);

		snprint(msg, sizeof msg, "no procs; %s forking",
			up? up->text: "kernel");
		/*
		 *  the situation is unlikely to heal itself.
		 *  dump the proc table and restart by default.
		 *  *noprocspersist in plan9.ini will yield the old
		 *  behaviour of trying forever.
		 */
		if(getconf("*noprocspersist") == nil)
			noprocpanic(msg);
		resrcwait(msg);
		lock(&procalloc);
	}
	procalloc.free = p->qnext;
	unlock(&procalloc);

	/* scrub everything a recycled Proc may have left behind */
	p->state = Scheding;
	p->psstate = "New";
	p->mach = 0;
	p->qnext = 0;
	p->nchild = 0;
	p->nwait = 0;
	p->waitq = 0;
	p->parent = 0;
	p->pgrp = 0;
	p->egrp = 0;
	p->fgrp = 0;
	p->rgrp = 0;
	p->pdbg = 0;
	p->fpstate = FPinit;
	p->kp = 0;
	/* children of a syscall-traced proc start out traced too */
	if(up && up->procctl == Proc_tracesyscall)
		p->procctl = Proc_tracesyscall;
	else
		p->procctl = 0;
	p->syscalltrace = 0;
	p->notepending = 0;
	p->ureg = 0;
	p->privatemem = 0;
	p->noswap = 0;
	p->errstr = p->errbuf0;
	p->syserrstr = p->errbuf1;
	p->errbuf0[0] = '\0';
	p->errbuf1[0] = '\0';
	p->nlocks.ref = 0;
	p->delaysched = 0;
	p->trace = 0;
	kstrdup(&p->user, "*nouser");
	kstrdup(&p->text, "*notext");
	kstrdup(&p->args, "");
	p->nargs = 0;
	p->setargs = 0;
	memset(p->seg, 0, sizeof p->seg);

	/* allocate identifiers; id 0 is reserved, so wraparound is fatal */
	p->pid = incref(&pidalloc);
	pidhash(p);
	p->noteid = incref(&noteidalloc);
	if(p->pid==0 || p->noteid==0)
		panic("pidalloc");
	if(p->kstack == 0)
		p->kstack = smalloc(KSTACK);

	/* sched params */
	p->mp = 0;
	p->wired = 0;
	procpriority(p, PriNormal, 0);
	p->cpu = 0;
	p->lastupdate = MACHP(0)->ticks*Scaling;
	p->edf = nil;

	return p;
}
  612. /*
  613. * wire this proc to a machine
  614. */
  615. void
  616. procwired(Proc *p, int bm)
  617. {
  618. Proc *pp;
  619. int i;
  620. char nwired[MAXMACH];
  621. Mach *wm;
  622. if(bm < 0){
  623. /* pick a machine to wire to */
  624. memset(nwired, 0, sizeof(nwired));
  625. p->wired = 0;
  626. pp = proctab(0);
  627. for(i=0; i<conf.nproc; i++, pp++){
  628. wm = pp->wired;
  629. if(wm && pp->pid)
  630. nwired[wm->machno]++;
  631. }
  632. bm = 0;
  633. for(i=0; i<conf.nmach; i++)
  634. if(nwired[i] < nwired[bm])
  635. bm = i;
  636. } else {
  637. /* use the virtual machine requested */
  638. bm = bm % conf.nmach;
  639. }
  640. p->wired = MACHP(bm);
  641. p->mp = p->wired;
  642. }
  643. void
  644. procpriority(Proc *p, int pri, int fixed)
  645. {
  646. if(pri >= Npriq)
  647. pri = Npriq - 1;
  648. else if(pri < 0)
  649. pri = 0;
  650. p->basepri = pri;
  651. p->priority = pri;
  652. if(fixed){
  653. p->fixedpri = 1;
  654. } else {
  655. p->fixedpri = 0;
  656. }
  657. }
void
procinit0(void)		/* bad planning - clashes with devproc.c */
{
	Proc *p;
	int i;

	/* one static arena of conf.nproc Procs, initially all free */
	procalloc.free = xalloc(conf.nproc*sizeof(Proc));
	if(procalloc.free == nil){
		xsummary();
		panic("cannot allocate %lud procs (%ludMB)\n", conf.nproc, conf.nproc*sizeof(Proc)/(1024*1024));
	}
	procalloc.arena = procalloc.free;

	/* chain the arena into the free list via qnext */
	p = procalloc.free;
	for(i=0; i<conf.nproc-1; i++,p++)
		p->qnext = p+1;
	p->qnext = 0;
}
/*
 *  sleep if a condition is not true.  Another process will
 *  awaken us after it sets the condition.  When we awaken
 *  the condition may no longer be true.
 *
 *  we lock both the process and the rendezvous to keep r->p
 *  and p->r synchronized.
 */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	int s;
	void (*pt)(Proc*, int, vlong);

	s = splhi();

	if(up->nlocks.ref)
		print("process %lud sleeps with %lud locks held, last lock %#p locked at pc %#lux, sleep called from %#p\n",
			up->pid, up->nlocks.ref, up->lastlock, up->lastlock->pc, getcallerpc(&r));
	lock(r);
	lock(&up->rlock);
	if(r->p){
		print("double sleep called from %#p, %lud %lud\n", getcallerpc(&r), r->p->pid, up->pid);
		dumpstack();
	}

	/*
	 *  Wakeup only knows there may be something to do by testing
	 *  r->p in order to get something to lock on.
	 *  Flush that information out to memory in case the sleep is
	 *  committed.
	 */
	r->p = up;

	if((*f)(arg) || up->notepending){
		/*
		 *  if condition happened or a note is pending
		 *  never mind
		 */
		r->p = nil;
		unlock(&up->rlock);
		unlock(r);
	} else {
		/*
		 *  now we are committed to
		 *  change state and call scheduler
		 */
		pt = proctrace;
		if(pt)
			pt(up, SSleep, 0);
		up->state = Wakeme;
		up->r = r;

		/* statistics */
		m->cs++;

		/* this is a copy of the guts of sched(); see the comment there */
		procsave(up);
		if(setlabel(&up->sched)) {
			/*
			 *  here when the process is awakened
			 */
			procrestore(up);
			spllo();
		} else {
			/*
			 *  here to go to sleep (i.e. stop Running)
			 */
			unlock(&up->rlock);
			unlock(r);
			gotolabel(&m->sched);
		}
	}

	/* a posted note interrupts the sleep */
	if(up->notepending) {
		up->notepending = 0;
		splx(s);
		if(up->procctl == Proc_exitme && up->closingfgrp)
			forceclosefgrp();
		error(Eintr);
	}

	splx(s);
}
  749. static int
  750. tfn(void *arg)
  751. {
  752. return up->trend == nil || up->tfn(arg);
  753. }
/*
 *  timer handler for tsleep: wake the sleeping process.
 *  p->trend is cleared before the wakeup so tfn() becomes true
 *  even if the caller's condition never does.
 */
void
twakeup(Ureg*, Timer *t)
{
	Proc *p;
	Rendez *trend;

	p = t->ta;
	trend = p->trend;
	p->trend = 0;
	if(trend)
		wakeup(trend);
}
/*
 *  sleep on r for at most ms milliseconds, using a one-shot
 *  timer (twakeup) to bound the wait.
 */
void
tsleep(Rendez *r, int (*fn)(void*), void *arg, ulong ms)
{
	if (up->tt){
		/* shouldn't happen: a previous timer is still armed */
		print("tsleep: timer active: mode %d, tf %#p\n", up->tmode, up->tf);
		timerdel(up);
	}
	up->tns = MS2NS(ms);
	up->tf = twakeup;
	up->tmode = Trelative;
	up->ta = up;
	up->trend = r;
	up->tfn = fn;
	timeradd(up);

	if(waserror()){
		/* interrupted: make sure the timer cannot fire later */
		timerdel(up);
		nexterror();
	}
	sleep(r, tfn, arg);
	if(up->tt)
		timerdel(up);
	up->twhen = 0;
	poperror();
}
/*
 *  Expects that only one process can call wakeup for any given Rendez.
 *  We hold both locks to ensure that r->p and p->r remain consistent.
 *  Richard Miller has a better solution that doesn't require both to
 *  be held simultaneously, but I'm a paranoid - presotto.
 */
Proc*
wakeup(Rendez *r)
{
	Proc *p;
	int s;

	s = splhi();

	lock(r);
	p = r->p;

	if(p != nil){
		lock(&p->rlock);
		/* sanity: the proc must still be committed to this rendezvous */
		if(p->state != Wakeme || p->r != r){
			iprint("%p %p %d\n", p->r, r, p->state);
			panic("wakeup: state");
		}
		r->p = nil;
		p->r = nil;
		ready(p);
		unlock(&p->rlock);
	}
	unlock(r);

	splx(s);

	return p;	/* nil if nobody was sleeping here */
}
/*
 *  if waking a sleeping process, this routine must hold both
 *  p->rlock and r->lock.  However, it can't know them in
 *  the same order as wakeup causing a possible lock ordering
 *  deadlock.  We break the deadlock by giving up the p->rlock
 *  lock if we can't get the r->lock and retrying.
 */
int
postnote(Proc *p, int dolock, char *n, int flag)
{
	int s, ret;
	Rendez *r;
	Proc *d, **l;

	if(dolock)
		qlock(&p->debug);

	/* a non-user note flushes any queued user notes */
	if(flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;

	/* queue the note if there's room; ret says whether it fit */
	ret = 0;
	if(p->nnote < NNOTE) {
		strcpy(p->note[p->nnote].msg, n);
		p->note[p->nnote++].flag = flag;
		ret = 1;
	}
	p->notepending = 1;
	if(dolock)
		qunlock(&p->debug);

	/* this loop is to avoid lock ordering problems. */
	for(;;){
		s = splhi();
		lock(&p->rlock);
		r = p->r;

		/* waiting for a wakeup? */
		if(r == nil)
			break;	/* no */

		/* try for the second lock */
		if(canlock(r)){
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);
			p->r = nil;
			r->p = nil;
			ready(p);
			unlock(r);
			break;
		}

		/* give other process time to get out of critical section and try again */
		unlock(&p->rlock);
		splx(s);
		sched();
	}
	unlock(&p->rlock);
	splx(s);

	if(p->state != Rendezvous)
		return ret;

	/* Try and pull out of a rendezvous */
	lock(p->rgrp);
	if(p->state == Rendezvous) {
		p->rendval = ~0;	/* the rendezvous fails with ~0 */
		/* unhash p from its rendezvous tag chain */
		l = &REND(p->rgrp, p->rendtag);
		for(d = *l; d; d = d->rendhash) {
			if(d == p) {
				*l = p->rendhash;
				break;
			}
			l = &d->rendhash;
		}
		ready(p);
	}
	unlock(p->rgrp);
	return ret;
}
/*
 *  weird thing: keep at most NBROKEN around
 */
#define NBROKEN 4
struct
{
	QLock;
	int	n;		/* number of broken procs currently held */
	Proc*	p[NBROKEN];	/* oldest first */
}broken;
/*
 *  park the current proc as Broken (for post-mortem debugging)
 *  and reschedule.  Returns only when the proc is released.
 */
void
addbroken(Proc *p)
{
	qlock(&broken);
	/* table full: release the oldest broken proc back to the scheduler */
	if(broken.n == NBROKEN) {
		ready(broken.p[0]);
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
		--broken.n;
	}
	broken.p[broken.n++] = p;
	qunlock(&broken);

	edfstop(up);
	p->state = Broken;
	p->psstate = 0;
	sched();
}
/*
 *  release one specific broken proc, if present, back to the scheduler.
 */
void
unbreak(Proc *p)
{
	int b;

	qlock(&broken);
	for(b=0; b < broken.n; b++)
		if(broken.p[b] == p) {
			/* close the gap left in the table */
			broken.n--;
			memmove(&broken.p[b], &broken.p[b+1],
					sizeof(Proc*)*(NBROKEN-(b+1)));
			ready(p);
			break;
		}
	qunlock(&broken);
}
  929. int
  930. freebroken(void)
  931. {
  932. int i, n;
  933. qlock(&broken);
  934. n = broken.n;
  935. for(i=0; i<n; i++) {
  936. ready(broken.p[i]);
  937. broken.p[i] = 0;
  938. }
  939. broken.n = 0;
  940. qunlock(&broken);
  941. return n;
  942. }
/*
 * Terminate the calling process.  exitstr becomes the status string in
 * the wait record left for the parent; if freemem is zero the process
 * is parked on the broken list (for post-mortem debugging) instead of
 * being reclaimed outright.  Never returns: ends by calling sched()
 * in state Moribund, after which the scheduler frees the remains.
 */
void
pexit(char *exitstr, int freemem)
{
	Proc *p;
	Segment **s, **es;
	long utime, stime;
	Waitq *wq, *f, *next;
	Fgrp *fgrp;
	Egrp *egrp;
	Rgrp *rgrp;
	Pgrp *pgrp;
	Chan *dot;
	void (*pt)(Proc*, int, vlong);

	if(up->syscalltrace)
		free(up->syscalltrace);
	up->alarm = 0;
	if (up->tt)
		timerdel(up);
	pt = proctrace;
	if(pt)
		pt(up, SDead, 0);

	/* nil out all the resources under lock (free later) */
	qlock(&up->debug);
	fgrp = up->fgrp;
	up->fgrp = nil;
	egrp = up->egrp;
	up->egrp = nil;
	rgrp = up->rgrp;
	up->rgrp = nil;
	pgrp = up->pgrp;
	up->pgrp = nil;
	dot = up->dot;
	up->dot = nil;
	qunlock(&up->debug);

	/* release the references outside the debug lock */
	if(fgrp)
		closefgrp(fgrp);
	if(egrp)
		closeegrp(egrp);
	if(rgrp)
		closergrp(rgrp);
	if(dot)
		cclose(dot);
	if(pgrp)
		closepgrp(pgrp);

	/*
	 * if not a kernel process and have a parent,
	 * do some housekeeping.
	 */
	if(up->kp == 0) {
		p = up->parent;
		if(p == 0) {
			if(exitstr == 0)
				exitstr = "unknown";
			panic("boot process died: %s", exitstr);
		}

		/* drain pending errors so smalloc cannot unwind past this point */
		while(waserror())
			;

		wq = smalloc(sizeof(Waitq));
		poperror();

		/* fill in the wait record for the parent */
		wq->w.pid = up->pid;
		utime = up->time[TUser] + up->time[TCUser];
		stime = up->time[TSys] + up->time[TCSys];
		wq->w.time[TUser] = tk2ms(utime);
		wq->w.time[TSys] = tk2ms(stime);
		wq->w.time[TReal] = tk2ms(MACHP(0)->ticks - up->time[TReal]);
		if(exitstr && exitstr[0])
			snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
		else
			wq->w.msg[0] = '\0';

		lock(&p->exl);
		/*
		 * Check that parent is still alive.
		 */
		if(p->pid == up->parentpid && p->state != Broken) {
			p->nchild--;
			p->time[TCUser] += utime;
			p->time[TCSys] += stime;
			/*
			 * If there would be more than 128 wait records
			 * processes for my parent, then don't leave a wait
			 * record behind.  This helps prevent badly written
			 * daemon processes from accumulating lots of wait
			 * records.
			 */
			if(p->nwait < 128) {
				wq->next = p->waitq;
				p->waitq = wq;
				p->nwait++;
				wq = nil;
				wakeup(&p->waitr);
			}
		}
		unlock(&p->exl);
		if(wq)
			free(wq);	/* parent gone, broken, or queue full: drop it */
	}

	if(!freemem)
		addbroken(up);

	/* release all memory segments */
	qlock(&up->seglock);
	es = &up->seg[NSEG];
	for(s = up->seg; s < es; s++) {
		if(*s) {
			putseg(*s);
			*s = 0;
		}
	}
	qunlock(&up->seglock);

	lock(&up->exl);		/* Prevent my children from leaving waits */
	pidunhash(up);
	up->pid = 0;
	wakeup(&up->waitr);
	unlock(&up->exl);

	/* free wait records of children that exited before us */
	for(f = up->waitq; f; f = next) {
		next = f->next;
		free(f);
	}

	/* release debuggers */
	qlock(&up->debug);
	if(up->pdbg) {
		wakeup(&up->pdbg->sleep);
		up->pdbg = 0;
	}
	qunlock(&up->debug);

	/* Sched must not loop for these locks */
	lock(&procalloc);
	lock(&palloc);

	edfstop(up);
	up->state = Moribund;
	sched();
	panic("pexit");
}
  1074. int
  1075. haswaitq(void *x)
  1076. {
  1077. Proc *p;
  1078. p = (Proc *)x;
  1079. return p->waitq != 0;
  1080. }
/*
 * Wait for a child process to exit.  Copies the child's wait record
 * into *w (when w is non-nil) and returns the child's pid.  Raises
 * Einuse if another pwait is already in progress for this process and
 * Enochild if there are no children and no queued wait records.
 */
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	/* only one outstanding wait per process */
	if(!canqlock(&up->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	if(up->nchild == 0 && up->waitq == 0) {
		unlock(&up->exl);
		error(Enochild);
	}
	unlock(&up->exl);

	sleep(&up->waitr, haswaitq, up);

	/* dequeue the first wait record under exl */
	lock(&up->exl);
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
  1112. Proc*
  1113. proctab(int i)
  1114. {
  1115. return &procalloc.arena[i];
  1116. }
  1117. void
  1118. dumpaproc(Proc *p)
  1119. {
  1120. ulong bss;
  1121. char *s;
  1122. if(p == 0)
  1123. return;
  1124. bss = 0;
  1125. if(p->seg[BSEG])
  1126. bss = p->seg[BSEG]->top;
  1127. s = p->psstate;
  1128. if(s == 0)
  1129. s = statename[p->state];
  1130. print("%3lud:%10s pc %8lux dbgpc %8lux %8s (%s) ut %ld st %ld bss %lux qpc %lux nl %lud nd %lud lpc %lux pri %lud\n",
  1131. p->pid, p->text, p->pc, dbgpc(p), s, statename[p->state],
  1132. p->time[0], p->time[1], bss, p->qpc, p->nlocks.ref, p->delaysched, p->lastlock ? p->lastlock->pc : 0, p->priority);
  1133. }
  1134. void
  1135. procdump(void)
  1136. {
  1137. int i;
  1138. Proc *p;
  1139. if(up)
  1140. print("up %lud\n", up->pid);
  1141. else
  1142. print("no current process\n");
  1143. for(i=0; i<conf.nproc; i++) {
  1144. p = &procalloc.arena[i];
  1145. if(p->state == Dead)
  1146. continue;
  1147. dumpaproc(p);
  1148. }
  1149. }
  1150. /*
  1151. * wait till all processes have flushed their mmu
  1152. * state about segement s
  1153. */
  1154. void
  1155. procflushseg(Segment *s)
  1156. {
  1157. int i, ns, nm, nwait;
  1158. Proc *p;
  1159. /*
  1160. * tell all processes with this
  1161. * segment to flush their mmu's
  1162. */
  1163. nwait = 0;
  1164. for(i=0; i<conf.nproc; i++) {
  1165. p = &procalloc.arena[i];
  1166. if(p->state == Dead)
  1167. continue;
  1168. for(ns = 0; ns < NSEG; ns++)
  1169. if(p->seg[ns] == s){
  1170. p->newtlb = 1;
  1171. for(nm = 0; nm < conf.nmach; nm++){
  1172. if(MACHP(nm)->proc == p){
  1173. MACHP(nm)->flushmmu = 1;
  1174. nwait++;
  1175. }
  1176. }
  1177. break;
  1178. }
  1179. }
  1180. if(nwait == 0)
  1181. return;
  1182. /*
  1183. * wait for all processors to take a clock interrupt
  1184. * and flush their mmu's
  1185. */
  1186. for(nm = 0; nm < conf.nmach; nm++)
  1187. if(MACHP(nm) != m)
  1188. while(MACHP(nm)->flushmmu)
  1189. sched();
  1190. }
  1191. void
  1192. scheddump(void)
  1193. {
  1194. Proc *p;
  1195. Schedq *rq;
  1196. for(rq = &runq[Nrq-1]; rq >= runq; rq--){
  1197. if(rq->head == 0)
  1198. continue;
  1199. print("rq%ld:", rq-runq);
  1200. for(p = rq->head; p; p = p->rnext)
  1201. print(" %lud(%lud)", p->pid, m->ticks - p->readytime);
  1202. print("\n");
  1203. delay(150);
  1204. }
  1205. print("nrdy %d\n", nrdy);
  1206. }
/*
 * Create a kernel process running func(arg), named name.  The new
 * process inherits slash, dot, and note state from the caller, runs
 * as eve in a private process group shared by all kprocs, and is
 * made ready immediately.
 */
void
kproc(char *name, void (*func)(void *), void *arg)
{
	Proc *p;
	static Pgrp *kpgrp;	/* one pgrp shared by every kernel process */

	p = newproc();
	p->psstate = 0;
	p->procmode = 0640;
	p->kp = 1;		/* mark as kernel process */
	p->noswap = 1;		/* kernel stacks must stay resident */

	/* inherit assorted state from the creating process */
	p->fpsave = up->fpsave;
	p->scallnr = up->scallnr;
	p->s = up->s;
	p->nerrlab = 0;
	p->slash = up->slash;
	p->dot = up->dot;
	if(p->dot)
		incref(p->dot);

	memmove(p->note, up->note, sizeof(p->note));
	p->nnote = up->nnote;
	p->notified = 0;
	p->lastnote = up->lastnote;
	p->notify = up->notify;
	p->ureg = 0;
	p->dbgreg = 0;

	procpriority(p, PriKproc, 0);

	/* set the entry point; arch-specific */
	kprocchild(p, func, arg);

	kstrdup(&p->user, eve);
	kstrdup(&p->text, name);
	if(kpgrp == 0)
		kpgrp = newpgrp();
	p->pgrp = kpgrp;
	incref(kpgrp);

	memset(p->time, 0, sizeof(p->time));
	p->time[TReal] = MACHP(0)->ticks;
	ready(p);
}
  1244. /*
  1245. * called splhi() by notify(). See comment in notify for the
  1246. * reasoning.
  1247. */
  1248. void
  1249. procctl(Proc *p)
  1250. {
  1251. char *state;
  1252. ulong s;
  1253. switch(p->procctl) {
  1254. case Proc_exitbig:
  1255. spllo();
  1256. pexit("Killed: Insufficient physical memory", 1);
  1257. case Proc_exitme:
  1258. spllo(); /* pexit has locks in it */
  1259. pexit("Killed", 1);
  1260. case Proc_traceme:
  1261. if(p->nnote == 0)
  1262. return;
  1263. /* No break */
  1264. case Proc_stopme:
  1265. p->procctl = 0;
  1266. state = p->psstate;
  1267. p->psstate = "Stopped";
  1268. /* free a waiting debugger */
  1269. s = spllo();
  1270. qlock(&p->debug);
  1271. if(p->pdbg) {
  1272. wakeup(&p->pdbg->sleep);
  1273. p->pdbg = 0;
  1274. }
  1275. qunlock(&p->debug);
  1276. splhi();
  1277. p->state = Stopped;
  1278. sched();
  1279. p->psstate = state;
  1280. splx(s);
  1281. return;
  1282. }
  1283. }
  1284. #include "errstr.h"
/*
 * Raise an error: record err in up->errstr and unwind to the most
 * recent waserror().  Does not return to the caller.
 */
void
error(char *err)
{
	spllo();

	assert(up->nerrlab < NERR);
	kstrcpy(up->errstr, err, ERRMAX);
	/* NOTE(review): records the raise point in the spare top label —
	 * presumably for debugger inspection; confirm against setlabel users */
	setlabel(&up->errlab[NERR-1]);
	nexterror();
}
/*
 * Pop the current error label and jump to it, continuing the unwind
 * begun by error().  Does not return.
 */
void
nexterror(void)
{
	gotolabel(&up->errlab[--up->nerrlab]);
}
  1299. void
  1300. exhausted(char *resource)
  1301. {
  1302. char buf[ERRMAX];
  1303. snprint(buf, sizeof buf, "no free %s", resource);
  1304. iprint("%s\n", buf);
  1305. error(buf);
  1306. }
  1307. void
  1308. killbig(char *why)
  1309. {
  1310. int i;
  1311. Segment *s;
  1312. ulong l, max;
  1313. Proc *p, *ep, *kp;
  1314. max = 0;
  1315. kp = 0;
  1316. ep = procalloc.arena+conf.nproc;
  1317. for(p = procalloc.arena; p < ep; p++) {
  1318. if(p->state == Dead || p->kp)
  1319. continue;
  1320. l = 0;
  1321. for(i=1; i<NSEG; i++) {
  1322. s = p->seg[i];
  1323. if(s != 0)
  1324. l += s->top - s->base;
  1325. }
  1326. if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
  1327. kp = p;
  1328. max = l;
  1329. }
  1330. }
  1331. print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
  1332. for(p = procalloc.arena; p < ep; p++) {
  1333. if(p->state == Dead || p->kp)
  1334. continue;
  1335. if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
  1336. p->procctl = Proc_exitbig;
  1337. }
  1338. kp->procctl = Proc_exitbig;
  1339. for(i = 0; i < NSEG; i++) {
  1340. s = kp->seg[i];
  1341. if(s != 0 && canqlock(&s->lk)) {
  1342. mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
  1343. qunlock(&s->lk);
  1344. }
  1345. }
  1346. }
  1347. /*
  1348. * change ownership to 'new' of all processes owned by 'old'. Used when
  1349. * eve changes.
  1350. */
  1351. void
  1352. renameuser(char *old, char *new)
  1353. {
  1354. Proc *p, *ep;
  1355. ep = procalloc.arena+conf.nproc;
  1356. for(p = procalloc.arena; p < ep; p++)
  1357. if(p->user!=nil && strcmp(old, p->user)==0)
  1358. kstrdup(&p->user, new);
  1359. }
  1360. /*
  1361. * time accounting called by clock() splhi'd
  1362. */
  1363. void
  1364. accounttime(void)
  1365. {
  1366. Proc *p;
  1367. ulong n, per;
  1368. static ulong nrun;
  1369. p = m->proc;
  1370. if(p) {
  1371. nrun++;
  1372. p->time[p->insyscall]++;
  1373. }
  1374. /* calculate decaying duty cycles */
  1375. n = perfticks();
  1376. per = n - m->perf.last;
  1377. m->perf.last = n;
  1378. per = (m->perf.period*(HZ-1) + per)/HZ;
  1379. if(per != 0)
  1380. m->perf.period = per;
  1381. m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;
  1382. m->perf.inidle = 0;
  1383. m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;
  1384. m->perf.inintr = 0;
  1385. /* only one processor gets to compute system load averages */
  1386. if(m->machno != 0)
  1387. return;
  1388. /*
  1389. * calculate decaying load average.
  1390. * if we decay by (n-1)/n then it takes
  1391. * n clock ticks to go from load L to .36 L once
  1392. * things quiet down. it takes about 5 n clock
  1393. * ticks to go to zero. so using HZ means this is
  1394. * approximately the load over the last second,
  1395. * with a tail lasting about 5 seconds.
  1396. */
  1397. n = nrun;
  1398. nrun = 0;
  1399. n = (nrdy+n)*1000;
  1400. m->load = (m->load*(HZ-1)+n)/HZ;
  1401. }
  1402. static void
  1403. pidhash(Proc *p)
  1404. {
  1405. int h;
  1406. h = p->pid % nelem(procalloc.ht);
  1407. lock(&procalloc);
  1408. p->pidhash = procalloc.ht[h];
  1409. procalloc.ht[h] = p;
  1410. unlock(&procalloc);
  1411. }
  1412. static void
  1413. pidunhash(Proc *p)
  1414. {
  1415. int h;
  1416. Proc **l;
  1417. h = p->pid % nelem(procalloc.ht);
  1418. lock(&procalloc);
  1419. for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
  1420. if(*l == p){
  1421. *l = p->pidhash;
  1422. break;
  1423. }
  1424. unlock(&procalloc);
  1425. }
  1426. int
  1427. procindex(ulong pid)
  1428. {
  1429. Proc *p;
  1430. int h;
  1431. int s;
  1432. s = -1;
  1433. h = pid % nelem(procalloc.ht);
  1434. lock(&procalloc);
  1435. for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
  1436. if(p->pid == pid){
  1437. s = p - procalloc.arena;
  1438. break;
  1439. }
  1440. unlock(&procalloc);
  1441. return s;
  1442. }