/*
 * proc.c - process allocation, scheduling, sleep/wakeup, notes and exit
 */
  1. #include "u.h"
  2. #include "../port/lib.h"
  3. #include "mem.h"
  4. #include "dat.h"
  5. #include "fns.h"
  6. #include "../port/error.h"
int nrdy;			/* number of processes on the run queues */
Ref noteidalloc;		/* source of unique note-group ids */

long delayedscheds;	/* statistics */

static Ref pidalloc;	/* source of unique pids */

/* process table: pid hash chains, backing arena, free list */
static struct Procalloc
{
	Lock;
	Proc* ht[128];	/* pid hash chains; see pidhash()/pidunhash() */
	Proc* arena;	/* conf.nproc contiguous Procs */
	Proc* free;	/* free list linked through qnext */
} procalloc;

static Schedq runq[Nrq];	/* one queue per priority level */

extern Edfinterface nulledf;

Edfinterface *edf = &nulledf;	/* EDF real-time hooks; null implementation by default */

/* printable names indexed by Proc.state */
char *statename[] =
{	/* BUG: generate automatically */
	"Dead",
	"Moribund",
	"Ready",
	"Scheding",
	"Running",
	"Queueing",
	"QueueingR",
	"QueueingW",
	"Wakeme",
	"Broken",
	"Stopped",
	"Rendez",
	"Released",
};

static void pidhash(Proc*);
static void pidunhash(Proc*);
  39. /*
  40. * Always splhi()'ed.
  41. */
void
schedinit(void)		/* never returns */
{
	setlabel(&m->sched);
	if(up) {
		/* we arrive here via gotolabel(&m->sched) from sched() */
		m->proc = 0;
		switch(up->state) {
		case Running:
			ready(up);
			break;
		case Moribund:
			up->state = Dead;
			if (edf->isedf(up))
				edf->edfbury(up);

			/*
			 * Holding locks from pexit:
			 * 	procalloc
			 *	palloc
			 */
			mmurelease(up);

			/* return the Proc to the free list */
			up->qnext = procalloc.free;
			procalloc.free = up;

			unlock(&palloc);
			unlock(&procalloc);
			break;
		}
		up->mach = 0;
		up = nil;
	}
	sched();
}
  73. /*
  74. * If changing this routine, look also at sleep(). It
  75. * contains a copy of the guts of sched().
  76. */
void
sched(void)
{
	int x[1];

	/* rescheduling while holding an ilock would deadlock the machine */
	if(m->ilockdepth)
		panic("ilockdepth %d, last lock 0x%p at 0x%lux, sched called from 0x%lux",
			m->ilockdepth, up?up->lastilock:nil,
			(up && up->lastilock)?up->lastilock->pc:0, getcallerpc(x+3));
	if(up){
		/*
		 * with spin locks held, defer the reschedule
		 * (counted in delayedscheds) rather than switch
		 */
		if(up->nlocks && up->state != Moribund){
			delayedscheds++;
			return;
		}
		splhi();

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)){
			/* resumed here when runproc() later picks this process */
			procrestore(up);
			spllo();
			return;
		}
		/* switch to the per-processor scheduler stack; re-enters schedinit() */
		gotolabel(&m->sched);
	}
	up = runproc();
	up->state = Running;
	up->mach = MACHP(m->machno);
	m->proc = up;
	mmuswitch(up);
	gotolabel(&up->sched);
}
  108. int
  109. anyready(void)
  110. {
  111. return nrdy || edf->edfanyready();
  112. }
  113. int
  114. anyhigher(void)
  115. {
  116. Schedq *rq;
  117. if(nrdy == 0)
  118. return 0;
  119. for(rq = &runq[Nrq-1]; rq > &runq[up->priority]; rq--)
  120. if(rq->head != nil)
  121. return 1;
  122. return 0;
  123. }
enum
{
	Squantum = 1,	/* divisor applied to the run-time history when computing priority; see ready() */
};
void
ready(Proc *p)
{
	int s, pri;
	Schedq *rq;

	s = splhi();

	/* real-time processes are handed to the EDF scheduler instead */
	if(edf->isedf(p)){
		edf->edfready(p);
		splx(s);
		return;
	}

	if(p->fixedpri){
		pri = p->basepri;
	} else {
		/* history counts: decay accumulated run time into art */
		if(p->state == Running){
			p->rt++;
			pri = (p->art + p->rt)/2;
		} else {
			p->art = (p->art + p->rt + 2)/2;
			pri = (p->art + p->rt)/2;
			p->rt = 0;
		}
		/* the more cpu used recently, the further below basepri we go */
		pri = p->basepri - (pri/Squantum);
		if(pri < 0)
			pri = 0;

		/* the only intersection between the classes is at PriNormal */
		if(pri < PriNormal && p->basepri > PriNormal)
			pri = PriNormal;

		/* stick at low priority any process waiting for a lock */
		if(p->lockwait)
			pri = PriLock;
	}
	p->priority = pri;
	rq = &runq[p->priority];

	/* a single lock (taken on runq) serializes all queue updates and nrdy */
	lock(runq);
	p->rnext = 0;
	if(rq->tail)
		rq->tail->rnext = p;
	else
		rq->head = p;
	rq->tail = p;
	rq->n++;
	nrdy++;
	p->readytime = m->ticks;
	p->state = Ready;
	unlock(runq);
	splx(s);
}
/*
 * pick the next process for this processor to run.
 * Called splhi from sched(); drops to spllo and spins
 * (idlehands) while no work is available.
 */
Proc*
runproc(void)
{
	Schedq *rq, *xrq;
	Proc *p, *l;
	ulong rt;
	ulong start, now;

	start = perfticks();

	/* the EDF scheduler gets first pick */
	if ((p = edf->edfrunproc()) != nil)
		return p;

loop:
	/*
	 *  find a process that last ran on this processor (affinity),
	 *  or one that hasn't moved in a while (load balancing).
	 */
	spllo();
	for(;;){
		if((++(m->fairness) & 0x3) == 0){
			/*
			 *  once in a while, run process that's been waiting longest
			 *  regardless of movetime
			 */
			rt = 0xffffffff;
			xrq = nil;
			for(rq = runq; rq < &runq[Nrq]; rq++){
				p = rq->head;
				if(p == 0)
					continue;
				if(p->readytime < rt){
					xrq = rq;
					rt = p->readytime;
				}
			}
			if(xrq != nil){
				rq = xrq;
				p = rq->head;
				/* unless wired, clear affinity so the locked rescan accepts it */
				if(p != nil && p->wired == nil)
					p->movetime = 0;
				goto found;
			}
		} else {
			/*
			 *  get highest priority process that this
			 *  processor can run given affinity constraints
			 */
			for(rq = &runq[Nrq-1]; rq >= runq; rq--){
				p = rq->head;
				if(p == 0)
					continue;
				for(; p; p = p->rnext){
					if(p->mp == MACHP(m->machno)
					|| p->movetime < MACHP(0)->ticks)
						goto found;
				}
			}
		}

		/* remember how much time we're here */
		idlehands();
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	/* queues may have changed while we ran unlocked; if contended, rescan */
	if(!canlock(runq))
		goto loop;

	/* relocate an eligible process in rq, this time under the lock */
	l = 0;
	for(p = rq->head; p; p = p->rnext){
		if(p->mp == MACHP(m->machno) || p->movetime <= MACHP(0)->ticks)
			break;
		l = p;
	}

	/*
	 *  p->mach==0 only when process state is saved
	 */
	if(p == 0 || p->mach){
		unlock(runq);
		goto loop;
	}

	/* unlink p from rq (l is its predecessor, if any) */
	if(p->rnext == 0)
		rq->tail = l;
	if(l)
		l->rnext = p->rnext;
	else
		rq->head = p->rnext;
	rq->n--;
	nrdy--;
	if(p->state != Ready)
		print("runproc %s %lud %s\n", p->text, p->pid, statename[p->state]);

	unlock(runq);

	p->state = Scheding;
	/* migrating to this processor: pin it here for HZ/10 ticks */
	if(p->mp != MACHP(m->machno))
		p->movetime = MACHP(0)->ticks + HZ/10;
	p->mp = MACHP(m->machno);
	return p;
}
  273. int
  274. canpage(Proc *p)
  275. {
  276. int ok = 0;
  277. splhi();
  278. lock(runq);
  279. /* Only reliable way to see if we are Running */
  280. if(p->mach == 0) {
  281. p->newtlb = 1;
  282. ok = 1;
  283. }
  284. unlock(runq);
  285. spllo();
  286. return ok;
  287. }
  288. Proc*
  289. newproc(void)
  290. {
  291. Proc *p;
  292. lock(&procalloc);
  293. for(;;) {
  294. if(p = procalloc.free)
  295. break;
  296. unlock(&procalloc);
  297. resrcwait("no procs");
  298. lock(&procalloc);
  299. }
  300. procalloc.free = p->qnext;
  301. unlock(&procalloc);
  302. p->state = Scheding;
  303. p->psstate = "New";
  304. p->mach = 0;
  305. p->qnext = 0;
  306. p->nchild = 0;
  307. p->nwait = 0;
  308. p->waitq = 0;
  309. p->parent = 0;
  310. p->pgrp = 0;
  311. p->egrp = 0;
  312. p->fgrp = 0;
  313. p->rgrp = 0;
  314. p->pdbg = 0;
  315. p->fpstate = FPinit;
  316. p->kp = 0;
  317. p->procctl = 0;
  318. p->notepending = 0;
  319. p->mp = 0;
  320. p->movetime = 0;
  321. p->wired = 0;
  322. p->ureg = 0;
  323. p->privatemem = 0;
  324. p->noswap = 0;
  325. p->lockwait = nil;
  326. p->errstr = p->errbuf0;
  327. p->syserrstr = p->errbuf1;
  328. p->errbuf0[0] = '\0';
  329. p->errbuf1[0] = '\0';
  330. p->nlocks = 0;
  331. p->delaysched = 0;
  332. p->movetime = 0;
  333. kstrdup(&p->user, "*nouser");
  334. kstrdup(&p->text, "*notext");
  335. kstrdup(&p->args, "");
  336. p->nargs = 0;
  337. p->setargs = 0;
  338. memset(p->seg, 0, sizeof p->seg);
  339. p->pid = incref(&pidalloc);
  340. pidhash(p);
  341. p->noteid = incref(&noteidalloc);
  342. if(p->pid==0 || p->noteid==0)
  343. panic("pidalloc");
  344. if(p->kstack == 0)
  345. p->kstack = smalloc(KSTACK);
  346. p->task = nil;
  347. return p;
  348. }
  349. /*
  350. * wire this proc to a machine
  351. */
  352. void
  353. procwired(Proc *p, int bm)
  354. {
  355. Proc *pp;
  356. int i;
  357. char nwired[MAXMACH];
  358. Mach *wm;
  359. if(bm < 0){
  360. /* pick a machine to wire to */
  361. memset(nwired, 0, sizeof(nwired));
  362. p->wired = 0;
  363. pp = proctab(0);
  364. for(i=0; i<conf.nproc; i++, pp++){
  365. wm = pp->wired;
  366. if(wm && pp->pid)
  367. nwired[wm->machno]++;
  368. }
  369. bm = 0;
  370. for(i=0; i<conf.nmach; i++)
  371. if(nwired[i] < nwired[bm])
  372. bm = i;
  373. } else {
  374. /* use the virtual machine requested */
  375. bm = bm % conf.nmach;
  376. }
  377. p->wired = MACHP(bm);
  378. p->movetime = 0xffffffff;
  379. p->mp = p->wired;
  380. }
  381. void
  382. procinit0(void) /* bad planning - clashes with devproc.c */
  383. {
  384. Proc *p;
  385. int i;
  386. procalloc.free = xalloc(conf.nproc*sizeof(Proc));
  387. if(procalloc.free == nil)
  388. panic("cannot allocate %lud procs\n", conf.nproc);
  389. procalloc.arena = procalloc.free;
  390. p = procalloc.free;
  391. for(i=0; i<conf.nproc-1; i++,p++)
  392. p->qnext = p+1;
  393. p->qnext = 0;
  394. }
  395. /*
  396. * sleep if a condition is not true. Another process will
  397. * awaken us after it sets the condition. When we awaken
  398. * the condition may no longer be true.
  399. *
  400. * we lock both the process and the rendezvous to keep r->p
  401. * and p->r synchronized.
  402. */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	int s;

	s = splhi();

	if (up->nlocks)
		print("process %lud sleeps with %lud locks held, last lock 0x%p locked at pc 0x%lux\n",
			up->pid, up->nlocks, up->lastlock, up->lastlock->pc);
	/* both locks held so r->p and up->r change together */
	lock(r);
	lock(&up->rlock);
	if(r->p){
		print("double sleep %lud %lud\n", r->p->pid, up->pid);
		dumpstack();
	}

	/*
	 *  Wakeup only knows there may be something to do by testing
	 *  r->p in order to get something to lock on.
	 *  Flush that information out to memory in case the sleep is
	 *  committed.
	 */
	r->p = up;

	if((*f)(arg) || up->notepending){
		/*
		 *  if condition happened or a note is pending
		 *  never mind
		 */
		r->p = nil;
		unlock(&up->rlock);
		unlock(r);
	} else {
		/*
		 *  now we are committed to
		 *  change state and call scheduler
		 */
		up->state = Wakeme;
		up->r = r;

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)) {
			/*
			 *  here when the process is awakened
			 */
			procrestore(up);
			spllo();
		} else {
			/*
			 *  here to go to sleep (i.e. stop Running)
			 */
			unlock(&up->rlock);
			unlock(r);
			// Behind unlock, we may call wakeup on ourselves.
			if (edf->isedf(up))
				edf->edfblock(up);
			gotolabel(&m->sched);
		}
	}

	/* an interrupting note turns the sleep into an Eintr error */
	if(up->notepending) {
		up->notepending = 0;
		splx(s);
		error(Eintr);
	}

	splx(s);
}
  467. int
  468. tfn(void *arg)
  469. {
  470. return MACHP(0)->ticks >= up->twhen || up->tfn(arg);
  471. }
/*
 * sleep on r for at most ms milliseconds; fn/arg is the caller's
 * wakeup condition, checked together with the timeout by tfn().
 */
void
tsleep(Rendez *r, int (*fn)(void*), void *arg, int ms)
{
	ulong when;
	Proc *f, **l;

	when = ms2tk(ms) + MACHP(0)->ticks;
	lock(&talarm);
	/* take out of list if checkalarm didn't */
	if(up->trend) {
		l = &talarm.list;
		for(f = *l; f; f = f->tlink) {
			if(f == up) {
				*l = up->tlink;
				break;
			}
			l = &f->tlink;
		}
	}
	/* insert in increasing time order */
	l = &talarm.list;
	for(f = *l; f; f = f->tlink) {
		if(f->twhen >= when)
			break;
		l = &f->tlink;
	}
	up->trend = r;
	up->twhen = when;
	up->tfn = fn;
	up->tlink = *l;
	*l = up;
	unlock(&talarm);

	/* on error, clear twhen so the clock won't try to wake us later */
	if(waserror()){
		up->twhen = 0;
		nexterror();
	}
	sleep(r, tfn, arg);
	up->twhen = 0;
	poperror();
}
  511. /*
  512. * Expects that only one process can call wakeup for any given Rendez.
  513. * We hold both locks to ensure that r->p and p->r remain consistent.
  514. * Richard Miller has a better solution that doesn't require both to
  515. * be held simultaneously, but I'm a paranoid - presotto.
  516. */
Proc*
wakeup(Rendez *r)
{
	Proc *p;
	int s;

	s = splhi();

	lock(r);
	p = r->p;

	if(p != nil){
		lock(&p->rlock);
		if(p->state != Wakeme || p->r != r)
			panic("wakeup: state");
		/* detach both sides before making p runnable */
		r->p = nil;
		p->r = nil;
		ready(p);
		unlock(&p->rlock);
	}
	unlock(r);

	splx(s);

	/* return the process woken, or nil if nobody was sleeping */
	return p;
}
  538. /*
  539. * if waking a sleeping process, this routine must hold both
  540. * p->rlock and r->lock. However, it can't know them in
  541. * the same order as wakeup causing a possible lock ordering
  542. * deadlock. We break the deadlock by giving up the p->rlock
  543. * lock if we can't get the r->lock and retrying.
  544. */
int
postnote(Proc *p, int dolock, char *n, int flag)
{
	int s, ret;
	Rendez *r;
	Proc *d, **l;

	if(dolock)
		qlock(&p->debug);

	/* a non-user note to an unnotifiable process flushes queued notes */
	if(flag != NUser && (p->notify == 0 || p->notified))
		p->nnote = 0;

	/* ret reports whether the note was actually queued */
	ret = 0;
	if(p->nnote < NNOTE) {
		strcpy(p->note[p->nnote].msg, n);
		p->note[p->nnote++].flag = flag;
		ret = 1;
	}
	p->notepending = 1;
	if(dolock)
		qunlock(&p->debug);

	/* this loop is to avoid lock ordering problems. */
	for(;;){
		s = splhi();
		lock(&p->rlock);
		r = p->r;

		/* waiting for a wakeup? */
		if(r == nil)
			break;	/* no */

		/* try for the second lock */
		if(canlock(r)){
			if(p->state != Wakeme || r->p != p)
				panic("postnote: state %d %d %d", r->p != p, p->r != r, p->state);
			p->r = nil;
			r->p = nil;
			ready(p);
			unlock(r);
			break;
		}

		/* give other process time to get out of critical section and try again */
		unlock(&p->rlock);
		splx(s);
		sched();
	}
	unlock(&p->rlock);
	splx(s);

	if(p->state != Rendezvous)
		return ret;

	/* Try and pull out of a rendezvous */
	lock(p->rgrp);
	if(p->state == Rendezvous) {
		p->rendval = ~0;	/* ~0 marks the rendezvous as interrupted */
		l = &REND(p->rgrp, p->rendtag);
		for(d = *l; d; d = d->rendhash) {
			if(d == p) {
				*l = p->rendhash;
				break;
			}
			l = &d->rendhash;
		}
		ready(p);
	}
	unlock(p->rgrp);
	return ret;
}
  608. /*
  609. * weird thing: keep at most NBROKEN around
  610. */
#define NBROKEN 4
/* holding pen for processes parked in the Broken state */
struct
{
	QLock;
	int	n;		/* number of procs currently held */
	Proc	*p[NBROKEN];	/* oldest first; see addbroken() */
}broken;
/*
 * park a process in the Broken state; when the table is full
 * the oldest occupant is released.  Ends with sched(), so the
 * caller does not return until freed by unbreak/freebroken.
 * NOTE(review): the edf calls use up, not p — this assumes
 * p == up (true for the pexit call site); confirm for any
 * other caller.
 */
void
addbroken(Proc *p)
{
	qlock(&broken);
	if(broken.n == NBROKEN) {
		ready(broken.p[0]);
		memmove(&broken.p[0], &broken.p[1], sizeof(Proc*)*(NBROKEN-1));
		--broken.n;
	}
	broken.p[broken.n++] = p;
	qunlock(&broken);

	if (edf->isedf(up))
		edf->edfbury(up);
	p->state = Broken;
	p->psstate = 0;
	sched();
}
  635. void
  636. unbreak(Proc *p)
  637. {
  638. int b;
  639. qlock(&broken);
  640. for(b=0; b < broken.n; b++)
  641. if(broken.p[b] == p) {
  642. broken.n--;
  643. memmove(&broken.p[b], &broken.p[b+1],
  644. sizeof(Proc*)*(NBROKEN-(b+1)));
  645. ready(p);
  646. break;
  647. }
  648. qunlock(&broken);
  649. }
  650. int
  651. freebroken(void)
  652. {
  653. int i, n;
  654. qlock(&broken);
  655. n = broken.n;
  656. for(i=0; i<n; i++) {
  657. ready(broken.p[i]);
  658. broken.p[i] = 0;
  659. }
  660. broken.n = 0;
  661. qunlock(&broken);
  662. return n;
  663. }
/*
 * terminate the current process.  Releases its resource groups,
 * leaves a wait record for the parent (unless it is a kernel
 * process), frees its segments, and finally becomes Moribund so
 * schedinit() can recycle the Proc.  With freemem==0 the process
 * is parked Broken for debugging instead of being freed.
 */
void
pexit(char *exitstr, int freemem)
{
	Proc *p;
	Segment **s, **es;
	long utime, stime;
	Waitq *wq, *f, *next;
	Fgrp *fgrp;
	Egrp *egrp;
	Rgrp *rgrp;
	Pgrp *pgrp;
	Chan *dot;

	up->alarm = 0;

	/* nil out all the resources under lock (free later) */
	qlock(&up->debug);
	fgrp = up->fgrp;
	up->fgrp = nil;
	egrp = up->egrp;
	up->egrp = nil;
	rgrp = up->rgrp;
	up->rgrp = nil;
	pgrp = up->pgrp;
	up->pgrp = nil;
	dot = up->dot;
	up->dot = nil;
	qunlock(&up->debug);

	if(fgrp)
		closefgrp(fgrp);
	if(egrp)
		closeegrp(egrp);
	if(rgrp)
		closergrp(rgrp);
	if(dot)
		cclose(dot);
	if(pgrp)
		closepgrp(pgrp);

	/*
	 * if not a kernel process and have a parent,
	 * do some housekeeping.
	 */
	if(up->kp == 0) {
		p = up->parent;
		if(p == 0) {
			if(exitstr == 0)
				exitstr = "unknown";
			panic("boot process died: %s", exitstr);
		}

		/* swallow any errors raised while allocating -- TODO confirm intent */
		while(waserror())
			;

		wq = smalloc(sizeof(Waitq));
		poperror();

		/* build the wait record for the parent */
		wq->w.pid = up->pid;
		utime = up->time[TUser] + up->time[TCUser];
		stime = up->time[TSys] + up->time[TCSys];
		wq->w.time[TUser] = tk2ms(utime);
		wq->w.time[TSys] = tk2ms(stime);
		wq->w.time[TReal] = tk2ms(MACHP(0)->ticks - up->time[TReal]);
		if(exitstr && exitstr[0])
			snprint(wq->w.msg, sizeof(wq->w.msg), "%s %lud: %s", up->text, up->pid, exitstr);
		else
			wq->w.msg[0] = '\0';

		lock(&p->exl);
		/*
		 * If my parent is no longer alive, or if there would be more
		 * than 128 zombie child processes for my parent, then don't
		 * leave a wait record behind.  This helps prevent badly
		 * written daemon processes from accumulating lots of wait
		 * records.
		 */
		if(p->pid == up->parentpid && p->state != Broken && p->nwait < 128) {
			p->nchild--;
			p->time[TCUser] += utime;
			p->time[TCSys] += stime;

			wq->next = p->waitq;
			p->waitq = wq;
			p->nwait++;

			wakeup(&p->waitr);
			unlock(&p->exl);
		}
		else {
			unlock(&p->exl);
			free(wq);
		}
	}

	/* debugging: park Broken instead of freeing; returns when unbroken */
	if(!freemem)
		addbroken(up);

	qlock(&up->seglock);
	es = &up->seg[NSEG];
	for(s = up->seg; s < es; s++) {
		if(*s) {
			putseg(*s);
			*s = 0;
		}
	}
	qunlock(&up->seglock);

	lock(&up->exl);		/* Prevent my children from leaving waits */
	pidunhash(up);
	up->pid = 0;
	wakeup(&up->waitr);
	unlock(&up->exl);

	/* discard wait records my own children left behind */
	for(f = up->waitq; f; f = next) {
		next = f->next;
		free(f);
	}

	/* release debuggers */
	qlock(&up->debug);
	if(up->pdbg) {
		wakeup(&up->pdbg->sleep);
		up->pdbg = 0;
	}
	qunlock(&up->debug);

	/* Sched must not loop for these locks; schedinit() releases them */
	lock(&procalloc);
	lock(&palloc);

	if (edf->isedf(up))
		edf->edfbury(up);
	up->state = Moribund;
	sched();
	panic("pexit");
}
  784. int
  785. haswaitq(void *x)
  786. {
  787. Proc *p;
  788. p = (Proc *)x;
  789. return p->waitq != 0;
  790. }
/*
 * wait for a child to exit; returns the child's pid and,
 * if w is non-nil, copies its Waitmsg out.
 */
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	/* only one pwait at a time per process */
	if(!canqlock(&up->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	if(up->nchild == 0 && up->waitq == 0) {
		unlock(&up->exl);
		error(Enochild);
	}
	unlock(&up->exl);

	sleep(&up->waitr, haswaitq, up);

	/* dequeue the first wait record */
	lock(&up->exl);
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
  822. Proc*
  823. proctab(int i)
  824. {
  825. return &procalloc.arena[i];
  826. }
/* print one process's vital statistics on the console */
void
dumpaproc(Proc *p)
{
	ulong bss;
	char *s;

	if(p == 0)
		return;

	bss = 0;
	if(p->seg[BSEG])
		bss = p->seg[BSEG]->top;

	/* prefer the transient state string (e.g. "New") when set */
	s = p->psstate;
	if(s == 0)
		s = statename[p->state];
	print("%3lud:%10s pc %8lux dbgpc %8lux %8s (%s) ut %ld st %ld bss %lux qpc %lux nl %lud nd %lud lpc %lux\n",
		p->pid, p->text, p->pc, dbgpc(p), s, statename[p->state],
		p->time[0], p->time[1], bss, p->qpc, p->nlocks, p->delaysched, p->lastlock ? p->lastlock->pc : 0);
}
  844. void
  845. procdump(void)
  846. {
  847. int i;
  848. Proc *p;
  849. if(up)
  850. print("up %lud\n", up->pid);
  851. else
  852. print("no current process\n");
  853. for(i=0; i<conf.nproc; i++) {
  854. p = &procalloc.arena[i];
  855. if(p->state == Dead)
  856. continue;
  857. dumpaproc(p);
  858. }
  859. }
/*
 *  wait till all processes have flushed their mmu
 *  state about segment s
 */
void
procflushseg(Segment *s)
{
	int i, ns, nm, nwait;
	Proc *p;

	/*
	 *  tell all processes with this
	 *  segment to flush their mmu's
	 */
	nwait = 0;
	for(i=0; i<conf.nproc; i++) {
		p = &procalloc.arena[i];
		if(p->state == Dead)
			continue;
		for(ns = 0; ns < NSEG; ns++)
			if(p->seg[ns] == s){
				p->newtlb = 1;
				/* count processors currently running such a process */
				for(nm = 0; nm < conf.nmach; nm++){
					if(MACHP(nm)->proc == p){
						MACHP(nm)->flushmmu = 1;
						nwait++;
					}
				}
				break;
			}
	}

	if(nwait == 0)
		return;

	/*
	 *  wait for all processors to take a clock interrupt
	 *  and flush their mmu's
	 */
	for(nm = 0; nm < conf.nmach; nm++)
		if(MACHP(nm) != m)
			while(MACHP(nm)->flushmmu)
				sched();
}
/* print the run queues (pid, wait ticks, move age) for debugging */
void
scheddump(void)
{
	Proc *p;
	Schedq *rq;

	for(rq = &runq[Nrq-1]; rq >= runq; rq--){
		if(rq->head == 0)
			continue;
		print("rq%ld:", rq-runq);
		for(p = rq->head; p; p = p->rnext)
			print(" %lud(%lud, %lud)", p->pid, m->ticks - p->readytime,
				MACHP(0)->ticks - p->movetime);
		print("\n");
		delay(150);	/* let the console drain between queues */
	}
	print("nrdy %d\n", nrdy);
}
/*
 * start a kernel process running func(arg).  It shares the
 * caller's slash, dot and note state and joins a process
 * group common to all kprocs.
 */
void
kproc(char *name, void (*func)(void *), void *arg)
{
	Proc *p;
	static Pgrp *kpgrp;

	p = newproc();
	p->psstate = 0;
	p->procmode = 0640;
	p->kp = 1;		/* mark as a kernel process */
	p->noswap = 1;

	p->fpsave = up->fpsave;
	p->scallnr = up->scallnr;
	p->s = up->s;
	p->nerrlab = 0;
	p->slash = up->slash;
	p->dot = up->dot;
	incref(p->dot);

	memmove(p->note, up->note, sizeof(p->note));
	p->nnote = up->nnote;
	p->notified = 0;
	p->lastnote = up->lastnote;
	p->notify = up->notify;
	p->ureg = 0;
	p->dbgreg = 0;

	p->basepri = PriKproc;
	p->priority = p->basepri;

	kprocchild(p, func, arg);

	kstrdup(&p->user, eve);
	kstrdup(&p->text, name);
	if(kpgrp == 0)
		kpgrp = newpgrp();
	p->pgrp = kpgrp;
	incref(kpgrp);

	memset(p->time, 0, sizeof(p->time));
	p->time[TReal] = MACHP(0)->ticks;
	ready(p);
	/*
	 *  since the bss/data segments are now shareable,
	 *  any mmu info about this process is now stale
	 *  and has to be discarded.
	 */
	p->newtlb = 1;
	flushmmu();
}
  962. /*
  963. * called splhi() by notify(). See comment in notify for the
  964. * reasoning.
  965. */
void
procctl(Proc *p)
{
	char *state;
	ulong s;

	switch(p->procctl) {
	case Proc_exitbig:
		spllo();
		pexit("Killed: Insufficient physical memory", 1);

	case Proc_exitme:
		spllo();		/* pexit has locks in it */
		pexit("Killed", 1);

	case Proc_traceme:
		if(p->nnote == 0)
			return;
		/* No break */

	case Proc_stopme:
		p->procctl = 0;
		state = p->psstate;
		p->psstate = "Stopped";
		/* free a waiting debugger */
		s = spllo();
		qlock(&p->debug);
		if(p->pdbg) {
			wakeup(&p->pdbg->sleep);
			p->pdbg = 0;
		}
		qunlock(&p->debug);
		splhi();
		p->state = Stopped;
		if (edf->isedf(up))
			edf->edfblock(up);
		sched();
		/* resumed after being restarted: restore the old state string */
		p->psstate = state;
		splx(s);
		return;
	}
}
  1004. #include "errstr.h"
/* raise an error: record err in up->errstr and unwind to the newest waserror() */
void
error(char *err)
{
	spllo();

	/* bounded copy into the per-process error string */
	kstrcpy(up->errstr, err, ERRMAX);
	/* NOTE(review): records the raise point in the last errlab slot; label appears diagnostic only — confirm */
	setlabel(&up->errlab[NERR-1]);
	nexterror();
}
/* unwind to the most recent waserror() frame */
void
nexterror(void)
{
	gotolabel(&up->errlab[--up->nerrlab]);
}
  1018. void
  1019. exhausted(char *resource)
  1020. {
  1021. char buf[ERRMAX];
  1022. sprint(buf, "no free %s", resource);
  1023. iprint("%s\n", buf);
  1024. error(buf);
  1025. }
  1026. void
  1027. killbig(void)
  1028. {
  1029. int i;
  1030. Segment *s;
  1031. ulong l, max;
  1032. Proc *p, *ep, *kp;
  1033. max = 0;
  1034. kp = 0;
  1035. ep = procalloc.arena+conf.nproc;
  1036. for(p = procalloc.arena; p < ep; p++) {
  1037. if(p->state == Dead || p->kp)
  1038. continue;
  1039. l = 0;
  1040. for(i=1; i<NSEG; i++) {
  1041. s = p->seg[i];
  1042. if(s != 0)
  1043. l += s->top - s->base;
  1044. }
  1045. if(l > max && strcmp(p->text, "kfs") != 0){
  1046. kp = p;
  1047. max = l;
  1048. }
  1049. }
  1050. kp->procctl = Proc_exitbig;
  1051. for(i = 0; i < NSEG; i++) {
  1052. s = kp->seg[i];
  1053. if(s != 0 && canqlock(&s->lk)) {
  1054. mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
  1055. qunlock(&s->lk);
  1056. }
  1057. }
  1058. print("%lud: %s killed because no swap configured\n", kp->pid, kp->text);
  1059. }
  1060. /*
  1061. * change ownership to 'new' of all processes owned by 'old'. Used when
  1062. * eve changes.
  1063. */
  1064. void
  1065. renameuser(char *old, char *new)
  1066. {
  1067. Proc *p, *ep;
  1068. ep = procalloc.arena+conf.nproc;
  1069. for(p = procalloc.arena; p < ep; p++)
  1070. if(p->user!=nil && strcmp(old, p->user)==0)
  1071. kstrdup(&p->user, new);
  1072. }
  1073. /*
  1074. * time accounting called by clock() splhi'd
  1075. */
void
accounttime(void)
{
	Proc *p;
	ulong n, per;
	static ulong nrun;

	p = m->proc;
	if(p) {
		nrun++;
		/* charge the tick to the time slot selected by insyscall */
		p->time[p->insyscall]++;
	}

	/* calculate decaying duty cycles */
	n = perfticks();
	per = n - m->perf.last;
	m->perf.last = n;
	/* exponential decay with time constant HZ ticks */
	per = (m->perf.period*(HZ-1) + per)/HZ;
	if(per != 0)
		m->perf.period = per;

	m->perf.avg_inidle = (m->perf.avg_inidle*(HZ-1)+m->perf.inidle)/HZ;
	m->perf.inidle = 0;

	m->perf.avg_inintr = (m->perf.avg_inintr*(HZ-1)+m->perf.inintr)/HZ;
	m->perf.inintr = 0;

	/* only one processor gets to compute system load averages */
	if(m->machno != 0)
		return;

	/* calculate decaying load average */
	n = nrun;
	nrun = 0;
	n = (nrdy+n)*1000;
	m->load = (m->load*19+n)/20;
}
  1107. static void
  1108. pidhash(Proc *p)
  1109. {
  1110. int h;
  1111. h = p->pid % nelem(procalloc.ht);
  1112. lock(&procalloc);
  1113. p->pidhash = procalloc.ht[h];
  1114. procalloc.ht[h] = p;
  1115. unlock(&procalloc);
  1116. }
  1117. static void
  1118. pidunhash(Proc *p)
  1119. {
  1120. int h;
  1121. Proc **l;
  1122. h = p->pid % nelem(procalloc.ht);
  1123. lock(&procalloc);
  1124. for(l = &procalloc.ht[h]; *l != nil; l = &(*l)->pidhash)
  1125. if(*l == p){
  1126. *l = p->pidhash;
  1127. break;
  1128. }
  1129. unlock(&procalloc);
  1130. }
  1131. int
  1132. procindex(ulong pid)
  1133. {
  1134. Proc *p;
  1135. int h;
  1136. int s;
  1137. s = -1;
  1138. h = pid % nelem(procalloc.ht);
  1139. lock(&procalloc);
  1140. for(p = procalloc.ht[h]; p != nil; p = p->pidhash)
  1141. if(p->pid == pid){
  1142. s = p - procalloc.arena;
  1143. break;
  1144. }
  1145. unlock(&procalloc);
  1146. return s;
  1147. }