trap.c

#include "u.h"
#include "tos.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"
#include "../port/error.h"
#include <trace.h>

void	noted(Ureg*, ulong);

static void debugbpt(Ureg*, void*);
static void fault386(Ureg*, void*);
static void doublefault(Ureg*, void*);
static void unexpected(Ureg*, void*);
static void _dumpstack(Ureg*);

static Lock vctllock;
static Vctl *vctl[256];

enum
{
	Ntimevec = 20		/* number of time buckets for each intr */
};
ulong intrtimes[256][Ntimevec];
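
/*
 * Register f as a handler for interrupt irq on bus tbdf.
 * The arch code chooses the vector; handlers sharing a
 * vector are chained onto it.
 */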
void
intrenable(int irq, void (*f)(Ureg*, void*), void* a, int tbdf, char *name)
{
	int vno;
	Vctl *v;

	if(f == nil){
		print("intrenable: nil handler for %d, tbdf 0x%uX for %s\n",
			irq, tbdf, name);
		return;
	}

	v = xalloc(sizeof(Vctl));
	v->isintr = 1;
	v->irq = irq;
	v->tbdf = tbdf;
	v->f = f;
	v->a = a;
	strncpy(v->name, name, KNAMELEN-1);
	v->name[KNAMELEN-1] = 0;

	ilock(&vctllock);
	vno = arch->intrenable(v);
	if(vno == -1){
		iunlock(&vctllock);
		print("intrenable: couldn't enable irq %d, tbdf 0x%uX for %s\n",
			irq, tbdf, v->name);
		xfree(v);
		return;
	}
	if(vctl[vno]){
		if(vctl[vno]->isr != v->isr || vctl[vno]->eoi != v->eoi)
			panic("intrenable: handler: %s %s %luX %luX %luX %luX\n",
				vctl[vno]->name, v->name,
				vctl[vno]->isr, v->isr, vctl[vno]->eoi, v->eoi);
		v->next = vctl[vno];
	}
	vctl[vno] = v;
	iunlock(&vctllock);
}
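
/*
 * Remove the handler registered by a matching intrenable().
 * Only works when the arch code can map irq to a vector
 * (i.e. not with the APIC code; see the comment below).
 */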
int
intrdisable(int irq, void (*f)(Ureg *, void *), void *a, int tbdf, char *name)
{
	Vctl **pv, *v;
	int vno;

	/*
	 * For now, none of this will work with the APIC code,
	 * there is no mapping between irq and vector as the IRQ
	 * is pretty meaningless.
	 */
	if(arch->intrvecno == nil)
		return -1;
	vno = arch->intrvecno(irq);
	ilock(&vctllock);
	pv = &vctl[vno];
	while (*pv &&
		((*pv)->irq != irq || (*pv)->tbdf != tbdf || (*pv)->f != f || (*pv)->a != a ||
		strcmp((*pv)->name, name)))
		pv = &((*pv)->next);
	assert(*pv);

	v = *pv;
	*pv = (*pv)->next;	/* Link out the entry */

	if(vctl[vno] == nil && arch->intrdisable != nil)
		arch->intrdisable(irq);
	iunlock(&vctllock);
	xfree(v);
	return 0;
}
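
/*
 * Read routine for the "irqalloc" arch file: one line per
 * registered handler, giving vector, irq and name.
 */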
static long
irqallocread(Chan*, void *vbuf, long n, vlong offset)
{
	char *buf, *p, str[2*(11+1)+KNAMELEN+1+1];
	int m, vno;
	long oldn;
	Vctl *v;

	if(n < 0 || offset < 0)
		error(Ebadarg);

	oldn = n;
	buf = vbuf;
	for(vno=0; vno<nelem(vctl); vno++){
		for(v=vctl[vno]; v; v=v->next){
			m = snprint(str, sizeof str, "%11d %11d %.*s\n", vno, v->irq, KNAMELEN, v->name);
			if(m <= offset)	/* if do not want this, skip entry */
				offset -= m;
			else{
				/* skip offset bytes */
				m -= offset;
				p = str+offset;
				offset = 0;
				/* write at most min(n,m) bytes */
				if(m > n)
					m = n;
				memmove(buf, p, m);
				n -= m;
				buf += m;

				if(n == 0)
					return oldn;
			}
		}
	}
	return oldn - n;
}
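
/*
 * Register a handler for a processor trap
 * (vectors below VectorPIC).
 */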
void
trapenable(int vno, void (*f)(Ureg*, void*), void* a, char *name)
{
	Vctl *v;

	if(vno < 0 || vno >= VectorPIC)
		panic("trapenable: vno %d\n", vno);
	v = xalloc(sizeof(Vctl));
	v->tbdf = BUSUNKNOWN;
	v->f = f;
	v->a = a;
	strncpy(v->name, name, KNAMELEN);
	v->name[KNAMELEN-1] = 0;

	lock(&vctllock);
	if(vctl[vno])
		v->next = vctl[vno]->next;
	vctl[vno] = v;
	unlock(&vctllock);
}
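
/*
 * Clear the NMI latch and re-enable NMI delivery
 * via the NVRAM index port (0x70) and system control
 * port B (0x61).
 */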
static void
nmienable(void)
{
	int x;

	/*
	 * Hack: should be locked with NVRAM access.
	 */
	outb(0x70, 0x80);		/* NMI latch clear */
	outb(0x70, 0);

	x = inb(0x61) & 0x07;		/* Enable NMI */
	outb(0x61, 0x08|x);
	outb(0x61, x);
}
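
/*
 * Build the IDT: one interrupt gate per vector, each pointing
 * at its 6-byte stub in vectortable.  The breakpoint and system
 * call gates are callable from user mode (SEGPL(3)).
 */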
void
trapinit(void)
{
	int d1, v;
	ulong vaddr;
	Segdesc *idt;

	idt = (Segdesc*)IDTADDR;
	vaddr = (ulong)vectortable;
	for(v = 0; v < 256; v++){
		d1 = (vaddr & 0xFFFF0000)|SEGP;
		switch(v){
		case VectorBPT:
			d1 |= SEGPL(3)|SEGIG;
			break;
		case VectorSYSCALL:
			d1 |= SEGPL(3)|SEGIG;
			break;
		default:
			d1 |= SEGPL(0)|SEGIG;
			break;
		}
		idt[v].d0 = (vaddr & 0xFFFF)|(KESEL<<16);
		idt[v].d1 = d1;
		vaddr += 6;
	}

	/*
	 * Special traps.
	 * Syscall() is called directly without going through trap().
	 */
	trapenable(VectorBPT, debugbpt, 0, "debugpt");
	trapenable(VectorPF, fault386, 0, "fault386");
	trapenable(Vector2F, doublefault, 0, "doublefault");
	trapenable(Vector15, unexpected, 0, "unexpected");
	nmienable();

	addarchfile("irqalloc", 0444, irqallocread, nil);
}

static char* excname[32] = {
	"divide error",
	"debug exception",
	"nonmaskable interrupt",
	"breakpoint",
	"overflow",
	"bounds check",
	"invalid opcode",
	"coprocessor not available",
	"double fault",
	"coprocessor segment overrun",
	"invalid TSS",
	"segment not present",
	"stack exception",
	"general protection violation",
	"page fault",
	"15 (reserved)",
	"coprocessor error",
	"alignment check",
	"machine check",
	"19 (reserved)",
	"20 (reserved)",
	"21 (reserved)",
	"22 (reserved)",
	"23 (reserved)",
	"24 (reserved)",
	"25 (reserved)",
	"26 (reserved)",
	"27 (reserved)",
	"28 (reserved)",
	"29 (reserved)",
	"30 (reserved)",
	"31 (reserved)",
};

/*
 *  keep histogram of interrupt service times
 */
void
intrtime(Mach*, int vno)
{
	ulong diff;
	ulong x;

	x = perfticks();
	diff = x - m->perf.intrts;
	m->perf.intrts = x;

	m->perf.inintr += diff;
	if(up == nil && m->perf.inidle > diff)
		m->perf.inidle -= diff;

	diff /= m->cpumhz*100;		// quantum = 100µsec
	if(diff >= Ntimevec)
		diff = Ntimevec-1;
	intrtimes[vno][diff]++;
}

/* go to user space */
void
kexit(Ureg*)
{
	uvlong t;
	Tos *tos;

	/* precise time accounting, kernel exit */
	tos = (Tos*)(USTKTOP-sizeof(Tos));
	cycles(&t);
	tos->kcycles += t - up->kentry;
	tos->pcycles = up->pcycles;
	tos->pid = up->pid;
}

/*
 *  All traps come here.  It is slower to have all traps call trap()
 *  rather than directly vectoring the handler.  However, this avoids a
 *  lot of code duplication and possible bugs.  The only exception is
 *  VectorSYSCALL.
 *  Trap is called with interrupts disabled via interrupt-gates.
 */
void
trap(Ureg* ureg)
{
	int i, vno, user;
	char buf[ERRMAX];
	Vctl *ctl, *v;
	Mach *mach;
	void (*pt)(Proc*, int, vlong);

	m->perf.intrts = perfticks();
	user = (ureg->cs & 0xFFFF) == UESEL;
	if(user){
		up->dbgreg = ureg;
		cycles(&up->kentry);
	}

	vno = ureg->trap;
	if(ctl = vctl[vno]){
		if(ctl->isintr){
			pt = proctrace;
			if(up && up->trace && pt)
				pt(up, (vno << 16) | SInts, 0);
			m->intr++;
			if(vno >= VectorPIC && vno != VectorSYSCALL)
				m->lastintr = ctl->irq;
		}

		if(ctl->isr)
			ctl->isr(vno);
		for(v = ctl; v != nil; v = v->next){
			if(v->f)
				v->f(ureg, v->a);
		}
		if(ctl->eoi)
			ctl->eoi(vno);

		if(ctl->isintr){
			intrtime(m, vno);

			pt = proctrace;
			if(up && up->trace && pt)
				pt(up, (vno << 16) | SInte, 0);

			if(up && ctl->irq != IrqTIMER && ctl->irq != IrqCLOCK)
				preempted();
		}
	}
	else if(vno < nelem(excname) && user){
		spllo();
		sprint(buf, "sys: trap: %s", excname[vno]);
		postnote(up, 1, buf, NDebug);
	}
	else if(vno >= VectorPIC && vno != VectorSYSCALL){
		/*
		 * An unknown interrupt.
		 * Check for a default IRQ7.  This can happen when
		 * the IRQ input goes away before the acknowledge.
		 * In this case, a 'default IRQ7' is generated, but
		 * the corresponding bit in the ISR isn't set.
		 * In fact, just ignore all such interrupts.
		 */

		/* call all interrupt routines, just in case */
		for(i = VectorPIC; i <= MaxIrqLAPIC; i++){
			ctl = vctl[i];
			if(ctl == nil)
				continue;
			if(!ctl->isintr)
				continue;
			for(v = ctl; v != nil; v = v->next){
				if(v->f)
					v->f(ureg, v->a);
			}
			/* should we do this? */
			if(ctl->eoi)
				ctl->eoi(i);
		}
		/* clear the interrupt */
		i8259isr(vno);

		if(0)print("cpu%d: spurious interrupt %d, last %d",
			m->machno, vno, m->lastintr);
		for(i = 0; i < 32; i++){
			if(!(active.machs & (1<<i)))
				continue;
			mach = MACHP(i);
			if(m->machno == mach->machno)
				continue;
			print(": cpu%d: last %d", mach->machno, mach->lastintr);
		}
		print("\n");
		m->spuriousintr++;
		if(user){
			/* if we delayed sched because we held a lock, sched now */
			if(up->delaysched)
				sched();
			kexit(ureg);
		}
		return;
	}
	else{
		if(vno == VectorNMI){
			nmienable();
			if(m->machno != 0){
				print("cpu%d: PC %8.8luX\n",
					m->machno, ureg->pc);
				for(;;);
			}
		}
		dumpregs(ureg);
		if(!user){
			ureg->sp = (ulong)&ureg->sp;
			_dumpstack(ureg);
		}
		if(vno < nelem(excname))
			panic("%s", excname[vno]);
		panic("unknown trap/intr: %d\n", vno);
	}

	if(user){
		if(up->procctl || up->nnote){
			splhi();
			notify(ureg);
		}
		/* if we delayed sched because we held a lock, sched now */
		if(up->delaysched)
			sched();
		kexit(ureg);
	}
}

/*
 *  dump registers
 */
void
dumpregs2(Ureg* ureg)
{
	if(up)
		print("cpu%d: registers for %s %lud\n",
			m->machno, up->text, up->pid);
	else
		print("cpu%d: registers for kernel\n", m->machno);
	print("FLAGS=%luX TRAP=%luX ECODE=%luX PC=%luX",
		ureg->flags, ureg->trap, ureg->ecode, ureg->pc);
	print(" SS=%4.4luX USP=%luX\n", ureg->ss & 0xFFFF, ureg->usp);
	print(" AX %8.8luX BX %8.8luX CX %8.8luX DX %8.8luX\n",
		ureg->ax, ureg->bx, ureg->cx, ureg->dx);
	print(" SI %8.8luX DI %8.8luX BP %8.8luX\n",
		ureg->si, ureg->di, ureg->bp);
	print(" CS %4.4luX DS %4.4luX ES %4.4luX FS %4.4luX GS %4.4luX\n",
		ureg->cs & 0xFFFF, ureg->ds & 0xFFFF, ureg->es & 0xFFFF,
		ureg->fs & 0xFFFF, ureg->gs & 0xFFFF);
}

void
dumpregs(Ureg* ureg)
{
	extern ulong etext;
	vlong mca, mct;

	dumpregs2(ureg);

	/*
	 * Processor control registers.
	 * If machine check exception, time stamp counter, page size extensions
	 * or enhanced virtual 8086 mode extensions are supported, there is a
	 * CR4. If there is a CR4 and machine check extensions, read the machine
	 * check address and machine check type registers if RDMSR supported.
	 */
	print(" CR0 %8.8lux CR2 %8.8lux CR3 %8.8lux",
		getcr0(), getcr2(), getcr3());
	if(m->cpuiddx & 0x9A){
		print(" CR4 %8.8lux", getcr4());
		if((m->cpuiddx & 0xA0) == 0xA0){
			rdmsr(0x00, &mca);
			rdmsr(0x01, &mct);
			print("\n MCA %8.8llux MCT %8.8llux", mca, mct);
		}
	}
	print("\n ur %lux up %lux\n", ureg, up);
}

/*
 * Fill in enough of Ureg to get a stack trace, and call a function.
 * Used by debugging interface rdb.
 */
void
callwithureg(void (*fn)(Ureg*))
{
	Ureg ureg;

	ureg.pc = getcallerpc(&fn);
	ureg.sp = (ulong)&fn;
	fn(&ureg);
}

static void
_dumpstack(Ureg *ureg)
{
	ulong l, v, i, estack;
	extern ulong etext;
	int x;

	iprint("dumpstack\n");
	x = 0;
	x += print("ktrace /kernel/path %.8lux %.8lux\n", ureg->pc, ureg->sp);
	i = 0;
	if(up
	&& (ulong)&l >= (ulong)up->kstack
	&& (ulong)&l <= (ulong)up->kstack+KSTACK)
		estack = (ulong)up->kstack+KSTACK;
	else if((ulong)&l >= (ulong)m->stack
	&& (ulong)&l <= (ulong)m+BY2PG)
		estack = (ulong)m+MACHSIZE;
	else
		return;
	x += print("estackx %.8lux\n", estack);

	for(l=(ulong)&l; l<estack; l+=4){
		v = *(ulong*)l;
		if((KTZERO < v && v < (ulong)&etext) || estack-l<256){
			/*
			 * we could pick off general CALL (((uchar*)v)[-5] == 0xE8)
			 * and CALL indirect through AX
			 * (((uchar*)v)[-2] == 0xFF && ((uchar*)v)[-1] == 0xD0),
			 * but this is too clever and misses faulting address.
			 */
			x += print("%.8lux=%.8lux ", l, v);
			i++;
		}
		if(i == 4){
			i = 0;
			x += print("\n");
		}
	}
	iprint("serialoq %d printed %d\n", qlen(serialoq), x);
	if(i)
		print("\n");
}

void
dumpstack(void)
{
	callwithureg(_dumpstack);
}
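
/*
 * Breakpoint (INT 3) trap: post a note to the
 * interrupted user process.
 */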
static void
debugbpt(Ureg* ureg, void*)
{
	char buf[ERRMAX];

	if(up == 0)
		panic("kernel bpt");
	/* restore pc to instruction that caused the trap */
	ureg->pc--;
	sprint(buf, "sys: breakpoint");
	postnote(up, 1, buf, NDebug);
}

static void
doublefault(Ureg*, void*)
{
	panic("double fault");
}

static void
unexpected(Ureg* ureg, void*)
{
	print("unexpected trap %lud; ignoring\n", ureg->trap);
}
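
/*
 * Page fault handler.  Kernel faults on kernel-map addresses are
 * resolved by mmukmapsync(); otherwise fault() brings the page in.
 * An unresolved fault kills the process, or panics if it happened
 * in kernel mode.
 */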
static void
fault386(Ureg* ureg, void*)
{
	ulong addr;
	int read, user, n, insyscall;
	char buf[ERRMAX];

	addr = getcr2();
	user = (ureg->cs & 0xFFFF) == UESEL;
	if(!user && mmukmapsync(addr))
		return;
	read = !(ureg->ecode & 2);

	if(up == nil)
		panic("fault but up is zero; pc 0x%8.8lux addr 0x%8.8lux\n", ureg->pc, addr);
	insyscall = up->insyscall;
	up->insyscall = 1;
	n = fault(addr, read);
	if(n < 0){
		if(!user){
			dumpregs(ureg);
			panic("fault: 0x%lux\n", addr);
		}
		sprint(buf, "sys: trap: fault %s addr=0x%lux",
			read? "read" : "write", addr);
		postnote(up, 1, buf, NDebug);
	}
	up->insyscall = insyscall;
}

/*
 *  system calls
 */
#include "../port/systab.h"

/*
 *  Syscall is called directly from assembler without going through trap().
 */
void
syscall(Ureg* ureg)
{
	char *e;
	ulong sp;
	long ret;
	int i, s;
	ulong scallnr;

	if((ureg->cs & 0xFFFF) != UESEL)
		panic("syscall: cs 0x%4.4luX\n", ureg->cs);

	cycles(&up->kentry);

	m->syscall++;
	up->insyscall = 1;
	up->pc = ureg->pc;
	up->dbgreg = ureg;

	if(up->procctl == Proc_tracesyscall){
		up->procctl = Proc_stopme;
		procctl(up);
	}

	scallnr = ureg->ax;
	up->scallnr = scallnr;
	if(scallnr == RFORK && up->fpstate == FPactive){
		fpsave(&up->fpsave);
		up->fpstate = FPinactive;
	}
	spllo();

	sp = ureg->usp;
	up->nerrlab = 0;
	ret = -1;
	if(!waserror()){
		if(scallnr >= nsyscall || systab[scallnr] == 0){
			pprint("bad sys call number %lud pc %lux\n",
				scallnr, ureg->pc);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}

		if(sp<(USTKTOP-BY2PG) || sp>(USTKTOP-sizeof(Sargs)-BY2WD))
			validaddr(sp, sizeof(Sargs)+BY2WD, 0);

		up->s = *((Sargs*)(sp+BY2WD));
		up->psstate = sysctab[scallnr];

		ret = systab[scallnr](up->s.args);
		poperror();
	}else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
		if(0 && up->pid == 1)
			print("syscall %lud error %s\n", scallnr, up->syserrstr);
	}
	if(up->nerrlab){
		print("bad errstack [%lud]: %d extra\n", scallnr, up->nerrlab);
		for(i = 0; i < NERR; i++)
			print("sp=%lux pc=%lux\n",
				up->errlab[i].sp, up->errlab[i].pc);
		panic("error stack");
	}

	/*
	 *  Put return value in frame.  On the x86 the syscall is
	 *  just another trap and the return value from syscall is
	 *  ignored.  On other machines the return value is put into
	 *  the results register by caller of syscall.
	 */
	ureg->ax = ret;

	if(up->procctl == Proc_tracesyscall){
		up->procctl = Proc_stopme;
		s = splhi();
		procctl(up);
		splx(s);
	}

	up->insyscall = 0;
	up->psstate = 0;

	if(scallnr == NOTED)
		noted(ureg, *(ulong*)(sp+BY2WD));

	if(scallnr!=RFORK && (up->procctl || up->nnote)){
		splhi();
		notify(ureg);
	}
	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched)
		sched();
	kexit(ureg);
}

/*
 *  Call user, if necessary, with note.
 *  Pass user the Ureg struct and the note on his stack.
 */
int
notify(Ureg* ureg)
{
	int l;
	ulong s, sp;
	Note *n;

	if(up->procctl)
		procctl(up);
	if(up->nnote == 0)
		return 0;

	if(up->fpstate == FPactive){
		fpsave(&up->fpsave);
		up->fpstate = FPinactive;
	}
	up->fpstate |= FPillegal;

	s = spllo();
	qlock(&up->debug);
	up->notepending = 0;
	n = &up->note[0];
	if(strncmp(n->msg, "sys:", 4) == 0){
		l = strlen(n->msg);
		if(l > ERRMAX-15)	/* " pc=0x12345678\0" */
			l = ERRMAX-15;
		sprint(n->msg+l, " pc=0x%.8lux", ureg->pc);
	}

	if(n->flag!=NUser && (up->notified || up->notify==0)){
		if(n->flag == NDebug)
			pprint("suicide: %s\n", n->msg);
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}

	if(up->notified){
		qunlock(&up->debug);
		splhi();
		return 0;
	}

	if(!up->notify){
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}
	sp = ureg->usp;
	sp -= sizeof(Ureg);

	if(!okaddr((ulong)up->notify, 1, 0)
	|| !okaddr(sp-ERRMAX-4*BY2WD, sizeof(Ureg)+ERRMAX+4*BY2WD, 1)){
		pprint("suicide: bad address in notify\n");
		qunlock(&up->debug);
		pexit("Suicide", 0);
	}

	up->ureg = (void*)sp;
	memmove((Ureg*)sp, ureg, sizeof(Ureg));
	*(Ureg**)(sp-BY2WD) = up->ureg;	/* word under Ureg is old up->ureg */
	up->ureg = (void*)sp;
	sp -= BY2WD+ERRMAX;
	memmove((char*)sp, up->note[0].msg, ERRMAX);
	sp -= 3*BY2WD;
	*(ulong*)(sp+2*BY2WD) = sp+3*BY2WD;	/* arg 2 is string */
	*(ulong*)(sp+1*BY2WD) = (ulong)up->ureg;	/* arg 1 is ureg* */
	*(ulong*)(sp+0*BY2WD) = 0;		/* arg 0 is pc */
	ureg->usp = sp;
	ureg->pc = (ulong)up->notify;
	up->notified = 1;
	up->nnote--;
	memmove(&up->lastnote, &up->note[0], sizeof(Note));
	memmove(&up->note[0], &up->note[1], up->nnote*sizeof(Note));

	qunlock(&up->debug);
	splx(s);
	return 1;
}

/*
 *   Return user to state before notify()
 */
void
noted(Ureg* ureg, ulong arg0)
{
	Ureg *nureg;
	ulong oureg, sp;

	qlock(&up->debug);
	if(arg0!=NRSTR && !up->notified) {
		qunlock(&up->debug);
		pprint("call to noted() when not notified\n");
		pexit("Suicide", 0);
	}
	up->notified = 0;

	nureg = up->ureg;	/* pointer to user returned Ureg struct */

	up->fpstate &= ~FPillegal;

	/* sanity clause */
	oureg = (ulong)nureg;
	if(!okaddr((ulong)oureg-BY2WD, BY2WD+sizeof(Ureg), 0)){
		pprint("bad ureg in noted or call to noted when not notified\n");
		qunlock(&up->debug);
		pexit("Suicide", 0);
	}

	/*
	 * Check the segment selectors are all valid, otherwise
	 * a fault will be taken on attempting to return to the
	 * user process.
	 * Take care with the comparisons as different processor
	 * generations push segment descriptors in different ways.
	 */
	if((nureg->cs & 0xFFFF) != UESEL || (nureg->ss & 0xFFFF) != UDSEL
	|| (nureg->ds & 0xFFFF) != UDSEL || (nureg->es & 0xFFFF) != UDSEL
	|| (nureg->fs & 0xFFFF) != UDSEL || (nureg->gs & 0xFFFF) != UDSEL){
		pprint("bad segment selector in noted\n");
		qunlock(&up->debug);
		pexit("Suicide", 0);
	}

	/* don't let user change system flags */
	nureg->flags = (ureg->flags & ~0xCD5) | (nureg->flags & 0xCD5);

	memmove(ureg, nureg, sizeof(Ureg));

	switch(arg0){
	case NCONT:
	case NRSTR:
		if(!okaddr(nureg->pc, 1, 0) || !okaddr(nureg->usp, BY2WD, 0)){
			qunlock(&up->debug);
			pprint("suicide: trap in noted\n");
			pexit("Suicide", 0);
		}
		up->ureg = (Ureg*)(*(ulong*)(oureg-BY2WD));
		qunlock(&up->debug);
		break;

	case NSAVE:
		if(!okaddr(nureg->pc, BY2WD, 0)
		|| !okaddr(nureg->usp, BY2WD, 0)){
			qunlock(&up->debug);
			pprint("suicide: trap in noted\n");
			pexit("Suicide", 0);
		}
		qunlock(&up->debug);
		sp = oureg-4*BY2WD-ERRMAX;
		splhi();
		ureg->sp = sp;
		((ulong*)sp)[1] = oureg;	/* arg 1 0(FP) is ureg* */
		((ulong*)sp)[0] = 0;		/* arg 0 is pc */
		break;

	default:
		pprint("unknown noted arg 0x%lux\n", arg0);
		up->lastnote.flag = NDebug;
		/* fall through */

	case NDFLT:
		if(up->lastnote.flag == NDebug){
			qunlock(&up->debug);
			pprint("suicide: %s\n", up->lastnote.msg);
		} else
			qunlock(&up->debug);
		pexit(up->lastnote.msg, up->lastnote.flag!=NDebug);
	}
}
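
/*
 * Set up the user stack and registers for a fresh image after exec;
 * returns the address of the Tos, the kernel/user shared data.
 */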
long
execregs(ulong entry, ulong ssize, ulong nargs)
{
	ulong *sp;
	Ureg *ureg;

	up->fpstate = FPinit;
	fpoff();

	sp = (ulong*)(USTKTOP - ssize);
	*--sp = nargs;

	ureg = up->dbgreg;
	ureg->usp = (ulong)sp;
	ureg->pc = entry;
	return USTKTOP-sizeof(Tos);	/* address of kernel/user shared data */
}

/*
 *  return the userpc the last exception happened at
 */
ulong
userpc(void)
{
	Ureg *ureg;

	ureg = (Ureg*)up->dbgreg;
	return ureg->pc;
}

/* This routine must save the values of registers the user is not permitted
 * to write from devproc and then restore the saved values before returning.
 */
void
setregisters(Ureg* ureg, char* pureg, char* uva, int n)
{
	ulong flags;
	ulong cs;
	ulong ss;

	flags = ureg->flags;
	cs = ureg->cs;
	ss = ureg->ss;
	memmove(pureg, uva, n);
	ureg->flags = (ureg->flags & 0x00FF) | (flags & 0xFF00);
	ureg->cs = cs;
	ureg->ss = ss;
}
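
/*
 * Trampoline on which new kernel processes start:
 * call the function registered by kprocchild() and exit.
 */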
static void
linkproc(void)
{
	spllo();
	up->kpfun(up->kparg);
	pexit("kproc dying", 0);
}

void
kprocchild(Proc* p, void (*func)(void*), void* arg)
{
	/*
	 * gotolabel() needs a word on the stack in
	 * which to place the return PC used to jump
	 * to linkproc().
	 */
	p->sched.pc = (ulong)linkproc;
	p->sched.sp = (ulong)p->kstack+KSTACK-BY2WD;

	p->kpfun = func;
	p->kparg = arg;
}

void
forkchild(Proc *p, Ureg *ureg)
{
	Ureg *cureg;

	/*
	 * Add 2*BY2WD to the stack to account for
	 *  - the return PC
	 *  - trap's argument (ur)
	 */
	p->sched.sp = (ulong)p->kstack+KSTACK-(sizeof(Ureg)+2*BY2WD);
	p->sched.pc = (ulong)forkret;

	cureg = (Ureg*)(p->sched.sp+2*BY2WD);
	memmove(cureg, ureg, sizeof(Ureg));
	/* return value of syscall in child */
	cureg->ax = 0;

	/* Things from bottom of syscall which were never executed */
	p->psstate = 0;
	p->insyscall = 0;
}

/* Give enough context in the ureg to produce a kernel stack for
 * a sleeping process
 */
void
setkernur(Ureg* ureg, Proc* p)
{
	ureg->pc = p->sched.pc;
	ureg->sp = p->sched.sp+4;
}
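
/*
 * PC from the last saved user register set,
 * or 0 if there is none.
 */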
ulong
dbgpc(Proc *p)
{
	Ureg *ureg;

	ureg = p->dbgreg;
	if(ureg == 0)
		return 0;
	return ureg->pc;
}