/*
 * mmu.c — memory management for the Plan 9 alphapc kernel port.
 */
  1. #include "u.h"
  2. #include "../port/lib.h"
  3. #include "mem.h"
  4. #include "dat.h"
  5. #include "fns.h"
  6. #include "/sys/src/boot/alphapc/conf.h"
static uvlong origlvl1;		/* physical address of the boot-time (prototype) level-1 page table */
static uvlong klvl2;		/* physical address of the kernel level-2 table, as created by boot loader */
static uchar *nextio;		/* next virtual address to be allocated by kmapv */

extern Bootconf *bootconf;

/* index into a level-2 table of the PTE covering virtual address v */
#define LVL2OFF(v)	((((long)(v))>>(2*PGSHIFT-3))&(PTE2PG-1))
/* index into a level-3 table of the PTE mapping virtual address v */
#define LVL3OFF(v)	((((long)(v))>>(PGSHIFT))&(PTE2PG-1))
/*
 * Point the processor's page table base register at the level-1
 * page table whose physical address is pa, then switch hardware
 * context so the new value takes effect.
 */
static void
setptb(ulong pa)
{
	m->ptbr = (uvlong)pa>>PGSHIFT;	/* ptbr holds a page frame number, not a byte address */
	swpctx(m);
}
/*
 * Take over the MMU state left by the boot loader: record the
 * boot-time level-1 table and the kernel's level-2 table, and set
 * up the virtual region used by kmapv for device mappings.
 */
void
mmuinit(void)
{
	uvlong *plvl2;

	/* set PCB to new one in mach structure before stomping on old one */
	m->usp = 0;
	m->fen = 1;	/* NOTE(review): presumably the floating-point enable bit — confirm against PCB layout */
	m->ptbr = bootconf->pcb->ptbr;
	origlvl1 = (m->ptbr << PGSHIFT);	/* remember boot-time level-1 table (physical) */
	setpcb(m);

	/* the last entry of the level-1 table points at the kernel's level-2 table */
	plvl2 = (uvlong*) (KZERO|origlvl1|(BY2PG-8));
	klvl2 = (*plvl2 >> 32)<<PGSHIFT;	/* extract the PFN from the PTE, convert to physical address */

	/* kmapv hands out kernel virtual space starting just past physical memory */
	nextio = (uchar*) (KZERO|bootconf->maxphys);
}
  33. static void
  34. mmuptefree(Proc* proc)
  35. {
  36. uvlong *lvl2;
  37. Page **last, *page;
  38. if(proc->mmutop && proc->mmuused){
  39. lvl2 = (uvlong*)proc->mmulvl2->va;
  40. last = &proc->mmuused;
  41. for(page = *last; page; page = page->next){
  42. lvl2[page->daddr] = 0;
  43. last = &page->next;
  44. }
  45. *last = proc->mmufree;
  46. proc->mmufree = proc->mmuused;
  47. proc->mmuused = 0;
  48. }
  49. }
  50. void
  51. mmuswitch(Proc *proc)
  52. {
  53. if(proc->newtlb){
  54. mmuptefree(proc);
  55. proc->newtlb = 0;
  56. }
  57. /* tell processor about new page table and flush cached entries */
  58. if(proc->mmutop == 0)
  59. setptb(origlvl1);
  60. else
  61. setptb(proc->mmutop->pa);
  62. tlbflush(-1, 0);
  63. icflush();
  64. }
/*
 * Point the processor at the prototype page map (the boot-time
 * level-1 table) so it holds no reference to any process's tables.
 */
void
mmupark(void)
{
	setptb(origlvl1);
	icflush();
}
/*
 * give all page table pages back to the free pool. This is called in sched()
 * with palloc locked.
 */
void
mmurelease(Proc *proc)
{
	Page *page, *next;

	mmupark();	/* stop using proc's tables before freeing them */
	mmuptefree(proc);
	proc->mmuused = 0;

	/* the top-level and level-2 tables themselves join the free list */
	if(proc->mmutop) {
		proc->mmutop->next = proc->mmufree;
		proc->mmufree = proc->mmutop;
		proc->mmutop = 0;
	}
	if(proc->mmulvl2) {
		proc->mmulvl2->next = proc->mmufree;
		proc->mmufree = proc->mmulvl2;
		proc->mmulvl2 = 0;
	}

	/* hand every page back to palloc; each must hold exactly one reference */
	for(page = proc->mmufree; page; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d\n", page->ref);
		pagechainhead(page);
	}
	if(proc->mmufree && palloc.r.p)
		wakeup(&palloc.r);	/* someone may be sleeping for free pages */
	proc->mmufree = 0;
}
/*
 * Build a fresh top-level (level-1) page table and a level-2 table
 * for the current process.  The top table gets three entries:
 *   [0]         -> the new level-2 table
 *   [PTE2PG-2]  -> the top table itself (self reference)
 *   [PTE2PG-1]  -> the kernel's level-2 table (kernel mappings)
 * Finishes by making the new map current.
 */
void
mmunewtop(void)
{
	Page *top, *lvl2;
	uvlong *ppte;

	top = newpage(1, 0, 0);
	top->va = VA(kmap(top));
	lvl2 = newpage(1, 0, 0);
	lvl2->va = VA(kmap(lvl2));

	ppte = (uvlong *)top->va;
	ppte[0] = PTEPFN(lvl2->pa) | PTEKVALID;
	ppte[PTE2PG-2] = PTEPFN(top->pa) | PTEKVALID;
	ppte[PTE2PG-1] = PTEPFN(klvl2) | PTEKVALID;

	up->mmutop = top;
	up->mmulvl2 = lvl2;

	/* switch to the new map and invalidate stale translations */
	setptb(top->pa);
	tlbflush(-1, 0);
	icflush();
}
/*
 * Enter a mapping of virtual address va to pa in the current
 * process's page tables, allocating the top-level and bottom-level
 * tables on demand.  pa carries the physical address plus generic
 * permission bits; FIXPTE converts it to the hardware PTE format.
 * NOTE(review): the Page* parameter is overwritten and used only as
 * scratch for page-table allocation — confirm callers expect this.
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
	int lvl2off;
	uvlong *lvl2, *pt;
	int s;

	if(up->mmutop == 0)
		mmunewtop();

	lvl2 = (uvlong*)up->mmulvl2->va;
	lvl2off = LVL2OFF(va);

	/*
	 * if bottom level page table missing, allocate one
	 * and point the top level page at it.
	 */
	s = splhi();
	if(lvl2[lvl2off] == 0){
		if(up->mmufree == 0){
			spllo();	/* newpage may block; don't hold splhi across it */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
			splhi();
		} else {
			pg = up->mmufree;
			up->mmufree = pg->next;
			memset((void*)pg->va, 0, BY2PG);	/* recycled table must start empty */
		}
		lvl2[lvl2off] = PTEPFN(pg->pa) | PTEVALID;
		pg->daddr = lvl2off;	/* remember which level-2 slot this table backs (see mmuptefree) */
		pg->next = up->mmuused;
		up->mmuused = pg;
	}

	/*
	 * put in new mmu entry
	 */
	pt = (uvlong*)(((lvl2[lvl2off] >> 32)<<PGSHIFT)|KZERO);	/* PTE's PFN -> kernel virtual address */
	pt[LVL3OFF(va)] = FIXPTE(pa);

	/* flush cached mmu entries */
	tlbflush(3, va);
	icflush();
	splx(s);
}
/*
 * Map size bytes of physical (typically device) space starting at
 * pa into the kernel virtual region above maxphys reserved by
 * mmuinit, allocating level-3 tables from xspanalloc as needed.
 * Returns the kernel virtual address corresponding to pa.  The
 * mapping is permanent: nextio space is never unmapped or reused.
 */
void *
kmapv(uvlong pa, int size)
{
	void *va, *new;
	int lvl2off, i, npage, offset;
	uvlong *lvl2, *pt;

	offset = pa&(BY2PG-1);	/* pa need not be page aligned */
	npage = ((size+offset+BY2PG-1)>>PGSHIFT);	/* pages needed to cover [pa, pa+size) */
	va = nextio+offset;

	lvl2 = (uvlong*)(KZERO|klvl2);
	for (i = 0; i < npage; i++) {
		lvl2off = LVL2OFF(nextio);
		if (lvl2[lvl2off] == 0) {
			/* no level-3 table covers this region yet; allocate one */
			new = xspanalloc(BY2PG, BY2PG, 0);
			memset(new, 0, BY2PG);
			lvl2[lvl2off] = PTEPFN(PADDR(new)) | PTEKVALID | PTEASM;
		}
		pt = (uvlong*)(((lvl2[lvl2off] >> 32)<<PGSHIFT)|KZERO);	/* PTE's PFN -> kernel virtual address */
		pt[LVL3OFF(nextio)] = PTEPFN(pa) | PTEKVALID | PTEASM;
		nextio += BY2PG;
		pa += BY2PG;
	}
	return va;
}
/*
 * Discard the current process's cached translations: mark its page
 * tables stale and reload them via mmuswitch.
 */
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;	/* makes mmuswitch call mmuptefree */
	mmuswitch(up);
	splx(s);
}
  196. void*
  197. vmap(ulong pa, int size)
  198. {
  199. void *va;
  200. /*
  201. * Viability hack. Only for PCI framebuffers.
  202. */
  203. if(pa == 0)
  204. return 0;
  205. va = kmapv(((uvlong)0x88<<32LL)|pa, size);
  206. if(va == nil)
  207. return 0;
  208. return (void*)va;
  209. }
/*
 * Stub: mappings made by vmap/kmapv are never freed on this port;
 * just report the leak.
 */
void
vunmap(void*, int)
{
	print("vunmap: virtual mapping not freed\n");
}
/*
 * Debugging aid: print the page table base register and, if there
 * is a current process, the addresses of its table pages plus the
 * last (kernel) entry of its top-level table.
 */
void
mmudump(void)
{
	Page *top, *lvl2;

	iprint("ptbr %lux up %lux\n", (ulong)m->ptbr, up);
	if(up) {
		top = up->mmutop;
		if(top != nil)
			iprint("top %lux top[N-1] %llux\n", top->va, ((uvlong *)top->va)[PTE2PG-1]);
		lvl2 = up->mmulvl2;
		if(lvl2 != nil)
			iprint("lvl2 %lux\n", lvl2->va);
	}
}
/*
 * Stub: physical address space allocation is not implemented on
 * this port; always returns 0 (no allocation performed).
 */
ulong
upaalloc(int, int)
{
	return 0;
}
/* Stub: nothing to free, since upaalloc never allocates. */
void
upafree(ulong, int)
{
}
/* Stub: MMU consistency checking is a no-op on this port. */
void
checkmmu(ulong, ulong)
{
}
/* Stub: page reference counting audit is a no-op on this port. */
void
countpagerefs(ulong*, int)
{
}