/*
 * mmu.c
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

#include "arm.h"

#define L1X(va)	FEXT((va), 20, 12)
#define L2X(va)	FEXT((va), 12, 8)
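/*
 * The L1 table indexes 1MiB sections with the top 12 bits of a
 * virtual address; each coarse L2 table indexes 256 4KiB small
 * pages with the next 8 bits.  E.g. for va 0x80123000, L1X(va)
 * is 0x801 and L2X(va) is 0x23.
 */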
enum {
	L1lo	= UZERO/MiB,		/* L1X(UZERO)? */
	L1hi	= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
};
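/*
 * Build the initial kernel page table.  Runs before the MMU is
 * enabled, so the L1 and L2 tables are addressed physically via
 * PADDR.
 */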
void
mmuinit(void)
{
	PTE *l1, *l2;
	uintptr pa, va;

	l1 = (PTE*)PADDR(L1);
	l2 = (PTE*)PADDR(L2);

	/*
	 * map all of ram at KZERO
	 */
	va = KZERO;
	for(pa = PHYSDRAM; pa < PHYSDRAM+DRAMSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|Cached|Buffered;
		va += MiB;
	}

	/*
	 * identity map first MB of ram so mmu can be enabled
	 */
	l1[L1X(PHYSDRAM)] = PHYSDRAM|Dom0|L1AP(Krw)|Section|Cached|Buffered;

	/*
	 * map i/o registers
	 */
	va = VIRTIO;
	for(pa = PHYSIO; pa < PHYSIO+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
		va += MiB;
	}

	/*
	 * double map exception vectors at top of virtual memory
	 */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small;
}
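/*
 * Second-stage initialisation, run once the MMU is on:
 * record the (now virtually addressed) L1 table in the Mach
 * struct and drop the temporary identity map of the first MiB.
 */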
void
mmuinit1(void)
{
	PTE *l1;

	l1 = (PTE*)L1;
	m->mmul1 = l1;

	/*
	 * undo identity map of first MB of ram
	 */
	l1[L1X(PHYSDRAM)] = 0;
	cachedwbse(&l1[L1X(PHYSDRAM)], sizeof(PTE));
	mmuinvalidate();
}
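/*
 * Detach proc's L2 page tables: clear the L1 entries pointing at
 * them (and, if clear is set, the tables themselves) and move the
 * pages onto proc->mmul2cache for reuse.
 */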
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}
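/*
 * Clear all user-range entries from the L1 table.  The disabled
 * variant below tried to clear only the range known to be in use,
 * but is kept ifdef'd out because of the bug it notes.
 */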
static void
mmul1empty(void)
{
#ifdef notdef
/* there's a bug in here */
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = L1lo;
	}
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
		m->mmul1hi = L1hi;
	}
#else
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
#endif /* notdef */
}
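/*
 * Install proc's address space on this cpu: flush the caches,
 * clear the old user mappings, then point the user L1 slots at
 * proc's L2 tables.
 */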
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
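/*
 * Called when the current process's mappings have changed:
 * mark the map stale and reinstall it.
 */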
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}
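/*
 * Tear down proc's address space entirely and return its L2
 * page-table pages to the free page list.
 */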
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
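/*
 * Map va to pa in the current process, allocating an L2 table
 * for the enclosing MiB if the L1 slot is still empty.  The
 * protection bits arrive in the low bits of pa.
 */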
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/* write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachedwbinv();
	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
	checkmmu(va, PPN(pa));
}
/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa < PHYSDRAM + memsize)	/* assumes PHYSDRAM is 0 */
		return PHYSDRAM + memsize - pa;
	return 0;
}
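/*
 * Map physical pa at kernel virtual va for size bytes, using
 * 1MiB sections; va must be MiB-aligned, pa need not be.
 * Returns the virtual address corresponding to pa, or 0 if any
 * section in the range is already mapped.  A hypothetical driver
 * might map a device's registers with something like
 *	regs = mmukmap(VMAPADDR, physregs, 64*KiB);
 * (VMAPADDR and physregs are illustrative names, not symbols
 * defined in this kernel).
 */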
uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int o;
	usize n;
	PTE *pte, *pte0;

	assert((va & (MiB-1)) == 0);
	o = pa & (MiB-1);
	pa -= o;
	size += o;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
		mmuinvalidateaddr(va+n);
	}
	/* write new entries back to memory; cachedwbse takes bytes, not PTEs */
	cachedwbse(pte0, (pte - pte0)*sizeof(PTE));
	return va + o;
}
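/*
 * Stub: no mapping consistency check on this port.
 */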
void
checkmmu(uintptr va, uintptr pa)
{
	USED(va);
	USED(pa);
}