mmu.c

#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "arm.h"

#define L1X(va)	FEXT((va), 20, 12)
#define L2X(va)	FEXT((va), 12, 8)
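/*
 * L1X yields the index of va's megabyte section in the 4096-entry
 * level-1 table (bits 20-31 of va); L2X yields its index in a
 * 256-entry coarse level-2 table (bits 12-19).  FEXT(v, o, w)
 * extracts the w-bit field starting at bit o.
 */
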
enum {
	L1lo = UZERO/MiB,		/* L1X(UZERO)? */
	L1hi = (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
};
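/*
 * L1lo and L1hi bound the level-1 entries that may hold user mappings:
 * the megabyte slots covering UZERO up to USTKTOP.  mmul1empty() and
 * mmuswitch() confine themselves to this window.
 */
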
#define ISHOLE(pte)	((pte) == 0)

/* dump level 1 page table at virtual addr l1 */
void
mmudump(PTE *l1)
{
	int i, type, rngtype;
	uintptr pa, startva, startpa;
	uvlong va, endva;
	PTE pte;

//	pa -= MACHSIZE+1024;		/* put level 2 entries below level 1 */
//	l2 = KADDR(pa);

	print("\n");
	endva = startva = startpa = 0;
	rngtype = 0;
	/* dump first level of ptes */
	for (va = i = 0; i < 4096; i++) {
		pte = l1[i];
		pa = pte & ~(MB - 1);
		type = pte & (Fine|Section|Coarse);
		if (ISHOLE(pte)) {
			if (endva != 0) {	/* open range? close it */
				print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
					startva, endva-1, startpa, rngtype);
				endva = 0;
			}
		} else {
			if (endva == 0) {	/* no open range? start one */
				startva = va;
				startpa = pa;
				rngtype = type;
			}
			endva = va + MB;	/* continue the open range */
//			if (type == Coarse) {
//				/* could dump the l2 table for this l1 entry */
//			}
		}
		va += MB;
	}
	if (endva != 0)			/* close an open range */
		print("l1 maps va (%#lux-%#llux) -> pa %#lux type %#ux\n",
			startva, endva-1, startpa, rngtype);
}

/* identity map the megabyte containing va, uncached */
static void
idmap(PTE *l1, ulong va)
{
	va &= ~(MB-1);
	l1[L1X(va)] = va | Dom0 | L1AP(Krw) | Section;
}

/* map `mbs' megabytes from virt to phys */
void
mmumap(uintptr virt, uintptr phys, int mbs)
{
	uint off;
	PTE *l1;

	phys &= ~(MB-1);
	virt &= ~(MB-1);
	l1 = KADDR(ttbget());
	for (off = 0; mbs-- > 0; off += MB)
		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) | Section;
	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();
}

/* identity map `mbs' megabytes from phys */
void
mmuidmap(uintptr phys, int mbs)
{
	mmumap(phys, phys, mbs);
}

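/*
 * Complete the MMU setup begun in l.s: identity-map the device and
 * memory-controller regions the kernel touches, and build a small
 * level-2 table so the high vectors (HVECTORS) map onto the start of
 * DRAM.
 */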
void
mmuinit(void)
{
	uintptr pa;
	PTE *l1, *l2;

	pa = ttbget();
	l1 = KADDR(pa);

	/* redundant with l.s; only covers first MB of 17MB */
	l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;

	idmap(l1, PHYSETHER);		/* igep 9221 ethernet regs */
	idmap(l1, PHYSL4PROT);
	idmap(l1, PHYSL3);
	idmap(l1, PHYSSMS);
	idmap(l1, PHYSDRC);
	idmap(l1, PHYSGPMC);

	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+2*1024;
	l2 = KADDR(pa);
	memset(l2, 0, 1024);
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
	coherence();

	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();

	m->mmul1 = l1;
//	mmudump(l1);			/* DEBUG */
}

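/*
 * Detach proc's level-2 page-table pages: optionally zero each one,
 * reset the level-1 slot it served back to Fault, and move the whole
 * list onto proc->mmul2cache for later reuse.
 */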
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

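/*
 * Clear the user portion of the current level-1 table.  The disabled
 * (#ifdef notdef) variant tried to clear only the window tracked by
 * m->mmul1lo and m->mmul1hi; the live code simply wipes L1lo..L1hi.
 */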
static void
mmul1empty(void)
{
#ifdef notdef
/* there's a bug in here */
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = L1lo;
	}
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
		m->mmul1hi = L1hi;
	}
#else
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
#endif /* notdef */
}

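/*
 * Switch the MMU to proc's address space: flush the caches, drop the
 * old user mappings from the shared level-1 table, plug proc's
 * level-2 tables back in, then invalidate the TLB.
 */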
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();

//	print("mmuswitch l1lo %d l1hi %d %d\n",
//		m->mmul1lo, m->mmul1hi, proc->kp);
}

void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

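/*
 * Tear down proc's address space: return its level-2 page-table pages
 * to the free page pool and clear its user mappings from the level-1
 * table.
 */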
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

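/*
 * Install a user mapping of va -> pa for fault().  If the megabyte
 * containing va has no level-2 table yet, allocate one (a whole page,
 * though only 1KB of it is needed) and point the level-1 entry at it.
 */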
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
//	print("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
//	print("mmul1 %#p l1 %#p *l1 %#ux x %d pid %d\n",
//		m->mmul1, l1, *l1, x, up->pid);
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);
//		print("l1 %#p *l1 %#ux x %d pid %d\n", l1, *l1, x, up->pid);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));
//	print("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*
	 * write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachedwbinv();
	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
//	print("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
}

void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = PTR2UINT(v);
	assert(!(va & (1*MiB-1)) && size == 1*MiB);
	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Fine|Section|Coarse)) != Section)
		return nil;
	*pte &= ~(Cached|Buffered);
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
	return v;
}

uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != Fault)
		return 0;
	*pte = pa|Dom0|L1AP(Krw)|Section;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
	return va;
}

uintptr
mmukunmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);
	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != (pa|Dom0|L1AP(Krw)|Section))
		return 0;
	*pte = Fault;
	mmuinvalidateaddr(va);
	cachedwbinvse(pte, 4);
	return va;
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if(pa >= PHYSDRAM && pa < PHYSDRAM+memsize)
		return PHYSDRAM+memsize - pa;
	return 0;
}

/* from 386 */
void*
vmap(uintptr pa, usize size)
{
	uintptr pae, va;
	usize o, osize;

	/*
	 * XXX - replace with new vm stuff.
	 * Crock after crock - the first 4MB is mapped with 2MB pages
	 * so catch that and return good values because the current mmukmap
	 * will fail.
	 */
	if(pa+size < 4*MiB)
		return UINT2PTR(kseg0|pa);

	osize = size;
	o = pa & (BY2PG-1);
	pa -= o;
	size += o;
	size = ROUNDUP(size, PGSIZE);

	va = kseg0|pa;
	pae = mmukmap(va, pa, size);
	if(pae == 0 || pae-size != pa)
		panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
			pa+o, osize, getcallerpc(&pa), pae);

	return UINT2PTR(va+o);
}

/* from 386 */
void
vunmap(void* v, usize size)
{
	/*
	 * XXX - replace with new vm stuff.
	 * Can't do this until do real vmap for all space that
	 * might be used, e.g. stuff below 1MB which is currently
	 * mapped automagically at boot but that isn't used (or
	 * at least shouldn't be used) by the kernel.
	upafree(PADDR(v), size);
	 */
	USED(v, size);
}

/*
 * Notes.
 * Everything is in domain 0;
 * domain 0 access bits in the DAC register are set
 * to Client, which means access is controlled by the
 * permission values set in the PTE.
 *
 * L1 access control for the kernel is set to 1 (RW,
 * no user mode access);
 * L2 access control for the kernel is set to 1 (ditto)
 * for all 4 AP sets;
 * L1 user mode access is never set;
 * L2 access control for user mode is set to either
 * 2 (RO) or 3 (RW) depending on whether text or data,
 * for all 4 AP sets.
 * (To get kernel RO set AP to 0 and S bit in control
 * register c1).
 * Coarse descriptors are used in the L1 table for user space; each
 * points to a coarse L2 table of 256 entries, consuming 1024 bytes
 * per table.  The L2 entries themselves are Small (4KiB) page
 * descriptors.
 *
 * 4KiB. That's the size of 1) a page, 2) the
 * size allocated for an L2 page-table page (note only 1KiB
 * is needed per L2 page - to be dealt with later) and
 * 3) the size of the area in L1 needed to hold the PTEs
 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).
 */
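
/*
 * Worked example (illustrative values only): for va 0x12345678,
 * L1X(va) = 0x123, so m->mmul1[0x123] holds the descriptor for that
 * megabyte; if it is a Coarse descriptor, L2X(va) = 0x45 indexes the
 * 256-entry level-2 table it points to, and the low 12 bits (0x678)
 * are the offset within the 4KiB page.
 */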