mmu.c

/*
 * arm arch v7 mmu
 *
 * we initially thought that we needn't flush the l2 cache since external
 * devices needn't see page tables. sadly, reality does not agree with
 * the manuals.
 *
 * we use l1 and l2 cache ops here because they are empirically needed.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "arm.h"
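
/*
 * ARMv7 short-descriptor translation-table format: the level-1 table
 * has 4096 word-sized entries, each mapping 1MB, indexed by va bits
 * 31:20; a Coarse level-2 table has 256 word-sized entries, each
 * mapping 4KB, indexed by va bits 19:12.
 */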
#define L1X(va)		FEXT((va), 20, 12)
#define L2X(va)		FEXT((va), 12, 8)

enum {
	Debug		= 0,
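
	/*
	 * L1lo..L1hi-1 are the level-1 slots that may hold user
	 * mappings; mmul1empty clears only this window of the table.
	 */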
	L1lo		= UZERO/MiB,		/* L1X(UZERO)? */
#ifdef SMALL_ARM			/* well under 1GB of RAM? */
	L1hi		= (USTKTOP+MiB-1)/MiB,	/* L1X(USTKTOP+MiB-1)? */
#else
	/*
	 * on trimslice, the top of 1GB ram can't be addressed, as high
	 * virtual memory (0xfff.....) contains the high vectors.  We
	 * moved USTKTOP down another MB to utterly avoid KADDR(stack_base)
	 * mapping to high exception vectors.  USTKTOP is thus
	 * (0x40000000 - 64*KiB - MiB), which in kernel virtual space is
	 * (0x100000000ull - 64*KiB - MiB), but we need the whole user
	 * virtual address space to be unmapped in a new process.
	 */
	L1hi		= DRAMSIZE/MiB,
#endif
};

#define ISHOLE(type)	((type) == 0)
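
/*
 * a Range accumulates a run of contiguous L1 entries with the same
 * type and attributes, so that mmudump can print the run as one line.
 */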
typedef struct Range Range;
struct Range {
	uintptr	startva;
	uvlong	endva;
	uintptr	startpa;
	uvlong	endpa;
	ulong	attrs;
	int	type;			/* L1 Section or Coarse? */
};

static void mmul1empty(void);

static char *
typename(int type)
{
	static char numb[20];

	switch(type) {
	case Coarse:
		return "4KB-page table(s)";
	case Section:
		return "1MB section(s)";
	default:
		snprint(numb, sizeof numb, "type %d", type);
		return numb;
	}
}

static void
prl1range(Range *rp)
{
	int attrs;

	iprint("l1 maps va (%#8.8lux-%#llux) -> ", rp->startva, rp->endva-1);
	if (rp->startva == rp->startpa)
		iprint("identity-mapped");
	else
		iprint("pa %#8.8lux", rp->startpa);
	iprint(" attrs ");
	attrs = rp->attrs;
	if (attrs) {
		if (attrs & Cached)
			iprint("C");
		if (attrs & Buffered)
			iprint("B");
		if (attrs & L1sharable)
			iprint("S1");
		if (attrs & L1wralloc)
			iprint("A1");
	} else
		iprint("\"\"");
	iprint(" %s\n", typename(rp->type));
	delay(100);
	rp->endva = 0;
}

static void
l2dump(Range *rp, PTE pte)
{
	USED(rp, pte);
}

/* dump level 1 page table at virtual addr l1 */
void
mmudump(PTE *l1)
{
	int i, type, attrs;
	uintptr pa;
	uvlong va;
	PTE pte;
	Range rng;

	/* dump first level of ptes */
	iprint("cpu%d l1 pt @ %#p:\n", m->machno, PADDR(l1));
	memset(&rng, 0, sizeof rng);
	for (va = i = 0; i < 4096; i++, va += MB) {
		pte = l1[i];
		type = pte & (Section|Coarse);
		if (type == Section)
			pa = pte & ~(MB - 1);
		else
			pa = pte & ~(KiB - 1);
		attrs = 0;
		if (!ISHOLE(type) && type == Section)
			attrs = pte & L1ptedramattrs;

		/* if a range is open but this pte isn't part, close & open */
		if (!ISHOLE(type) &&
		    (pa != rng.endpa || type != rng.type || attrs != rng.attrs))
			if (rng.endva != 0) {	/* range is open? close it */
				prl1range(&rng);
				rng.type = 0;
				rng.attrs = 0;
			}

		if (ISHOLE(type)) {		/* end of any open range? */
			if (rng.endva != 0)	/* range is open? close it */
				prl1range(&rng);
		} else {			/* continuation or new range */
			if (rng.endva == 0) {	/* no open range? start one */
				rng.startva = va;
				rng.startpa = pa;
				rng.type = type;
				rng.attrs = attrs;
			}
			rng.endva = va + MB;	/* continue the open range */
			rng.endpa = pa + MB;
		}
		if (type == Coarse)
			l2dump(&rng, pte);
	}
	if (rng.endva != 0)			/* close any open range */
		prl1range(&rng);
	iprint("\n");
}

/*
 * map `mbs' megabytes from virt to phys, uncached.
 * device registers are sharable, except the private memory region:
 * 2 4K pages, at 0x50040000 on the tegra2.
 */
void
mmumap(uintptr virt, uintptr phys, int mbs)
{
	uint off;
	PTE *l1;

	phys &= ~(MB-1);
	virt &= ~(MB-1);
	l1 = KADDR(ttbget());
	for (off = 0; mbs-- > 0; off += MB)
		l1[L1X(virt + off)] = (phys + off) | Dom0 | L1AP(Krw) |
			Section | L1sharable;
	allcache->wbse(l1, L1SIZE);
	mmuinvalidate();
}

/* identity map `mbs' megabytes from phys */
void
mmuidmap(uintptr phys, int mbs)
{
	mmumap(phys, phys, mbs);
}
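
/*
 * crude bump allocator for early second-level page tables: l2pages
 * (presumably set up during low-level boot; it is not declared in
 * this file) advances one page at a time until it would collide with
 * the high-vectors page at HVECTORS.
 */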
PTE *
newl2page(void)
{
	PTE *p;

	if ((uintptr)l2pages >= HVECTORS - BY2PG)
		panic("l2pages");
	p = (PTE *)l2pages;
	l2pages += BY2PG;
	return p;
}

/*
 * replace an L1 section pte with an L2 page table and an L1 coarse pte,
 * with the same attributes as the original pte and covering the same
 * region of memory.
 */
static void
expand(uintptr va)
{
	int x;
	uintptr tva, pa;
	PTE oldpte;
	PTE *l1, *l2;

	va &= ~(MB-1);
	x = L1X(va);
	l1 = &m->mmul1[x];
	oldpte = *l1;
	if (oldpte == Fault || (oldpte & (Coarse|Section)) != Section)
		return;			/* make idempotent */

	/* wasteful - l2 pages only have 256 entries - fix */
	/*
	 * it may be very early, before any memory allocators are
	 * configured, so do a crude allocation from the top of memory.
	 */
	l2 = newl2page();
	memset(l2, 0, BY2PG);

	/* write new L1 l2 entry back into L1 descriptors */
	*l1 = PPN(PADDR(l2))|Dom0|Coarse;

	/* fill l2 page with l2 ptes with equiv attrs; copy AP bits */
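	/*
	 * a Section's access permissions sit in bits 15 (APX) and 11:10
	 * (AP); the corresponding Small-page bits are 9 and 5:4, so the
	 * whole field moves down by 6 bits.
	 */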
	x = Small | oldpte & (Cached|Buffered) | (oldpte & (1<<15 | 3<<10)) >> 6;
	if (oldpte & L1sharable)
		x |= L2sharable;
	if (oldpte & L1wralloc)
		x |= L2wralloc;
	pa = oldpte & ~(MiB - 1);
	for(tva = va; tva < va + MiB; tva += BY2PG, pa += BY2PG)
		l2[L2X(tva)] = PPN(pa) | x;

	/* force l2 page to memory */
	allcache->wbse(l2, BY2PG);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	allcache->wbinvse(l1, sizeof *l1);
	if ((*l1 & (Coarse|Section)) != Coarse)
		panic("explode %#p", va);
}

/*
 * cpu0's l1 page table has likely changed since we copied it in
 * launchinit, notably to allocate uncached sections for ucalloc.
 * so copy it again from cpu0's.
 */
void
mmuninit(void)
{
	int s;
	PTE *l1, *newl1;

	s = splhi();
	l1 = m->mmul1;
	newl1 = mallocalign(L1SIZE, L1SIZE, 0, 0);
	assert(newl1);

	allcache->wbinvse((PTE *)L1, L1SIZE);	/* get cpu0's up-to-date copy */
	memmove(newl1, (PTE *)L1, L1SIZE);
	allcache->wbse(newl1, L1SIZE);

	mmuinvalidate();
	coherence();

	ttbput(PADDR(newl1));		/* switch */
	coherence();
	mmuinvalidate();
	coherence();
	m->mmul1 = newl1;
	coherence();

	mmul1empty();
	coherence();
	mmuinvalidate();
	coherence();

//	mmudump(m->mmul1);		/* DEBUG */
	splx(s);
	free(l1);
}

/* l1 is base of my l1 descriptor table */
static PTE *
l2pteaddr(PTE *l1, uintptr va)
{
	uintptr l2pa;
	PTE pte;
	PTE *l2;

	expand(va);
	pte = l1[L1X(va)];
	if ((pte & (Coarse|Section)) != Coarse)
		panic("l2pteaddr l1 pte %#8.8ux @ %#p not Coarse",
			pte, &l1[L1X(va)]);
	l2pa = pte & ~(KiB - 1);
	l2 = (PTE *)KADDR(l2pa);
	return &l2[L2X(va)];
}

void
mmuinit(void)
{
	ulong va;
	uintptr pa;
	PTE *l1, *l2;

	if (m->machno != 0) {
		mmuninit();
		return;
	}

	pa = ttbget();
	l1 = KADDR(pa);

	/* identity map most of the io space */
	mmuidmap(PHYSIO, (PHYSIOEND - PHYSIO + MB - 1) / MB);
	/* move the rest to more convenient addresses */
	mmumap(VIRTNOR, PHYSNOR, 256);	/* 0x40000000 v -> 0xd0000000 p */
	mmumap(VIRTAHB, PHYSAHB, 256);	/* 0xb0000000 v -> 0xc0000000 p */

	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+BY2PG;		/* page tables must be page aligned */
	l2 = KADDR(pa);
	memset(l2, 0, 1024);

	m->mmul1 = l1;			/* used by expand in l2pteaddr */

	/* map private mem region (8K at soc.scu) without sharable bits */
	va = soc.scu;
	*l2pteaddr(l1, va) &= ~L2sharable;
	va += BY2PG;
	*l2pteaddr(l1, va) &= ~L2sharable;

	/*
	 * below (and above!) the vectors in virtual space may be dram.
	 * populate the rest of l2 for the last MB.
	 */
	for (va = -MiB; va != 0; va += BY2PG)
		l2[L2X(va)] = PADDR(va) | L2AP(Krw) | Small | L2ptedramattrs;
	/* map high vectors page to 0; must match attributes of KZERO->0 map */
	l2[L2X(HVECTORS)] = PHYSDRAM | L2AP(Krw) | Small | L2ptedramattrs;
	coherence();
	l1[L1X(HVECTORS)] = pa | Dom0 | Coarse;	/* l1 -> ttb-machsize-4k */

	/* make kernel text unwritable */
	for(va = KTZERO; va < (ulong)etext; va += BY2PG)
		*l2pteaddr(l1, va) |= L2apro;

	allcache->wbinv();
	mmuinvalidate();

	m->mmul1 = l1;
	coherence();
	mmul1empty();
	coherence();
//	mmudump(l1);			/* DEBUG */
}
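
/*
 * detach proc's second-level page tables: clear the L1 slots they
 * occupy, optionally zero the tables themselves, and move them onto
 * proc's mmul2cache list for reuse.
 */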
static void
mmul2empty(Proc* proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;
		allcache->wbse(&l1[page->daddr], sizeof *l1);
		l2 = &page->next;
	}
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}

static void
mmul1empty(void)
{
#ifdef notdef
/* there's a bug in here */
	PTE *l1;

	/* clean out any user mappings still in l1 */
	if(m->mmul1lo > L1lo){
		if(m->mmul1lo == 1)
			m->mmul1[L1lo] = Fault;
		else
			memset(&m->mmul1[L1lo], 0, m->mmul1lo*sizeof(PTE));
		m->mmul1lo = L1lo;
	}
	if(m->mmul1hi < L1hi){
		l1 = &m->mmul1[m->mmul1hi];
		if((L1hi - m->mmul1hi) == 1)
			*l1 = Fault;
		else
			memset(l1, 0, (L1hi - m->mmul1hi)*sizeof(PTE));
		m->mmul1hi = L1hi;
	}
#else
	memset(&m->mmul1[L1lo], 0, (L1hi - L1lo)*sizeof(PTE));
#endif /* notdef */
	allcache->wbse(&m->mmul1[L1lo], (L1hi - L1lo)*sizeof(PTE));
}
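
/*
 * switch to proc's address space: clear the user part of this cpu's
 * L1 table, point the relevant L1 slots at proc's cached L2 pages,
 * write the updated entries back to memory and flush stale TLB entries.
 */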
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate caches */
	l1cache->wbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	allcache->wbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();

//	print("mmuswitch l1lo %d l1hi %d %d\n",
//		m->mmul1lo, m->mmul1hi, proc->kp);

	wakewfi();		/* in case there's another runnable proc */
}
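
/*
 * force the current process's mappings to be rebuilt: mark them
 * stale (newtlb) and re-install them via mmuswitch.
 */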
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}
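
/*
 * give up proc's page tables: return its L2 pages to the free page
 * pool, clear its entries from this cpu's L1 and flush the TLB.
 */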
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate caches */
	l1cache->wbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	allcache->wbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
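
/*
 * install a user mapping for va -> pa.  if the L1 slot for va is
 * empty, allocate (or recycle) an L2 page for it first; then fill in
 * the Small-page descriptor with attributes derived from pa's
 * PTEWRITE and PTEUNCACHED bits and push everything to memory.
 */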
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if (Debug) {
		iprint("putmmu(%#p, %#p, %#p) ", va, pa, page->pa);
		iprint("mmul1 %#p l1 %#p *l1 %#ux x %d pid %ld\n",
			m->mmul1, l1, *l1, x, up->pid);
		if (*l1)
			panic("putmmu: old l1 pte non-zero; stuck?");
	}
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		allcache->wbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		allcache->wbse(l1, sizeof *l1);
		if (Debug)
			iprint("l1 %#p *l1 %#ux x %d pid %ld\n", l1, *l1, x, up->pid);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));
	if (Debug) {
		iprint("pte %#p index %ld was %#ux\n", pte, L2X(va), *(pte+L2X(va)));
		if (*(pte+L2X(va)))
			panic("putmmu: old l2 pte non-zero; stuck?");
	}

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= L2ptedramattrs;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	allcache->wbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/* write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	l1cache->wb();

	if(page->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text pg has been written */
		cacheiinv();
		page->cachectl[0] = PG_NOFLUSH;
	}
	if (Debug)
		iprint("putmmu %#p %#p %#p\n", va, pa, PPN(pa)|x);
}

void*
mmuuncache(void* v, usize size)
{
	int x;
	PTE *pte;
	uintptr va;

	/*
	 * Simple helper for ucalloc().
	 * Uncache a Section, must already be
	 * valid in the MMU.
	 */
	va = PTR2UINT(v);
	assert(!(va & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if((*pte & (Section|Coarse)) != Section)
		return nil;
	*pte &= ~L1ptedramattrs;
	*pte |= L1sharable;
	mmuinvalidateaddr(va);
	allcache->wbse(pte, 4);

	return v;
}

uintptr
mmukmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != Fault)
		return 0;
	*pte = pa|Dom0|L1AP(Krw)|Section;
	mmuinvalidateaddr(va);
	allcache->wbse(pte, 4);

	return va;
}

uintptr
mmukunmap(uintptr va, uintptr pa, usize size)
{
	int x;
	PTE *pte;

	/*
	 * Stub.
	 */
	assert(!(va & (1*MiB-1)) && !(pa & (1*MiB-1)) && size == 1*MiB);

	x = L1X(va);
	pte = &m->mmul1[x];
	if(*pte != (pa|Dom0|L1AP(Krw)|Section))
		return 0;
	*pte = Fault;
	mmuinvalidateaddr(va);
	allcache->wbse(pte, 4);

	return va;
}

/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
uintptr
cankaddr(uintptr pa)
{
	if((PHYSDRAM == 0 || pa >= PHYSDRAM) && pa < PHYSDRAM+memsize)
		return PHYSDRAM+memsize - pa;
	return 0;
}

/* from 386 */
void*
vmap(uintptr pa, usize size)
{
	uintptr pae, va;
	usize o, osize;

	/*
	 * XXX - replace with new vm stuff.
	 * Crock after crock - the first 4MB is mapped with 2MB pages
	 * so catch that and return good values because the current mmukmap
	 * will fail.
	 */
	if(pa+size < 4*MiB)
		return UINT2PTR(kseg0|pa);

	osize = size;
	o = pa & (BY2PG-1);
	pa -= o;
	size += o;
	size = ROUNDUP(size, BY2PG);

	va = kseg0|pa;
	pae = mmukmap(va, pa, size);
	if(pae == 0 || pae-size != pa)
		panic("vmap(%#p, %ld) called from %#p: mmukmap fails %#p",
			pa+o, osize, getcallerpc(&pa), pae);

	return UINT2PTR(va+o);
}

/* from 386 */
void
vunmap(void* v, usize size)
{
	/*
	 * XXX - replace with new vm stuff.
	 * Can't do this until do real vmap for all space that
	 * might be used, e.g. stuff below 1MB which is currently
	 * mapped automagically at boot but that isn't used (or
	 * at least shouldn't be used) by the kernel.
	upafree(PADDR(v), size);
	 */
	USED(v, size);
}

/*
 * Notes.
 * Everything is in domain 0;
 * domain 0 access bits in the DAC register are set
 * to Client, which means access is controlled by the
 * permission values set in the PTE.
 *
 * L1 access control for the kernel is set to 1 (RW,
 * no user mode access);
 * L2 access control for the kernel is set to 1 (ditto)
 * for all 4 AP sets;
 * L1 user mode access is never set;
 * L2 access control for user mode is set to either
 * 2 (RO) or 3 (RW) depending on whether text or data,
 * for all 4 AP sets.
 * (To get kernel RO, set AP to 0 and the S bit in control
 * register c1.)
 * Coarse second-level page tables are used: each has 256 entries
 * and so needs only 1024 bytes, though a whole 4096-byte page
 * (room for 1024 entries) is allocated per table for now.
 * Small (4KB) page descriptors are used within them.
 *
 * 4KiB. That's the size of 1) a page, 2) the
 * size allocated for an L2 page-table page (note only 1KiB
 * is needed per L2 page - to be dealt with later) and
 * 3) the size of the area in L1 needed to hold the PTEs
 * to map 1GiB of user space (0 -> 0x3fffffff, 1024 entries).