mmu.c

#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"
#include "../port/error.h"
/*
 * to avoid mmu and cache flushing, we use the pid register in the MMU
 * to map all user addresses.  Although there are 64 possible pids, we
 * can only use 31 because there are only 32 protection domains and we
 * need one for the kernel.  Pid i is thus associated with domain i.
 * Domain 0 is used for the kernel.
 */
/* real protection bits */
enum
{
	/* level 1 descriptor bits */
	L1TypeMask=	(3<<0),
	L1Invalid=	(0<<0),
	L1PageTable=	(1<<0),
	L1Section=	(2<<0),
	L1Cached=	(1<<3),
	L1Buffered=	(1<<2),
	L1DomShift=	5,
	L1Domain0=	(0<<L1DomShift),
	L1KernelRO=	(0x0<<10),
	L1KernelRW=	(0x1<<10),
	L1UserRO=	(0x2<<10),
	L1UserRW=	(0x3<<10),
	L1SectBaseMask=	(0xFFF<<20),
	L1PTBaseMask=	(0x3FFFFF<<10),

	/* level 2 descriptor bits */
	L2TypeMask=	(3<<0),
	L2SmallPage=	(2<<0),
	L2LargePage=	(1<<0),
	L2Cached=	(1<<3),
	L2Buffered=	(1<<2),
	L2KernelRW=	(0x55<<4),
	L2UserRO=	(0xAA<<4),
	L2UserRW=	(0xFF<<4),
	L2PageBaseMask=	(0xFFFFF<<12),

	/* domain values */
	Dnoaccess=	0,
	Dclient=	1,
	Dmanager=	3,
};
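
/*
 * Worked example (illustrative only; the address 0xC0100000 is made up):
 * a cached, buffered, kernel read/write section descriptor for the
 * physical megabyte at 0xC0100000 would be assembled as
 *	L1Section | L1KernelRW | L1Domain0 | L1Cached | L1Buffered
 *		| (0xC0100000 & L1SectBaseMask)
 * which evaluates to 0xC010040E.  mmuinit() below builds the DRAM map
 * from exactly these pieces.
 */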
ulong *l1table;
static int mmuinited;

/*
 * We map all of memory, flash, and the zeros area with sections.
 * Special use space is mapped on the fly with mapspecial().
 */
void
mmuinit(void)
{
	ulong a, o;
	ulong *t;

	/* get a prototype level 1 page */
	l1table = xspanalloc(16*1024, 16*1024, 0);
	memset(l1table, 0, 16*1024);

	/* map low mem (I really don't know why I have to do this -- presotto) */
	for(o = 0; o < 1*OneMeg; o += OneMeg)
		l1table[(0+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((0+o)&L1SectBaseMask);

	/* map DRAM */
	for(o = 0; o < DRAMTOP-DRAMZERO; o += OneMeg)
		l1table[(DRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* uncached DRAM */
	for(o = 0; o < UCDRAMTOP-UCDRAMZERO; o += OneMeg)
		l1table[(UCDRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* map zeros area */
	for(o = 0; o < NULLTOP-NULLZERO; o += OneMeg)
		l1table[(NULLZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSNULL0+o)&L1SectBaseMask);

	/* map flash */
	for(o = 0; o < FLASHTOP-FLASHZERO; o += OneMeg)
		l1table[(FLASHZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSFLASH0+o)&L1SectBaseMask);

	/* map peripheral control module regs */
	mapspecial(0x80000000, OneMeg);

	/* map system control module regs */
	mapspecial(0x90000000, OneMeg);

	/*
	 * double map start of ram to exception vectors
	 */
	a = EVECTORS;
	t = xspanalloc(BY2PG, 1024, 0);
	memset(t, 0, BY2PG);
	l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
	t[(a&0xfffff)>>PGSHIFT] = L2SmallPage | L2KernelRW | (PHYSDRAM0 & L2PageBaseMask);

	mmurestart();

	mmuinited = 1;
}
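
/*
 * reload the hardware MMU state: domain access control and translation
 * table base, then invalidate the TLB and turn the MMU back on.
 */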
void
mmurestart(void)
{
	/* set up the domain register to cause all domains to obey pte access bits */
	putdac(Dclient);

	/* point to map */
	putttb((ulong)l1table);

	/* enable mmu */
	wbflush();
	mmuinvalidate();
	mmuenable();

	cacheflush();
}
/*
 * map on request
 */
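/*
 * _map scans the virtual window [zero, top) for unused level 1 slots.
 * Requests of 128K or more are laid down as 1MB sections; smaller ones
 * get a level 2 table of small pages.  If the range is already covered
 * by a matching section, the existing mapping is reused.  The return
 * value is the virtual address corresponding to pa, or nil if the
 * window is exhausted.
 *
 * For example (addresses purely illustrative): mapspecial(0xA0000000,
 * BY2PG) would take the small-page path, while mapspecial(0xA0000000,
 * 2*OneMeg) would be mapped with two sections.
 */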
static void*
_map(ulong pa, int len, ulong zero, ulong top, ulong l1prop, ulong l2prop)
{
	ulong *t;
	ulong va, i, base, end, off, entry;
	int large;
	ulong* rv;

	rv = nil;
	large = len >= 128*1024;
	if(large){
		base = pa & ~(OneMeg-1);
		end = (pa+len-1) & ~(OneMeg-1);
	} else {
		base = pa & ~(BY2PG-1);
		end = (pa+len-1) & ~(BY2PG-1);
	}
	off = pa - base;

	for(va = zero; va < top && base <= end; va += OneMeg){
		switch(l1table[va>>20] & L1TypeMask){
		default:
			/* found unused entry on level 1 table */
			if(large){
				if(rv == nil)
					rv = (ulong*)(va+off);
				l1table[va>>20] = L1Section | l1prop | L1Domain0 |
					(base & L1SectBaseMask);
				base += OneMeg;
				continue;
			} else {
				/* create an L2 page table and keep going */
				t = xspanalloc(BY2PG, 1024, 0);
				memset(t, 0, BY2PG);
				l1table[va>>20] = L1PageTable | L1Domain0 |
					(((ulong)t) & L1PTBaseMask);
			}
			break;
		case L1Section:
			/* if it's already mapped in a one meg area, don't remap */
			entry = l1table[va>>20];
			i = entry & L1SectBaseMask;
			if(pa >= i && (pa+len) <= i + OneMeg)
			if((entry & ~L1SectBaseMask) == (L1Section | l1prop | L1Domain0))
				return (void*)(va + (pa & (OneMeg-1)));
			continue;
		case L1PageTable:
			if(large)
				continue;
			break;
		}

		/* here if we're using page maps instead of sections */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg && base <= end; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage){
				if(rv == nil)
					rv = (ulong*)(va+i+off);
				t[i>>PGSHIFT] = L2SmallPage | l2prop |
					(base & L2PageBaseMask);
				base += BY2PG;
				continue;
			}
		}
	}

	/* didn't fit */
	if(base <= end)
		return nil;
	cacheflush();

	return rv;
}
/* map in i/o registers */
void*
mapspecial(ulong pa, int len)
{
	return _map(pa, len, REGZERO, REGTOP, L1KernelRW, L2KernelRW);
}
/* map add on memory */
void*
mapmem(ulong pa, int len, int cached)
{
	ulong l1, l2;

	if(cached){
		l1 = L1KernelRW|L1Cached|L1Buffered;
		l2 = L2KernelRW|L2Cached|L2Buffered;
	} else {
		l1 = L1KernelRW;
		l2 = L2KernelRW;
	}
	return _map(pa, len, EMEMZERO, EMEMTOP, l1, l2);
}
/* map a virtual address to a physical one */
ulong
mmu_paddr(ulong va)
{
	ulong entry;
	ulong *t;

	entry = l1table[va>>20];
	switch(entry & L1TypeMask){
	case L1Section:
		return (entry & L1SectBaseMask) | (va & (OneMeg-1));
	case L1PageTable:
		t = (ulong*)(entry & L1PTBaseMask);
		va &= OneMeg-1;
		entry = t[va>>PGSHIFT];
		switch(entry & L2TypeMask){
		case L2SmallPage:
			return (entry & L2PageBaseMask) | (va & (BY2PG-1));
		}
	}
	return 0;
}
/* map a physical address to a virtual one */
ulong
findva(ulong pa, ulong zero, ulong top)
{
	int i;
	ulong entry, va;
	ulong start, end;
	ulong *t;

	for(va = zero; va < top; va += OneMeg){
		/* search the L1 entry */
		entry = l1table[va>>20];
		switch(entry & L1TypeMask){
		default:
			return 0;	/* no holes */
		case L1Section:
			start = entry & L1SectBaseMask;
			end = start + OneMeg;
			if(pa >= start && pa < end)
				return va | (pa & (OneMeg-1));
			continue;
		case L1PageTable:
			break;
		}

		/* search the L2 entry */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage)
				break;

			start = entry & L2PageBaseMask;
			end = start + BY2PG;
			if(pa >= start && pa < end)
				return va | i | (pa & (BY2PG-1));
		}
	}
	return 0;
}
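
/*
 * map a physical address back to a kernel virtual address
 */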
ulong
mmu_kaddr(ulong pa)
{
	ulong va;

	/* try the easy stuff first (the first case is true most of the time) */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return DRAMZERO+(pa-PHYSDRAM0);
	if(/* pa >= PHYSFLASH0 && */ pa <= PHYSFLASH0+(FLASHTOP-FLASHZERO))
		return FLASHZERO+(pa-PHYSFLASH0);
	if(pa >= PHYSNULL0 && pa <= PHYSNULL0+(NULLTOP-NULLZERO))
		return NULLZERO+(pa-PHYSNULL0);

	if(!mmuinited)
		return 0;	/* this shouldn't happen */

	/* walk the map for the special regs and extended memory */
	va = findva(pa, EMEMZERO, EMEMTOP);
	if(va != 0)
		return va;
	return findva(pa, REGZERO, REGTOP);
}
/*
 * Return the number of bytes that can be accessed via KADDR(pa).
 * If pa is not a valid argument to KADDR, return 0.
 */
ulong
cankaddr(ulong pa)
{
	/*
	 * Is this enough?
	 * We'll find out if anyone still has one
	 * of these...
	 */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return PHYSDRAM0+(DRAMTOP-DRAMZERO) - pa;
	return 0;
}
/*
 * table to map fault.c bits to physical bits
 */
static ulong mmubits[16] =
{
	[PTEVALID]				L2SmallPage|L2Cached|L2Buffered|L2UserRO,
	[PTEVALID|PTEWRITE]			L2SmallPage|L2Cached|L2Buffered|L2UserRW,
	[PTEVALID|PTEUNCACHED]			L2SmallPage|L2UserRO,
	[PTEVALID|PTEUNCACHED|PTEWRITE]		L2SmallPage|L2UserRW,

	[PTEKERNEL|PTEVALID]			L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEWRITE]		L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED]	L2SmallPage|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]	L2SmallPage|L2KernelRW,
};
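
/*
 * putmmu() below is handed the portable PTE bits (PTEVALID, PTEWRITE,
 * and so on) in the low bits of its pa argument; those bits index this
 * table and the remaining bits supply the page frame address.  For
 * instance, PTEVALID|PTEWRITE selects a cached, buffered, user
 * read/write small page descriptor.
 */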
/*
 * add an entry to the current map
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
	Page *l2pg;
	ulong *t, *l1p, *l2p;
	int s;

	s = splhi();

	/* clear out the current entry */
	mmuinvalidateaddr(va);

	l2pg = up->l1page[va>>20];
	if(l2pg == nil){
		l2pg = up->mmufree;
		if(l2pg != nil){
			up->mmufree = l2pg->next;
		} else {
			l2pg = auxpage();
			if(l2pg == nil)
				pexit("out of memory", 1);
		}
		l2pg->va = VA(kmap(l2pg));
		up->l1page[va>>20] = l2pg;
		memset((uchar*)(l2pg->va), 0, BY2PG);
	}

	/* always point L1 entry to L2 page, can't hurt */
	l1p = &l1table[va>>20];
	*l1p = L1PageTable | L1Domain0 | (l2pg->pa & L1PTBaseMask);
	up->l1table[va>>20] = *l1p;
	t = (ulong*)l2pg->va;

	/* set L2 entry */
	l2p = &t[(va & (OneMeg-1))>>PGSHIFT];
	*l2p = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
		| (pa & ~(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE));

	/*
	 * write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachewb();
	if(pg->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text page has been written */
		icacheinvalidate();
		pg->cachectl[0] = PG_NOFLUSH;
	}

	splx(s);
}
/*
 * free up all page tables for this proc
 */
void
mmuptefree(Proc *p)
{
	Page *pg;
	int i;

	for(i = 0; i < Nmeg; i++){
		pg = p->l1page[i];
		if(pg == nil)
			continue;
		p->l1page[i] = nil;
		pg->next = p->mmufree;
		p->mmufree = pg;
	}
	memset(p->l1table, 0, sizeof(p->l1table));
}
/*
 * this is called with palloc locked so the pagechainhead is kosher
 */
void
mmurelease(Proc* p)
{
	Page *pg, *next;

	/* write back dirty cache entries before changing map */
	cacheflush();

	mmuptefree(p);

	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
		if(--pg->ref)
			panic("mmurelease: pg->ref %d\n", pg->ref);
		pagechainhead(pg);
	}
	if(p->mmufree && palloc.r.p)
		wakeup(&palloc.r);
	p->mmufree = nil;

	memset(l1table, 0, sizeof(p->l1table));
	cachewbregion((ulong)l1table, sizeof(p->l1table));
}
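
/*
 * switch the MMU to proc p's address space: copy its level 1 entries
 * over the live l1table, write them back to memory, and discard any
 * stale cache and TLB state.
 */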
void
mmuswitch(Proc *p)
{
	if(m->mmupid == p->pid && p->newtlb == 0)
		return;
	m->mmupid = p->pid;

	/* write back dirty cache entries and invalidate all cache entries */
	cacheflush();

	if(p->newtlb){
		mmuptefree(p);
		p->newtlb = 0;
	}

	/* move in new map */
	memmove(l1table, p->l1table, sizeof(p->l1table));

	/* make sure map is in memory */
	cachewbregion((ulong)l1table, sizeof(p->l1table));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}
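
/*
 * print the level 1 (and, for page-table entries, level 2) descriptors
 * that map va; a debugging aid.
 */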
void
peekmmu(ulong va)
{
	ulong e, d;

	e = l1table[va>>20];
	switch(e & L1TypeMask){
	default:
		iprint("l1: %#p[%#lux] = %#lux invalid\n", l1table, va>>20, e);
		break;
	case L1PageTable:
		iprint("l1: %#p[%#lux] = %#lux pt\n", l1table, va>>20, e);
		va &= OneMeg-1;
		va >>= PGSHIFT;
		e &= L1PTBaseMask;
		d = ((ulong*)e)[va];
		iprint("l2: %#lux[%#lux] = %#lux\n", e, va, d);
		break;
	case L1Section:
		iprint("l1: %#p[%#lux] = %#lux section\n", l1table, va>>20, e);
		break;
	}
}
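
/*
 * stubs kept for the portable kernel's benefit; nothing for this port to do.
 */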
void
checkmmu(ulong, ulong)
{
}

void
countpagerefs(ulong*, int)
{
}