mmu.c

#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"
#include "../port/error.h"

/*
 * to avoid mmu and cache flushing, we use the pid register in the MMU
 * to map all user addresses.  Although there are 64 possible pids, we
 * can only use 31 because there are only 32 protection domains and we
 * need one for the kernel.  Pid i is thus associated with domain i.
 * Domain 0 is used for the kernel.
 */

/* real protection bits */
enum
{
	/* level 1 descriptor bits */
	L1TypeMask=	(3<<0),
	L1Invalid=	(0<<0),
	L1PageTable=	(1<<0),
	L1Section=	(2<<0),
	L1Cached=	(1<<3),
	L1Buffered=	(1<<2),
	L1DomShift=	5,
	L1Domain0=	(0<<L1DomShift),
	L1KernelRO=	(0x0<<10),
	L1KernelRW=	(0x1<<10),
	L1UserRO=	(0x2<<10),
	L1UserRW=	(0x3<<10),
	L1SectBaseMask=	(0xFFF<<20),
	L1PTBaseMask=	(0x3FFFFF<<10),

	/* level 2 descriptor bits */
	L2TypeMask=	(3<<0),
	L2SmallPage=	(2<<0),
	L2LargePage=	(1<<0),
	L2Cached=	(1<<3),
	L2Buffered=	(1<<2),
	L2KernelRW=	(0x55<<4),
	L2UserRO=	(0xAA<<4),
	L2UserRW=	(0xFF<<4),
	L2PageBaseMask=	(0xFFFFF<<12),

	/* domain values */
	Dnoaccess=	0,
	Dclient=	1,
	Dmanager=	3,
};
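
/*
 * for reference, a cached, buffered, kernel read/write section mapping
 * physical address pa is assembled from the bits above as
 *	L1Section | L1KernelRW | L1Domain0 | L1Cached | L1Buffered | (pa & L1SectBaseMask)
 * which is exactly what mmuinit does below
 */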

ulong *l1table;
static int mmuinited;

/*
 * We map all of memory, flash, and the zeros area with sections.
 * Special use space is mapped on the fly with regmap.
 */
void
mmuinit(void)
{
	ulong a, o;
	ulong *t;

	/* get a prototype level 1 page */
	l1table = xspanalloc(16*1024, 16*1024, 0);
	memset(l1table, 0, 16*1024);

	/* map low mem (I really don't know why I have to do this -- presotto) */
	for(o = 0; o < 1*OneMeg; o += OneMeg)
		l1table[(0+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((0+o)&L1SectBaseMask);

	/* map DRAM */
	for(o = 0; o < DRAMTOP-DRAMZERO; o += OneMeg)
		l1table[(DRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* uncached DRAM */
	for(o = 0; o < UCDRAMTOP-UCDRAMZERO; o += OneMeg)
		l1table[(UCDRAMZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSDRAM0+o)&L1SectBaseMask);

	/* map zeros area */
	for(o = 0; o < NULLTOP-NULLZERO; o += OneMeg)
		l1table[(NULLZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| L1Cached | L1Buffered
			| ((PHYSNULL0+o)&L1SectBaseMask);

	/* map flash */
	for(o = 0; o < FLASHTOP-FLASHZERO; o += OneMeg)
		l1table[(FLASHZERO+o)>>20] = L1Section | L1KernelRW | L1Domain0
			| ((PHYSFLASH0+o)&L1SectBaseMask);

	/* map peripheral control module regs */
	mapspecial(0x80000000, OneMeg);

	/* map system control module regs */
	mapspecial(0x90000000, OneMeg);

	/*
	 * double map start of ram to exception vectors
	 */
	a = EVECTORS;
	t = xspanalloc(BY2PG, 1024, 0);
	memset(t, 0, BY2PG);
	l1table[a>>20] = L1PageTable | L1Domain0 | (((ulong)t) & L1PTBaseMask);
	t[(a&0xfffff)>>PGSHIFT] = L2SmallPage | L2KernelRW | (PHYSDRAM0 & L2PageBaseMask);

	mmurestart();

	mmuinited = 1;
}
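
/*
 * reload the mmu state from l1table; called from mmuinit at boot and,
 * as the name suggests, whenever the mmu must be brought back up
 * (presumably, e.g., after a suspend/resume cycle)
 */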
void
mmurestart(void)
{
	/* set up the domain register to cause all domains to obey pte access bits */
	putdac(Dclient);

	/* point to map */
	putttb((ulong)l1table);

	/* enable mmu */
	wbflush();
	mmuinvalidate();
	mmuenable();
	cacheflush();
}

/*
 * map on request
 */
static void*
_map(ulong pa, int len, ulong zero, ulong top, ulong l1prop, ulong l2prop)
{
	ulong *t;
	ulong va, i, base, end, off, entry;
	int large;
	ulong* rv;

	rv = nil;
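
	/* requests of 128K or more get 1MB section mappings; smaller ones get 4K small pages */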
	large = len >= 128*1024;
	if(large){
		base = pa & ~(OneMeg-1);
		end = (pa+len-1) & ~(OneMeg-1);
	} else {
		base = pa & ~(BY2PG-1);
		end = (pa+len-1) & ~(BY2PG-1);
	}
	off = pa - base;

	for(va = zero; va < top && base <= end; va += OneMeg){
		switch(l1table[va>>20] & L1TypeMask){
		default:
			/* found unused entry on level 1 table */
			if(large){
				if(rv == nil)
					rv = (ulong*)(va+off);
				l1table[va>>20] = L1Section | l1prop | L1Domain0 |
					(base & L1SectBaseMask);
				base += OneMeg;
				continue;
			} else {
				/* create an L2 page table and keep going */
				t = xspanalloc(BY2PG, 1024, 0);
				memset(t, 0, BY2PG);
				l1table[va>>20] = L1PageTable | L1Domain0 |
					(((ulong)t) & L1PTBaseMask);
			}
			break;
		case L1Section:
			/* if it's already mapped in a one meg area, don't remap */
			entry = l1table[va>>20];
			i = entry & L1SectBaseMask;
			if(pa >= i && (pa+len) <= i + OneMeg)
			if((entry & ~L1SectBaseMask) == (L1Section | l1prop | L1Domain0))
				return (void*)(va + (pa & (OneMeg-1)));
			continue;
		case L1PageTable:
			if(large)
				continue;
			break;
		}

		/* here if we're using page maps instead of sections */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg && base <= end; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage){
				if(rv == nil)
					rv = (ulong*)(va+i+off);
				t[i>>PGSHIFT] = L2SmallPage | l2prop |
					(base & L2PageBaseMask);
				base += BY2PG;
				continue;
			}
		}
	}

	/* didn't fit */
	if(base <= end)
		return nil;

	cacheflush();

	return rv;
}

/* map in i/o registers */
void*
mapspecial(ulong pa, int len)
{
	return _map(pa, len, REGZERO, REGTOP, L1KernelRW, L2KernelRW);
}

/* map add-on memory */
void*
mapmem(ulong pa, int len, int cached)
{
	ulong l1, l2;

	if(cached){
		l1 = L1KernelRW|L1Cached|L1Buffered;
		l2 = L2KernelRW|L2Cached|L2Buffered;
	} else {
		l1 = L1KernelRW;
		l2 = L2KernelRW;
	}
	return _map(pa, len, EMEMZERO, EMEMTOP, l1, l2);
}
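
/*
 * illustrative use (a sketch, not code from this port): a driver maps
 * its device registers once at init time and keeps the pointer, e.g.
 *	regs = mapspecial(0x80050000, 64);	(hypothetical register base)
 * mapspecial deliberately omits the Cached/Buffered bits so that device
 * reads and writes are never satisfied from the cache
 */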

/* map a virtual address to a physical one */
ulong
mmu_paddr(ulong va)
{
	ulong entry;
	ulong *t;

	entry = l1table[va>>20];
	switch(entry & L1TypeMask){
	case L1Section:
		return (entry & L1SectBaseMask) | (va & (OneMeg-1));
	case L1PageTable:
		t = (ulong*)(entry & L1PTBaseMask);
		va &= OneMeg-1;
		entry = t[va>>PGSHIFT];
		switch(entry & L2TypeMask){
		case L2SmallPage:
			return (entry & L2PageBaseMask) | (va & (BY2PG-1));
		}
	}
	return 0;
}

/* map a physical address to a virtual one */
ulong
findva(ulong pa, ulong zero, ulong top)
{
	int i;
	ulong entry, va;
	ulong start, end;
	ulong *t;

	for(va = zero; va < top; va += OneMeg){
		/* search the L1 entry */
		entry = l1table[va>>20];
		switch(entry & L1TypeMask){
		default:
			return 0;	/* no holes */
		case L1Section:
			start = entry & L1SectBaseMask;
			end = start + OneMeg;
			if(pa >= start && pa < end)
				return va | (pa & (OneMeg-1));
			continue;
		case L1PageTable:
			break;
		}

		/* search the L2 entry */
		t = (ulong*)(l1table[va>>20] & L1PTBaseMask);
		for(i = 0; i < OneMeg; i += BY2PG){
			entry = t[i>>PGSHIFT];

			/* found unused entry on level 2 table */
			if((entry & L2TypeMask) != L2SmallPage)
				break;
			start = entry & L2PageBaseMask;
			end = start + BY2PG;
			if(pa >= start && pa < end)
				return va | i | (pa & (BY2PG-1));	/* i is already a byte offset */
		}
	}
	return 0;
}

ulong
mmu_kaddr(ulong pa)
{
	ulong va;

	/* try the easy stuff first (the first case is true most of the time) */
	if(pa >= PHYSDRAM0 && pa <= PHYSDRAM0+(DRAMTOP-DRAMZERO))
		return DRAMZERO+(pa-PHYSDRAM0);
	if(pa >= PHYSFLASH0 && pa <= PHYSFLASH0+(FLASHTOP-FLASHZERO))
		return FLASHZERO+(pa-PHYSFLASH0);
	if(pa >= PHYSNULL0 && pa <= PHYSNULL0+(NULLTOP-NULLZERO))
		return NULLZERO+(pa-PHYSNULL0);

	if(!mmuinited)
		return 0;	/* this shouldn't happen */

	/* walk the map for the special regs and extended memory */
	va = findva(pa, EMEMZERO, EMEMTOP);
	if(va != 0)
		return va;
	return findva(pa, REGZERO, REGTOP);
}

/*
 * table to map fault.c bits to physical bits
 */
static ulong mmubits[16] =
{
	[PTEVALID]					L2SmallPage|L2Cached|L2Buffered|L2UserRO,
	[PTEVALID|PTEWRITE]				L2SmallPage|L2Cached|L2Buffered|L2UserRW,
	[PTEVALID|PTEUNCACHED]				L2SmallPage|L2UserRO,
	[PTEVALID|PTEUNCACHED|PTEWRITE]			L2SmallPage|L2UserRW,

	[PTEKERNEL|PTEVALID]				L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEWRITE]			L2SmallPage|L2Cached|L2Buffered|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED]		L2SmallPage|L2KernelRW,
	[PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE]	L2SmallPage|L2KernelRW,
};
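
/*
 * note the four PTEKERNEL rows: no kernel read-only L2 encoding is
 * defined above, so kernel mappings come out writable whether or not
 * PTEWRITE is set
 */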

/*
 * add an entry to the current map
 */
void
putmmu(ulong va, ulong pa, Page *pg)
{
	Page *l2pg;
	ulong *t, *l1p, *l2p;
	int s;

	s = splhi();

	/* clear out the current entry */
	mmuinvalidateaddr(va);

	l2pg = up->l1page[va>>20];
	if(l2pg == nil){
		l2pg = up->mmufree;
		if(l2pg != nil){
			up->mmufree = l2pg->next;
		} else {
			l2pg = auxpage();
			if(l2pg == nil)
				pexit("out of memory", 1);
		}
		l2pg->va = VA(kmap(l2pg));
		up->l1page[va>>20] = l2pg;
		memset((uchar*)(l2pg->va), 0, BY2PG);
	}
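
	/* L2 pages are recycled through up->mmufree rather than freed; mmuptefree refills that list */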

	/* always point L1 entry to L2 page, can't hurt */
	l1p = &l1table[va>>20];
	*l1p = L1PageTable | L1Domain0 | (l2pg->pa & L1PTBaseMask);
	up->l1table[va>>20] = *l1p;
	t = (ulong*)l2pg->va;

	/* set L2 entry */
	l2p = &t[(va & (OneMeg-1))>>PGSHIFT];
	*l2p = mmubits[pa & (PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE)]
		| (pa & ~(PTEKERNEL|PTEVALID|PTEUNCACHED|PTEWRITE));

	/*
	 * write back dirty entries - we need this because the pio() in
	 * fault.c is writing via a different virt addr and won't clean
	 * its changes out of the dcache.  Page coloring doesn't work
	 * on this mmu because the virtual cache is set associative
	 * rather than direct mapped.
	 */
	cachewb();
	if(pg->cachectl[0] == PG_TXTFLUSH){
		/* pio() sets PG_TXTFLUSH whenever a text page has been written */
		icacheinvalidate();
		pg->cachectl[0] = PG_NOFLUSH;
	}

	splx(s);
}

/*
 * free up all page tables for this proc
 */
void
mmuptefree(Proc *p)
{
	Page *pg;
	int i;

	for(i = 0; i < Nmeg; i++){
		pg = p->l1page[i];
		if(pg == nil)
			continue;
		p->l1page[i] = nil;
		pg->next = p->mmufree;
		p->mmufree = pg;
	}
	memset(p->l1table, 0, sizeof(p->l1table));
}

/*
 * this is called with palloc locked so the pagechainhead is kosher
 */
void
mmurelease(Proc* p)
{
	Page *pg, *next;

	/* write back dirty cache entries before changing map */
	cacheflush();

	mmuptefree(p);

	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
		if(--pg->ref)
			panic("mmurelease: pg->ref %d\n", pg->ref);
		pagechainhead(pg);
	}
	if(p->mmufree && palloc.r.p)
		wakeup(&palloc.r);
	p->mmufree = nil;

	memset(l1table, 0, sizeof(p->l1table));
	cachewbregion((ulong)l1table, sizeof(p->l1table));
}
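
/*
 * switch the live map to process p; a no-op when the pid already
 * matches and no tlb flush is pending
 */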
void
mmuswitch(Proc *p)
{
	if(m->mmupid == p->pid && p->newtlb == 0)
		return;
	m->mmupid = p->pid;

	/* write back dirty cache entries and invalidate all cache entries */
	cacheflush();

	if(p->newtlb){
		mmuptefree(p);
		p->newtlb = 0;
	}

	/* move in new map */
	memmove(l1table, p->l1table, sizeof(p->l1table));

	/* make sure map is in memory */
	cachewbregion((ulong)l1table, sizeof(p->l1table));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}

void
flushmmu(void)
{
	int s;

	s = splhi();
	up->newtlb = 1;
	mmuswitch(up);
	splx(s);
}

void
peekmmu(ulong va)
{
	ulong e, d;

	e = l1table[va>>20];
	switch(e & L1TypeMask){
	default:
		iprint("l1: %lux[%lux] = %lux invalid\n", l1table, va>>20, e);
		break;
	case L1PageTable:
		iprint("l1: %lux[%lux] = %lux pt\n", l1table, va>>20, e);
		va &= OneMeg-1;
		va >>= PGSHIFT;
		e &= L1PTBaseMask;
		d = ((ulong*)e)[va];
		iprint("l2: %lux[%lux] = %lux\n", e, va, d);
		break;
	case L1Section:
		iprint("l1: %lux[%lux] = %lux section\n", l1table, va>>20, e);
		break;
	}
}