/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */
/*
 * To do:
 *	find a purpose for this...
 */
#include "u.h"
#include "../port/lib.h"
#include "mmu.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"

//#undef DBG
void msg(char *);
//#define DBG msg
/*
 * Address Space Map.
 * Low duty cycle.
 */
typedef struct Asm Asm;
struct Asm {
	uintmem	addr;
	uintmem	size;
	int	type;
	int	location;
	Asm*	next;
};

enum {
	AsmNONE		= 0,
	AsmMEMORY	= 1,
	AsmRESERVED	= 2,
	AsmACPIRECLAIM	= 3,
	AsmACPINVS	= 4,
	AsmDEV		= 5,
};

static Lock asmlock;
static Asm asmarray[64] = {
	{ 0, ~0, AsmNONE, 0, },
};
static int asmindex = 1;
static Asm* asmlist = &asmarray[0];
static Asm* asmfreelist;
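
/*
 * Print the current map (debug only).
 */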
/*static*/ void
asmdump(void)
{
	Asm* assem;

	DBG("asm: index %d:\n", asmindex);
	for(assem = asmlist; assem != nil; assem = assem->next){
		DBG(" %#P %#P %d (%P)\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);
	}
}
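
/*
 * Allocate a map entry: reuse one from the free list if possible,
 * otherwise take the next unused slot in asmarray.
 * Returns nil when the static array is exhausted.
 */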
static Asm*
asmnew(uintmem addr, uintmem size, int type)
{
	Asm *assem;

	if(asmfreelist != nil){
		assem = asmfreelist;
		asmfreelist = assem->next;
		assem->next = nil;
	}
	else{
		if(asmindex >= nelem(asmarray))
			return nil;
		assem = &asmarray[asmindex++];
	}
	assem->addr = addr;
	assem->size = size;
	assem->type = type;

	return assem;
}
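
/*
 * Return a range to the map, coalescing it with adjacent entries
 * of the same type where possible.
 * Returns 0 on success, -1 if the range overlaps an existing entry
 * or no map entry can be allocated for it.
 */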
int
asmfree(uintmem addr, uintmem size, int type)
{
	Asm *np, *pp, **ppp;

	DBG("asmfree: %#P@%#P, type 0x%x\n", size, addr, type);
	if(size == 0)
		return 0;

	lock(&asmlock);

	/*
	 * Find either a map entry with an address greater
	 * than that being returned, or the end of the map.
	 */
	pp = nil;
	ppp = &asmlist;
	for(np = *ppp; np != nil && np->addr <= addr; np = np->next){
		pp = np;
		ppp = &np->next;
	}

	if((pp != nil && pp->addr+pp->size > addr)
	|| (np != nil && addr+size > np->addr)){
		unlock(&asmlock);
		DBG("asmfree: overlap %#P@%#P, type %d\n", size, addr, type);
		return -1;
	}

	if(pp != nil && pp->type == type && pp->addr+pp->size == addr){
		pp->size += size;
		if(np != nil && np->type == type && addr+size == np->addr){
			pp->size += np->size;
			pp->next = np->next;

			np->next = asmfreelist;
			asmfreelist = np;
		}

		unlock(&asmlock);
		return 0;
	}

	if(np != nil && np->type == type && addr+size == np->addr){
		np->addr -= size;
		np->size += size;

		unlock(&asmlock);
		return 0;
	}

	if((pp = asmnew(addr, size, type)) == nil){
		unlock(&asmlock);
		DBG("asmfree: losing %#P@%#P, type %d\n", size, addr, type);
		return -1;
	}
	*ppp = pp;
	pp->next = np;

	unlock(&asmlock);

	return 0;
}
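
/*
 * Allocate a range of the given type from the map.
 * If addr is non-zero the range must start there; otherwise the
 * first entry large enough is used. The start is rounded up to
 * align when align > 0.
 * Returns the base address of the range, or 0 on failure.
 */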
uintmem
asmalloc(uintmem addr, uintmem size, int type, int align)
{
	uintmem a, o;
	Asm *assem, *pp;

	DBG("asmalloc: %#P@%#P, type %d\n", size, addr, type);
	lock(&asmlock);
	for(pp = nil, assem = asmlist; assem != nil; pp = assem, assem = assem->next){
		if(assem->type != type)
			continue;
		a = assem->addr;

		if(addr != 0){
			/*
			 * A specific address range has been given:
			 *   if the current map entry is greater, then
			 *   the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 * The comparisons are strange to prevent
			 * overflow.
			 */
			if(a > addr)
				break;
			if(assem->size < addr - a)
				continue;
			if(addr - a > assem->size - size)
				break;
			a = addr;
		}

		if(align > 0)
			a = ((a+align-1)/align)*align;
		if(assem->addr+assem->size-a < size)
			continue;

		o = assem->addr;
		assem->addr = a+size;
		assem->size -= a-o+size;
		if(assem->size == 0){
			if(pp != nil)
				pp->next = assem->next;
			assem->next = asmfreelist;
			asmfreelist = assem;
		}

		unlock(&asmlock);
		if(o != a)
			asmfree(o, a-o, type);

		return a;
	}
	unlock(&asmlock);

	return 0;
}
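
/*
 * Insert a range into the map: claim it from the unassigned
 * (AsmNONE) space, then enter it with its own type; if that
 * fails, return it as AsmNONE.
 */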
static void
asminsert(uintmem addr, uintmem size, int type)
{
	if(type == AsmNONE || asmalloc(addr, size, AsmNONE, 0) == 0)
		return;
	if(asmfree(addr, size, type) == 0)
		return;
	asmfree(addr, size, 0);
}
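
/*
 * Reserve the memory occupied by the kernel image itself:
 * everything below the page-rounded end of the loaded image.
 */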
void
asminit(void)
{
	sys->pmstart = ROUNDUP(PADDR(end), PGSZ);
	sys->pmend = sys->pmstart;
	asmalloc(0, sys->pmstart, AsmNONE, 0);
}
/*
 * Notes:
 * asmmapinit and asmmodinit are called from the multiboot code;
 * subject to change; the numerology here is probably suspect.
 * Multiboot defines the alignment of modules as 4096.
 */
void
asmmapinit(uintmem addr, uintmem size, int type)
{
	switch(type){
	default:
		asminsert(addr, size, type);
		break;
	case AsmMEMORY:
		/*
		 * Adjust things for the peculiarities of this
		 * architecture.
		 * Sys->pmend is the largest physical memory address found;
		 * there may be gaps between it and sys->pmstart. The range,
		 * and how much of it is occupied, may need to be known
		 * later for setting up allocators.
		 */
		if(addr < 1*MiB || addr+size < sys->pmstart)
			break;
		if(addr < sys->pmstart){
			size -= sys->pmstart - addr;
			addr = sys->pmstart;
		}
		asminsert(addr, size, type);
		sys->pmoccupied += size;
		if(addr+size > sys->pmend)
			sys->pmend = addr+size;
		break;
	}
}
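
/*
 * Reserve the memory occupied by a multiboot module: claim the
 * range from the current sys->pmstart up to the module's
 * page-rounded end and advance sys->pmstart past it.
 */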
void
asmmodinit(uint32_t start, uint32_t end, char* s)
{
	DBG("asmmodinit: %#x -> %#x: <%s> %#x\n",
		start, end, s, ROUNDUP(end, 4096));

	if(start < sys->pmstart)
		return;
	end = ROUNDUP(end, 4096);
	if(end > sys->pmstart){
		asmalloc(sys->pmstart, end-sys->pmstart, AsmNONE, 0);
		sys->pmstart = end;
	}
}
static int npg[4];
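
/*
 * Simple bump allocator for early boot: carve size bytes from the
 * mapped-but-unused virtual range between sys->vmunused and
 * sys->vmunmapped, and return them zeroed.
 */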
void*
asmbootalloc(usize size)
{
	uintptr_t va;

	assert(sys->vmunused+size <= sys->vmunmapped);
	va = sys->vmunused;
	sys->vmunused += size;
	memset(UINT2PTR(va), 0, size);

	return UINT2PTR(va);
}
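
/*
 * Page-table page allocator passed to mmuwalk: take the next
 * PTSZ-aligned page from the boot bump region and return its
 * physical address, or ~0 on failure.
 */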
static PTE
asmwalkalloc(usize size)
{
	uintmem pa;

	assert(size == PTSZ && sys->vmunused+size <= sys->vmunmapped);

	if(!ALIGNED(sys->vmunused, PTSZ)){
		DBG("asmwalkalloc: %llu wasted\n",
			ROUNDUP(sys->vmunused, PTSZ) - sys->vmunused);
		sys->vmunused = ROUNDUP(sys->vmunused, PTSZ);
	}
	if((pa = mmuphysaddr(sys->vmunused)) != ~0)
		sys->vmunused += size;

	return pa;
}
// still needed so iallocb gets initialised correctly. needs to go.
#define ConfCrap
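
/*
 * Map all usable (AsmMEMORY) and reserved (AsmRESERVED) ranges at
 * kseg2, using the largest page size that fits each chunk, and fill
 * in the conf.mem entries the rest of the kernel still expects.
 */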
void
asmmeminit(void)
{
	int i, l;
	Asm* assem;
	PTE *pte, *root;
	uintptr va;
	uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
	int cx;
#endif /* ConfCrap */

	assert(!((sys->vmunmapped|sys->vmend) & sys->pgszmask[1]));

	if((pa = mmuphysaddr(sys->vmunused)) == ~0)
		panic("asmmeminit 1");

	// vmunmapped is the START of unmapped memory (there is none on riscv yet).
	// It is the END of mapped memory we have not used.
	// vmunused is the START of mapped memory that is not used and the END
	// of memory that is used.
	// This code falls apart if sys->vmend - sys->vmunmapped is 0.
	// The goal is to map memory that is not mapped. But it's all mapped.
	root = UINT2PTR(machp()->MMU.root->va);
#if 0
	pa += sys->vmunmapped - sys->vmunused;
	mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0);
	if(mem != pa)
		panic("asmmeminit 2");
	DBG("pa %#llx mem %#llx\n", pa, mem);

	/* assume already 2MiB aligned */
	assert(ALIGNED(sys->vmunmapped, 2*MiB));
	while(sys->vmunmapped < sys->vmend){
		l = mmuwalk(root, sys->vmunmapped, 1, &pte, asmwalkalloc);
		DBG("%#p l %d\n", sys->vmunmapped, l);
		*pte = pa|PteRW|PteP;
		sys->vmunmapped += 2*MiB;
		pa += 2*MiB;
	}
#endif

#ifdef ConfCrap
	cx = 0;
#endif /* ConfCrap */
	for(assem = asmlist; assem != nil; assem = assem->next){
		DBG("asm: addr %#P end %#P type %d size %P\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);
		if(assem->type != AsmMEMORY && assem->type != AsmRESERVED){
			DBG("Skipping, it's not AsmMEMORY or AsmRESERVED\n");
			continue;
		}
		va = (uintptr_t)kseg2+assem->addr;
		lo = assem->addr;
		hi = assem->addr+assem->size;
		/* Convert a range into pages */
		for(mem = lo; mem < hi; mem = nextmem){
			nextmem = (mem + PGLSZ(0)) & ~sys->pgszmask[0];

			/* Try large pages first */
			for(i = sys->npgsz - 1; i >= 0; i--){
				if((mem & sys->pgszmask[i]) != 0)
					continue;
				if(mem + PGLSZ(i) > hi)
					continue;
				/* This page fits entirely within the range; mark it as usable. */
				if((l = mmuwalk(root, va, i, &pte, asmwalkalloc)) < 0)
					panic("asmmeminit 3");

				if(assem->type == AsmMEMORY)
					*pte = mem|PteRW|PteP;
				else
					*pte = mem|PteP;
				if(l > 0)
					*pte |= PteFinal;

				nextmem = mem + PGLSZ(i);
				va += PGLSZ(i);
				npg[i]++;

				break;
			}
		}

#ifdef ConfCrap
		/*
		 * Fill in conf crap.
		 */
		if(cx >= nelem(conf.mem))
			continue;
		lo = ROUNDUP(assem->addr, PGSZ);
		conf.mem[cx].base = lo;
		hi = ROUNDDN(hi, PGSZ);
		conf.mem[cx].npage = (hi - lo)/PGSZ;
		conf.npage += conf.mem[cx].npage;
		DBG("cm %d: addr %#llx npage %lu\n",
			cx, conf.mem[cx].base, conf.mem[cx].npage);
		cx++;
#endif /* ConfCrap */
	}
	DBG("%d %d %d\n", npg[0], npg[1], npg[2]);

#ifdef ConfCrap
	/*
	 * Fill in more conf crap.
	 * This is why I hate Plan 9.
	 */
	conf.upages = conf.npage;
	i = (sys->vmend - sys->vmstart)/PGSZ;	/* close enough */
	conf.ialloc = (i/2)*PGSZ;
	DBG("npage %llu upage %lu kpage %d\n",
		conf.npage, conf.upages, i);
#endif /* ConfCrap */
}
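
/*
 * Hand every AsmMEMORY range to the physical-page allocator.
 */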
void
asmumeminit(void)
{
	Asm *assem;
	extern void physallocdump(void);

	for(assem = asmlist; assem != nil; assem = assem->next){
		if(assem->type != AsmMEMORY)
			continue;
		physinit(assem->addr, assem->size);
	}
	physallocdump();
}