
/*
 * This file is part of the UCB release of Plan 9. It is subject to the license
 * terms in the LICENSE file found in the top-level directory of this
 * distribution and at http://akaros.cs.berkeley.edu/files/Plan9License. No
 * part of the UCB release of Plan 9, including this file, may be copied,
 * modified, propagated, or distributed except according to the terms contained
 * in the LICENSE file.
 */
/*
 * To do:
 *	find a purpose for this...
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "amd64.h"

/*
 * Address Space Map.
 * Low duty cycle.
 */
typedef struct Asm Asm;
typedef struct Asm {
	uintmem	addr;
	uintmem	size;
	int	type;
	int	location;
	Asm*	next;
} Asm;
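
/*
 * Map entry types. The first four appear to mirror the BIOS E820/
 * multiboot memory map types (usable RAM, reserved, ACPI reclaimable,
 * ACPI NVS); AsmDEV presumably marks memory-mapped device ranges.
 */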
enum {
	AsmNONE		= 0,
	AsmMEMORY	= 1,
	AsmRESERVED	= 2,
	AsmACPIRECLAIM	= 3,
	AsmACPINVS	= 4,
	AsmDEV		= 5,
};

static Lock asmlock;
static Asm asmarray[64] = {
	{ 0, ~0, AsmNONE, 0, },
};
static int asmindex = 1;
static Asm* asmlist = &asmarray[0];
static Asm* asmfreelist;
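
/*
 * Print the current map: one line per entry with start, end,
 * type and size.
 */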
/*static*/ void
asmdump(void)
{
	Asm* assem;

	print("asm: index %d:\n", asmindex);
	for(assem = asmlist; assem != nil; assem = assem->next){
		print(" %#P %#P %d (%P)\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);
	}
}
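
/*
 * Take a map entry from the free list if one is available,
 * otherwise from the static array; nil when the array is exhausted.
 */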
static Asm*
asmnew(uintmem addr, uintmem size, int type)
{
	Asm *assem;

	if(asmfreelist != nil){
		assem = asmfreelist;
		asmfreelist = assem->next;
		assem->next = nil;
	}
	else{
		if(asmindex >= nelem(asmarray))
			return nil;
		assem = &asmarray[asmindex++];
	}
	assem->addr = addr;
	assem->size = size;
	assem->type = type;

	return assem;
}
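
/*
 * Give the range [addr, addr+size) of the given type back to the map,
 * coalescing with an adjacent entry of the same type where possible.
 * Fails (-1) if the range overlaps an existing entry or no map entry
 * can be allocated for it.
 */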
int
asmfree(uintmem addr, uintmem size, int type)
{
	Asm *np, *pp, **ppp;

	DBG("asmfree: %#P@%#P, type %d\n", size, addr, type);
	if(size == 0)
		return 0;

	lock(&asmlock);

	/*
	 * Find either a map entry with an address greater
	 * than that being returned, or the end of the map.
	 */
	pp = nil;
	ppp = &asmlist;
	for(np = *ppp; np != nil && np->addr <= addr; np = np->next){
		pp = np;
		ppp = &np->next;
	}

	if((pp != nil && pp->addr+pp->size > addr)
	|| (np != nil && addr+size > np->addr)){
		unlock(&asmlock);
  97. DBG("asmfree: overlap %#Px@%#P, type %d\n", size, addr, type);
		return -1;
	}

	if(pp != nil && pp->type == type && pp->addr+pp->size == addr){
		pp->size += size;
		if(np != nil && np->type == type && addr+size == np->addr){
			pp->size += np->size;
			pp->next = np->next;

			np->next = asmfreelist;
			asmfreelist = np;
		}

		unlock(&asmlock);
		return 0;
	}

	if(np != nil && np->type == type && addr+size == np->addr){
		np->addr -= size;
		np->size += size;

		unlock(&asmlock);
		return 0;
	}

	if((pp = asmnew(addr, size, type)) == nil){
		unlock(&asmlock);
		DBG("asmfree: losing %#P@%#P, type %d\n", size, addr, type);
		return -1;
	}
	*ppp = pp;
	pp->next = np;

	unlock(&asmlock);
	return 0;
}
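
/*
 * Allocate a range of the given type from the map. If addr is non-zero
 * the requested range must lie entirely within a single map entry;
 * otherwise the first entry with enough room is used. Any alignment
 * slop at the front of the chosen entry is handed back via asmfree.
 * Returns the allocated address, or 0 on failure.
 * (Illustrative call: asmalloc(0, 4*MiB, AsmMEMORY, 0) would claim
 * the first free 4MiB of usable memory.)
 */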
uintmem
asmalloc(uintmem addr, uintmem size, int type, int align)
{
	uintmem a, o;
	Asm *assem, *pp;

	DBG("asmalloc: %#P@%#P, type %d\n", size, addr, type);
	lock(&asmlock);
	for(pp = nil, assem = asmlist; assem != nil; pp = assem, assem = assem->next){
		if(assem->type != type)
			continue;
		a = assem->addr;

		if(addr != 0){
			/*
			 * A specific address range has been given:
			 * if the current map entry is greater, then
			 * the address is not in the map;
			 * if the current map entry does not overlap
			 * the beginning of the requested range, then
			 * continue on to the next map entry;
			 * if the current map entry does not entirely
			 * contain the requested range, then the range
			 * is not in the map.
			 * The comparisons are strange to prevent
			 * overflow.
			 */
			if(a > addr)
				break;
			if(assem->size < addr - a)
				continue;
			if(addr - a > assem->size - size)
				break;
			a = addr;
		}

		if(align > 0)
			a = ((a+align-1)/align)*align;
		if(assem->addr+assem->size-a < size)
			continue;

		o = assem->addr;
		assem->addr = a+size;
		assem->size -= a-o+size;
		if(assem->size == 0){
			if(pp != nil)
				pp->next = assem->next;
			assem->next = asmfreelist;
			asmfreelist = assem;
		}

		unlock(&asmlock);
		if(o != a)
			asmfree(o, a-o, type);
		return a;
	}
	unlock(&asmlock);

	return 0;
}
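
/*
 * Claim the range from the unclaimed (AsmNONE) space and re-enter it
 * with its proper type; if that fails, return it as AsmNONE.
 */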
static void
asminsert(uintmem addr, uintmem size, int type)
{
	if(type == AsmNONE || asmalloc(addr, size, AsmNONE, 0) == 0)
		return;
	if(asmfree(addr, size, type) == 0)
		return;
	asmfree(addr, size, 0);
}
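
/*
 * Reserve everything from physical address 0 up to the end of the
 * loaded kernel image, so it is never handed out as free memory.
 */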
void
asminit(void)
{
	sys->pmstart = ROUNDUP(PADDR(end), PGSZ);
	sys->pmend = sys->pmstart;
	asmalloc(0, sys->pmstart, AsmNONE, 0);
}

/*
 * Notes:
 * asmmapinit and asmmodinit are called from the multiboot code;
 * subject to change; the numerology here is probably suspect.
 * Multiboot defines the alignment of modules as 4096.
 */
void
asmmapinit(uintmem addr, uintmem size, int type)
{
	switch(type){
	default:
		asminsert(addr, size, type);
		break;
	case AsmMEMORY:
		/*
		 * Adjust things for the peculiarities of this
		 * architecture.
		 * Sys->pmend is the largest physical memory address found;
		 * there may be gaps between it and sys->pmstart. The range,
		 * and how much of it is occupied, may need to be known
		 * later when setting up allocators.
		 */
		if(addr < 1*MiB || addr+size < sys->pmstart)
			break;
		if(addr < sys->pmstart){
			size -= sys->pmstart - addr;
			addr = sys->pmstart;
		}
		asminsert(addr, size, type);
		sys->pmoccupied += size;
		if(addr+size > sys->pmend)
			sys->pmend = addr+size;
		break;
	}
}
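
/*
 * Account for a multiboot module lying above the kernel: advance
 * sys->pmstart past it so the module is not treated as free memory.
 */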
void
asmmodinit(uint32_t start, uint32_t end, char* s)
{
	DBG("asmmodinit: %#x -> %#x: <%s> %#x\n",
		start, end, s, ROUNDUP(end, 4096));
	if(start < sys->pmstart)
		return;
	end = ROUNDUP(end, 4096);
	if(end > sys->pmstart){
		asmalloc(sys->pmstart, end-sys->pmstart, AsmNONE, 0);
		sys->pmstart = end;
	}
}

static int npg[4];
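
/*
 * Early-boot bump allocator: carve size zeroed bytes out of the
 * already-mapped region between sys->vmunused and sys->vmunmapped.
 */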
void*
asmbootalloc(usize size)
{
	uintptr_t va;

	assert(sys->vmunused+size <= sys->vmunmapped);
	va = sys->vmunused;
	sys->vmunused += size;
	memset(UINT2PTR(va), 0, size);

	return UINT2PTR(va);
}
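
/*
 * Page-table allocator passed to mmuwalk: returns the physical address
 * of a PTSZ-aligned chunk taken from the boot bump region, or ~0 if
 * the address cannot be resolved.
 */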
static PTE
asmwalkalloc(usize size)
{
	uintmem pa;

	assert(size == PTSZ && sys->vmunused+size <= sys->vmunmapped);

	if(!ALIGNED(sys->vmunused, PTSZ)){
		DBG("asmwalkalloc: %llu wasted\n",
			ROUNDUP(sys->vmunused, PTSZ) - sys->vmunused);
		sys->vmunused = ROUNDUP(sys->vmunused, PTSZ);
	}
	if((pa = mmuphysaddr(sys->vmunused)) != ~0)
		sys->vmunused += size;

	return pa;
}

// still needed so iallocb gets initialised correctly. needs to go.
#define ConfCrap
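
/*
 * Map the usable and reserved physical ranges at KSEG2, preferring the
 * largest page size that fits each chunk, and (for now) fill in
 * conf.mem for the older allocators.
 */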
void
asmmeminit(void)
{
	int i, l;
	Asm* assem;
	PTE *pte, *pml4;
	uintptr_t va;
	uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
	int cx;
#endif /* ConfCrap */

	assert(!((sys->vmunmapped|sys->vmend) & sys->pgszmask[1]));

	if((pa = mmuphysaddr(sys->vmunused)) == ~0)
		panic("asmmeminit 1");
	pa += sys->vmunmapped - sys->vmunused;
	mem = asmalloc(pa, sys->vmend - sys->vmunmapped, AsmMEMORY, 0);
	if(mem != pa)
		panic("asmmeminit 2");
	DBG("pa %#llux mem %#llux\n", pa, mem);

	/* assume already 2MiB aligned */
	assert(ALIGNED(sys->vmunmapped, 2*MiB));
	pml4 = UINT2PTR(machp()->MMU.pml4->va);
	while(sys->vmunmapped < sys->vmend){
		l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc);
		DBG("%#p l %d\n", sys->vmunmapped, l);
		*pte = pa|PtePS|PteRW|PteP;
		sys->vmunmapped += 2*MiB;
		pa += 2*MiB;
	}

#ifdef ConfCrap
	cx = 0;
#endif /* ConfCrap */
	for(assem = asmlist; assem != nil; assem = assem->next){
		print("asm: addr %#P end %#P type %d size %P\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);
		if((assem->type != AsmMEMORY) && (assem->type != AsmRESERVED)){
			print("Skipping, it's not AsmMEMORY or AsmRESERVED\n");
			continue;
		}
		va = KSEG2+assem->addr;
		print("asm: addr %#P end %#P type %d size %P\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);

		lo = assem->addr;
		hi = assem->addr+assem->size;
		/* Convert a range into pages */
		for(mem = lo; mem < hi; mem = nextmem){
			nextmem = (mem + PGLSZ(0)) & ~sys->pgszmask[0];

			/* Try large pages first */
			for(i = sys->npgsz - 1; i >= 0; i--){
				if((mem & sys->pgszmask[i]) != 0)
					continue;
				if(mem + PGLSZ(i) > hi)
					continue;
				/* This page fits entirely within the range. */
				/* Mark it as usable. */
				if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0)
					panic("asmmeminit 3");
				if(assem->type == AsmMEMORY)
					*pte = mem|PteRW|PteP;
				else
					*pte = mem|PteP;
				if(l > 0)
					*pte |= PtePS;

				nextmem = mem + PGLSZ(i);
				va += PGLSZ(i);
				npg[i]++;
				break;
			}
		}

#ifdef ConfCrap
		/*
		 * Fill in conf crap.
		 */
		if(cx >= nelem(conf.mem))
			continue;
		lo = ROUNDUP(assem->addr, PGSZ);
		//if(lo >= 600ull*MiB)
		//	continue;
		conf.mem[cx].base = lo;
		hi = ROUNDDN(hi, PGSZ);
		//if(hi > 600ull*MiB)
		//	hi = 600*MiB;
		conf.mem[cx].npage = (hi - lo)/PGSZ;
		conf.npage += conf.mem[cx].npage;
		print("cm %d: addr %#llux npage %lu\n",
			cx, conf.mem[cx].base, conf.mem[cx].npage);
		cx++;
#endif /* ConfCrap */
	}
	print("%d %d %d\n", npg[0], npg[1], npg[2]);

#ifdef ConfCrap
	/*
	 * Fill in more conf crap.
	 * This is why I hate Plan 9.
	 */
	conf.upages = conf.npage;
	i = (sys->vmend - sys->vmstart)/PGSZ;	/* close enough */
	conf.ialloc = (i/2)*PGSZ;
	print("npage %llu upage %lu kpage %d\n",
		conf.npage, conf.upages, i);
#endif /* ConfCrap */
}
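
/*
 * Hand every AsmMEMORY range to the physical-page allocator.
 */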
void
asmumeminit(void)
{
	Asm *assem;
	extern void physallocdump(void);

	for(assem = asmlist; assem != nil; assem = assem->next){
		if(assem->type != AsmMEMORY)
			continue;
		physinit(assem->addr, assem->size);
	}
	physallocdump();
}