memory.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897
  1. /*
  2. * Size memory and create the kernel page-tables on the fly while doing so.
  3. * Called from main(), this code should only be run by the bootstrap processor.
  4. *
  5. * MemMin is what the bootstrap code in l.s has already mapped;
  6. * MemMax is the limit of physical memory to scan.
  7. */
  8. #include "u.h"
  9. #include "../port/lib.h"
  10. #include "mem.h"
  11. #include "dat.h"
  12. #include "fns.h"
  13. #include "io.h"
  14. #include "ureg.h"
  15. #define MEMDEBUG 0
enum {
	/* memory-type indices: used to select an RMap and in ramscan's nvalid[] */
	MemUPA = 0, /* unbacked physical address */
	MemRAM = 1, /* physical memory */
	MemUMB = 2, /* upper memory block (<16MB) */
	MemReserved = 3,
	NMemType = 4,

	KB = 1024,

	MemMin = 8*MB,			/* amount already mapped by the bootstrap code in l.s */
	MemMax = (3*1024+768)*MB,	/* highest physical address probed (3.75GB) */
};
typedef struct Map Map;
struct Map {
	ulong size;	/* length of the free range in bytes; 0 terminates a map */
	ulong addr;	/* physical base address of the range */
};

typedef struct RMap RMap;
struct RMap {
	char* name;	/* used by mapprint and error messages */
	Map* map;	/* entry array, kept sorted by addr */
	Map* mapend;	/* last usable slot, so a zero-size terminator always fits */

	Lock;		/* unnamed member (Plan 9 C): protects the entries */
};
/*
 * Memory allocation tracking.
 * One free-range map per memory type; each RMap points at its
 * backing Map array and at the array's final slot (the terminator).
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};
  65. void
  66. mapprint(RMap *rmap)
  67. {
  68. Map *mp;
  69. print("%s\n", rmap->name);
  70. for(mp = rmap->map; mp->size; mp++)
  71. print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
  72. }
  73. void
  74. memdebug(void)
  75. {
  76. ulong maxpa, maxpa1, maxpa2;
  77. maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
  78. maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
  79. maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
  80. print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
  81. maxpa, MB+maxpa*KB, maxpa1, maxpa2);
  82. mapprint(&rmapram);
  83. mapprint(&rmapumb);
  84. mapprint(&rmapumbrw);
  85. mapprint(&rmapupa);
  86. }
/*
 * Return the range [addr, addr+size) to rmap, coalescing with
 * adjacent free ranges where possible. The map stays sorted by
 * address and is terminated by a zero-size entry; if the map is
 * full the range is dropped with a console complaint.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	/* size is unsigned, so this only rejects size == 0 */
	if(size <= 0)
		return;

	lock(rmap);
	/* find the first entry above addr */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* extends the previous entry upwards */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* also bridges to the next: merge, then shift the tail down */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* butts against the next entry: extend it downwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			/* insert here, rippling the remaining entries upwards */
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment intended: stop once the old terminator is displaced */
	}
	unlock(rmap);
}
/*
 * Allocate size bytes from rmap, optionally at a fixed address
 * (addr != 0) and/or with the given alignment (align > 0).
 * Returns the physical address, or 0 on failure. Alignment slop
 * in front of the chosen range is returned to the map.
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 * if the current map entry is greater then
			 * the address is not in the map;
			 * if the current map entry does not overlap
			 * the beginning of the requested range then
			 * continue on to the next map entry;
			 * if the current map entry does not entirely
			 * contain the requested range then the range
			 * is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr) /* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size) /* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* entry fully consumed: shift the rest of the map down */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment intended: copy until terminator */
		}

		unlock(rmap);
		if(oaddr != maddr)
			/* give back the alignment slop in front of maddr */
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
  178. /*
  179. * Allocate from the ram map directly to make page tables.
  180. * Called by mmuwalk during e820scan.
  181. */
  182. void*
  183. rampage(void)
  184. {
  185. ulong m;
  186. m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
  187. if(m == 0)
  188. return nil;
  189. return KADDR(m);
  190. }
  191. static void
  192. umbexclude(void)
  193. {
  194. int size;
  195. ulong addr;
  196. char *op, *p, *rptr;
  197. if((p = getconf("umbexclude")) == nil)
  198. return;
  199. while(p && *p != '\0' && *p != '\n'){
  200. op = p;
  201. addr = strtoul(p, &rptr, 0);
  202. if(rptr == nil || rptr == p || *rptr != '-'){
  203. print("umbexclude: invalid argument <%s>\n", op);
  204. break;
  205. }
  206. p = rptr+1;
  207. size = strtoul(p, &rptr, 0) - addr + 1;
  208. if(size <= 0){
  209. print("umbexclude: bad range <%s>\n", op);
  210. break;
  211. }
  212. if(rptr != nil && *rptr == ',')
  213. *rptr++ = '\0';
  214. p = rptr;
  215. mapalloc(&rmapumb, addr, size, 0);
  216. }
  217. }
static void
umbscan(void)
{
	uchar o[2], *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 * of 0x55 0xAA, followed by a byte giving the size of the ROM
	 * in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Check for the ROM signature, skip if valid.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			p += p[2]*512;	/* p[2] is the ROM size in 512-byte chunks */
			continue;
		}

		/*
		 * Is it writeable? If yes, then stick it in
		 * the UMB device memory map. A floating bus will
		 * return 0xff, so add that to the map of the
		 * UMB space available for allocation.
		 * If it is neither of those, ignore it.
		 */
		o[0] = p[0];
		p[0] = 0xCC;
		o[1] = p[2*KB-1];
		p[2*KB-1] = 0xCC;
		if(p[0] == 0xCC && p[2*KB-1] == 0xCC){
			/* both probe bytes stuck: RAM - restore and free as r/w UMB */
			p[0] = o[0];
			p[2*KB-1] = o[1];
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		}
		else if(p[0] == 0xFF && p[1] == 0xFF)
			/* floating bus: nothing there, usable as address space */
			mapfree(&rmapumb, PADDR(p), 2*KB);

		p += 2*KB;
	}

	/* the E0000-F0000 region is probed as one 64KB chunk */
	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			/* writes did not stick: nothing backing it, free the address space */
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}

	umbexclude();
}
static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0END);
	bda = (uchar*)KADDR(0x400);
	/* BDA bytes 0x13-0x14: base memory size in KB */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n); /* keep us honest */

	/* also free the gap between the end of the kernel image and MemMin */
	x = PADDR(PGROUND((ulong)end));
	pa = MemMin;
	if(x > pa)
		panic("kernel too big");
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x); /* keep us honest */
}
/*
 * Probe physical memory up to maxmem (or a CMOS/MemMax-derived limit
 * when maxmem is 0), classifying each MB as RAM, UMB or unbacked
 * address space and building the kernel page tables as it goes.
 * Fallback path, used only when e820scan fails.
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMin;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMax;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO; /* 2^32 - KZERO */

	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;		/* saved and restored at the end: *k0 is clobbered by the probes */
	map = 0;		/* physical address of the page table under construction */
	x = 0x12345678;		/* probe pattern, changed every iteration */
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * can only use KADDR for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */
	vbase = MemMin;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));

		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 * if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 * initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				/* cpuid DX bit 3 = PSE: 4MB pages supported */
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;		/* vary the probe pattern so mirrors are detected */
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;	/* tear down the scanning window */
	mmuflushtlb(PADDR(m->pdb));

	/* everything beyond the last probed address is unbacked */
	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}
/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
	SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',	/* "SMAP" signature for %eax/%edx */
	Ememory = 1,		/* E820 range type: usable RAM */
	Ereserved = 2,		/* E820 range type: reserved */
	Carry = 1,		/* carry-flag bit in Ureg.flags: BIOS call failed */
};
/* one E820 descriptor, exactly as returned by the BIOS (20 bytes) */
typedef struct Emap Emap;
struct Emap
{
	uvlong base;	/* physical base of the range */
	uvlong len;	/* length in bytes */
	ulong type;	/* Ememory, Ereserved, ... */
};
static Emap emap[16];	/* copy of the BIOS map, sorted by e820scan */
int nemap;		/* number of valid entries in emap */

/* printable names for the low E820 type codes, indexed by Emap.type */
static char *etypes[] =
{
	"type=0",
	"memory",
	"reserved",
	"acpi reclaim",
	"acpi nvs",
};
  477. static int
  478. emapcmp(const void *va, const void *vb)
  479. {
  480. Emap *a, *b;
  481. a = (Emap*)va;
  482. b = (Emap*)vb;
  483. if(a->base < b->base)
  484. return -1;
  485. if(a->base > b->base)
  486. return 1;
  487. if(a->len < b->len)
  488. return -1;
  489. if(a->len > b->len)
  490. return 1;
  491. return a->type - b->type;
  492. }
  493. static void
  494. map(ulong base, ulong len, int type)
  495. {
  496. ulong e, n;
  497. ulong *table, flags, maxkpa;
  498. /*
  499. * Split any call crossing MemMin to make below simpler.
  500. */
  501. if(base < MemMin && len > MemMin-base){
  502. n = MemMin - base;
  503. map(base, n, type);
  504. map(MemMin, len-n, type);
  505. }
  506. /*
  507. * Let lowraminit and umbscan hash out the low MemMin.
  508. */
  509. if(base < MemMin)
  510. return;
  511. /*
  512. * Any non-memory below 16*MB is used as upper mem blocks.
  513. */
  514. if(type == MemUPA && base < 16*MB && base+len > 16*MB){
  515. map(base, 16*MB-base, MemUMB);
  516. map(16*MB, len-(16*MB-base), MemUPA);
  517. return;
  518. }
  519. /*
  520. * Memory below CPU0END is reserved for the kernel
  521. * and already mapped.
  522. */
  523. if(base < PADDR(CPU0END)){
  524. n = PADDR(CPU0END) - base;
  525. if(len <= n)
  526. return;
  527. map(PADDR(CPU0END), len-n, type);
  528. return;
  529. }
  530. /*
  531. * Memory between KTZERO and end is the kernel itself
  532. * and is already mapped.
  533. */
  534. if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
  535. map(base, PADDR(KTZERO)-base, type);
  536. return;
  537. }
  538. if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
  539. n = PADDR(PGROUND((ulong)end));
  540. if(len <= n)
  541. return;
  542. map(PADDR(PGROUND((ulong)end)), len-n, type);
  543. return;
  544. }
  545. /*
  546. * Now we have a simple case.
  547. */
  548. // print("map %.8lux %.8lux %d\n", base, base+len, type);
  549. switch(type){
  550. case MemRAM:
  551. mapfree(&rmapram, base, len);
  552. flags = PTEWRITE|PTEVALID;
  553. break;
  554. case MemUMB:
  555. mapfree(&rmapumb, base, len);
  556. flags = PTEWRITE|PTEUNCACHED|PTEVALID;
  557. break;
  558. case MemUPA:
  559. mapfree(&rmapupa, base, len);
  560. flags = 0;
  561. break;
  562. default:
  563. case MemReserved:
  564. flags = 0;
  565. break;
  566. }
  567. /*
  568. * bottom MemMin is already mapped - just twiddle flags.
  569. * (not currently used - see above)
  570. */
  571. if(base < MemMin){
  572. table = KADDR(PPN(m->pdb[PDX(base)]));
  573. e = base+len;
  574. base = PPN(base);
  575. for(; base<e; base+=BY2PG)
  576. table[PTX(base)] |= flags;
  577. return;
  578. }
  579. /*
  580. * Only map from KZERO to 2^32.
  581. */
  582. if(flags){
  583. maxkpa = -KZERO;
  584. if(base >= maxkpa)
  585. return;
  586. if(len > maxkpa-base)
  587. len = maxkpa - base;
  588. pdbmap(m->pdb, base|flags, base+KZERO, len);
  589. }
  590. }
/*
 * Query the BIOS E820 memory map via realmode Int 0x15, sort it,
 * and feed each range (plus any gaps) to map(). Returns 0 on
 * success, -1 if the call is disabled or yields no entries
 * (the caller then falls back to ramscan).
 */
static int
e820scan(void)
{
	int i;
	Ureg u;
	ulong cont, base, len;
	uvlong last;
	Emap *e;

	/* plan9.ini switches to skip the BIOS call entirely */
	if(getconf("*norealmode") || getconf("*noe820scan"))
		return -1;

	cont = 0;
	for(i=0; i<nelem(emap); i++){
		memset(&u, 0, sizeof u);
		u.ax = 0xE820;
		u.bx = cont;	/* continuation cookie from the previous call */
		u.cx = 20;	/* size of one descriptor */
		u.dx = SMAP;
		/* es:di = real-mode address of RMBUF */
		u.es = (PADDR(RMBUF)>>4)&0xF000;
		u.di = PADDR(RMBUF)&0xFFFF;
		u.trap = 0x15;
		realmode(&u);

		cont = u.bx;
		if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
			break;
		e = &emap[nemap++];
		*e = *(Emap*)RMBUF;
		if(u.bx == 0)	/* bx==0 marks the final descriptor */
			break;
	}
	if(nemap == 0)
		return -1;
	qsort(emap, nemap, sizeof emap[0], emapcmp);

	if(getconf("*noe820print") == nil){
		for(i=0; i<nemap; i++){
			e = &emap[i];
			print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
			if(e->type < nelem(etypes))
				print("%s\n", etypes[e->type]);
			else
				print("type=%lud\n", e->type);
		}
	}

	last = 0;
	for(i=0; i<nemap; i++){
		e = &emap[i];
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1LL<<32))
			break;
		base = e->base;
		if(base+e->len > (1LL<<32))
			len = -base;	/* clip to the end of the 32-bit address space */
		else
			len = e->len;
		/*
		 * If the map skips addresses, mark them available.
		 */
		if(last < e->base)
			map(last, e->base-last, MemUPA);
		last = base+len;
		if(e->type == Ememory)
			map(base, len, MemRAM);
		else
			map(base, len, MemReserved);
	}
	/* anything above the last descriptor is unbacked address space */
	if(last < (1LL<<32))
		map(last, (u32int)-last, MemUPA);
	return 0;
}
/*
 * Size memory and build the kernel maps. Called once from main()
 * on the bootstrap processor: sets caching attributes for the
 * VGA/BIOS region, scans UMBs and low RAM, sizes memory via the
 * E820 map (or by probing), then fills conf.mem with the
 * allocatable banks.
 */
void
meminit(void)
{
	int i;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong maxmem, lost;
	char *p;

	/* optional plan9.ini override of the memory limit */
	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);
	else
		maxmem = 0;

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 * VGA memory is writethrough;
	 * BIOS ROM's/UMB's are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();
	if(e820scan() < 0)
		ramscan(maxmem);	/* no usable E820 map: probe instead */

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
	}

	/* ram-map entries beyond nelem(conf.mem) cannot be recorded */
	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}
  710. /*
  711. * Allocate memory from the upper memory blocks.
  712. */
  713. ulong
  714. umbmalloc(ulong addr, int size, int align)
  715. {
  716. ulong a;
  717. if(a = mapalloc(&rmapumb, addr, size, align))
  718. return (ulong)KADDR(a);
  719. return 0;
  720. }
/* Return a umbmalloc'ed block (virtual address) to the UMB map. */
void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}
  726. ulong
  727. umbrwmalloc(ulong addr, int size, int align)
  728. {
  729. ulong a;
  730. uchar o[2], *p;
  731. if(a = mapalloc(&rmapumbrw, addr, size, align))
  732. return(ulong)KADDR(a);
  733. /*
  734. * Perhaps the memory wasn't visible before
  735. * the interface is initialised, so try again.
  736. */
  737. if((a = umbmalloc(addr, size, align)) == 0)
  738. return 0;
  739. p = (uchar*)a;
  740. o[0] = p[0];
  741. p[0] = 0xCC;
  742. o[1] = p[size-1];
  743. p[size-1] = 0xCC;
  744. if(p[0] == 0xCC && p[size-1] == 0xCC){
  745. p[0] = o[0];
  746. p[size-1] = o[1];
  747. return a;
  748. }
  749. umbfree(a, size);
  750. return 0;
  751. }
/* Return a umbrwmalloc'ed block (virtual address) to the r/w UMB map. */
void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}
  757. /*
  758. * Give out otherwise-unused physical address space
  759. * for use in configuring devices. Note that unlike upamalloc
  760. * before it, upaalloc does not map the physical address
  761. * into virtual memory. Call vmap to do that.
  762. */
  763. ulong
  764. upaalloc(int size, int align)
  765. {
  766. ulong a;
  767. a = mapalloc(&rmapupa, 0, size, align);
  768. if(a == 0){
  769. print("out of physical address space allocating %d\n", size);
  770. mapprint(&rmapupa);
  771. }
  772. return a;
  773. }
/* Return a physical range obtained from upaalloc to the UPA map. */
void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}
  779. void
  780. upareserve(ulong pa, int size)
  781. {
  782. ulong a;
  783. a = mapalloc(&rmapupa, pa, size, 0);
  784. if(a != pa){
  785. /*
  786. * This can happen when we're using the E820
  787. * map, which might have already reserved some
  788. * of the regions claimed by the pci devices.
  789. */
  790. // print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
  791. if(a != 0)
  792. mapfree(&rmapupa, a, size);
  793. }
  794. }
/* Public wrapper: print the CMOS sizes and all allocation maps. */
void
memorysummary(void)
{
	memdebug();
}