  1. /*
  2. * Size memory and create the kernel page-tables on the fly while doing so.
  3. * Called from main(), this code should only be run by the bootstrap processor.
  4. *
  5. * MemMin is what the bootstrap code in l.s has already mapped;
  6. * MemMax is the limit of physical memory to scan.
  7. */
  8. #include "u.h"
  9. #include "../port/lib.h"
  10. #include "mem.h"
  11. #include "dat.h"
  12. #include "fns.h"
  13. #include "io.h"
  14. #include "ureg.h"
#define MEMDEBUG	0

/*
 * Classes of physical address space tracked by the resource maps
 * below, plus a few size constants.
 */
enum {
	MemUPA		= 0,	/* unbacked physical address */
	MemRAM		= 1,	/* physical memory */
	MemUMB		= 2,	/* upper memory block (<16MB) */
	MemReserved	= 3,
	NMemType	= 4,

	KB		= 1024,

	MemMin		= 8*MB,			/* what the bootstrap code in l.s has already mapped (see file header) */
	MemMax		= (3*1024+768)*MB,	/* limit of physical memory to scan */
};
typedef struct Map Map;
/* one contiguous free extent: [addr, addr+size) */
struct Map {
	ulong	size;
	ulong	addr;
};

typedef struct RMap RMap;
/*
 * A resource map: an address-ordered array of free extents terminated
 * by an entry with size == 0, protected by the embedded Lock.
 * mapend points at the array's final slot, which is kept free so the
 * terminator always fits (mapfree refuses to write at or past it).
 */
struct RMap {
	char*	name;	/* for diagnostics */
	Map*	map;	/* first entry of the backing array */
	Map*	mapend;	/* last slot of the backing array */
	Lock;
};
/*
 * Memory allocation tracking.
 *
 * One resource map per class of physical address space; each backing
 * Map array leaves room for the size==0 terminator (see RMap.mapend).
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map xmapupa[16];
/* NOTE(review): xrmapupa is not referenced in this file chunk — confirm before removing */
static RMap xrmapupa = {
	"unbacked physical memory",
	xmapupa,
	&xmapupa[nelem(xmapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};
  71. void
  72. mapprint(RMap *rmap)
  73. {
  74. Map *mp;
  75. print("%s\n", rmap->name);
  76. for(mp = rmap->map; mp->size; mp++)
  77. print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
  78. }
/*
 * Print the CMOS memory-size fields and the contents of all the
 * resource maps; called when MEMDEBUG is set (and by memorysummary).
 */
void
memdebug(void)
{
	ulong maxpa, maxpa1, maxpa2;

	/*
	 * The CMOS values are supposed to be the number of KB above 1MB
	 * (see the comment in ramscan).
	 * NOTE(review): 0x17/0x18, 0x30/0x31 and 0x15/0x16 are presumably
	 * the extended-memory, alternate extended-memory and base-memory
	 * CMOS fields — verify against the CMOS/RTC register map.
	 */
	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapram);
	mapprint(&rmapumb);
	mapprint(&rmapumbrw);
	mapprint(&rmapupa);
}
/*
 * Return the extent [addr, addr+size) to rmap, keeping the array
 * sorted by address and coalescing with adjacent extents where
 * possible.  If the array is full the extent is dropped with a
 * console complaint.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	/* size is unsigned, so this only rejects size == 0 */
	if(size <= 0)
		return;

	lock(rmap);
	/* find the first extent starting above addr */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* abuts the preceding extent: grow it... */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* ...and it now also abuts the next: merge and shift the rest down */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* abuts the following extent: extend it downwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			/* insert here, rippling the remaining entries upwards */
			if(mp >= rmap->mapend){
				/* no room left for the terminator: drop the extent */
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment: loop ends once the size==0 terminator has been copied */
	}
	unlock(rmap);
}
/*
 * Allocate size bytes from rmap and return the address, or 0 on
 * failure.  A non-zero addr requests that exact address; otherwise
 * the first extent that can hold size bytes (rounded up to align
 * when align > 0) is used.  Any alignment gap carved off the front
 * of the chosen extent is given back to the map.
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry is greater then
			 *   the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		/* take [maddr, maddr+size) out of the front of this extent */
		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* extent exhausted: shift the rest of the array down */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment: copies the terminator last */
		}

		unlock(rmap);
		/* return any gap skipped over for alignment */
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
  184. /*
  185. * Allocate from the ram map directly to make page tables.
  186. * Called by mmuwalk during e820scan.
  187. */
  188. void*
  189. rampage(void)
  190. {
  191. ulong m;
  192. m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
  193. if(m == 0)
  194. return nil;
  195. return KADDR(m);
  196. }
/*
 * Remove the ranges named in the "umbexclude" configuration variable
 * from the UMB free map so they are never handed out.  The value is
 * parsed as a comma-separated list of inclusive ranges of the form
 * start-end (strtoul base 0, so hex is accepted).
 */
static void
umbexclude(void)
{
	int size;
	ulong addr;
	char *op, *p, *rptr;

	if((p = getconf("umbexclude")) == nil)
		return;

	while(p && *p != '\0' && *p != '\n'){
		op = p;		/* remember the start of this range for diagnostics */
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);
			break;
		}
		p = rptr+1;

		/* the end of the range is inclusive, hence the +1 */
		size = strtoul(p, &rptr, 0) - addr + 1;
		if(size <= 0){
			print("umbexclude: bad range <%s>\n", op);
			break;
		}
		if(rptr != nil && *rptr == ',')
			*rptr++ = '\0';
		p = rptr;

		/* claim the range so later umbmalloc calls cannot get it */
		mapalloc(&rmapumb, addr, size, 0);
	}
}
/*
 * Probe the Upper Memory Blocks and populate the UMB free maps;
 * see the detailed comment below for the probing rules.
 */
static void
umbscan(void)
{
	uchar *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Test for 0x55 0xAA before poking obtrusively,
		 * some machines (e.g. Thinkpad X20) seem to map
		 * something dynamic here (cardbus?) causing weird
		 * problems if it is changed.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			/* ROM found: p[2] is its size in 512-byte chunks; skip it */
			p += p[2]*512;
			continue;
		}

		/* probe both ends of the 2KB chunk for writability */
		p[0] = 0xCC;
		p[2*KB-1] = 0xCC;
		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			/*
			 * Not plain read-write memory.  Try planting a ROM
			 * signature; if it sticks, treat the chunk as a ROM
			 * and skip it (presumably guards against oddly
			 * half-writable regions — see note above).
			 */
			p[0] = 0x55;
			p[1] = 0xAA;
			p[2] = 4;
			if(p[0] == 0x55 && p[1] == 0xAA){
				p += p[2]*512;
				continue;
			}
			/* reads back as bus float (all ones): nothing here, usable address space */
			if(p[0] == 0xFF && p[1] == 0xFF)
				mapfree(&rmapumb, PADDR(p), 2*KB);
		}
		else
			/* writable and retentive: read-write device memory */
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		p += 2*KB;
	}

	/* the 64KB at 0xE0000 is free if there is no ROM signature and it isn't writable */
	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}

	umbexclude();
}
/*
 * Hand the usable parts of low memory (below MemMin) to the RAM map:
 * conventional memory above the CPU0 MMU data, and the space between
 * the end of the kernel image and MemMin.
 */
static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0END);
	bda = (uchar*)KADDR(0x400);
	/* bda[0x13-0x14]: conventional-memory size in KB from the BIOS data area */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n);			/* keep us honest */

	/* memory between the page-rounded end of the kernel image and MemMin */
	x = PADDR(PGROUND((ulong)end));
	pa = MemMin;
	if(x > pa)
		panic("kernel too big");
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);		/* keep us honest */
}
/*
 * Probe physical memory from MemMin upwards, building the kernel page
 * tables on the fly and handing each MB found to the RAM, UMB or UPA
 * free maps.  maxmem bounds the scan; if it is 0 a limit is derived
 * from CMOS (capped by MemMax).  Used only when e820scan fails.
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMin;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMax;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */

	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;		/* saved so the mirror-test writes below can be undone */
	map = 0;
	x = 0x12345678;		/* test pattern, varied each iteration */
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * can only use KADDR for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */
	vbase = MemMin;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			/* first page of a 4MB chunk: install a fresh page table */
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));
		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			/* fill in PTEs for the rest of the MB */
			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}
		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))	/* cpuiddx bit: 4MB page extension */
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				/* mixed chunk: keep the page table just built */
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	/* tear down the temporary scan window at vbase */
	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	/* everything above the scan limit is unbacked physical address space */
	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}
/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
	SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',	/* E820 signature */
	Ememory = 1,		/* E820 range type: usable memory */
	Ereserved = 2,		/* E820 range type: reserved */
	Carry = 1,		/* carry bit in the saved flags register */
};
typedef struct Emap Emap;
/* one E820 descriptor: a typed physical address range */
struct Emap
{
	uvlong base;
	uvlong len;
	ulong type;
};
static Emap emap[16];	/* descriptors collected by e820scan */
int nemap;		/* number of valid entries in emap */
/* printable names for the low E820 range types, indexed by type */
static char *etypes[] =
{
	"type=0",
	"memory",
	"reserved",
	"acpi reclaim",
	"acpi nvs",
};
  483. static int
  484. emapcmp(const void *va, const void *vb)
  485. {
  486. Emap *a, *b;
  487. a = (Emap*)va;
  488. b = (Emap*)vb;
  489. if(a->base < b->base)
  490. return -1;
  491. if(a->base > b->base)
  492. return 1;
  493. if(a->len < b->len)
  494. return -1;
  495. if(a->len > b->len)
  496. return 1;
  497. return a->type - b->type;
  498. }
  499. static void
  500. map(ulong base, ulong len, int type)
  501. {
  502. ulong e, n;
  503. ulong *table, flags, maxkpa;
  504. /*
  505. * Split any call crossing MemMin to make below simpler.
  506. */
  507. if(base < MemMin && len > MemMin-base){
  508. n = MemMin - base;
  509. map(base, n, type);
  510. map(MemMin, len-n, type);
  511. }
  512. /*
  513. * Let lowraminit and umbscan hash out the low MemMin.
  514. */
  515. if(base < MemMin)
  516. return;
  517. /*
  518. * Any non-memory below 16*MB is used as upper mem blocks.
  519. */
  520. if(type == MemUPA && base < 16*MB && base+len > 16*MB){
  521. map(base, 16*MB-base, MemUMB);
  522. map(16*MB, len-(16*MB-base), MemUPA);
  523. return;
  524. }
  525. /*
  526. * Memory below CPU0END is reserved for the kernel
  527. * and already mapped.
  528. */
  529. if(base < PADDR(CPU0END)){
  530. n = PADDR(CPU0END) - base;
  531. if(len <= n)
  532. return;
  533. map(PADDR(CPU0END), len-n, type);
  534. return;
  535. }
  536. /*
  537. * Memory between KTZERO and end is the kernel itself
  538. * and is already mapped.
  539. */
  540. if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
  541. map(base, PADDR(KTZERO)-base, type);
  542. return;
  543. }
  544. if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
  545. n = PADDR(PGROUND((ulong)end));
  546. if(len <= n)
  547. return;
  548. map(PADDR(PGROUND((ulong)end)), len-n, type);
  549. return;
  550. }
  551. /*
  552. * Now we have a simple case.
  553. */
  554. // print("map %.8lux %.8lux %d\n", base, base+len, type);
  555. switch(type){
  556. case MemRAM:
  557. mapfree(&rmapram, base, len);
  558. flags = PTEWRITE|PTEVALID;
  559. break;
  560. case MemUMB:
  561. mapfree(&rmapumb, base, len);
  562. flags = PTEWRITE|PTEUNCACHED|PTEVALID;
  563. break;
  564. case MemUPA:
  565. mapfree(&rmapupa, base, len);
  566. flags = 0;
  567. break;
  568. default:
  569. case MemReserved:
  570. flags = 0;
  571. break;
  572. }
  573. /*
  574. * bottom MemMin is already mapped - just twiddle flags.
  575. * (not currently used - see above)
  576. */
  577. if(base < MemMin){
  578. table = KADDR(PPN(m->pdb[PDX(base)]));
  579. e = base+len;
  580. base = PPN(base);
  581. for(; base<e; base+=BY2PG)
  582. table[PTX(base)] |= flags;
  583. return;
  584. }
  585. /*
  586. * Only map from KZERO to 2^32.
  587. */
  588. if(flags){
  589. maxkpa = -KZERO;
  590. if(base >= maxkpa)
  591. return;
  592. if(len > maxkpa-base)
  593. len = maxkpa - base;
  594. pdbmap(m->pdb, base|flags, base+KZERO, len);
  595. }
  596. }
/*
 * Fetch the BIOS E820 memory map via repeated real-mode Int 0x15
 * calls and hand the ranges to map().  Returns 0 on success, -1 if
 * disabled by configuration or if the BIOS yields no entries (the
 * caller then falls back to ramscan).
 */
static int
e820scan(void)
{
	int i;
	Ureg u;
	ulong cont, base, len;
	uvlong last;
	Emap *e;

	if(getconf("*norealmode") || getconf("*noe820scan"))
		return -1;

	cont = 0;
	for(i=0; i<nelem(emap); i++){
		memset(&u, 0, sizeof u);
		u.ax = 0xE820;		/* BIOS function: get system memory map */
		u.bx = cont;		/* continuation value from the previous call */
		u.cx = 20;		/* size of one E820 descriptor */
		u.dx = SMAP;		/* 'SMAP' signature */
		u.es = (PADDR(RMBUF)>>4)&0xF000;	/* es:di -> real-mode buffer */
		u.di = PADDR(RMBUF)&0xFFFF;
		u.trap = 0x15;
		realmode(&u);
		cont = u.bx;
		/* the BIOS signals failure with carry, a bad signature or a bad size */
		if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
			break;
		e = &emap[nemap++];
		*e = *(Emap*)RMBUF;
		if(u.bx == 0)	/* bx == 0 marks the final entry */
			break;
	}
	if(nemap == 0)
		return -1;
	qsort(emap, nemap, sizeof emap[0], emapcmp);

	if(getconf("*noe820print") == nil){
		for(i=0; i<nemap; i++){
			e = &emap[i];
			print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
			if(e->type < nelem(etypes))
				print("%s\n", etypes[e->type]);
			else
				print("type=%lud\n", e->type);
		}
	}

	last = 0;
	for(i=0; i<nemap; i++){
		e = &emap[i];
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1LL<<32))
			break;
		base = e->base;
		if(base+e->len > (1LL<<32))
			len = -base;	/* clip to the 4GB boundary */
		else
			len = e->len;
		/*
		 * If the map skips addresses, mark them available.
		 */
		if(last < e->base)
			map(last, e->base-last, MemUPA);
		last = base+len;
		if(e->type == Ememory)
			map(base, len, MemRAM);
		else
			map(base, len, MemReserved);
	}
	/* anything above the final entry is unbacked address space */
	if(last < (1LL<<32))
		map(last, (u32int)-last, MemUPA);
	return 0;
}
/*
 * Size memory and build the kernel maps; called from main() on the
 * bootstrap processor (see the file header).  Sets MMU attributes for
 * the 640KB-1MB region, scans memory (preferring the E820 map, falling
 * back to hardware probing) and records allocatable banks in conf.mem.
 */
void
meminit(void)
{
	int i;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong maxmem, lost;
	char *p;

	/* the optional "*maxmem" configuration variable bounds the probe */
	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);
	else
		maxmem = 0;

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 *   VGA memory is writethrough;
	 *   BIOS ROM's/UMB's are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();
	if(e820scan() < 0)
		ramscan(maxmem);

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
	}

	/* extents beyond what conf.mem can describe are unusable */
	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}
  716. /*
  717. * Allocate memory from the upper memory blocks.
  718. */
  719. ulong
  720. umbmalloc(ulong addr, int size, int align)
  721. {
  722. ulong a;
  723. if(a = mapalloc(&rmapumb, addr, size, align))
  724. return (ulong)KADDR(a);
  725. return 0;
  726. }
/* Return a block obtained from umbmalloc; addr is the virtual address. */
void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}
  732. ulong
  733. umbrwmalloc(ulong addr, int size, int align)
  734. {
  735. ulong a;
  736. uchar *p;
  737. if(a = mapalloc(&rmapumbrw, addr, size, align))
  738. return(ulong)KADDR(a);
  739. /*
  740. * Perhaps the memory wasn't visible before
  741. * the interface is initialised, so try again.
  742. */
  743. if((a = umbmalloc(addr, size, align)) == 0)
  744. return 0;
  745. p = (uchar*)a;
  746. p[0] = 0xCC;
  747. p[size-1] = 0xCC;
  748. if(p[0] == 0xCC && p[size-1] == 0xCC)
  749. return a;
  750. umbfree(a, size);
  751. return 0;
  752. }
/* Return a block obtained from umbrwmalloc's device-memory map; addr is virtual. */
void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}
  758. /*
  759. * Give out otherwise-unused physical address space
  760. * for use in configuring devices. Note that unlike upamalloc
  761. * before it, upaalloc does not map the physical address
  762. * into virtual memory. Call vmap to do that.
  763. */
  764. ulong
  765. upaalloc(int size, int align)
  766. {
  767. ulong a;
  768. a = mapalloc(&rmapupa, 0, size, align);
  769. if(a == 0){
  770. print("out of physical address space allocating %d\n", size);
  771. mapprint(&rmapupa);
  772. }
  773. return a;
  774. }
/* Return physical address space obtained from upaalloc. */
void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}
/*
 * Remove the exact range [pa, pa+size) from the UPA map so it is never
 * given out, e.g. because a device already claims it.  Failure to get
 * the exact address is tolerated (see comment below).
 */
void
upareserve(ulong pa, int size)
{
	ulong a;

	a = mapalloc(&rmapupa, pa, size, 0);
	if(a != pa){
		/*
		 * This can happen when we're using the E820
		 * map, which might have already reserved some
		 * of the regions claimed by the pci devices.
		 */
		// print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
		if(a != 0)
			mapfree(&rmapupa, a, size);
	}
}
/* Public wrapper: print the memory maps regardless of MEMDEBUG. */
void
memorysummary(void)
{
	memdebug();
}