  1. /*
  2. * Size memory and create the kernel page-tables on the fly while doing so.
  3. * Called from main(), this code should only be run by the bootstrap processor.
  4. */
  5. #include "u.h"
  6. #include "../port/lib.h"
  7. #include "mem.h"
  8. #include "dat.h"
  9. #include "fns.h"
  10. #include "io.h"
  11. #include "ureg.h"
  12. #define MEMDEBUG 0
/*
 * Classes of physical address space tracked by the resource maps below.
 */
enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	MemReserved	= 3,
	NMemType	= 4,

	KB		= 1024,

	MemMinMB	= 4,		/* minimum physical memory (<=4MB) */
	MemMaxMB	= 3*1024+768,	/* maximum physical memory to check */

	NMemBase	= 10,
};
typedef struct Map Map;
/* One free extent: size bytes starting at physical address addr. */
struct Map {
	ulong	size;
	ulong	addr;
};

typedef struct RMap RMap;
/*
 * A named resource map: a sorted array of free extents terminated by
 * a zero-size entry, protected by the embedded Lock.
 */
struct RMap {
	char*	name;
	Map*	map;		/* first entry */
	Map*	mapend;		/* last usable slot */

	Lock;
};
/*
 * Memory allocation tracking.
 * Each map's last array slot is reserved as the zero terminator
 * (mapend points one entry before the end of the backing array).
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map xmapupa[16];
static RMap xrmapupa = {
	"unbacked physical memory",
	xmapupa,
	&xmapupa[nelem(xmapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};
  69. void
  70. mapprint(RMap *rmap)
  71. {
  72. Map *mp;
  73. print("%s\n", rmap->name);
  74. for(mp = rmap->map; mp->size; mp++)
  75. print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
  76. }
/*
 * Dump the CMOS extended-memory size registers and the current
 * contents of the RAM, UMB, UMB-rw and UPA resource maps.
 */
void
memdebug(void)
{
	ulong maxpa, maxpa1, maxpa2;

	/* CMOS: extended memory above 1MB, in KB (three redundant copies) */
	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapram);
	mapprint(&rmapumb);
	mapprint(&rmapumbrw);
	mapprint(&rmapupa);
}
/*
 * Return the range [addr, addr+size) to the map, coalescing with
 * neighbouring free extents where possible. If the map is full the
 * range is dropped with a diagnostic.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	/* NOTE(review): size is ulong, so this only rejects size == 0 */
	if(size <= 0)
		return;

	lock(rmap);
	/* entries are sorted by addr; find the first entry above addr */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* extend the preceding entry */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* the freed range also bridges to the next entry:
			 * merge it in and shift the remainder down */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* extend the following entry backwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			/* insert here, rippling later entries up one slot */
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment intended: stop at terminator */
	}
	unlock(rmap);
}
/*
 * Allocate size bytes from the map. If addr is non-zero that exact
 * range is requested, otherwise the first fit is used; align, when
 * positive, rounds the start address up to a multiple of align.
 * Returns the allocated address, or 0 on failure.
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 * if the current map entry is greater then
			 * the address is not in the map;
			 * if the current map entry does not overlap
			 * the beginning of the requested range then
			 * continue on to the next map entry;
			 * if the current map entry does not entirely
			 * contain the requested range then the range
			 * is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		/* carve [maddr, maddr+size) out of the front of the entry */
		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* entry exhausted: shift the rest of the map down */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment intended */
		}

		unlock(rmap);
		/* give back any alignment gap left at the front */
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
  182. /*
  183. * Allocate from the ram map directly to make page tables.
  184. * Called by mmuwalk during e820scan.
  185. */
  186. void*
  187. rampage(void)
  188. {
  189. ulong m;
  190. m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
  191. if(m == 0)
  192. return nil;
  193. return KADDR(m);
  194. }
/*
 * Honour the "umbexclude" configuration variable: a comma-separated
 * list of "start-end" ranges (inclusive) to remove from the UMB map,
 * e.g. for hardware that misbehaves when probed. The getconf string
 * is modified in place (commas overwritten with NULs).
 */
static void
umbexclude(void)
{
	int size;
	ulong addr;
	char *op, *p, *rptr;

	if((p = getconf("umbexclude")) == nil)
		return;

	while(p && *p != '\0' && *p != '\n'){
		op = p;
		addr = strtoul(p, &rptr, 0);
		if(rptr == nil || rptr == p || *rptr != '-'){
			print("umbexclude: invalid argument <%s>\n", op);
			break;
		}
		p = rptr+1;

		/* end is inclusive, hence the +1 */
		size = strtoul(p, &rptr, 0) - addr + 1;
		if(size <= 0){
			print("umbexclude: bad range <%s>\n", op);
			break;
		}
		if(rptr != nil && *rptr == ',')
			*rptr++ = '\0';
		p = rptr;

		/* allocate the range so nobody else can; result deliberately ignored */
		mapalloc(&rmapumb, addr, size, 0);
	}
}
/*
 * Populate the UMB maps by probing 0xD0000-0xF0000 in 2KB steps,
 * classifying each chunk as ROM (skipped), RAM-like device memory
 * (rmapumbrw) or unused address space (rmapumb).
 */
static void
umbscan(void)
{
	uchar *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 * of 0x55 0xAA, followed by a byte giving the size of the ROM
	 * in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Test for 0x55 0xAA before poking obtrusively,
		 * some machines (e.g. Thinkpad X20) seem to map
		 * something dynamic here (cardbus?) causing weird
		 * problems if it is changed.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			/* ROM found: skip it by its declared length */
			p += p[2]*512;
			continue;
		}

		/* write probe: does the chunk hold what we store? */
		p[0] = 0xCC;
		p[2*KB-1] = 0xCC;
		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			/* not writable: try faking a ROM header to see if it sticks */
			p[0] = 0x55;
			p[1] = 0xAA;
			p[2] = 4;
			if(p[0] == 0x55 && p[1] == 0xAA){
				p += p[2]*512;
				continue;
			}
			/* reads back all ones: nothing here, free as UMB space */
			if(p[0] == 0xFF && p[1] == 0xFF)
				mapfree(&rmapumb, PADDR(p), 2*KB);
		}
		else
			/* read-write device memory */
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		p += 2*KB;
	}

	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}

	umbexclude();
}
/*
 * Add conventional memory (below 640KB) and the memory between the
 * kernel image and MemMinMB to the ram map, zeroing what we add.
 */
static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0MACH+BY2PG);
	bda = (uchar*)KADDR(0x400);
	/* BDA 0x13-0x14: base memory size in KB */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n);			/* keep us honest */

	x = PADDR(PGROUND((ulong)end));
	pa = MemMinMB*MB;
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);		/* keep us honest */
}
/*
 * Probe physical memory from MemMinMB up to maxmem (or a CMOS-derived
 * limit when maxmem is 0), building kernel page tables as we go and
 * classifying each MB as RAM, UMB or UPA. Fallback when the E820 BIOS
 * scan is unavailable.
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMinMB of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMinMB*MB;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMaxMB*MB;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */

	/*
	 * March up memory from MemMinMB to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;		/* saved: the probe clobbers *k0 below */
	map = 0;
	x = 0x12345678;
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * can only use KADDR for relatively low addresses. Instead,
	 * map each 4MB we scan to the virtual address range 4MB-8MB
	 * while we are scanning.
	 */
	vbase = 4*MB;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			/* fresh 4MB chunk: need a (possibly recycled) page table */
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));

		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);
			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 * if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 * initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				/* mixed chunk: keep the just-built page table */
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	/* everything from the scan limit to 4GB is unbacked address space */
	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}
/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
	SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',	/* 'SMAP' signature */
	Ememory = 1,		/* usable RAM */
	Ereserved = 2,		/* reserved by BIOS/hardware */
	Carry = 1,		/* carry flag in u.flags: BIOS call failed */
};

typedef struct Emap Emap;
/* One E820 address range descriptor, as returned by the BIOS. */
struct Emap
{
	uvlong base;
	uvlong len;
	ulong type;
};
static Emap emap[16];
int nemap;

/* printable names indexed by Emap.type */
static char *etypes[] =
{
	"type=0",
	"memory",
	"reserved",
	"acpi reclaim",
	"acpi nvs",
};
  479. static int
  480. emapcmp(const void *va, const void *vb)
  481. {
  482. Emap *a, *b;
  483. a = (Emap*)va;
  484. b = (Emap*)vb;
  485. if(a->base < b->base)
  486. return -1;
  487. if(a->base > b->base)
  488. return 1;
  489. if(a->len < b->len)
  490. return -1;
  491. if(a->len > b->len)
  492. return 1;
  493. return a->type - b->type;
  494. }
/*
 * Record one physical range of the given MemRAM/MemUMB/MemUPA/
 * MemReserved type: free it into the matching resource map and,
 * for cacheable types, map it into the kernel address space.
 * Recursively splits ranges that straddle the special boundaries
 * (4MB, 16MB, the kernel image).
 */
static void
map(ulong base, ulong len, int type)
{
	ulong e, n;
	ulong *table, flags, maxkpa;

	/*
	 * Split any call crossing 4*MB to make below simpler.
	 */
	if(base < 4*MB && len > 4*MB-base){
		n = 4*MB - base;
		map(base, n, type);
		map(4*MB, len-n, type);
	}

	/*
	 * Let lowraminit and umbscan hash out the low 4MB.
	 */
	if(base < 4*MB)
		return;

	/*
	 * Any non-memory below 16*MB is used as upper mem blocks.
	 */
	if(type == MemUPA && base < 16*MB && base+len > 16*MB){
		map(base, 16*MB-base, MemUMB);
		map(16*MB, len-(16*MB-base), MemUPA);
		return;
	}

	/*
	 * Memory below CPU0MACH is reserved for the kernel
	 * and already mapped.
	 */
	if(base < PADDR(CPU0MACH)+BY2PG){
		n = PADDR(CPU0MACH)+BY2PG - base;
		if(len <= n)
			return;
		/*
		 * NOTE(review): the recursive call restarts at PADDR(CPU0MACH)
		 * rather than PADDR(CPU0MACH)+BY2PG, so it re-enters this case
		 * and sheds BY2PG per recursion — confirm against upstream.
		 */
		map(PADDR(CPU0MACH), len-n, type);
		return;
	}

	/*
	 * Memory between KTZERO and end is the kernel itself
	 * and is already mapped.
	 */
	if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
		map(base, PADDR(KTZERO)-base, type);
		return;
	}
	if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
		/*
		 * NOTE(review): n is an absolute address here, yet is compared
		 * against len and subtracted from it — looks suspect but matches
		 * the historical source; verify before changing.
		 */
		n = PADDR(PGROUND((ulong)end));
		if(len <= n)
			return;
		map(PADDR(PGROUND((ulong)end)), len-n, type);
		return;
	}

	/*
	 * Now we have a simple case.
	 */
	// print("map %.8lux %.8lux %d\n", base, base+len, type);
	switch(type){
	case MemRAM:
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;
		break;
	case MemUMB:
		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;
		break;
	case MemUPA:
		mapfree(&rmapupa, base, len);
		flags = 0;
		break;
	default:
	case MemReserved:
		flags = 0;
		break;
	}

	/*
	 * bottom 4MB is already mapped - just twiddle flags.
	 * (not currently used - see above)
	 */
	if(base < 4*MB){
		table = KADDR(PPN(m->pdb[PDX(base)]));
		e = base+len;
		base = PPN(base);
		for(; base<e; base+=BY2PG)
			table[PTX(base)] |= flags;
		return;
	}

	/*
	 * Only map from KZERO to 2^32.
	 */
	if(flags){
		maxkpa = -KZERO;
		if(base >= maxkpa)
			return;
		if(len > maxkpa-base)
			len = maxkpa - base;
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	}
}
/*
 * Query the BIOS E820 memory map via realmode Int 0x15, sort the
 * entries, and feed the low-4GB portion to map(). Returns 0 on
 * success, -1 if the scan is disabled or yields nothing (caller
 * falls back to ramscan).
 */
static int
e820scan(void)
{
	int i;
	Ureg u;
	ulong cont, base, len;
	uvlong last;
	Emap *e;

	if(getconf("*norealmode") || getconf("*noe820scan"))
		return -1;

	cont = 0;
	for(i=0; i<nelem(emap); i++){
		memset(&u, 0, sizeof u);
		u.ax = 0xE820;
		u.bx = cont;		/* continuation value from previous call */
		u.cx = 20;		/* descriptor size */
		u.dx = SMAP;
		/* ES:DI points at RMBUF in real-mode addressing */
		u.es = (PADDR(RMBUF)>>4)&0xF000;
		u.di = PADDR(RMBUF)&0xFFFF;
		u.trap = 0x15;
		realmode(&u);

		cont = u.bx;
		/* carry set, bad signature or wrong size: BIOS is done (or broken) */
		if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
			break;

		e = &emap[nemap++];
		*e = *(Emap*)RMBUF;

		if(u.bx == 0)	/* bx==0 marks the final descriptor */
			break;
	}
	if(nemap == 0)
		return -1;

	qsort(emap, nemap, sizeof emap[0], emapcmp);

	for(i=0; i<nemap; i++){
		e = &emap[i];
		print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
		if(e->type < nelem(etypes))
			print("%s\n", etypes[e->type]);
		else
			print("type=%lud\n", e->type);
	}

	last = 0;
	for(i=0; i<nemap; i++){
		e = &emap[i];
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1LL<<32))
			break;
		base = e->base;
		if(base+e->len > (1LL<<32))
			len = -base;	/* clamp to the 4GB boundary */
		else
			len = e->len;
		/*
		 * If the map skips addresses, mark them available.
		 */
		if(last < e->base)
			map(last, e->base-last, MemUPA);
		last = base+len;
		if(e->type == Ememory)
			map(base, len, MemRAM);
		else
			map(base, len, MemReserved);
	}
	if(last < (1LL<<32))
		map(last, (u32int)-last, MemUPA);

	return 0;
}
/*
 * Size memory, build kernel mappings and fill in conf.mem. Called
 * once from main() on the bootstrap processor. Honours the *maxmem
 * configuration variable as an upper bound for the fallback scan.
 */
void
meminit(void)
{
	int i;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong maxmem, lost;
	char *p;

	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);
	else
		maxmem = 0;

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 * VGA memory is writethrough;
	 * BIOS ROM's/UMB's are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		/* NOTE(review): assumes mmuwalk cannot fail here (range is premapped) */
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();
	if(e820scan() < 0)
		ramscan(maxmem);

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
	}

	/* any ram-map entries beyond conf.mem's capacity are unusable */
	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}
  710. /*
  711. * Allocate memory from the upper memory blocks.
  712. */
  713. ulong
  714. umbmalloc(ulong addr, int size, int align)
  715. {
  716. ulong a;
  717. if(a = mapalloc(&rmapumb, addr, size, align))
  718. return (ulong)KADDR(a);
  719. return 0;
  720. }
/* Release a umbmalloc'd range (addr is a kernel virtual address). */
void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}
  726. ulong
  727. umbrwmalloc(ulong addr, int size, int align)
  728. {
  729. ulong a;
  730. uchar *p;
  731. if(a = mapalloc(&rmapumbrw, addr, size, align))
  732. return(ulong)KADDR(a);
  733. /*
  734. * Perhaps the memory wasn't visible before
  735. * the interface is initialised, so try again.
  736. */
  737. if((a = umbmalloc(addr, size, align)) == 0)
  738. return 0;
  739. p = (uchar*)a;
  740. p[0] = 0xCC;
  741. p[size-1] = 0xCC;
  742. if(p[0] == 0xCC && p[size-1] == 0xCC)
  743. return a;
  744. umbfree(a, size);
  745. return 0;
  746. }
/* Release a umbrwmalloc'd range (addr is a kernel virtual address). */
void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}
  752. /*
  753. * Give out otherwise-unused physical address space
  754. * for use in configuring devices. Note that unlike upamalloc
  755. * before it, upaalloc does not map the physical address
  756. * into virtual memory. Call vmap to do that.
  757. */
  758. ulong
  759. upaalloc(int size, int align)
  760. {
  761. ulong a;
  762. a = mapalloc(&rmapupa, 0, size, align);
  763. if(a == 0){
  764. print("out of physical address space allocating %d\n", size);
  765. mapprint(&rmapupa);
  766. }
  767. return a;
  768. }
/* Return a physical address range to the UPA pool. */
void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}
  774. void
  775. upareserve(ulong pa, int size)
  776. {
  777. ulong a;
  778. a = mapalloc(&rmapupa, pa, size, 0);
  779. if(a != pa){
  780. /*
  781. * This can happen when we're using the E820
  782. * map, which might have already reserved some
  783. * of the regions claimed by the pci devices.
  784. */
  785. // print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
  786. if(a != 0)
  787. mapfree(&rmapupa, a, size);
  788. }
  789. }
/* Public entry point for dumping the memory maps (see memdebug). */
void
memorysummary(void)
{
	memdebug();
}