/*
 * Size memory and create the kernel page-tables on the fly while doing so.
 * Called from main(), this code should only be run by the bootstrap processor.
 */
#include "u.h"
#include "../port/lib.h"
#include "mem.h"
#include "dat.h"
#include "fns.h"
#include "io.h"
#include "ureg.h"

#define MEMDEBUG	0
/*
 * Memory-type classification and sizing constants.
 */
enum {
	MemUPA		= 0,		/* unbacked physical address */
	MemRAM		= 1,		/* physical memory */
	MemUMB		= 2,		/* upper memory block (<16MB) */
	MemReserved	= 3,
	NMemType	= 4,

	KB		= 1024,

	MemMinMB	= 4,		/* minimum physical memory (<=4MB) */
	MemMaxMB	= 3*1024+768,	/* maximum physical memory to check */

	NMemBase	= 10,
};
/* one free extent in a resource map; size == 0 terminates the array */
typedef struct Map Map;
struct Map {
	ulong	size;		/* length of the extent in bytes */
	ulong	addr;		/* physical base address of the extent */
};

/* a lockable, sorted array of free extents */
typedef struct RMap RMap;
struct RMap {
	char*	name;		/* description, used in diagnostics */
	Map*	map;		/* first entry of the backing array */
	Map*	mapend;		/* last slot, kept free as the zero terminator */
	Lock;			/* Plan 9 anonymous lock protecting the entries */
};
/*
 * Memory allocation tracking.
 * Each table below backs one resource map; the last array slot is
 * reserved as the zero terminator (see mapend in RMap).
 */
static Map mapupa[16];
static RMap rmapupa = {
	"unallocated unbacked physical memory",
	mapupa,
	&mapupa[nelem(mapupa)-1],
};

static Map xmapupa[16];
static RMap xrmapupa = {
	"unbacked physical memory",
	xmapupa,
	&xmapupa[nelem(xmapupa)-1],
};

static Map mapram[16];
static RMap rmapram = {
	"physical memory",
	mapram,
	&mapram[nelem(mapram)-1],
};

static Map mapumb[64];
static RMap rmapumb = {
	"upper memory block",
	mapumb,
	&mapumb[nelem(mapumb)-1],
};

static Map mapumbrw[16];
static RMap rmapumbrw = {
	"UMB device memory",
	mapumbrw,
	&mapumbrw[nelem(mapumbrw)-1],
};
  69. void
  70. mapprint(RMap *rmap)
  71. {
  72. Map *mp;
  73. print("%s\n", rmap->name);
  74. for(mp = rmap->map; mp->size; mp++)
  75. print("\t%8.8luX %8.8luX (%lud)\n", mp->addr, mp->addr+mp->size, mp->size);
  76. }
/*
 * Dump the CMOS memory-size registers and the current contents of
 * the RAM, UMB, UMB-rw and UPA resource maps. Compiled in always,
 * called from meminit only when MEMDEBUG is set (and from
 * memorysummary).
 */
void
memdebug(void)
{
	ulong maxpa, maxpa1, maxpa2;

	/* CMOS extended-memory size: KB above 1MB (two copies), plus 0x15/0x16 */
	maxpa = (nvramread(0x18)<<8)|nvramread(0x17);
	maxpa1 = (nvramread(0x31)<<8)|nvramread(0x30);
	maxpa2 = (nvramread(0x16)<<8)|nvramread(0x15);
	print("maxpa = %luX -> %luX, maxpa1 = %luX maxpa2 = %luX\n",
		maxpa, MB+maxpa*KB, maxpa1, maxpa2);

	mapprint(&rmapram);
	mapprint(&rmapumb);
	mapprint(&rmapumbrw);
	mapprint(&rmapupa);
}
/*
 * Return the extent [addr, addr+size) to the free map rmap,
 * coalescing with adjacent free extents where possible. If the
 * map array overflows, the extent is dropped with a warning.
 */
void
mapfree(RMap* rmap, ulong addr, ulong size)
{
	Map *mp;
	ulong t;

	if(size <= 0)
		return;

	lock(rmap);
	/* entries are sorted by addr: find the first entry above addr */
	for(mp = rmap->map; mp->addr <= addr && mp->size; mp++)
		;

	if(mp > rmap->map && (mp-1)->addr+(mp-1)->size == addr){
		/* abuts the previous extent: extend it upwards... */
		(mp-1)->size += size;
		if(addr+size == mp->addr){
			/* ...and now also abuts the next: merge, shift tail down */
			(mp-1)->size += mp->size;
			while(mp->size){
				mp++;
				(mp-1)->addr = mp->addr;
				(mp-1)->size = mp->size;
			}
		}
	}
	else{
		if(addr+size == mp->addr && mp->size){
			/* abuts the next extent: grow it downwards */
			mp->addr -= size;
			mp->size += size;
		}
		else do{
			/* insert here, rippling later entries up one slot */
			if(mp >= rmap->mapend){
				print("mapfree: %s: losing 0x%luX, %ld\n",
					rmap->name, addr, size);
				break;
			}
			t = mp->addr;
			mp->addr = addr;
			addr = t;
			t = mp->size;
			mp->size = size;
			mp++;
		}while(size = t);	/* assignment intended: carry the displaced entry */
	}
	unlock(rmap);
}
/*
 * Allocate size bytes from rmap, aligned to a multiple of align
 * when align > 0. If addr is non-zero the specific range
 * [addr, addr+size) is requested. Returns the allocated physical
 * address, or 0 if the request cannot be satisfied.
 */
ulong
mapalloc(RMap* rmap, ulong addr, int size, int align)
{
	Map *mp;
	ulong maddr, oaddr;

	lock(rmap);
	for(mp = rmap->map; mp->size; mp++){
		maddr = mp->addr;

		if(addr){
			/*
			 * A specific address range has been given:
			 *   if the current map entry is greater then
			 *   the address is not in the map;
			 *   if the current map entry does not overlap
			 *   the beginning of the requested range then
			 *   continue on to the next map entry;
			 *   if the current map entry does not entirely
			 *   contain the requested range then the range
			 *   is not in the map.
			 */
			if(maddr > addr)
				break;
			if(mp->size < addr - maddr)	/* maddr+mp->size < addr, but no overflow */
				continue;
			if(addr - maddr > mp->size - size)	/* addr+size > maddr+mp->size, but no overflow */
				break;
			maddr = addr;
		}

		if(align > 0)
			maddr = ((maddr+align-1)/align)*align;
		if(mp->addr+mp->size-maddr < size)
			continue;

		/* take [maddr, maddr+size); the entry keeps what lies above */
		oaddr = mp->addr;
		mp->addr = maddr+size;
		mp->size -= maddr-oaddr+size;
		if(mp->size == 0){
			/* entry now empty: shift the rest of the array down */
			do{
				mp++;
				(mp-1)->addr = mp->addr;
			}while((mp-1)->size = mp->size);	/* assignment intended */
		}

		unlock(rmap);
		/* give back any gap skipped below maddr for alignment */
		if(oaddr != maddr)
			mapfree(rmap, oaddr, maddr-oaddr);

		return maddr;
	}
	unlock(rmap);

	return 0;
}
  182. /*
  183. * Allocate from the ram map directly to make page tables.
  184. * Called by mmuwalk during e820scan.
  185. */
  186. void*
  187. rampage(void)
  188. {
  189. ulong m;
  190. m = mapalloc(&rmapram, 0, BY2PG, BY2PG);
  191. if(m == 0)
  192. return nil;
  193. return KADDR(m);
  194. }
/*
 * Scan the BIOS/option-ROM region for unused upper memory blocks,
 * adding empty address space to rmapumb and writable device memory
 * to rmapumbrw.
 */
static void
umbscan(void)
{
	uchar *p;

	/*
	 * Scan the Upper Memory Blocks (0xA0000->0xF0000) for pieces
	 * which aren't used; they can be used later for devices which
	 * want to allocate some virtual address space.
	 * Check for two things:
	 * 1) device BIOS ROM. This should start with a two-byte header
	 *    of 0x55 0xAA, followed by a byte giving the size of the ROM
	 *    in 512-byte chunks. These ROM's must start on a 2KB boundary.
	 * 2) device memory. This is read-write.
	 * There are some assumptions: there's VGA memory at 0xA0000 and
	 * the VGA BIOS ROM is at 0xC0000. Also, if there's no ROM signature
	 * at 0xE0000 then the whole 64KB up to 0xF0000 is theoretically up
	 * for grabs; check anyway.
	 */
	p = KADDR(0xD0000);
	while(p < (uchar*)KADDR(0xE0000)){
		/*
		 * Test for 0x55 0xAA before poking obtrusively,
		 * some machines (e.g. Thinkpad X20) seem to map
		 * something dynamic here (cardbus?) causing weird
		 * problems if it is changed.
		 */
		if(p[0] == 0x55 && p[1] == 0xAA){
			/* ROM header found: skip its declared length */
			p += p[2]*512;
			continue;
		}

		/* probe: if the writes stick this is writable device memory */
		p[0] = 0xCC;
		p[2*KB-1] = 0xCC;
		if(p[0] != 0xCC || p[2*KB-1] != 0xCC){
			/* writes didn't stick: retest for a ROM that just appeared */
			p[0] = 0x55;
			p[1] = 0xAA;
			p[2] = 4;
			if(p[0] == 0x55 && p[1] == 0xAA){
				p += p[2]*512;
				continue;
			}
			/* reads back as bus float (0xFF): nothing here, usable UMB */
			if(p[0] == 0xFF && p[1] == 0xFF)
				mapfree(&rmapumb, PADDR(p), 2*KB);
		}
		else
			mapfree(&rmapumbrw, PADDR(p), 2*KB);
		p += 2*KB;
	}

	p = KADDR(0xE0000);
	if(p[0] != 0x55 || p[1] != 0xAA){
		p[0] = 0xCC;
		p[64*KB-1] = 0xCC;
		/*
		 * NOTE(review): uses && (both probes must fail to stick) where
		 * the 2KB loop above uses || — presumably a deliberately
		 * conservative test for a wholly-empty 64KB; confirm.
		 */
		if(p[0] != 0xCC && p[64*KB-1] != 0xCC)
			mapfree(&rmapumb, PADDR(p), 64*KB);
	}
}
/*
 * Free into rmapram the usable conventional (<640KB) memory above
 * the boot processor's Mach page, and the memory between the end
 * of the kernel image and MemMinMB, zeroing both ranges.
 */
static void
lowraminit(void)
{
	ulong n, pa, x;
	uchar *bda;

	/*
	 * Initialise the memory bank information for conventional memory
	 * (i.e. less than 640KB). The base is the first location after the
	 * bootstrap processor MMU information and the limit is obtained from
	 * the BIOS data area.
	 */
	x = PADDR(CPU0MACH+BY2PG);
	bda = (uchar*)KADDR(0x400);
	/* BDA bytes 0x13/0x14: base memory size in KB */
	n = ((bda[0x14]<<8)|bda[0x13])*KB-x;
	mapfree(&rmapram, x, n);
	memset(KADDR(x), 0, n);			/* keep us honest */

	/* the page-rounded end of the kernel up to MemMinMB is also free */
	x = PADDR(PGROUND((ulong)end));
	pa = MemMinMB*MB;
	mapfree(&rmapram, x, pa-x);
	memset(KADDR(x), 0, pa-x);		/* keep us honest */
}
/*
 * Probe physical memory from MemMinMB up to maxmem (or a limit
 * derived from CMOS when maxmem is 0), building kernel page tables
 * on the fly and adding each megabyte to the RAM, UMB or UPA
 * resource map according to how it responds to a write/read test.
 * Fallback used only when the E820 scan fails.
 */
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMinMB of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMinMB*MB;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMaxMB*MB;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */

	/*
	 * March up memory from MemMinMB to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;		/* saved: the mirror test below clobbers it */
	map = 0;		/* PA of the page table currently being filled */
	x = 0x12345678;		/* changing test pattern */
	memset(nvalid, 0, sizeof(nvalid));

	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * can only use KADDR for relatively low addresses. Instead,
	 * map each 4MB we scan to the virtual address range 4MB-8MB
	 * while we are scanning.
	 */
	vbase = 4*MB;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed. Map in a fixed range (the
		 * second 4M) because high physical addresses cannot be
		 * passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));

		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}

		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory. Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;	/* 4MB page (PSE) */
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				*table = map|PTEWRITE|PTEVALID;
				map = 0;	/* committed: allocate a fresh table next chunk */
			}
		}

		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;		/* vary the pattern so mirrors are caught */
	}

	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped. Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;		/* tear down the scanning window */
	mmuflushtlb(PADDR(m->pdb));

	/* everything above the last probed address is unbacked space */
	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;			/* restore the word clobbered by the mirror test */
}
/*
 * BIOS Int 0x15 E820 memory map.
 */
enum
{
	SMAP = ('S'<<24)|('M'<<16)|('A'<<8)|'P',	/* "SMAP" signature for %eax */
	Ememory = 1,		/* E820 range type: usable RAM */
	Ereserved = 2,		/* E820 range type: reserved */
	Carry = 1,		/* carry bit in the returned FLAGS */
};
/* one 20-byte E820 descriptor as returned by the BIOS */
typedef struct Emap Emap;
struct Emap
{
	uvlong	base;	/* start of the range */
	uvlong	len;	/* length in bytes */
	ulong	type;	/* Ememory, Ereserved, ... */
};
/* raw E820 entries gathered by e820scan, sorted via emapcmp */
static Emap emap[16];
int nemap;
/* printable names for the low E820 type codes */
static char *etypes[] =
{
	"type=0",
	"memory",
	"reserved",
	"acpi reclaim",
	"acpi nvs",
};
  451. static int
  452. emapcmp(const void *va, const void *vb)
  453. {
  454. Emap *a, *b;
  455. a = (Emap*)va;
  456. b = (Emap*)vb;
  457. if(a->base < b->base)
  458. return -1;
  459. if(a->base > b->base)
  460. return 1;
  461. if(a->len < b->len)
  462. return -1;
  463. if(a->len > b->len)
  464. return 1;
  465. return a->type - b->type;
  466. }
  467. static void
  468. map(ulong base, ulong len, int type)
  469. {
  470. ulong e, n;
  471. ulong *table, flags, maxkpa;
  472. /*
  473. * Split any call crossing 4*MB to make below simpler.
  474. */
  475. if(base < 4*MB && len > 4*MB-base){
  476. n = 4*MB - base;
  477. map(base, n, type);
  478. map(4*MB, len-n, type);
  479. }
  480. /*
  481. * Let lowraminit and umbscan hash out the low 4MB.
  482. */
  483. if(base < 4*MB)
  484. return;
  485. /*
  486. * Any non-memory below 16*MB is used as upper mem blocks.
  487. */
  488. if(type == MemUPA && base < 16*MB && base+len > 16*MB){
  489. map(base, 16*MB-base, MemUMB);
  490. map(16*MB, len-(16*MB-base), MemUPA);
  491. return;
  492. }
  493. /*
  494. * Memory below CPU0MACH is reserved for the kernel
  495. * and already mapped.
  496. */
  497. if(base < PADDR(CPU0MACH)+BY2PG){
  498. n = PADDR(CPU0MACH)+BY2PG - base;
  499. if(len <= n)
  500. return;
  501. map(PADDR(CPU0MACH), len-n, type);
  502. return;
  503. }
  504. /*
  505. * Memory between KTZERO and end is the kernel itself
  506. * and is already mapped.
  507. */
  508. if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
  509. map(base, PADDR(KTZERO)-base, type);
  510. return;
  511. }
  512. if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
  513. n = PADDR(PGROUND((ulong)end));
  514. if(len <= n)
  515. return;
  516. map(PADDR(PGROUND((ulong)end)), len-n, type);
  517. return;
  518. }
  519. /*
  520. * Now we have a simple case.
  521. */
  522. // print("map %.8lux %.8lux %d\n", base, base+len, type);
  523. switch(type){
  524. case MemRAM:
  525. mapfree(&rmapram, base, len);
  526. flags = PTEWRITE|PTEVALID;
  527. break;
  528. case MemUMB:
  529. mapfree(&rmapumb, base, len);
  530. flags = PTEWRITE|PTEUNCACHED|PTEVALID;
  531. break;
  532. case MemUPA:
  533. mapfree(&rmapupa, base, len);
  534. flags = 0;
  535. break;
  536. default:
  537. case MemReserved:
  538. flags = 0;
  539. break;
  540. }
  541. /*
  542. * bottom 4MB is already mapped - just twiddle flags.
  543. * (not currently used - see above)
  544. */
  545. if(base < 4*MB){
  546. table = KADDR(PPN(m->pdb[PDX(base)]));
  547. e = base+len;
  548. base = PPN(base);
  549. for(; base<e; base+=BY2PG)
  550. table[PTX(base)] |= flags;
  551. return;
  552. }
  553. /*
  554. * Only map from KZERO to 2^32.
  555. */
  556. if(flags){
  557. maxkpa = -KZERO;
  558. if(base >= maxkpa)
  559. return;
  560. if(len > maxkpa-base)
  561. len = maxkpa - base;
  562. pdbmap(m->pdb, base|flags, base+KZERO, len);
  563. }
  564. }
/*
 * Fetch the BIOS E820 memory map by repeatedly invoking real-mode
 * INT 0x15 AX=E820h, then hand the ranges (and the gaps between
 * them) to map(). Returns 0 on success, -1 if the scan is disabled
 * by configuration or yields no entries — in which case the caller
 * falls back to ramscan().
 */
static int
e820scan(void)
{
	int i;
	Ureg u;
	ulong cont, base, len;
	uvlong last;
	Emap *e;

	if(getconf("*norealmode") || getconf("*noe820scan"))
		return -1;

	cont = 0;
	for(i=0; i<nelem(emap); i++){
		memset(&u, 0, sizeof u);
		u.ax = 0xE820;
		u.bx = cont;	/* continuation value from the previous call */
		u.cx = 20;	/* size of one E820 descriptor */
		u.dx = SMAP;
		u.es = (PADDR(RMBUF)>>4)&0xF000;	/* real-mode seg:off of RMBUF */
		u.di = PADDR(RMBUF)&0xFFFF;
		u.trap = 0x15;
		realmode(&u);

		cont = u.bx;
		/* the BIOS signals failure with carry or a bad signature/size */
		if((u.flags&Carry) || u.ax != SMAP || u.cx != 20)
			break;
		e = &emap[nemap++];
		*e = *(Emap*)RMBUF;
		if(u.bx == 0)	/* ebx==0 marks the last descriptor */
			break;
	}
	if(nemap == 0)
		return -1;

	qsort(emap, nemap, sizeof emap[0], emapcmp);

	for(i=0; i<nemap; i++){
		e = &emap[i];
		print("E820: %.8llux %.8llux ", e->base, e->base+e->len);
		if(e->type < nelem(etypes))
			print("%s\n", etypes[e->type]);
		else
			print("type=%lud\n", e->type);
	}

	last = 0;
	for(i=0; i<nemap; i++){
		e = &emap[i];
		/*
		 * pull out the info but only about the low 32 bits...
		 */
		if(e->base >= (1LL<<32))
			break;
		base = e->base;
		if(base+e->len > (1LL<<32))
			len = -base;	/* clamp to the end of the 32-bit space */
		else
			len = e->len;
		/*
		 * If the map skips addresses, mark them available.
		 */
		if(last < e->base)
			map(last, e->base-last, MemUPA);
		last = base+len;
		if(e->type == Ememory)
			map(base, len, MemRAM);
		else
			map(base, len, MemReserved);
	}
	/* anything above the final entry (up to 4GB) is unbacked */
	if(last < (1LL<<32))
		map(last, (u32int)-last, MemUPA);
	return 0;
}
/*
 * Size and initialise physical memory: set cacheability attributes
 * for the 640KB-1MB region, scan for UMBs and low RAM, size the
 * rest of memory from the E820 map (probing with ramscan if that
 * fails), then fill in conf.mem with the banks of allocatable RAM.
 */
void
meminit(void)
{
	int i;
	Map *mp;
	Confmem *cm;
	ulong pa, *pte;
	ulong maxmem, lost;
	char *p;

	/* "*maxmem" in plan9.ini caps the probed memory size */
	if(p = getconf("*maxmem"))
		maxmem = strtoul(p, 0, 0);
	else
		maxmem = 0;

	/*
	 * Set special attributes for memory between 640KB and 1MB:
	 *   VGA memory is writethrough;
	 *   BIOS ROM's/UMB's are uncached;
	 * then scan for useful memory.
	 */
	for(pa = 0xA0000; pa < 0xC0000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEWT;
	}
	for(pa = 0xC0000; pa < 0x100000; pa += BY2PG){
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 0);
		*pte |= PTEUNCACHED;
	}
	mmuflushtlb(PADDR(m->pdb));

	umbscan();
	lowraminit();
	if(e820scan() < 0)
		ramscan(maxmem);

	/*
	 * Set the conf entries describing banks of allocatable memory.
	 */
	for(i=0; i<nelem(mapram) && i<nelem(conf.mem); i++){
		mp = &rmapram.map[i];
		cm = &conf.mem[i];
		cm->base = mp->addr;
		cm->npage = mp->size/BY2PG;
	}

	/* any RAM extents beyond nelem(conf.mem) cannot be used */
	lost = 0;
	for(; i<nelem(mapram); i++)
		lost += rmapram.map[i].size;
	if(lost)
		print("meminit - lost %lud bytes\n", lost);

	if(MEMDEBUG)
		memdebug();
}
  682. /*
  683. * Allocate memory from the upper memory blocks.
  684. */
  685. ulong
  686. umbmalloc(ulong addr, int size, int align)
  687. {
  688. ulong a;
  689. if(a = mapalloc(&rmapumb, addr, size, align))
  690. return (ulong)KADDR(a);
  691. return 0;
  692. }
/*
 * Return size bytes at kernel virtual address addr (as returned
 * by umbmalloc) to the upper memory block pool.
 */
void
umbfree(ulong addr, int size)
{
	mapfree(&rmapumb, PADDR(addr), size);
}
  698. ulong
  699. umbrwmalloc(ulong addr, int size, int align)
  700. {
  701. ulong a;
  702. uchar *p;
  703. if(a = mapalloc(&rmapumbrw, addr, size, align))
  704. return(ulong)KADDR(a);
  705. /*
  706. * Perhaps the memory wasn't visible before
  707. * the interface is initialised, so try again.
  708. */
  709. if((a = umbmalloc(addr, size, align)) == 0)
  710. return 0;
  711. p = (uchar*)a;
  712. p[0] = 0xCC;
  713. p[size-1] = 0xCC;
  714. if(p[0] == 0xCC && p[size-1] == 0xCC)
  715. return a;
  716. umbfree(a, size);
  717. return 0;
  718. }
/*
 * Return size bytes at kernel virtual address addr (as returned
 * by umbrwmalloc) to the read-write UMB pool.
 */
void
umbrwfree(ulong addr, int size)
{
	mapfree(&rmapumbrw, PADDR(addr), size);
}
  724. /*
  725. * Give out otherwise-unused physical address space
  726. * for use in configuring devices. Note that unlike upamalloc
  727. * before it, upaalloc does not map the physical address
  728. * into virtual memory. Call vmap to do that.
  729. */
  730. ulong
  731. upaalloc(int size, int align)
  732. {
  733. ulong a;
  734. a = mapalloc(&rmapupa, 0, size, align);
  735. if(a == 0){
  736. print("out of physical address space allocating %d\n", size);
  737. mapprint(&rmapupa);
  738. }
  739. return a;
  740. }
/*
 * Return a physical address range obtained from upaalloc to
 * the unbacked physical address pool.
 */
void
upafree(ulong pa, int size)
{
	mapfree(&rmapupa, pa, size);
}
  746. void
  747. upareserve(ulong pa, int size)
  748. {
  749. ulong a;
  750. a = mapalloc(&rmapupa, pa, size, 0);
  751. if(a != pa){
  752. /*
  753. * This can happen when we're using the E820
  754. * map, which might have already reserved some
  755. * of the regions claimed by the pci devices.
  756. */
  757. // print("upareserve: cannot reserve pa=%#.8lux size=%d\n", pa, size);
  758. if(a != 0)
  759. mapfree(&rmapupa, a, size);
  760. }
  761. }
/*
 * Public wrapper: print the CMOS memory sizes and the contents
 * of all the resource maps.
 */
void
memorysummary(void)
{
	memdebug();
}