#include "mem.h"
#include "amd64.h"
#ifndef __ASSEMBLER__
#define __ASSEMBLER__
#endif
// It gets REALLY ugly to try to link this at some low address and then have the rest of the
// kernel linked high. Really, really ugly. And that defeats any attempt to load at a random
// address. So, you have to learn to write position independent code here.
// It will make you stronger. Assuming you survive the training.
.code32
#define pFARJMP32(s, o) .byte 0xea; .long o; .word s /* far jump to ptr16:32 */
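/*
 * pFARJMP32 hand-assembles "ljmp $s, $o": opcode 0xea, then the 4-byte
 * offset, then the 2-byte selector (a ptr16:32 operand is stored
 * offset-first in memory).
 */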
/* do we enter in 16-bit mode? If so, take the code from coreboot that goes from
 * 16->32
 */
/*
 * Enter here in 32-bit protected mode. Welcome to 1982.
 * Make sure the GDT is set as it should be:
 * disable interrupts;
 * load the GDT with the table in _gdt32p;
 * load all the data segments;
 * load the code segment via a far jump.
 */
#define MULTIBOOT_PAGE_ALIGN (1<<0)
#define MULTIBOOT_MEMORY_INFO (1<<1)
#define MULTIBOOT_HEADER_MAGIC (0x1BADB002)
#define MULTIBOOT_HEADER_FLAGS (MULTIBOOT_MEMORY_INFO | MULTIBOOT_PAGE_ALIGN)
#define CHECKSUM (-(MULTIBOOT_HEADER_MAGIC + MULTIBOOT_HEADER_FLAGS))
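/*
 * A multiboot loader checks that magic + flags + checksum == 0 (mod 2^32);
 * here that is 0x1BADB002 + 0x00000003 + 0xE4524FFB == 0.
 */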
# The kernel bootstrap (this code) is linked and loaded at physical address
# 0x00100000 (1MB), which is the start of extended memory. (See kernel.ld)
# Flagging boottext to be text. Check out:
# http://sourceware.org/binutils/docs/as/Section.html
.section .boottext, "awx"
.code32
.align 4
_protected:
multiboot_header:
.long MULTIBOOT_HEADER_MAGIC
.long MULTIBOOT_HEADER_FLAGS
.long CHECKSUM
.globl _start
_start:
cli
jmp 1f
/* This is the GDT for the ROM stage part of coreboot. It
 * is different from the RAM stage GDT which is defined in
 * c_start.S
 */
.align 4
.globl gdtptr
gdt:
gdtptr:
.word gdt_end - gdt - 1 /* compute the table limit */
.long gdt /* we know the offset */
.word 0
/* selgdt 0x08, flat code segment */
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0xcf, 0x00 /* G=1, limit 0xfffff, so we get a 4 GiB limit */
/* selgdt 0x10, flat data segment */
.word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
/* long mode code segment. */
.quad 0x0020980000000000 /* Long mode CS */
gdt_end:
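/*
 * Decoding the flat descriptors above: .word 0xffff is limit 15:0 and
 * the base is 0; 0x9b/0x93 are the access bytes (present, ring 0,
 * code read/execute vs. data read/write); 0xcf sets G=1 (4 KiB
 * granularity) and limit 19:16 = 0xf, giving a 0-4 GiB segment. In the
 * long mode CS, 0x0020980000000000, only L=1 (64-bit), P=1 and the code
 * type matter; base and limit are ignored in long mode.
 */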
/*
 * When we get here we are in protected mode. We set up
 * the stack and copy the data segment from ROM to
 * memory.
 *
 * After that, we call the chipset bootstrap routine that
 * does what is left of the chipset initialization.
 *
 * NOTE: aligned to 4 so that we are sure that the prefetch
 * cache will be reloaded.
 */
.align 4
1:
// jmp 1b
.globl protected_start
protected_start:
lgdt %cs:gdtptr
ljmp $8, $__protected_start
__protected_start:
/* Save the BIST value */
movl %eax, %ebp
movw $0x10, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %ss
movw %ax, %fs
movw %ax, %gs
/* Restore the BIST value to %eax */
movl %ebp, %eax
entry32:
1:
movb $0x30, %al
movw $0x30, %dx
outb %al, %dx /* debug output to port 0x30 */
// This gets us into a reasonable mode. We can skip the Plan 9 gdt code.
call 1f
1:
popl %ebp
/* After the popl, %ebp holds the address of the label 1: above
 * (the return address pushed by the call).
 * Add the length of this instruction and the
 * 5 bytes of the jmp that follows it, so that
 * it points to the start of the header.
 */
addl $12, %ebp
/* Now make it point to _gdt32p (gdt, 32 bits, physical) */
addl $14, %ebp
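/*
 * The call/pop pair is the usual 32-bit substitute for RIP-relative
 * addressing: the call pushes the address of 1:, popl retrieves it,
 * and the constant adjustments turn it into the physical address of
 * nearby symbols without any link-time relocation.
 */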
jmp _endofheader
_startofheader:
.byte 0x90 /* NOP */
.byte 0x90 /* NOP */
_multibootheader: /* must be 4-byte aligned */
.long 0x1badb002 /* magic */
.long 0x00000003 /* flags */
.long -(0x1badb002 + 0x00000003) /* checksum */
_gdt32p:
.quad 0x0000000000000000 /* NULL descriptor */
.quad 0x00cf9a000000ffff /* CS */
.quad 0x00cf92000000ffff /* DS */
.quad 0x0020980000000000 /* Long mode CS */
_gdtptr32p:
.word 4*8-1
.long _gdt32p
_gdt64p:
.quad 0x0000000000000000 /* NULL descriptor */
.quad 0x0020980000000000 /* CS */
_gdtptr64p:
.word 2*8-1
.quad _gdt64p
_endofheader:
pushl %eax /* possible passed-in magic */
/*
 * Make the basic page tables for CPU0 to map 0-12MiB physical
 * to KZERO, and include an identity map for the switch from protected
 * to paging mode. There's an assumption here that the creation and later
 * removal of the identity map will not interfere with the KZERO mappings;
 * the conditions for clearing the identity map are:
 * clear the PML4 entry when (KZERO & 0x0000ff8000000000) != 0;
 * clear the PDP entry when (KZERO & 0x0000007fc0000000) != 0;
 * don't clear the PD entry when (KZERO & 0x000000003fe00000) == 0;
 * the code below assumes these conditions are met.
 *
 * Assume a recent processor with Page Size Extensions
 * and use 2MiB entries.
 */
/*
 * The layout is described in data.h:
 * _protected: start of kernel text
 * - 4*KiB unused
 * - 4*KiB unused
 * - 4*KiB ptrpage
 * - 4*KiB syspage
 * - MACHSZ m
 * - 4*KiB vsvmpage for gdt, tss
 * - PTSZ PT for PMAPADDR unused - assumes in KZERO PD
 * - PTSZ PD
 * - PTSZ PDP
 * - PTSZ PML4
 * - MACHSTKSZ stack
 */
/*
 * Macros for accessing page table entries; change the
 * C-style array-index macros into a page table byte offset.
 */
#define PML4O(v) ((PTLX((v), 3))<<3)
#define PDPO(v) ((PTLX((v), 2))<<3)
#define PDO(v) ((PTLX((v), 1))<<3)
#define PTO(v) ((PTLX((v), 0))<<3)
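/*
 * For example (a sketch, assuming the usual mem.h definition
 * PTLX(v, l) == (((v)>>(((l)*PTSHFT)+PGSHFT)) & ((1<<PTSHFT)-1))
 * with PTSHFT == 9 and PGSHFT == 12): PML4O(KZERO) extracts bits
 * 47:39 of KZERO and multiplies by 8, i.e. the byte offset of
 * KZERO's entry within the PML4 page.
 */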
_warp64:
movl $_protected-(MACHSTKSZ+4*PTSZ+5*(4*KiB)+MACHSZ/*+KZERO*/), %esi
movl %esi, %edi
xorl %eax, %eax
movl $((MACHSTKSZ+4*PTSZ+5*(4*KiB)+MACHSZ)>>2), %ecx
cld
rep; stosl /* clear stack, P*, vsvm, m, sys */
movl %esi, %eax /* sys-KZERO */
addl $(MACHSTKSZ), %eax /* PML4 */
movl %eax, %cr3 /* load the mmu */
movl %eax, %edx
addl $(PTSZ|PteRW|PteP), %edx /* PDP at PML4 + PTSZ */
movl %edx, PML4O(0)(%eax) /* PML4E for identity map */
movl %edx, PML4O(KZERO)(%eax) /* PML4E for KZERO, PMAPADDR */
addl $PTSZ, %eax /* PDP at PML4 + PTSZ */
addl $PTSZ, %edx /* PD at PML4 + 2*PTSZ */
movl %edx, PDPO(0)(%eax) /* PDPE for identity map */
movl %edx, PDPO(KZERO)(%eax) /* PDPE for KZERO, PMAPADDR */
addl $PTSZ, %eax /* PD at PML4 + 2*PTSZ */
movl $(PtePS|PteRW|PteP), %edx
movl %edx, PDO(0)(%eax) /* PDE for identity 0-2MiB */
movl %edx, PDO(KZERO)(%eax) /* PDE for KZERO 0-2MiB */
addl $PGLSZ(1), %edx
movl %edx, PDO(KZERO+PGLSZ(1))(%eax) /* PDE for KZERO 2-4MiB */
addl $PGLSZ(1), %edx
movl %edx, PDO(KZERO+PGLSZ(1)+PGLSZ(1))(%eax) /* PDE for KZERO 4-6MiB */
addl $PGLSZ(1), %edx
movl %edx, PDO(KZERO+PGLSZ(1)+PGLSZ(1)+PGLSZ(1))(%eax) /* PDE for KZERO 6-8MiB */
// and up through 12MiB. This sucks, we'll make it better later.
// We'll just have to init the PML2 at compile time. Apologies.
addl $PGLSZ(1), %edx
movl %edx, PDO(KZERO+PGLSZ(1)+PGLSZ(1)+PGLSZ(1)+PGLSZ(1))(%eax) /* PDE for KZERO 8-10MiB */
addl $PGLSZ(1), %edx
movl %edx, PDO(KZERO+PGLSZ(1)+PGLSZ(1)+PGLSZ(1)+PGLSZ(1)+PGLSZ(1))(%eax) /* PDE for KZERO 10-12MiB */
movl %eax, %edx /* PD at PML4 + 2*PTSZ */
addl $(PTSZ|PteRW|PteP), %edx /* PT at PML4 + 3*PTSZ */
movl %edx, PDO(PMAPADDR)(%eax) /* PDE for PMAPADDR */
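/*
 * The resulting tree (all physical; the identity and KZERO slots share
 * the same PDP and PD pages):
 * PML4[0] and PML4[KZERO] -> PDP
 * PDP[0] and PDP[KZERO] -> PD
 * PD slots 0-5 -> 2MiB pages covering physical 0-12MiB
 * (the identity map proper only needs the first)
 * PD[PMAPADDR] -> PT, reserved for the kernel's transient mappings
 */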
/*
 * Enable and activate Long Mode. From the manual:
 * make sure Page Size Extensions are off, and Page Global
 * Extensions and Physical Address Extensions are on in CR4;
 * set Long Mode Enable in the Extended Feature Enable MSR;
 * set Paging Enable in CR0;
 * make an inter-segment jump to the Long Mode code.
 * It's all in 32-bit mode until the jump is made.
 */
lme:
movl %cr4, %eax
andl $~Pse, %eax /* Page Size */
orl $(Pge|Pae), %eax /* Page Global, Phys. Address */
movl %eax, %cr4
movl $Efer, %ecx /* Extended Feature Enable */
rdmsr
orl $Lme, %eax /* Long Mode Enable */
wrmsr
movl %cr0, %edx
andl $~(Cd|Nw|Ts|Mp), %edx
orl $(Pg|Wp), %edx /* Paging Enable */
movl %edx, %cr0
ljmp $0x18, $_identity
//pFARJMP32(SSEL(3, SsTIGDT|SsRPL0), _identity-KZERO)
/*
 * Long mode. Welcome to 2003.
 * Jump out of the identity map space;
 * load a proper long mode GDT.
 */
.code64
_identity:
movq $_start64v, %rax
jmp *%rax
.section .text
_gdt64v:
.quad 0x0000000000000000 /* NULL descriptor */
.quad 0x0020980000000000 /* CS */
_gdtptr64v:
.word 2*8-1 /* limit: NULL descriptor + CS */
.quad _gdt64v
// At this point, we are safe to use kernel addresses, as we are in
// kernel virtual address space.
_start64v:
movq $_gdtptr64v, %rax
lgdt (%rax)
xorq %rdx, %rdx
movw %dx, %ds /* not used in long mode */
movw %dx, %es /* not used in long mode */
movw %dx, %fs
movw %dx, %gs
movw %dx, %ss /* not used in long mode */
movl %esi, %esi /* zero-extend sys-KZERO */
movq %rsi, %rax
addq $KZERO, %rax
movq %rax, sys /* sys */
addq $(MACHSTKSZ), %rax /* PML4 and top of stack */
movq %rax, %rsp /* set stack */
// Don't undo this until all APs are started. Then we don't need to bother
// having the APs remap it. Save work.
// OK, this part is called "we climbed up the tree on a ladder, now pull
// the ladder up after us". We remove the identity mapping.
_zap0pml4:
cmpq $PML4O(KZERO), %rdx /* KZERO & 0x0000ff8000000000 */
je _zap0pdp
//movq %rdx, PML4O(0)(%rax) /* zap identity map PML4E */
_zap0pdp:
addq $PTSZ, %rax /* PDP at PML4 + PTSZ */
cmpq $PDPO(KZERO), %rdx /* KZERO & 0x0000007fc0000000 */
je _zap0pd
//movq %rdx, PDPO(0)(%rax) /* zap identity map PDPE */
_zap0pd:
addq $PTSZ, %rax /* PD at PML4 + 2*PTSZ */
cmpq $PDO(KZERO), %rdx /* KZERO & 0x000000003fe00000 */
je _zap0done
//movq %rdx, PDO(0)(%rax) /* zap identity map PDE */
_zap0done:
// now for the scary part. In some sense, all page table zapping to date
// has been theoretical. This is going to flush it. If we survive this ...
addq $(MACHSTKSZ), %rsi /* PML4-KZERO */
movq %rsi, %cr3 /* flush TLB */
addq $(2*PTSZ+4*KiB), %rax /* PD+PT+vsvm */
movq %rax, entrym
movq $0, (%rax) /* machp()->machno = 0 */
pushq %rdx /* clear flags */
popfq
movl %ebx, %ebx /* zero-extend the multiboot info pointer */
movq %rbx, %rsi /* second argument: multiboot info */
movq %rax, %rdi /* first argument: the Mach pointer */
xorq %rbp, %rbp /* stack trace ends here */
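/*
 * System V AMD64 calling convention: %rdi and %rsi carry the first two
 * integer arguments, so this calls main() with the Mach pointer and the
 * multiboot info set up above (main's actual prototype lives on the C side).
 */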
call main
.globl ndnr
ndnr: /* no deposit, no return */
/* do not resuscitate */
_dnr:
sti
hlt
jmp _dnr /* do not resuscitate */
// SIPI startup handler. The first bits of this code, which are 16-bit, are copied
// to 0x3000. That code jumps to the 32-bit entry point right after the lgdt, which is in
// the normal place, no need to copy it. If this works, it's a lot more compact
// than what Plan 9 used to do.
/*
 * Start-up request IPI handler.
 *
 * This code is executed on an application processor in response to receiving
 * a Start-up IPI (SIPI) from another processor.
 * This must be placed on a 4KiB boundary
 * somewhere in the 1st MiB of conventional memory. However,
 * due to some shortcuts below it's restricted further to within the 1st 64KiB.
 * The AP starts in real mode, with
 * CS selector set to the startup memory address/16;
 * CS base set to startup memory address;
 * CS limit set to 64KiB;
 * CPL and IP set to 0.
 */
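/*
 * Example: with the code copied to 0x3000, the BSP sends SIPI vector 0x03,
 * so the AP begins with CS selector 0x0300, CS base 0x3000 and IP 0,
 * i.e. executing the first byte of b1978 below.
 */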
/*
 * Real mode. Welcome to 1978.
 * Load a basic GDT, turn on protected mode and make
 * inter-segment jump to the protected mode code.
 */
.align 4096
.code32
.globl b1978
b1978:
_sipistartofheader:
nop; nop; nop
.quad 0xa5a5a5a5a5a5a5a5
// real mode gdt located in low 64k
// GOT TO THIS LOOP
//1: jmp 1b
// clang stupidity. Or smartness. It can't do .code16!
.byte 0xfa // cli
.byte 0x66, 0x31, 0xc0 // xorl %eax, %eax
.byte 0x0f, 0x22, 0xd8 // movl %eax, %cr3 /* invalidate tlb */
.byte 0x8e, 0xd8 // movw %ax, %ds
.byte 0x8c, 0xc8 // movw %cs, %ax
.byte 0xbb, 0x80, 0x30 // movw $0x3080, %bx
.byte 0x67, 0x66, 0x0f, 0x01, 0x13 // data32 lgdt (%ebx)
.byte 0x0f, 0x20, 0xc0 // movl %cr0, %eax
.byte 0x66, 0x25, 0xd1, 0xff, 0xfa, 0x7f // andl $0x7FFAFFD1, %eax /* PG,AM,WP,NE,TS,EM,MP = 0 */
.byte 0x66, 0x0d, 0x01, 0x00, 0x00, 0x60 // orl $0x60000001, %eax /* CD, NW, PE = 1 */
.byte 0x0f, 0x22, 0xc0 // movl %eax, %cr0
// ljmpl $8, $0x3040
.byte 0x66, 0xea // ljmpl, 0x66 prefix since we're in 16-bit mode
.byte 0x40, 0x30, 0x00, 0x00 // 32-bit offset
.byte 0x08, 0x00 // 16-bit segment
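/*
 * Note on the encodings: the AP is still in 16-bit real mode here, so
 * the 32-bit operations need 0x66 (operand-size) and 0x67 (address-size)
 * prefixes; e.g. 0x66, 0x31, 0xc0 is xorl %eax, %eax, and the final
 * 0x66, 0xea is a far jump with a 32-bit offset.
 */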
.align 32
.code32
movw $0x10, %ax
movw %ax, %ds
movw %ax, %es
movw %ax, %fs
movw %ax, %gs
movw %ax, %ss
/* Now that we are in protected mode jump to a 32 bit code segment. */
ljmpl $8, $_approtected
.align 64
gdt78:
gdtptr78:
.word 4*8-1
.long 0x3080 // $gdt78-$b1978
.word 0 // unused
/* selgdt 0x08, flat code segment */
.word 0xffff, 0x0000
.byte 0x00, 0x9b, 0xcf, 0x00 /* G=1, limit 0xfffff, so we get a 4 GiB limit */
/* selgdt 0x10, flat data segment */
.word 0xffff, 0x0000
.byte 0x00, 0x93, 0xcf, 0x00
.quad 0x0020980000000000 /* Long mode CS */
gdt78_end:
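/*
 * The magic numbers above are tied to the 0x3000 copy address: 0x3040
 * (the far jump target) is the .align 32 stub above, and 0x3080 is
 * gdt78 at its .align 64 boundary. If the SIPI copy address changes,
 * both must change with it.
 */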
.global e1978
e1978:
/*
 * Protected mode. Welcome to 1982.
 * Get the local APIC ID from the memory mapped APIC;
#ifdef UseOwnPageTables
 * load the PDB with the page table address, which is located
 * in the word immediately preceding _real<>-KZERO(SB);
 * this is also the (physical) address of the top of stack;
#else
 * load the PML4 with the shared page table address;
#endif
 * make an identity map for the inter-segment jump below,
 * using the stack space to hold a temporary PDP and PD;
 * enable and activate long mode;
 * make an inter-segment jump to the long mode code.
 */
.section .boottext, "awx"
.code32
/*
 * Macros for accessing page table entries; must turn
 * the C-style array-index macros into a page table byte
 * offset.
 */
#define PML4O(v) ((PTLX((v), 3))<<3)
#define PDPO(v) ((PTLX((v), 2))<<3)
#define PDO(v) ((PTLX((v), 1))<<3)
#define PTO(v) ((PTLX((v), 0))<<3)
_approtected:
movl $0xfee00000, %ebp /* apicbase */
movl 0x20(%ebp), %ebp /* Id */
shrl $24, %ebp /* becomes RARG later */
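/*
 * The xAPIC ID register sits at offset 0x20 from the default APIC MMIO
 * base 0xfee00000, with the ID in bits 31:24, hence the shift.
 */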
#ifdef UseOwnPageTables
movl $_real-KZERO, %eax
movl -4(%eax), %esi /* page table PML4 */
#else
movl $(0x00100000+MACHSTKSZ), %esi /* page table PML4 */
#endif
movl %esi, %eax
movl %eax, %cr3 /* load the mmu */
#if 0
movl %eax, %edx
subl $MACHSTKSZ, %edx /* PDP for identity map */
addl $(PteRW|PteP), %edx
movl %edx, PML4O(0)(%eax) /* PML4E for identity map */
subl $MACHSTKSZ, %eax /* PDP for identity map */
addl $PTSZ, %edx
movl %edx, PDPO(0)(%eax) /* PDPE for identity map */
movl $(PtePS|PteRW|PteP), %edx
addl $PTSZ, %eax /* PD for identity map */
movl %edx, PDO(0)(%eax) /* PDE for identity 0-2MiB */
#endif
/*
 * Enable and activate Long Mode. From the manual:
 * make sure Page Size Extensions are off, and Page Global
 * Extensions and Physical Address Extensions are on in CR4;
 * set Long Mode Enable in the Extended Feature Enable MSR;
 * set Paging Enable in CR0;
 * make an inter-segment jump to the Long Mode code.
 * It's all in 32-bit mode until the jump is made.
 */
aplme:
movl %cr4, %eax
andl $~Pse, %eax /* Page Size */
orl $(Pge|Pae), %eax /* Page Global, Phys. Address */
movl %eax, %cr4
movl $Efer, %ecx /* Extended Feature Enable */
rdmsr
orl $Lme, %eax /* Long Mode Enable */
wrmsr
movl %cr0, %edx
andl $~(Cd|Nw|Ts|Mp), %edx
orl $(Pg|Wp), %edx /* Paging Enable */
movl %edx, %cr0
ljmp $0x18, $_apidentity
/*
 * Long mode. Welcome to 2003.
 * Jump out of the identity map space;
 * load a proper long mode GDT;
 * zap the identity map;
 * initialise the stack and call the
 * C startup code in m->splpc.
 */
.code64
_apidentity:
movq $_apstart64v, %rax
jmp *%rax
.section .text
_apstart64v:
movq $_gdtptr64v, %rax
lgdt (%rax)
xorq %rdx, %rdx
movw %dx, %ds /* not used in long mode */
movw %dx, %es /* not used in long mode */
movw %dx, %fs
movw %dx, %gs
movw %dx, %ss /* not used in long mode */
movl %esi, %esi /* zero-extend PML4-KZERO */
movq %rsi, %rax
addq $KZERO, %rax /* PML4 and top of stack */
movq %rax, %rsp /* set stack */
// DON'T ZAP.
// DO IT LATER.
//movq %rdx, PML4O(0)(%rax) /* zap identity map */
movq %rsi, %cr3 /* flush TLB */
#ifndef UseOwnPageTables
/*
 * SI still points to the base of the bootstrap
 * processor page tables.
 * Want to use that for clearing the identity map,
 * but want to use the passed-in address for
 * setting up the stack and Mach.
 */
// oh, barf.
// movq $_real, %rax
movq $0x3000, %rax
movl -4(%rax), %esi /* PML4 */
movl %esi, %esi /* PML4-KZERO */
#endif
movq %rsi, %rax
addq $KZERO, %rax /* PML4 and top of stack */
movq %rax, %rsp /* set stack */
pushq %rdx /* clear flags */
popfq
// put this in %rdx so it can be the third argument. We need to write it into
// %gs
addq $0x5000, %rax /* 4*PTSZ + 4*KiB: PML4+PDP+PD+PT+vsvm */
movq %rbp, %rdi /* first argument: APIC ID */
movq %rax, %rsi /* second argument: Mach * */
movq 8(%rsi), %rax /* m->splpc */
xorq %rbp, %rbp /* stack trace ends here */
call *%rax /* call squidboy() */
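/*
 * squidboy() is the AP's C entry point: the BSP stores its address in
 * m->splpc (the load from 8(%rsi) above) before waking the AP, so each
 * AP finishes here by jumping into C with its APIC ID and Mach pointer.
 */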