/* rebootcode.s */
/*
 * armv6 reboot code
 */
#include "arm.s"

/*
 * Turn off MMU, then copy the new kernel to its correct location
 * in physical memory.  Then jump to the start of the kernel.
 */

/* main(PADDR(entry), PADDR(code), size); */
  10. TEXT main(SB), 1, $-4
  11. MOVW $setR12(SB), R12
  12. /* copy in arguments before stack gets unmapped */
  13. MOVW R0, R8 /* entry point */
  14. MOVW p2+4(FP), R9 /* source */
  15. MOVW n+8(FP), R10 /* byte count */
  16. /* SVC mode, interrupts disabled */
  17. MOVW $(PsrDirq|PsrDfiq|PsrMsvc), R1
  18. MOVW R1, CPSR
  19. /* prepare to turn off mmu */
  20. BL cachesoff(SB)
  21. /* turn off mmu */
  22. MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
  23. BIC $CpCmmu, R1
  24. MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
  25. /* set up a tiny stack for local vars and memmove args */
  26. MOVW R8, SP /* stack top just before kernel dest */
  27. SUB $20, SP /* allocate stack frame */
  28. /* copy the kernel to final destination */
  29. MOVW R8, 16(SP) /* save dest (entry point) */
  30. MOVW R8, R0 /* first arg is dest */
  31. MOVW R9, 8(SP) /* push src */
  32. MOVW R10, 12(SP) /* push size */
  33. BL memmove(SB)
  34. MOVW 16(SP), R8 /* restore entry point */
  35. /* jump to kernel physical entry point */
  36. B (R8)
  37. B 0(PC)
  38. /*
  39. * turn the caches off, double map PHYSDRAM & KZERO, invalidate TLBs, revert
  40. * to tiny addresses. upon return, it will be safe to turn off the mmu.
  41. * clobbers R0-R2, and returns with SP invalid.
  42. */
  43. TEXT cachesoff(SB), 1, $-4
  44. /* write back and invalidate caches */
  45. BARRIERS
  46. MOVW $0, R0
  47. MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEall
  48. MCR CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall
  49. /* turn caches off */
  50. MRC CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
  51. BIC $(CpCdcache|CpCicache|CpCpredict), R1
  52. MCR CpSC, 0, R1, C(CpCONTROL), C(0), CpMainctl
  53. /* invalidate stale TLBs before changing them */
  54. BARRIERS
  55. MOVW $KZERO, R0 /* some valid virtual address */
  56. MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
  57. BARRIERS
  58. /* from here on, R0 is base of physical memory */
  59. MOVW $PHYSDRAM, R0
  60. /* redo double map of first MiB PHYSDRAM = KZERO */
  61. MOVW $(L1+L1X(PHYSDRAM)), R2 /* address of PHYSDRAM's PTE */
  62. MOVW $PTEDRAM, R1 /* PTE bits */
  63. ORR R0, R1 /* dram base */
  64. MOVW R1, (R2)
  65. /* invalidate stale TLBs again */
  66. BARRIERS
  67. MCR CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
  68. BARRIERS
  69. /* relocate SB and return address to PHYSDRAM addressing */
  70. MOVW $KSEGM, R1 /* clear segment bits */
  71. BIC R1, R12 /* adjust SB */
  72. ORR R0, R12
  73. BIC R1, R14 /* adjust return address */
  74. ORR R0, R14
  75. RET