/* rebootcode.s */
/*
 * arm v7 reboot code
 *
 * must fit in 11K to avoid stepping on PTEs; see mem.h.
 * cache parameters are at CACHECONF.
 */
#include "arm.s"

/*
 * All caches but L1 should be off before calling this.
 * Turn off MMU, then copy the new kernel to its correct location
 * in physical memory.  Then jump to the start of the kernel.
 */
/* main(PADDR(entry), PADDR(code), size); */
TEXT main(SB), 1, $-4
	MOVW	$setR12(SB), R12
	MOVW	R0, p1+0(FP)			/* destination, passed in R0 */
	CPSID					/* splhi: no interrupts from here on */

	PUTC('R')
	BL	cachesoff(SB)
	/* now back in 29- or 26-bit addressing, mainly for SB */
	/* double mapping of PHYSDRAM & KZERO now in effect */

	PUTC('e')

	/* before turning MMU off, switch to PHYSDRAM-based addresses */
	DMB
	MOVW	$KSEGM, R7			/* clear segment bits */
	MOVW	$PHYSDRAM, R0			/* set dram base bits */
	BIC	R7, R12				/* adjust SB */
	ORR	R0, R12
	BL	_r15warp(SB)			/* rebases LR & SP, and PC via RET */
	/* don't care about saving R14; we're not returning */

	/*
	 * now running in PHYSDRAM segment, not KZERO.
	 */
	PUTC('b')

	/* invalidate mmu mappings */
	MOVW	$KZERO, R0			/* some valid virtual address */
	MTCP	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	PUTC('o')

	/*
	 * turn the MMU off
	 */
	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$CpCmmu, R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0)
	BARRIERS

	PUTC('o')

	/* copy in arguments from stack frame before moving stack */
	MOVW	p2+4(FP), R4			/* phys source */
	MOVW	n+8(FP), R5			/* byte count */
	MOVW	p1+0(FP), R6			/* phys destination */

	/* set up a new stack for local vars and memmove args */
	MOVW	R6, SP				/* tiny trampoline stack */
	SUB	$(0x20 + 4), SP			/* back up before a.out header */

//	MOVW	R14, -48(SP)			/* store return addr */
	SUB	$48, SP				/* allocate stack frame */
	MOVW	R5, 40(SP)			/* save count across memmove */
	MOVW	R6, 44(SP)			/* save dest/entry across memmove */

	/* copy the new kernel into place */
	DELAY(printloop2, 2)
	PUTC('t')
	MOVW	40(SP), R5			/* restore count */
	MOVW	44(SP), R6			/* restore dest/entry */
	MOVW	R6, 0(SP)			/* normally saved LR goes here */
	MOVW	R6, 4(SP)			/* push dest */
	MOVW	R6, R0
	MOVW	R4, 8(SP)			/* push src */
	MOVW	R5, 12(SP)			/* push size */
	BL	memmove(SB)

	PUTC('-')
	PUTC('>')
	DELAY(printloopret, 1)
	PUTC('\r')
	DELAY(printloopnl, 1)
	PUTC('\n')

	/*
	 * jump to kernel entry point.  Note the true kernel entry point is
	 * the virtual address KZERO|R6, but this must wait until
	 * the MMU is enabled by the kernel in l.s
	 */
	MOVW	44(SP), R6			/* restore R6 (dest/entry) */
	ORR	R6, R6				/* NOP: avoid link bug */
	B	(R6)				/* no return: the new kernel owns the cpu */

	/* unreachable unless the branch above fails */
	PUTC('?')
	PUTC('?')
	B	0(PC)				/* hang forever */
/*
 * turn the caches off, double map PHYSDRAM & KZERO, invalidate TLBs, revert
 * to tiny addresses.  upon return, it will be safe to turn off the mmu.
 * on return, SB (R12), SP (R13) and the link (R14) have been rebased from
 * KZERO to PHYSDRAM addresses, so RET resumes the caller in PHYSDRAM space.
 */
TEXT cachesoff(SB), 1, $-4
	MOVM.DB.W [R14,R1-R10], (R13)		/* save regs on stack */
	CPSID					/* interrupts off while caches change */
	BARRIERS

	SUB	$12, SP				/* paranoia */
	BL	cacheuwbinv(SB)			/* write back & invalidate all caches */
	ADD	$12, SP				/* paranoia */

	MFCP	CpSC, 0, R0, C(CpCONTROL), C(0)
	BIC	$(CpCicache|CpCdcache), R0
	MTCP	CpSC, 0, R0, C(CpCONTROL), C(0)	/* caches off */
	BARRIERS

	/*
	 * caches are off
	 */

	/* invalidate stale TLBs before changing them */
	MOVW	$KZERO, R0			/* some valid virtual address */
	MTCP	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* redo double map of PHYSDRAM, KZERO */
	MOVW	$PHYSDRAM, R3
	CMP	$KZERO, R3
	BEQ	noun2map			/* nothing to do if they coincide */
	MOVW	$(L1+L1X(PHYSDRAM)), R4		/* address of PHYSDRAM's PTE */
	MOVW	$PTEDRAM, R2			/* PTE bits */
	MOVW	$DOUBLEMAPMBS, R5		/* # of MiB sections to map */
_ptrdbl:
	ORR	R3, R2, R1			/* first identity-map 0 to 0, etc. */
	MOVW	R1, (R4)
	ADD	$4, R4				/* bump PTE address */
	ADD	$MiB, R3			/* bump pa */
	SUB.S	$1, R5
	BNE	_ptrdbl
noun2map:

	/*
	 * flush stale TLB entries
	 */
	BARRIERS
	MOVW	$KZERO, R0			/* some valid virtual address */
	MTCP	CpSC, 0, R0, C(CpTLB), C(CpTLBinvu), CpTLBinv
	BARRIERS

	/* switch back to PHYSDRAM addressing, mainly for SB */
	MOVW	$KSEGM, R7			/* clear segment bits */
	MOVW	$PHYSDRAM, R0			/* set dram base bits */
	BIC	R7, R12				/* adjust SB */
	ORR	R0, R12
	BIC	R7, SP
	ORR	R0, SP

	MOVM.IA.W (R13), [R14,R1-R10]		/* restore regs from stack */

	/* rebase the saved link register so RET lands in PHYSDRAM space */
	MOVW	$KSEGM, R0			/* clear segment bits */
	BIC	R0, R14				/* adjust link */
	MOVW	$PHYSDRAM, R0			/* set dram base bits */
	ORR	R0, R14
	RET
/*
 * rebase the link register (R14) and stack pointer (R13) into another
 * segment.  caller loads R7 with the segment mask to clear (KSEGM at the
 * call site in main) and R0 with the base bits to set (PHYSDRAM there).
 * RET branches via the adjusted R14, so the PC is warped too.
 */
TEXT _r15warp(SB), 1, $-4
	BIC	R7, R14				/* link */
	ORR	R0, R14
	BIC	R7, R13				/* SP */
	ORR	R0, R13
	RET
/*
 * panic stub: the real kernel panic is unavailable in this trampoline,
 * so just emit "?!" on the console and return to the caller.
 */
TEXT panic(SB), 1, $-4				/* stub */
	PUTC('?')
	PUTC('!')
	RET
/* pczeroseg stub: no-op here; satisfies a reference from included code */
TEXT pczeroseg(SB), 1, $-4			/* stub */
	RET
#include "cache.v7.s"
/*
 * print R0 as 8 hex digits, most significant nibble first,
 * followed by CR LF and a short delay.
 * modifies R0, R3-R6.
 */
TEXT printhex(SB), 1, $-4
	MOVW	R0, R3				/* R3 = value to print */
	MOVW	$(32-4), R5			/* bits to shift right; 28,24,...,0 */
nextdig:
	SRA	R5, R3, R4			/* R4 = current nibble (high bits masked next) */
	AND	$0xf, R4
	ADD	$'0', R4			/* to ascii */
	CMP.S	$'9', R4
	BLE	nothex				/* if R4 <= '9', it's a decimal digit */
	ADD	$('a'-('9'+1)), R4		/* else shift into 'a'-'f' */
nothex:
	PUTC(R4)
	SUB.S	$4, R5				/* next nibble */
	BGE	nextdig				/* loop while shift count >= 0 */

	PUTC('\r')
	PUTC('\n')
	DELAY(proct, 50)
	RET