/*
 * cortex arm arch v7 cache flushing and invalidation
 * included by l.s and rebootcode.s
 */

TEXT cacheiinv(SB), $-4				/* I invalidate */
	MOVW	$0, R0
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvi), CpCACHEall /* ok on cortex */
	ISB
	RET
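
/*
 * Rough C equivalent, shown with gcc-style inline assembly purely for
 * illustration (the kernel of course uses the Plan 9 assembler above).
 * The MCR encoding p15,0,Rt,c7,c5,0 is ICIALLU, which invalidates the
 * entire I-cache regardless of the value written:
 *
 *	#include <stdint.h>
 *
 *	static inline void
 *	cacheiinv_c(void)
 *	{
 *		uint32_t zero = 0;
 *
 *		asm volatile("mcr p15, 0, %0, c7, c5, 0" :: "r"(zero));
 *		asm volatile("isb" ::: "memory");
 *	}
 */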

/*
 * set/way operators, passed a suitable set/way value in R0.
 */
TEXT cachedwb_sw(SB), $-4
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwb), CpCACHEsi
	RET

TEXT cachedwbinv_sw(SB), $-4
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEwbi), CpCACHEsi
	RET

TEXT cachedinv_sw(SB), $-4
	MTCP	CpSC, 0, R0, C(CpCACHE), C(CpCACHEinvd), CpCACHEsi
	RET
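
/*
 * Each operator above expects R0 to hold the architected set/way word:
 * way<<WAYSH | set<<SETSH | level<<1, with WAYSH = 32 - log2(nways)
 * and SETSH = log2(line length in bytes).  A sketch of the packing in
 * C (function and parameter names are illustrative, not from this
 * kernel); wholecache below computes exactly this in its inner loop:
 *
 *	#include <stdint.h>
 *
 *	uint32_t
 *	setwayword(uint32_t way, uint32_t waysh, uint32_t set,
 *	    uint32_t setsh, uint32_t level)	// level is 0-origin
 *	{
 *		return way<<waysh | set<<setsh | level<<1;
 *	}
 */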

/* set cache size select */
TEXT setcachelvl(SB), $-4
	MTCP	CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0
	ISB
	RET

/* return cache sizes */
TEXT getwayssets(SB), $-4
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0
	RET
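
/*
 * setcachelvl writes CSSELR (cache size selection register); the next
 * CCSIDR read, done by getwayssets, then describes the selected cache
 * level.  Decoding the CCSIDR fields in C (a sketch; field positions
 * are from the ARMv7 architecture manual, names are illustrative):
 *
 *	#include <stdint.h>
 *
 *	void
 *	decodeccsidr(uint32_t csz, uint32_t *nways, uint32_t *nsets,
 *	    uint32_t *linelen)
 *	{
 *		*linelen = 4 << ((csz & 7) + 2);		// bytes per line
 *		*nways = ((csz >> 3) & ((1<<10)-1)) + 1;	// bits 12:3
 *		*nsets = ((csz >> 13) & ((1<<15)-1)) + 1;	// bits 27:13
 *	}
 *
 * wholecache below performs the same nways and nsets extractions.
 */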

/*
 * l1 cache operations.
 * l1 and l2 ops are intended to be called from C, thus need save no
 * caller's regs, only those we need to preserve across calls.
 */
TEXT cachedwb(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	$cachedwb_sw(SB), R0
	MOVW	$1, R8
	BL	wholecache(SB)
	MOVW.P	8(R13), R15

TEXT cachedwbinv(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	$cachedwbinv_sw(SB), R0
	MOVW	$1, R8
	BL	wholecache(SB)
	MOVW.P	8(R13), R15

TEXT cachedinv(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	$cachedinv_sw(SB), R0
	MOVW	$1, R8
	BL	wholecache(SB)
	MOVW.P	8(R13), R15

TEXT cacheuwbinv(SB), $-4
	MOVM.DB.W [R14], (R13)	/* save lr on stack */
	MOVW	CPSR, R1
	CPSID			/* splhi */
	MOVM.DB.W [R1], (R13)	/* save R1 on stack */
	BL	cachedwbinv(SB)
	BL	cacheiinv(SB)
	MOVM.IA.W (R13), [R1]	/* restore R1 (saved CPSR) */
	MOVW	R1, CPSR
	MOVM.IA.W (R13), [R14]	/* restore lr */
	RET
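
/*
 * The CPSR juggling in cacheuwbinv is splhi()/splx() done by hand:
 * interrupts stay off so nothing can dirty the caches between the D
 * write-back-invalidate and the I invalidate.  The same shape in C
 * (a sketch; splhi/splx are the usual Plan 9 kernel primitives):
 *
 *	void
 *	cacheuwbinv_c(void)
 *	{
 *		int s;
 *
 *		s = splhi();	// disable interrupts, save old state
 *		cachedwbinv();	// write back & invalidate entire D side
 *		cacheiinv();	// then invalidate the I cache
 *		splx(s);	// restore saved interrupt state
 *	}
 */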

/*
 * architectural l2 cache operations
 */
TEXT _l2cacheuwb(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	$cachedwb_sw(SB), R0
	MOVW	$2, R8
	BL	wholecache(SB)
	MOVW.P	8(R13), R15	/* return */

TEXT _l2cacheuwbinv(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	CPSR, R1
	CPSID			/* splhi */
	MOVM.DB.W [R1], (R13)	/* save R1 on stack */
	MOVW	$cachedwbinv_sw(SB), R0
	MOVW	$2, R8
	BL	wholecache(SB)
	BL	_l2cacheuinv(SB)
	MOVM.IA.W (R13), [R1]	/* restore R1 (saved CPSR) */
	MOVW	R1, CPSR
	MOVW.P	8(R13), R15	/* return */

TEXT _l2cacheuinv(SB), $-4
	MOVW.W	R14, -8(R13)
	MOVW	$cachedinv_sw(SB), R0
	MOVW	$2, R8
	BL	wholecache(SB)
	MOVW.P	8(R13), R15	/* return */

/*
 * callers are assumed to be the above l1 and l2 ops.
 * R0 is the function to call in the innermost loop.
 * R8 is the cache level (1-origin: 1 or 2).
 *
 * R0	func to call at entry
 * R1	func to call after entry
 * R2	nsets
 * R3	way shift (computed from R8)
 * R4	set shift (computed from R8)
 * R5	nways
 * R6	set scratch
 * R7	way scratch
 * R8	cache level, 0-origin
 * R9	extern reg up
 * R10	extern reg m
 *
 * initial translation by 5c, then massaged by hand.
 */
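
/*
 * Since the comment above says this began as 5c output, a plausible
 * reconstruction of the original C is shown here (a sketch, not the
 * real source; getcachesizes is hypothetical, and the KSEGM address
 * fix-ups and interrupt masking below have no C counterpart):
 *
 *	void
 *	wholecache(void (*cacheop)(uint32_t), int level)  // level 1-origin
 *	{
 *		int nways, nsets, waysh, setsh, way, set;
 *
 *		getcachesizes(level, &nways, &nsets, &waysh, &setsh);
 *		for(way = 0; way < nways; way++)
 *			for(set = 0; set < nsets; set++)
 *				(*cacheop)(way<<waysh | set<<setsh |
 *				    (level-1)<<1);
 *	}
 */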

TEXT wholecache+0(SB), $-4
	MOVW	CPSR, R2
	MOVM.DB.W [R2,R14], (SP) /* save regs on stack */

	MOVW	R0, R1		/* save argument for inner loop in R1 */
	SUB	$1, R8		/* convert cache level to zero origin */

	/* we might not have the MMU on yet, so map R1 (func) to R14's space */
	MOVW	R14, R0		/* get R14's segment ... */
	AND	$KSEGM, R0
	BIC	$KSEGM,	R1	/* strip segment from func address */
	ORR	R0, R1		/* combine them */

	/* get cache sizes */
	SLL	$1, R8, R0	/* R0 = (cache - 1) << 1 */
	MTCP	CpSC, CpIDcssel, R0, C(CpID), C(CpIDidct), 0 /* set cache select */
	ISB
	MFCP	CpSC, CpIDcsize, R0, C(CpID), C(CpIDidct), 0 /* get cache sizes */

	/* compute # of ways and sets for this cache level */
	SRA	$3, R0, R5	/* R5 (ways) = R0 >> 3 */
	AND	$((1<<10)-1), R5 /* R5 = (R0 >> 3) & MASK(10) */
	ADD	$1, R5		/* R5 (ways) = ((R0 >> 3) & MASK(10)) + 1 */

	SRA	$13, R0, R2	/* R2 = R0 >> 13 */
	AND	$((1<<15)-1), R2 /* R2 = (R0 >> 13) & MASK(15) */
	ADD	$1, R2		/* R2 (sets) = ((R0 >> 13) & MASK(15)) + 1 */

	/* precompute set/way shifts for inner loop */
	MOVW	$(CACHECONF+0), R3 /* +0 = l1waysh */
	MOVW	$(CACHECONF+4), R4 /* +4 = l1setsh */
	CMP	$0, R8		/* cache == 1? */
	ADD.NE	$(4*2), R3	/* no, assume l2: +8 = l2waysh */
	ADD.NE	$(4*2), R4	/* +12 = l2setsh */
	MOVW	R14, R0		/* get R14's segment ... */
	AND	$KSEGM, R0
	BIC	$KSEGM,	R3	/* strip segment from address */
	ORR	R0, R3		/* combine them */
	BIC	$KSEGM,	R4	/* strip segment from address */
	ORR	R0, R4		/* combine them */
	MOVW	(R3), R3
	MOVW	(R4), R4

	CMP	$0, R3		/* sanity checks */
	BEQ	wbuggery
	CMP	$0, R4
	BEQ	sbuggery

	CPSID			/* splhi to make entire op atomic */
	BARRIERS

	/* iterate over ways */
	MOVW	$0, R7		/* R7: way */
outer:
	/* iterate over sets */
	MOVW	$0, R6		/* R6: set */
inner:
	/* compute set/way register contents */
	SLL	R3, R7, R0	/* R0 = way << R3 (L?WAYSH) */
	ORR	R8<<1, R0	/* R0 = way << L?WAYSH | (cache - 1) << 1 */
	ORR	R6<<R4, R0	/* R0 = way<<L?WAYSH | (cache-1)<<1 | set<<R4 */
	BL	(R1)		/* call set/way operation with R0 arg. */

	ADD	$1, R6		/* set++ */
	CMP	R2, R6		/* set >= sets? */
	BLT	inner		/* no, do next set */

	ADD	$1, R7		/* way++ */
	CMP	R5, R7		/* way >= ways? */
	BLT	outer		/* no, do next way */

	MOVM.IA.W (SP), [R2,R14] /* restore regs */
	BARRIERS
	MOVW	R2, CPSR	/* splx */
	RET

wbuggery:
	PUTC('?')
	PUTC('c')
	PUTC('w')
	B	topanic
sbuggery:
	PUTC('?')
	PUTC('c')
	PUTC('s')
topanic:
	MOVW	$.string<>+0(SB), R0
	BIC	$KSEGM, R0	/* strip segment from address */
	MOVW	R14, R1		/* get R14's segment ... */
	AND	$KSEGM, R1
	ORR	R1, R0		/* combine them */
	SUB	$12, R13	/* not that it matters, since we're panicking */
	MOVW	R14, 8(R13)
	BL	panic(SB)	/* panic("msg %#p", LR) */
bugloop:
	WFI
	B	bugloop

	DATA	.string<>+0(SB)/8,$"bad cach"
	DATA	.string<>+8(SB)/8,$"e params"
	DATA	.string<>+16(SB)/8,$"\073 pc %\043p"
	DATA	.string<>+24(SB)/1,$"\z"
	GLOBL	.string<>+0(SB),$25