/*
 * Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/cpus/cpu_ops.h>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * errata 813419 of Cortex-A57.
 */
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc; \
	dsb	ish; \
	stcopr	_reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
	stcopr	_reg, _coproc
#endif
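
/*
 * Example (illustrative): invalidate all TLB entries, assuming TLBIALL
 * is defined in arch.h as the coprocessor operands of the "TLB
 * Invalidate All" operation. On builds with ERRATA_A57_813419 the
 * write is repeated with a DSB in between:
 *	mov	r0, #0
 *	TLB_INVALIDATE(r0, TLBIALL)
 */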

	/*
	 * Coprocessor register accessors
	 */
	.macro ldcopr reg, coproc, opc1, CRn, CRm, opc2
	mrc	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro ldcopr16 reg1, reg2, coproc, opc1, CRm
	mrrc	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm

	.macro stcopr reg, coproc, opc1, CRn, CRm, opc2
	mcr	\coproc, \opc1, \reg, \CRn, \CRm, \opc2
	.endm

	.macro stcopr16 reg1, reg2, coproc, opc1, CRm
	mcrr	\coproc, \opc1, \reg1, \reg2, \CRm
	.endm
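
	/*
	 * Example (illustrative): read the 32-bit SCTLR into r0 and the
	 * 64-bit CNTPCT into the r2/r3 pair, assuming SCTLR and CNTPCT_64
	 * are defined in arch.h:
	 *	ldcopr	r0, SCTLR
	 *	ldcopr16 r2, r3, CNTPCT_64
	 */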

/* Cache line size helpers */
	.macro dcache_line_size reg, tmp
	ldcopr	\tmp, CTR
	ubfx	\tmp, \tmp, #CTR_DMINLINE_SHIFT, #CTR_DMINLINE_WIDTH
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm

	.macro icache_line_size reg, tmp
	ldcopr	\tmp, CTR
	and	\tmp, \tmp, #CTR_IMINLINE_MASK
	mov	\reg, #CPU_WORD_SIZE
	lsl	\reg, \reg, \tmp
	.endm
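
	/*
	 * Example (illustrative): fetch the data cache line size in bytes
	 * into r0, using r1 as scratch:
	 *	dcache_line_size r0, r1
	 */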

	/*
	 * Declare the exception vector table, enforcing that it is aligned
	 * on a 32-byte boundary.
	 */
	.macro vector_base label
	.section .vectors, "ax"
	.align 5
	\label:
	.endm
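
	/*
	 * Example (illustrative; the table and handler names are
	 * hypothetical): declare a vector table whose entries branch to
	 * handlers:
	 *	vector_base sp_min_vector_table
	 *	b	sp_min_entrypoint	@ Reset
	 *	b	plat_panic_handler	@ Undefined instruction
	 *	...
	 */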

	/*
	 * This macro calculates the base address of the current CPU's
	 * multiprocessor (MP) stack using the plat_my_core_pos() index,
	 * the name of the stack storage and the size of each stack.
	 * Out: r0 = physical address of stack base
	 * Clobber: r14, r1, r2
	 */
	.macro get_my_mp_stack _name, _size
	bl	plat_my_core_pos
	ldr	r2, =(\_name + \_size)
	mov	r1, #\_size
	mla	r0, r0, r1, r2
	.endm

	/*
	 * This macro calculates the base address of a uniprocessor (UP)
	 * stack using the name of the stack storage and the size of the
	 * stack.
	 * Out: r0 = physical address of stack base
	 */
	.macro get_up_stack _name, _size
	ldr	r0, =(\_name + \_size)
	.endm
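
	/*
	 * Example (illustrative): a plat_get_my_stack() implementation,
	 * assuming the platform defines the platform_normal_stacks storage
	 * and PLATFORM_STACK_SIZE; lr is saved first because the macro's
	 * 'bl' clobbers it:
	 *	mov	r10, lr
	 *	get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
	 *	bx	r10
	 */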

#if ARM_ARCH_MAJOR == 7 && !defined(ARMV7_SUPPORTS_VIRTUALIZATION)
	/*
	 * Macro for mitigating against speculative execution.
	 * ARMv7 cores without the Virtualization extension do not support
	 * the eret instruction, so return with 'movs pc, lr' instead. The
	 * barriers that follow are never architecturally executed, but
	 * prevent the CPU from speculating past the return.
	 */
	.macro exception_return
	movs	pc, lr
	dsb	nsh
	isb
	.endm
#else
	/*
	 * Macro for mitigating against speculative execution beyond ERET.
	 * Uses the speculation barrier instruction introduced by FEAT_SB,
	 * if it's enabled.
	 */
	.macro exception_return
	eret
#if ENABLE_FEAT_SB
	sb
#else
	dsb	nsh
	isb
#endif
	.endm
#endif
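
	/*
	 * Example (illustrative): return from an exception handler with
	 * speculation hardening instead of a bare eret:
	 *	pop	{r0 - r3}
	 *	exception_return
	 */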

	/* Macro for error synchronization */
	.macro synchronize_errors
	/* Complete any stores that may return an abort */
	dsb	sy
	/* Synchronise the CPU context with the completion of the dsb */
	isb
	.endm

#if (ARM_ARCH_MAJOR == 7)
	/* ARMv7 does not support the stl instruction */
	.macro stl _reg, _write_lock
	dmb
	str	\_reg, \_write_lock
	dsb
	.endm
#endif
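
	/*
	 * Example (illustrative): release a lock whose address is in r0 by
	 * storing zero with release semantics:
	 *	mov	r1, #0
	 *	stl	r1, [r0]
	 */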

	/*
	 * Helper macro to generate the best mov/movw/movt combinations
	 * according to the value to be moved.
	 */
	.macro mov_imm _reg, _val
	.if ((\_val) & 0xffff0000) == 0
	mov	\_reg, #(\_val)
	.else
	movw	\_reg, #((\_val) & 0xffff)
	movt	\_reg, #((\_val) >> 16)
	.endif
	.endm
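
	/*
	 * Example (illustrative): the first expands to a single mov, the
	 * second to a movw/movt pair because bits above bit 15 are set:
	 *	mov_imm	r0, 0x1000
	 *	mov_imm	r1, 0x80001000
	 */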

	/*
	 * Macro to mark instances where we're jumping to a function and
	 * don't expect a return. To provide the function being jumped to
	 * with additional information, we use the 'bl' instruction to jump
	 * rather than 'b'.
	 *
	 * Debuggers infer the location of a call from where LR points to,
	 * which is usually the instruction after 'bl'. If this macro
	 * expansion happens to be the last location in a function, that
	 * will cause the LR to point to a location beyond the end of the
	 * function, thereby misleading the debugger back trace. We
	 * therefore insert a 'nop' after the function call for debug
	 * builds, unless the 'skip_nop' parameter is non-zero.
	 */
	.macro no_ret _func:req, skip_nop=0
	bl	\_func
#if DEBUG
	.ifeq \skip_nop
	nop
	.endif
#endif
	.endm
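
	/*
	 * Example (illustrative): jump to the panic handler with no
	 * expectation of returning:
	 *	no_ret	plat_panic_handler
	 */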

	/*
	 * Reserve space for a spin lock in an assembly file.
	 */
	.macro define_asm_spinlock _name:req
	.align	SPINLOCK_ASM_ALIGN
	\_name:
	.space	SPINLOCK_ASM_SIZE
	.endm
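
	/*
	 * Example (illustrative; the lock name is hypothetical): reserve a
	 * lock in the data section:
	 *	.data
	 *	define_asm_spinlock console_lock
	 */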

	/*
	 * Helper macro to OR the bottom 32 bits of `_val` into `_reg_l`
	 * and the top 32 bits of `_val` into `_reg_h`. If either the
	 * bottom or top word of `_val` is zero, the corresponding OR
	 * operation is skipped.
	 */
	.macro orr64_imm _reg_l, _reg_h, _val
	.if (\_val >> 32)
	orr	\_reg_h, \_reg_h, #(\_val >> 32)
	.endif
	.if (\_val & 0xffffffff)
	orr	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
	.endif
	.endm

	/*
	 * Helper macro to bitwise-clear bits in `_reg_l` and `_reg_h`
	 * given a 64 bit immediate `_val`. The set bits in the bottom word
	 * of `_val` dictate which bits from `_reg_l` should be cleared.
	 * Similarly, the set bits in the top word of `_val` dictate which
	 * bits from `_reg_h` should be cleared. If either the bottom or
	 * top word of `_val` is zero, the corresponding BIC operation is
	 * skipped.
	 */
	.macro bic64_imm _reg_l, _reg_h, _val
	.if (\_val >> 32)
	bic	\_reg_h, \_reg_h, #(\_val >> 32)
	.endif
	.if (\_val & 0xffffffff)
	bic	\_reg_l, \_reg_l, #(\_val & 0xffffffff)
	.endif
	.endm
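
	/*
	 * Example (illustrative) for orr64_imm and bic64_imm: with a
	 * 64-bit value held in the r0 (low word) / r1 (high word) pair,
	 * set bit 32 and clear bit 0:
	 *	orr64_imm r0, r1, (1 << 32)
	 *	bic64_imm r0, r1, 0x1
	 */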

	/*
	 * Helper macro for carrying out division in software when hardware
	 * division is not supported. \top holds the dividend on entry and
	 * the remainder after the macro completes. \bot holds the divisor.
	 * \div holds the quotient and \temp is a temporary register used
	 * in the calculation.
	 * The division algorithm has been obtained from:
	 * http://www.keil.com/support/man/docs/armasm/armasm_dom1359731155623.htm
	 */
	.macro softudiv div:req, top:req, bot:req, temp:req
	mov	\temp, \bot
	cmp	\temp, \top, lsr #1
div1:
	movls	\temp, \temp, lsl #1
	cmp	\temp, \top, lsr #1
	bls	div1
	mov	\div, #0
div2:
	cmp	\top, \temp
	subcs	\top, \top, \temp
	adc	\div, \div, \div
	mov	\temp, \temp, lsr #1
	cmp	\temp, \bot
	bhs	div2
	.endm
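
	/*
	 * Example (illustrative): compute 100 / 7; afterwards r0 holds the
	 * quotient (14) and r1 the remainder (2):
	 *	mov	r1, #100
	 *	mov	r2, #7
	 *	softudiv r0, r1, r2, r3
	 */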

	/*
	 * Helper macro equivalent to 'adr <reg>, <symbol>' for a <symbol>
	 * within the range +/- 4 GB of the PC.
	 */
	.macro adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm
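
	/*
	 * Example (illustrative; the symbol is hypothetical): load the
	 * address of a table that may be out of range of a plain adr:
	 *	adr_l	x0, rodata_table
	 */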

#endif /* ASM_MACROS_S */