asm_macros.S

/*
 * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef ASM_MACROS_S
#define ASM_MACROS_S

#include <arch.h>
#include <common/asm_macros_common.S>
#include <lib/spinlock.h>

/*
 * TLBI instruction with type specifier that implements the workaround for
 * erratum 813419 of Cortex-A57 or erratum 1286807 of Cortex-A76.
 */
#if ERRATA_A57_813419 || ERRATA_A76_1286807
#define TLB_INVALIDATE(_type) \
        tlbi    _type; \
        dsb     ish; \
        tlbi    _type
#else
#define TLB_INVALIDATE(_type) \
        tlbi    _type
#endif
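
/*
 * Example expansion (illustrative): with the erratum workaround enabled, an
 * invocation such as
 *
 *      TLB_INVALIDATE(alle3)
 *
 * emits "tlbi alle3; dsb ish; tlbi alle3"; otherwise it emits a single
 * "tlbi alle3".
 */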

/*
 * Create a stack frame at the start of an assembly function. Will also
 * add all necessary call frame information (cfi) directives for a
 * pretty stack trace. This is necessary as there is quite a bit of
 * flexibility within a stack frame and the stack pointer can move
 * around throughout the function. If the debugger isn't told where to
 * find things, it gets lost, gives up and displays nothing. So inform
 * the debugger of what's where. Anchor the Canonical Frame Address
 * (CFA; the thing used to track what's where) to the frame pointer as
 * that's not expected to change in the function body and no extra
 * bookkeeping will be necessary, allowing free movement of the sp
 *
 * _frame_size: requested space for caller to use. Must be a multiple
 *      of 16 for stack pointer alignment
 */
.macro func_prologue _frame_size=0
        .if \_frame_size & 0xf
                .error "frame_size must have stack pointer alignment (multiple of 16)"
        .endif
        /* put frame record at top of frame */
        stp     x29, x30, [sp, #-0x10]!
        mov     x29, sp
        .if \_frame_size
                sub     sp, sp, #\_frame_size
        .endif
        /* point CFA to start of frame record, i.e. x29 + 0x10 */
        .cfi_def_cfa    x29, 0x10
        /* inform it about x29, x30 locations */
        .cfi_offset     x30, -0x8
        .cfi_offset     x29, -0x10
.endm

/*
 * Clear stack frame at the end of an assembly function.
 *
 * _frame_size: the value passed to func_prologue
 */
.macro func_epilogue _frame_size=0
        /* remove requested space */
        .if \_frame_size
                add     sp, sp, #\_frame_size
        .endif
        ldp     x29, x30, [sp], #0x10
.endm
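
/*
 * Example usage of the prologue/epilogue pair (illustrative; the function
 * name is hypothetical): reserve 16 bytes of scratch space on entry and
 * release it again before returning.
 *
 *      func example_fn
 *              func_prologue 16
 *              ...
 *              func_epilogue 16
 *              ret
 *      endfunc example_fn
 */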

/*
 * Compute the data cache line size in bytes from CTR_EL0.DminLine
 * (bits [19:16]), which holds log2 of the line size in words.
 */
.macro dcache_line_size reg, tmp
        mrs     \tmp, ctr_el0
        ubfx    \tmp, \tmp, #16, #4
        mov     \reg, #4
        lsl     \reg, \reg, \tmp
.endm

/*
 * Compute the instruction cache line size in bytes from CTR_EL0.IminLine
 * (bits [3:0]), which holds log2 of the line size in words.
 */
.macro icache_line_size reg, tmp
        mrs     \tmp, ctr_el0
        and     \tmp, \tmp, #0xf
        mov     \reg, #4
        lsl     \reg, \reg, \tmp
.endm

/*
 * Check whether the exception taken to EL3 was caused by an SMC from
 * AArch64 state; branch to 'label' if it was not.
 */
.macro smc_check label
        mrs     x0, esr_el3
        ubfx    x0, x0, #ESR_EC_SHIFT, #ESR_EC_LENGTH
        cmp     x0, #EC_AARCH64_SMC
        b.ne    $label
.endm

/*
 * Declare the exception vector table, enforcing it is aligned on a
 * 2KB boundary, as required by the ARMv8 architecture.
 * Use zero bytes as the fill value in the padding so that it decodes as
 * illegal AArch64 instructions. This increases security, robustness and
 * potentially facilitates debugging.
 */
.macro vector_base label, section_name=.vectors
        .section \section_name, "ax"
        .align 11, 0
\label:
.endm

/*
 * Create an entry in the exception vector table, enforcing it is
 * aligned on a 128-byte boundary, as required by the ARMv8 architecture.
 * Use zero bytes as the fill value in the padding so that it decodes as
 * illegal AArch64 instructions. This increases security, robustness and
 * potentially facilitates debugging.
 */
.macro vector_entry label, section_name=.vectors
        .cfi_sections .debug_frame
        .section \section_name, "ax"
        .align 7, 0
        .type \label, %function
        .cfi_startproc
\label:
.endm

/*
 * Add padding bytes to fill the exception vector entry, whose size is always
 * 32 instructions. If the entry contains more than 32 instructions, an
 * assembly error is emitted.
 */
.macro end_vector_entry label
        .cfi_endproc
        .fill   \label + (32 * 4) - .
.endm
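
/*
 * Example use of the vector macros (illustrative; the labels are
 * hypothetical): declare a 2KB-aligned table, then populate one entry and
 * pad it out to 32 instructions.
 *
 *      vector_base example_vectors
 *
 *      vector_entry example_sync_sp0
 *              b       .
 *      end_vector_entry example_sync_sp0
 */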

/*
 * This macro calculates the base address of the current CPU's MP stack
 * using the plat_my_core_pos() index, the name of the stack storage
 * and the size of each stack.
 * Out: X0 = physical address of stack base
 * Clobber: X30, X1, X2
 */
.macro get_my_mp_stack _name, _size
        bl      plat_my_core_pos
        adrp    x2, (\_name + \_size)
        add     x2, x2, :lo12:(\_name + \_size)
        mov     x1, #\_size
        madd    x0, x0, x1, x2
.endm
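
/*
 * Example usage (illustrative; the symbol and size are hypothetical): with a
 * per-CPU stack array "example_stacks" of 0x800 bytes per CPU, this leaves
 * the base (highest address) of the current CPU's stack in x0, ready to be
 * used as the initial stack pointer.
 *
 *      get_my_mp_stack example_stacks, 0x800
 *      mov     sp, x0
 */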

/*
 * This macro calculates the base address of a UP stack using the
 * name of the stack storage and the size of the stack.
 * Out: X0 = physical address of stack base
 */
.macro get_up_stack _name, _size
        adrp    x0, (\_name + \_size)
        add     x0, x0, :lo12:(\_name + \_size)
.endm

/*
 * Helper macro to generate the best mov/movk combinations according
 * to the value to be moved. The 16 bits from '_shift' are tested and,
 * if not zero, they are moved into '_reg' without affecting
 * other bits.
 */
.macro _mov_imm16 _reg, _val, _shift
        .if (\_val >> \_shift) & 0xffff
                .if (\_val & (1 << \_shift - 1))
                        movk    \_reg, (\_val >> \_shift) & 0xffff, LSL \_shift
                .else
                        mov     \_reg, \_val & (0xffff << \_shift)
                .endif
        .endif
.endm

/*
 * Helper macro to load arbitrary values into 32 or 64-bit registers,
 * generating the best mov/movk combinations. Many base addresses are
 * 64KB aligned; in that case the macro avoids updating bits 15:0.
 */
.macro mov_imm _reg, _val
        .if (\_val) == 0
                mov     \_reg, #0
        .else
                _mov_imm16      \_reg, (\_val), 0
                _mov_imm16      \_reg, (\_val), 16
                _mov_imm16      \_reg, (\_val), 32
                _mov_imm16      \_reg, (\_val), 48
        .endif
.endm
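
/*
 * Example expansions (illustrative; the values are chosen for the example):
 *
 *      mov_imm x0, 0x20000      ->  mov  x0, #0x20000
 *      mov_imm x1, 0x12345678   ->  mov  x1, #0x5678
 *                                   movk x1, #0x1234, LSL 16
 */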

/*
 * Macro to mark instances where we're jumping to a function and don't
 * expect a return. To provide the function being jumped to with
 * additional information, we use the 'bl' instruction to jump rather than
 * 'b'.
 *
 * Debuggers infer the location of a call from where LR points to, which
 * is usually the instruction after 'bl'. If this macro expansion
 * happens to be the last location in a function, that would cause the LR
 * to point to a location beyond the function, thereby misleading the
 * debugger's back trace. We therefore insert a 'nop' after the function
 * call for debug builds, unless the 'skip_nop' parameter is non-zero.
 */
.macro no_ret _func:req, skip_nop=0
        bl      \_func
#if DEBUG
        .ifeq \skip_nop
        nop
        .endif
#endif
.endm
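
/*
 * Example usage (illustrative; the target function is hypothetical):
 * transfer control to a handler that never returns; in DEBUG builds the
 * trailing nop keeps the recorded LR within the calling function for back
 * traces.
 *
 *      no_ret  example_panic_handler
 */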

/*
 * Reserve space for a spin lock in an assembly file.
 */
.macro define_asm_spinlock _name:req
        .align  SPINLOCK_ASM_ALIGN
\_name:
        .space  SPINLOCK_ASM_SIZE
.endm
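
/*
 * Example usage (illustrative; the lock name is hypothetical): reserve
 * storage for a lock and operate on it from assembly by passing its address
 * in x0 to the spinlock helpers.
 *
 *      define_asm_spinlock example_lock
 *      ...
 *      adrp    x0, example_lock
 *      add     x0, x0, :lo12:example_lock
 *      bl      spin_lock
 */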

/*
 * When the RAS extension is implemented this executes the 'esb' instruction;
 * otherwise it behaves as a NOP.
 */
.macro esb
        .inst   0xd503221f
.endm

/*
 * Helper macro to read a system register value into x0
 */
.macro read reg:req
#if ENABLE_BTI
        bti     j
#endif
        mrs     x0, \reg
        ret
.endm

/*
 * Helper macro to write the value in x1 to a system register
 */
.macro write reg:req
#if ENABLE_BTI
        bti     j
#endif
        msr     \reg, x1
        ret
.endm
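
/*
 * Example usage (illustrative; the label is hypothetical): these macros emit
 * the body of a tiny accessor that is entered with a branch (note the
 * 'bti j' landing pad when BTI is enabled) and returns the register value
 * in x0.
 *
 *      example_read_scr_el3:
 *              read    scr_el3
 */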

/*
 * The "sb" instruction was introduced later into the architecture,
 * so not all toolchains understand it. Some deny its usage unless
 * a supported processor is specified on the build command line.
 * Use sb's system register encoding to work around this; the sb
 * execution is already guarded by a feature flag.
 */
.macro sb_barrier_insn
        msr     SYSREG_SB, xzr
.endm

/*
 * Macro for using the speculation barrier instruction introduced by
 * FEAT_SB, if it's enabled.
 */
.macro speculation_barrier
#if ENABLE_FEAT_SB
        sb_barrier_insn
#else
        dsb     sy
        isb
#endif
.endm

/*
 * Macro for mitigating against speculative execution beyond ERET. Uses the
 * speculation barrier instruction introduced by FEAT_SB, if it's enabled.
 */
.macro exception_return
        eret
#if ENABLE_FEAT_SB
        sb_barrier_insn
#else
        dsb     nsh
        isb
#endif
.endm
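
/*
 * Example usage (illustrative): at the end of an exception handler, use the
 * macro in place of a bare "eret" so that the barrier sequence is emitted
 * after it.
 *
 *      ...restore the GP registers and ELR_EL3/SPSR_EL3...
 *      exception_return
 */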

/*
 * Macro to unmask External Aborts by clearing the PSTATE.A bit.
 * An explicit synchronization event is added to ensure that a newly
 * unmasked pending abort is taken immediately.
 */
.macro unmask_async_ea
        msr     daifclr, #DAIF_ABT_BIT
        isb
.endm

/*
 * Macro for error synchronization on exception boundaries.
 * With FEAT_RAS enabled, it is assumed that FEAT_IESB is also present
 * and enabled.
 * FEAT_IESB provides an implicit error synchronization event at exception
 * entry and exception return, so there is no need for any explicit instruction.
 */
.macro synchronize_errors
#if !ENABLE_FEAT_RAS
        /* Complete any stores that may return an abort */
        dsb     sy
        /* Synchronise the CPU context with the completion of the dsb */
        isb
#endif
.endm

/*
 * Helper macro that behaves like "adr <reg>, <symbol>" but works for a
 * <symbol> located within +/- 4 GB of the current position, by using an
 * adrp/add pair.
 */
.macro adr_l, dst, sym
        adrp    \dst, \sym
        add     \dst, \dst, :lo12:\sym
.endm
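
/*
 * Example usage (illustrative; the symbol is hypothetical): load the address
 * of a global object that may be further away than the +/- 1 MB reach of a
 * plain "adr" instruction.
 *
 *      adr_l   x0, example_global_data
 */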

#endif /* ASM_MACROS_S */