misc_helpers.S

/*
 * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <common/bl_common.h>
#include <lib/xlat_tables/xlat_tables_defs.h>

        .globl  smc
        .globl  zeromem
        .globl  zero_normalmem
        .globl  memcpy4
        .globl  disable_mmu_icache_secure
        .globl  disable_mmu_secure
        .globl  fixup_gdt_reloc

#define PAGE_START_MASK         ~(PAGE_SIZE_MASK)
func smc
        /*
         * For AArch32 only r0-r3 will be passed in registers; the rest
         * (r4-r6) will have been pushed onto the stack by the caller. So
         * here, we have to load them from the stack into registers r4-r6
         * explicitly.
         * Clobbers: r4-r6
         */
        ldm     sp, {r4, r5, r6}
        smc     #0
endfunc smc
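
/*
 * Illustrative only (not part of the original file): a minimal sketch of how
 * this helper is typically reached from C, assuming a prototype along the
 * lines of the one below. Under the AAPCS the first four arguments arrive in
 * r0-r3 and the remaining ones are placed on the stack, which is why the
 * function above reloads r4-r6 from sp before issuing the SMC.
 *
 *     void smc(uint32_t r0, uint32_t r1, uint32_t r2, uint32_t r3,
 *              uint32_t r4, uint32_t r5, uint32_t r6);
 *
 *     // Hypothetical example call: function_id is a placeholder variable,
 *     // all other arguments unused.
 *     smc(function_id, 0, 0, 0, 0, 0, 0);
 */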
/* -----------------------------------------------------------------------
 * void zeromem(void *mem, unsigned int length)
 *
 * Initialise a region in normal memory to 0. This function complies with the
 * AAPCS and can be called from C code.
 *
 * -----------------------------------------------------------------------
 */
func zeromem
        /*
         * Readable names for registers
         *
         * Registers r0, r1 and r2 are also set by zeromem which
         * branches into the fallback path directly, so cursor, length and
         * stop_address should not be retargeted to other registers.
         */
        cursor       .req r0  /* Start address and then current address */
        length       .req r1  /* Length in bytes of the region to zero out */
        /*
         * Reusing the r1 register as length is only used at the beginning of
         * the function.
         */
        stop_address .req r1  /* Address past the last zeroed byte */
        zeroreg1     .req r2  /* Source register filled with 0 */
        zeroreg2     .req r3  /* Source register filled with 0 */
        tmp          .req r12 /* Temporary scratch register */

        mov     zeroreg1, #0

        /* stop_address is the address past the last byte to zero */
        add     stop_address, cursor, length

        /*
         * Length cannot be used anymore as it shares the same register with
         * stop_address.
         */
        .unreq  length

        /*
         * If the start address is already aligned to 8 bytes, skip this loop.
         */
        tst     cursor, #(8-1)
        beq     .Lzeromem_8bytes_aligned

        /* Calculate the next address aligned to 8 bytes */
        orr     tmp, cursor, #(8-1)
        adds    tmp, tmp, #1
        /* If it overflows, fall back to byte-per-byte zeroing */
        beq     .Lzeromem_1byte_aligned
        /* If the next aligned address is after the stop address, fall back */
        cmp     tmp, stop_address
        bhs     .Lzeromem_1byte_aligned

        /* zero byte per byte */
1:
        strb    zeroreg1, [cursor], #1
        cmp     cursor, tmp
        bne     1b

        /* zero 8 bytes at a time */
.Lzeromem_8bytes_aligned:
        /* Calculate the last 8-byte aligned address. */
        bic     tmp, stop_address, #(8-1)

        cmp     cursor, tmp
        bhs     2f

        mov     zeroreg2, #0
1:
        stmia   cursor!, {zeroreg1, zeroreg2}
        cmp     cursor, tmp
        blo     1b
2:

        /* zero byte per byte */
.Lzeromem_1byte_aligned:
        cmp     cursor, stop_address
        beq     2f
1:
        strb    zeroreg1, [cursor], #1
        cmp     cursor, stop_address
        bne     1b
2:
        bx      lr

        .unreq  cursor
        /*
         * length is already unreq'ed to reuse the register for another
         * variable.
         */
        .unreq  stop_address
        .unreq  zeroreg1
        .unreq  zeroreg2
        .unreq  tmp
endfunc zeromem

/*
 * AArch32 does not have special ways of zeroing normal memory as AArch64 does
 * using the DC ZVA instruction, so we just alias zero_normalmem to zeromem.
 */
.equ    zero_normalmem, zeromem
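
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * calling the helpers above from C. The buffer name and size are made up for
 * the example; the prototypes shown are assumptions based on the comments in
 * this file.
 *
 *     void zeromem(void *mem, unsigned int length);
 *     void zero_normalmem(void *mem, unsigned int length);
 *
 *     static unsigned char scratch[256];
 *
 *     void clear_scratch(void)
 *     {
 *             // Any alignment is accepted: the helper zeroes the unaligned
 *             // head and tail byte by byte and the middle 8 bytes at a time.
 *             zeromem(scratch, sizeof(scratch));
 *     }
 */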

/* --------------------------------------------------------------------------
 * void memcpy4(void *dest, const void *src, unsigned int length)
 *
 * Copy length bytes from memory area src to memory area dest.
 * The memory areas should not overlap.
 * Destination and source addresses must be 4-byte aligned.
 * --------------------------------------------------------------------------
 */
func memcpy4
#if ENABLE_ASSERTIONS
        orr     r3, r0, r1
        tst     r3, #0x3
        ASM_ASSERT(eq)
#endif
/* copy 4 bytes at a time */
m_loop4:
        cmp     r2, #4
        blo     m_loop1
        ldr     r3, [r1], #4
        str     r3, [r0], #4
        subs    r2, r2, #4
        bne     m_loop4
        bx      lr
/* copy byte per byte */
m_loop1:
        /* Stop when no bytes are left, so a zero length does not underflow */
        cmp     r2, #0
        beq     m_end
        ldrb    r3, [r1], #1
        strb    r3, [r0], #1
        subs    r2, r2, #1
        bne     m_loop1
m_end:
        bx      lr
endfunc memcpy4
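
/*
 * Illustrative only (not part of the original file): a minimal sketch of a C
 * caller, assuming the prototype documented above. Both pointers must be
 * 4-byte aligned or an ENABLE_ASSERTIONS build will trip ASM_ASSERT. The
 * array names below are made up for the example.
 *
 *     void memcpy4(void *dest, const void *src, unsigned int length);
 *
 *     static uint32_t src_words[8];
 *     static uint32_t dst_words[8];
 *
 *     void copy_words(void)
 *     {
 *             // 32 bytes, both buffers naturally 4-byte aligned.
 *             memcpy4(dst_words, src_words, sizeof(src_words));
 *     }
 */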

/* ---------------------------------------------------------------------------
 * Disable the MMU in Secure State
 * ---------------------------------------------------------------------------
 */
func disable_mmu_secure
        mov     r1, #(SCTLR_M_BIT | SCTLR_C_BIT)
do_disable_mmu:
#if ERRATA_A9_794073
        stcopr  r0, BPIALL
        dsb
#endif
        ldcopr  r0, SCTLR
        bic     r0, r0, r1
        stcopr  r0, SCTLR
        isb                     // ensure MMU is off
        dsb     sy              // wait for outstanding memory accesses to complete
        bx      lr
endfunc disable_mmu_secure

func disable_mmu_icache_secure
        ldr     r1, =(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT)
        b       do_disable_mmu
endfunc disable_mmu_icache_secure
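
/*
 * Illustrative only (not part of the original file): an assumed C-level view
 * of the two entry points above. disable_mmu_secure clears the MMU and data
 * cache enables (SCTLR.M and SCTLR.C); disable_mmu_icache_secure additionally
 * clears the instruction cache enable (SCTLR.I) before branching to the same
 * shared tail at do_disable_mmu.
 *
 *     void disable_mmu_secure(void);
 *     void disable_mmu_icache_secure(void);
 */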

/* ---------------------------------------------------------------------------
 * Helper to fixup the Global Offset Table (GOT) and dynamic relocations
 * (.rel.dyn) at runtime.
 *
 * This function is meant to be used when the firmware is compiled with -fpie
 * and linked with -pie options. We rely on the linker script exporting
 * appropriate markers for the start and end of the sections. For the GOT, we
 * expect __GOT_START__ and __GOT_END__. Similarly for .rel.dyn, we expect
 * __RELA_START__ and __RELA_END__.
 *
 * The function takes the limits of the memory to apply fixups to as
 * arguments (which are usually the limits of the relocatable BL image).
 * r0 - the start of the fixup region
 * r1 - the limit of the fixup region
 * These addresses have to be 4KB page aligned.
 * ---------------------------------------------------------------------------
 */
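
/*
 * Illustrative only (not part of the original file): an assumed C-level
 * prototype for the helper above. In practice it is invoked very early during
 * boot, before any data that relies on the fixups is consumed. The BASE/LIMIT
 * names below are placeholders, not real build macros.
 *
 *     void fixup_gdt_reloc(uintptr_t start, uintptr_t limit);
 *
 *     // Hypothetical example: apply fixups across the whole BL image.
 *     fixup_gdt_reloc(IMAGE_BASE_PLACEHOLDER, IMAGE_LIMIT_PLACEHOLDER);
 */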

/* Relocation codes */
#define R_ARM_RELATIVE          23

func fixup_gdt_reloc
        mov     r6, r0
        mov     r7, r1

#if ENABLE_ASSERTIONS
        /* Test if the limits are 4K aligned */
        orr     r0, r0, r1
        mov     r1, #(PAGE_SIZE_MASK)
        tst     r0, r1
        ASM_ASSERT(eq)
#endif
        /*
         * Calculate the offset based on return address in lr.
         * Assume that this function is called within a page at the start of
         * the fixup region.
         */
        ldr     r1, =PAGE_START_MASK
        and     r2, lr, r1
        subs    r0, r2, r6      /* Diff(S) = Current Address - Compiled Address */
        beq     3f              /* Diff(S) = 0. No relocation needed */

        ldr     r1, =__GOT_START__
        add     r1, r1, r0
        ldr     r2, =__GOT_END__
        add     r2, r2, r0

        /*
         * The GOT is an array of 32-bit addresses which must be fixed up as
         * new_addr = old_addr + Diff(S).
         * new_addr is the address the binary is currently executing from
         * and old_addr is the address at compile time.
         */
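        /*
         * Illustrative only (not part of the original file): a worked example
         * with made-up addresses. If the image was linked to run at 0x04001000
         * (r6) but is currently executing from 0x04021000, then
         * Diff(S) = 0x04021000 - 0x04001000 = 0x20000. A GOT entry holding
         * 0x04003344 (inside [r6, r7]) is rewritten to
         * 0x04003344 + 0x20000 = 0x04023344, while entries outside the image
         * limits are left untouched.
         */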
1:      ldr     r3, [r1]

        /* Skip adding offset if address is < lower limit */
        cmp     r3, r6
        blo     2f

        /* Skip adding offset if address is > upper limit */
        cmp     r3, r7
        bhi     2f
        add     r3, r3, r0
        str     r3, [r1]

2:      add     r1, r1, #4
        cmp     r1, r2
        blo     1b

        /* Starting dynamic relocations. Use ldr to get RELA_START and END */
3:      ldr     r1, =__RELA_START__
        add     r1, r1, r0
        ldr     r2, =__RELA_END__
        add     r2, r2, r0

        /*
         * According to the ELF-32 specification, each entry in the dynamic
         * relocation section is an Elf32_Rel structure:
         *      typedef struct {
         *              Elf32_Addr r_offset;
         *              Elf32_Word r_info;
         *      } Elf32_Rel;
         *
         * r_offset is the address of the reference.
         * r_info holds the symbol index and the type of relocation (in this
         * case code 23, which corresponds to R_ARM_RELATIVE).
         *
         * The size of an Elf32_Rel entry is 8 bytes.
         */
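        /*
         * Illustrative only (not part of the original file): the loop below
         * expressed as C-like pseudocode, using hypothetical helper names for
         * clarity.
         *
         *     typedef struct { uint32_t r_offset; uint32_t r_info; } elf32_rel_t;
         *
         *     for (elf32_rel_t *rel = rela_start; rel < rela_end; rel++) {
         *             if ((rel->r_info & 0xff) == 0)          // R_ARM_NONE
         *                     continue;
         *             uint32_t *where = (uint32_t *)(diff + rel->r_offset);
         *             uint32_t value = *where;
         *             if (value >= lower_limit && value <= upper_limit)
         *                     *where = value + diff;          // R_ARM_RELATIVE
         *     }
         */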
        /* Skip R_ARM_NONE entry with code 0 */
1:      ldr     r3, [r1, #4]
        ands    r3, r3, #0xff
        beq     2f

#if ENABLE_ASSERTIONS
        /* Assert that the relocation type is R_ARM_RELATIVE */
        cmp     r3, #R_ARM_RELATIVE
        ASM_ASSERT(eq)
#endif
        ldr     r3, [r1]        /* r_offset */
        add     r3, r0, r3      /* Diff(S) + r_offset */
        ldr     r4, [r3]

        /* Skip adding offset if address is < lower limit */
        cmp     r4, r6
        blo     2f

        /* Skip adding offset if address is > upper limit */
        cmp     r4, r7
        bhi     2f

        add     r4, r0, r4
        str     r4, [r3]

2:      add     r1, r1, #8
        cmp     r1, r2
        blo     1b
        bx      lr
endfunc fixup_gdt_reloc