denver.S

/*
 * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
 * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <context.h>
#include <denver.h>
#include <cpu_macros.S>
#include <plat_macros.S>

        /* -------------------------------------------------
         * CVE-2017-5715 mitigation
         *
         * Flush the indirect branch predictor and RSB on
         * entry to EL3 by issuing a newly added instruction
         * for Denver CPUs.
         *
         * To achieve this without performing any branch
         * instruction, a per-cpu vbar is installed which
         * executes the workaround and then branches off to
         * the corresponding vector entry in the main vector
         * table.
         * -------------------------------------------------
         */
vector_base workaround_bpflush_runtime_exceptions

        .macro  apply_workaround
        stp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]

        /* Disable cycle counter when event counting is prohibited */
        mrs     x1, pmcr_el0
        orr     x0, x1, #PMCR_EL0_DP_BIT
        msr     pmcr_el0, x0
        isb

        /* -------------------------------------------------
         * A new write-only system register where a write of
         * 1 to bit 0 will cause the indirect branch predictor
         * and RSB to be flushed.
         *
         * A write of 0 to bit 0 will be ignored. A write of
         * 1 to any other bit will cause an MCA.
         * -------------------------------------------------
         */
        mov     x0, #1
        msr     s3_0_c15_c0_6, x0
        isb

        ldp     x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
        .endm

        /* ---------------------------------------------------------------------
         * Current EL with SP_EL0 : 0x0 - 0x200
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpflush_sync_exception_sp_el0
        b       sync_exception_sp_el0
end_vector_entry workaround_bpflush_sync_exception_sp_el0

vector_entry workaround_bpflush_irq_sp_el0
        b       irq_sp_el0
end_vector_entry workaround_bpflush_irq_sp_el0

vector_entry workaround_bpflush_fiq_sp_el0
        b       fiq_sp_el0
end_vector_entry workaround_bpflush_fiq_sp_el0

vector_entry workaround_bpflush_serror_sp_el0
        b       serror_sp_el0
end_vector_entry workaround_bpflush_serror_sp_el0

        /* ---------------------------------------------------------------------
         * Current EL with SP_ELx : 0x200 - 0x400
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpflush_sync_exception_sp_elx
        b       sync_exception_sp_elx
end_vector_entry workaround_bpflush_sync_exception_sp_elx

vector_entry workaround_bpflush_irq_sp_elx
        b       irq_sp_elx
end_vector_entry workaround_bpflush_irq_sp_elx

vector_entry workaround_bpflush_fiq_sp_elx
        b       fiq_sp_elx
end_vector_entry workaround_bpflush_fiq_sp_elx

vector_entry workaround_bpflush_serror_sp_elx
        b       serror_sp_elx
end_vector_entry workaround_bpflush_serror_sp_elx

        /* ---------------------------------------------------------------------
         * Lower EL using AArch64 : 0x400 - 0x600
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpflush_sync_exception_aarch64
        apply_workaround
        b       sync_exception_aarch64
end_vector_entry workaround_bpflush_sync_exception_aarch64

vector_entry workaround_bpflush_irq_aarch64
        apply_workaround
        b       irq_aarch64
end_vector_entry workaround_bpflush_irq_aarch64

vector_entry workaround_bpflush_fiq_aarch64
        apply_workaround
        b       fiq_aarch64
end_vector_entry workaround_bpflush_fiq_aarch64

vector_entry workaround_bpflush_serror_aarch64
        apply_workaround
        b       serror_aarch64
end_vector_entry workaround_bpflush_serror_aarch64

        /* ---------------------------------------------------------------------
         * Lower EL using AArch32 : 0x600 - 0x800
         * ---------------------------------------------------------------------
         */
vector_entry workaround_bpflush_sync_exception_aarch32
        apply_workaround
        b       sync_exception_aarch32
end_vector_entry workaround_bpflush_sync_exception_aarch32

vector_entry workaround_bpflush_irq_aarch32
        apply_workaround
        b       irq_aarch32
end_vector_entry workaround_bpflush_irq_aarch32

vector_entry workaround_bpflush_fiq_aarch32
        apply_workaround
        b       fiq_aarch32
end_vector_entry workaround_bpflush_fiq_aarch32

vector_entry workaround_bpflush_serror_aarch32
        apply_workaround
        b       serror_aarch32
end_vector_entry workaround_bpflush_serror_aarch32

        .global denver_disable_dco

        /* ---------------------------------------------
         * Disable debug interfaces
         * ---------------------------------------------
         */
func denver_disable_ext_debug
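        /* Set the OS Double Lock (OSDLR_EL1.DLK) to quiesce the debug interfaces */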
        mov     x0, #1
        msr     osdlr_el1, x0
        isb
        dsb     sy
        ret
endfunc denver_disable_ext_debug

        /* ----------------------------------------------------
         * Enable dynamic code optimizer (DCO)
         * ----------------------------------------------------
         */
func denver_enable_dco
        /* DCO is not supported on PN5 and later */
        mrs     x1, midr_el1
        mov_imm x2, DENVER_MIDR_PN4
        cmp     x1, x2
        b.hi    1f
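
        /* Preserve the link register across the helper call */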
        mov     x18, x30
        bl      plat_my_core_pos
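        /*
         * Set this core's bit in the implementation-defined DCO
         * control register to enable background optimization work.
         */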
        mov     x1, #1
        lsl     x1, x1, x0
        msr     s3_0_c15_c0_2, x1
        mov     x30, x18
1:      ret
endfunc denver_enable_dco

        /* ----------------------------------------------------
         * Disable dynamic code optimizer (DCO)
         * ----------------------------------------------------
         */
func denver_disable_dco
        /* DCO is not supported on PN5 and later */
        mrs     x1, midr_el1
        mov_imm x2, DENVER_MIDR_PN4
        cmp     x1, x2
        b.hi    2f

        /* Turn off background work */
        mov     x18, x30
        bl      plat_my_core_pos
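        /*
         * The disable requests appear to occupy bits [31:16] of the
         * DCO control register: shift this core's bit into that field.
         */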
        mov     x1, #1
        lsl     x1, x1, x0
        lsl     x2, x1, #16
        msr     s3_0_c15_c0_2, x2
        isb

        /* Wait until the background work turns off */
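        /*
         * Bits [47:32] appear to report cores whose background work is
         * still active; poll until this core's status bit reads as zero.
         */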
1:      mrs     x2, s3_0_c15_c0_2
        lsr     x2, x2, #32
        and     w2, w2, #0xFFFF
        and     x2, x2, x1
        cbnz    x2, 1b

        mov     x30, x18
2:      ret
endfunc denver_disable_dco

workaround_reset_start denver, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
        adr     x1, workaround_bpflush_runtime_exceptions
        msr     vbar_el3, x1
#endif
workaround_reset_end denver, CVE(2017, 5715)

check_erratum_custom_start denver, CVE(2017, 5715)
        mov     x0, #ERRATA_MISSING
#if WORKAROUND_CVE_2017_5715
        /*
         * Check if the CPU supports the special instruction
         * required to flush the indirect branch predictor and
         * RSB. Support for this operation can be determined by
         * comparing bits 19:16 of ID_AFR0_EL1 with 0b0001.
         */
        mrs     x1, id_afr0_el1
        mov     x2, #0x10000
        and     x1, x1, x2
        cbz     x1, 1f
        mov     x0, #ERRATA_APPLIES
1:
#endif
        ret
check_erratum_custom_end denver, CVE(2017, 5715)

workaround_reset_start denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
        /*
         * Denver CPUs with DENVER_MIDR_PN3 or earlier use different
         * bits in the ACTLR_EL3 register to disable the speculative
         * store buffer and memory disambiguation.
         */
        mrs     x0, midr_el1
        mov_imm x1, DENVER_MIDR_PN4
        cmp     x0, x1
        mrs     x0, actlr_el3
        mov     x1, #(DENVER_CPU_DIS_MD_EL3 | DENVER_CPU_DIS_SSB_EL3)
        mov     x2, #(DENVER_PN4_CPU_DIS_MD_EL3 | DENVER_PN4_CPU_DIS_SSB_EL3)
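        /* Select the pre-PN4 encodings unless the MIDR matched DENVER_MIDR_PN4 */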
        csel    x3, x1, x2, ne
        orr     x0, x0, x3
        msr     actlr_el3, x0
        isb
        dsb     sy
workaround_reset_end denver, CVE(2018, 3639)

check_erratum_chosen denver, CVE(2018, 3639), WORKAROUND_CVE_2018_3639

cpu_reset_func_start denver
        /* ----------------------------------------------------
         * Reset ACTLR.PMSTATE to C1 state
         * ----------------------------------------------------
         */
        mrs     x0, actlr_el1
        bic     x0, x0, #DENVER_CPU_PMSTATE_MASK
        orr     x0, x0, #DENVER_CPU_PMSTATE_C1
        msr     actlr_el1, x0

        /* ----------------------------------------------------
         * Enable dynamic code optimizer (DCO)
         * ----------------------------------------------------
         */
        bl      denver_enable_dco
cpu_reset_func_end denver

        /* ----------------------------------------------------
         * The CPU Ops core power down function for Denver.
         * ----------------------------------------------------
         */
func denver_core_pwr_dwn
        mov     x19, x30

        /* ---------------------------------------------
         * Force the debug interfaces to be quiescent
         * ---------------------------------------------
         */
        bl      denver_disable_ext_debug

        ret     x19
endfunc denver_core_pwr_dwn

        /* -------------------------------------------------------
         * The CPU Ops cluster power down function for Denver.
         * -------------------------------------------------------
         */
func denver_cluster_pwr_dwn
        ret
endfunc denver_cluster_pwr_dwn

        /* ---------------------------------------------
         * This function provides Denver-specific
         * register information for crash reporting.
         * It needs to return with x6 pointing to
         * a list of register names in ASCII and
         * x8 - x15 holding the values of the registers
         * to be reported.
         * ---------------------------------------------
         */
.section .rodata.denver_regs, "aS"
denver_regs:    /* The ASCII list of register names to be reported */
        .asciz  "actlr_el1", ""

func denver_cpu_reg_dump
        adr     x6, denver_regs
        mrs     x8, actlr_el1
        ret
endfunc denver_cpu_reg_dump

/* Macro to declare cpu_ops for Denver SKUs */
        .macro  denver_cpu_ops_wa midr
        declare_cpu_ops_wa denver, \midr, \
                denver_reset_func, \
                check_erratum_denver_5715, \
                CPU_NO_EXTRA2_FUNC, \
                CPU_NO_EXTRA3_FUNC, \
                denver_core_pwr_dwn, \
                denver_cluster_pwr_dwn
        .endm

denver_cpu_ops_wa DENVER_MIDR_PN0
denver_cpu_ops_wa DENVER_MIDR_PN1
denver_cpu_ops_wa DENVER_MIDR_PN2
denver_cpu_ops_wa DENVER_MIDR_PN3
denver_cpu_ops_wa DENVER_MIDR_PN4
denver_cpu_ops_wa DENVER_MIDR_PN5
denver_cpu_ops_wa DENVER_MIDR_PN6
denver_cpu_ops_wa DENVER_MIDR_PN7
denver_cpu_ops_wa DENVER_MIDR_PN8
denver_cpu_ops_wa DENVER_MIDR_PN9