xlat_tables_arch.c

/*
 * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/cassert.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "../xlat_tables_private.h"

/*
 * Returns true if the provided granule size is supported, false otherwise.
 */
bool xlat_arch_is_granule_size_supported(size_t size)
{
	if (size == PAGE_SIZE_4KB) {
		/* MSB of the TGRAN4 field is '1' when the feature is unsupported */
		return is_feat_tgran4K_present();
	} else if (size == PAGE_SIZE_16KB) {
		/* Unlike TGRAN4/TGRAN64, the TGRAN16 field reads as '0' when unsupported */
		return is_feat_tgran16K_present();
	} else if (size == PAGE_SIZE_64KB) {
		/* MSB of the TGRAN64 field is '1' when the feature is unsupported */
		return is_feat_tgran64K_present();
	} else {
		return false;
	}
}
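
/*
 * Return the largest translation granule size supported by this PE, probing
 * from 64KB down to 4KB.
 */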
size_t xlat_arch_get_max_supported_granule_size(void)
{
	if (xlat_arch_is_granule_size_supported(PAGE_SIZE_64KB)) {
		return PAGE_SIZE_64KB;
	} else if (xlat_arch_is_granule_size_supported(PAGE_SIZE_16KB)) {
		return PAGE_SIZE_16KB;
	} else {
		assert(xlat_arch_is_granule_size_supported(PAGE_SIZE_4KB));
		return PAGE_SIZE_4KB;
	}
}

/*
 * Determine the physical address space encoded in the 'attr' parameter.
 *
 * The physical address will fall into one of four spaces (Secure, Non-secure,
 * Root or Realm) if RME is enabled, or one of two spaces (Secure and
 * Non-secure) otherwise.
 */
uint32_t xlat_arch_get_pas(uint32_t attr)
{
	uint32_t pas = MT_PAS(attr);

	switch (pas) {
#if ENABLE_RME
	/* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */
	case MT_REALM:
		return LOWER_ATTRS(EL3_S1_NSE | NS);
	/* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */
	case MT_ROOT:
		return LOWER_ATTRS(EL3_S1_NSE);
#endif
	case MT_NS:
		return LOWER_ATTRS(NS);
	default: /* MT_SECURE */
		return 0U;
	}
}
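
/*
 * Map the maximum physical address in use onto the PS/IPS field encoding of
 * TCR_ELx, i.e. pick the smallest architected physical address size that
 * still covers 'max_addr'. For example, max_addr = 0xFFFFFFFFFULL (36 address
 * bits) yields TCR_PS_BITS_64GB.
 */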
unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr)
{
	/* Physical address can't exceed 48 bits */
	assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

	/* 48 bits address */
	if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
		return TCR_PS_BITS_256TB;

	/* 44 bits address */
	if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
		return TCR_PS_BITS_16TB;

	/* 42 bits address */
	if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
		return TCR_PS_BITS_4TB;

	/* 40 bits address */
	if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
		return TCR_PS_BITS_1TB;

	/* 36 bits address */
	if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
		return TCR_PS_BITS_64GB;

	return TCR_PS_BITS_4GB;
}

#if ENABLE_ASSERTIONS
/*
 * Physical Address ranges supported in the AArch64 Memory Model. Value 0b110
 * is supported in ARMv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
	PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
	PARANGE_0101, PARANGE_0110, PARANGE_0111
};
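
/*
 * Return the maximum physical address supported by the PE, as reported by the
 * ID_AA64MMFR0_EL1.PARange field.
 */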
unsigned long long xlat_arch_get_max_supported_pa(void)
{
	u_register_t pa_range = read_id_aa64mmfr0_el1() &
						ID_AA64MMFR0_EL1_PARANGE_MASK;

	/* All other values are reserved */
	assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

	return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return minimum virtual address space size supported by the architecture.
 */
uintptr_t xlat_get_min_virt_addr_space_size(void)
{
	uintptr_t ret;

	if (is_feat_ttst_present())
		ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
	else
		ret = MIN_VIRT_ADDR_SPACE_SIZE;

	return ret;
}
#endif /* ENABLE_ASSERTIONS */
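
/*
 * Return true if the MMU of the translation regime managed by the given
 * context is enabled, i.e. if the M bit is set in the SCTLR of the relevant
 * Exception level.
 */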
bool is_mmu_enabled_ctx(const xlat_ctx_t *ctx)
{
	if (ctx->xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		return (read_sctlr_el1() & SCTLR_M_BIT) != 0U;
	} else if (ctx->xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		return (read_sctlr_el2() & SCTLR_M_BIT) != 0U;
	} else {
		assert(ctx->xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		return (read_sctlr_el3() & SCTLR_M_BIT) != 0U;
	}
}
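
/*
 * Return true if the data cache is enabled at the current Exception level,
 * i.e. if SCTLR_ELx.C is set.
 */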
bool is_dcache_enabled(void)
{
	unsigned int el = get_current_el_maybe_constant();

	if (el == 1U) {
		return (read_sctlr_el1() & SCTLR_C_BIT) != 0U;
	} else if (el == 2U) {
		return (read_sctlr_el2() & SCTLR_C_BIT) != 0U;
	} else {
		return (read_sctlr_el3() & SCTLR_C_BIT) != 0U;
	}
}
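
/*
 * Return the Upper Attributes execute-never mask for the given translation
 * regime: the EL1&0 regime has separate UXN and PXN bits, while the EL2 and
 * EL3 regimes have a single XN bit.
 */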
uint64_t xlat_arch_regime_get_xn_desc(int xlat_regime)
{
	if (xlat_regime == EL1_EL0_REGIME) {
		return UPPER_ATTRS(UXN) | UPPER_ATTRS(PXN);
	} else {
		assert((xlat_regime == EL2_REGIME) ||
		       (xlat_regime == EL3_REGIME));
		return UPPER_ATTRS(XN);
	}
}
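
/*
 * Invalidate, in the Inner Shareable domain, any TLB entries for the given
 * virtual address and translation regime. Callers must follow up with
 * xlat_arch_tlbi_va_sync() to wait for completion.
 */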
void xlat_arch_tlbi_va(uintptr_t va, int xlat_regime)
{
	/*
	 * Ensure the translation table write has drained into memory before
	 * invalidating the TLB entry.
	 */
	dsbishst();

	/*
	 * This function only supports invalidation of TLB entries for the
	 * EL3, EL2 and EL1&0 translation regimes.
	 *
	 * Also, it is architecturally UNDEFINED to invalidate TLBs of a higher
	 * exception level (see section D4.9.2 of the ARM ARM rev B.a).
	 */
	if (xlat_regime == EL1_EL0_REGIME) {
		assert(xlat_arch_current_el() >= 1U);
		tlbivaae1is(TLBI_ADDR(va));
	} else if (xlat_regime == EL2_REGIME) {
		assert(xlat_arch_current_el() >= 2U);
		tlbivae2is(TLBI_ADDR(va));
	} else {
		assert(xlat_regime == EL3_REGIME);
		assert(xlat_arch_current_el() >= 3U);
		tlbivae3is(TLBI_ADDR(va));
	}
}

void xlat_arch_tlbi_va_sync(void)
{
	/*
	 * A TLB maintenance instruction can complete at any time after
	 * it is issued, but is only guaranteed to be complete after the
	 * execution of DSB by the PE that executed the TLB maintenance
	 * instruction. After the TLB invalidate instruction is
	 * complete, no new memory accesses using the invalidated TLB
	 * entries will be observed by any observer of the system
	 * domain. See section D4.8.2 of the ARMv8 ARM (issue k),
	 * paragraph "Ordering and completion of TLB maintenance
	 * instructions".
	 */
	dsbish();

	/*
	 * The effects of a completed TLB maintenance instruction are
	 * only guaranteed to be visible on the PE that executed the
	 * instruction after the execution of an ISB instruction by the
	 * PE that executed the TLB maintenance instruction.
	 */
	isb();
}
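
/*
 * Return the current Exception level, as read from CurrentEL. The translation
 * library is expected to run at EL1 or higher, hence the assertion.
 */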
unsigned int xlat_arch_current_el(void)
{
	unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

	assert(el > 0U);

	return el;
}
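
/*
 * Compute the MAIR, TCR and TTBR0 values needed to enable the MMU for the
 * given translation regime and store them in the 'params' array, to be
 * programmed into the corresponding system registers when the MMU is
 * actually enabled.
 */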
void setup_mmu_cfg(uint64_t *params, unsigned int flags,
		   const uint64_t *base_table, unsigned long long max_pa,
		   uintptr_t max_va, int xlat_regime)
{
	uint64_t mair, ttbr0, tcr;
	uintptr_t virtual_addr_space_size;

	/* Set attributes in the right indices of the MAIR. */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	/*
	 * Limit the input address ranges and memory region sizes translated
	 * using TTBR0 to the given virtual address space size.
	 */
	assert(max_va < ((uint64_t)UINTPTR_MAX));

	virtual_addr_space_size = (uintptr_t)max_va + 1U;

	assert(virtual_addr_space_size >=
		xlat_get_min_virt_addr_space_size());
	assert(virtual_addr_space_size <= MAX_VIRT_ADDR_SPACE_SIZE);
	assert(IS_POWER_OF_TWO(virtual_addr_space_size));

	/*
	 * __builtin_ctzll(0) is undefined but here we are guaranteed that
	 * virtual_addr_space_size is in the range [1, UINTPTR_MAX].
	 */
	int t0sz = 64 - __builtin_ctzll(virtual_addr_space_size);
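
	/*
	 * Worked example: a 4 GiB address space gives
	 * virtual_addr_space_size = 1ULL << 32, so t0sz = 64 - 32 = 32 and
	 * TCR.T0SZ selects a 2^(64 - 32) = 4 GiB input range.
	 */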

	tcr = (uint64_t)t0sz << TCR_T0SZ_SHIFT;

	/*
	 * Set the cacheability and shareability attributes for memory
	 * associated with translation table walks.
	 */
	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		tcr |= TCR_SH_NON_SHAREABLE |
			TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
	} else {
		/* Inner & outer WBWA & shareable. */
		tcr |= TCR_SH_INNER_SHAREABLE |
			TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA;
	}

	/*
	 * It is safer to restrict the max physical address accessible by the
	 * hardware as much as possible.
	 */
	unsigned long long tcr_ps_bits = tcr_physical_addr_size_bits(max_pa);

	if (xlat_regime == EL1_EL0_REGIME) {
		/*
		 * TCR_EL1.EPD1: Disable translation table walk for addresses
		 * that are translated using TTBR1_EL1.
		 */
		tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
	} else if (xlat_regime == EL2_REGIME) {
		tcr |= TCR_EL2_RES1 | (tcr_ps_bits << TCR_EL2_PS_SHIFT);
	} else {
		assert(xlat_regime == EL3_REGIME);
		tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
	}

	/* Set TTBR bits as well */
	ttbr0 = (uint64_t) base_table;

	if (is_feat_ttcnp_present()) {
		/* Enable CnP bit so as to share page tables with all PEs. */
		ttbr0 |= TTBR_CNP_BIT;
	}

	params[MMU_CFG_MAIR] = mair;
	params[MMU_CFG_TCR] = tcr;
	params[MMU_CFG_TTBR0] = ttbr0;
}
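
/*
 * Usage sketch (not part of this file; names as used elsewhere in the xlat
 * tables library): the computed values are typically stored in the global
 * 'mmu_cfg_params' array and programmed by the per-EL enable-MMU helpers,
 * roughly:
 *
 *   setup_mmu_cfg(mmu_cfg_params, flags, base_xlat_table, max_pa, max_va,
 *                 EL3_REGIME);
 *   enable_mmu_direct_el3(flags);
 */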