xlat_tables.c

/*
 * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

#include <arch.h>
#include <arch_features.h>
#include <common/bl_common.h>
#include <lib/utils.h>
#include <lib/xlat_tables/xlat_tables.h>
#include <lib/xlat_tables/xlat_tables_arch.h>
#include <plat/common/common_def.h>

#include "../xlat_tables_private.h"

#define XLAT_TABLE_LEVEL_BASE \
        GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

#define NUM_BASE_LEVEL_ENTRIES \
        GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

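/*
 * Base-level translation table. The VMSAv8-64 translation system requires
 * the initial lookup table programmed into TTBR0_ELx to be aligned to its
 * own size, hence the __aligned() attribute on the definition below.
 */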
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
                __aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));

static unsigned long long tcr_ps_bits;

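/*
 * Map the highest physical address in use to the narrowest TCR_ELx.{I}PS
 * physical address size encoding that covers it.
 */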
static unsigned long long calc_physical_addr_size_bits(
                                unsigned long long max_addr)
{
        /* Physical address can't exceed 48 bits */
        assert((max_addr & ADDR_MASK_48_TO_63) == 0U);

        /* 48 bits address */
        if ((max_addr & ADDR_MASK_44_TO_47) != 0U)
                return TCR_PS_BITS_256TB;

        /* 44 bits address */
        if ((max_addr & ADDR_MASK_42_TO_43) != 0U)
                return TCR_PS_BITS_16TB;

        /* 42 bits address */
        if ((max_addr & ADDR_MASK_40_TO_41) != 0U)
                return TCR_PS_BITS_4TB;

        /* 40 bits address */
        if ((max_addr & ADDR_MASK_36_TO_39) != 0U)
                return TCR_PS_BITS_1TB;

        /* 36 bits address */
        if ((max_addr & ADDR_MASK_32_TO_35) != 0U)
                return TCR_PS_BITS_64GB;

        return TCR_PS_BITS_4GB;
}

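/*
 * Worked example (illustrative): for max_addr = 0x1FFFFFFFFULL the highest
 * set bit is bit 32, so the ADDR_MASK_32_TO_35 test matches and the
 * function returns TCR_PS_BITS_64GB, i.e. a 36-bit physical address range.
 */
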
#if ENABLE_ASSERTIONS
/*
 * Physical address ranges supported in the AArch64 memory model. Value 0b110
 * is supported from Armv8.2 onwards.
 */
static const unsigned int pa_range_bits_arr[] = {
        PARANGE_0000, PARANGE_0001, PARANGE_0010, PARANGE_0011, PARANGE_0100,
        PARANGE_0101, PARANGE_0110, PARANGE_0111
};

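/*
 * Return the maximum physical address the PE supports, as advertised by
 * the ID_AA64MMFR0_EL1.PARange field.
 */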
static unsigned long long get_max_supported_pa(void)
{
        u_register_t pa_range = read_id_aa64mmfr0_el1() &
                                ID_AA64MMFR0_EL1_PARANGE_MASK;

        /* All other values are reserved */
        assert(pa_range < ARRAY_SIZE(pa_range_bits_arr));

        return (1ULL << pa_range_bits_arr[pa_range]) - 1ULL;
}

/*
 * Return the minimum virtual address space size supported by the
 * architecture.
 */
static uintptr_t xlat_get_min_virt_addr_space_size(void)
{
        uintptr_t ret;

        if (is_feat_ttst_present())
                ret = MIN_VIRT_ADDR_SPACE_SIZE_TTST;
        else
                ret = MIN_VIRT_ADDR_SPACE_SIZE;

        return ret;
}
#endif /* ENABLE_ASSERTIONS */

unsigned int xlat_arch_current_el(void)
{
        unsigned int el = (unsigned int)GET_EL(read_CurrentEl());

        assert(el > 0U);

        return el;
}

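/*
 * Return the execute-never descriptor attribute for the given exception
 * level. The EL3 translation regime has a single XN bit, while the EL1&0
 * regime splits execute-never control into UXN and PXN; this library
 * manages privileged mappings, so it uses PXN when running at EL1.
 */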
uint64_t xlat_arch_get_xn_desc(unsigned int el)
{
        if (el == 3U) {
                return UPPER_ATTRS(XN);
        } else {
                assert(el == 1U);
                return UPPER_ATTRS(PXN);
        }
}

void init_xlat_tables(void)
{
        unsigned long long max_pa;
        uintptr_t max_va;

        assert(PLAT_VIRT_ADDR_SPACE_SIZE >=
                (xlat_get_min_virt_addr_space_size() - 1U));
        assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
        assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));

        print_mmap();
        init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
                           &max_va, &max_pa);

        assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
        assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
        assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());

        tcr_ps_bits = calc_physical_addr_size_bits(max_pa);
}

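/*
 * Usage sketch (illustrative, not part of this file): a BL image typically
 * registers its memory regions through the public xlat_tables.h API, builds
 * the tables once, then enables the MMU at its own exception level. The
 * region below is a hypothetical example:
 *
 *      mmap_add_region(BL31_BASE, BL31_BASE, BL31_SIZE,
 *                      MT_MEMORY | MT_RW | MT_SECURE);
 *      init_xlat_tables();
 *      enable_mmu_el3(0);
 */
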
/*******************************************************************************
 * Macro generating the code for the function enabling the MMU in the given
 * exception level, assuming that the pagetables have already been created.
 *
 *   _el:        Exception level at which the function will run
 *   _tcr_extra: Extra bits to set in the TCR register. This mask will be
 *               OR'ed with the default TCR value.
 *   _tlbi_fct:  Function to invalidate the TLBs at the current exception
 *               level
 ******************************************************************************/
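/*
 * Note: this has to be a macro rather than a function taking the EL as a
 * runtime parameter, because accessors such as write_tcr_el3() encode the
 * target register (and hence the EL) in the instruction itself. As a worked
 * example of the T0SZ computation below, a 39-bit virtual address space
 * (PLAT_VIRT_ADDR_SPACE_SIZE == 1ULL << 39) yields t0sz = 64 - 39 = 25.
 */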
#define DEFINE_ENABLE_MMU_EL(_el, _tcr_extra, _tlbi_fct) \
        void enable_mmu_el##_el(unsigned int flags) \
        { \
                uint64_t mair, tcr, ttbr; \
                uint32_t sctlr; \
 \
                assert(IS_IN_EL(_el)); \
                assert((read_sctlr_el##_el() & SCTLR_M_BIT) == 0U); \
 \
                /* Set attributes in the right indices of the MAIR */ \
                mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX); \
                mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, \
                                ATTR_IWBWA_OWBWA_NTR_INDEX); \
                mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE, \
                                ATTR_NON_CACHEABLE_INDEX); \
                write_mair_el##_el(mair); \
 \
                /* Invalidate TLBs at the current exception level */ \
                _tlbi_fct(); \
 \
                /* Set TCR bits as well. */ \
                /* Set T0SZ to (64 - width of virtual address space) */ \
                int t0sz = 64 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE); \
 \
                if ((flags & XLAT_TABLE_NC) != 0U) { \
                        /* Inner & outer non-cacheable non-shareable. */ \
                        tcr = TCR_SH_NON_SHAREABLE | \
                                TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC | \
                                ((uint64_t)t0sz << TCR_T0SZ_SHIFT); \
                } else { \
                        /* Inner & outer WBWA & shareable. */ \
                        tcr = TCR_SH_INNER_SHAREABLE | \
                                TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA | \
                                ((uint64_t)t0sz << TCR_T0SZ_SHIFT); \
                } \
                tcr |= _tcr_extra; \
                write_tcr_el##_el(tcr); \
 \
                /* Set TTBR bits as well */ \
                ttbr = (uint64_t) base_xlation_table; \
                write_ttbr0_el##_el(ttbr); \
 \
                /* Ensure all translation table writes have drained */ \
                /* into memory, the TLB invalidation is complete, */ \
                /* and translation register writes are committed */ \
                /* before enabling the MMU */ \
                dsbish(); \
                isb(); \
 \
                sctlr = read_sctlr_el##_el(); \
                sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
 \
                if ((flags & DISABLE_DCACHE) != 0U) \
                        sctlr &= ~SCTLR_C_BIT; \
                else \
                        sctlr |= SCTLR_C_BIT; \
 \
                write_sctlr_el##_el(sctlr); \
 \
                /* Ensure the MMU enable takes effect immediately */ \
                isb(); \
        } \
 \
        void enable_mmu_direct_el##_el(unsigned int flags) \
        { \
                enable_mmu_el##_el(flags); \
        }

/* Define EL1 and EL3 variants of the function enabling the MMU */
DEFINE_ENABLE_MMU_EL(1,
                /*
                 * TCR_EL1.EPD1: Disable translation table walk for addresses
                 * that are translated using TTBR1_EL1.
                 */
                TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT),
                tlbivmalle1)
DEFINE_ENABLE_MMU_EL(3,
                TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT),
                tlbialle3)