  1. /*
  2. * Copyright (c) 2016-2018, Arm Limited and Contributors. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <platform_def.h>
  8. #include <arch.h>
  9. #include <arch_helpers.h>
  10. #include <lib/utils.h>
  11. #include <lib/xlat_tables/xlat_tables_arch.h>
  12. #include <lib/xlat_tables/xlat_tables.h>
  13. #include "../xlat_tables_private.h"
  14. #if (ARM_ARCH_MAJOR == 7) && !defined(ARMV7_SUPPORTS_LARGE_PAGE_ADDRESSING)
  15. #error ARMv7 target does not support LPAE MMU descriptors
  16. #endif
/*
 * Translation-table level at which the base (first-level) table sits,
 * derived from the platform's configured virtual address space size.
 */
#define XLAT_TABLE_LEVEL_BASE	\
	GET_XLAT_TABLE_LEVEL_BASE(PLAT_VIRT_ADDR_SPACE_SIZE)

/* Number of entries required in the base-level translation table. */
#define NUM_BASE_LEVEL_ENTRIES	\
	GET_NUM_BASE_LEVEL_ENTRIES(PLAT_VIRT_ADDR_SPACE_SIZE)

/*
 * Base-level translation table. LPAE requires the base table to be
 * aligned to its own size, hence the __aligned attribute below.
 */
static uint64_t base_xlation_table[NUM_BASE_LEVEL_ENTRIES]
		__aligned(NUM_BASE_LEVEL_ENTRIES * sizeof(uint64_t));
  23. #if ENABLE_ASSERTIONS
  24. static unsigned long long get_max_supported_pa(void)
  25. {
  26. /* Physical address space size for long descriptor format. */
  27. return (1ULL << 40) - 1ULL;
  28. }
  29. #endif /* ENABLE_ASSERTIONS */
  30. unsigned int xlat_arch_current_el(void)
  31. {
  32. /*
  33. * If EL3 is in AArch32 mode, all secure PL1 modes (Monitor, System,
  34. * SVC, Abort, UND, IRQ and FIQ modes) execute at EL3.
  35. */
  36. return 3U;
  37. }
  38. uint64_t xlat_arch_get_xn_desc(unsigned int el __unused)
  39. {
  40. return UPPER_ATTRS(XN);
  41. }
  42. void init_xlat_tables(void)
  43. {
  44. unsigned long long max_pa;
  45. uintptr_t max_va;
  46. assert(PLAT_VIRT_ADDR_SPACE_SIZE >= MIN_VIRT_ADDR_SPACE_SIZE);
  47. assert(PLAT_VIRT_ADDR_SPACE_SIZE <= MAX_VIRT_ADDR_SPACE_SIZE);
  48. assert(IS_POWER_OF_TWO(PLAT_VIRT_ADDR_SPACE_SIZE));
  49. print_mmap();
  50. init_xlation_table(0U, base_xlation_table, XLAT_TABLE_LEVEL_BASE,
  51. &max_va, &max_pa);
  52. assert(max_va <= (PLAT_VIRT_ADDR_SPACE_SIZE - 1U));
  53. assert(max_pa <= (PLAT_PHY_ADDR_SPACE_SIZE - 1U));
  54. assert((PLAT_PHY_ADDR_SPACE_SIZE - 1U) <= get_max_supported_pa());
  55. }
/*
 * Program MAIR0, TTBCR and TTBR0/1 from the already-built translation
 * tables and turn on the MMU (long-descriptor/LPAE format) for the
 * secure PL1&0 translation regime.
 *
 * flags: XLAT_TABLE_NC selects non-cacheable table walks;
 *        DISABLE_DCACHE leaves the data cache disabled.
 *
 * Must be called in Secure state with the MMU currently off (asserted
 * below). The statement order is part of the contract: translation
 * registers are written, then a dsbish/isb pair drains the writes,
 * and only then is SCTLR.M set.
 */
void enable_mmu_svc_mon(unsigned int flags)
{
	unsigned int mair0, ttbcr, sctlr;
	uint64_t ttbr0;

	assert(IS_IN_SECURE());
	/* The MMU must not already be enabled. */
	assert((read_sctlr() & SCTLR_M_BIT) == 0U);

	/* Set attributes in the right indices of the MAIR */
	mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair0 |= MAIR0_ATTR_SET(ATTR_NON_CACHEABLE,
			ATTR_NON_CACHEABLE_INDEX);
	write_mair0(mair0);

	/* Invalidate TLBs at the current exception level */
	tlbiall();

	/*
	 * Set TTBCR bits as well. Set TTBR0 table properties. Disable TTBR1.
	 */
	/*
	 * T0SZ = 32 - log2(VA space size); valid because the VA space
	 * size is asserted to be a power of two in init_xlat_tables().
	 */
	int t0sz = 32 - __builtin_ctzll(PLAT_VIRT_ADDR_SPACE_SIZE);

	if ((flags & XLAT_TABLE_NC) != 0U) {
		/* Inner & outer non-cacheable non-shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_NON_SHAREABLE | TTBCR_RGN0_OUTER_NC |
			TTBCR_RGN0_INNER_NC | (uint32_t) t0sz;
	} else {
		/* Inner & outer WBWA & shareable. */
		ttbcr = TTBCR_EAE_BIT |
			TTBCR_SH0_INNER_SHAREABLE | TTBCR_RGN0_OUTER_WBA |
			TTBCR_RGN0_INNER_WBA | (uint32_t) t0sz;
	}
	/* EPD1 disables translation table walks via TTBR1. */
	ttbcr |= TTBCR_EPD1_BIT;
	write_ttbcr(ttbcr);

	/* Set TTBR0 bits as well */
	ttbr0 = (uintptr_t) base_xlation_table;
	write64_ttbr0(ttbr0);
	write64_ttbr1(0U);

	/*
	 * Ensure all translation table writes have drained
	 * into memory, the TLB invalidation is complete,
	 * and translation register writes are committed
	 * before enabling the MMU
	 */
	dsbish();
	isb();

	sctlr = read_sctlr();
	/* Enable the MMU and write-implies-XN. */
	sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

	if ((flags & DISABLE_DCACHE) != 0U)
		sctlr &= ~SCTLR_C_BIT;
	else
		sctlr |= SCTLR_C_BIT;

	write_sctlr(sctlr);

	/* Ensure the MMU enable takes effect immediately */
	isb();
}
/*
 * Direct MMU-enable entry point. On AArch32 this is a plain alias for
 * enable_mmu_svc_mon(); the flags are forwarded unchanged.
 */
void enable_mmu_direct_svc_mon(unsigned int flags)
{
	enable_mmu_svc_mon(flags);
}