smmu_v3.c

/*
 * Copyright (c) 2017-2024, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU poll timeout in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)
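/*
 * Poll the given SMMU register until the bits selected by 'mask' read back
 * as 'value', or until the poll timeout expires. Returns 0 on success and
 * -1 on timeout.
 */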
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
			uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}
/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
			SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}
/* Initialize the SMMU by invalidating all secure caches and TLBs. */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it's implementation defined
	 * as to how the SMMU_S_INIT register is accessed.
	 * As per the Arm SMMUv3 specification, the SMMU_S_INIT register in an
	 * SMMU with an RME implementation has the following properties:
	 * a) all SMMU registers that are specified to be accessible only in
	 *    the Secure physical address space are additionally accessible in
	 *    the Root physical address space.
	 * b) as GPT information is permitted to be cached in a TLB, the
	 *    SMMU_S_INIT.INV_ALL operation also invalidates all GPT
	 *    information cached in TLBs.
	 * Additionally, it is Root firmware's responsibility to write to
	 * INV_ALL before enabling SMMU_ROOT_CR0.{ACCESSEN,GPCEN}.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	if (smmuv3_poll(smmu_base + SMMU_S_INIT,
			SMMU_S_INIT_INV_ALL, 0U) != 0) {
		return -1;
	}

#if ENABLE_RME
	if (is_feat_rme_present()) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but the SMMU model only accepts 32b access.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
					gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0],
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12],
			 * hence it needs a 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
					gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 *             not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 *          except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for the ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error, but fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}
#endif /* ENABLE_RME */

	return 0;
}
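/*
 * Force all incoming non-secure transactions to be aborted by programming
 * SMMU_GBPA.ABORT and then disabling the SMMU, so that the global bypass
 * attributes take effect for all non-secure streams.
 */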
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored then,
	 * so simply preserve their value.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}
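
/*
 * Illustrative usage (not part of this driver): a platform boot stage would
 * typically call these hooks once per SMMU instance with a platform-defined
 * programming interface base address. PLAT_SMMUV3_BASE below is a
 * hypothetical example name, not a symbol defined by this file:
 *
 *	if (smmuv3_security_init(PLAT_SMMUV3_BASE) != 0) {
 *		panic();
 *	}
 *	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *		panic();
 *	}
 */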