xlat_mpu_core.c

/*
 * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#include <arch_features.h>
#include <common/debug.h>
#include <lib/utils_def.h>
#include <lib/xlat_tables/xlat_tables_defs.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#include "xlat_mpu_private.h"

#include <fvp_r_arch_helpers.h>
#include <platform_def.h>

#warning "xlat_mpu library is currently experimental and its API may change in future."

/* Helper function that cleans the data cache only if it is enabled. */
static inline __attribute__((unused))
void xlat_clean_dcache_range(uintptr_t addr, size_t size)
{
	if (is_dcache_enabled()) {
		clean_dcache_range(addr, size);
	}
}

/* Calculate region-attributes byte for PRBAR part of MPU-region descriptor: */
uint64_t prbar_attr_value(uint32_t attr)
{
	uint64_t retValue = UL(0);
	uint64_t extract;  /* temp var holding bit extracted from attr */

	/* Extract and stuff SH: */
	extract = (uint64_t) ((attr >> MT_SHAREABILITY_SHIFT)
				& MT_SHAREABILITY_MASK);
	retValue |= (extract << PRBAR_SH_SHIFT);
	/* Extract and stuff AP: */
	extract = (uint64_t) ((attr >> MT_PERM_SHIFT) & MT_PERM_MASK);
	if (extract == 0U) {
		retValue |= (UL(2) << PRBAR_AP_SHIFT);
	} else /* extract == 1 */ {
		retValue |= (UL(0) << PRBAR_AP_SHIFT);
	}
	/* Extract and stuff XN: */
	extract = (uint64_t) ((attr >> MT_EXECUTE_SHIFT) & MT_EXECUTE_MASK);
	retValue |= (extract << PRBAR_XN_SHIFT);
	/* However, also don't execute in peripheral space: */
	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT) & MT_TYPE_MASK);
	if (extract == 0U) {
		retValue |= (UL(1) << PRBAR_XN_SHIFT);
	}
	return retValue;
}
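
/*
 * Worked example for the helper above (read off the code, not a normative
 * statement of the architecture): an MT_RW attribute set (permission field
 * == 1) yields AP == 0b00, which per the v8-R64 PRBAR.AP encoding is a
 * read-write permission, while MT_RO (field == 0) yields AP == 0b10
 * (read-only). Any region of type MT_DEVICE (type field == 0) has XN
 * forced to 1, so peripheral space is never executable regardless of
 * whether MT_EXECUTE or MT_EXECUTE_NEVER was requested.
 */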

/* Calculate region-attributes byte for PRLAR part of MPU-region descriptor: */
uint64_t prlar_attr_value(uint32_t attr)
{
	uint64_t retValue = UL(0);
	uint64_t extract;  /* temp var holding bit extracted from attr */

	/* Extract and stuff AttrIndx: */
	extract = (uint64_t) ((attr >> MT_TYPE_SHIFT)
				& MT_TYPE_MASK);
	switch (extract) {
	case UL(0):
		retValue |= (UL(1) << PRLAR_ATTR_SHIFT);
		break;
	case UL(2):
		/* 0, so OR in nothing */
		break;
	case UL(3):
		retValue |= (UL(2) << PRLAR_ATTR_SHIFT);
		break;
	default:
		retValue |= (extract << PRLAR_ATTR_SHIFT);
		break;
	}
	/* Stuff EN: */
	retValue |= (UL(1) << PRLAR_EN_SHIFT);
	/* Force NS to 0 (Secure); v8-R64 only supports Secure: */
	extract = ~(1U << PRLAR_NS_SHIFT);
	retValue &= extract;
	return retValue;
}
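
/*
 * Worked example for the helper above: a device-typed attribute set
 * (MT_TYPE field == 0) takes the case-0 branch and selects attribute
 * index 1, matching ATTR_DEVICE_INDEX as programmed into MAIR_EL2 by
 * init_xlat_tables_ctx() below. EN is always set so the region is
 * enabled as soon as it is written, and NS is always cleared because
 * v8-R64 only supports the Secure state.
 */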

/*
 * Function that writes an MPU "translation" into the MPU registers. If not
 * possible (e.g., if no more MPU regions are available) boot is aborted.
 */
static void mpu_map_region(mmap_region_t *mm)
{
	uint64_t prenr_el2_value = 0UL;
	uint64_t prbar_attrs = 0UL;
	uint64_t prlar_attrs = 0UL;
	int region_to_use = 0;

	/* If all MPU regions are in use, then abort boot: */
	prenr_el2_value = read_prenr_el2();
	assert(prenr_el2_value != 0xffffffff);

	/*
	 * Find and select the first available MPU region (PRENR has an
	 * enable bit for each MPU region, 1 for in-use or 0 for unused):
	 */
	for (region_to_use = 0; region_to_use < N_MPU_REGIONS;
	     region_to_use++) {
		if (((prenr_el2_value >> region_to_use) & 1) == 0) {
			break;
		}
	}
	write_prselr_el2((uint64_t) (region_to_use));
	isb();

	/* Set base and limit addresses: */
	write_prbar_el2(mm->base_pa & PRBAR_PRLAR_ADDR_MASK);
	write_prlar_el2((mm->base_pa + mm->size - 1UL)
			& PRBAR_PRLAR_ADDR_MASK);
	dsbsy();
	isb();

	/* Set attributes: */
	prbar_attrs = prbar_attr_value(mm->attr);
	write_prbar_el2(read_prbar_el2() | prbar_attrs);
	prlar_attrs = prlar_attr_value(mm->attr);
	write_prlar_el2(read_prlar_el2() | prlar_attrs);
	dsbsy();
	isb();

	/* Mark this MPU region as used (UL(1) avoids a signed shift): */
	prenr_el2_value |= (UL(1) << region_to_use);
	write_prenr_el2(prenr_el2_value);
	isb();
}
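
/*
 * Illustrative debugging aid, not part of the library API: the same
 * PRSELR_EL2 select-then-access indirection used in mpu_map_region() can
 * read a programmed region back. The function name is hypothetical; it is
 * marked unused, like xlat_clean_dcache_range() above, so this compiles
 * as a sketch even with no caller.
 */
static inline __attribute__((unused))
void mpu_dump_region(unsigned int region)
{
	/* Select the region, then read its base/limit registers back. */
	write_prselr_el2((uint64_t) region);
	isb();
	VERBOSE("MPU region %u: PRBAR_EL2=0x%llx PRLAR_EL2=0x%llx\n",
		region, (unsigned long long) read_prbar_el2(),
		(unsigned long long) read_prlar_el2());
}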

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(const xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	unsigned long long base_pa = mm->base_pa;
	uintptr_t base_va = mm->base_va;
	size_t size = mm->size;

	unsigned long long end_pa = base_pa + size - 1U;
	uintptr_t end_va = base_va + size - 1U;

	if (base_pa != base_va) {
		return -EINVAL;  /* MPU does not perform address translation */
	}
	if ((base_pa % 64ULL) != 0ULL) {
		return -EINVAL;  /* MPU requires 64-byte alignment */
	}

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va)) {
		return -ERANGE;
	}
	if (end_pa > ctx->pa_max_address) {
		return -ERANGE;
	}

	/* Check that there is space in the ctx->mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0U) {
		return -ENOMEM;
	}

	/* Check for PAs and VAs overlaps with all other regions */
	for (const mmap_region_t *mm_cursor = ctx->mmap;
	     mm_cursor->size != 0U; ++mm_cursor) {

		uintptr_t mm_cursor_end_va =
			mm_cursor->base_va + mm_cursor->size - 1U;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		bool fully_overlapped_va =
			((base_va >= mm_cursor->base_va) &&
			 (end_va <= mm_cursor_end_va)) ||
			((mm_cursor->base_va >= base_va) &&
			 (mm_cursor_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {
#if PLAT_XLAT_TABLES_DYNAMIC
			if (((mm->attr & MT_DYNAMIC) != 0U) ||
			    ((mm_cursor->attr & MT_DYNAMIC) != 0U)) {
				return -EPERM;
			}
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm_cursor->base_va - mm_cursor->base_pa)
					!= (base_va - base_pa)) {
				return -EPERM;
			}
			if ((base_va == mm_cursor->base_va) &&
			    (size == mm_cursor->size)) {
				return -EPERM;
			}
		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */
			unsigned long long mm_cursor_end_pa =
				mm_cursor->base_pa + mm_cursor->size - 1U;

			bool separated_pa = (end_pa < mm_cursor->base_pa) ||
				(base_pa > mm_cursor_end_pa);
			bool separated_va = (end_va < mm_cursor->base_va) ||
				(base_va > mm_cursor_end_va);

			if (!separated_va || !separated_pa) {
				return -EPERM;
			}
		}
	}

	return 0;
}
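
/*
 * Worked example of the rules above (hypothetical, 64-byte-aligned
 * addresses): with an identity-mapped region A = [0x1000, 0x1FFF] already
 * present, adding B = [0x1800, 0x27FF] fails with -EPERM (partial
 * overlap), and adding A again also fails (exact duplicate); but
 * B = [0x1200, 0x12FF] is accepted, because it lies fully inside A and
 * both regions have the same (zero) VA-to-PA offset.
 */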

void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap, *mm_destination;
	const mmap_region_t *mm_end = ctx->mmap + ctx->mmap_num;
	const mmap_region_t *mm_last;
	unsigned long long end_pa = mm->base_pa + mm->size - 1U;
	uintptr_t end_va = mm->base_va + mm->size - 1U;
	int ret;

	/* Ignore empty regions */
	if (mm->size == 0U) {
		return;
	}

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(false);
		return;
	}

	/* Find the last entry marker in the mmap */
	mm_last = ctx->mmap;
	while ((mm_last->size != 0U) && (mm_last < mm_end)) {
		++mm_last;
	}

	/*
	 * Check if we have enough space in the memory mapping table.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0U);

	/* Make room for new region by moving other regions up by one place */
	mm_destination = mm_cursor + 1;
	(void)memmove(mm_destination, mm_cursor,
		      (uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_end->size == 0U);

	*mm_cursor = *mm;

	if (end_pa > ctx->max_pa) {
		ctx->max_pa = end_pa;
	}
	if (end_va > ctx->max_va) {
		ctx->max_va = end_va;
	}
}
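
/*
 * A note on the insertion above: the new region is always placed at the
 * front of ctx->mmap (mm_cursor never advances), with memmove() shifting
 * the existing entries, and the trailing sentinel, up one slot. Unlike the
 * full xlat_tables_v2 library, no sorted insertion is attempted; MPU
 * regions are matched by address rather than walked in table order, so
 * their ordering carries no meaning.
 */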

void mmap_add_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
{
	const mmap_region_t *mm_cursor = mm;

	while (mm_cursor->granularity != 0U) {
		mmap_add_region_ctx(ctx, mm_cursor);
		mm_cursor++;
	}
}
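
/*
 * Illustrative usage sketch with hypothetical addresses (not part of this
 * file's API): a region table as consumed by mmap_add_ctx(). The
 * MAP_REGION_FLAT() helper from xlat_tables_v2.h builds identity-mapped
 * entries, and the terminating all-zero entry has granularity == 0, which
 * ends the loop above. A caller would pass it as
 * mmap_add_ctx(&my_ctx, example_mmap).
 */
static const mmap_region_t example_mmap[] __attribute__((unused)) = {
	/* 64 KiB of normal read-write memory, identity mapped. */
	MAP_REGION_FLAT(UL(0x80000000), UL(0x10000), MT_MEMORY | MT_RW),
	/* 4 KiB of device registers; prbar_attr_value() forces XN here. */
	MAP_REGION_FLAT(UL(0x90000000), UL(0x1000), MT_DEVICE | MT_RW),
	{0}
};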

void __init init_xlat_tables_ctx(xlat_ctx_t *ctx)
{
	uint64_t mair = UL(0);

	assert(ctx != NULL);
	assert(!ctx->initialized);
	assert((ctx->xlat_regime == EL2_REGIME) ||
	       (ctx->xlat_regime == EL1_EL0_REGIME));
	/* Note: Add EL3_REGIME if EL3 is supported in future v8-R64 cores. */
	assert(!is_mpu_enabled_ctx(ctx));

	mmap_region_t *mm = ctx->mmap;

	assert(ctx->va_max_address >=
		(xlat_get_min_virt_addr_space_size() - 1U));
	assert(ctx->va_max_address <= (MAX_VIRT_ADDR_SPACE_SIZE - 1U));
	assert(IS_POWER_OF_TWO(ctx->va_max_address + 1U));

	xlat_mmap_print(mm);

	/* All tables must be zeroed before mapping any region. */
	for (unsigned int i = 0U; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	/* Also mark all MPU regions as invalid in the MPU hardware itself: */
	write_prenr_el2(0);
	/* Sufficient for current, max-32-region implementations. */
	dsbsy();
	isb();
	while (mm->size != 0U) {
		if (read_prenr_el2() == ALL_MPU_EL2_REGIONS_USED) {
			ERROR("Not enough MPU regions to map region:\n"
			      " VA:0x%lx PA:0x%llx size:0x%zx attr:0x%x\n",
			      mm->base_va, mm->base_pa, mm->size, mm->attr);
			panic();
		} else {
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
			xlat_clean_dcache_range((uintptr_t)mm->base_va,
						mm->size);
#endif
			mpu_map_region(mm);
		}
		mm++;
	}
	ctx->initialized = true;

	xlat_tables_print(ctx);

	/* Set attributes in the right indices of the MAIR */
	mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
			      ATTR_IWBWA_OWBWA_NTR_INDEX);
	mair |= MAIR_ATTR_SET(ATTR_NON_CACHEABLE,
			      ATTR_NON_CACHEABLE_INDEX);
	write_mair_el2(mair);
	dsbsy();
	isb();
}
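
/*
 * For reference: MAIR_ATTR_SET(attr, index) places the 8-bit memory
 * attribute encoding at byte <index> of the MAIR value, so the AttrIndx
 * field that prlar_attr_value() writes into PRLAR_EL2 selects one of the
 * three attributes programmed just above (device, write-back write-allocate
 * normal memory, or non-cacheable normal memory).
 */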

/*
 * Function to wipe clean and disable all MPU regions. This function expects
 * that the MPU has already been turned off and that caching concerns have
 * been addressed, but it nevertheless also explicitly turns off the MPU.
 */
void clear_all_mpu_regions(void)
{
	uint64_t sctlr_el2_value = 0UL;
	uint64_t region_n = 0UL;

	/*
	 * MPU should already be disabled, but explicitly disable it
	 * nevertheless:
	 */
	sctlr_el2_value = read_sctlr_el2() & ~(1UL);  /* clear SCTLR_EL2.M */
	write_sctlr_el2(sctlr_el2_value);

	/* Disable all regions: */
	write_prenr_el2(0UL);

	/* Sequence through all regions, zeroing them out and turning off: */
	for (region_n = 0UL; region_n < N_MPU_REGIONS; region_n++) {
		write_prselr_el2(region_n);
		isb();
		write_prbar_el2((uint64_t) 0);
		write_prlar_el2((uint64_t) 0);
		dsbsy();
		isb();
	}
}
  347. }