/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <cassert.h>
#include <common_def.h>
#include <debug.h>
#include <errno.h>
#include <platform_def.h>
#include <string.h>
#include <types.h>
#include <utils.h>
#include <xlat_tables_v2.h>

#ifdef AARCH32
# include "aarch32/xlat_tables_arch.h"
#else
# include "aarch64/xlat_tables_arch.h"
#endif
#include "xlat_tables_private.h"

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * The following functions assume that they will be called using subtables
 * only. The base table can't be unmapped, so it is not needed to do any
 * special handling for it.
 */

/*
 * Returns the index of the array corresponding to the specified translation
 * table.
 */
static int xlat_table_get_index(xlat_ctx_t *ctx, const uint64_t *table)
{
	for (unsigned int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables[i] == table)
			return i;

	/*
	 * Maybe we were asked to get the index of the base level table, which
	 * should never happen.
	 */
	assert(0);

	return -1;
}

/* Returns a pointer to an empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	for (unsigned int i = 0; i < ctx->tables_num; i++)
		if (ctx->tables_mapped_regions[i] == 0)
			return ctx->tables[i];

	return NULL;
}

/* Increments region count for a given table. */
static void xlat_table_inc_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]++;
}

/* Decrements region count for a given table. */
static void xlat_table_dec_regions_count(xlat_ctx_t *ctx, const uint64_t *table)
{
	ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)]--;
}

/* Returns 1 if the specified table is empty, 0 otherwise. */
static int xlat_table_is_empty(xlat_ctx_t *ctx, const uint64_t *table)
{
	return !ctx->tables_mapped_regions[xlat_table_get_index(ctx, table)];
}

#else /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a pointer to the first empty translation table. */
static uint64_t *xlat_table_get_empty(xlat_ctx_t *ctx)
{
	assert(ctx->next_table < ctx->tables_num);

	return ctx->tables[ctx->next_table++];
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/* Returns a block/page table descriptor for the given level and attributes. */
static uint64_t xlat_desc(mmap_attr_t attr, unsigned long long addr_pa,
			  int level, uint64_t execute_never_mask)
{
	uint64_t desc;
	int mem_type;

	/* Make sure that the granularity is fine enough to map this address. */
	assert((addr_pa & XLAT_BLOCK_MASK(level)) == 0);

	desc = addr_pa;
	/*
	 * There are different translation table descriptors for level 3 and the
	 * rest.
	 */
	desc |= (level == XLAT_TABLE_LEVEL_MAX) ? PAGE_DESC : BLOCK_DESC;
	/*
	 * Always set the access flag, as TF doesn't manage access flag faults.
	 * Deduce other fields of the descriptor based on the MT_NS and MT_RW
	 * memory region attributes.
	 */
	desc |= (attr & MT_NS) ? LOWER_ATTRS(NS) : 0;
	desc |= (attr & MT_RW) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO);
	desc |= LOWER_ATTRS(ACCESS_FLAG);

	/*
	 * Deduce shareability domain and executability of the memory region
	 * from the memory type of the attributes (MT_TYPE).
	 *
	 * Data accesses to device memory and non-cacheable normal memory are
	 * coherent for all observers in the system, and correspondingly are
	 * always treated as being Outer Shareable. Therefore, for these 2 types
	 * of memory, it is not strictly needed to set the shareability field
	 * in the translation tables.
	 */
	mem_type = MT_TYPE(attr);
	if (mem_type == MT_DEVICE) {
		desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);
		/*
		 * Always map device memory as execute-never.
		 * This is to avoid the possibility of a speculative instruction
		 * fetch, which could be an issue if this memory region
		 * corresponds to a read-sensitive peripheral.
		 */
		desc |= execute_never_mask;
	} else { /* Normal memory */
		/*
		 * Always map read-write normal memory as execute-never.
		 * (Trusted Firmware doesn't self-modify its code, therefore
		 * R/W memory is reserved for data storage, which must not be
		 * executable.)
		 * Note that setting the XN bit here is for consistency only.
		 * The function that enables the MMU sets the SCTLR_ELx.WXN bit,
		 * which causes any writable memory region to be treated as
		 * execute-never, regardless of the value of the XN bit in the
		 * translation table.
		 *
		 * For read-only memory, rely on the MT_EXECUTE/MT_EXECUTE_NEVER
		 * attribute to figure out the value of the XN bit.
		 */
		if ((attr & MT_RW) || (attr & MT_EXECUTE_NEVER)) {
			desc |= execute_never_mask;
		}

		if (mem_type == MT_MEMORY) {
			desc |= LOWER_ATTRS(ATTR_IWBWA_OWBWA_NTR_INDEX | ISH);
		} else {
			assert(mem_type == MT_NON_CACHEABLE);
			desc |= LOWER_ATTRS(ATTR_NON_CACHEABLE_INDEX | OSH);
		}
	}

	return desc;
}
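
/*
 * Worked example of the composition above (illustrative only, values
 * assumed): for a secure, read-write device region mapped with a level 2
 * block on AArch64, attr = MT_DEVICE | MT_RW yields:
 *
 *	desc  = addr_pa;				// block-aligned output address
 *	desc |= BLOCK_DESC;				// level 2 != XLAT_TABLE_LEVEL_MAX
 *	desc |= LOWER_ATTRS(AP_RW);			// MT_RW is set
 *	desc |= LOWER_ATTRS(ACCESS_FLAG);		// AF is always set
 *	desc |= LOWER_ATTRS(ATTR_DEVICE_INDEX | OSH);	// device memory type
 *	desc |= execute_never_mask;			// device memory is never executable
 *
 * MT_NS is clear, so the NS bit stays 0 and the region is secure.
 */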

/*
 * Enumeration of actions that can be taken when mapping table entries,
 * depending on the previous value in that entry and information about the
 * region being mapped.
 */
typedef enum {
	/* Do nothing */
	ACTION_NONE,

	/* Write a block (or page, if in level 3) entry. */
	ACTION_WRITE_BLOCK_ENTRY,

	/*
	 * Create a new table and write a table entry pointing to it. Recurse
	 * into it for further processing.
	 */
	ACTION_CREATE_NEW_TABLE,

	/*
	 * There is a table descriptor in this entry, read it and recurse into
	 * that table for further processing.
	 */
	ACTION_RECURSE_INTO_TABLE,
} action_t;

#if PLAT_XLAT_TABLES_DYNAMIC

/*
 * Recursive function that writes to the translation tables and unmaps the
 * specified region.
 */
static void xlat_tables_unmap_region(xlat_ctx_t *ctx, mmap_region_t *mm,
				     const uintptr_t table_base_va,
				     uint64_t *const table_base,
				     const int table_entries,
				     const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t *subtable;
	uint64_t desc;

	uintptr_t table_idx_va;
	uintptr_t table_idx_end_va; /* End VA of this entry */

	uintptr_t region_end_va = mm->base_va + mm->size - 1;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

	while (table_idx < table_entries) {

		table_idx_end_va = table_idx_va + XLAT_BLOCK_SIZE(level) - 1;

		desc = table_base[table_idx];
		uint64_t desc_type = desc & DESC_MASK;

		action_t action = ACTION_NONE;

		if ((mm->base_va <= table_idx_va) &&
		    (region_end_va >= table_idx_end_va)) {

			/* Region covers the whole block. */

			if (level == 3) {
				/*
				 * Last level, only page descriptors allowed,
				 * erase it.
				 */
				assert(desc_type == PAGE_DESC);

				action = ACTION_WRITE_BLOCK_ENTRY;
			} else {
				/*
				 * Other levels can have table descriptors. If
				 * so, recurse into it and erase descriptors
				 * inside it as needed. If there is a block
				 * descriptor, just erase it. If an invalid
				 * descriptor is found, this table isn't
				 * actually mapped, which shouldn't happen.
				 */
				if (desc_type == TABLE_DESC) {
					action = ACTION_RECURSE_INTO_TABLE;
				} else {
					assert(desc_type == BLOCK_DESC);
					action = ACTION_WRITE_BLOCK_ENTRY;
				}
			}

		} else if ((mm->base_va <= table_idx_end_va) ||
			   (region_end_va >= table_idx_va)) {

			/*
			 * Region partially covers block.
			 *
			 * It can't happen in level 3.
			 *
			 * There must be a table descriptor here, if not there
			 * was a problem when mapping the region.
			 */
			assert(level < 3);
			assert(desc_type == TABLE_DESC);

			action = ACTION_RECURSE_INTO_TABLE;
		}

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] = INVALID_DESC;
			xlat_arch_tlbi_va(table_idx_va);

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			xlat_tables_unmap_region(ctx, mm, table_idx_va,
						 subtable, XLAT_TABLE_ENTRIES,
						 level + 1);

			/*
			 * If the subtable is now empty, remove its reference.
			 */
			if (xlat_table_is_empty(ctx, subtable)) {
				table_base[table_idx] = INVALID_DESC;
				xlat_arch_tlbi_va(table_idx_va);
			}

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (region_end_va <= table_idx_va)
			break;
	}

	if (level > ctx->base_level)
		xlat_table_dec_regions_count(ctx, table_base);
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

/*
 * From the given arguments, it decides which action to take when mapping the
 * specified region.
 */
static action_t xlat_tables_map_region_action(const mmap_region_t *mm,
		const int desc_type, const unsigned long long dest_pa,
		const uintptr_t table_entry_base_va, const int level)
{
	uintptr_t mm_end_va = mm->base_va + mm->size - 1;
	uintptr_t table_entry_end_va =
			table_entry_base_va + XLAT_BLOCK_SIZE(level) - 1;

	/*
	 * The descriptor types allowed depend on the current table level.
	 */

	if ((mm->base_va <= table_entry_base_va) &&
	    (mm_end_va >= table_entry_end_va)) {

		/*
		 * Table entry is covered by region
		 * --------------------------------
		 *
		 * This means that this table entry can describe the whole
		 * translation with this granularity in principle.
		 */

		if (level == 3) {
			/*
			 * Last level, only page descriptors are allowed.
			 */
			if (desc_type == PAGE_DESC) {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				return ACTION_NONE;
			} else {
				assert(desc_type == INVALID_DESC);
				return ACTION_WRITE_BLOCK_ENTRY;
			}

		} else {

			/*
			 * Other levels. Table descriptors are allowed. Block
			 * descriptors too, but they have some limitations.
			 */

			if (desc_type == TABLE_DESC) {
				/* There's already a table, recurse into it. */
				return ACTION_RECURSE_INTO_TABLE;

			} else if (desc_type == INVALID_DESC) {
				/*
				 * There's nothing mapped here, create a new
				 * entry.
				 *
				 * Check if the destination granularity allows
				 * us to use a block descriptor or we need a
				 * finer table for it.
				 *
				 * Also, check if the current level allows block
				 * descriptors. If not, create a table instead.
				 */
				if ((dest_pa & XLAT_BLOCK_MASK(level)) ||
				    (level < MIN_LVL_BLOCK_DESC))
					return ACTION_CREATE_NEW_TABLE;
				else
					return ACTION_WRITE_BLOCK_ENTRY;

			} else {
				/*
				 * There's another region mapped here, don't
				 * overwrite.
				 */
				assert(desc_type == BLOCK_DESC);
				return ACTION_NONE;
			}
		}

	} else if ((mm->base_va <= table_entry_end_va) ||
		   (mm_end_va >= table_entry_base_va)) {

		/*
		 * Region partially covers table entry
		 * -----------------------------------
		 *
		 * This means that this table entry can't describe the whole
		 * translation, a finer table is needed.
		 * There cannot be partial block overlaps in level 3. If that
		 * happens, some of the preliminary checks when adding the
		 * mmap region failed to detect that PA and VA must at least be
		 * aligned to PAGE_SIZE.
		 */
		assert(level < 3);

		if (desc_type == INVALID_DESC) {
			/*
			 * The block is not fully covered by the region. Create
			 * a new table, recurse into it and try to map the
			 * region with finer granularity.
			 */
			return ACTION_CREATE_NEW_TABLE;

		} else {
			assert(desc_type == TABLE_DESC);
			/*
			 * The block is not fully covered by the region, but
			 * there is already a table here. Recurse into it and
			 * try to map with finer granularity.
			 *
			 * PAGE_DESC for level 3 has the same value as
			 * TABLE_DESC, but this code can't run on a level 3
			 * table because there can't be overlaps in level 3.
			 */
			return ACTION_RECURSE_INTO_TABLE;
		}
	}

	/*
	 * This table entry is outside of the region specified in the arguments,
	 * don't write anything to it.
	 */
	return ACTION_NONE;
}
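
/*
 * Example of the decision above (addresses are hypothetical): mapping a 4MB
 * region with base_va == base_pa == 0x40000000 on a 4KB granule. At level 2
 * (2MB blocks), every fully covered entry holds INVALID_DESC and dest_pa is
 * 2MB-aligned, so ACTION_WRITE_BLOCK_ENTRY is returned twice and two block
 * descriptors are written. If base_pa were 0x40001000 instead, then
 * dest_pa & XLAT_BLOCK_MASK(2) would be non-zero, ACTION_CREATE_NEW_TABLE
 * would be returned, and the region would end up mapped with 4KB pages at
 * level 3.
 */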

/*
 * Recursive function that writes to the translation tables and maps the
 * specified region. On success, it returns the VA of the last byte that was
 * successfully mapped. On error, it returns the VA of the next entry that
 * should have been mapped.
 */
static uintptr_t xlat_tables_map_region(xlat_ctx_t *ctx, mmap_region_t *mm,
					const uintptr_t table_base_va,
					uint64_t *const table_base,
					const int table_entries,
					const unsigned int level)
{
	assert(level >= ctx->base_level && level <= XLAT_TABLE_LEVEL_MAX);

	uintptr_t mm_end_va = mm->base_va + mm->size - 1;

	uintptr_t table_idx_va;
	unsigned long long table_idx_pa;

	uint64_t *subtable;
	uint64_t desc;

	int table_idx;

	if (mm->base_va > table_base_va) {
		/* Find the first index of the table affected by the region. */
		table_idx_va = mm->base_va & ~XLAT_BLOCK_MASK(level);

		table_idx = (table_idx_va - table_base_va) >>
			    XLAT_ADDR_SHIFT(level);

		assert(table_idx < table_entries);
	} else {
		/* Start from the beginning of the table. */
		table_idx_va = table_base_va;
		table_idx = 0;
	}

#if PLAT_XLAT_TABLES_DYNAMIC
	if (level > ctx->base_level)
		xlat_table_inc_regions_count(ctx, table_base);
#endif

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		table_idx_pa = mm->base_pa + table_idx_va - mm->base_va;

		action_t action = xlat_tables_map_region_action(mm,
			desc & DESC_MASK, table_idx_pa, table_idx_va, level);

		if (action == ACTION_WRITE_BLOCK_ENTRY) {

			table_base[table_idx] =
				xlat_desc(mm->attr, table_idx_pa, level,
					  ctx->execute_never_mask);

		} else if (action == ACTION_CREATE_NEW_TABLE) {

			subtable = xlat_table_get_empty(ctx);
			if (subtable == NULL) {
				/* Not enough free tables to map this region */
				return table_idx_va;
			}

			/* Point to new subtable from this one. */
			table_base[table_idx] = TABLE_DESC | (unsigned long)subtable;

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm,
						table_idx_va, subtable,
						XLAT_TABLE_ENTRIES, level + 1);
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else if (action == ACTION_RECURSE_INTO_TABLE) {

			subtable = (uint64_t *)(uintptr_t)(desc & TABLE_ADDR_MASK);

			/* Recurse to write into subtable */
			uintptr_t end_va = xlat_tables_map_region(ctx, mm,
						table_idx_va, subtable,
						XLAT_TABLE_ENTRIES, level + 1);
			if (end_va != table_idx_va + XLAT_BLOCK_SIZE(level) - 1)
				return end_va;

		} else {
			assert(action == ACTION_NONE);
		}

		table_idx++;
		table_idx_va += XLAT_BLOCK_SIZE(level);

		/* If reached the end of the region, exit */
		if (mm_end_va <= table_idx_va)
			break;
	}

	return table_idx_va - 1;
}
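
/*
 * Callers detect failure by comparing the return value against the last byte
 * of the requested region, as both call sites in this file do. A sketch of
 * the pattern:
 *
 *	uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0, ctx->base_table,
 *				ctx->base_table_entries, ctx->base_level);
 *	if (end_va != mm->base_va + mm->size - 1)
 *		// ran out of free tables; end_va is the first unmapped VA
 */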

void print_mmap(mmap_region_t *const mmap)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	tf_printf("mmap:\n");
	mmap_region_t *mm = mmap;

	while (mm->size) {
		tf_printf(" VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			  (void *)mm->base_va, mm->base_pa,
			  mm->size, mm->attr);
		++mm;
	}
	tf_printf("\n");
#endif
}

/*
 * Function that verifies that a region can be mapped.
 * Returns:
 *        0: Success, the mapping is allowed.
 *   EINVAL: Invalid values were used as arguments.
 *   ERANGE: The memory limits were surpassed.
 *   ENOMEM: There is not enough memory in the mmap array.
 *    EPERM: Region overlaps another one in an invalid way.
 */
static int mmap_add_region_check(xlat_ctx_t *ctx, unsigned long long base_pa,
				 uintptr_t base_va, size_t size,
				 mmap_attr_t attr)
{
	mmap_region_t *mm = ctx->mmap;
	unsigned long long end_pa = base_pa + size - 1;
	uintptr_t end_va = base_va + size - 1;

	if (!IS_PAGE_ALIGNED(base_pa) || !IS_PAGE_ALIGNED(base_va) ||
	    !IS_PAGE_ALIGNED(size))
		return -EINVAL;

	/* Check for overflows */
	if ((base_pa > end_pa) || (base_va > end_va))
		return -ERANGE;

	if ((base_va + (uintptr_t)size - (uintptr_t)1) > ctx->va_max_address)
		return -ERANGE;

	if ((base_pa + (unsigned long long)size - 1ULL) > ctx->pa_max_address)
		return -ERANGE;

	/* Check that there is space in the mmap array */
	if (ctx->mmap[ctx->mmap_num - 1].size != 0)
		return -ENOMEM;

	/* Check for PAs and VAs overlaps with all other regions */
	for (mm = ctx->mmap; mm->size; ++mm) {

		uintptr_t mm_end_va = mm->base_va + mm->size - 1;

		/*
		 * Check if one of the regions is completely inside the other
		 * one.
		 */
		int fully_overlapped_va =
			((base_va >= mm->base_va) && (end_va <= mm_end_va)) ||
			((mm->base_va >= base_va) && (mm_end_va <= end_va));

		/*
		 * Full VA overlaps are only allowed if both regions are
		 * identity mapped (zero offset) or have the same VA to PA
		 * offset. Also, make sure that it's not the exact same area.
		 * This can only be done with static regions.
		 */
		if (fully_overlapped_va) {

#if PLAT_XLAT_TABLES_DYNAMIC
			if ((attr & MT_DYNAMIC) || (mm->attr & MT_DYNAMIC))
				return -EPERM;
#endif /* PLAT_XLAT_TABLES_DYNAMIC */
			if ((mm->base_va - mm->base_pa) != (base_va - base_pa))
				return -EPERM;

			if ((base_va == mm->base_va) && (size == mm->size))
				return -EPERM;

		} else {
			/*
			 * If the regions do not have fully overlapping VAs,
			 * then they must have fully separated VAs and PAs.
			 * Partial overlaps are not allowed.
			 */

			unsigned long long mm_end_pa =
						mm->base_pa + mm->size - 1;

			int separated_pa =
				(end_pa < mm->base_pa) || (base_pa > mm_end_pa);
			int separated_va =
				(end_va < mm->base_va) || (base_va > mm_end_va);

			if (!(separated_va && separated_pa))
				return -EPERM;
		}
	}

	return 0;
}
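
/*
 * For example (values hypothetical): a static region nested inside a larger
 * static identity-mapped window passes the fully_overlapped_va check as long
 * as both use the same VA to PA offset, but adding a region with the exact
 * same base_va and size twice fails with -EPERM, as does any partial VA or
 * PA overlap between otherwise unrelated regions.
 */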

void mmap_add_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Ignore empty regions */
	if (!mm->size)
		return;

	/* Static regions must be added before initializing the xlat tables. */
	assert(!ctx->initialized);

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr);
	if (ret != 0) {
		ERROR("mmap_add_region_check() failed. error %d\n", ret);
		assert(0);
		return;
	}

	/*
	 * Find correct place in mmap to insert new region.
	 *
	 * 1 - Lower region VA end first.
	 * 2 - Smaller region size first.
	 *
	 * VA  0                                   0xFF
	 *
	 * 1st |------|
	 * 2nd |------------|
	 * 3rd          |------|
	 * 4th                  |---|
	 * 5th                      |---|
	 * 6th                 |----------|
	 * 7th |-------------------------------------|
	 *
	 * This is required for overlapping regions only. It simplifies adding
	 * regions with the loop in xlat_tables_init_internal because the outer
	 * ones won't overwrite block or page descriptors of regions added
	 * previously.
	 *
	 * Overlapping is only allowed for static regions.
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr;

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;
}
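
/*
 * Typical use, as a sketch (the region values are hypothetical;
 * MAP_REGION_FLAT comes from xlat_tables_v2.h):
 *
 *	mmap_region_t region = MAP_REGION_FLAT(0x80000000, 0x200000,
 *					       MT_MEMORY | MT_RW | MT_NS);
 *	mmap_add_region_ctx(&ctx, &region);
 *
 * All static regions must be added before init_xlation_table() is called;
 * the assert(!ctx->initialized) above enforces this.
 */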

#if PLAT_XLAT_TABLES_DYNAMIC

int mmap_add_dynamic_region_ctx(xlat_ctx_t *ctx, mmap_region_t *mm)
{
	mmap_region_t *mm_cursor = ctx->mmap;
	mmap_region_t *mm_last = mm_cursor + ctx->mmap_num;
	unsigned long long end_pa = mm->base_pa + mm->size - 1;
	uintptr_t end_va = mm->base_va + mm->size - 1;
	int ret;

	/* Nothing to do */
	if (!mm->size)
		return 0;

	ret = mmap_add_region_check(ctx, mm->base_pa, mm->base_va, mm->size,
				    mm->attr | MT_DYNAMIC);
	if (ret != 0)
		return ret;

	/*
	 * Find the adequate entry in the mmap array in the same way done for
	 * static regions in mmap_add_region_ctx().
	 */

	while ((mm_cursor->base_va + mm_cursor->size - 1) < end_va
	       && mm_cursor->size)
		++mm_cursor;

	while ((mm_cursor->base_va + mm_cursor->size - 1 == end_va)
	       && (mm_cursor->size < mm->size))
		++mm_cursor;

	/* Make room for new region by moving other regions up by one place */
	memmove(mm_cursor + 1, mm_cursor,
		(uintptr_t)mm_last - (uintptr_t)mm_cursor);

	/*
	 * Check we haven't lost the empty sentinel from the end of the array.
	 * This shouldn't happen as we have checked in mmap_add_region_check
	 * that there is free space.
	 */
	assert(mm_last->size == 0);

	mm_cursor->base_pa = mm->base_pa;
	mm_cursor->base_va = mm->base_va;
	mm_cursor->size = mm->size;
	mm_cursor->attr = mm->attr | MT_DYNAMIC;

	/*
	 * Update the translation tables if the xlat tables are initialized. If
	 * not, this region will be mapped when they are initialized.
	 */
	if (ctx->initialized) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm_cursor, 0,
					ctx->base_table,
					ctx->base_table_entries,
					ctx->base_level);

		/* Failed to map, remove mmap entry, unmap and return error. */
		if (end_va != mm_cursor->base_va + mm_cursor->size - 1) {
			memmove(mm_cursor, mm_cursor + 1,
				(uintptr_t)mm_last - (uintptr_t)mm_cursor);

			/*
			 * Check if the mapping function actually managed to map
			 * anything. If not, just return now.
			 */
			if (mm_cursor->base_va >= end_va)
				return -ENOMEM;

			/*
			 * Something went wrong after mapping some table
			 * entries, undo every change done up to this point.
			 */
			mmap_region_t unmap_mm = {
				.base_pa = 0,
				.base_va = mm->base_va,
				.size = end_va - mm->base_va,
				.attr = 0
			};
			xlat_tables_unmap_region(ctx, &unmap_mm, 0,
						 ctx->base_table,
						 ctx->base_table_entries,
						 ctx->base_level);

			return -ENOMEM;
		}

		/*
		 * Make sure that all entries are written to the memory. There
		 * is no need to invalidate entries when mapping dynamic regions
		 * because new table/block/page descriptors only replace old
		 * invalid descriptors, that aren't TLB cached.
		 */
		dsbishst();
	}

	if (end_pa > ctx->max_pa)
		ctx->max_pa = end_pa;
	if (end_va > ctx->max_va)
		ctx->max_va = end_va;

	return 0;
}
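
/*
 * Runtime usage sketch (addresses and error handling are hypothetical;
 * MAP_REGION comes from xlat_tables_v2.h):
 *
 *	mmap_region_t dyn = MAP_REGION(0xC0000000, 0xC0000000, PAGE_SIZE,
 *				       MT_DEVICE | MT_RW | MT_NS);
 *	int err = mmap_add_dynamic_region_ctx(&ctx, &dyn);
 *	if (err != 0)
 *		return err;
 *	... access the peripheral through the new mapping ...
 *	mmap_remove_dynamic_region_ctx(&ctx, 0xC0000000, PAGE_SIZE);
 *
 * Unlike static regions, these calls are valid after the tables have been
 * initialized; the live tables are updated and synchronized in place.
 */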

/*
 * Removes the region with given base Virtual Address and size from the given
 * context.
 *
 * Returns:
 *        0: Success.
 *   EINVAL: Invalid values were used as arguments (region not found).
 *    EPERM: Tried to remove a static region.
 */
int mmap_remove_dynamic_region_ctx(xlat_ctx_t *ctx, uintptr_t base_va,
				   size_t size)
{
	mmap_region_t *mm = ctx->mmap;
	mmap_region_t *mm_last = mm + ctx->mmap_num;
	int update_max_va_needed = 0;
	int update_max_pa_needed = 0;

	/* Check sanity of mmap array. */
	assert(mm[ctx->mmap_num].size == 0);

	while (mm->size) {
		if ((mm->base_va == base_va) && (mm->size == size))
			break;
		++mm;
	}

	/* Check that the region was found */
	if (mm->size == 0)
		return -EINVAL;

	/* If the region is static it can't be removed */
	if (!(mm->attr & MT_DYNAMIC))
		return -EPERM;

	/* Check if this region is using the top VAs or PAs. */
	if ((mm->base_va + mm->size - 1) == ctx->max_va)
		update_max_va_needed = 1;
	if ((mm->base_pa + mm->size - 1) == ctx->max_pa)
		update_max_pa_needed = 1;

	/* Update the translation tables if needed */
	if (ctx->initialized) {
		xlat_tables_unmap_region(ctx, mm, 0, ctx->base_table,
					 ctx->base_table_entries,
					 ctx->base_level);
		xlat_arch_tlbi_va_sync();
	}

	/* Remove this region by moving the rest down by one place. */
	memmove(mm, mm + 1, (uintptr_t)mm_last - (uintptr_t)mm);

	/* Check if we need to update the max VAs and PAs */
	if (update_max_va_needed) {
		ctx->max_va = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_va + mm->size - 1) > ctx->max_va)
				ctx->max_va = mm->base_va + mm->size - 1;
			++mm;
		}
	}

	if (update_max_pa_needed) {
		ctx->max_pa = 0;
		mm = ctx->mmap;
		while (mm->size) {
			if ((mm->base_pa + mm->size - 1) > ctx->max_pa)
				ctx->max_pa = mm->base_pa + mm->size - 1;
			++mm;
		}
	}

	return 0;
}

#endif /* PLAT_XLAT_TABLES_DYNAMIC */

#if LOG_LEVEL >= LOG_LEVEL_VERBOSE

/* Print the attributes of the specified block descriptor. */
static void xlat_desc_print(uint64_t desc, uint64_t execute_never_mask)
{
	int mem_type_index = ATTR_INDEX_GET(desc);

	if (mem_type_index == ATTR_IWBWA_OWBWA_NTR_INDEX) {
		tf_printf("MEM");
	} else if (mem_type_index == ATTR_NON_CACHEABLE_INDEX) {
		tf_printf("NC");
	} else {
		assert(mem_type_index == ATTR_DEVICE_INDEX);
		tf_printf("DEV");
	}

	tf_printf(LOWER_ATTRS(AP_RO) & desc ? "-RO" : "-RW");
	tf_printf(LOWER_ATTRS(NS) & desc ? "-NS" : "-S");
	tf_printf(execute_never_mask & desc ? "-XN" : "-EXEC");
}

static const char * const level_spacers[] = {
	"[LV0] ",
	"  [LV1] ",
	"    [LV2] ",
	"      [LV3] "
};

static const char *invalid_descriptors_ommited =
		"%s(%d invalid descriptors omitted)\n";

/*
 * Recursive function that reads the translation tables passed as an argument
 * and prints their status.
 */
static void xlat_tables_print_internal(const uintptr_t table_base_va,
		uint64_t *const table_base, const int table_entries,
		const int level, const uint64_t execute_never_mask)
{
	assert(level <= XLAT_TABLE_LEVEL_MAX);

	uint64_t desc;
	uintptr_t table_idx_va = table_base_va;
	int table_idx = 0;

	size_t level_size = XLAT_BLOCK_SIZE(level);

	/*
	 * Keep track of how many invalid descriptors are counted in a row.
	 * Whenever multiple invalid descriptors are found, only the first one
	 * is printed, and a line is added to inform about how many descriptors
	 * have been omitted.
	 */
	int invalid_row_count = 0;

	while (table_idx < table_entries) {

		desc = table_base[table_idx];

		if ((desc & DESC_MASK) == INVALID_DESC) {

			if (invalid_row_count == 0) {
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);
			}
			invalid_row_count++;

		} else {

			if (invalid_row_count > 1) {
				tf_printf(invalid_descriptors_ommited,
					  level_spacers[level],
					  invalid_row_count - 1);
			}
			invalid_row_count = 0;

			/*
			 * Check if this is a table or a block. Tables are only
			 * allowed in levels other than 3, but DESC_PAGE has the
			 * same value as DESC_TABLE, so we need to check.
			 */
			if (((desc & DESC_MASK) == TABLE_DESC) &&
			    (level < XLAT_TABLE_LEVEL_MAX)) {
				/*
				 * Do not print any PA for a table descriptor,
				 * as it doesn't directly map physical memory
				 * but instead points to the next translation
				 * table in the translation table walk.
				 */
				tf_printf("%sVA:%p size:0x%zx\n",
					  level_spacers[level],
					  (void *)table_idx_va, level_size);

				uintptr_t addr_inner = desc & TABLE_ADDR_MASK;

				xlat_tables_print_internal(table_idx_va,
					(uint64_t *)addr_inner,
					XLAT_TABLE_ENTRIES, level + 1,
					execute_never_mask);
			} else {
				tf_printf("%sVA:%p PA:0x%llx size:0x%zx ",
					  level_spacers[level],
					  (void *)table_idx_va,
					  (unsigned long long)(desc & TABLE_ADDR_MASK),
					  level_size);
				xlat_desc_print(desc, execute_never_mask);
				tf_printf("\n");
			}
		}

		table_idx++;
		table_idx_va += level_size;
	}

	if (invalid_row_count > 1) {
		tf_printf(invalid_descriptors_ommited,
			  level_spacers[level], invalid_row_count - 1);
	}
}

#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */

void xlat_tables_print(xlat_ctx_t *ctx)
{
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
	xlat_tables_print_internal(0, ctx->base_table, ctx->base_table_entries,
				   ctx->base_level, ctx->execute_never_mask);
#endif /* LOG_LEVEL >= LOG_LEVEL_VERBOSE */
}

void init_xlation_table(xlat_ctx_t *ctx)
{
	mmap_region_t *mm = ctx->mmap;

	/* All tables must be zeroed before mapping any region. */

	for (unsigned int i = 0; i < ctx->base_table_entries; i++)
		ctx->base_table[i] = INVALID_DESC;

	for (unsigned int j = 0; j < ctx->tables_num; j++) {
#if PLAT_XLAT_TABLES_DYNAMIC
		ctx->tables_mapped_regions[j] = 0;
#endif
		for (unsigned int i = 0; i < XLAT_TABLE_ENTRIES; i++)
			ctx->tables[j][i] = INVALID_DESC;
	}

	while (mm->size) {
		uintptr_t end_va = xlat_tables_map_region(ctx, mm, 0,
					ctx->base_table,
					ctx->base_table_entries,
					ctx->base_level);

		if (end_va != mm->base_va + mm->size - 1) {
			ERROR("Not enough memory to map region:\n"
			      " VA:%p PA:0x%llx size:0x%zx attr:0x%x\n",
			      (void *)mm->base_va, mm->base_pa, mm->size,
			      mm->attr);
			panic();
		}

		mm++;
	}

	ctx->initialized = 1;
}
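
/*
 * A typical bring-up sequence for a context, as a sketch (the region
 * variables are hypothetical; enable_mmu_el3() is provided by the
 * architectural layer, not by this file):
 *
 *	mmap_add_region_ctx(&ctx, &code_region);
 *	mmap_add_region_ctx(&ctx, &rodata_region);
 *	mmap_add_region_ctx(&ctx, &data_region);
 *	init_xlation_table(&ctx);
 *	xlat_tables_print(&ctx);
 *	enable_mmu_el3(0);
 */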