gpt_rme.c
/*
 * Copyright (c) 2022, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <limits.h>
#include <stdint.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include "gpt_rme_private.h"
#include <lib/gpt_rme/gpt_rme.h>
#include <lib/smccc.h>
#include <lib/spinlock.h>
#include <lib/xlat_tables/xlat_tables_v2.h>

#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#endif

/*
 * Lookup T from PPS
 *
 *   PPS    Size    T
 *   0b000  4GB     32
 *   0b001  64GB    36
 *   0b010  1TB     40
 *   0b011  4TB     42
 *   0b100  16TB    44
 *   0b101  256TB   48
 *   0b110  4PB     52
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
					   PPS_1TB_T, PPS_4TB_T,
					   PPS_16TB_T, PPS_256TB_T,
					   PPS_4PB_T};
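
/*
 * Worked example (illustrative only): a platform with PPS=0b001 protects
 * 64GB of physical space, so gpt_t_lookup[1] yields T=36 and the GPT
 * macros size all tables for a 2^36-byte protected physical space.
 */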

/*
 * Lookup P from PGS
 *
 *   PGS   Size   P
 *   0b00  4KB    12
 *   0b10  16KB   14
 *   0b01  64KB   16
 *
 * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
 *
 * See section 15.1.27 of the RME specification.
 */
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};

/*
 * This structure contains GPT configuration data.
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;
	gpccr_pps_e pps;
	gpt_t_val_e t;
	gpccr_pgs_e pgs;
	gpt_p_val_e p;
} gpt_config_t;

static gpt_config_t gpt_config;

/* These variables are used during initialization of the L1 tables. */
static unsigned int gpt_next_l1_tbl_idx;
static uintptr_t gpt_l1_tbl;

/*
 * This function checks to see if a GPI value is valid.
 *
 * These are valid GPI values.
 *   GPT_GPI_NO_ACCESS	U(0x0)
 *   GPT_GPI_SECURE	U(0x8)
 *   GPT_GPI_NS		U(0x9)
 *   GPT_GPI_ROOT	U(0xA)
 *   GPT_GPI_REALM	U(0xB)
 *   GPT_GPI_ANY	U(0xF)
 *
 * Parameters
 *   gpi		GPI to check for validity.
 *
 * Return
 *   true for a valid GPI, false for an invalid one.
 */
static bool gpt_is_gpi_valid(unsigned int gpi)
{
	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
		return true;
	}
	return false;
}

/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
				  uintptr_t base_2, size_t size_2)
{
	if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
		return true;
	}
	return false;
}
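
/*
 * Worked example (illustrative only): regions are treated as half-open
 * intervals [base, base + size). For base_1=0x0000, size_1=0x1000 and
 * base_2=0x1000, size_2=0x1000 the first test is (0x1000 > 0x1000), which
 * is false: regions that merely touch do not overlap. Moving base_2 down
 * to 0x0FFF makes both comparisons true, so the regions overlap.
 */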

/*
 * This helper function checks to see if a PAS region from index 0 to
 * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
 *
 * Parameters
 *   l0_idx:      Index of the L0 entry to check
 *   pas_regions: PAS region array
 *   pas_idx:     Upper bound of the PAS array index.
 *
 * Return
 *   True if a PAS region occupies the L0 region in question, false if not.
 */
static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
					     pas_region_t *pas_regions,
					     unsigned int pas_idx)
{
	/* Iterate over PAS regions up to pas_idx. */
	for (unsigned int i = 0U; i < pas_idx; i++) {
		if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
		    GPT_L0GPTSZ_ACTUAL_SIZE,
		    pas_regions[i].base_pa, pas_regions[i].size)) {
			return true;
		}
	}
	return false;
}

/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
				     unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;
	unsigned int pas_l1_cnt;
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region. */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/* Initial checks for PAS validity. */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
				      i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue. */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("[GPT] PAS[%u] overlaps with previous L0[%u]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned. */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			continue;
		}

		/* Check for granule mapping (L1) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned. */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Find how many L1 tables this PAS occupies. */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array; if two PAS regions share an
			 * L0 region (and thus an L1 table), only the first PAS
			 * in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1U) {
				if (gpt_does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1),
				    pas_regions, idx)) {
					pas_l1_cnt = pas_l1_cnt - 1U;
				}
			}

			if (gpt_does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt = pas_l1_cnt - 1U;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid. */
		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
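
/*
 * Worked counting example (illustrative numbers, assuming the default 1GB
 * L0GPTSZ): a granule-mapped PAS at base 0x3C000000 with size 0x8000000
 * has its last byte at 0x43FFFFFF, so the first and last bytes fall into
 * L0 regions 0 and 1 and pas_l1_cnt starts at 2. If an earlier PAS in the
 * array already occupies L0 region 0, gpt_does_previous_pas_exist_here()
 * subtracts 1 so the shared L1 table is only counted once.
 */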

/*
 * This function validates L0 initialization parameters.
 *
 * Parameters
 *   pps		Protected physical space size value.
 *   l0_mem_base	Base address of memory used for L0 tables.
 *   l0_mem_size	Size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
				  size_t l0_mem_size)
{
	size_t l0_alignment;

	/*
	 * Make sure PPS is valid and then store it since macros need this value
	 * to work.
	 */
	if (pps > GPT_PPS_MAX) {
		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
		return -EINVAL;
	}
	gpt_config.pps = pps;
	gpt_config.t = gpt_t_lookup[pps];

	/* Alignment must be the greater of 4KB or the L0 table size. */
	l0_alignment = PAGE_SIZE_4KB;
	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
	}

	/* Check base address. */
	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
		return -EFAULT;
	}

	/* Check size. */
	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx\n",
		      GPT_L0_TABLE_SIZE(gpt_config.t),
		      l0_mem_size);
		return -ENOMEM;
	}

	return 0;
}
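
/*
 * Sizing sketch (illustrative, assuming the default L0GPTSZ of 1GB, S=30):
 * with PPS=0b000 (4GB, T=32) the L0 table has 2^(32-30) = 4 entries of
 * 8 bytes each, so GPT_L0_TABLE_SIZE is 32 bytes and the required
 * alignment is the 4KB floor. With PPS=0b110 (4PB, T=52) the table grows
 * to 2^22 entries, i.e. 32MB, and must be 32MB-aligned.
 */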

/*
 * In the event that L1 tables are needed, this function validates
 * the L1 table generation parameters.
 *
 * Parameters
 *   l1_mem_base	Base address of memory used for L1 table allocation.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   l1_gpt_cnt		Number of L1 tables needed.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
				  unsigned int l1_gpt_cnt)
{
	size_t l1_gpt_mem_sz;

	/* Check if the granularity is supported */
	if (!xlat_arch_is_granule_size_supported(
	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
		return -EPERM;
	}

	/* Make sure L1 tables are aligned to their size. */
	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
		      l1_mem_base);
		return -EFAULT;
	}

	/* Get total memory needed for L1 tables. */
	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);

	/* Check for overflow. */
	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
		ERROR("[GPT] Overflow calculating L1 memory size.\n");
		return -ENOMEM;
	}

	/* Make sure enough space was supplied. */
	if (l1_mem_size < l1_gpt_mem_sz) {
		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
		ERROR("      Expected 0x%lx bytes. Got 0x%lx bytes\n",
		      l1_gpt_mem_sz, l1_mem_size);
		return -ENOMEM;
	}

	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
	return 0;
}
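
/*
 * Sizing sketch (illustrative, assuming the default 1GB L0GPTSZ): with
 * PGS=0b00 (4KB granules, P=12) one L0 region holds 2^(30-12) = 262144
 * granules. At 4 bits of GPI per granule that is 16384 64-bit entries,
 * so GPT_L1_TABLE_SIZE is 128KB and each L1 table must be 128KB-aligned.
 */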

/*
 * This function initializes L0 block descriptors (regions that cannot be
 * transitioned at the granule level) according to the provided PAS.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region to
 *			initialize.
 */
static void gpt_generate_l0_blk_desc(pas_region_t *pas)
{
	uint64_t gpt_desc;
	unsigned int end_idx;
	unsigned int idx;
	uint64_t *l0_gpt_arr;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */
	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* Create the GPT Block descriptor for this PAS region */
	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));

	/* Start index of this region in L0 GPTs */
	idx = GPT_L0_IDX(pas->base_pa);

	/*
	 * Determine number of L0 GPT descriptors covered by
	 * this PAS region and use the count to populate these
	 * descriptors.
	 */
	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);

	/* Generate the needed block descriptors. */
	for (; idx < end_idx; idx++) {
		l0_gpt_arr[idx] = gpt_desc;
		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
			idx, &l0_gpt_arr[idx],
			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
	}
}

/*
 * Helper function to determine if the end physical address lies in the same L0
 * region as the current physical address. If true, the end physical address is
 * returned; otherwise, the start address of the next region is returned.
 *
 * Parameters
 *   cur_pa		Physical address of the current PA in the loop through
 *			the range.
 *   end_pa		Physical address of the end PA in a PAS range.
 *
 * Return
 *   The PA of the end of the current range.
 */
static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t cur_idx;
	uintptr_t end_idx;

	cur_idx = GPT_L0_IDX(cur_pa);
	end_idx = GPT_L0_IDX(end_pa);

	assert(cur_idx <= end_idx);

	if (cur_idx == end_idx) {
		return end_pa;
	}

	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
}
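
/*
 * Illustrative example (assuming the default 1GB L0GPTSZ): for
 * cur_pa = 0x20000000 and end_pa = 0x80000000, cur_pa sits in L0 region 0
 * and end_pa in region 2, so the function returns 0x40000000, the first
 * address of the next L0 region. Once cur_pa advances into region 2 the
 * indices match and end_pa itself is returned.
 */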

/*
 * Helper function to fill out GPI entries in a single L1 table. This function
 * fills out entire L1 descriptors at a time to save memory writes.
 *
 * Parameters
 *   gpi		GPI to set this range to
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range.
 *   last		Address of last granule in range (inclusive).
 */
static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
			    uintptr_t last)
{
	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFFULL;

	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
	assert(l1 != NULL);

	/* Shift the mask if we're starting in the middle of an L1 entry. */
	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region. */
	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
		/*
		 * Account for stopping in the middle of an L1 entry; 15 is
		 * the index of the last 4-bit GPI in a 64-bit descriptor.
		 */
		if (i == GPT_L1_IDX(gpt_config.p, last)) {
			gpi_mask &= (gpi_mask >> ((15 -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* Write GPI values. */
		assert((l1[i] & gpi_mask) ==
		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);

		/* Reset mask. */
		gpi_mask = 0xFFFFFFFFFFFFFFFFULL;
	}
}
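
/*
 * Worked mask example (illustrative): each 64-bit L1 descriptor packs 16
 * 4-bit GPI fields, granule 0 in bits [3:0] up to granule 15 in bits
 * [63:60]. For a range starting at GPI index 2 and ending at GPI index 10
 * of the same descriptor, the mask starts as ~0 << 8 = 0xFFFFFFFFFFFFFF00,
 * then is ANDed with itself shifted right by (15 - 10) * 4 = 20 bits,
 * leaving 0x00000FFFFFFFFF00: exactly nibbles 2 through 10. Only those
 * GPI fields are rewritten; neighbouring granules keep their values.
 */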

/*
 * This function finds the next available unused L1 table and initializes all
 * granule descriptor entries to GPI_ANY. This ensures that there are no chunks
 * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
 * event that a PAS region stops midway through an L1 table, thus guaranteeing
 * that all memory not explicitly assigned is GPI_ANY. This function does not
 * check for overflow conditions; that should be done by the caller.
 *
 * Return
 *   Pointer to the next available L1 table.
 */
static uint64_t *gpt_get_new_l1_tbl(void)
{
	/* Retrieve the next L1 table. */
	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
		       gpt_next_l1_tbl_idx));

	/* Increment L1 counter. */
	gpt_next_l1_tbl_idx++;

	/* Initialize all GPIs to GPT_GPI_ANY */
	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
	}

	return l1;
}

/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;
	uintptr_t cur_pa;
	uintptr_t last_gran_pa;
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */
	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Iterate over each L0 region in this memory range. */
	for (l0_idx = GPT_L0_IDX(pas->base_pa);
	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
	     l0_idx++) {
		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry. */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space. */
			l1_gpt_arr = gpt_get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it. */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
			l0_idx, &l0_gpt_base[l0_idx],
			(unsigned long long)(l1_gpt_arr),
			l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
				cur_pa, last_gran_pa);

		/* Advance cur_pa to first granule in next L0 region. */
		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
	}
}

/*
 * This function flushes a range of L0 descriptors used by a given PAS region
 * array. There is a chance that some unmodified L0 descriptors would be flushed
 * in the case that there are "holes" in an array of PAS regions but overall
 * this should be faster than individually flushing each modified L0 descriptor
 * as they are created.
 *
 * Parameters
 *   *pas		Pointer to an array of PAS regions.
 *   pas_count		Number of entries in the PAS array.
 */
static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
{
	unsigned int idx;
	unsigned int start_idx;
	unsigned int end_idx;
	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas != NULL);
	assert(pas_count > 0);

	/* Initial start and end values. */
	start_idx = GPT_L0_IDX(pas[0].base_pa);
	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);

	/* Find lowest and highest L0 indices used in this PAS array. */
	for (idx = 1; idx < pas_count; idx++) {
		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
			start_idx = GPT_L0_IDX(pas[idx].base_pa);
		}
		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
		}
	}

	/*
	 * Flush all covered L0 descriptors, add 1 because we need to include
	 * the end index value.
	 */
	flush_dcache_range((uintptr_t)&l0[start_idx],
			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
}

/*
 * Public API to enable granule protection checks once the tables have all been
 * initialized. This function is called at first initialization and then again
 * later during warm boots of CPU cores.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_enable(void)
{
	u_register_t gpccr_el3;

	/*
	 * Granule tables must be initialised before enabling
	 * granule protection.
	 */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] Tables have not been initialized!\n");
		return -EPERM;
	}

	/* Write the base address of the L0 tables into GPTBR */
	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);

	/* GPCCR_EL3.PPS */
	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);

	/* GPCCR_EL3.PGS */
	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);

	/*
	 * Since EL3 maps the L1 region as Inner shareable, use the same
	 * shareability attribute for GPC as well so that
	 * GPC fetches are visible to PEs.
	 */
	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);

	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);

	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
	write_gpccr_el3(gpccr_el3);
	isb();

	/* Invalidate any stale TLB entries and any cached register fields */
	tlbipaallos();
	dsb();
	isb();

	/* Enable GPT */
	gpccr_el3 |= GPCCR_GPC_BIT;

	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
	write_gpccr_el3(gpccr_el3);
	isb();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}

/*
 * Public API to disable granule protection checks.
 */
void gpt_disable(void)
{
	u_register_t gpccr_el3 = read_gpccr_el3();

	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
	dsbsy();
	isb();
}

/*
 * Public API that initializes the entire protected space to GPT_GPI_ANY using
 * the L0 tables (block descriptors). Ideally, this function is invoked prior
 * to DDR discovery and initialization. The MMU must be initialized before
 * calling this function.
 *
 * Parameters
 *   pps		PPS value to use for table generation
 *   l0_mem_base	Base address of L0 tables in memory.
 *   l0_mem_size	Total size of memory available for L0 tables.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
		       size_t l0_mem_size)
{
	int ret;
	uint64_t gpt_desc;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Validate other parameters. */
	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
	if (ret != 0) {
		return ret;
	}

	/* Create the descriptor to initialize L0 entries with. */
	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);

	/* Iterate through all L0 entries */
	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
		((uint64_t *)l0_mem_base)[i] = gpt_desc;
	}

	/* Flush updated L0 tables to memory. */
	flush_dcache_range((uintptr_t)l0_mem_base,
			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));

	/* Stash the L0 base address once initial setup is complete. */
	gpt_config.plat_gpt_l0_base = l0_mem_base;

	return 0;
}

/*
 * Public API that carves out PAS regions from the L0 tables and builds any L1
 * tables that are needed. This function ideally is run after DDR discovery and
 * initialization. The L0 tables must have already been initialized to GPI_ANY
 * when this function is called.
 *
 * This function can be called multiple times with different L1 memory ranges
 * and PAS regions if it is desirable to place L1 tables in different locations
 * in memory. (e.g. when there are multiple DDR banks and the L1 tables should
 * live in the DDR bank that they control.)
 *
 * Parameters
 *   pgs		PGS value to use for table generation.
 *   l1_mem_base	Base address of memory used for L1 tables.
 *   l1_mem_size	Total size of memory available for L1 tables.
 *   *pas_regions	Pointer to PAS regions structure array.
 *   pas_count		Total number of PAS regions.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
			   size_t l1_mem_size, pas_region_t *pas_regions,
			   unsigned int pas_count)
{
	int ret;
	int l1_gpt_cnt;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
	if (pgs > GPT_PGS_MAX) {
		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
		return -EINVAL;
	}
	gpt_config.pgs = pgs;
	gpt_config.p = gpt_p_lookup[pgs];

	/* Make sure L0 tables have been initialized. */
	if (gpt_config.plat_gpt_l0_base == 0U) {
		ERROR("[GPT] L0 tables must be initialized first!\n");
		return -EPERM;
	}

	/* Check if L1 GPTs are required and how many. */
	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
	if (l1_gpt_cnt < 0) {
		return l1_gpt_cnt;
	}

	VERBOSE("[GPT] %d L1 GPTs requested.\n", l1_gpt_cnt);

	/* If L1 tables are needed then validate the L1 parameters. */
	if (l1_gpt_cnt > 0) {
		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
					     l1_gpt_cnt);
		if (ret != 0) {
			return ret;
		}

		/* Set up parameters for L1 table generation. */
		gpt_l1_tbl = l1_mem_base;
		gpt_next_l1_tbl_idx = 0U;
	}

	INFO("[GPT] Boot Configuration\n");
	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	INFO("  PAS count: 0x%x\n", pas_count);
	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

	/* Generate the tables in memory. */
	for (unsigned int idx = 0U; idx < pas_count; idx++) {
		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));

		/* Check if a block or table descriptor is required */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			gpt_generate_l0_blk_desc(&pas_regions[idx]);
		} else {
			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
		}
	}

	/* Flush modified L0 tables. */
	flush_l0_for_pas_array(pas_regions, pas_count);

	/* Flush L1 tables if needed. */
	if (l1_gpt_cnt > 0) {
		flush_dcache_range(l1_mem_base,
				   GPT_L1_TABLE_SIZE(gpt_config.p) *
				   l1_gpt_cnt);
	}

	/* Make sure that all the entries are written to the memory. */
	dsbishst();
	tlbipaallos();
	dsb();
	isb();

	return 0;
}
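
/*
 * Usage sketch (illustrative only, not part of this file's logic): a
 * typical BL2 boot flow initializes the L0 tables, carves out the PAS
 * regions, then enables checks. The addresses below are hypothetical, and
 * the GPT_MAP_REGION_* helpers and GPCCR_* enumerators are assumed from
 * lib/gpt_rme/gpt_rme.h; check that header for the exact spellings.
 *
 *	pas_region_t pas[] = {
 *		GPT_MAP_REGION_BLOCK(0x00000000UL, 0x40000000UL, GPT_GPI_ANY),
 *		GPT_MAP_REGION_GRANULE(0x40000000UL, 0x00200000UL,
 *				       GPT_GPI_REALM),
 *	};
 *
 *	if ((gpt_init_l0_tables(GPCCR_PPS_4GB, l0_base, l0_size) == 0) &&
 *	    (gpt_init_pas_l1_tables(GPCCR_PGS_4K, l1_base, l1_size,
 *				    pas, ARRAY_SIZE(pas)) == 0)) {
 *		(void)gpt_enable();
 *	}
 */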

/*
 * Public API to initialize the runtime gpt_config structure based on the values
 * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
 * typically happens in a bootloader stage prior to setting up the EL3 runtime
 * environment for the granule transition service, so this function detects the
 * initialization from a previous stage. Granule protection checks must be
 * enabled already or this function will return an error.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_runtime_init(void)
{
	u_register_t reg;

	/* Ensure that MMU and Data caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);

	/* Ensure GPC are already enabled. */
	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
		ERROR("[GPT] Granule protection checks are not enabled!\n");
		return -EPERM;
	}

	/*
	 * Read the L0 table address from GPTBR, we don't need the L1 base
	 * address since those are included in the L0 tables as needed.
	 */
	reg = read_gptbr_el3();
	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
				      GPTBR_BADDR_MASK) <<
				      GPTBR_BADDR_VAL_SHIFT;

	/* Read GPCCR to get PGS and PPS values. */
	reg = read_gpccr_el3();
	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
	gpt_config.t = gpt_t_lookup[gpt_config.pps];
	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
	gpt_config.p = gpt_p_lookup[gpt_config.pgs];

	VERBOSE("[GPT] Runtime Configuration\n");
	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);

	return 0;
}

/*
 * The L1 descriptors are protected by a spinlock to ensure that multiple
 * CPUs do not attempt to change the descriptors at once. In the future it
 * would be better to have separate spinlocks for each L1 descriptor.
 */
static spinlock_t gpt_lock;

/*
 * Helper that clears the GPI field at gpi_shift in *gpt_l1_desc, inserts
 * target_pas there, and stores the updated descriptor at index idx of
 * gpt_l1_addr.
 */
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
			     unsigned int gpi_shift, unsigned int idx,
			     unsigned int target_pas)
{
	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;
}

/*
 * Helper to retrieve the gpt_l1_* fields for the granule at the given base
 * address; the information is returned in gpi_info.
 */
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
	uint64_t gpt_l0_desc, *gpt_l0_base;

	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		return -EINVAL;
	}

	/* Get the table index and GPI shift from PA. */
	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;

	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
	gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
			GPT_L1_GRAN_DESC_GPI_MASK;
	return 0;
}
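
/*
 * Worked lookup example (illustrative, assuming 4KB granules, P=12, and
 * the default 1GB L0GPTSZ): for base = 0x40123000 the granule number
 * within its L0 region is 0x123000 >> 12 = 0x123 = 291. With 16 GPIs per
 * 64-bit entry that gives idx = 291 / 16 = 18 and GPI slot 291 % 16 = 3,
 * so gpi_shift = 3 * 4 = 12 and the GPI is bits [15:12] of descriptor 18.
 */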

/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the request,
 * if valid, fulfilled following A1.1.1 Delegate of the RME supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse;
	int res;
	unsigned int target_pas;

	/* Ensure that the tables have been set up before taking requests. */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Delegate request can only come from REALM or SECURE */
	assert(src_sec_state == SMC_FROM_REALM ||
	       src_sec_state == SMC_FROM_SECURE);

	/* See if this is a single granule transition or a range of granules. */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("[GPT] Transition request address overflow!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid. */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("[GPT] Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	target_pas = GPT_GPI_REALM;
	if (src_sec_state == SMC_FROM_SECURE) {
		target_pas = GPT_GPI_SECURE;
	}

	/*
	 * Access to L1 tables is controlled by a global lock to ensure
	 * that no more than one CPU is allowed to make changes at any
	 * given time.
	 */
	spin_lock(&gpt_lock);
	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		spin_unlock(&gpt_lock);
		return res;
	}

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("[GPT] Only a granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		spin_unlock(&gpt_lock);
		return -EPERM;
	}

	if (src_sec_state == SMC_FROM_SECURE) {
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	}

	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space. Issue DC CIPAPA over the address range.
	 */
	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
	dsboshst();

	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/* Unlock access to the L1 tables. */
	spin_unlock(&gpt_lock);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}

/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse;
	int res;

	/* Ensure that the tables have been set up before taking requests. */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled. */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* Delegate request can only come from REALM or SECURE */
	assert(src_sec_state == SMC_FROM_REALM ||
	       src_sec_state == SMC_FROM_SECURE);

	/* See if this is a single granule transition or a range of granules. */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("[GPT] Transition request address overflow!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid. */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("[GPT] Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%" PRIx64 "\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/*
	 * Access to L1 tables is controlled by a global lock to ensure
	 * that no more than one CPU is allowed to make changes at any
	 * given time.
	 */
	spin_lock(&gpt_lock);

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		spin_unlock(&gpt_lock);
		return res;
	}

	/* Check that the current address is in the delegated state */
	if ((src_sec_state == SMC_FROM_REALM &&
	     gpi_info.gpi != GPT_GPI_REALM) ||
	    (src_sec_state == SMC_FROM_SECURE &&
	     gpi_info.gpi != GPT_GPI_SECURE)) {
		VERBOSE("[GPT] Only a granule in REALM or SECURE state can be undelegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		spin_unlock(&gpt_lock);
		return -EPERM;
	}

	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
	dsboshst();

	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	if (src_sec_state == SMC_FROM_SECURE) {
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	}

	/* Ensure that the scrubbed data has made it past the PoPA */
	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/*
	 * Remove any data loaded speculatively
	 * in NS space from before the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_dcache_to_popa_range(nse | base,
				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));

	/* Clear existing GPI encoding and transition granule. */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
	dsboshst();

	/* Ensure that all agents observe the new NS configuration */
	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
	dsbosh();

	/* Unlock access to the L1 tables. */
	spin_unlock(&gpt_lock);

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}