gpt_rme.c
  1. /*
  2. * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. */
  6. #include <assert.h>
  7. #include <errno.h>
  8. #include <inttypes.h>
  9. #include <limits.h>
  10. #include <stdint.h>
  11. #include <arch.h>
  12. #include <arch_features.h>
  13. #include <arch_helpers.h>
  14. #include <common/debug.h>
  15. #include "gpt_rme_private.h"
  16. #include <lib/gpt_rme/gpt_rme.h>
  17. #include <lib/smccc.h>
  18. #include <lib/spinlock.h>
  19. #include <lib/xlat_tables/xlat_tables_v2.h>
  20. #if !ENABLE_RME
  21. #error "ENABLE_RME must be enabled to use the GPT library"
  22. #endif
  23. /*
  24. * Lookup T from PPS
  25. *
  26. * PPS Size T
  27. * 0b000 4GB 32
  28. * 0b001 64GB 36
  29. * 0b010 1TB 40
  30. * 0b011 4TB 42
  31. * 0b100 16TB 44
  32. * 0b101 256TB 48
  33. * 0b110 4PB 52
  34. *
  35. * See section 15.1.27 of the RME specification.
  36. */
  37. static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
  38. PPS_1TB_T, PPS_4TB_T,
  39. PPS_16TB_T, PPS_256TB_T,
  40. PPS_4PB_T};
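/*
 * Worked example (for illustration only): a platform reporting PPS = 0b010
 * (1TB protected space) maps to T = 40 via the table above, i.e. the GPT
 * covers 2^40 bytes of physical address space.
 */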
  41. /*
  42. * Lookup P from PGS
  43. *
  44. * PGS Size P
  45. * 0b00 4KB 12
  46. * 0b10 16KB 14
  47. * 0b01 64KB 16
  48. *
  49. * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB; this is not a typo.
  50. *
  51. * See section 15.1.27 of the RME specification.
  52. */
  53. static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
  54. static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
  55. uint64_t l1_desc);
  56. static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
  57. uint64_t l1_desc);
  58. static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
  59. uint64_t l1_desc);
  60. /*
  61. * This structure contains GPT configuration data
  62. */
  63. typedef struct {
  64. uintptr_t plat_gpt_l0_base;
  65. gpccr_pps_e pps;
  66. gpt_t_val_e t;
  67. gpccr_pgs_e pgs;
  68. gpt_p_val_e p;
  69. } gpt_config_t;
  70. static gpt_config_t gpt_config;
  71. /*
  72. * Number of L1 entries in 2MB, depending on GPCCR_EL3.PGS:
  73. * +-------+------------+
  74. * | PGS | L1 entries |
  75. * +-------+------------+
  76. * | 4KB | 32 |
  77. * +-------+------------+
  78. * | 16KB | 8 |
  79. * +-------+------------+
  80. * | 64KB | 2 |
  81. * +-------+------------+
  82. */
  83. static unsigned int gpt_l1_cnt_2mb;
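/*
 * Illustrative arithmetic behind the table above: with 4KB granules, a 2MB
 * block holds 2MB / 4KB = 512 granules; each 64-bit L1 entry encodes 16
 * 4-bit GPIs, giving 512 / 16 = 32 L1 entries per 2MB.
 */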
  84. /*
  85. * Mask for the L1 index field, depending on
  86. * GPCCR_EL3.L0GPTSZ and GPCCR_EL3.PGS:
  87. * +---------+-------------------------------+
  88. * | | PGS |
  89. * +---------+----------+----------+---------+
  90. * | L0GPTSZ | 4KB | 16KB | 64KB |
  91. * +---------+----------+----------+---------+
  92. * | 1GB | 0x3FFF | 0xFFF | 0x3FF |
  93. * +---------+----------+----------+---------+
  94. * | 16GB | 0x3FFFF | 0xFFFF | 0x3FFF |
  95. * +---------+----------+----------+---------+
  96. * | 64GB | 0xFFFFF | 0x3FFFF | 0xFFFF |
  97. * +---------+----------+----------+---------+
  98. * | 512GB | 0x7FFFFF | 0x1FFFFF | 0x7FFFF |
  99. * +---------+----------+----------+---------+
  100. */
  101. static uint64_t gpt_l1_index_mask;
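/*
 * Illustrative derivation of one mask value above: for L0GPTSZ = 1GB and
 * PGS = 4KB there are 2^30 / 2^12 = 2^18 granules per L0 region; at 16 GPIs
 * per 64-bit L1 entry that is 2^14 entries, hence an index mask of 0x3FFF.
 */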
  102. /* Number of 128-bit L1 entries in 2MB, 32MB and 512MB */
  103. #define L1_QWORDS_2MB (gpt_l1_cnt_2mb / 2U)
  104. #define L1_QWORDS_32MB (L1_QWORDS_2MB * 16U)
  105. #define L1_QWORDS_512MB (L1_QWORDS_32MB * 16U)
  106. /* Size in bytes of L1 entries in 2MB, 32MB */
  107. #define L1_BYTES_2MB (gpt_l1_cnt_2mb * sizeof(uint64_t))
  108. #define L1_BYTES_32MB (L1_BYTES_2MB * 16U)
  109. /* Get the index into the L1 table from a physical address */
  110. #define GPT_L1_INDEX(_pa) \
  111. (((_pa) >> (unsigned int)GPT_L1_IDX_SHIFT(gpt_config.p)) & gpt_l1_index_mask)
  112. /* This variable is used during initialization of the L1 tables */
  113. static uintptr_t gpt_l1_tbl;
  114. /* This variable is used at runtime */
  115. #if (RME_GPT_BITLOCK_BLOCK == 0)
  116. /*
  117. * The GPTs are protected by a global spinlock to ensure
  118. * that multiple CPUs do not attempt to change the descriptors at once.
  119. */
  120. static spinlock_t gpt_lock;
  121. #else
  122. /* Bitlocks base address */
  123. static bitlock_t *gpt_bitlock_base;
  124. #endif
  125. /* Lock/unlock macros for GPT entries */
  126. #if (RME_GPT_BITLOCK_BLOCK == 0)
  127. /*
  128. * Access to GPT is controlled by a global lock to ensure
  129. * that no more than one CPU is allowed to make changes at any
  130. * given time.
  131. */
  132. #define GPT_LOCK spin_lock(&gpt_lock)
  133. #define GPT_UNLOCK spin_unlock(&gpt_lock)
  134. #else
  135. /*
  136. * Access to a block of memory is controlled by a bitlock.
  137. * Size of block = RME_GPT_BITLOCK_BLOCK * 512MB.
  138. */
  139. #define GPT_LOCK bit_lock(gpi_info.lock, gpi_info.mask)
  140. #define GPT_UNLOCK bit_unlock(gpi_info.lock, gpi_info.mask)
  141. #endif
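/*
 * Sizing sketch (illustration only): with RME_GPT_BITLOCK_BLOCK = 1 each bit
 * guards 512MB of protected space, so a 1TB PPS needs 2^40 / 2^29 = 2048
 * bits, i.e. 256 bytes of bitlocks placed after the L0 table.
 */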
  142. static void tlbi_page_dsbosh(uintptr_t base)
  143. {
  144. /* Look-up table for invalidating TLBs for 4KB, 16KB and 64KB pages */
  145. static const gpt_tlbi_lookup_t tlbi_page_lookup[] = {
  146. { tlbirpalos_4k, ~(SZ_4K - 1UL) },
  147. { tlbirpalos_64k, ~(SZ_64K - 1UL) },
  148. { tlbirpalos_16k, ~(SZ_16K - 1UL) }
  149. };
  150. tlbi_page_lookup[gpt_config.pgs].function(
  151. base & tlbi_page_lookup[gpt_config.pgs].mask);
  152. dsbosh();
  153. }
  154. /*
  155. * Helper function to fill out GPI entries in a single L1 table
  156. * with Granules or Contiguous descriptor.
  157. *
  158. * Parameters
  159. * l1 Pointer to 2MB, 32MB or 512MB aligned L1 table entry to fill out
  160. * l1_desc GPT Granules or Contiguous descriptor to set this range to
  161. * cnt Number of 128-bit (double 64-bit) L1 entries to fill
  162. *
  163. */
  164. static void fill_desc(uint64_t *l1, uint64_t l1_desc, unsigned int cnt)
  165. {
  166. uint128_t *l1_quad = (uint128_t *)l1;
  167. uint128_t l1_quad_desc = (uint128_t)l1_desc | ((uint128_t)l1_desc << 64);
  168. VERBOSE("GPT: %s(%p 0x%"PRIx64" %u)\n", __func__, l1, l1_desc, cnt);
  169. for (unsigned int i = 0U; i < cnt; i++) {
  170. *l1_quad++ = l1_quad_desc;
  171. }
  172. }
  173. static void shatter_2mb(uintptr_t base, const gpi_info_t *gpi_info,
  174. uint64_t l1_desc)
  175. {
  176. unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
  177. VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
  178. __func__, base, l1_desc);
  179. /* Convert 2MB Contiguous block to Granules */
  180. fill_desc(&gpi_info->gpt_l1_addr[idx], l1_desc, L1_QWORDS_2MB);
  181. }
  182. static void shatter_32mb(uintptr_t base, const gpi_info_t *gpi_info,
  183. uint64_t l1_desc)
  184. {
  185. unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base));
  186. const uint64_t *l1_gran = &gpi_info->gpt_l1_addr[idx];
  187. uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
  188. uint64_t *l1;
  189. VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
  190. __func__, base, l1_desc);
  191. /* Get index corresponding to 32MB aligned address */
  192. idx = GPT_L1_INDEX(ALIGN_32MB(base));
  193. l1 = &gpi_info->gpt_l1_addr[idx];
  194. /* 16 x 2MB blocks in 32MB */
  195. for (unsigned int i = 0U; i < 16U; i++) {
  196. /* Fill with Granules or Contiguous descriptors */
  197. fill_desc(l1, (l1 == l1_gran) ? l1_desc : l1_cont_desc,
  198. L1_QWORDS_2MB);
  199. l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_2MB);
  200. }
  201. }
  202. static void shatter_512mb(uintptr_t base, const gpi_info_t *gpi_info,
  203. uint64_t l1_desc)
  204. {
  205. unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base));
  206. const uint64_t *l1_32mb = &gpi_info->gpt_l1_addr[idx];
  207. uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);
  208. uint64_t *l1;
  209. VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n",
  210. __func__, base, l1_desc);
  211. /* Get index corresponding to 512MB aligned address */
  212. idx = GPT_L1_INDEX(ALIGN_512MB(base));
  213. l1 = &gpi_info->gpt_l1_addr[idx];
  214. /* 16 x 32MB blocks in 512MB */
  215. for (unsigned int i = 0U; i < 16U; i++) {
  216. if (l1 == l1_32mb) {
  217. /* Shatter this 32MB block */
  218. shatter_32mb(base, gpi_info, l1_desc);
  219. } else {
  220. /* Fill 32MB with Contiguous descriptors */
  221. fill_desc(l1, l1_cont_desc, L1_QWORDS_32MB);
  222. }
  223. l1 = (uint64_t *)((uintptr_t)l1 + L1_BYTES_32MB);
  224. }
  225. }
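/*
 * Shatter example (illustration only, 4KB PGS): changing a single granule
 * inside a 512MB Contiguous block first calls shatter_512mb(), leaving 15
 * 32MB Contiguous runs plus one shattered 32MB block; that block in turn
 * keeps 15 2MB Contiguous runs and one 2MB block of 32 Granules entries,
 * which now carries the individual GPI values.
 */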
  226. /*
  227. * This function checks to see if a GPI value is valid.
  228. *
  229. * These are valid GPI values.
  230. * GPT_GPI_NO_ACCESS U(0x0)
  231. * GPT_GPI_SECURE U(0x8)
  232. * GPT_GPI_NS U(0x9)
  233. * GPT_GPI_ROOT U(0xA)
  234. * GPT_GPI_REALM U(0xB)
  235. * GPT_GPI_ANY U(0xF)
  236. *
  237. * Parameters
  238. * gpi GPI to check for validity.
  239. *
  240. * Return
  241. * true for a valid GPI, false for an invalid one.
  242. */
  243. static bool is_gpi_valid(unsigned int gpi)
  244. {
  245. if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
  246. ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
  247. return true;
  248. }
  249. return false;
  250. }
  251. /*
  252. * This function checks to see if two PAS regions overlap.
  253. *
  254. * Parameters
  255. * base_1: base address of first PAS
  256. * size_1: size of first PAS
  257. * base_2: base address of second PAS
  258. * size_2: size of second PAS
  259. *
  260. * Return
  261. * True if PAS regions overlap, false if they do not.
  262. */
  263. static bool check_pas_overlap(uintptr_t base_1, size_t size_1,
  264. uintptr_t base_2, size_t size_2)
  265. {
  266. if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) {
  267. return true;
  268. }
  269. return false;
  270. }
  271. /*
  272. * This helper function checks to see if a PAS region from index 0 to
  273. * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
  274. *
  275. * Parameters
  276. * l0_idx: Index of the L0 entry to check
  277. * pas_regions: PAS region array
  278. * pas_idx: Upper bound of the PAS array index.
  279. *
  280. * Return
  281. * True if a PAS region occupies the L0 region in question, false if not.
  282. */
  283. static bool does_previous_pas_exist_here(unsigned int l0_idx,
  284. pas_region_t *pas_regions,
  285. unsigned int pas_idx)
  286. {
  287. /* Iterate over PAS regions up to pas_idx */
  288. for (unsigned int i = 0U; i < pas_idx; i++) {
  289. if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
  290. GPT_L0GPTSZ_ACTUAL_SIZE,
  291. pas_regions[i].base_pa, pas_regions[i].size)) {
  292. return true;
  293. }
  294. }
  295. return false;
  296. }
  297. /*
  298. * This function iterates over all of the PAS regions and checks them to ensure
  299. * proper alignment of base and size, that the GPI is valid, and that no regions
  300. * overlap. As a part of the overlap checks, this function checks existing L0
  301. * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
  302. * is called multiple times to place L1 tables in different areas of memory. It
  303. * also counts the number of L1 tables needed and returns it on success.
  304. *
  305. * Parameters
  306. * *pas_regions Pointer to array of PAS region structures.
  307. * pas_region_cnt Total number of PAS regions in the array.
  308. *
  309. * Return
  310. * Negative Linux error code in the event of a failure, number of L1 tables
  311. * required when successful.
  312. */
  313. static int validate_pas_mappings(pas_region_t *pas_regions,
  314. unsigned int pas_region_cnt)
  315. {
  316. unsigned int idx;
  317. unsigned int l1_cnt = 0U;
  318. unsigned int pas_l1_cnt;
  319. uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;
  320. assert(pas_regions != NULL);
  321. assert(pas_region_cnt != 0U);
  322. for (idx = 0U; idx < pas_region_cnt; idx++) {
  323. /* Check for arithmetic overflow in region */
  324. if ((ULONG_MAX - pas_regions[idx].base_pa) <
  325. pas_regions[idx].size) {
  326. ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
  327. return -EOVERFLOW;
  328. }
  329. /* Initial checks for PAS validity */
  330. if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
  331. GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
  332. !is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
  333. ERROR("GPT: PAS[%u] is invalid!\n", idx);
  334. return -EFAULT;
  335. }
  336. /*
  337. * Make sure this PAS does not overlap with another one. We
  338. * start from idx + 1 instead of 0 since prior PAS mappings will
  339. * have already checked themselves against this one.
  340. */
  341. for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
  342. if (check_pas_overlap(pas_regions[idx].base_pa,
  343. pas_regions[idx].size,
  344. pas_regions[i].base_pa,
  345. pas_regions[i].size)) {
  346. ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
  347. i, idx);
  348. return -EFAULT;
  349. }
  350. }
  351. /*
  352. * Since this function can be called multiple times with
  353. * separate L1 tables we need to check the existing L0 mapping
  354. * to see if this PAS would fall into one that has already been
  355. * initialized.
  356. */
  357. for (unsigned int i =
  358. (unsigned int)GPT_L0_IDX(pas_regions[idx].base_pa);
  359. i <= GPT_L0_IDX(pas_regions[idx].base_pa +
  360. pas_regions[idx].size - 1UL);
  361. i++) {
  362. if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
  363. (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
  364. /* This descriptor is unused so continue */
  365. continue;
  366. }
  367. /*
  368. * This descriptor has been initialized in a previous
  369. * call to this function so cannot be initialized again.
  370. */
  371. ERROR("GPT: PAS[%u] overlaps with previous L0[%u]!\n",
  372. idx, i);
  373. return -EFAULT;
  374. }
  375. /* Check for block mapping (L0) type */
  376. if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
  377. GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
  378. /* Make sure base and size are block-aligned */
  379. if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
  380. !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
  381. ERROR("GPT: PAS[%u] is not block-aligned!\n",
  382. idx);
  383. return -EFAULT;
  384. }
  385. continue;
  386. }
  387. /* Check for granule mapping (L1) type */
  388. if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
  389. GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
  390. /* Make sure base and size are granule-aligned */
  391. if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
  392. !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
  393. ERROR("GPT: PAS[%u] is not granule-aligned!\n",
  394. idx);
  395. return -EFAULT;
  396. }
  397. /* Find how many L1 tables this PAS occupies */
  398. pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
  399. pas_regions[idx].size - 1UL) -
  400. GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);
  401. /*
  402. * This creates a situation where, if multiple PAS
  403. * regions occupy the same table descriptor, we can get
  404. * an artificially high total L1 table count. The way we
  405. * handle this is by checking each PAS against those
  406. * before it in the array, and if they both occupy the
  407. * same L0 region we subtract from pas_l1_cnt and only the
  408. * first PAS in the array gets to count it.
  409. */
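/*
 * Example (hypothetical values): with a 1GB L0GPTSZ, PAS[0] covering
 * 0x0 - 0x60000000 spans L0[0] and L0[1] (2 L1 tables), and PAS[1]
 * covering 0x60000000 - 0x80000000 sits entirely in L0[1]. Without
 * these checks L0[1] would be counted twice; the duplicate is
 * subtracted below so the total stays at 2.
 */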
  410. /*
  411. * If L1 count is greater than 1 we know the start and
  412. * end PAs are in different L0 regions so we must check
  413. * both for overlap against other PAS.
  414. */
  415. if (pas_l1_cnt > 1) {
  416. if (does_previous_pas_exist_here(
  417. GPT_L0_IDX(pas_regions[idx].base_pa +
  418. pas_regions[idx].size - 1UL),
  419. pas_regions, idx)) {
  420. pas_l1_cnt--;
  421. }
  422. }
  423. if (does_previous_pas_exist_here(
  424. GPT_L0_IDX(pas_regions[idx].base_pa),
  425. pas_regions, idx)) {
  426. pas_l1_cnt--;
  427. }
  428. l1_cnt += pas_l1_cnt;
  429. continue;
  430. }
  431. /* If execution reaches this point, mapping type is invalid */
  432. ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
  433. GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
  434. return -EINVAL;
  435. }
  436. return l1_cnt;
  437. }
  438. /*
  439. * This function validates L0 initialization parameters.
  440. *
  441. * Parameters
  442. * l0_mem_base Base address of memory used for L0 tables.
  443. * l0_mem_size Size of memory available for L0 tables.
  444. *
  445. * Return
  446. * Negative Linux error code in the event of a failure, 0 for success.
  447. */
  448. static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
  449. size_t l0_mem_size)
  450. {
  451. size_t l0_alignment, locks_size = 0;
  452. /*
  453. * Make sure PPS is valid and then store it since macros need this value
  454. * to work.
  455. */
  456. if (pps > GPT_PPS_MAX) {
  457. ERROR("GPT: Invalid PPS: 0x%x\n", pps);
  458. return -EINVAL;
  459. }
  460. gpt_config.pps = pps;
  461. gpt_config.t = gpt_t_lookup[pps];
  462. /* Alignment must be the greater of 4KB or l0 table size */
  463. l0_alignment = PAGE_SIZE_4KB;
  464. if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
  465. l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
  466. }
  467. /* Check base address */
  468. if ((l0_mem_base == 0UL) ||
  469. ((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
  470. ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
  471. return -EFAULT;
  472. }
  473. #if (RME_GPT_BITLOCK_BLOCK != 0)
  474. /*
  475. * Size of bitlocks in bytes for the protected address space
  476. * with RME_GPT_BITLOCK_BLOCK * 512MB per bitlock.
  477. */
  478. locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
  479. (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
  480. /*
  481. * If protected space size is less than the size covered
  482. * by 'bitlock' structure, check for a single bitlock.
  483. */
  484. if (locks_size < LOCK_SIZE) {
  485. locks_size = LOCK_SIZE;
  486. }
  487. #endif
  488. /* Check size for L0 tables and bitlocks */
  489. if (l0_mem_size < (GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size)) {
  490. ERROR("GPT: Inadequate L0 memory\n");
  491. ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
  492. GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size,
  493. l0_mem_size);
  494. return -ENOMEM;
  495. }
  496. return 0;
  497. }
  498. /*
  499. * In the event that L1 tables are needed, this function validates
  500. * the L1 table generation parameters.
  501. *
  502. * Parameters
  503. * l1_mem_base Base address of memory used for L1 table allocation.
  504. * l1_mem_size Total size of memory available for L1 tables.
  505. * l1_gpt_cnt Number of L1 tables needed.
  506. *
  507. * Return
  508. * Negative Linux error code in the event of a failure, 0 for success.
  509. */
  510. static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
  511. unsigned int l1_gpt_cnt)
  512. {
  513. size_t l1_gpt_mem_sz;
  514. /* Check if the granularity is supported */
  515. if (!xlat_arch_is_granule_size_supported(
  516. GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
  517. return -EPERM;
  518. }
  519. /* Make sure L1 tables are aligned to their size */
  520. if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
  521. ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
  522. l1_mem_base);
  523. return -EFAULT;
  524. }
  525. /* Get total memory needed for L1 tables */
  526. l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
  527. /* Check for overflow */
  528. if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
  529. ERROR("GPT: Overflow calculating L1 memory size\n");
  530. return -ENOMEM;
  531. }
  532. /* Make sure enough space was supplied */
  533. if (l1_mem_size < l1_gpt_mem_sz) {
  534. ERROR("GPT: Inadequate L1 GPTs memory\n");
  536. ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
  537. l1_gpt_mem_sz, l1_mem_size);
  538. return -ENOMEM;
  539. }
  540. VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
  541. return 0;
  542. }
  543. /*
  544. * This function initializes L0 block descriptors (regions that cannot be
  545. * transitioned at the granule level) according to the provided PAS.
  546. *
  547. * Parameters
  548. * *pas Pointer to the structure defining the PAS region to
  549. * initialize.
  550. */
  551. static void generate_l0_blk_desc(pas_region_t *pas)
  552. {
  553. uint64_t gpt_desc;
  554. unsigned long idx, end_idx;
  555. uint64_t *l0_gpt_arr;
  556. assert(gpt_config.plat_gpt_l0_base != 0U);
  557. assert(pas != NULL);
  558. /*
  559. * Checking of PAS parameters has already been done in
  560. * validate_pas_mappings so no need to check the same things again.
  561. */
  562. l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
  563. /* Create the GPT Block descriptor for this PAS region */
  564. gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
  565. /* Start index of this region in L0 GPTs */
  566. idx = GPT_L0_IDX(pas->base_pa);
  567. /*
  568. * Determine number of L0 GPT descriptors covered by
  569. * this PAS region and use the count to populate these
  570. * descriptors.
  571. */
  572. end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
  573. /* Generate the needed block descriptors */
  574. for (; idx < end_idx; idx++) {
  575. l0_gpt_arr[idx] = gpt_desc;
  576. VERBOSE("GPT: L0 entry (BLOCK) index %lu [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
  577. idx, &l0_gpt_arr[idx],
  578. (gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
  579. GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
  580. }
  581. }
  582. /*
  583. * Helper function to determine if the end physical address lies in the same L0
  584. * region as the current physical address. If true, the end physical address is
  585. * returned; otherwise, the start address of the next L0 region is returned.
  586. *
  587. * Parameters
  588. * cur_pa Physical address of the current PA in the loop through
  589. * the range.
  590. * end_pa Physical address of the end PA in a PAS range.
  591. *
  592. * Return
  593. * The PA of the end of the current range.
  594. */
  595. static uintptr_t get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
  596. {
  597. uintptr_t cur_idx;
  598. uintptr_t end_idx;
  599. cur_idx = GPT_L0_IDX(cur_pa);
  600. end_idx = GPT_L0_IDX(end_pa);
  601. assert(cur_idx <= end_idx);
  602. if (cur_idx == end_idx) {
  603. return end_pa;
  604. }
  605. return (cur_idx + 1UL) << GPT_L0_IDX_SHIFT;
  606. }
  607. /*
  608. * Helper function to fill out GPI entries from 'first' granule address of
  609. * the specified 'length' in a single L1 table with Contiguous
  610. * descriptors built from 'gpi'.
  611. *
  612. * Parameters
  613. * l1 Pointer to L1 table to fill out
  614. * first Address of first granule in range
  615. * length Length of the range in bytes
  616. * gpi GPI to set this range to
  617. *
  618. * Return
  619. * Address of next granule in range.
  620. */
  621. __unused static uintptr_t fill_l1_cont_desc(uint64_t *l1, uintptr_t first,
  622. size_t length, unsigned int gpi)
  623. {
  624. /*
  625. * Look up table for contiguous blocks and descriptors.
  626. * Entries should be defined in descending block sizes:
  627. * 512MB, 32MB and 2MB.
  628. */
  629. static const gpt_fill_lookup_t gpt_fill_lookup[] = {
  630. #if (RME_GPT_MAX_BLOCK == 512)
  631. { SZ_512M, GPT_L1_CONT_DESC_512MB },
  632. #endif
  633. #if (RME_GPT_MAX_BLOCK >= 32)
  634. { SZ_32M, GPT_L1_CONT_DESC_32MB },
  635. #endif
  636. #if (RME_GPT_MAX_BLOCK != 0)
  637. { SZ_2M, GPT_L1_CONT_DESC_2MB }
  638. #endif
  639. };
  640. /*
  641. * Iterate through all block sizes (512MB, 32MB and 2MB)
  642. * starting with maximum supported.
  643. */
  644. for (unsigned long i = 0UL; i < ARRAY_SIZE(gpt_fill_lookup); i++) {
  645. /* Calculate index */
  646. unsigned long idx = GPT_L1_INDEX(first);
  647. /* Contiguous block size */
  648. size_t cont_size = gpt_fill_lookup[i].size;
  649. if (GPT_REGION_IS_CONT(length, first, cont_size)) {
  650. /* Generate Contiguous descriptor */
  651. uint64_t l1_desc = GPT_L1_GPI_CONT_DESC(gpi,
  652. gpt_fill_lookup[i].desc);
  653. /* Number of 128-bit L1 entries in block */
  654. unsigned int cnt;
  655. switch (cont_size) {
  656. case SZ_512M:
  657. cnt = L1_QWORDS_512MB;
  658. break;
  659. case SZ_32M:
  660. cnt = L1_QWORDS_32MB;
  661. break;
  662. default: /* SZ_2M */
  663. cnt = L1_QWORDS_2MB;
  664. }
  665. VERBOSE("GPT: Contiguous descriptor 0x%"PRIxPTR" %luMB\n",
  666. first, cont_size / SZ_1M);
  667. /* Fill Contiguous descriptors */
  668. fill_desc(&l1[idx], l1_desc, cnt);
  669. first += cont_size;
  670. length -= cont_size;
  671. if (length == 0UL) {
  672. break;
  673. }
  674. }
  675. }
  676. return first;
  677. }
  678. /* Build Granules descriptor with the same 'gpi' for every GPI entry */
  679. static uint64_t build_l1_desc(unsigned int gpi)
  680. {
  681. uint64_t l1_desc = (uint64_t)gpi | ((uint64_t)gpi << 4);
  682. l1_desc |= (l1_desc << 8);
  683. l1_desc |= (l1_desc << 16);
  684. return (l1_desc | (l1_desc << 32));
  685. }
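/* For example, gpi = GPT_GPI_NS (0x9) yields 0x9999999999999999ULL. */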
  686. /*
  687. * Helper function to fill out GPI entries from 'first' to 'last' granule
  688. * address in a single L1 table with 'l1_desc' Granules descriptor.
  689. *
  690. * Parameters
  691. * l1 Pointer to L1 table to fill out
  692. * first Address of first granule in range
  693. * last Address of last granule in range (inclusive)
  694. * gpi GPI to set this range to
  695. *
  696. * Return
  697. * Address of next granule in range.
  698. */
  699. static uintptr_t fill_l1_gran_desc(uint64_t *l1, uintptr_t first,
  700. uintptr_t last, unsigned int gpi)
  701. {
  702. uint64_t gpi_mask;
  703. unsigned long i;
  704. /* Generate Granules descriptor */
  705. uint64_t l1_desc = build_l1_desc(gpi);
  706. /* Shift the mask if we're starting in the middle of an L1 entry */
  707. gpi_mask = ULONG_MAX << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
  708. /* Fill out each L1 entry for this region */
  709. for (i = GPT_L1_INDEX(first); i <= GPT_L1_INDEX(last); i++) {
  710. /* Account for stopping in the middle of an L1 entry */
  711. if (i == GPT_L1_INDEX(last)) {
  712. gpi_mask &= (gpi_mask >> ((15U -
  713. GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
  714. }
  715. assert((l1[i] & gpi_mask) == (GPT_L1_ANY_DESC & gpi_mask));
  716. /* Write GPI values */
  717. l1[i] = (l1[i] & ~gpi_mask) | (l1_desc & gpi_mask);
  718. /* Reset mask */
  719. gpi_mask = ULONG_MAX;
  720. }
  721. return last + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
  722. }
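/*
 * Mask example (illustration only, 4KB PGS): if 'first' begins at the third
 * granule of an L1 entry, GPT_L1_GPI_IDX() returns 2 and gpi_mask becomes
 * ULONG_MAX << 8, so the two lower GPIs already present in that entry are
 * preserved while the remaining fourteen are rewritten.
 */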
  723. /*
  724. * Helper function to fill out GPI entries in a single L1 table.
  725. * This function fills out an entire L1 table with either Granules or Contiguous
  726. * (RME_GPT_MAX_BLOCK != 0) descriptors depending on region length and alignment.
  727. * Note. If RME_GPT_MAX_BLOCK == 0, then the L1 tables are filled with regular
  728. * Granules descriptors.
  729. *
  730. * Parameters
  731. * l1 Pointer to L1 table to fill out
  732. * first Address of first granule in range
  733. * last Address of last granule in range (inclusive)
  734. * gpi GPI to set this range to
  735. */
  736. static void fill_l1_tbl(uint64_t *l1, uintptr_t first, uintptr_t last,
  737. unsigned int gpi)
  738. {
  739. assert(l1 != NULL);
  740. assert(first <= last);
  741. assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
  742. assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) == 0UL);
  743. assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
  744. #if (RME_GPT_MAX_BLOCK != 0)
  745. while (first <= last) {
  746. /* Region length */
  747. size_t length = last - first + GPT_PGS_ACTUAL_SIZE(gpt_config.p);
  748. if (length < SZ_2M) {
  749. /*
  750. * Fill with Granule descriptors in case of
  751. * region length < 2MB.
  752. */
  753. first = fill_l1_gran_desc(l1, first, last, gpi);
  754. } else if ((first & (SZ_2M - UL(1))) == UL(0)) {
  755. /*
  756. * For a region of length >= 2MB that is 2MB aligned, the
  757. * call to fill_l1_cont_desc iterates through all supported
  758. * block sizes (512MB, 32MB and 2MB) and fills the
  759. * corresponding Contiguous descriptors.
  760. */
  761. first = fill_l1_cont_desc(l1, first, length, gpi);
  762. } else {
  763. /*
  764. * For an unaligned region >= 2MB, fill with Granules
  765. * descriptors up to the next 2MB aligned address.
  766. */
  767. uintptr_t new_last = ALIGN_2MB(first + SZ_2M) -
  768. GPT_PGS_ACTUAL_SIZE(gpt_config.p);
  769. first = fill_l1_gran_desc(l1, first, new_last, gpi);
  770. }
  771. }
  772. #else
  773. /* Fill with Granule descriptors */
  774. first = fill_l1_gran_desc(l1, first, last, gpi);
  775. #endif
  776. assert(first == (last + GPT_PGS_ACTUAL_SIZE(gpt_config.p)));
  777. }
  778. /*
  779. * This function finds the next available unused L1 table and initializes all
  780. * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
  781. * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
  782. * event that a PAS region stops midway through an L1 table, thus guaranteeing
  783. * that all memory not explicitly assigned is GPI_ANY. This function does not
  784. * check for overflow conditions, that should be done by the caller.
  785. *
  786. * Return
  787. * Pointer to the next available L1 table.
  788. */
  789. static uint64_t *get_new_l1_tbl(void)
  790. {
  791. /* Retrieve the next L1 table */
  792. uint64_t *l1 = (uint64_t *)gpt_l1_tbl;
  793. /* Increment L1 GPT address */
  794. gpt_l1_tbl += GPT_L1_TABLE_SIZE(gpt_config.p);
  795. /* Initialize all GPIs to GPT_GPI_ANY */
  796. for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
  797. l1[i] = GPT_L1_ANY_DESC;
  798. }
  799. return l1;
  800. }
  801. /*
  802. * When L1 tables are needed, this function creates the necessary L0 table
  803. * descriptors and fills out the L1 table entries according to the supplied
  804. * PAS range.
  805. *
  806. * Parameters
  807. * *pas Pointer to the structure defining the PAS region.
  808. */
  809. static void generate_l0_tbl_desc(pas_region_t *pas)
  810. {
  811. uintptr_t end_pa;
  812. uintptr_t cur_pa;
  813. uintptr_t last_gran_pa;
  814. uint64_t *l0_gpt_base;
  815. uint64_t *l1_gpt_arr;
  816. unsigned int l0_idx, gpi;
  817. assert(gpt_config.plat_gpt_l0_base != 0U);
  818. assert(pas != NULL);
  819. /*
  820. * Checking of PAS parameters has already been done in
  821. * validate_pas_mappings so no need to check the same things again.
  822. */
  823. end_pa = pas->base_pa + pas->size;
  824. l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
  825. /* We start working from the granule at base PA */
  826. cur_pa = pas->base_pa;
  827. /* Get GPI */
  828. gpi = GPT_PAS_ATTR_GPI(pas->attrs);
  829. /* Iterate over each L0 region in this memory range */
  830. for (l0_idx = (unsigned int)GPT_L0_IDX(pas->base_pa);
  831. l0_idx <= (unsigned int)GPT_L0_IDX(end_pa - 1UL);
  832. l0_idx++) {
  833. /*
  834. * See if the L0 entry is already a table descriptor or if we
  835. * need to create one.
  836. */
  837. if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
  838. /* Get the L1 array from the L0 entry */
  839. l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
  840. } else {
  841. /* Get a new L1 table from the L1 memory space */
  842. l1_gpt_arr = get_new_l1_tbl();
  843. /* Fill out the L0 descriptor and flush it */
  844. l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
  845. }
  846. VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
  847. l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);
  848. /*
  849. * Determine the PA of the last granule in this L0 descriptor.
  850. */
  851. last_gran_pa = get_l1_end_pa(cur_pa, end_pa) -
  852. GPT_PGS_ACTUAL_SIZE(gpt_config.p);
  853. /*
  854. * Fill up L1 GPT entries between these two addresses. This
  855. * function needs the addresses of the first granule and last
  856. * granule in the range.
  857. */
  858. fill_l1_tbl(l1_gpt_arr, cur_pa, last_gran_pa, gpi);
  859. /* Advance cur_pa to first granule in next L0 region */
  860. cur_pa = get_l1_end_pa(cur_pa, end_pa);
  861. }
  862. }
  863. /*
  864. * This function flushes a range of L0 descriptors used by a given PAS region
  865. * array. There is a chance that some unmodified L0 descriptors would be flushed
  866. * in the case that there are "holes" in an array of PAS regions but overall
  867. * this should be faster than individually flushing each modified L0 descriptor
  868. * as they are created.
  869. *
  870. * Parameters
  871. * *pas Pointer to an array of PAS regions.
  872. * pas_count Number of entries in the PAS array.
  873. */
  874. static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
  875. {
  876. unsigned long idx;
  877. unsigned long start_idx;
  878. unsigned long end_idx;
  879. uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
  880. assert(pas != NULL);
  881. assert(pas_count != 0U);
  882. /* Initial start and end values */
  883. start_idx = GPT_L0_IDX(pas[0].base_pa);
  884. end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
  885. /* Find lowest and highest L0 indices used in this PAS array */
  886. for (idx = 1UL; idx < pas_count; idx++) {
  887. if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
  888. start_idx = GPT_L0_IDX(pas[idx].base_pa);
  889. }
  890. if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
  891. end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
  892. }
  893. }
  894. /*
  895. * Flush all covered L0 descriptors, add 1 because we need to include
  896. * the end index value.
  897. */
  898. flush_dcache_range((uintptr_t)&l0[start_idx],
  899. ((end_idx + 1UL) - start_idx) * sizeof(uint64_t));
  900. }
  901. /*
  902. * Public API to enable granule protection checks once the tables have all been
  903. * initialized. This function is called at first initialization and then again
  904. * later during warm boots of CPU cores.
  905. *
  906. * Return
  907. * Negative Linux error code in the event of a failure, 0 for success.
  908. */
  909. int gpt_enable(void)
  910. {
  911. u_register_t gpccr_el3;
  912. /*
  913. * Granule tables must be initialised before enabling
  914. * granule protection.
  915. */
  916. if (gpt_config.plat_gpt_l0_base == 0UL) {
  917. ERROR("GPT: Tables have not been initialized!\n");
  918. return -EPERM;
  919. }
  920. /* Write the base address of the L0 tables into GPTBR */
  921. write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
  922. >> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
  923. /* GPCCR_EL3.PPS */
  924. gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
  925. /* GPCCR_EL3.PGS */
  926. gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
  927. /*
  928. * Since EL3 maps the L1 region as Inner shareable, use the same
  929. * shareability attribute for GPC as well so that
  930. * GPC fetches are visible to PEs
  931. */
  932. gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
  933. /* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
  934. gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
  935. gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
  936. /* Prepopulate GPCCR_EL3 but don't enable GPC yet */
  937. write_gpccr_el3(gpccr_el3);
  938. isb();
  939. /* Invalidate any stale TLB entries and any cached register fields */
  940. tlbipaallos();
  941. dsb();
  942. isb();
  943. /* Enable GPT */
  944. gpccr_el3 |= GPCCR_GPC_BIT;
  945. /* TODO: Configure GPCCR_EL3_GPCP for Fault control */
  946. write_gpccr_el3(gpccr_el3);
  947. isb();
  948. tlbipaallos();
  949. dsb();
  950. isb();
  951. return 0;
  952. }
  953. /*
  954. * Public API to disable granule protection checks.
  955. */
  956. void gpt_disable(void)
  957. {
  958. u_register_t gpccr_el3 = read_gpccr_el3();
  959. write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
  960. dsbsy();
  961. isb();
  962. }
  963. /*
  964. * Public API that initializes the entire protected space to GPT_GPI_ANY using
  965. * the L0 tables (block descriptors). Ideally, this function is invoked prior
  966. * to DDR discovery and initialization. The MMU must be initialized before
  967. * calling this function.
  968. *
  969. * Parameters
  970. * pps PPS value to use for table generation
  971. * l0_mem_base Base address of L0 tables in memory.
  972. * l0_mem_size Total size of memory available for L0 tables.
  973. *
  974. * Return
  975. * Negative Linux error code in the event of a failure, 0 for success.
  976. */
  977. int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
  978. size_t l0_mem_size)
  979. {
  980. uint64_t gpt_desc;
  981. size_t locks_size = 0;
  982. __unused bitlock_t *bit_locks;
  983. int ret;
  984. /* Ensure that MMU and Data caches are enabled */
  985. assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
  986. /* Validate other parameters */
  987. ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
  988. if (ret != 0) {
  989. return ret;
  990. }
  991. /* Create the descriptor to initialize L0 entries with */
  992. gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
  993. /* Iterate through all L0 entries */
  994. for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
  995. ((uint64_t *)l0_mem_base)[i] = gpt_desc;
  996. }
  997. #if (RME_GPT_BITLOCK_BLOCK != 0)
  998. /* Initialise bitlocks at the end of L0 table */
  999. bit_locks = (bitlock_t *)(l0_mem_base +
  1000. GPT_L0_TABLE_SIZE(gpt_config.t));
  1001. /* Size of bitlocks in bytes */
  1002. locks_size = GPT_PPS_ACTUAL_SIZE(gpt_config.t) /
  1003. (RME_GPT_BITLOCK_BLOCK * SZ_512M * 8U);
  1004. /*
  1005. * If protected space size is less than the size covered
  1006. * by 'bitlock' structure, initialise a single bitlock.
  1007. */
  1008. if (locks_size < LOCK_SIZE) {
  1009. locks_size = LOCK_SIZE;
  1010. }
  1011. for (size_t i = 0UL; i < (locks_size/LOCK_SIZE); i++) {
  1012. bit_locks[i].lock = 0U;
  1013. }
  1014. #endif
  1015. /* Flush updated L0 tables and bitlocks to memory */
  1016. flush_dcache_range((uintptr_t)l0_mem_base,
  1017. GPT_L0_TABLE_SIZE(gpt_config.t) + locks_size);
  1018. /* Stash the L0 base address once initial setup is complete */
  1019. gpt_config.plat_gpt_l0_base = l0_mem_base;
  1020. return 0;
  1021. }
  1022. /*
  1023. * Public API that carves out PAS regions from the L0 tables and builds any L1
  1024. * tables that are needed. This function ideally is run after DDR discovery and
  1025. * initialization. The L0 tables must have already been initialized to GPI_ANY
  1026. * when this function is called.
  1027. *
  1028. * This function can be called multiple times with different L1 memory ranges
  1029. * and PAS regions if it is desirable to place L1 tables in different locations
  1030. * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
  1031. * in the DDR bank that they control).
  1032. *
  1033. * Parameters
  1034. * pgs PGS value to use for table generation.
  1035. * l1_mem_base Base address of memory used for L1 tables.
  1036. * l1_mem_size Total size of memory available for L1 tables.
  1037. * *pas_regions Pointer to PAS regions structure array.
  1038. * pas_count Total number of PAS regions.
  1039. *
  1040. * Return
  1041. * Negative Linux error code in the event of a failure, 0 for success.
  1042. */
  1043. int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
  1044. size_t l1_mem_size, pas_region_t *pas_regions,
  1045. unsigned int pas_count)
  1046. {
  1047. int l1_gpt_cnt, ret;
  1048. /* Ensure that MMU and Data caches are enabled */
  1049. assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
  1050. /* PGS is needed for validate_pas_mappings so check it now */
  1051. if (pgs > GPT_PGS_MAX) {
  1052. ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
  1053. return -EINVAL;
  1054. }
  1055. gpt_config.pgs = pgs;
  1056. gpt_config.p = gpt_p_lookup[pgs];
  1057. /* Make sure L0 tables have been initialized */
  1058. if (gpt_config.plat_gpt_l0_base == 0U) {
  1059. ERROR("GPT: L0 tables must be initialized first!\n");
  1060. return -EPERM;
  1061. }
  1062. /* Check if L1 GPTs are required and how many */
  1063. l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
  1064. if (l1_gpt_cnt < 0) {
  1065. return l1_gpt_cnt;
  1066. }
  1067. VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);
  1068. /* If L1 tables are needed then validate the L1 parameters */
  1069. if (l1_gpt_cnt > 0) {
  1070. ret = validate_l1_params(l1_mem_base, l1_mem_size,
  1071. (unsigned int)l1_gpt_cnt);
  1072. if (ret != 0) {
  1073. return ret;
  1074. }
  1075. /* Set up parameters for L1 table generation */
  1076. gpt_l1_tbl = l1_mem_base;
  1077. }
  1078. /* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
  1079. gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);
  1080. /* Mask for the L1 index field */
  1081. gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);
  1082. INFO("GPT: Boot Configuration\n");
  1083. INFO(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
  1084. INFO(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
  1085. INFO(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
  1086. INFO(" PAS count: %u\n", pas_count);
  1087. INFO(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
  1088. /* Generate the tables in memory */
  1089. for (unsigned int idx = 0U; idx < pas_count; idx++) {
  1090. VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
  1091. idx, pas_regions[idx].base_pa, pas_regions[idx].size,
  1092. GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
  1093. GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
  1094. /* Check if a block or table descriptor is required */
  1095. if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
  1096. GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
  1097. generate_l0_blk_desc(&pas_regions[idx]);
  1098. } else {
  1099. generate_l0_tbl_desc(&pas_regions[idx]);
  1100. }
  1101. }
  1102. /* Flush modified L0 tables */
  1103. flush_l0_for_pas_array(pas_regions, pas_count);
  1104. /* Flush L1 tables if needed */
  1105. if (l1_gpt_cnt > 0) {
  1106. flush_dcache_range(l1_mem_base,
  1107. GPT_L1_TABLE_SIZE(gpt_config.p) *
  1108. (size_t)l1_gpt_cnt);
  1109. }
  1110. /* Make sure that all the entries are written to the memory */
  1111. dsbishst();
  1112. tlbipaallos();
  1113. dsb();
  1114. isb();
  1115. return 0;
  1116. }
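/*
 * Illustrative cold/warm boot usage sketch (not part of this file; buffer
 * symbols, PPS/PGS values and PAS layout below are hypothetical):
 *
 *	ret = gpt_init_l0_tables(pps, l0_base, l0_size);
 *	ret = gpt_init_pas_l1_tables(pgs, l1_base, l1_size,
 *				     pas_regions, pas_count);
 *	ret = gpt_enable();
 *
 * A later stage that inherits an already-programmed GPT calls
 * gpt_runtime_init() once and then gpt_enable() on each core instead.
 */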
  1117. /*
  1118. * Public API to initialize the runtime gpt_config structure based on the values
  1119. * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
  1120. * typically happens in a bootloader stage prior to setting up the EL3 runtime
  1121. * environment for the granule transition service so this function detects the
  1122. * initialization from a previous stage. Granule protection checks must be
  1123. * enabled already or this function will return an error.
  1124. *
  1125. * Return
  1126. * Negative Linux error code in the event of a failure, 0 for success.
  1127. */
  1128. int gpt_runtime_init(void)
  1129. {
  1130. u_register_t reg;
  1131. /* Ensure that MMU and Data caches are enabled */
  1132. assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
  1133. /* Ensure GPC are already enabled */
  1134. if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
  1135. ERROR("GPT: Granule protection checks are not enabled!\n");
  1136. return -EPERM;
  1137. }
  1138. /*
  1139. * Read the L0 table address from GPTBR, we don't need the L1 base
  1140. * address since those are included in the L0 tables as needed.
  1141. */
  1142. reg = read_gptbr_el3();
  1143. gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
  1144. GPTBR_BADDR_MASK) <<
  1145. GPTBR_BADDR_VAL_SHIFT;
  1146. /* Read GPCCR to get PGS and PPS values */
  1147. reg = read_gpccr_el3();
  1148. gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
  1149. gpt_config.t = gpt_t_lookup[gpt_config.pps];
  1150. gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
  1151. gpt_config.p = gpt_p_lookup[gpt_config.pgs];
  1152. /* Number of L1 entries in 2MB depends on GPCCR_EL3.PGS value */
  1153. gpt_l1_cnt_2mb = (unsigned int)GPT_L1_ENTRY_COUNT_2MB(gpt_config.p);
  1154. /* Mask for the L1 index field */
  1155. gpt_l1_index_mask = GPT_L1_IDX_MASK(gpt_config.p);
  1156. #if (RME_GPT_BITLOCK_BLOCK != 0)
  1157. /* Bitlocks at the end of L0 table */
  1158. gpt_bitlock_base = (bitlock_t *)(gpt_config.plat_gpt_l0_base +
  1159. GPT_L0_TABLE_SIZE(gpt_config.t));
  1160. #endif
  1161. VERBOSE("GPT: Runtime Configuration\n");
  1162. VERBOSE(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
  1163. VERBOSE(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
  1164. VERBOSE(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
  1165. VERBOSE(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
  1166. #if (RME_GPT_BITLOCK_BLOCK != 0)
  1167. VERBOSE(" Bitlocks: 0x%"PRIxPTR"\n", (uintptr_t)gpt_bitlock_base);
  1168. #endif
  1169. return 0;
  1170. }
  1171. /*
  1172. * A helper to write the value (target_pas << gpi_shift) into the L1
  1173. * entry at index 'idx' of gpt_l1_addr.
  1174. */
  1175. static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
  1176. unsigned int gpi_shift, unsigned int idx,
  1177. unsigned int target_pas)
  1178. {
  1179. *gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
  1180. *gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
  1181. gpt_l1_addr[idx] = *gpt_l1_desc;
  1182. dsboshst();
  1183. }
  1184. /*
  1185. * Helper to retrieve the gpt_l1_* information from the base address
  1186. * returned in gpi_info.
  1187. */
  1188. static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
  1189. {
  1190. uint64_t gpt_l0_desc, *gpt_l0_base;
  1191. __unused unsigned int block_idx;
  1192. gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
  1193. gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
  1194. if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
  1195. VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
  1196. VERBOSE(" Base=0x%"PRIx64"\n", base);
  1197. return -EINVAL;
  1198. }
  1199. /* Get the table index and GPI shift from PA */
  1200. gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
  1201. gpi_info->idx = (unsigned int)GPT_L1_INDEX(base);
  1202. gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
  1203. #if (RME_GPT_BITLOCK_BLOCK != 0)
  1204. /* Block index */
  1205. block_idx = (unsigned int)(base / (RME_GPT_BITLOCK_BLOCK * SZ_512M));
  1206. /* Bitlock address and mask */
  1207. gpi_info->lock = &gpt_bitlock_base[block_idx / LOCK_BITS];
  1208. gpi_info->mask = 1U << (block_idx & (LOCK_BITS - 1U));
  1209. #endif
  1210. return 0;
  1211. }
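/*
 * Example (illustration only, assuming the 16 x 4-bit GPI layout used here,
 * with 4KB granules): for a PA whose offset within its L1 entry is 0x5000,
 * GPT_L1_GPI_IDX() evaluates to 5, so gpi_shift is 20 and the granule's GPI
 * occupies bits [23:20] of the 64-bit descriptor.
 */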
  1212. /*
  1213. * Helper to retrieve the gpt_l1_desc and GPI information from gpi_info.
  1214. * This function is called with bitlock or spinlock acquired.
  1215. */
  1216. static void read_gpi(gpi_info_t *gpi_info)
  1217. {
  1218. gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
  1219. if ((gpi_info->gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
  1220. GPT_L1_TYPE_CONT_DESC) {
  1221. /* Read GPI from Contiguous descriptor */
  1222. gpi_info->gpi = (unsigned int)GPT_L1_CONT_GPI(gpi_info->gpt_l1_desc);
  1223. } else {
  1224. /* Read GPI from Granules descriptor */
  1225. gpi_info->gpi = (unsigned int)((gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
  1226. GPT_L1_GRAN_DESC_GPI_MASK);
  1227. }
  1228. }
  1229. static void flush_page_to_popa(uintptr_t addr)
  1230. {
  1231. size_t size = GPT_PGS_ACTUAL_SIZE(gpt_config.p);
  1232. if (is_feat_mte2_supported()) {
  1233. flush_dcache_to_popa_range_mte2(addr, size);
  1234. } else {
  1235. flush_dcache_to_popa_range(addr, size);
  1236. }
  1237. }
  1238. /*
  1239. * Helper function to check if all L1 entries in 2MB block have
  1240. * the same Granules descriptor value.
  1241. *
  1242. * Parameters
  1243. * base Base address of the region to be checked
  1244. * gpi_info Pointer to 'gpi_info_t' structure
  1245. * l1_desc GPT Granules descriptor with all entries
  1246. * set to the same GPI.
  1247. *
  1248. * Return
  1249. * true if all L1 entries have the same descriptor value, false otherwise.
  1250. */
  1251. __unused static bool check_fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
  1252. uint64_t l1_desc)
  1253. {
  1254. /* Last L1 entry index in 2MB block */
  1255. unsigned long idx = GPT_L1_INDEX(ALIGN_2MB(base)) +
  1256. gpt_l1_cnt_2mb - 1UL;
  1257. /* Number of L1 entries in 2MB block */
  1258. unsigned int cnt = gpt_l1_cnt_2mb;
  1259. /*
  1260. * Start check from the last L1 entry and continue until the first
  1261. * non-matching to the passed Granules descriptor value is found.
  1262. */
  1263. while (cnt-- != 0U) {
  1264. if (gpi_info->gpt_l1_addr[idx--] != l1_desc) {
  1265. /* Non-matching L1 entry found */
  1266. return false;
  1267. }
  1268. }
  1269. return true;
  1270. }
  1271. __unused static void fuse_2mb(uint64_t base, const gpi_info_t *gpi_info,
  1272. uint64_t l1_desc)
  1273. {
  1274. /* L1 entry index of the start of 2MB block */
  1275. unsigned long idx_2 = GPT_L1_INDEX(ALIGN_2MB(base));
  1276. /* 2MB Contiguous descriptor */
  1277. uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
  1278. VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);
  1279. fill_desc(&gpi_info->gpt_l1_addr[idx_2], l1_cont_desc, L1_QWORDS_2MB);
  1280. }
  1281. /*
  1282. * Helper function to check if all 1st L1 entries of 2MB blocks
  1283. * in 32MB have the same 2MB Contiguous descriptor value.
  1284. *
  1285. * Parameters
  1286. * base Base address of the region to be checked
  1287. * gpi_info Pointer to 'gpi_info_t' structure
  1288. * l1_desc GPT Granules descriptor.
  1289. *
  1290. * Return
  1291. * true if all L1 entries have the same descriptor value, false otherwise.
  1292. */
  1293. __unused static bool check_fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
  1294. uint64_t l1_desc)
  1295. {
  1296. /* The 1st L1 entry index of the last 2MB block in 32MB */
  1297. unsigned long idx = GPT_L1_INDEX(ALIGN_32MB(base)) +
  1298. (15UL * gpt_l1_cnt_2mb);
  1299. /* 2MB Contiguous descriptor */
  1300. uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 2MB);
  1301. /* Number of 2MB blocks in 32MB */
  1302. unsigned int cnt = 16U;
  1303. /* Set the first L1 entry to 2MB Contiguous descriptor */
  1304. gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_2MB(base))] = l1_cont_desc;
  1305. /*
  1306. * Start check from the 1st L1 entry of the last 2MB block and
  1307. * continue until the first non-matching to 2MB Contiguous descriptor
  1308. * value is found.
  1309. */
  1310. while (cnt-- != 0U) {
  1311. if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
  1312. /* Non-matching L1 entry found */
  1313. return false;
  1314. }
  1315. idx -= gpt_l1_cnt_2mb;
  1316. }
  1317. return true;
  1318. }
__unused static void fuse_32mb(uint64_t base, const gpi_info_t *gpi_info,
			       uint64_t l1_desc)
{
	/* L1 entry index of the start of 32MB block */
	unsigned long idx_32 = GPT_L1_INDEX(ALIGN_32MB(base));

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_32], l1_cont_desc, L1_QWORDS_32MB);
}
/*
 * Helper function to check if all 1st L1 entries of 32MB blocks
 * in 512MB have the same 32MB Contiguous descriptor value.
 *
 * Parameters
 *   base		Base address of the region to be checked
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor.
 *
 * Return
 *   true if all L1 entries have the same descriptor value, false otherwise.
 */
__unused static bool check_fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				      uint64_t l1_desc)
{
	/* The 1st L1 entry index of the last 32MB block in 512MB */
	unsigned long idx = GPT_L1_INDEX(ALIGN_512MB(base)) +
					(15UL * 16UL * gpt_l1_cnt_2mb);

	/* 32MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 32MB);

	/* Number of 32MB blocks in 512MB */
	unsigned int cnt = 16U;

	/* Set the first L1 entry to 32MB Contiguous descriptor */
	gpi_info->gpt_l1_addr[GPT_L1_INDEX(ALIGN_32MB(base))] = l1_cont_desc;

	/*
	 * Start check from the 1st L1 entry of the last 32MB block and
	 * continue until the first non-matching to 32MB Contiguous descriptor
	 * value is found.
	 */
	while (cnt-- != 0U) {
		if (gpi_info->gpt_l1_addr[idx] != l1_cont_desc) {
			/* Non-matching L1 entry found */
			return false;
		}
		idx -= 16UL * gpt_l1_cnt_2mb;
	}

	return true;
}
__unused static void fuse_512mb(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* L1 entry index of the start of 512MB block */
	unsigned long idx_512 = GPT_L1_INDEX(ALIGN_512MB(base));

	/* 512MB Contiguous descriptor */
	uint64_t l1_cont_desc = GPT_L1_CONT_DESC(l1_desc, 512MB);

	VERBOSE("GPT: %s(0x%"PRIxPTR" 0x%"PRIx64")\n", __func__, base, l1_desc);

	fill_desc(&gpi_info->gpt_l1_addr[idx_512], l1_cont_desc, L1_QWORDS_512MB);
}
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Granules to Contiguous descriptor.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor with all entries
 *			set to the same GPI.
 */
__unused static void fuse_block(uint64_t base, const gpi_info_t *gpi_info,
				uint64_t l1_desc)
{
	/* Start with check for 2MB block */
	if (!check_fuse_2mb(base, gpi_info, l1_desc)) {
		/* Check for 2MB fusing failed */
		return;
	}

#if (RME_GPT_MAX_BLOCK == 2)
	fuse_2mb(base, gpi_info, l1_desc);
#else
	/* Check for 32MB block */
	if (!check_fuse_32mb(base, gpi_info, l1_desc)) {
		/* Check for 32MB fusing failed, fuse to 2MB */
		fuse_2mb(base, gpi_info, l1_desc);
		return;
	}

#if (RME_GPT_MAX_BLOCK == 32)
	fuse_32mb(base, gpi_info, l1_desc);
#else
	/* Check for 512MB block */
	if (!check_fuse_512mb(base, gpi_info, l1_desc)) {
		/* Check for 512MB fusing failed, fuse to 32MB */
		fuse_32mb(base, gpi_info, l1_desc);
		return;
	}

	/* Fuse to 512MB */
	fuse_512mb(base, gpi_info, l1_desc);
#endif /* RME_GPT_MAX_BLOCK == 32 */
#endif /* RME_GPT_MAX_BLOCK == 2 */
}
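
/*
 * Worked example, illustrative only and assuming a 4KB granule size: a 2MB
 * block spans gpt_l1_cnt_2mb = 32 L1 entries, 16 such 2MB blocks make up
 * 32MB, and 16 32MB blocks make up 512MB (the counts used by the
 * check_fuse_*() helpers above). fuse_block() therefore first verifies the
 * 32 entries of the 2MB block; if they all match and RME_GPT_MAX_BLOCK
 * permits, it checks the 32MB and then the 512MB level, fusing the region
 * to the largest contiguous size whose entries all carry the same GPI.
 */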
/*
 * Helper function to convert GPI entries in a single L1 table
 * from Contiguous to Granules descriptor. This function updates
 * the descriptor to Granules in the passed 'gpi_info_t' structure as
 * the result of shattering.
 *
 * Parameters
 *   base		Base address of the region to be written
 *   gpi_info		Pointer to 'gpi_info_t' structure
 *   l1_desc		GPT Granules descriptor to set this range to.
 */
__unused static void shatter_block(uint64_t base, gpi_info_t *gpi_info,
				   uint64_t l1_desc)
{
	/* Look-up table for 2MB, 32MB and 512MB block shattering */
	static const gpt_shatter_func gpt_shatter_lookup[] = {
		shatter_2mb,
		shatter_32mb,
		shatter_512mb
	};

	/* Look-up table for TLB invalidation for 2MB, 32MB and 512MB blocks */
	static const gpt_tlbi_lookup_t tlbi_lookup[] = {
		{ tlbirpalos_2m, ~(SZ_2M - 1UL) },
		{ tlbirpalos_32m, ~(SZ_32M - 1UL) },
		{ tlbirpalos_512m, ~(SZ_512M - 1UL) }
	};

	/* Get shattering level from Contig field of Contiguous descriptor */
	unsigned long level = GPT_L1_CONT_CONTIG(gpi_info->gpt_l1_desc) - 1UL;

	/* Shatter contiguous block */
	gpt_shatter_lookup[level](base, gpi_info, l1_desc);

	tlbi_lookup[level].function(base & tlbi_lookup[level].mask);
	dsbosh();

	/*
	 * Update the descriptor in the 'gpi_info_t' structure to Granules to
	 * reflect the shattered GPI back to the caller.
	 */
	gpi_info->gpt_l1_desc = l1_desc;
}
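
/*
 * Illustrative note on the look-up above: GPT_L1_CONT_CONTIG() yields 1, 2
 * or 3 for a 2MB, 32MB or 512MB Contiguous descriptor respectively, so
 * 'level' selects index 0 (shatter_2mb/tlbirpalos_2m), 1 (shatter_32mb/
 * tlbirpalos_32m) or 2 (shatter_512mb/tlbirpalos_512m). For example,
 * shattering a 32MB block invalidates TLB entries for the 32MB-aligned
 * region containing 'base', i.e. base & ~(SZ_32M - 1UL).
 */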
/*
 * This function is the granule transition delegate service. When a granule
 * transition request occurs it is routed to this function to have the request,
 * if valid, fulfilled following section A1.1.1 Delegate of the RME supplement.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	unsigned int target_pas;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Delegate request can only come from REALM or SECURE */
	if ((src_sec_state != SMC_FROM_REALM) &&
	    (src_sec_state != SMC_FROM_SECURE)) {
		VERBOSE("GPT: Invalid caller security state 0x%x\n",
			src_sec_state);
		return -EINVAL;
	}

	if (src_sec_state == SMC_FROM_REALM) {
		target_pas = GPT_GPI_REALM;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_REALM_DESC;
	} else {
		target_pas = GPT_GPI_SECURE;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
		l1_desc = GPT_L1_SECURE_DESC;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}
	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in NS state */
	if (gpi_info.gpi != GPT_GPI_NS) {
		VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove any data speculatively fetched into the target
	 * physical address space.
	 * Issue DC CIPAPA or DC CIGDPAPA on implementations with FEAT_MTE2.
	 */
	flush_page_to_popa(base | nse);

	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, target_pas);

	/* Ensure that all agents observe the new configuration */
	tlbi_page_dsbosh(base);

	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);
#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == l1_desc) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, l1_desc);
	}
#endif

	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, target_pas);

	return 0;
}
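
/*
 * Illustrative usage sketch, not part of this file: a hypothetical SMC
 * handler delegating one granule on behalf of a Realm caller. 'pa' and the
 * use of PAGE_SIZE_4KB are assumptions for illustration; the size passed in
 * must equal the configured granule size, GPT_PGS_ACTUAL_SIZE(gpt_config.p).
 *
 *	int ret = gpt_delegate_pas(pa, PAGE_SIZE_4KB, SMC_FROM_REALM);
 *	if (ret != 0) {
 *		... propagate -EINVAL/-EPERM back to the caller ...
 *	}
 */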
/*
 * This function is the granule transition undelegate service. When a granule
 * transition request occurs it is routed to this function where the request is
 * validated then fulfilled if possible.
 *
 * TODO: implement support for transitioning multiple granules at once.
 *
 * Parameters
 *   base		Base address of the region to transition, must be
 *			aligned to granule size.
 *   size		Size of region to transition, must be aligned to granule
 *			size.
 *   src_sec_state	Security state of the caller.
 *
 * Return
 *   Negative Linux error code in the event of a failure, 0 for success.
 */
int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
{
	gpi_info_t gpi_info;
	uint64_t nse, __unused l1_desc;
	int res;

	/* Ensure that the tables have been set up before taking requests */
	assert(gpt_config.plat_gpt_l0_base != 0UL);

	/* Ensure that MMU and caches are enabled */
	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);

	/* See if this is a single or a range of granule transition */
	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
		return -EINVAL;
	}

	/* Check that base and size are valid */
	if ((ULONG_MAX - base) < size) {
		VERBOSE("GPT: Transition request address overflow!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	/* Make sure base and size are valid */
	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
	    (size == 0UL) ||
	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
		VERBOSE("GPT: Invalid granule transition address range!\n");
		VERBOSE("      Base=0x%"PRIx64"\n", base);
		VERBOSE("      Size=0x%lx\n", size);
		return -EINVAL;
	}

	res = get_gpi_params(base, &gpi_info);
	if (res != 0) {
		return res;
	}
	/*
	 * Access to GPT is controlled by a lock to ensure that no more
	 * than one CPU is allowed to make changes at any given time.
	 */
	GPT_LOCK;
	read_gpi(&gpi_info);

	/* Check that the current address is in the delegated state */
	if ((src_sec_state == SMC_FROM_REALM) &&
	    (gpi_info.gpi == GPT_GPI_REALM)) {
		l1_desc = GPT_L1_REALM_DESC;
		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
	} else if ((src_sec_state == SMC_FROM_SECURE) &&
		   (gpi_info.gpi == GPT_GPI_SECURE)) {
		l1_desc = GPT_L1_SECURE_DESC;
		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
	} else {
		VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated\n");
		VERBOSE("      Caller: %u Current GPI: %u\n", src_sec_state,
			gpi_info.gpi);
		GPT_UNLOCK;
		return -EPERM;
	}

#if (RME_GPT_MAX_BLOCK != 0)
	/* Check for Contiguous descriptor */
	if ((gpi_info.gpt_l1_desc & GPT_L1_TYPE_CONT_DESC_MASK) ==
	    GPT_L1_TYPE_CONT_DESC) {
		shatter_block(base, &gpi_info, l1_desc);
	}
#endif
	/*
	 * In order to maintain mutual distrust between Realm and Secure
	 * states, remove access now, in order to guarantee that writes
	 * to the currently-accessible physical address space will not
	 * later become observable.
	 */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);

	/* Ensure that all agents observe the new NO_ACCESS configuration */
	tlbi_page_dsbosh(base);

	/* Ensure that the scrubbed data have made it past the PoPA */
	flush_page_to_popa(base | nse);

	/*
	 * Remove any data loaded speculatively in NS space from before
	 * the scrubbing.
	 */
	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;

	flush_page_to_popa(base | nse);

	/* Clear existing GPI encoding and transition granule */
	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);

	/* Ensure that all agents observe the new NS configuration */
	tlbi_page_dsbosh(base);

#if (RME_GPT_MAX_BLOCK != 0)
	if (gpi_info.gpt_l1_desc == GPT_L1_NS_DESC) {
		/* Try to fuse */
		fuse_block(base, &gpi_info, GPT_L1_NS_DESC);
	}
#endif
	/* Release the GPT lock */
	GPT_UNLOCK;

	/*
	 * The isb() will be done as part of context
	 * synchronization when returning to lower EL.
	 */
	VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
		base, gpi_info.gpi, GPT_GPI_NS);

	return 0;
}
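
/*
 * Illustrative usage sketch, not part of this file: undelegating the same
 * granule back to Non-secure, here on behalf of a hypothetical Secure
 * caller. 'pa' and PAGE_SIZE_4KB are assumptions for illustration; only a
 * granule whose current GPI matches the caller's security state can be
 * undelegated.
 *
 *	int ret = gpt_undelegate_pas(pa, PAGE_SIZE_4KB, SMC_FROM_SECURE);
 *	if (ret == -EPERM) {
 *		... granule was not in the SECURE GPI ...
 *	}
 */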