/*
 * Copyright (c) 2022-2024, Arm Limited. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
  6. #ifndef GPT_RME_PRIVATE_H
  7. #define GPT_RME_PRIVATE_H
  8. #include <arch.h>
  9. #include <lib/gpt_rme/gpt_rme.h>
  10. #include <lib/spinlock.h>
  11. #include <lib/utils_def.h>
/******************************************************************************/
/* GPT descriptor definitions                                                 */
/******************************************************************************/
/* GPT level 0 descriptor bit definitions */
#define GPT_L0_TYPE_MASK                UL(0xF)
#define GPT_L0_TYPE_SHIFT               U(0)

/* GPT level 0 table and block descriptor type values */
#define GPT_L0_TYPE_TBL_DESC            UL(3)
#define GPT_L0_TYPE_BLK_DESC            UL(1)

/* L1 table address field within an L0 table descriptor: 40 bits at bit 12 */
#define GPT_L0_TBL_DESC_L1ADDR_MASK     UL(0xFFFFFFFFFF)
#define GPT_L0_TBL_DESC_L1ADDR_SHIFT    U(12)

/* GPI field within an L0 block descriptor: 4 bits at bit 4 */
#define GPT_L0_BLK_DESC_GPI_MASK        UL(0xF)
#define GPT_L0_BLK_DESC_GPI_SHIFT       U(4)

/* GPT level 1 Contiguous descriptor type field */
#define GPT_L1_TYPE_CONT_DESC_MASK      UL(0xF)
#define GPT_L1_TYPE_CONT_DESC           UL(1)

/* GPT level 1 Contiguous descriptor Contig field encodings */
#define GPT_L1_CONTIG_2MB               UL(1)
#define GPT_L1_CONTIG_32MB              UL(2)
#define GPT_L1_CONTIG_512MB             UL(3)

/* GPI and Contig fields within an L1 Contiguous descriptor */
#define GPT_L1_CONT_DESC_GPI_SHIFT      U(4)
#define GPT_L1_CONT_DESC_GPI_MASK       UL(0xF)
#define GPT_L1_CONT_DESC_CONTIG_SHIFT   U(8)
#define GPT_L1_CONT_DESC_CONTIG_MASK    UL(3)

/* GPT level 1 Granules descriptor bit definitions */
#define GPT_L1_GRAN_DESC_GPI_MASK       UL(0xF)
  39. #define GPT_L1_CONT_DESC_2MB \
  40. (GPT_L1_TYPE_CONT_DESC | \
  41. (GPT_L1_CONTIG_2MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))
  42. #define GPT_L1_CONT_DESC_32MB \
  43. (GPT_L1_TYPE_CONT_DESC | \
  44. (GPT_L1_CONTIG_32MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))
  45. #define GPT_L1_CONT_DESC_512MB \
  46. (GPT_L1_TYPE_CONT_DESC | \
  47. (GPT_L1_CONTIG_512MB << GPT_L1_CONT_DESC_CONTIG_SHIFT))
  48. /* Create L1 Contiguous descriptor from GPI and template */
  49. #define GPT_L1_GPI_CONT_DESC(_gpi, _desc) \
  50. ((_desc) | ((uint64_t)(_gpi) << GPT_L1_CONT_DESC_GPI_SHIFT))
  51. /* Create L1 Contiguous descriptor from Granules descriptor and size */
  52. #define GPT_L1_CONT_DESC(_desc, _size) \
  53. (GPT_L1_CONT_DESC_##_size | \
  54. (((_desc) & GPT_L1_GRAN_DESC_GPI_MASK) << \
  55. GPT_L1_CONT_DESC_GPI_SHIFT))
  56. /* Create L1 Contiguous descriptor from GPI and size */
  57. #define GPT_L1_CONT_DESC_SIZE(_gpi, _size) \
  58. (GPT_L1_CONT_DESC_##_size | \
  59. (((uint64_t)(_gpi) << GPT_L1_CONT_DESC_GPI_SHIFT))
  60. #define GPT_L1_GPI_BYTE(_gpi) (uint64_t)((_gpi) | ((_gpi) << 4))
  61. #define GPT_L1_GPI_HALF(_gpi) (GPT_L1_GPI_BYTE(_gpi) | (GPT_L1_GPI_BYTE(_gpi) << 8))
  62. #define GPT_L1_GPI_WORD(_gpi) (GPT_L1_GPI_HALF(_gpi) | (GPT_L1_GPI_HALF(_gpi) << 16))
  63. /*
  64. * This macro generates a Granules descriptor
  65. * with the same value for every GPI entry.
  66. */
  67. #define GPT_BUILD_L1_DESC(_gpi) (GPT_L1_GPI_WORD(_gpi) | (GPT_L1_GPI_WORD(_gpi) << 32))
  68. #define GPT_L1_SECURE_DESC GPT_BUILD_L1_DESC(GPT_GPI_SECURE)
  69. #define GPT_L1_NS_DESC GPT_BUILD_L1_DESC(GPT_GPI_NS)
  70. #define GPT_L1_REALM_DESC GPT_BUILD_L1_DESC(GPT_GPI_REALM)
  71. #define GPT_L1_ANY_DESC GPT_BUILD_L1_DESC(GPT_GPI_ANY)
/******************************************************************************/
/* GPT platform configuration                                                 */
/******************************************************************************/
/*
 * L0GPTSZ is read at runtime from GPCCR_EL3, so no externally supplied
 * definition is needed.
 */
#define GPT_L0GPTSZ             ((unsigned int)((read_gpccr_el3() >> \
                                GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))

/*
 * The "S" value is directly related to L0GPTSZ: L0GPTSZ == 0 corresponds to
 * a 1GB (2^30) L0 region, hence the +30 bias.
 */
#define GPT_S_VAL               (GPT_L0GPTSZ + 30U)
  80. /*
  81. * Map PPS values to T values.
  82. *
  83. * PPS Size T
  84. * 0b000 4GB 32
  85. * 0b001 64GB 36
  86. * 0b010 1TB 40
  87. * 0b011 4TB 42
  88. * 0b100 16TB 44
  89. * 0b101 256TB 48
  90. * 0b110 4PB 52
  91. *
  92. * See section 15.1.27 of the RME specification.
  93. */
  94. typedef enum {
  95. PPS_4GB_T = 32U,
  96. PPS_64GB_T = 36U,
  97. PPS_1TB_T = 40U,
  98. PPS_4TB_T = 42U,
  99. PPS_16TB_T = 44U,
  100. PPS_256TB_T = 48U,
  101. PPS_4PB_T = 52U
  102. } gpt_t_val_e;
  103. /*
  104. * Map PGS values to P values.
  105. *
  106. * PGS Size P
  107. * 0b00 4KB 12
  108. * 0b10 16KB 14
  109. * 0b01 64KB 16
  110. *
  111. * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
  112. *
  113. * See section 15.1.27 of the RME specification.
  114. */
  115. typedef enum {
  116. PGS_4KB_P = 12U,
  117. PGS_16KB_P = 14U,
  118. PGS_64KB_P = 16U
  119. } gpt_p_val_e;
/* Size in bytes of the 'lock' member of bitlock_t */
#define LOCK_SIZE       sizeof(((bitlock_t *)NULL)->lock)
/* Type of the 'lock' member of bitlock_t (GNU typeof extension) */
#define LOCK_TYPE       typeof(((bitlock_t *)NULL)->lock)
/* Width in bits of one bit-lock element */
#define LOCK_BITS       (LOCK_SIZE * 8U)
/*
 * Internal structure to retrieve the values from get_gpi_params();
 */
typedef struct {
        uint64_t gpt_l1_desc;           /* L1 descriptor covering the PA */
        uint64_t *gpt_l1_addr;          /* Base of the L1 table holding it */
        unsigned int idx;               /* Entry index within the L1 table */
        unsigned int gpi_shift;         /* Bit offset of the GPI in the descriptor */
        unsigned int gpi;               /* Extracted GPI value */
#if (RME_GPT_BITLOCK_BLOCK != 0)
        bitlock_t *lock;                /* Bit-lock word covering this block */
        LOCK_TYPE mask;                 /* Mask selecting this block's bit in *lock */
#endif
} gpi_info_t;
/*
 * Look up structure for contiguous blocks and descriptors
 */
typedef struct {
        size_t size;            /* Block size in bytes */
        unsigned int desc;      /* Matching L1 Contiguous descriptor template */
} gpt_fill_lookup_t;
/*
 * Callback used when a block mapping must be shattered into finer granules.
 * NOTE(review): exact semantics are defined by the implementation in
 * gpt_rme.c — confirm there.
 */
typedef void (*gpt_shatter_func)(uintptr_t base, const gpi_info_t *gpi_info,
                                 uint64_t l1_desc);

/* Callback performing a TLB invalidation for the given base address */
typedef void (*gpt_tlbi_func)(uintptr_t base);

/*
 * Look-up structure for
 * invalidating TLBs of GPT entries by Physical address, last level.
 */
typedef struct {
        gpt_tlbi_func function;         /* TLBI routine to invoke */
        size_t mask;                    /* Address mask applied before invalidation */
} gpt_tlbi_lookup_t;
/* Max valid value for PGS (0b10 == 16KB granules, see gpt_p_val_e) */
#define GPT_PGS_MAX     (2U)

/* Max valid value for PPS (0b110 == 4PB, see gpt_t_val_e) */
#define GPT_PPS_MAX     (6U)
/******************************************************************************/
/* L0 address attribute macros                                                */
/******************************************************************************/
/*
 * Width of the L0 index field.
 *
 * If S is greater than or equal to T then there is a single L0 region covering
 * the entire protected space so there is no L0 index, so the width (and the
 * derivative mask value) are both zero. If we don't specifically handle this
 * special case we'll get a negative width value which does not make sense and
 * would cause problems.
 */
#define GPT_L0_IDX_WIDTH(_t)    (((unsigned int)(_t) > GPT_S_VAL) ? \
                                ((unsigned int)(_t) - GPT_S_VAL) : (0U))

/* Bit shift for the L0 index field in a PA */
#define GPT_L0_IDX_SHIFT        (GPT_S_VAL)

/*
 * Mask for the L0 index field, must be shifted.
 *
 * The value 0x3FFFFF is 22 bits wide which is the maximum possible width of the
 * L0 index within a physical address. This is calculated by
 * ((t_max - 1) - s_min + 1) where t_max is 52 for 4PB, the largest PPS, and
 * s_min is 30 for 1GB, the smallest L0GPTSZ.
 */
#define GPT_L0_IDX_MASK(_t)     (0x3FFFFFUL >> (22U - \
                                (GPT_L0_IDX_WIDTH(_t))))

/* Total number of L0 regions */
#define GPT_L0_REGION_COUNT(_t) ((GPT_L0_IDX_MASK(_t)) + 1U)

/* Total size of each GPT L0 region in bytes */
#define GPT_L0_REGION_SIZE      (1UL << (GPT_L0_IDX_SHIFT))

/* Total size in bytes of the whole L0 table (8 bytes per descriptor) */
#define GPT_L0_TABLE_SIZE(_t)   ((GPT_L0_REGION_COUNT(_t)) << 3U)
/******************************************************************************/
/* L1 address attribute macros                                                */
/******************************************************************************/
/*
 * Width of the L1 index field.
 *
 * This field does not have a special case to handle widths less than zero like
 * the L0 index field above since all valid combinations of PGS (p) and L0GPTSZ
 * (s) will result in a positive width value.
 */
#define GPT_L1_IDX_WIDTH(_p)    ((GPT_S_VAL - 1U) - \
                                ((unsigned int)(_p) + 3U))

/* Bit shift for the L1 index field */
#define GPT_L1_IDX_SHIFT(_p)    ((unsigned int)(_p) + 4U)

/*
 * Mask for the L1 index field, must be shifted.
 *
 * The value 0x7FFFFF is 23 bits wide and is the maximum possible width of the
 * L1 index within a physical address. It is calculated by
 * ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512GB, the largest
 * L0GPTSZ, and p_min is 12 for 4KB granules, the smallest PGS.
 */
#define GPT_L1_IDX_MASK(_p)     (0x7FFFFFUL >> (23U - \
                                (GPT_L1_IDX_WIDTH(_p))))

/* Bit shift for the index of the L1 GPI in a PA */
#define GPT_L1_GPI_IDX_SHIFT(_p)        (_p)

/* Mask for the index of the L1 GPI in a PA (16 GPIs per 64-bit entry) */
#define GPT_L1_GPI_IDX_MASK     (0xF)

/* Total number of entries in each L1 table */
#define GPT_L1_ENTRY_COUNT(_p)  ((GPT_L1_IDX_MASK(_p)) + 1UL)

/* Number of L1 entries in 2MB block */
#define GPT_L1_ENTRY_COUNT_2MB(_p)      (SZ_2M >> GPT_L1_IDX_SHIFT(_p))

/* Total size in bytes of each L1 table (8 bytes per entry) */
#define GPT_L1_TABLE_SIZE(_p)   ((GPT_L1_ENTRY_COUNT(_p)) << 3U)
/******************************************************************************/
/* General helper macros                                                      */
/******************************************************************************/
/* Protected space actual size in bytes */
#define GPT_PPS_ACTUAL_SIZE(_t) (1UL << (unsigned int)(_t))

/* Granule actual size in bytes */
#define GPT_PGS_ACTUAL_SIZE(_p) (1UL << (unsigned int)(_p))

/* Number of granules in 2MB block */
#define GPT_PGS_COUNT_2MB(_p)   (1UL << (21U - (unsigned int)(_p)))

/* L0 GPT region size in bytes */
#define GPT_L0GPTSZ_ACTUAL_SIZE (1UL << GPT_S_VAL)

/* Get the index of the L0 entry from a physical address */
#define GPT_L0_IDX(_pa)         ((_pa) >> GPT_L0_IDX_SHIFT)

/*
 * This definition is used to determine if a physical address lies on an L0
 * region boundary.
 */
#define GPT_IS_L0_ALIGNED(_pa)  \
        (((_pa) & (GPT_L0_REGION_SIZE - UL(1))) == UL(0))

/* Get the type field from an L0 descriptor */
#define GPT_L0_TYPE(_desc)      (((_desc) >> GPT_L0_TYPE_SHIFT) & \
                                GPT_L0_TYPE_MASK)

/* Create an L0 block descriptor */
#define GPT_L0_BLK_DESC(_gpi)   (GPT_L0_TYPE_BLK_DESC | \
                                (((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
                                GPT_L0_BLK_DESC_GPI_SHIFT))

/* Create an L0 table descriptor with an L1 table address */
#define GPT_L0_TBL_DESC(_pa)    (GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
                                (GPT_L0_TBL_DESC_L1ADDR_MASK << \
                                GPT_L0_TBL_DESC_L1ADDR_SHIFT)))

/* Get the GPI from an L0 block descriptor */
#define GPT_L0_BLKD_GPI(_desc)  (((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
                                GPT_L0_BLK_DESC_GPI_MASK)

/* Get the L1 table address from an L0 table descriptor */
#define GPT_L0_TBLD_ADDR(_desc) ((uint64_t *)(((_desc) & \
                                (GPT_L0_TBL_DESC_L1ADDR_MASK << \
                                GPT_L0_TBL_DESC_L1ADDR_SHIFT))))

/* Get the GPI from L1 Contiguous descriptor */
#define GPT_L1_CONT_GPI(_desc)  \
        (((_desc) >> GPT_L1_CONT_DESC_GPI_SHIFT) & GPT_L1_CONT_DESC_GPI_MASK)

/* Get the GPI from L1 Granules descriptor */
#define GPT_L1_GRAN_GPI(_desc)  ((_desc) & GPT_L1_GRAN_DESC_GPI_MASK)

/* Get the Contig field from L1 Contiguous descriptor */
#define GPT_L1_CONT_CONTIG(_desc)       \
        (((_desc) >> GPT_L1_CONT_DESC_CONTIG_SHIFT) & \
        GPT_L1_CONT_DESC_CONTIG_MASK)

/* Get the index into the L1 table from a physical address */
#define GPT_L1_IDX(_p, _pa)     \
        (((_pa) >> GPT_L1_IDX_SHIFT(_p)) & GPT_L1_IDX_MASK(_p))

/* Get the index of the GPI within an L1 table entry from a physical address */
#define GPT_L1_GPI_IDX(_p, _pa) \
        (((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & GPT_L1_GPI_IDX_MASK)

/* Determine if an address is granule-aligned */
#define GPT_IS_L1_ALIGNED(_p, _pa)      \
        (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - UL(1))) == UL(0))

/* Get addresses aligned down to 2MB/32MB/512MB boundaries */
#define ALIGN_2MB(_addr)        ((_addr) & ~(SZ_2M - 1UL))
#define ALIGN_32MB(_addr)       ((_addr) & ~(SZ_32M - 1UL))
#define ALIGN_512MB(_addr)      ((_addr) & ~(SZ_512M - 1UL))
  284. /* Determine if region is contiguous */
  285. #define GPT_REGION_IS_CONT(_len, _addr, _size) \
  286. (((_len) >= (_size)) && (((_addr) & ((_size) - UL(1))) == UL(0)))
  287. /* Get 32MB block number in 512MB block: 0-15 */
  288. #define GET_32MB_NUM(_addr) ((_addr >> 25) & 0xF)
  289. /* Get 2MB block number in 32MB block: 0-15 */
  290. #define GET_2MB_NUM(_addr) ((_addr >> 21) & 0xF)
  291. #endif /* GPT_RME_PRIVATE_H */