  1. /*
  2. * Copyright 2018-2022 NXP
  3. *
  4. * SPDX-License-Identifier: BSD-3-Clause
  5. *
  6. */
  7. #include <assert.h>
  8. #include <arch.h>
  9. #include <arch_helpers.h>
  10. #include <common/debug.h>
  11. #include <lib/mmio.h>
  12. #include <lib/xlat_tables/xlat_tables_v2.h>
  13. #include <mmu_def.h>
  14. #include <plat/common/platform.h>
  15. #include "plat_common.h"
  16. #include "platform_def.h"
  17. const mmap_region_t *plat_ls_get_mmap(void);
  18. /*
  19. * Table of memory regions for various BL stages to map using the MMU.
  20. * This doesn't include Trusted SRAM as arm_setup_page_tables() already
  21. * takes care of mapping it.
  22. *
  23. * The flash needs to be mapped as writable in order to erase the FIP's Table of
  24. * Contents in case of unrecoverable error (see plat_error_handler()).
  25. */
/*
 * Per-image static memory map. Each image maps the CCSR register space;
 * BL31 additionally maps DCSR (when present) and OCRAM, and BL32 maps its
 * secure memory. Each list is terminated by a zero-filled sentinel entry.
 */
#ifdef IMAGE_BL2
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}	/* list terminator */
};
#endif
#ifdef IMAGE_BL31
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}	/* list terminator */
};
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}	/* list terminator */
};
#endif
  49. /* Weak definitions may be overridden in specific NXP SoC */
  50. #pragma weak plat_get_ns_image_entrypoint
  51. #pragma weak plat_ls_get_mmap
  52. #if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
  53. static void mmap_add_ddr_regions_statically(void)
  54. {
  55. int i = 0;
  56. dram_regions_info_t *info_dram_regions = get_dram_regions_info();
  57. /* MMU map for Non-Secure DRAM Regions */
  58. VERBOSE("DRAM Region %d: %p - %p\n", i,
  59. (void *) info_dram_regions->region[i].addr,
  60. (void *) (info_dram_regions->region[i].addr
  61. + info_dram_regions->region[i].size
  62. - 1));
  63. mmap_add_region(info_dram_regions->region[i].addr,
  64. info_dram_regions->region[i].addr,
  65. info_dram_regions->region[i].size,
  66. MT_MEMORY | MT_RW | MT_NS);
  67. /* MMU map for Secure DDR Region on DRAM-0 */
  68. if (info_dram_regions->region[i].size >
  69. (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
  70. VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
  71. (void *) (info_dram_regions->region[i].addr
  72. + info_dram_regions->region[i].size),
  73. (void *) (info_dram_regions->region[i].addr
  74. + info_dram_regions->region[i].size
  75. + NXP_SECURE_DRAM_SIZE
  76. + NXP_SP_SHRD_DRAM_SIZE
  77. - 1));
  78. mmap_add_region((info_dram_regions->region[i].addr
  79. + info_dram_regions->region[i].size),
  80. (info_dram_regions->region[i].addr
  81. + info_dram_regions->region[i].size),
  82. (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
  83. MT_MEMORY | MT_RW | MT_SECURE);
  84. }
  85. #ifdef IMAGE_BL31
  86. for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
  87. if (info_dram_regions->region[i].size == 0)
  88. break;
  89. VERBOSE("DRAM Region %d: %p - %p\n", i,
  90. (void *) info_dram_regions->region[i].addr,
  91. (void *) (info_dram_regions->region[i].addr
  92. + info_dram_regions->region[i].size
  93. - 1));
  94. mmap_add_region(info_dram_regions->region[i].addr,
  95. info_dram_regions->region[i].addr,
  96. info_dram_regions->region[i].size,
  97. MT_MEMORY | MT_RW | MT_NS);
  98. }
  99. #endif
  100. }
  101. #endif
  102. #if defined(PLAT_XLAT_TABLES_DYNAMIC)
  103. void mmap_add_ddr_region_dynamically(void)
  104. {
  105. int ret, i = 0;
  106. dram_regions_info_t *info_dram_regions = get_dram_regions_info();
  107. /* MMU map for Non-Secure DRAM Regions */
  108. VERBOSE("DRAM Region %d: %p - %p\n", i,
  109. (void *) info_dram_regions->region[i].addr,
  110. (void *) (info_dram_regions->region[i].addr
  111. + info_dram_regions->region[i].size
  112. - 1));
  113. ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
  114. info_dram_regions->region[i].addr,
  115. info_dram_regions->region[i].size,
  116. MT_MEMORY | MT_RW | MT_NS);
  117. if (ret != 0) {
  118. ERROR("Failed to add dynamic memory region\n");
  119. panic();
  120. }
  121. /* MMU map for Secure DDR Region on DRAM-0 */
  122. if (info_dram_regions->region[i].size >
  123. (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
  124. VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
  125. (void *) (info_dram_regions->region[i].addr
  126. + info_dram_regions->region[i].size),
  127. (void *) (info_dram_regions->region[i].addr
  128. + info_dram_regions->region[i].size
  129. + NXP_SECURE_DRAM_SIZE
  130. + NXP_SP_SHRD_DRAM_SIZE
  131. - 1));
  132. ret = mmap_add_dynamic_region((info_dram_regions->region[i].addr
  133. + info_dram_regions->region[i].size),
  134. (info_dram_regions->region[i].addr
  135. + info_dram_regions->region[i].size),
  136. (NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
  137. MT_MEMORY | MT_RW | MT_SECURE);
  138. if (ret != 0) {
  139. ERROR("Failed to add dynamic memory region\n");
  140. panic();
  141. }
  142. }
  143. #ifdef IMAGE_BL31
  144. for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
  145. if (info_dram_regions->region[i].size == 0) {
  146. break;
  147. }
  148. VERBOSE("DRAM Region %d: %p - %p\n", i,
  149. (void *) info_dram_regions->region[i].addr,
  150. (void *) (info_dram_regions->region[i].addr
  151. + info_dram_regions->region[i].size
  152. - 1));
  153. ret = mmap_add_dynamic_region(info_dram_regions->region[i].addr,
  154. info_dram_regions->region[i].addr,
  155. info_dram_regions->region[i].size,
  156. MT_MEMORY | MT_RW | MT_NS);
  157. if (ret != 0) {
  158. ERROR("Failed to add dynamic memory region\n");
  159. panic();
  160. }
  161. }
  162. #endif
  163. }
  164. #endif
/*
 * Set up the page tables for the generic and platform-specific memory regions.
 * The extents of the generic memory regions are specified by the function
 * arguments and consist of:
 * - Trusted SRAM seen by the BL image;
 * - Code section;
 * - Read-only data section;
 * - Coherent memory region, if applicable (only when USE_COHERENT_MEM=1,
 *   which also adds the coh_start/coh_limit parameters).
 *
 * All regions are identity-mapped (VA == PA). After the generic regions,
 * the platform map from plat_ls_get_mmap() and (for BL31, or when no DDR
 * FIP image is used) the static DDR regions are added, and finally the
 * translation tables are created. The MMU itself is enabled by the caller.
 */
void ls_setup_page_tables(uintptr_t total_base,
			  size_t total_size,
			  uintptr_t code_start,
			  uintptr_t code_limit,
			  uintptr_t rodata_start,
			  uintptr_t rodata_limit
#if USE_COHERENT_MEM
			  ,
			  uintptr_t coh_start,
			  uintptr_t coh_limit
#endif
			  )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Memory seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section (read-only, executable) */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/* Re-map the coherent memory region as Device memory */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_ls_get_mmap());

#if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
	mmap_add_ddr_regions_statically();
#endif

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
  224. /*******************************************************************************
  225. * Returns NXP platform specific memory map regions.
  226. ******************************************************************************/
  227. const mmap_region_t *plat_ls_get_mmap(void)
  228. {
  229. return plat_ls_mmap;
  230. }
  231. /*
  232. * This function get the number of clusters and cores count per cluster
  233. * in the SoC.
  234. */
  235. void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
  236. uint8_t *num_clusters, uint8_t *cores_per_cluster)
  237. {
  238. const soc_info_t *soc_info = get_soc_info();
  239. *num_clusters = NUMBER_OF_CLUSTERS;
  240. *cores_per_cluster = CORES_PER_CLUSTER;
  241. unsigned int i;
  242. for (i = 0U; i < ps_count; i++) {
  243. if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
  244. *num_clusters = soc_list[i].num_clusters;
  245. *cores_per_cluster = soc_list[i].cores_per_cluster;
  246. break;
  247. }
  248. }
  249. VERBOSE("NUM of cluster = 0x%x, Cores per cluster = 0x%x\n",
  250. *num_clusters, *cores_per_cluster);
  251. }