/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
  6. #ifndef BL_COMMON_LD_H
  7. #define BL_COMMON_LD_H
  8. #include <platform_def.h>
  9. #ifdef __aarch64__
  10. #define STRUCT_ALIGN 8
  11. #define BSS_ALIGN 16
  12. #else
  13. #define STRUCT_ALIGN 4
  14. #define BSS_ALIGN 8
  15. #endif
  16. #ifndef DATA_ALIGN
  17. #define DATA_ALIGN 1
  18. #endif
  19. #define CPU_OPS \
  20. . = ALIGN(STRUCT_ALIGN); \
  21. __CPU_OPS_START__ = .; \
  22. KEEP(*(.cpu_ops)) \
  23. __CPU_OPS_END__ = .;
  24. #define PARSER_LIB_DESCS \
  25. . = ALIGN(STRUCT_ALIGN); \
  26. __PARSER_LIB_DESCS_START__ = .; \
  27. KEEP(*(.img_parser_lib_descs)) \
  28. __PARSER_LIB_DESCS_END__ = .;
  29. #define RT_SVC_DESCS \
  30. . = ALIGN(STRUCT_ALIGN); \
  31. __RT_SVC_DESCS_START__ = .; \
  32. KEEP(*(.rt_svc_descs)) \
  33. __RT_SVC_DESCS_END__ = .;
  34. #if SPMC_AT_EL3
  35. #define EL3_LP_DESCS \
  36. . = ALIGN(STRUCT_ALIGN); \
  37. __EL3_LP_DESCS_START__ = .; \
  38. KEEP(*(.el3_lp_descs)) \
  39. __EL3_LP_DESCS_END__ = .;
  40. #else
  41. #define EL3_LP_DESCS
  42. #endif
  43. #if ENABLE_SPMD_LP
  44. #define SPMD_LP_DESCS \
  45. . = ALIGN(STRUCT_ALIGN); \
  46. __SPMD_LP_DESCS_START__ = .; \
  47. KEEP(*(.spmd_lp_descs)) \
  48. __SPMD_LP_DESCS_END__ = .;
  49. #else
  50. #define SPMD_LP_DESCS
  51. #endif
  52. #define PMF_SVC_DESCS \
  53. . = ALIGN(STRUCT_ALIGN); \
  54. __PMF_SVC_DESCS_START__ = .; \
  55. KEEP(*(.pmf_svc_descs)) \
  56. __PMF_SVC_DESCS_END__ = .;
  57. #define FCONF_POPULATOR \
  58. . = ALIGN(STRUCT_ALIGN); \
  59. __FCONF_POPULATOR_START__ = .; \
  60. KEEP(*(.fconf_populator)) \
  61. __FCONF_POPULATOR_END__ = .;
  62. /*
  63. * Keep the .got section in the RO section as it is patched prior to enabling
  64. * the MMU and having the .got in RO is better for security. GOT is a table of
  65. * addresses so ensure pointer size alignment.
  66. */
  67. #define GOT \
  68. . = ALIGN(STRUCT_ALIGN); \
  69. __GOT_START__ = .; \
  70. *(.got) \
  71. __GOT_END__ = .;
  72. /*
  73. * The base xlat table
  74. *
  75. * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
  76. * or into the bss section otherwise.
  77. */
  78. #define BASE_XLAT_TABLE \
  79. . = ALIGN(16); \
  80. __BASE_XLAT_TABLE_START__ = .; \
  81. *(.base_xlat_table) \
  82. __BASE_XLAT_TABLE_END__ = .;
  83. #if PLAT_RO_XLAT_TABLES
  84. #define BASE_XLAT_TABLE_RO BASE_XLAT_TABLE
  85. #define BASE_XLAT_TABLE_BSS
  86. #else
  87. #define BASE_XLAT_TABLE_RO
  88. #define BASE_XLAT_TABLE_BSS BASE_XLAT_TABLE
  89. #endif
  90. #define RODATA_COMMON \
  91. RT_SVC_DESCS \
  92. FCONF_POPULATOR \
  93. PMF_SVC_DESCS \
  94. PARSER_LIB_DESCS \
  95. CPU_OPS \
  96. GOT \
  97. BASE_XLAT_TABLE_RO \
  98. EL3_LP_DESCS \
  99. SPMD_LP_DESCS
  100. /*
  101. * .data must be placed at a lower address than the stacks if the stack
  102. * protector is enabled. Alternatively, the .data.stack_protector_canary
  103. * section can be placed independently of the main .data section.
  104. */
  105. #define DATA_SECTION \
  106. .data . : ALIGN(DATA_ALIGN) { \
  107. __DATA_START__ = .; \
  108. *(SORT_BY_ALIGNMENT(.data*)) \
  109. __DATA_END__ = .; \
  110. }
  111. /*
  112. * .rela.dyn needs to come after .data for the read-elf utility to parse
  113. * this section correctly.
  114. */
  115. #if __aarch64__
  116. #define RELA_DYN_NAME .rela.dyn
  117. #define RELOC_SECTIONS_PATTERN *(.rela*)
  118. #else
  119. #define RELA_DYN_NAME .rel.dyn
  120. #define RELOC_SECTIONS_PATTERN *(.rel*)
  121. #endif
  122. #define RELA_SECTION \
  123. RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) { \
  124. __RELA_START__ = .; \
  125. RELOC_SECTIONS_PATTERN \
  126. __RELA_END__ = .; \
  127. }
  128. #if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
  129. #define STACK_SECTION \
  130. .stacks (NOLOAD) : { \
  131. __STACKS_START__ = .; \
  132. *(.tzfw_normal_stacks) \
  133. __STACKS_END__ = .; \
  134. }
  135. #endif
  136. /*
  137. * If BL doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
  138. * will be zero. For this reason, the only two valid values for
  139. * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
  140. * PLAT_PERCPU_BAKERY_LOCK_SIZE.
  141. */
  142. #ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
  143. #define BAKERY_LOCK_SIZE_CHECK \
  144. ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) || \
  145. (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
  146. "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
  147. #else
  148. #define BAKERY_LOCK_SIZE_CHECK
  149. #endif
  150. /*
  151. * Bakery locks are stored in normal .bss memory
  152. *
  153. * Each lock's data is spread across multiple cache lines, one per CPU,
  154. * but multiple locks can share the same cache line.
  155. * The compiler will allocate enough memory for one CPU's bakery locks,
  156. * the remaining cache lines are allocated by the linker script
  157. */
  158. #if !USE_COHERENT_MEM
  159. #define BAKERY_LOCK_NORMAL \
  160. . = ALIGN(CACHE_WRITEBACK_GRANULE); \
  161. __BAKERY_LOCK_START__ = .; \
  162. __PERCPU_BAKERY_LOCK_START__ = .; \
  163. *(.bakery_lock) \
  164. . = ALIGN(CACHE_WRITEBACK_GRANULE); \
  165. __PERCPU_BAKERY_LOCK_END__ = .; \
  166. __PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
  167. . = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
  168. __BAKERY_LOCK_END__ = .; \
  169. BAKERY_LOCK_SIZE_CHECK
  170. #else
  171. #define BAKERY_LOCK_NORMAL
  172. #endif
  173. /*
  174. * Time-stamps are stored in normal .bss memory
  175. *
  176. * The compiler will allocate enough memory for one CPU's time-stamps,
  177. * the remaining memory for other CPUs is allocated by the
  178. * linker script
  179. */
  180. #define PMF_TIMESTAMP \
  181. . = ALIGN(CACHE_WRITEBACK_GRANULE); \
  182. __PMF_TIMESTAMP_START__ = .; \
  183. KEEP(*(.pmf_timestamp_array)) \
  184. . = ALIGN(CACHE_WRITEBACK_GRANULE); \
  185. __PMF_PERCPU_TIMESTAMP_END__ = .; \
  186. __PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
  187. . = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
  188. __PMF_TIMESTAMP_END__ = .;
  189. /*
  190. * The .bss section gets initialised to 0 at runtime.
  191. * Its base address has bigger alignment for better performance of the
  192. * zero-initialization code.
  193. */
  194. #define BSS_SECTION \
  195. .bss (NOLOAD) : ALIGN(BSS_ALIGN) { \
  196. __BSS_START__ = .; \
  197. *(SORT_BY_ALIGNMENT(.bss*)) \
  198. *(COMMON) \
  199. BAKERY_LOCK_NORMAL \
  200. PMF_TIMESTAMP \
  201. BASE_XLAT_TABLE_BSS \
  202. __BSS_END__ = .; \
  203. }
  204. /*
  205. * The .xlat_table section is for full, aligned page tables (4K).
  206. * Removing them from .bss avoids forcing 4K alignment on
  207. * the .bss section. The tables are initialized to zero by the translation
  208. * tables library.
  209. */
  210. #define XLAT_TABLE_SECTION \
  211. .xlat_table (NOLOAD) : { \
  212. __XLAT_TABLE_START__ = .; \
  213. *(.xlat_table) \
  214. __XLAT_TABLE_END__ = .; \
  215. }
  216. #endif /* BL_COMMON_LD_H */