/* bl31.ld.S — linker script for the BL31 image */
/*
 * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */
  6. #include <common/bl_common.ld.h>
  7. #include <lib/xlat_tables/xlat_tables_defs.h>
  8. OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
  9. OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
  10. ENTRY(bl31_entrypoint)
  11. MEMORY {
  12. RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
  13. #if SEPARATE_NOBITS_REGION
  14. NOBITS (rw!a): ORIGIN = BL31_NOBITS_BASE, LENGTH = BL31_NOBITS_LIMIT - BL31_NOBITS_BASE
  15. #else /* SEPARATE_NOBITS_REGION */
  16. # define NOBITS RAM
  17. #endif /* SEPARATE_NOBITS_REGION */
  18. }
  19. #ifdef PLAT_EXTRA_LD_SCRIPT
  20. # include <plat.ld.S>
  21. #endif /* PLAT_EXTRA_LD_SCRIPT */
  22. SECTIONS {
  23. . = BL31_BASE;
  24. ASSERT(. == ALIGN(PAGE_SIZE),
  25. "BL31_BASE address is not aligned on a page boundary.")
  26. __BL31_START__ = .;
  27. #if SEPARATE_CODE_AND_RODATA
  28. .text . : {
  29. __TEXT_START__ = .;
  30. *bl31_entrypoint.o(.text*)
  31. *(SORT_BY_ALIGNMENT(SORT(.text*)))
  32. *(.vectors)
  33. . = ALIGN(PAGE_SIZE);
  34. __TEXT_END__ = .;
  35. } >RAM
  36. .rodata . : {
  37. __RODATA_START__ = .;
  38. *(SORT_BY_ALIGNMENT(.rodata*))
  39. # if PLAT_EXTRA_RODATA_INCLUDES
  40. # include <plat.ld.rodata.inc>
  41. # endif /* PLAT_EXTRA_RODATA_INCLUDES */
  42. RODATA_COMMON
  43. . = ALIGN(8);
  44. # include <lib/el3_runtime/pubsub_events.h>
  45. . = ALIGN(PAGE_SIZE);
  46. __RODATA_END__ = .;
  47. } >RAM
  48. #else /* SEPARATE_CODE_AND_RODATA */
  49. .ro . : {
  50. __RO_START__ = .;
  51. *bl31_entrypoint.o(.text*)
  52. *(SORT_BY_ALIGNMENT(.text*))
  53. *(SORT_BY_ALIGNMENT(.rodata*))
  54. RODATA_COMMON
  55. . = ALIGN(8);
  56. # include <lib/el3_runtime/pubsub_events.h>
  57. *(.vectors)
  58. __RO_END_UNALIGNED__ = .;
  59. /*
  60. * Memory page(s) mapped to this section will be marked as read-only,
  61. * executable. No RW data from the next section must creep in. Ensure
  62. * that the rest of the current memory page is unused.
  63. */
  64. . = ALIGN(PAGE_SIZE);
  65. __RO_END__ = .;
  66. } >RAM
  67. #endif /* SEPARATE_CODE_AND_RODATA */
  68. ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
  69. "cpu_ops not defined for this platform.")
  70. #if SPM_MM
  71. # ifndef SPM_SHIM_EXCEPTIONS_VMA
  72. # define SPM_SHIM_EXCEPTIONS_VMA RAM
  73. # endif /* SPM_SHIM_EXCEPTIONS_VMA */
  74. /*
  75. * Exception vectors of the SPM shim layer. They must be aligned to a 2K
  76. * address but we need to place them in a separate page so that we can set
  77. * individual permissions on them, so the actual alignment needed is the
  78. * page size.
  79. *
  80. * There's no need to include this into the RO section of BL31 because it
  81. * doesn't need to be accessed by BL31.
  82. */
  83. .spm_shim_exceptions : ALIGN(PAGE_SIZE) {
  84. __SPM_SHIM_EXCEPTIONS_START__ = .;
  85. *(.spm_shim_exceptions)
  86. . = ALIGN(PAGE_SIZE);
  87. __SPM_SHIM_EXCEPTIONS_END__ = .;
  88. } >SPM_SHIM_EXCEPTIONS_VMA AT>RAM
  89. PROVIDE(__SPM_SHIM_EXCEPTIONS_LMA__ = LOADADDR(.spm_shim_exceptions));
  90. . = LOADADDR(.spm_shim_exceptions) + SIZEOF(.spm_shim_exceptions);
  91. #endif /* SPM_MM */
  92. __RW_START__ = .;
  93. DATA_SECTION >RAM
  94. RELA_SECTION >RAM
  95. #ifdef BL31_PROGBITS_LIMIT
  96. ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
  97. #endif /* BL31_PROGBITS_LIMIT */
  98. #if SEPARATE_NOBITS_REGION
  99. . = ALIGN(PAGE_SIZE);
  100. __RW_END__ = .;
  101. __BL31_END__ = .;
  102. ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
  103. . = BL31_NOBITS_BASE;
  104. ASSERT(. == ALIGN(PAGE_SIZE),
  105. "BL31 NOBITS base address is not aligned on a page boundary.")
  106. __NOBITS_START__ = .;
  107. #endif /* SEPARATE_NOBITS_REGION */
  108. STACK_SECTION >NOBITS
  109. BSS_SECTION >NOBITS
  110. XLAT_TABLE_SECTION >NOBITS
  111. #if USE_COHERENT_MEM
  112. /*
  113. * The base address of the coherent memory section must be page-aligned to
  114. * guarantee that the coherent data are stored on their own pages and are
  115. * not mixed with normal data. This is required to set up the correct
  116. * memory attributes for the coherent data page tables.
  117. */
  118. .coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
  119. __COHERENT_RAM_START__ = .;
  120. /*
  121. * Bakery locks are stored in coherent memory. Each lock's data is
  122. * contiguous and fully allocated by the compiler.
  123. */
  124. *(.bakery_lock)
  125. *(.tzfw_coherent_mem)
  126. __COHERENT_RAM_END_UNALIGNED__ = .;
  127. /*
  128. * Memory page(s) mapped to this section will be marked as device
  129. * memory. No other unexpected data must creep in. Ensure the rest of
  130. * the current memory page is unused.
  131. */
  132. . = ALIGN(PAGE_SIZE);
  133. __COHERENT_RAM_END__ = .;
  134. } >NOBITS
  135. #endif /* USE_COHERENT_MEM */
  136. #if SEPARATE_NOBITS_REGION
  137. __NOBITS_END__ = .;
  138. ASSERT(. <= BL31_NOBITS_LIMIT, "BL31 NOBITS region has exceeded its limit.")
  139. #else /* SEPARATE_NOBITS_REGION */
  140. __RW_END__ = .;
  141. __BL31_END__ = .;
  142. ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
  143. #endif /* SEPARATE_NOBITS_REGION */
  144. /DISCARD/ : {
  145. *(.dynsym .dynstr .hash .gnu.hash)
  146. }
  147. }