220-gc_sections.patch

From: Felix Fietkau <nbd@nbd.name>
use -ffunction-sections, -fdata-sections and --gc-sections

In combination with kernel symbol export stripping this significantly reduces
the kernel image size. Used on both ARM and MIPS architectures.
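
The KEEP() annotations added below exist because --gc-sections discards every
input section that is not referenced from the entry point or from another kept
section. Tables such as the initcalls, exception tables and ksymtab arrays are
only reached at run time through linker-generated start/stop symbols, so the
garbage collector sees no reference to them and would otherwise drop them. A
minimal sketch of the idea, illustrative only and not part of the applied diff
(the section and symbol names mirror the INIT_SETUP macro):

    .init.data : {
        __setup_start = .;
        KEEP(*(.init.setup))    /* treated as a GC root, never discarded */
        __setup_end = .;
    }
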
Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Jonas Gorski <jogo@openwrt.org>
Signed-off-by: Gabor Juhos <juhosg@openwrt.org>
---
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -89,10 +89,14 @@ all-$(CONFIG_SYS_SUPPORTS_ZBOOT)+= vmlin
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
cflags-y += -msoft-float
-LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
+LDFLAGS_vmlinux += -G 0 -static -n -nostdlib --gc-sections
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
#
# pass -msoft-float to GAS if it supports it. However on newer binutils
# (specifically newer than 2.24.51.20140728) we then also need to explicitly
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -67,7 +67,7 @@ SECTIONS
/* Exception table for data bus errors */
__dbe_table : {
__start___dbe_table = .;
- *(__dbe_table)
+ KEEP(*(__dbe_table))
__stop___dbe_table = .;
}
@@ -112,7 +112,7 @@ SECTIONS
. = ALIGN(4);
.mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
__mips_machines_start = .;
- *(.mips.machines.init)
+ KEEP(*(.mips.machines.init))
__mips_machines_end = .;
}
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -89,7 +89,7 @@
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_mcount_loc) = .; \
- *(__mcount_loc) \
+ KEEP(*(__mcount_loc)) \
VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
@@ -97,7 +97,7 @@
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
- *(_ftrace_annotated_branch) \
+ KEEP(*(_ftrace_annotated_branch)) \
VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
@@ -105,7 +105,7 @@
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
- *(_ftrace_branch) \
+ KEEP(*(_ftrace_branch)) \
VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
@@ -114,7 +114,7 @@
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \
- *(_kprobe_blacklist) \
+ KEEP(*(_kprobe_blacklist)) \
VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .;
#else
#define KPROBE_BLACKLIST()
@@ -123,7 +123,7 @@
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
- *(_ftrace_events) \
+ KEEP(*(_ftrace_events)) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
@@ -131,7 +131,7 @@
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
- *(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
+ KEEP(*(__trace_printk_fmt)) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
*(__tracepoint_str) /* Trace_printk fmt' pointer */ \
@@ -144,7 +144,7 @@
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
- *(__syscalls_metadata) \
+ KEEP(*(__syscalls_metadata)) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
@@ -158,8 +158,8 @@
#define _OF_TABLE_1(name) \
. = ALIGN(8); \
VMLINUX_SYMBOL(__##name##_of_table) = .; \
- *(__##name##_of_table) \
- *(__##name##_of_table_end)
+ KEEP(*(__##name##_of_table)) \
+ KEEP(*(__##name##_of_table_end))
#define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
@@ -171,7 +171,7 @@
#define KERNEL_DTB() \
STRUCT_ALIGN(); \
VMLINUX_SYMBOL(__dtb_start) = .; \
- *(.dtb.init.rodata) \
+ KEEP(*(.dtb.init.rodata)) \
VMLINUX_SYMBOL(__dtb_end) = .;
/* .data section */
@@ -187,16 +187,17 @@
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___jump_table) = .; \
- *(__jump_table) \
+ KEEP(*(__jump_table)) \
VMLINUX_SYMBOL(__stop___jump_table) = .; \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
- *(__verbose) \
+ KEEP(*(__verbose)) \
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
- TRACEPOINT_STR()
+ TRACEPOINT_STR() \
+ *(.data.[a-zA-Z_]*)
/*
* Data section helpers
@@ -250,35 +251,35 @@
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
- *(.pci_fixup_early) \
+ KEEP(*(.pci_fixup_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
- *(.pci_fixup_header) \
+ KEEP(*(.pci_fixup_header)) \
VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
- *(.pci_fixup_final) \
+ KEEP(*(.pci_fixup_final)) \
VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
- *(.pci_fixup_enable) \
+ KEEP(*(.pci_fixup_enable)) \
VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
- *(.pci_fixup_resume) \
+ KEEP(*(.pci_fixup_resume)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
- *(.pci_fixup_resume_early) \
+ KEEP(*(.pci_fixup_resume_early)) \
VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
- *(.pci_fixup_suspend) \
+ KEEP(*(.pci_fixup_suspend)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
VMLINUX_SYMBOL(__start_pci_fixups_suspend_late) = .; \
- *(.pci_fixup_suspend_late) \
+ KEEP(*(.pci_fixup_suspend_late)) \
VMLINUX_SYMBOL(__end_pci_fixups_suspend_late) = .; \
} \
\
/* Built-in firmware blobs */ \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_builtin_fw) = .; \
- *(.builtin_fw) \
+ KEEP(*(.builtin_fw)) \
VMLINUX_SYMBOL(__end_builtin_fw) = .; \
} \
\
@@ -287,49 +288,49 @@
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab) = .; \
- *(SORT(___ksymtab+*)) \
+ KEEP(*(SORT(___ksymtab+*))) \
VMLINUX_SYMBOL(__stop___ksymtab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
- *(SORT(___ksymtab_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
} \
\
/* Kernel symbol table: Normal unused symbols */ \
__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
- *(SORT(___ksymtab_unused+*)) \
+ KEEP(*(SORT(___ksymtab_unused+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
} \
\
/* Kernel symbol table: GPL-only unused symbols */ \
__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
- *(SORT(___ksymtab_unused_gpl+*)) \
+ KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
- *(SORT(___ksymtab_gpl_future+*)) \
+ KEEP(*(SORT(___ksymtab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
} \
\
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab) = .; \
- *(SORT(___kcrctab+*)) \
+ KEEP(*(SORT(___kcrctab+*))) \
VMLINUX_SYMBOL(__stop___kcrctab) = .; \
} \
\
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
- *(SORT(___kcrctab_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
} \
\
@@ -343,14 +344,14 @@
/* Kernel symbol table: GPL-only unused symbols */ \
__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
- *(SORT(___kcrctab_unused_gpl+*)) \
+ KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
} \
\
/* Kernel symbol table: GPL-future-only symbols */ \
__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
- *(SORT(___kcrctab_gpl_future+*)) \
+ KEEP(*(SORT(___kcrctab_gpl_future+*))) \
VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
} \
\
@@ -369,14 +370,14 @@
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___param) = .; \
- *(__param) \
+ KEEP(*(__param)) \
VMLINUX_SYMBOL(__stop___param) = .; \
} \
\
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___modver) = .; \
- *(__modver) \
+ KEEP(*(__modver)) \
VMLINUX_SYMBOL(__stop___modver) = .; \
. = ALIGN((align)); \
VMLINUX_SYMBOL(__end_rodata) = .; \
@@ -432,7 +433,7 @@
#define ENTRY_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__entry_text_start) = .; \
- *(.entry.text) \
+ KEEP(*(.entry.text)) \
VMLINUX_SYMBOL(__entry_text_end) = .;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -460,7 +461,7 @@
. = ALIGN(align); \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___ex_table) = .; \
- *(__ex_table) \
+ KEEP(*(__ex_table)) \
VMLINUX_SYMBOL(__stop___ex_table) = .; \
}
@@ -476,8 +477,8 @@
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
VMLINUX_SYMBOL(__ctors_start) = .; \
- *(.ctors) \
- *(.init_array) \
+ KEEP(*(.ctors)) \
+ KEEP(*(.init_array)) \
VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
@@ -525,7 +526,7 @@
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
- *(.sbss) \
+ *(.sbss .sbss.*) \
*(.scommon) \
}
@@ -543,7 +544,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss) \
+ *(.bss .bss.*) \
*(COMMON) \
}
@@ -592,7 +593,7 @@
. = ALIGN(8); \
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start___bug_table) = .; \
- *(__bug_table) \
+ KEEP(*(__bug_table)) \
VMLINUX_SYMBOL(__stop___bug_table) = .; \
}
#else
@@ -604,7 +605,7 @@
. = ALIGN(4); \
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__tracedata_start) = .; \
- *(.tracedata) \
+ KEEP(*(.tracedata)) \
VMLINUX_SYMBOL(__tracedata_end) = .; \
}
#else
@@ -621,17 +622,17 @@
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
VMLINUX_SYMBOL(__setup_start) = .; \
- *(.init.setup) \
+ KEEP(*(.init.setup)) \
VMLINUX_SYMBOL(__setup_end) = .;
#define INIT_CALLS_LEVEL(level) \
VMLINUX_SYMBOL(__initcall##level##_start) = .; \
- *(.initcall##level##.init) \
- *(.initcall##level##s.init) \
+ KEEP(*(.initcall##level##.init)) \
+ KEEP(*(.initcall##level##s.init)) \
#define INIT_CALLS \
VMLINUX_SYMBOL(__initcall_start) = .; \
- *(.initcallearly.init) \
+ KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
@@ -645,21 +646,21 @@
#define CON_INITCALL \
VMLINUX_SYMBOL(__con_initcall_start) = .; \
- *(.con_initcall.init) \
+ KEEP(*(.con_initcall.init)) \
VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL \
VMLINUX_SYMBOL(__security_initcall_start) = .; \
- *(.security_initcall.init) \
+ KEEP(*(.security_initcall.init)) \
VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
. = ALIGN(4); \
VMLINUX_SYMBOL(__initramfs_start) = .; \
- *(.init.ramfs) \
+ KEEP(*(.init.ramfs)) \
. = ALIGN(8); \
- *(.init.ramfs.info)
+ KEEP(*(.init.ramfs.info))
#else
#define INIT_RAM_FS
#endif
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -18,11 +18,16 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
LDFLAGS_MODULE += --be8
endif
+LDFLAGS_vmlinux += --gc-sections
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+ifndef CONFIG_FUNCTION_TRACER
+KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
+endif
+
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,13 +12,13 @@
#define PROC_INFO \
. = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
+ KEEP(*(.proc.info.init)) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
+ KEEP(*(.idmap.text)) \
VMLINUX_SYMBOL(__idmap_text_end) = .; \
. = ALIGN(32); \
VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
@@ -93,7 +93,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
__exception_text_start = .;
- *(.exception.text)
+ KEEP(*(.exception.text))
__exception_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
@@ -118,7 +118,7 @@ SECTIONS
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
#ifdef CONFIG_MMU
- *(__ex_table)
+ KEEP(*(__ex_table))
#endif
__stop___ex_table = .;
}
@@ -130,12 +130,12 @@ SECTIONS
. = ALIGN(8);
.ARM.unwind_idx : {
__start_unwind_idx = .;
- *(.ARM.exidx*)
+ KEEP(*(.ARM.exidx*))
__stop_unwind_idx = .;
}
.ARM.unwind_tab : {
__start_unwind_tab = .;
- *(.ARM.extab*)
+ KEEP(*(.ARM.extab*))
__stop_unwind_tab = .;
}
#endif
@@ -154,14 +154,14 @@ SECTIONS
*/
__vectors_start = .;
.vectors 0 : AT(__vectors_start) {
- *(.vectors)
+ KEEP(*(.vectors))
}
. = __vectors_start + SIZEOF(.vectors);
__vectors_end = .;
__stubs_start = .;
.stubs 0x1000 : AT(__stubs_start) {
- *(.stubs)
+ KEEP(*(.stubs))
}
. = __stubs_start + SIZEOF(.stubs);
__stubs_end = .;
@@ -175,24 +175,24 @@ SECTIONS
}
.init.arch.info : {
__arch_info_begin = .;
- *(.arch.info.init)
+ KEEP(*(.arch.info.init))
__arch_info_end = .;
}
.init.tagtable : {
__tagtable_begin = .;
- *(.taglist.init)
+ KEEP(*(.taglist.init))
__tagtable_end = .;
}
#ifdef CONFIG_SMP_ON_UP
.init.smpalt : {
__smpalt_begin = .;
- *(.alt.smp.init)
+ KEEP(*(.alt.smp.init))
__smpalt_end = .;
}
#endif
.init.pv_table : {
__pv_table_begin = .;
- *(.pv_table)
+ KEEP(*(.pv_table))
__pv_table_end = .;
}
.init.data : {
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -122,6 +122,7 @@ ifeq ($(CONFIG_FUNCTION_TRACER),y)
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
endif
+KBUILD_CFLAGS_KERNEL := $(patsubst -f%-sections,,$(KBUILD_CFLAGS_KERNEL))
ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
asflags-y := -DZIMAGE
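
Illustrative check, not part of the patch: GNU ld's --print-gc-sections option
lists every input section removed by --gc-sections, which helps verify that no
required table was discarded. For example, a temporary Makefile tweak such as

    LDFLAGS_vmlinux += --print-gc-sections

makes the vmlinux link report each section it dropped.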