Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

treewide: Convert macro and uses of __section(foo) to __section("foo")

Use a more generic form for __section that requires quotes to avoid
complications with clang and gcc differences.

Remove the quote operator # from compiler_attributes.h __section macro.

Convert all unquoted __section(foo) uses to quoted __section("foo").
Also convert __attribute__((section("foo"))) uses to __section("foo")
even if the __attribute__ has multiple list entry forms.

Conversion done using the script at:

https://lore.kernel.org/lkml/75393e5ddc272dc7403de74d645e6c6e0f4e70eb.camel@perches.com/2-convert_section.pl

Signed-off-by: Joe Perches <joe@perches.com>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Reviewed-by: Miguel Ojeda <ojeda@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Joe Perches and committed by
Linus Torvalds
33def849 986b9eac

+196 -196
+4 -4
arch/arc/include/asm/linkage.h
··· 64 64 #else /* !__ASSEMBLY__ */ 65 65 66 66 #ifdef CONFIG_ARC_HAS_ICCM 67 - #define __arcfp_code __section(.text.arcfp) 67 + #define __arcfp_code __section(".text.arcfp") 68 68 #else 69 - #define __arcfp_code __section(.text) 69 + #define __arcfp_code __section(".text") 70 70 #endif 71 71 72 72 #ifdef CONFIG_ARC_HAS_DCCM 73 - #define __arcfp_data __section(.data.arcfp) 73 + #define __arcfp_data __section(".data.arcfp") 74 74 #else 75 - #define __arcfp_data __section(.data) 75 + #define __arcfp_data __section(".data") 76 76 #endif 77 77 78 78 #endif /* __ASSEMBLY__ */
+1 -1
arch/arc/include/asm/mach_desc.h
··· 53 53 */ 54 54 #define MACHINE_START(_type, _name) \ 55 55 static const struct machine_desc __mach_desc_##_type \ 56 - __used __section(.arch.info.init) = { \ 56 + __used __section(".arch.info.init") = { \ 57 57 .name = _name, 58 58 59 59 #define MACHINE_END \
+1 -1
arch/arc/plat-hsdk/platform.c
··· 13 13 #include <asm/io.h> 14 14 #include <asm/mach_desc.h> 15 15 16 - int arc_hsdk_axi_dmac_coherent __section(.data) = 0; 16 + int arc_hsdk_axi_dmac_coherent __section(".data") = 0; 17 17 18 18 #define ARC_CCM_UNUSED_ADDR 0x60000000 19 19
+1 -1
arch/arm/include/asm/cache.h
··· 24 24 #define ARCH_SLAB_MINALIGN 8 25 25 #endif 26 26 27 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 27 + #define __read_mostly __section(".data..read_mostly") 28 28 29 29 #endif
+1 -1
arch/arm/include/asm/cpuidle.h
··· 42 42 43 43 #define CPUIDLE_METHOD_OF_DECLARE(name, _method, _ops) \ 44 44 static const struct of_cpuidle_method __cpuidle_method_of_table_##name \ 45 - __used __section(__cpuidle_method_of_table) \ 45 + __used __section("__cpuidle_method_of_table") \ 46 46 = { .method = _method, .ops = _ops } 47 47 48 48 extern int arm_cpuidle_suspend(int index);
+1 -1
arch/arm/include/asm/idmap.h
··· 6 6 #include <linux/pgtable.h> 7 7 8 8 /* Tag a function as requiring to be executed via an identity mapping. */ 9 - #define __idmap __section(.idmap.text) noinline notrace 9 + #define __idmap __section(".idmap.text") noinline notrace 10 10 11 11 extern pgd_t *idmap_pgd; 12 12
+2 -2
arch/arm/include/asm/mach/arch.h
··· 81 81 #define MACHINE_START(_type,_name) \ 82 82 static const struct machine_desc __mach_desc_##_type \ 83 83 __used \ 84 - __attribute__((__section__(".arch.info.init"))) = { \ 84 + __section(".arch.info.init") = { \ 85 85 .nr = MACH_TYPE_##_type, \ 86 86 .name = _name, 87 87 ··· 91 91 #define DT_MACHINE_START(_name, _namestr) \ 92 92 static const struct machine_desc __mach_desc_##_name \ 93 93 __used \ 94 - __attribute__((__section__(".arch.info.init"))) = { \ 94 + __section(".arch.info.init") = { \ 95 95 .nr = ~0, \ 96 96 .name = _namestr, 97 97
+1 -1
arch/arm/include/asm/setup.h
··· 14 14 #include <uapi/asm/setup.h> 15 15 16 16 17 - #define __tag __used __attribute__((__section__(".taglist.init"))) 17 + #define __tag __used __section(".taglist.init") 18 18 #define __tagtable(tag, fn) \ 19 19 static const struct tagtable __tagtable_##fn __tag = { tag, fn } 20 20
+1 -1
arch/arm/include/asm/smp.h
··· 112 112 113 113 #define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ 114 114 static const struct of_cpu_method __cpu_method_of_table_##name \ 115 - __used __section(__cpu_method_of_table) \ 115 + __used __section("__cpu_method_of_table") \ 116 116 = { .method = _method, .ops = _ops } 117 117 /* 118 118 * set platform specific SMP operations
+4 -4
arch/arm/include/asm/tcm.h
··· 16 16 #include <linux/compiler.h> 17 17 18 18 /* Tag variables with this */ 19 - #define __tcmdata __section(.tcm.data) 19 + #define __tcmdata __section(".tcm.data") 20 20 /* Tag constants with this */ 21 - #define __tcmconst __section(.tcm.rodata) 21 + #define __tcmconst __section(".tcm.rodata") 22 22 /* Tag functions inside TCM called from outside TCM with this */ 23 - #define __tcmfunc __attribute__((long_call)) __section(.tcm.text) noinline 23 + #define __tcmfunc __attribute__((long_call)) __section(".tcm.text") noinline 24 24 /* Tag function inside TCM called from inside TCM with this */ 25 - #define __tcmlocalfunc __section(.tcm.text) 25 + #define __tcmlocalfunc __section(".tcm.text") 26 26 27 27 void *tcm_alloc(size_t len); 28 28 void tcm_free(void *addr, size_t len);
+1 -1
arch/arm/kernel/cpuidle.c
··· 11 11 extern struct of_cpuidle_method __cpuidle_method_of_table[]; 12 12 13 13 static const struct of_cpuidle_method __cpuidle_method_of_table_sentinel 14 - __used __section(__cpuidle_method_of_table_end); 14 + __used __section("__cpuidle_method_of_table_end"); 15 15 16 16 static struct cpuidle_ops cpuidle_ops[NR_CPUS] __ro_after_init; 17 17
+1 -1
arch/arm/kernel/devtree.c
··· 29 29 extern struct of_cpu_method __cpu_method_of_table[]; 30 30 31 31 static const struct of_cpu_method __cpu_method_of_table_sentinel 32 - __used __section(__cpu_method_of_table_end); 32 + __used __section("__cpu_method_of_table_end"); 33 33 34 34 35 35 static int __init set_smp_ops_by_method(struct device_node *node)
+1 -1
arch/arm64/include/asm/cache.h
··· 79 79 return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; 80 80 } 81 81 82 - #define __read_mostly __section(.data..read_mostly) 82 + #define __read_mostly __section(".data..read_mostly") 83 83 84 84 static inline int cache_line_size_of_cpu(void) 85 85 {
+1 -1
arch/arm64/kernel/efi.c
··· 54 54 } 55 55 56 56 /* we will fill this structure from the stub, so don't put it in .bss */ 57 - struct screen_info screen_info __section(.data); 57 + struct screen_info screen_info __section(".data"); 58 58 59 59 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) 60 60 {
+1 -1
arch/arm64/kernel/smp_spin_table.c
··· 19 19 #include <asm/smp_plat.h> 20 20 21 21 extern void secondary_holding_pen(void); 22 - volatile unsigned long __section(.mmuoff.data.read) 22 + volatile unsigned long __section(".mmuoff.data.read") 23 23 secondary_holding_pen_release = INVALID_HWID; 24 24 25 25 static phys_addr_t cpu_release_addr[NR_CPUS];
+1 -1
arch/arm64/mm/mmu.c
··· 43 43 u64 idmap_t0sz = TCR_T0SZ(VA_BITS); 44 44 u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; 45 45 46 - u64 __section(.mmuoff.data.write) vabits_actual; 46 + u64 __section(".mmuoff.data.write") vabits_actual; 47 47 EXPORT_SYMBOL(vabits_actual); 48 48 49 49 u64 kimage_voffset __ro_after_init;
+4 -4
arch/csky/include/asm/tcm.h
··· 10 10 #include <linux/compiler.h> 11 11 12 12 /* Tag variables with this */ 13 - #define __tcmdata __section(.tcm.data) 13 + #define __tcmdata __section(".tcm.data") 14 14 /* Tag constants with this */ 15 - #define __tcmconst __section(.tcm.rodata) 15 + #define __tcmconst __section(".tcm.rodata") 16 16 /* Tag functions inside TCM called from outside TCM with this */ 17 - #define __tcmfunc __section(.tcm.text) noinline 17 + #define __tcmfunc __section(".tcm.text") noinline 18 18 /* Tag function inside TCM called from inside TCM with this */ 19 - #define __tcmlocalfunc __section(.tcm.text) 19 + #define __tcmlocalfunc __section(".tcm.text") 20 20 21 21 void *tcm_alloc(size_t len); 22 22 void tcm_free(void *addr, size_t len);
+1 -1
arch/ia64/include/asm/cache.h
··· 25 25 # define SMP_CACHE_BYTES (1 << 3) 26 26 #endif 27 27 28 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 28 + #define __read_mostly __section(".data..read_mostly") 29 29 30 30 #endif /* _ASM_IA64_CACHE_H */
+1 -1
arch/microblaze/kernel/setup.c
··· 46 46 * ASM code. Default position is BSS section which is cleared 47 47 * in machine_early_init(). 48 48 */ 49 - char cmd_line[COMMAND_LINE_SIZE] __attribute__ ((section(".data"))); 49 + char cmd_line[COMMAND_LINE_SIZE] __section(".data"); 50 50 51 51 void __init setup_arch(char **cmdline_p) 52 52 {
+1 -1
arch/mips/include/asm/cache.h
··· 14 14 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT 15 15 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 16 16 17 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 17 + #define __read_mostly __section(".data..read_mostly") 18 18 19 19 #endif /* _ASM_CACHE_H */
+1 -1
arch/mips/include/asm/machine.h
··· 23 23 24 24 #define MIPS_MACHINE(name) \ 25 25 static const struct mips_machine __mips_mach_##name \ 26 - __used __section(.mips.machines.init) 26 + __used __section(".mips.machines.init") 27 27 28 28 #define for_each_mips_machine(mach) \ 29 29 for ((mach) = (struct mips_machine *)&__mips_machines_start; \
+1 -1
arch/mips/kernel/setup.c
··· 44 44 #include <asm/prom.h> 45 45 46 46 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB 47 - const char __section(.appended_dtb) __appended_dtb[0x100000]; 47 + const char __section(".appended_dtb") __appended_dtb[0x100000]; 48 48 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */ 49 49 50 50 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
+1 -1
arch/mips/mm/init.c
··· 569 569 * size, and waste space. So we place it in its own section and align 570 570 * it in the linker script. 571 571 */ 572 - pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); 572 + pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); 573 573 #ifndef __PAGETABLE_PUD_FOLDED 574 574 pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; 575 575 #endif
+1 -1
arch/parisc/include/asm/cache.h
··· 22 22 23 23 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES 24 24 25 - #define __read_mostly __section(.data..read_mostly) 25 + #define __read_mostly __section(".data..read_mostly") 26 26 27 27 void parisc_cache_init(void); /* initializes cache-flushing */ 28 28 void disable_sr_hashing_asm(int); /* low level support for above */
+1 -1
arch/parisc/include/asm/ldcw.h
··· 52 52 }) 53 53 54 54 #ifdef CONFIG_SMP 55 - # define __lock_aligned __section(.data..lock_aligned) 55 + # define __lock_aligned __section(".data..lock_aligned") 56 56 #endif 57 57 58 58 #endif /* __PARISC_LDCW_H */
+1 -1
arch/parisc/kernel/ftrace.c
··· 21 21 #include <asm/ftrace.h> 22 22 #include <asm/patch.h> 23 23 24 - #define __hot __attribute__ ((__section__ (".text.hot"))) 24 + #define __hot __section(".text.hot") 25 25 26 26 #ifdef CONFIG_FUNCTION_GRAPH_TRACER 27 27 /*
+3 -3
arch/parisc/mm/init.c
··· 42 42 * guarantee that global objects will be laid out in memory in the same order 43 43 * as the order of declaration, so put these in different sections and use 44 44 * the linker script to order them. */ 45 - pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE))); 45 + pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE))); 46 46 #endif 47 47 48 - pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE))); 49 - pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE))); 48 + pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE))); 49 + pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE))); 50 50 51 51 static struct resource data_resource = { 52 52 .name = "Kernel data",
+1 -1
arch/powerpc/include/asm/cache.h
··· 97 97 98 98 #endif 99 99 100 - #define __read_mostly __section(.data..read_mostly) 100 + #define __read_mostly __section(".data..read_mostly") 101 101 102 102 #ifdef CONFIG_PPC_BOOK3S_32 103 103 extern long _get_L2CR(void);
+1 -1
arch/powerpc/include/asm/machdep.h
··· 232 232 extern struct machdep_calls ppc_md; 233 233 extern struct machdep_calls *machine_id; 234 234 235 - #define __machine_desc __attribute__ ((__section__ (".machine.desc"))) 235 + #define __machine_desc __section(".machine.desc") 236 236 237 237 #define define_machine(name) \ 238 238 extern struct machdep_calls mach_##name; \
+1 -1
arch/powerpc/kernel/btext.c
··· 26 26 static void scrollscreen(void); 27 27 #endif 28 28 29 - #define __force_data __section(.data) 29 + #define __force_data __section(".data") 30 30 31 31 static int g_loc_X __force_data; 32 32 static int g_loc_Y __force_data;
+1 -1
arch/powerpc/kernel/prom_init.c
··· 45 45 #include <linux/linux_logo.h> 46 46 47 47 /* All of prom_init bss lives here */ 48 - #define __prombss __section(.bss.prominit) 48 + #define __prombss __section(".bss.prominit") 49 49 50 50 /* 51 51 * Eventually bump that one up
+1 -1
arch/powerpc/kvm/book3s_64_vio_hv.c
··· 32 32 #ifdef CONFIG_BUG 33 33 34 34 #define WARN_ON_ONCE_RM(condition) ({ \ 35 - static bool __section(.data.unlikely) __warned; \ 35 + static bool __section(".data.unlikely") __warned; \ 36 36 int __ret_warn_once = !!(condition); \ 37 37 \ 38 38 if (unlikely(__ret_warn_once && !__warned)) { \
+2 -2
arch/riscv/include/asm/soc.h
··· 13 13 14 14 #define SOC_EARLY_INIT_DECLARE(name, compat, fn) \ 15 15 static const struct of_device_id __soc_early_init__##name \ 16 - __used __section(__soc_early_init_table) \ 16 + __used __section("__soc_early_init_table") \ 17 17 = { .compatible = compat, .data = fn } 18 18 19 19 void soc_early_init(void); ··· 46 46 } \ 47 47 \ 48 48 static const struct soc_builtin_dtb __soc_builtin_dtb__##name \ 49 - __used __section(__soc_builtin_dtb_table) = \ 49 + __used __section("__soc_builtin_dtb_table") = \ 50 50 { \ 51 51 .vendor_id = vendor, \ 52 52 .arch_id = arch, \
+2 -2
arch/riscv/kernel/cpu_ops.c
··· 15 15 16 16 const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; 17 17 18 - void *__cpu_up_stack_pointer[NR_CPUS] __section(.data); 19 - void *__cpu_up_task_pointer[NR_CPUS] __section(.data); 18 + void *__cpu_up_stack_pointer[NR_CPUS] __section(".data"); 19 + void *__cpu_up_task_pointer[NR_CPUS] __section(".data"); 20 20 21 21 extern const struct cpu_operations cpu_ops_sbi; 22 22 extern const struct cpu_operations cpu_ops_spinwait;
+2 -2
arch/riscv/kernel/setup.c
··· 32 32 #include "head.h" 33 33 34 34 #if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI) 35 - struct screen_info screen_info __section(.data) = { 35 + struct screen_info screen_info __section(".data") = { 36 36 .orig_video_lines = 30, 37 37 .orig_video_cols = 80, 38 38 .orig_video_mode = 0, ··· 47 47 * This is used before the kernel initializes the BSS so it can't be in the 48 48 * BSS. 49 49 */ 50 - atomic_t hart_lottery __section(.sdata); 50 + atomic_t hart_lottery __section(".sdata"); 51 51 unsigned long boot_cpu_hartid; 52 52 static DEFINE_PER_CPU(struct cpu, cpu_devices); 53 53
+1 -1
arch/s390/boot/startup.c
··· 46 46 .diag0c = _diag0c_dma, 47 47 .diag308_reset = _diag308_reset_dma 48 48 }; 49 - static struct diag210 _diag210_tmp_dma __section(.dma.data); 49 + static struct diag210 _diag210_tmp_dma __section(".dma.data"); 50 50 struct diag210 *__bootdata_preserved(__diag210_tmp_dma) = &_diag210_tmp_dma; 51 51 52 52 void error(char *x)
+1 -1
arch/s390/include/asm/cache.h
··· 14 14 #define L1_CACHE_SHIFT 8 15 15 #define NET_SKB_PAD 32 16 16 17 - #define __read_mostly __section(.data..read_mostly) 17 + #define __read_mostly __section(".data..read_mostly") 18 18 19 19 #endif
+2 -2
arch/s390/include/asm/sections.h
··· 26 26 * final .boot.data section, which should be identical in the decompressor and 27 27 * the decompressed kernel (that is checked during the build). 28 28 */ 29 - #define __bootdata(var) __section(.boot.data.var) var 29 + #define __bootdata(var) __section(".boot.data.var") var 30 30 31 31 /* 32 32 * .boot.preserved.data is similar to .boot.data, but it is not part of the 33 33 * .init section and thus will be preserved for later use in the decompressed 34 34 * kernel. 35 35 */ 36 - #define __bootdata_preserved(var) __section(.boot.preserved.data.var) var 36 + #define __bootdata_preserved(var) __section(".boot.preserved.data.var") var 37 37 38 38 extern unsigned long __sdma, __edma; 39 39 extern unsigned long __stext_dma, __etext_dma;
+1 -1
arch/s390/mm/init.c
··· 48 48 #include <asm/uv.h> 49 49 #include <linux/virtio_config.h> 50 50 51 - pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir); 51 + pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); 52 52 53 53 unsigned long empty_zero_page, zero_page_mask; 54 54 EXPORT_SYMBOL(empty_zero_page);
+1 -1
arch/sh/boards/of-generic.c
··· 49 49 50 50 extern const struct of_cpu_method __cpu_method_of_table[]; 51 51 const struct of_cpu_method __cpu_method_of_table_sentinel 52 - __section(__cpu_method_of_table_end); 52 + __section("__cpu_method_of_table_end"); 53 53 54 54 static void sh_of_smp_probe(void) 55 55 {
+1 -1
arch/sh/include/asm/cache.h
··· 14 14 15 15 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 16 16 17 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 17 + #define __read_mostly __section(".data..read_mostly") 18 18 19 19 #ifndef __ASSEMBLY__ 20 20 struct cache_info {
+1 -1
arch/sh/include/asm/machvec.h
··· 36 36 #define get_system_type() sh_mv.mv_name 37 37 38 38 #define __initmv \ 39 - __used __section(.machvec.init) 39 + __used __section(".machvec.init") 40 40 41 41 #endif /* _ASM_SH_MACHVEC_H */
+1 -1
arch/sh/include/asm/smp.h
··· 71 71 72 72 #define CPU_METHOD_OF_DECLARE(name, _method, _ops) \ 73 73 static const struct of_cpu_method __cpu_method_of_table_##name \ 74 - __used __section(__cpu_method_of_table) \ 74 + __used __section("__cpu_method_of_table") \ 75 75 = { .method = _method, .ops = _ops } 76 76 77 77 #else
+1 -1
arch/sparc/include/asm/cache.h
··· 21 21 22 22 #define SMP_CACHE_BYTES (1 << SMP_CACHE_BYTES_SHIFT) 23 23 24 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 24 + #define __read_mostly __section(".data..read_mostly") 25 25 26 26 #endif /* !(_SPARC_CACHE_H) */
+1 -1
arch/sparc/kernel/btext.c
··· 24 24 static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); 25 25 static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); 26 26 27 - #define __force_data __attribute__((__section__(".data"))) 27 + #define __force_data __section(".data") 28 28 29 29 static int g_loc_X __force_data; 30 30 static int g_loc_Y __force_data;
+11 -11
arch/um/include/shared/init.h
··· 45 45 46 46 /* These are for everybody (although not all archs will actually 47 47 discard it in modules) */ 48 - #define __init __section(.init.text) 49 - #define __initdata __section(.init.data) 50 - #define __exitdata __section(.exit.data) 51 - #define __exit_call __used __section(.exitcall.exit) 48 + #define __init __section(".init.text") 49 + #define __initdata __section(".init.data") 50 + #define __exitdata __section(".exit.data") 51 + #define __exit_call __used __section(".exitcall.exit") 52 52 53 53 #ifdef MODULE 54 - #define __exit __section(.exit.text) 54 + #define __exit __section(".exit.text") 55 55 #else 56 - #define __exit __used __section(.exit.text) 56 + #define __exit __used __section(".exit.text") 57 57 #endif 58 58 59 59 #endif ··· 102 102 * Mark functions and data as being only used at initialization 103 103 * or exit time. 104 104 */ 105 - #define __uml_init_setup __used __section(.uml.setup.init) 106 - #define __uml_setup_help __used __section(.uml.help.init) 107 - #define __uml_postsetup_call __used __section(.uml.postsetup.init) 108 - #define __uml_exit_call __used __section(.uml.exitcall.exit) 105 + #define __uml_init_setup __used __section(".uml.setup.init") 106 + #define __uml_setup_help __used __section(".uml.help.init") 107 + #define __uml_postsetup_call __used __section(".uml.postsetup.init") 108 + #define __uml_exit_call __used __section(".uml.exitcall.exit") 109 109 110 110 #ifdef __UM_HOST__ 111 111 ··· 120 120 121 121 #define __exitcall(fn) static exitcall_t __exitcall_##fn __exit_call = fn 122 122 123 - #define __init_call __used __section(.initcall.init) 123 + #define __init_call __used __section(".initcall.init") 124 124 125 125 #endif 126 126
+1 -1
arch/um/kernel/skas/clone.c
··· 21 21 * on some systems. 22 22 */ 23 23 24 - void __attribute__ ((__section__ (".__syscall_stub"))) 24 + void __section(".__syscall_stub") 25 25 stub_clone_handler(void) 26 26 { 27 27 struct stub_data *data = (struct stub_data *) STUB_DATA;
+1 -1
arch/um/kernel/um_arch.c
··· 52 52 }; 53 53 54 54 union thread_union cpu0_irqstack 55 - __attribute__((__section__(".data..init_irqstack"))) = 55 + __section(".data..init_irqstack") = 56 56 { .thread_info = INIT_THREAD_INFO(init_task) }; 57 57 58 58 /* Changed in setup_arch, which is called in early boot */
+4 -4
arch/x86/boot/compressed/pgtable_64.c
··· 10 10 11 11 #ifdef CONFIG_X86_5LEVEL 12 12 /* __pgtable_l5_enabled needs to be in .data to avoid being cleared along with .bss */ 13 - unsigned int __section(.data) __pgtable_l5_enabled; 14 - unsigned int __section(.data) pgdir_shift = 39; 15 - unsigned int __section(.data) ptrs_per_p4d = 1; 13 + unsigned int __section(".data") __pgtable_l5_enabled; 14 + unsigned int __section(".data") pgdir_shift = 39; 15 + unsigned int __section(".data") ptrs_per_p4d = 1; 16 16 #endif 17 17 18 18 struct paging_config { ··· 30 30 * Avoid putting the pointer into .bss as it will be cleared between 31 31 * paging_prepare() and extract_kernel(). 32 32 */ 33 - unsigned long *trampoline_32bit __section(.data); 33 + unsigned long *trampoline_32bit __section(".data"); 34 34 35 35 extern struct boot_params *boot_params; 36 36 int cmdline_find_option_bool(const char *option);
+4 -4
arch/x86/boot/tty.c
··· 25 25 * error during initialization. 26 26 */ 27 27 28 - static void __attribute__((section(".inittext"))) serial_putchar(int ch) 28 + static void __section(".inittext") serial_putchar(int ch) 29 29 { 30 30 unsigned timeout = 0xffff; 31 31 ··· 35 35 outb(ch, early_serial_base + TXR); 36 36 } 37 37 38 - static void __attribute__((section(".inittext"))) bios_putchar(int ch) 38 + static void __section(".inittext") bios_putchar(int ch) 39 39 { 40 40 struct biosregs ireg; 41 41 ··· 47 47 intcall(0x10, &ireg, NULL); 48 48 } 49 49 50 - void __attribute__((section(".inittext"))) putchar(int ch) 50 + void __section(".inittext") putchar(int ch) 51 51 { 52 52 if (ch == '\n') 53 53 putchar('\r'); /* \n -> \r\n */ ··· 58 58 serial_putchar(ch); 59 59 } 60 60 61 - void __attribute__((section(".inittext"))) puts(const char *str) 61 + void __section(".inittext") puts(const char *str) 62 62 { 63 63 while (*str) 64 64 putchar(*str++);
+1 -1
arch/x86/boot/video.h
··· 78 78 u16 xmode_n; /* Size of unprobed mode range */ 79 79 }; 80 80 81 - #define __videocard struct card_info __attribute__((used,section(".videocards"))) 81 + #define __videocard struct card_info __section(".videocards") __attribute__((used)) 82 82 extern struct card_info video_cards[], video_cards_end[]; 83 83 84 84 int mode_defined(u16 mode); /* video.c */
+2 -2
arch/x86/include/asm/apic.h
··· 374 374 #define apic_driver(sym) \ 375 375 static const struct apic *__apicdrivers_##sym __used \ 376 376 __aligned(sizeof(struct apic *)) \ 377 - __section(.apicdrivers) = { &sym } 377 + __section(".apicdrivers") = { &sym } 378 378 379 379 #define apic_drivers(sym1, sym2) \ 380 380 static struct apic *__apicdrivers_##sym1##sym2[2] __used \ 381 381 __aligned(sizeof(struct apic *)) \ 382 - __section(.apicdrivers) = { &sym1, &sym2 } 382 + __section(".apicdrivers") = { &sym1, &sym2 } 383 383 384 384 extern struct apic *__apicdrivers[], *__apicdrivers_end[]; 385 385
+1 -1
arch/x86/include/asm/cache.h
··· 8 8 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT) 9 9 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) 10 10 11 - #define __read_mostly __attribute__((__section__(".data..read_mostly"))) 11 + #define __read_mostly __section(".data..read_mostly") 12 12 13 13 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT 14 14 #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
+1 -1
arch/x86/include/asm/intel-mid.h
··· 43 43 44 44 #define sfi_device(i) \ 45 45 static const struct devs_id *const __intel_mid_sfi_##i##_dev __used \ 46 - __attribute__((__section__(".x86_intel_mid_dev.init"))) = &i 46 + __section(".x86_intel_mid_dev.init") = &i 47 47 48 48 /** 49 49 * struct mid_sd_board_info - template for SD device creation
+1 -1
arch/x86/include/asm/irqflags.h
··· 9 9 #include <asm/nospec-branch.h> 10 10 11 11 /* Provide __cpuidle; we can't safely include <linux/cpu.h> */ 12 - #define __cpuidle __attribute__((__section__(".cpuidle.text"))) 12 + #define __cpuidle __section(".cpuidle.text") 13 13 14 14 /* 15 15 * Interrupt control:
+1 -1
arch/x86/include/asm/mem_encrypt.h
··· 54 54 bool sev_active(void); 55 55 bool sev_es_active(void); 56 56 57 - #define __bss_decrypted __attribute__((__section__(".bss..decrypted"))) 57 + #define __bss_decrypted __section(".bss..decrypted") 58 58 59 59 #else /* !CONFIG_AMD_MEM_ENCRYPT */ 60 60
+1 -1
arch/x86/include/asm/setup.h
··· 119 119 * executable.) 120 120 */ 121 121 #define RESERVE_BRK(name,sz) \ 122 - static void __section(.discard.text) __used notrace \ 122 + static void __section(".discard.text") __used notrace \ 123 123 __brk_reservation_fn_##name##__(void) { \ 124 124 asm volatile ( \ 125 125 ".pushsection .brk_reservation,\"aw\",@nobits;" \
+1 -1
arch/x86/kernel/cpu/cpu.h
··· 38 38 39 39 #define cpu_dev_register(cpu_devX) \ 40 40 static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \ 41 - __attribute__((__section__(".x86_cpu_dev.init"))) = \ 41 + __section(".x86_cpu_dev.init") = \ 42 42 &cpu_devX; 43 43 44 44 extern const struct cpu_dev *const __x86_cpu_dev_start[],
+1 -1
arch/x86/kernel/head64.c
··· 84 84 .address = 0, 85 85 }; 86 86 87 - #define __head __section(.head.text) 87 + #define __head __section(".head.text") 88 88 89 89 static void __head *fixup_pointer(void *ptr, unsigned long physaddr) 90 90 {
+3 -3
arch/x86/mm/mem_encrypt.c
··· 37 37 * reside in the .data section so as not to be zeroed out when the .bss 38 38 * section is later cleared. 39 39 */ 40 - u64 sme_me_mask __section(.data) = 0; 41 - u64 sev_status __section(.data) = 0; 40 + u64 sme_me_mask __section(".data") = 0; 41 + u64 sev_status __section(".data") = 0; 42 42 EXPORT_SYMBOL(sme_me_mask); 43 43 DEFINE_STATIC_KEY_FALSE(sev_enable_key); 44 44 EXPORT_SYMBOL_GPL(sev_enable_key); 45 45 46 - bool sev_enabled __section(.data); 46 + bool sev_enabled __section(".data"); 47 47 48 48 /* Buffer used for early in-place encryption by BSP, no locking needed */ 49 49 static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
+1 -1
arch/x86/mm/mem_encrypt_identity.c
··· 81 81 * section is 2MB aligned to allow for simple pagetable setup using only 82 82 * PMD entries (see vmlinux.lds.S). 83 83 */ 84 - static char sme_workarea[2 * PMD_PAGE_SIZE] __section(.init.scratch); 84 + static char sme_workarea[2 * PMD_PAGE_SIZE] __section(".init.scratch"); 85 85 86 86 static char sme_cmdline_arg[] __initdata = "mem_encrypt"; 87 87 static char sme_cmdline_on[] __initdata = "on";
+2 -2
arch/x86/platform/pvh/enlighten.c
··· 19 19 * pvh_bootparams and pvh_start_info need to live in the data segment since 20 20 * they are used after startup_{32|64}, which clear .bss, are invoked. 21 21 */ 22 - struct boot_params pvh_bootparams __attribute__((section(".data"))); 23 - struct hvm_start_info pvh_start_info __attribute__((section(".data"))); 22 + struct boot_params pvh_bootparams __section(".data"); 23 + struct hvm_start_info pvh_start_info __section(".data"); 24 24 25 25 unsigned int pvh_start_info_sz = sizeof(pvh_start_info); 26 26
+2 -2
arch/x86/purgatory/purgatory.c
··· 14 14 15 15 #include "../boot/string.h" 16 16 17 - u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory); 17 + u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(".kexec-purgatory"); 18 18 19 - struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory); 19 + struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(".kexec-purgatory"); 20 20 21 21 static int verify_sha256_digest(void) 22 22 {
+1 -1
arch/x86/um/stub_segv.c
··· 8 8 #include <sysdep/mcontext.h> 9 9 #include <sys/ucontext.h> 10 10 11 - void __attribute__ ((__section__ (".__syscall_stub"))) 11 + void __section(".__syscall_stub") 12 12 stub_segv_handler(int sig, siginfo_t *info, void *p) 13 13 { 14 14 ucontext_t *uc = p;
+1 -1
arch/x86/xen/enlighten.c
··· 71 71 * NB: needs to live in .data because it's used by xen_prepare_pvh which runs 72 72 * before clearing the bss. 73 73 */ 74 - uint32_t xen_start_flags __attribute__((section(".data"))) = 0; 74 + uint32_t xen_start_flags __section(".data") = 0; 75 75 EXPORT_SYMBOL(xen_start_flags); 76 76 77 77 /*
+1 -1
arch/x86/xen/enlighten_pvh.c
··· 21 21 * The variable xen_pvh needs to live in the data segment since it is used 22 22 * after startup_{32|64} is invoked, which will clear the .bss segment. 23 23 */ 24 - bool xen_pvh __attribute__((section(".data"))) = 0; 24 + bool xen_pvh __section(".data") = 0; 25 25 26 26 void __init xen_pvh_init(struct boot_params *boot_params) 27 27 {
+1 -1
arch/xtensa/kernel/setup.c
··· 93 93 } tagtable_t; 94 94 95 95 #define __tagtable(tag, fn) static tagtable_t __tagtable_##fn \ 96 - __attribute__((used, section(".taglist"))) = { tag, fn } 96 + __section(".taglist") __attribute__((used)) = { tag, fn } 97 97 98 98 /* parse current tag */ 99 99
+1 -1
drivers/clk/clk.c
··· 4363 4363 4364 4364 extern struct of_device_id __clk_of_table; 4365 4365 static const struct of_device_id __clk_of_table_sentinel 4366 - __used __section(__clk_of_table_end); 4366 + __used __section("__clk_of_table_end"); 4367 4367 4368 4368 static LIST_HEAD(of_clk_providers); 4369 4369 static DEFINE_MUTEX(of_clk_mutex);
+1 -1
drivers/clocksource/timer-probe.c
··· 11 11 extern struct of_device_id __timer_of_table[]; 12 12 13 13 static const struct of_device_id __timer_of_table_sentinel 14 - __used __section(__timer_of_table_end); 14 + __used __section("__timer_of_table_end"); 15 15 16 16 void __init timer_probe(void) 17 17 {
+1 -1
drivers/irqchip/irqchip.c
··· 22 22 * special section. 23 23 */ 24 24 static const struct of_device_id 25 - irqchip_of_match_end __used __section(__irqchip_of_table_end); 25 + irqchip_of_match_end __used __section("__irqchip_of_table_end"); 26 26 27 27 extern struct of_device_id __irqchip_of_table[]; 28 28
+1 -1
drivers/of/of_reserved_mem.c
··· 162 162 } 163 163 164 164 static const struct of_device_id __rmem_of_table_sentinel 165 - __used __section(__reservedmem_of_table_end); 165 + __used __section("__reservedmem_of_table_end"); 166 166 167 167 /** 168 168 * __reserved_mem_init_node() - call region specific reserved memory init code
+1 -1
drivers/thermal/thermal_core.h
··· 34 34 35 35 #define THERMAL_TABLE_ENTRY(table, name) \ 36 36 static typeof(name) *__thermal_table_entry_##name \ 37 - __used __section(__##table##_thermal_table) = &name 37 + __used __section("__" #table "_thermal_table") = &name 38 38 39 39 #define THERMAL_GOVERNOR_DECLARE(name) THERMAL_TABLE_ENTRY(governor, name) 40 40
+1 -1
fs/xfs/xfs_message.h
··· 42 42 43 43 #define xfs_printk_once(func, dev, fmt, ...) \ 44 44 ({ \ 45 - static bool __section(.data.once) __print_once; \ 45 + static bool __section(".data.once") __print_once; \ 46 46 bool __ret_print_once = !__print_once; \ 47 47 \ 48 48 if (!__print_once) { \
+3 -3
include/asm-generic/bug.h
··· 141 141 142 142 #ifndef WARN_ON_ONCE 143 143 #define WARN_ON_ONCE(condition) ({ \ 144 - static bool __section(.data.once) __warned; \ 144 + static bool __section(".data.once") __warned; \ 145 145 int __ret_warn_once = !!(condition); \ 146 146 \ 147 147 if (unlikely(__ret_warn_once && !__warned)) { \ ··· 153 153 #endif 154 154 155 155 #define WARN_ONCE(condition, format...) ({ \ 156 - static bool __section(.data.once) __warned; \ 156 + static bool __section(".data.once") __warned; \ 157 157 int __ret_warn_once = !!(condition); \ 158 158 \ 159 159 if (unlikely(__ret_warn_once && !__warned)) { \ ··· 164 164 }) 165 165 166 166 #define WARN_TAINT_ONCE(condition, taint, format...) ({ \ 167 - static bool __section(.data.once) __warned; \ 167 + static bool __section(".data.once") __warned; \ 168 168 int __ret_warn_once = !!(condition); \ 169 169 \ 170 170 if (unlikely(__ret_warn_once && !__warned)) { \
+1 -1
include/asm-generic/error-injection.h
··· 25 25 */ 26 26 #define ALLOW_ERROR_INJECTION(fname, _etype) \ 27 27 static struct error_injection_entry __used \ 28 - __attribute__((__section__("_error_injection_whitelist"))) \ 28 + __section("_error_injection_whitelist") \ 29 29 _eil_addr_##fname = { \ 30 30 .addr = (unsigned long)fname, \ 31 31 .etype = EI_ETYPE_##_etype, \
+2 -2
include/asm-generic/kprobes.h
··· 10 10 */ 11 11 # define __NOKPROBE_SYMBOL(fname) \ 12 12 static unsigned long __used \ 13 - __attribute__((__section__("_kprobe_blacklist"))) \ 13 + __section("_kprobe_blacklist") \ 14 14 _kbl_addr_##fname = (unsigned long)fname; 15 15 # define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) 16 16 /* Use this to forbid a kprobes attach on very low level functions */ 17 - # define __kprobes __attribute__((__section__(".kprobes.text"))) 17 + # define __kprobes __section(".kprobes.text") 18 18 # define nokprobe_inline __always_inline 19 19 #else 20 20 # define NOKPROBE_SYMBOL(fname)
+1 -1
include/kunit/test.h
··· 288 288 static struct kunit_suite *unique_array[] = { __VA_ARGS__, NULL }; \ 289 289 kunit_test_suites_for_module(unique_array); \ 290 290 static struct kunit_suite **unique_suites \ 291 - __used __section(.kunit_test_suites) = unique_array 291 + __used __section(".kunit_test_suites") = unique_array 292 292 293 293 /** 294 294 * kunit_test_suites() - used to register one or more &struct kunit_suite
+2 -2
include/linux/acpi.h
··· 1153 1153 #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ 1154 1154 valid, data, fn) \ 1155 1155 static const struct acpi_probe_entry __acpi_probe_##name \ 1156 - __used __section(__##table##_acpi_probe_table) = { \ 1156 + __used __section("__" #table "_acpi_probe_table") = { \ 1157 1157 .id = table_id, \ 1158 1158 .type = subtable, \ 1159 1159 .subtable_valid = valid, \ ··· 1164 1164 #define ACPI_DECLARE_SUBTABLE_PROBE_ENTRY(table, name, table_id, \ 1165 1165 subtable, valid, data, fn) \ 1166 1166 static const struct acpi_probe_entry __acpi_probe_##name \ 1167 - __used __section(__##table##_acpi_probe_table) = { \ 1167 + __used __section("__" #table "_acpi_probe_table") = { \ 1168 1168 .id = table_id, \ 1169 1169 .type = subtable, \ 1170 1170 .subtable_valid = valid, \
+1 -1
include/linux/cache.h
··· 34 34 * but may get written to during init, so can't live in .rodata (via "const"). 35 35 */ 36 36 #ifndef __ro_after_init 37 - #define __ro_after_init __attribute__((__section__(".data..ro_after_init"))) 37 + #define __ro_after_init __section(".data..ro_after_init") 38 38 #endif 39 39 40 40 #ifndef ____cacheline_aligned
+4 -4
include/linux/compiler.h
··· 24 24 long ______r; \ 25 25 static struct ftrace_likely_data \ 26 26 __aligned(4) \ 27 - __section(_ftrace_annotated_branch) \ 27 + __section("_ftrace_annotated_branch") \ 28 28 ______f = { \ 29 29 .data.func = __func__, \ 30 30 .data.file = __FILE__, \ ··· 60 60 #define __trace_if_value(cond) ({ \ 61 61 static struct ftrace_branch_data \ 62 62 __aligned(4) \ 63 - __section(_ftrace_branch) \ 63 + __section("_ftrace_branch") \ 64 64 __if_trace = { \ 65 65 .func = __func__, \ 66 66 .file = __FILE__, \ ··· 118 118 ".popsection\n\t" 119 119 120 120 /* Annotate a C jump table to allow objtool to follow the code flow */ 121 - #define __annotate_jump_table __section(.rodata..c_jump_table) 121 + #define __annotate_jump_table __section(".rodata..c_jump_table") 122 122 123 123 #else 124 124 #define annotate_reachable() ··· 206 206 * visible to the compiler. 207 207 */ 208 208 #define __ADDRESSABLE(sym) \ 209 - static void * __section(.discard.addressable) __used \ 209 + static void * __section(".discard.addressable") __used \ 210 210 __UNIQUE_ID(__PASTE(__addressable_,sym)) = (void *)&sym; 211 211 212 212 /**
+1 -1
include/linux/compiler_attributes.h
··· 254 254 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute 255 255 * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate 256 256 */ 257 - #define __section(S) __attribute__((__section__(#S))) 257 + #define __section(section) __attribute__((__section__(section))) 258 258 259 259 /* 260 260 * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute
+1 -1
include/linux/cpu.h
··· 173 173 void cpu_idle_poll_ctrl(bool enable); 174 174 175 175 /* Attach to any functions which should be considered cpuidle. */ 176 - #define __cpuidle __attribute__((__section__(".cpuidle.text"))) 176 + #define __cpuidle __section(".cpuidle.text") 177 177 178 178 bool cpu_in_idle(unsigned long pc); 179 179
+1 -1
include/linux/dynamic_debug.h
··· 84 84 85 85 #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ 86 86 static struct _ddebug __aligned(8) \ 87 - __section(__dyndbg) name = { \ 87 + __section("__dyndbg") name = { \ 88 88 .modname = KBUILD_MODNAME, \ 89 89 .function = __func__, \ 90 90 .filename = __FILE__, \
+1 -1
include/linux/export.h
··· 130 130 * discarded in the final link stage. 131 131 */ 132 132 #define __ksym_marker(sym) \ 133 - static int __ksym_marker_##sym[0] __section(.discard.ksym) __used 133 + static int __ksym_marker_##sym[0] __section(".discard.ksym") __used 134 134 135 135 #define __EXPORT_SYMBOL(sym, sec, ns) \ 136 136 __ksym_marker(sym); \
+1 -1
include/linux/firmware.h
··· 36 36 37 37 #define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \ 38 38 static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \ 39 - __used __section(.builtin_fw) = { name, blob, size } 39 + __used __section(".builtin_fw") = { name, blob, size } 40 40 41 41 #if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE)) 42 42 int request_firmware(const struct firmware **fw, const char *name,
+17 -17
include/linux/init.h
··· 47 47 48 48 /* These are for everybody (although not all archs will actually 49 49 discard it in modules) */ 50 - #define __init __section(.init.text) __cold __latent_entropy __noinitretpoline 51 - #define __initdata __section(.init.data) 52 - #define __initconst __section(.init.rodata) 53 - #define __exitdata __section(.exit.data) 54 - #define __exit_call __used __section(.exitcall.exit) 50 + #define __init __section(".init.text") __cold __latent_entropy __noinitretpoline 51 + #define __initdata __section(".init.data") 52 + #define __initconst __section(".init.rodata") 53 + #define __exitdata __section(".exit.data") 54 + #define __exit_call __used __section(".exitcall.exit") 55 55 56 56 /* 57 57 * modpost check for section mismatches during the kernel build. ··· 70 70 * 71 71 * The markers follow same syntax rules as __init / __initdata. 72 72 */ 73 - #define __ref __section(.ref.text) noinline 74 - #define __refdata __section(.ref.data) 75 - #define __refconst __section(.ref.rodata) 73 + #define __ref __section(".ref.text") noinline 74 + #define __refdata __section(".ref.data") 75 + #define __refconst __section(".ref.rodata") 76 76 77 77 #ifdef MODULE 78 78 #define __exitused ··· 80 80 #define __exitused __used 81 81 #endif 82 82 83 - #define __exit __section(.exit.text) __exitused __cold notrace 83 + #define __exit __section(".exit.text") __exitused __cold notrace 84 84 85 85 /* Used for MEMORY_HOTPLUG */ 86 - #define __meminit __section(.meminit.text) __cold notrace \ 86 + #define __meminit __section(".meminit.text") __cold notrace \ 87 87 __latent_entropy 88 - #define __meminitdata __section(.meminit.data) 89 - #define __meminitconst __section(.meminit.rodata) 90 - #define __memexit __section(.memexit.text) __exitused __cold notrace 91 - #define __memexitdata __section(.memexit.data) 92 - #define __memexitconst __section(.memexit.rodata) 88 + #define __meminitdata __section(".meminit.data") 89 + #define __meminitconst __section(".meminit.rodata") 90 + #define __memexit __section(".memexit.text") __exitused __cold notrace 91 + #define __memexitdata __section(".memexit.data") 92 + #define __memexitconst __section(".memexit.rodata") 93 93 94 94 /* For assembly routines */ 95 95 #define __HEAD .section ".head.text","ax" ··· 254 254 static const char __setup_str_##unique_id[] __initconst \ 255 255 __aligned(1) = str; \ 256 256 static struct obs_kernel_param __setup_##unique_id \ 257 - __used __section(.init.setup) \ 257 + __used __section(".init.setup") \ 258 258 __attribute__((aligned((sizeof(long))))) \ 259 259 = { __setup_str_##unique_id, fn, early } 260 260 ··· 298 298 #endif 299 299 300 300 /* Data marked not to be saved by software suspend */ 301 - #define __nosavedata __section(.data..nosave) 301 + #define __nosavedata __section(".data..nosave") 302 302 303 303 #ifdef MODULE 304 304 #define __exit_p(x) x
+2 -2
include/linux/init_task.h
··· 40 40 41 41 /* Attach to the init_task data structure for proper alignment */ 42 42 #ifdef CONFIG_ARCH_TASK_STRUCT_ON_STACK 43 - #define __init_task_data __attribute__((__section__(".data..init_task"))) 43 + #define __init_task_data __section(".data..init_task") 44 44 #else 45 45 #define __init_task_data /**/ 46 46 #endif 47 47 48 48 /* Attach to the thread_info data structure for proper alignment */ 49 - #define __init_thread_info __attribute__((__section__(".data..init_thread_info"))) 49 + #define __init_thread_info __section(".data..init_thread_info") 50 50 51 51 #endif
+2 -2
include/linux/interrupt.h
··· 792 792 * We want to know which function is an entrypoint of a hardirq or a softirq. 793 793 */ 794 794 #ifndef __irq_entry 795 - # define __irq_entry __attribute__((__section__(".irqentry.text"))) 795 + # define __irq_entry __section(".irqentry.text") 796 796 #endif 797 797 798 - #define __softirq_entry __attribute__((__section__(".softirqentry.text"))) 798 + #define __softirq_entry __section(".softirqentry.text") 799 799 800 800 #endif
+3 -3
include/linux/kernel.h
··· 729 729 #define do_trace_printk(fmt, args...) \ 730 730 do { \ 731 731 static const char *trace_printk_fmt __used \ 732 - __attribute__((section("__trace_printk_fmt"))) = \ 732 + __section("__trace_printk_fmt") = \ 733 733 __builtin_constant_p(fmt) ? fmt : NULL; \ 734 734 \ 735 735 __trace_printk_check_format(fmt, ##args); \ ··· 773 773 774 774 #define trace_puts(str) ({ \ 775 775 static const char *trace_printk_fmt __used \ 776 - __attribute__((section("__trace_printk_fmt"))) = \ 776 + __section("__trace_printk_fmt") = \ 777 777 __builtin_constant_p(str) ? str : NULL; \ 778 778 \ 779 779 if (__builtin_constant_p(str)) \ ··· 795 795 do { \ 796 796 if (__builtin_constant_p(fmt)) { \ 797 797 static const char *trace_printk_fmt __used \ 798 - __attribute__((section("__trace_printk_fmt"))) = \ 798 + __section("__trace_printk_fmt") = \ 799 799 __builtin_constant_p(fmt) ? fmt : NULL; \ 800 800 \ 801 801 __ftrace_vbprintk(_THIS_IP_, trace_printk_fmt, vargs); \
+2 -2
include/linux/linkage.h
··· 36 36 __stringify(name)) 37 37 #endif 38 38 39 - #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE) 40 - #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE) 39 + #define __page_aligned_data __section(".data..page_aligned") __aligned(PAGE_SIZE) 40 + #define __page_aligned_bss __section(".bss..page_aligned") __aligned(PAGE_SIZE) 41 41 42 42 /* 43 43 * For assembly routines.
+2 -2
include/linux/lsm_hooks.h
··· 1611 1611 1612 1612 #define DEFINE_LSM(lsm) \ 1613 1613 static struct lsm_info __lsm_##lsm \ 1614 - __used __section(.lsm_info.init) \ 1614 + __used __section(".lsm_info.init") \ 1615 1615 __aligned(sizeof(unsigned long)) 1616 1616 1617 1617 #define DEFINE_EARLY_LSM(lsm) \ 1618 1618 static struct lsm_info __early_lsm_##lsm \ 1619 - __used __section(.early_lsm_info.init) \ 1619 + __used __section(".early_lsm_info.init") \ 1620 1620 __aligned(sizeof(unsigned long)) 1621 1621 1622 1622 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
+1 -1
include/linux/module.h
··· 278 278 .version = _version, \ 279 279 }; \ 280 280 static const struct module_version_attribute \ 281 - __used __attribute__ ((__section__ ("__modver"))) \ 281 + __used __section("__modver") \ 282 282 * __moduleparam_const __modver_attr = &___modver_attr 283 283 #endif 284 284
+2 -2
include/linux/moduleparam.h
··· 22 22 23 23 #define __MODULE_INFO(tag, name, info) \ 24 24 static const char __UNIQUE_ID(name)[] \ 25 - __used __attribute__((section(".modinfo"), unused, aligned(1))) \ 25 + __used __section(".modinfo") __attribute__((unused, aligned(1))) \ 26 26 = __MODULE_INFO_PREFIX __stringify(tag) "=" info 27 27 28 28 #define __MODULE_PARM_TYPE(name, _type) \ ··· 289 289 static const char __param_str_##name[] = prefix #name; \ 290 290 static struct kernel_param __moduleparam_const __param_##name \ 291 291 __used \ 292 - __attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \ 292 + __section("__param") __attribute__ ((unused, aligned(sizeof(void *)))) \ 293 293 = { __param_str_##name, THIS_MODULE, ops, \ 294 294 VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } } 295 295
+1 -1
include/linux/mtd/xip.h
··· 28 28 * those functions so they get relocated to ram. 29 29 */ 30 30 #ifdef CONFIG_XIP_KERNEL 31 - #define __xipram noinline __attribute__ ((__section__ (".xiptext"))) 31 + #define __xipram noinline __section(".xiptext") 32 32 #endif 33 33 34 34 /*
+1 -1
include/linux/objtool.h
··· 60 60 * For more information, see tools/objtool/Documentation/stack-validation.txt. 61 61 */ 62 62 #define STACK_FRAME_NON_STANDARD(func) \ 63 - static void __used __section(.discard.func_stack_frame_non_standard) \ 63 + static void __used __section(".discard.func_stack_frame_non_standard") \ 64 64 *__func_stack_frame_non_standard_##func = func 65 65 66 66 #else /* __ASSEMBLY__ */
+1 -1
include/linux/of.h
··· 1299 1299 #if defined(CONFIG_OF) && !defined(MODULE) 1300 1300 #define _OF_DECLARE(table, name, compat, fn, fn_type) \ 1301 1301 static const struct of_device_id __of_table_##name \ 1302 - __used __section(__##table##_of_table) \ 1302 + __used __section("__" #table "_of_table") \ 1303 1303 = { .compatible = compat, \ 1304 1304 .data = (fn == (fn_type)NULL) ? fn : fn } 1305 1305 #else
+1 -1
include/linux/percpu-defs.h
··· 51 51 PER_CPU_ATTRIBUTES 52 52 53 53 #define __PCPU_DUMMY_ATTRS \ 54 - __attribute__((section(".discard"), unused)) 54 + __section(".discard") __attribute__((unused)) 55 55 56 56 /* 57 57 * s390 and alpha modules require percpu variables to be defined as
+2 -2
include/linux/printk.h
··· 437 437 #ifdef CONFIG_PRINTK 438 438 #define printk_once(fmt, ...) \ 439 439 ({ \ 440 - static bool __section(.data.once) __print_once; \ 440 + static bool __section(".data.once") __print_once; \ 441 441 bool __ret_print_once = !__print_once; \ 442 442 \ 443 443 if (!__print_once) { \ ··· 448 448 }) 449 449 #define printk_deferred_once(fmt, ...) \ 450 450 ({ \ 451 - static bool __section(.data.once) __print_once; \ 451 + static bool __section(".data.once") __print_once; \ 452 452 bool __ret_print_once = !__print_once; \ 453 453 \ 454 454 if (!__print_once) { \
+1 -1
include/linux/rcupdate.h
··· 299 299 */ 300 300 #define RCU_LOCKDEP_WARN(c, s) \ 301 301 do { \ 302 - static bool __section(.data.unlikely) __warned; \ 302 + static bool __section(".data.unlikely") __warned; \ 303 303 if (debug_lockdep_rcu_enabled() && !__warned && (c)) { \ 304 304 __warned = true; \ 305 305 lockdep_rcu_suspicious(__FILE__, __LINE__, s); \
+1 -1
include/linux/sched/debug.h
··· 43 43 #endif 44 44 45 45 /* Attach to any functions which should be ignored in wchan output. */ 46 - #define __sched __attribute__((__section__(".sched.text"))) 46 + #define __sched __section(".sched.text") 47 47 48 48 /* Linker adds these: start and end of __sched functions */ 49 49 extern char __sched_text_start[], __sched_text_end[];
+1 -1
include/linux/serial_core.h
··· 373 373 .compatible = compat, \ 374 374 .setup = fn }; \ 375 375 static const struct earlycon_id EARLYCON_USED_OR_UNUSED \ 376 - __section(__earlycon_table) \ 376 + __section("__earlycon_table") \ 377 377 * const __PASTE(__p, unique_id) = &unique_id 378 378 379 379 #define OF_EARLYCON_DECLARE(_name, compat, fn) \
+1 -1
include/linux/spinlock.h
··· 76 76 #define LOCK_SECTION_END \ 77 77 ".previous\n\t" 78 78 79 - #define __lockfunc __attribute__((section(".spinlock.text"))) 79 + #define __lockfunc __section(".spinlock.text") 80 80 81 81 /* 82 82 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
+3 -3
include/linux/syscalls.h
··· 144 144 .flags = TRACE_EVENT_FL_CAP_ANY, \ 145 145 }; \ 146 146 static struct trace_event_call __used \ 147 - __attribute__((section("_ftrace_events"))) \ 147 + __section("_ftrace_events") \ 148 148 *__event_enter_##sname = &event_enter_##sname; 149 149 150 150 #define SYSCALL_TRACE_EXIT_EVENT(sname) \ ··· 160 160 .flags = TRACE_EVENT_FL_CAP_ANY, \ 161 161 }; \ 162 162 static struct trace_event_call __used \ 163 - __attribute__((section("_ftrace_events"))) \ 163 + __section("_ftrace_events") \ 164 164 *__event_exit_##sname = &event_exit_##sname; 165 165 166 166 #define SYSCALL_METADATA(sname, nb, ...) \ ··· 184 184 .enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \ 185 185 }; \ 186 186 static struct syscall_metadata __used \ 187 - __attribute__((section("__syscalls_metadata"))) \ 187 + __section("__syscalls_metadata") \ 188 188 *__p_syscall_meta_##sname = &__syscall_meta_##sname; 189 189 190 190 static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
+1 -1
include/linux/trace_events.h
··· 709 709 tracing_record_cmdline(current); \ 710 710 if (__builtin_constant_p(fmt)) { \ 711 711 static const char *trace_printk_fmt \ 712 - __attribute__((section("__trace_printk_fmt"))) = \ 712 + __section("__trace_printk_fmt") = \ 713 713 __builtin_constant_p(fmt) ? fmt : NULL; \ 714 714 \ 715 715 __trace_bprintk(ip, trace_printk_fmt, ##args); \
+4 -4
include/linux/tracepoint.h
··· 119 119 120 120 #define __TRACEPOINT_ENTRY(name) \ 121 121 static tracepoint_ptr_t __tracepoint_ptr_##name __used \ 122 - __section(__tracepoints_ptrs) = &__tracepoint_##name 122 + __section("__tracepoints_ptrs") = &__tracepoint_##name 123 123 #endif 124 124 125 125 #endif /* _LINUX_TRACEPOINT_H */ ··· 286 286 */ 287 287 #define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \ 288 288 static const char __tpstrtab_##_name[] \ 289 - __section(__tracepoints_strings) = #_name; \ 289 + __section("__tracepoints_strings") = #_name; \ 290 290 extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \ 291 291 int __traceiter_##_name(void *__data, proto); \ 292 292 struct tracepoint __tracepoint_##_name __used \ 293 - __section(__tracepoints) = { \ 293 + __section("__tracepoints") = { \ 294 294 .name = __tpstrtab_##_name, \ 295 295 .key = STATIC_KEY_INIT_FALSE, \ 296 296 .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \ ··· 396 396 static const char *___tp_str __tracepoint_string = str; \ 397 397 ___tp_str; \ 398 398 }) 399 - #define __tracepoint_string __used __section(__tracepoint_str) 399 + #define __tracepoint_string __used __section("__tracepoint_str") 400 400 #else 401 401 /* 402 402 * tracepoint_string() is used to save the string address for userspace
+1 -1
include/trace/bpf_probe.h
··· 79 79 struct bpf_raw_event_map event; \ 80 80 btf_trace_##call handler; \ 81 81 } __bpf_trace_tp_map_##call __used \ 82 - __attribute__((section("__bpf_raw_tp_map"))) = { \ 82 + __section("__bpf_raw_tp_map") = { \ 83 83 .event = { \ 84 84 .tp = &__tracepoint_##call, \ 85 85 .bpf_func = __bpf_trace_##template, \
+5 -5
include/trace/trace_events.h
··· 45 45 .eval_value = a \ 46 46 }; \ 47 47 static struct trace_eval_map __used \ 48 - __attribute__((section("_ftrace_eval_map"))) \ 48 + __section("_ftrace_eval_map") \ 49 49 *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a 50 50 51 51 #undef TRACE_DEFINE_SIZEOF ··· 58 58 .eval_value = sizeof(a) \ 59 59 }; \ 60 60 static struct trace_eval_map __used \ 61 - __attribute__((section("_ftrace_eval_map"))) \ 61 + __section("_ftrace_eval_map") \ 62 62 *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a 63 63 64 64 /* ··· 607 607 * // its only safe to use pointers when doing linker tricks to 608 608 * // create an array. 609 609 * static struct trace_event_call __used 610 - * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>; 610 + * __section("_ftrace_events") *__event_<call> = &event_<call>; 611 611 * 612 612 */ 613 613 ··· 755 755 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 756 756 }; \ 757 757 static struct trace_event_call __used \ 758 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 758 + __section("_ftrace_events") *__event_##call = &event_##call 759 759 760 760 #undef DEFINE_EVENT_PRINT 761 761 #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ ··· 772 772 .flags = TRACE_EVENT_FL_TRACEPOINT, \ 773 773 }; \ 774 774 static struct trace_event_call __used \ 775 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call 775 + __section("_ftrace_events") *__event_##call = &event_##call 776 776 777 777 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+2 -2
kernel/kallsyms.c
··· 40 40 * has one (eg: FRV). 41 41 */ 42 42 extern const unsigned int kallsyms_num_syms 43 - __attribute__((weak, section(".rodata"))); 43 + __section(".rodata") __attribute__((weak)); 44 44 45 45 extern const unsigned long kallsyms_relative_base 46 - __attribute__((weak, section(".rodata"))); 46 + __section(".rodata") __attribute__((weak)); 47 47 48 48 extern const char kallsyms_token_table[] __weak; 49 49 extern const u16 kallsyms_token_index[] __weak;
+1 -1
kernel/sched/deadline.c
··· 2504 2504 } 2505 2505 2506 2506 const struct sched_class dl_sched_class 2507 - __attribute__((section("__dl_sched_class"))) = { 2507 + __section("__dl_sched_class") = { 2508 2508 .enqueue_task = enqueue_task_dl, 2509 2509 .dequeue_task = dequeue_task_dl, 2510 2510 .yield_task = yield_task_dl,
+1 -1
kernel/sched/fair.c
··· 11159 11159 * All the scheduling class methods: 11160 11160 */ 11161 11161 const struct sched_class fair_sched_class 11162 - __attribute__((section("__fair_sched_class"))) = { 11162 + __section("__fair_sched_class") = { 11163 11163 .enqueue_task = enqueue_task_fair, 11164 11164 .dequeue_task = dequeue_task_fair, 11165 11165 .yield_task = yield_task_fair,
+1 -1
kernel/sched/idle.c
··· 458 458 * Simple, special scheduling class for the per-CPU idle tasks: 459 459 */ 460 460 const struct sched_class idle_sched_class 461 - __attribute__((section("__idle_sched_class"))) = { 461 + __section("__idle_sched_class") = { 462 462 /* no enqueue/yield_task for idle tasks */ 463 463 464 464 /* dequeue is not valid, we print a debug message there: */
+1 -1
kernel/sched/rt.c
··· 2430 2430 } 2431 2431 2432 2432 const struct sched_class rt_sched_class 2433 - __attribute__((section("__rt_sched_class"))) = { 2433 + __section("__rt_sched_class") = { 2434 2434 .enqueue_task = enqueue_task_rt, 2435 2435 .dequeue_task = dequeue_task_rt, 2436 2436 .yield_task = yield_task_rt,
+1 -1
kernel/sched/stop_task.c
··· 110 110 * Simple, special scheduling class for the per-CPU stop tasks: 111 111 */ 112 112 const struct sched_class stop_sched_class 113 - __attribute__((section("__stop_sched_class"))) = { 113 + __section("__stop_sched_class") = { 114 114 115 115 .enqueue_task = enqueue_task_stop, 116 116 .dequeue_task = dequeue_task_stop,
+1 -1
kernel/trace/trace.h
··· 99 99 100 100 /* Use this for memory failure errors */ 101 101 #define MEM_FAIL(condition, fmt, ...) ({ \ 102 - static bool __section(.data.once) __warned; \ 102 + static bool __section(".data.once") __warned; \ 103 103 int __ret_warn_once = !!(condition); \ 104 104 \ 105 105 if (unlikely(__ret_warn_once && !__warned)) { \
+1 -1
kernel/trace/trace_export.c
··· 176 176 .flags = TRACE_EVENT_FL_IGNORE_ENABLE, \ 177 177 }; \ 178 178 static struct trace_event_call __used \ 179 - __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call; 179 + __section("_ftrace_events") *__event_##call = &event_##call; 180 180 181 181 #undef FTRACE_ENTRY 182 182 #define FTRACE_ENTRY(call, struct_name, etype, tstruct, print) \
+2 -2
scripts/mod/modpost.c
··· 2254 2254 buf_printf(b, "MODULE_INFO(name, KBUILD_MODNAME);\n"); 2255 2255 buf_printf(b, "\n"); 2256 2256 buf_printf(b, "__visible struct module __this_module\n"); 2257 - buf_printf(b, "__section(.gnu.linkonce.this_module) = {\n"); 2257 + buf_printf(b, "__section(\".gnu.linkonce.this_module\") = {\n"); 2258 2258 buf_printf(b, "\t.name = KBUILD_MODNAME,\n"); 2259 2259 if (mod->has_init) 2260 2260 buf_printf(b, "\t.init = init_module,\n"); ··· 2308 2308 2309 2309 buf_printf(b, "\n"); 2310 2310 buf_printf(b, "static const struct modversion_info ____versions[]\n"); 2311 - buf_printf(b, "__used __section(__versions) = {\n"); 2311 + buf_printf(b, "__used __section(\"__versions\") = {\n"); 2312 2312 2313 2313 for (s = mod->unres; s; s = s->next) { 2314 2314 if (!s->module)
+1 -1
tools/include/linux/objtool.h
··· 60 60 * For more information, see tools/objtool/Documentation/stack-validation.txt. 61 61 */ 62 62 #define STACK_FRAME_NON_STANDARD(func) \ 63 - static void __used __section(.discard.func_stack_frame_non_standard) \ 63 + static void __used __section(".discard.func_stack_frame_non_standard") \ 64 64 *__func_stack_frame_non_standard_##func = func 65 65 66 66 #else /* __ASSEMBLY__ */