Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

FRV: Fix the section attribute on UP DECLARE_PER_CPU()

In non-SMP mode, the variable section attribute specified by DECLARE_PER_CPU()
does not agree with that specified by DEFINE_PER_CPU(). This means that
architectures that have a small data section referenced relative to a base
register may throw up linkage errors due to too great a displacement between
where the base register points and the per-CPU variable.

On FRV, the .h declaration says that the variable is in the .sdata section, but
the .c definition says it's actually in the .data section. The linker throws
up the following errors:

kernel/built-in.o: In function `release_task':
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o
kernel/exit.c:78: relocation truncated to fit: R_FRV_GPREL12 against symbol `per_cpu__process_counts' defined in .data section in kernel/built-in.o

To fix this, DECLARE_PER_CPU() should simply apply the same section attribute
as does DEFINE_PER_CPU(). However, this is made slightly more complex by
virtue of the fact that there are several variants on DEFINE, so these need to
be matched by variants on DECLARE.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

David Howells and committed by
Linus Torvalds
9b8de747 ccc5ff94

+50 -35
+1 -1
arch/alpha/include/asm/percpu.h
··· 73 73 74 74 #endif /* SMP */ 75 75 76 - #define DECLARE_PER_CPU(type, name) extern __typeof__(type) per_cpu_var(name) 76 + #include <asm-generic/percpu.h> 77 77 78 78 #endif /* __ALPHA_PERCPU_H */
+1 -1
arch/ia64/include/asm/smp.h
··· 58 58 extern char no_int_routing __devinitdata; 59 59 60 60 extern cpumask_t cpu_core_map[NR_CPUS]; 61 - DECLARE_PER_CPU(cpumask_t, cpu_sibling_map); 61 + DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map); 62 62 extern int smp_num_siblings; 63 63 extern void __iomem *ipi_base_addr; 64 64 extern unsigned char smp_int_redirect;
+1 -1
arch/x86/include/asm/desc.h
··· 37 37 struct gdt_page { 38 38 struct desc_struct gdt[GDT_ENTRIES]; 39 39 } __attribute__((aligned(PAGE_SIZE))); 40 - DECLARE_PER_CPU(struct gdt_page, gdt_page); 40 + DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); 41 41 42 42 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) 43 43 {
+1 -1
arch/x86/include/asm/hardirq.h
··· 26 26 #endif 27 27 } ____cacheline_aligned irq_cpustat_t; 28 28 29 - DECLARE_PER_CPU(irq_cpustat_t, irq_stat); 29 + DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); 30 30 31 31 /* We can have at most NR_VECTORS irqs routed to a cpu at a time */ 32 32 #define MAX_HARDIRQS_PER_CPU NR_VECTORS
+3 -3
arch/x86/include/asm/processor.h
··· 138 138 extern __u32 cleared_cpu_caps[NCAPINTS]; 139 139 140 140 #ifdef CONFIG_SMP 141 - DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info); 141 + DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 142 142 #define cpu_data(cpu) per_cpu(cpu_info, cpu) 143 143 #define current_cpu_data __get_cpu_var(cpu_info) 144 144 #else ··· 270 270 271 271 } ____cacheline_aligned; 272 272 273 - DECLARE_PER_CPU(struct tss_struct, init_tss); 273 + DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); 274 274 275 275 /* 276 276 * Save the original ist values for checking stack pointers during debugging ··· 393 393 }; 394 394 }; 395 395 396 - DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); 396 + DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union); 397 397 DECLARE_INIT_PER_CPU(irq_stack_union); 398 398 399 399 DECLARE_PER_CPU(char *, irq_stack_ptr);
+1 -1
arch/x86/include/asm/tlbflush.h
··· 152 152 struct mm_struct *active_mm; 153 153 int state; 154 154 }; 155 - DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate); 155 + DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate); 156 156 157 157 static inline void reset_lazy_tlbstate(void) 158 158 {
+41 -2
include/asm-generic/percpu.h
··· 73 73 74 74 #endif /* SMP */ 75 75 76 + #ifndef PER_CPU_BASE_SECTION 77 + #ifdef CONFIG_SMP 78 + #define PER_CPU_BASE_SECTION ".data.percpu" 79 + #else 80 + #define PER_CPU_BASE_SECTION ".data" 81 + #endif 82 + #endif 83 + 84 + #ifdef CONFIG_SMP 85 + 86 + #ifdef MODULE 87 + #define PER_CPU_SHARED_ALIGNED_SECTION "" 88 + #else 89 + #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" 90 + #endif 91 + #define PER_CPU_FIRST_SECTION ".first" 92 + 93 + #else 94 + 95 + #define PER_CPU_SHARED_ALIGNED_SECTION "" 96 + #define PER_CPU_FIRST_SECTION "" 97 + 98 + #endif 99 + 76 100 #ifndef PER_CPU_ATTRIBUTES 77 101 #define PER_CPU_ATTRIBUTES 78 102 #endif 79 103 80 - #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \ 81 - __typeof__(type) per_cpu_var(name) 104 + #define DECLARE_PER_CPU_SECTION(type, name, section) \ 105 + extern \ 106 + __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ 107 + PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name 108 + 109 + #define DECLARE_PER_CPU(type, name) \ 110 + DECLARE_PER_CPU_SECTION(type, name, "") 111 + 112 + #define DECLARE_PER_CPU_SHARED_ALIGNED(type, name) \ 113 + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \ 114 + ____cacheline_aligned_in_smp 115 + 116 + #define DECLARE_PER_CPU_PAGE_ALIGNED(type, name) \ 117 + DECLARE_PER_CPU_SECTION(type, name, ".page_aligned") 118 + 119 + #define DECLARE_PER_CPU_FIRST(type, name) \ 120 + DECLARE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION) 82 121 83 122 #endif /* _ASM_GENERIC_PERCPU_H_ */
-24
include/linux/percpu.h
··· 9 9 10 10 #include <asm/percpu.h> 11 11 12 - #ifndef PER_CPU_BASE_SECTION 13 - #ifdef CONFIG_SMP 14 - #define PER_CPU_BASE_SECTION ".data.percpu" 15 - #else 16 - #define PER_CPU_BASE_SECTION ".data" 17 - #endif 18 - #endif 19 - 20 - #ifdef CONFIG_SMP 21 - 22 - #ifdef MODULE 23 - #define PER_CPU_SHARED_ALIGNED_SECTION "" 24 - #else 25 - #define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned" 26 - #endif 27 - #define PER_CPU_FIRST_SECTION ".first" 28 - 29 - #else 30 - 31 - #define PER_CPU_SHARED_ALIGNED_SECTION "" 32 - #define PER_CPU_FIRST_SECTION "" 33 - 34 - #endif 35 - 36 12 #define DEFINE_PER_CPU_SECTION(type, name, section) \ 37 13 __attribute__((__section__(PER_CPU_BASE_SECTION section))) \ 38 14 PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+1 -1
net/rds/rds.h
··· 638 638 void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force); 639 639 640 640 /* stats.c */ 641 - DECLARE_PER_CPU(struct rds_statistics, rds_stats); 641 + DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats); 642 642 #define rds_stats_inc_which(which, member) do { \ 643 643 per_cpu(which, get_cpu()).member++; \ 644 644 put_cpu(); \