Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"Fixes for ARM, the most notable being the fix from Nathan Lynch to fix
the state of various registers during execve, ensuring that data
can't be leaked between two executables.

Fixes from Victor Kamensky for get_user() on big endian platforms,
since the addition of 8-byte get_user() support broke these fairly
badly.

A fix from Sudeep Holla for affinity setting when hotplugging CPU 0.

A fix from Stephen Boyd for a perf-induced sleep attempt while atomic.

Lastly, a correctness fix for emulation of the SWP instruction on
ARMv7+, and a fix for wrong carry handling when updating the
translation table base address on LPAE platforms"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: 8149/1: perf: Don't sleep while atomic when enabling per-cpu interrupts
ARM: 8148/1: flush TLS and thumbee register state during exec
ARM: 8151/1: add missing exports for asm functions required by get_user macro
ARM: 8137/1: fix get_user BE behavior for target variable with size of 8 bytes
ARM: 8135/1: Fix in-correct barrier usage in SWP{B} emulation
ARM: 8133/1: use irq_set_affinity with force=false when migrating irqs
ARM: 8132/1: LPAE: drop wrong carry flag correction after adding TTBR1_OFFSET

11 files changed: +154 -55

arch/arm/include/asm/tls.h  (+62)

···
  #ifndef __ASMARM_TLS_H
  #define __ASMARM_TLS_H
  
+ #include <linux/compiler.h>
+ #include <asm/thread_info.h>
+ 
  #ifdef __ASSEMBLY__
  #include <asm/asm-offsets.h>
  .macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
···
  #endif
  
  #ifndef __ASSEMBLY__
+ 
+ static inline void set_tls(unsigned long val)
+ {
+ 	struct thread_info *thread;
+ 
+ 	thread = current_thread_info();
+ 
+ 	thread->tp_value[0] = val;
+ 
+ 	/*
+ 	 * This code runs with preemption enabled and therefore must
+ 	 * be reentrant with respect to switch_tls.
+ 	 *
+ 	 * We need to ensure ordering between the shadow state and the
+ 	 * hardware state, so that we don't corrupt the hardware state
+ 	 * with a stale shadow state during context switch.
+ 	 *
+ 	 * If we're preempted here, switch_tls will load TPIDRURO from
+ 	 * thread_info upon resuming execution and the following mcr
+ 	 * is merely redundant.
+ 	 */
+ 	barrier();
+ 
+ 	if (!tls_emu) {
+ 		if (has_tls_reg) {
+ 			asm("mcr p15, 0, %0, c13, c0, 3"
+ 			    : : "r" (val));
+ 		} else {
+ 			/*
+ 			 * User space must never try to access this
+ 			 * directly. Expect your app to break
+ 			 * eventually if you do so. The user helper
+ 			 * at 0xffff0fe0 must be used instead. (see
+ 			 * entry-armv.S for details)
+ 			 */
+ 			*((unsigned int *)0xffff0ff0) = val;
+ 		}
+ 
+ 	}
+ }
+ 
  static inline unsigned long get_tpuser(void)
  {
  	unsigned long reg = 0;
···
  
  	return reg;
  }
+ 
+ static inline void set_tpuser(unsigned long val)
+ {
+ 	/* Since TPIDRURW is fully context-switched (unlike TPIDRURO),
+ 	 * we need not update thread_info.
+ 	 */
+ 	if (has_tls_reg && !tls_emu) {
+ 		asm("mcr p15, 0, %0, c13, c0, 2"
+ 		    : : "r" (val));
+ 	}
+ }
+ 
+ static inline void flush_tls(void)
+ {
+ 	set_tls(0);
+ 	set_tpuser(0);
+ }
+ 
  #endif
  #endif /* __ASMARM_TLS_H */
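
Taken together with the process.c and thumbee.c hunks further down, this is
what closes the execve leak mentioned in the pull message: the exec path now
resets both the shadow copies in thread_info and the hardware registers. As a
condensed sketch (assuming a CPU with the TLS registers and no TLS emulation;
the real header above also handles the tls_emu case and the software-TLS word
at 0xffff0ff0):

/* Condensed sketch only; assumes has_tls_reg && !tls_emu. */
static inline void set_tls(unsigned long val)
{
	current_thread_info()->tp_value[0] = val;	/* shadow copy */
	barrier();			/* order against switch_tls if preempted */
	asm("mcr p15, 0, %0, c13, c0, 3" : : "r" (val));	/* TPIDRURO */
}

static inline void flush_tls(void)
{
	set_tls(0);	/* user-visible TLS pointer */
	set_tpuser(0);	/* TPIDRURW */
}

flush_thread() calls flush_tls() on every exec, and the ThumbEE flush notifier
now clears the hardware handler base via teehbr_write(0) instead of only
zeroing the saved thumbee_state.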

arch/arm/include/asm/uaccess.h  (+39 -9)

···
  extern int __get_user_1(void *);
  extern int __get_user_2(void *);
  extern int __get_user_4(void *);
- extern int __get_user_lo8(void *);
+ extern int __get_user_32t_8(void *);
  extern int __get_user_8(void *);
+ extern int __get_user_64t_1(void *);
+ extern int __get_user_64t_2(void *);
+ extern int __get_user_64t_4(void *);
  
  #define __GUP_CLOBBER_1 "lr", "cc"
  #ifdef CONFIG_CPU_USE_DOMAINS
···
  #define __GUP_CLOBBER_2 "lr", "cc"
  #endif
  #define __GUP_CLOBBER_4 "lr", "cc"
- #define __GUP_CLOBBER_lo8 "lr", "cc"
+ #define __GUP_CLOBBER_32t_8 "lr", "cc"
  #define __GUP_CLOBBER_8 "lr", "cc"
  
  #define __get_user_x(__r2,__p,__e,__l,__s) \
···
  
  /* narrowing a double-word get into a single 32bit word register: */
  #ifdef __ARMEB__
- #define __get_user_xb(__r2, __p, __e, __l, __s) \
- 	__get_user_x(__r2, __p, __e, __l, lo8)
+ #define __get_user_x_32t(__r2, __p, __e, __l, __s) \
+ 	__get_user_x(__r2, __p, __e, __l, 32t_8)
  #else
- #define __get_user_xb __get_user_x
+ #define __get_user_x_32t __get_user_x
  #endif
+ 
+ /*
+  * storing result into proper least significant word of 64bit target var,
+  * different only for big endian case where 64 bit __r2 lsw is r3:
+  */
+ #ifdef __ARMEB__
+ #define __get_user_x_64t(__r2, __p, __e, __l, __s) \
+ 	__asm__ __volatile__ ( \
+ 		__asmeq("%0", "r0") __asmeq("%1", "r2") \
+ 		__asmeq("%3", "r1") \
+ 		"bl __get_user_64t_" #__s \
+ 		: "=&r" (__e), "=r" (__r2) \
+ 		: "0" (__p), "r" (__l) \
+ 		: __GUP_CLOBBER_##__s)
+ #else
+ #define __get_user_x_64t __get_user_x
+ #endif
+ 
  
  #define __get_user_check(x,p) \
  ({ \
···
  		register int __e asm("r0"); \
  		switch (sizeof(*(__p))) { \
  		case 1: \
- 			__get_user_x(__r2, __p, __e, __l, 1); \
+ 			if (sizeof((x)) >= 8) \
+ 				__get_user_x_64t(__r2, __p, __e, __l, 1); \
+ 			else \
+ 				__get_user_x(__r2, __p, __e, __l, 1); \
  			break; \
  		case 2: \
- 			__get_user_x(__r2, __p, __e, __l, 2); \
+ 			if (sizeof((x)) >= 8) \
+ 				__get_user_x_64t(__r2, __p, __e, __l, 2); \
+ 			else \
+ 				__get_user_x(__r2, __p, __e, __l, 2); \
  			break; \
  		case 4: \
- 			__get_user_x(__r2, __p, __e, __l, 4); \
+ 			if (sizeof((x)) >= 8) \
+ 				__get_user_x_64t(__r2, __p, __e, __l, 4); \
+ 			else \
+ 				__get_user_x(__r2, __p, __e, __l, 4); \
  			break; \
  		case 8: \
  			if (sizeof((x)) < 8) \
- 				__get_user_xb(__r2, __p, __e, __l, 4); \
+ 				__get_user_x_32t(__r2, __p, __e, __l, 4); \
  			else \
  				__get_user_x(__r2, __p, __e, __l, 8); \
  			break; \
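
For illustration, the case the new __get_user_x_64t path handles is a
destination variable that is wider than the user-space object being read.
A hypothetical caller (the function and names here are invented for the
example, they are not part of the patch):

#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical example: 64-bit kernel destination, 32-bit user source.
 * sizeof(*dst) is 8 while sizeof(*src) is 4, so a big-endian build must
 * place the 32-bit result in the least significant word of the r2/r3
 * pair (r3 on BE), which is what __get_user_64t_4 arranges. */
static int read_u32_wide(u32 __user *src, u64 *dst)
{
	return get_user(*dst, src);	/* 0 on success, -EFAULT on fault */
}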

arch/arm/kernel/armksyms.c  (+8)

···
  EXPORT_SYMBOL(__get_user_1);
  EXPORT_SYMBOL(__get_user_2);
  EXPORT_SYMBOL(__get_user_4);
+ EXPORT_SYMBOL(__get_user_8);
+ 
+ #ifdef __ARMEB__
+ EXPORT_SYMBOL(__get_user_64t_1);
+ EXPORT_SYMBOL(__get_user_64t_2);
+ EXPORT_SYMBOL(__get_user_64t_4);
+ EXPORT_SYMBOL(__get_user_32t_8);
+ #endif
  
  EXPORT_SYMBOL(__put_user_1);
  EXPORT_SYMBOL(__put_user_2);
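
These exports matter because get_user() expands to out-of-line calls to the
helpers above, so modular code reading a 64-bit value from user space would
otherwise fail to resolve the symbols when the module is built or loaded.
A hypothetical modular user (invented for the example):

#include <linux/module.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Hypothetical module code: get_user() on a 64-bit destination compiles
 * into a call to __get_user_8 (and, on big-endian, into the
 * __get_user_64t_* variants for narrower sources), hence the exports. */
static int demo_read_u64(u64 __user *uptr, u64 *out)
{
	return get_user(*out, uptr);
}

MODULE_LICENSE("GPL");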

arch/arm/kernel/irq.c  (+1 -1)

···
  	c = irq_data_get_irq_chip(d);
  	if (!c->irq_set_affinity)
  		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- 	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ 	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
  		cpumask_copy(d->affinity, affinity);
  
  	return ret;
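
The only change above is the force argument of the irqchip callback. As a
rough, illustrative sketch of why that matters when interrupts are migrated
off a CPU that is going down (this is not code from the patch; it mimics how
irqchip drivers of this era commonly treated the flag):

#include <linux/irq.h>
#include <linux/cpumask.h>

/* Hypothetical irqchip ->irq_set_affinity() implementation. */
static int demo_chip_set_affinity(struct irq_data *d,
				  const struct cpumask *mask, bool force)
{
	unsigned int cpu;

	if (force)
		cpu = cpumask_first(mask);			/* no online check */
	else
		cpu = cpumask_any_and(mask, cpu_online_mask);	/* online CPUs only */

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* ... program routing of d->hwirq towards cpu ... */
	return IRQ_SET_MASK_OK;
}

With force=false the chip driver limits itself to online CPUs, which is what
the hotplug migration path wants.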

arch/arm/kernel/perf_event_cpu.c  (+4 -10)

···
  
  static void cpu_pmu_enable_percpu_irq(void *data)
  {
- 	struct arm_pmu *cpu_pmu = data;
- 	struct platform_device *pmu_device = cpu_pmu->plat_device;
- 	int irq = platform_get_irq(pmu_device, 0);
+ 	int irq = *(int *)data;
  
  	enable_percpu_irq(irq, IRQ_TYPE_NONE);
- 	cpumask_set_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
  }
  
  static void cpu_pmu_disable_percpu_irq(void *data)
  {
- 	struct arm_pmu *cpu_pmu = data;
- 	struct platform_device *pmu_device = cpu_pmu->plat_device;
- 	int irq = platform_get_irq(pmu_device, 0);
+ 	int irq = *(int *)data;
  
- 	cpumask_clear_cpu(smp_processor_id(), &cpu_pmu->active_irqs);
  	disable_percpu_irq(irq);
  }
  
···
  
  	irq = platform_get_irq(pmu_device, 0);
  	if (irq >= 0 && irq_is_percpu(irq)) {
- 		on_each_cpu(cpu_pmu_disable_percpu_irq, cpu_pmu, 1);
+ 		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
  		free_percpu_irq(irq, &percpu_pmu);
  	} else {
  		for (i = 0; i < irqs; ++i) {
···
  				irq);
  			return err;
  		}
- 		on_each_cpu(cpu_pmu_enable_percpu_irq, cpu_pmu, 1);
+ 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
  	} else {
  		for (i = 0; i < irqs; ++i) {
  			err = 0;
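
The reason for handing the callbacks the pre-resolved irq number rather than
the arm_pmu pointer is that on_each_cpu() runs its callback in atomic (IPI)
context, where nothing that might sleep may be called. A generic,
self-contained illustration of that contract (not code from the patch):

#include <linux/smp.h>
#include <linux/atomic.h>

/* on_each_cpu() callbacks run with interrupts disabled (IPI context on
 * remote CPUs), so they may only touch the data handed to them and must
 * not block, sleep, or take sleeping locks. */
static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);
}

static void bump_on_all_cpus(atomic_t *counter)
{
	on_each_cpu(bump_counter, counter, 1);	/* 1: wait for all CPUs */
}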

arch/arm/kernel/process.c  (+2)

···
  	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
  	memset(&thread->fpstate, 0, sizeof(union fp_state));
  
+ 	flush_tls();
+ 
  	thread_notify(THREAD_NOTIFY_FLUSH, thread);
  }
  

arch/arm/kernel/swp_emulate.c  (-15)

···
  	while (1) {
  		unsigned long temp;
  
- 		/*
- 		 * Barrier required between accessing protected resource and
- 		 * releasing a lock for it. Legacy code might not have done
- 		 * this, and we cannot determine that this is not the case
- 		 * being emulated, so insert always.
- 		 */
- 		smp_mb();
- 
  		if (type == TYPE_SWPB)
  			__user_swpb_asm(*data, address, res, temp);
  		else
···
  	}
  
  	if (res == 0) {
- 		/*
- 		 * Barrier also required between acquiring a lock for a
- 		 * protected resource and accessing the resource. Inserted for
- 		 * same reason as above.
- 		 */
- 		smp_mb();
- 
  		if (type == TYPE_SWPB)
  			swpbcounter++;
  		else

arch/arm/kernel/thumbee.c  (+1 -1)

···
  
  	switch (cmd) {
  	case THREAD_NOTIFY_FLUSH:
- 		thread->thumbee_state = 0;
+ 		teehbr_write(0);
  		break;
  	case THREAD_NOTIFY_SWITCH:
  		current_thread_info()->thumbee_state = teehbr_read();

arch/arm/kernel/traps.c  (+1 -16)

···
  #define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
  asmlinkage int arm_syscall(int no, struct pt_regs *regs)
  {
- 	struct thread_info *thread = current_thread_info();
  	siginfo_t info;
  
  	if ((no >> 16) != (__ARM_NR_BASE>> 16))
···
  		return regs->ARM_r0;
  
  	case NR(set_tls):
- 		thread->tp_value[0] = regs->ARM_r0;
- 		if (tls_emu)
- 			return 0;
- 		if (has_tls_reg) {
- 			asm ("mcr p15, 0, %0, c13, c0, 3"
- 				: : "r" (regs->ARM_r0));
- 		} else {
- 			/*
- 			 * User space must never try to access this directly.
- 			 * Expect your app to break eventually if you do so.
- 			 * The user helper at 0xffff0fe0 must be used instead.
- 			 * (see entry-armv.S for details)
- 			 */
- 			*((unsigned int *)0xffff0ff0) = regs->ARM_r0;
- 		}
+ 		set_tls(regs->ARM_r0);
  		return 0;
  
  #ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG

arch/arm/lib/getuser.S  (+36 -2)

···
  ENDPROC(__get_user_8)
  
  #ifdef __ARMEB__
- ENTRY(__get_user_lo8)
+ ENTRY(__get_user_32t_8)
  	check_uaccess r0, 8, r1, r2, __get_user_bad
  #ifdef CONFIG_CPU_USE_DOMAINS
  	add	r0, r0, #4
···
  #endif
  	mov	r0, #0
  	ret	lr
- ENDPROC(__get_user_lo8)
+ ENDPROC(__get_user_32t_8)
+ 
+ ENTRY(__get_user_64t_1)
+ 	check_uaccess r0, 1, r1, r2, __get_user_bad8
+ 8: TUSER(ldrb)	r3, [r0]
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_1)
+ 
+ ENTRY(__get_user_64t_2)
+ 	check_uaccess r0, 2, r1, r2, __get_user_bad8
+ #ifdef CONFIG_CPU_USE_DOMAINS
+ rb	.req	ip
+ 9:	ldrbt	r3, [r0], #1
+ 10:	ldrbt	rb, [r0], #0
+ #else
+ rb	.req	r0
+ 9:	ldrb	r3, [r0]
+ 10:	ldrb	rb, [r0, #1]
+ #endif
+ 	orr	r3, rb, r3, lsl #8
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_2)
+ 
+ ENTRY(__get_user_64t_4)
+ 	check_uaccess r0, 4, r1, r2, __get_user_bad8
+ 11: TUSER(ldr)	r3, [r0]
+ 	mov	r0, #0
+ 	ret	lr
+ ENDPROC(__get_user_64t_4)
  #endif
  
  __get_user_bad8:
···
  	.long	6b, __get_user_bad8
  #ifdef __ARMEB__
  	.long	7b, __get_user_bad
+ 	.long	8b, __get_user_bad8
+ 	.long	9b, __get_user_bad8
+ 	.long	10b, __get_user_bad8
+ 	.long	11b, __get_user_bad8
  #endif
  	.popsection

arch/arm/mm/proc-v7-3level.S  (-1)

···
  	mov	\tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
  	mov	\ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT		@ lower bits
  	addls	\ttbr1, \ttbr1, #TTBR1_OFFSET
- 	adcls	\tmp, \tmp, #0
  	mcrr	p15, 1, \ttbr1, \tmp, c2			@ load TTBR1
  	mov	\tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT)	@ upper bits
  	mov	\ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT		@ lower bits