Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:

- Łukasz Stelmach spotted a couple of issues with the decompressor.

- a couple of kdump fixes found while testing kdump

- replace some perl with shell code

- resolve SIGFPE breakage

- kprobes fixes

* 'fixes' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: fix kill( ,SIGFPE) breakage
ARM: 8772/1: kprobes: Prohibit kprobes on get_user functions
ARM: 8771/1: kprobes: Prohibit kprobes on do_undefinstr
ARM: 8770/1: kprobes: Prohibit probing on optimized_callback
ARM: 8769/1: kprobes: Fix to use get_kprobe_ctlblk after irq-disabed
ARM: replace unnecessary perl with sed and the shell $(( )) operator
ARM: kexec: record parent context registers for non-crash CPUs
ARM: kexec: fix kdump register saving on panic()
ARM: 8758/1: decompressor: restore r1 and r2 just before jumping to the kernel
ARM: 8753/1: decompressor: add a missing parameter to the addruart macro

+64 -44
+3 -5
arch/arm/boot/compressed/Makefile
··· 117 117 asflags-y := -DZIMAGE 118 118 119 119 # Supply kernel BSS size to the decompressor via a linker symbol. 120 - KBSS_SZ = $(shell $(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ 121 - perl -e 'while (<>) { \ 122 - $$bss_start=hex($$1) if /^([[:xdigit:]]+) B __bss_start$$/; \ 123 - $$bss_end=hex($$1) if /^([[:xdigit:]]+) B __bss_stop$$/; \ 124 - }; printf "%d\n", $$bss_end - $$bss_start;') 120 + KBSS_SZ = $(shell echo $$(($$($(CROSS_COMPILE)nm $(obj)/../../../../vmlinux | \ 121 + sed -n -e 's/^\([^ ]*\) [AB] __bss_start$$/-0x\1/p' \ 122 + -e 's/^\([^ ]*\) [AB] __bss_stop$$/+0x\1/p') )) ) 125 123 LDFLAGS_vmlinux = --defsym _kernel_bss_size=$(KBSS_SZ) 126 124 # Supply ZRELADDR to the decompressor via a linker symbol. 127 125 ifneq ($(CONFIG_AUTO_ZRELADDR),y)
+10 -10
arch/arm/boot/compressed/head.S
··· 29 29 #if defined(CONFIG_DEBUG_ICEDCC) 30 30 31 31 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7) 32 - .macro loadsp, rb, tmp 32 + .macro loadsp, rb, tmp1, tmp2 33 33 .endm 34 34 .macro writeb, ch, rb 35 35 mcr p14, 0, \ch, c0, c5, 0 36 36 .endm 37 37 #elif defined(CONFIG_CPU_XSCALE) 38 - .macro loadsp, rb, tmp 38 + .macro loadsp, rb, tmp1, tmp2 39 39 .endm 40 40 .macro writeb, ch, rb 41 41 mcr p14, 0, \ch, c8, c0, 0 42 42 .endm 43 43 #else 44 - .macro loadsp, rb, tmp 44 + .macro loadsp, rb, tmp1, tmp2 45 45 .endm 46 46 .macro writeb, ch, rb 47 47 mcr p14, 0, \ch, c1, c0, 0 ··· 57 57 .endm 58 58 59 59 #if defined(CONFIG_ARCH_SA1100) 60 - .macro loadsp, rb, tmp 60 + .macro loadsp, rb, tmp1, tmp2 61 61 mov \rb, #0x80000000 @ physical base address 62 62 #ifdef CONFIG_DEBUG_LL_SER3 63 63 add \rb, \rb, #0x00050000 @ Ser3 ··· 66 66 #endif 67 67 .endm 68 68 #else 69 - .macro loadsp, rb, tmp 70 - addruart \rb, \tmp 69 + .macro loadsp, rb, tmp1, tmp2 70 + addruart \rb, \tmp1, \tmp2 71 71 .endm 72 72 #endif 73 73 #endif ··· 561 561 bl decompress_kernel 562 562 bl cache_clean_flush 563 563 bl cache_off 564 - mov r1, r7 @ restore architecture number 565 - mov r2, r8 @ restore atags pointer 566 564 567 565 #ifdef CONFIG_ARM_VIRT_EXT 568 566 mrs r0, spsr @ Get saved CPU boot mode ··· 1295 1297 b 1b 1296 1298 1297 1299 @ puts corrupts {r0, r1, r2, r3} 1298 - puts: loadsp r3, r1 1300 + puts: loadsp r3, r2, r1 1299 1301 1: ldrb r2, [r0], #1 1300 1302 teq r2, #0 1301 1303 moveq pc, lr ··· 1312 1314 @ putc corrupts {r0, r1, r2, r3} 1313 1315 putc: 1314 1316 mov r2, r0 1317 + loadsp r3, r1, r0 1315 1318 mov r0, #0 1316 - loadsp r3, r1 1317 1319 b 2b 1318 1320 1319 1321 @ memdump corrupts {r0, r1, r2, r3, r10, r11, r12, lr} ··· 1363 1365 1364 1366 __enter_kernel: 1365 1367 mov r0, #0 @ must be 0 1368 + mov r1, r7 @ restore architecture number 1369 + mov r2, r8 @ restore atags pointer 1366 1370 ARM( mov pc, r4 ) @ call kernel 1367 1371 M_CLASS( add r4, r4, #1 ) @ enter in Thumb mode for M class 1368 1372 THUMB( bx r4 ) @ entry point is always ARM for A/R classes
+10
arch/arm/include/asm/assembler.h
··· 536 536 #endif 537 537 .endm 538 538 539 + #ifdef CONFIG_KPROBES 540 + #define _ASM_NOKPROBE(entry) \ 541 + .pushsection "_kprobe_blacklist", "aw" ; \ 542 + .balign 4 ; \ 543 + .long entry; \ 544 + .popsection 545 + #else 546 + #define _ASM_NOKPROBE(entry) 547 + #endif 548 + 539 549 #endif /* __ASM_ASSEMBLER_H__ */
-13
arch/arm/include/uapi/asm/siginfo.h
··· 1 - #ifndef __ASM_SIGINFO_H 2 - #define __ASM_SIGINFO_H 3 - 4 - #include <asm-generic/siginfo.h> 5 - 6 - /* 7 - * SIGFPE si_codes 8 - */ 9 - #ifdef __KERNEL__ 10 - #define FPE_FIXME 0 /* Broken dup of SI_USER */ 11 - #endif /* __KERNEL__ */ 12 - 13 - #endif
+23 -13
arch/arm/kernel/machine_kexec.c
··· 83 83 { 84 84 struct pt_regs regs; 85 85 86 - crash_setup_regs(&regs, NULL); 86 + crash_setup_regs(&regs, get_irq_regs()); 87 87 printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n", 88 88 smp_processor_id()); 89 89 crash_save_cpu(&regs, smp_processor_id()); ··· 93 93 atomic_dec(&waiting_for_crash_ipi); 94 94 while (1) 95 95 cpu_relax(); 96 + } 97 + 98 + void crash_smp_send_stop(void) 99 + { 100 + static int cpus_stopped; 101 + unsigned long msecs; 102 + 103 + if (cpus_stopped) 104 + return; 105 + 106 + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 107 + smp_call_function(machine_crash_nonpanic_core, NULL, false); 108 + msecs = 1000; /* Wait at most a second for the other cpus to stop */ 109 + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { 110 + mdelay(1); 111 + msecs--; 112 + } 113 + if (atomic_read(&waiting_for_crash_ipi) > 0) 114 + pr_warn("Non-crashing CPUs did not react to IPI\n"); 115 + 116 + cpus_stopped = 1; 96 117 } 97 118 98 119 static void machine_kexec_mask_interrupts(void) ··· 141 120 142 121 void machine_crash_shutdown(struct pt_regs *regs) 143 122 { 144 - unsigned long msecs; 145 - 146 123 local_irq_disable(); 147 - 148 - atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); 149 - smp_call_function(machine_crash_nonpanic_core, NULL, false); 150 - msecs = 1000; /* Wait at most a second for the other cpus to stop */ 151 - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { 152 - mdelay(1); 153 - msecs--; 154 - } 155 - if (atomic_read(&waiting_for_crash_ipi) > 0) 156 - pr_warn("Non-crashing CPUs did not react to IPI\n"); 124 + crash_smp_send_stop(); 157 125 158 126 crash_save_cpu(regs, smp_processor_id()); 159 127 machine_kexec_mask_interrupts();
+4 -1
arch/arm/kernel/traps.c
··· 19 19 #include <linux/uaccess.h> 20 20 #include <linux/hardirq.h> 21 21 #include <linux/kdebug.h> 22 + #include <linux/kprobes.h> 22 23 #include <linux/module.h> 23 24 #include <linux/kexec.h> 24 25 #include <linux/bug.h> ··· 418 417 raw_spin_unlock_irqrestore(&undef_lock, flags); 419 418 } 420 419 421 - static int call_undef_hook(struct pt_regs *regs, unsigned int instr) 420 + static nokprobe_inline 421 + int call_undef_hook(struct pt_regs *regs, unsigned int instr) 422 422 { 423 423 struct undef_hook *hook; 424 424 unsigned long flags; ··· 492 490 493 491 arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6); 494 492 } 493 + NOKPROBE_SYMBOL(do_undefinstr) 495 494 496 495 /* 497 496 * Handle FIQ similarly to NMI on x86 systems.
+10
arch/arm/lib/getuser.S
··· 38 38 mov r0, #0 39 39 ret lr 40 40 ENDPROC(__get_user_1) 41 + _ASM_NOKPROBE(__get_user_1) 41 42 42 43 ENTRY(__get_user_2) 43 44 check_uaccess r0, 2, r1, r2, __get_user_bad ··· 59 58 mov r0, #0 60 59 ret lr 61 60 ENDPROC(__get_user_2) 61 + _ASM_NOKPROBE(__get_user_2) 62 62 63 63 ENTRY(__get_user_4) 64 64 check_uaccess r0, 4, r1, r2, __get_user_bad ··· 67 65 mov r0, #0 68 66 ret lr 69 67 ENDPROC(__get_user_4) 68 + _ASM_NOKPROBE(__get_user_4) 70 69 71 70 ENTRY(__get_user_8) 72 71 check_uaccess r0, 8, r1, r2, __get_user_bad8 ··· 81 78 mov r0, #0 82 79 ret lr 83 80 ENDPROC(__get_user_8) 81 + _ASM_NOKPROBE(__get_user_8) 84 82 85 83 #ifdef __ARMEB__ 86 84 ENTRY(__get_user_32t_8) ··· 95 91 mov r0, #0 96 92 ret lr 97 93 ENDPROC(__get_user_32t_8) 94 + _ASM_NOKPROBE(__get_user_32t_8) 98 95 99 96 ENTRY(__get_user_64t_1) 100 97 check_uaccess r0, 1, r1, r2, __get_user_bad8 ··· 103 98 mov r0, #0 104 99 ret lr 105 100 ENDPROC(__get_user_64t_1) 101 + _ASM_NOKPROBE(__get_user_64t_1) 106 102 107 103 ENTRY(__get_user_64t_2) 108 104 check_uaccess r0, 2, r1, r2, __get_user_bad8 ··· 120 114 mov r0, #0 121 115 ret lr 122 116 ENDPROC(__get_user_64t_2) 117 + _ASM_NOKPROBE(__get_user_64t_2) 123 118 124 119 ENTRY(__get_user_64t_4) 125 120 check_uaccess r0, 4, r1, r2, __get_user_bad8 ··· 128 121 mov r0, #0 129 122 ret lr 130 123 ENDPROC(__get_user_64t_4) 124 + _ASM_NOKPROBE(__get_user_64t_4) 131 125 #endif 132 126 133 127 __get_user_bad8: ··· 139 131 ret lr 140 132 ENDPROC(__get_user_bad) 141 133 ENDPROC(__get_user_bad8) 134 + _ASM_NOKPROBE(__get_user_bad) 135 + _ASM_NOKPROBE(__get_user_bad8) 142 136 143 137 .pushsection __ex_table, "a" 144 138 .long 1b, __get_user_bad
+3 -1
arch/arm/probes/kprobes/opt-arm.c
··· 165 165 { 166 166 unsigned long flags; 167 167 struct kprobe *p = &op->kp; 168 - struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 168 + struct kprobe_ctlblk *kcb; 169 169 170 170 /* Save skipped registers */ 171 171 regs->ARM_pc = (unsigned long)op->kp.addr; 172 172 regs->ARM_ORIG_r0 = ~0UL; 173 173 174 174 local_irq_save(flags); 175 + kcb = get_kprobe_ctlblk(); 175 176 176 177 if (kprobe_running()) { 177 178 kprobes_inc_nmissed_count(&op->kp); ··· 192 191 193 192 local_irq_restore(flags); 194 193 } 194 + NOKPROBE_SYMBOL(optimized_callback) 195 195 196 196 int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig) 197 197 {
+1 -1
arch/arm/vfp/vfpmodule.c
··· 257 257 258 258 if (exceptions == VFP_EXCEPTION_ERROR) { 259 259 vfp_panic("unhandled bounce", inst); 260 - vfp_raise_sigfpe(FPE_FIXME, regs); 260 + vfp_raise_sigfpe(FPE_FLTINV, regs); 261 261 return; 262 262 } 263 263