Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'exit-cleanups-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace

Pull exit cleanups from Eric Biederman:
"While looking at some issues related to the exit path in the kernel I
found several instances where the code is not using the existing
abstractions properly.

This set of changes introduces force_fatal_sig a way of sending a
signal and not allowing it to be caught, and corrects the misuse of
the existing abstractions that I found.

A lot of the misuse of the existing abstractions is silly things such
as doing something after calling a no-return function, rolling BUG by
hand, doing more work than necessary to terminate a kernel thread, or
calling do_exit(SIGKILL) instead of calling force_sig(SIGKILL).

During review, a deficiency was found in force_fatal_sig and
force_sig_seccomp where ptrace or sigaction could prevent the delivery
of the signal. I have added a change introducing SA_IMMUTABLE, which
makes it impossible to interrupt the delivery of those signals, and
which allows backporting a fix for force_sig_seccomp.

And Arnd found an issue where a function passed to kthread_run had the
wrong prototype, and after my cleanup was failing to build."

* 'exit-cleanups-for-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: (23 commits)
soc: ti: fix wkup_m3_rproc_boot_thread return type
signal: Add SA_IMMUTABLE to ensure forced signals do not get changed
signal: Replace force_sigsegv(SIGSEGV) with force_fatal_sig(SIGSEGV)
exit/r8188eu: Replace the macro thread_exit with a simple return 0
exit/rtl8712: Replace the macro thread_exit with a simple return 0
exit/rtl8723bs: Replace the macro thread_exit with a simple return 0
signal/x86: In emulate_vsyscall force a signal instead of calling do_exit
signal/sparc32: In setup_rt_frame and setup_frame use force_fatal_sig
signal/sparc32: Exit with a fatal signal when try_to_clear_window_buffer fails
exit/syscall_user_dispatch: Send ordinary signals on failure
signal: Implement force_fatal_sig
exit/kthread: Have kernel threads return instead of calling do_exit
signal/s390: Use force_sigsegv in default_trap_handler
signal/vm86_32: Properly send SIGSEGV when the vm86 state cannot be saved.
signal/vm86_32: Replace open coded BUG_ON with an actual BUG_ON
signal/sparc: In setup_tsb_params convert open coded BUG into BUG
signal/powerpc: On swapcontext failure force SIGSEGV
signal/sh: Use force_sig(SIGKILL) instead of do_group_exit(SIGKILL)
signal/mips: Update (_save|_restore)_fp_context to fail with -EFAULT
signal/sparc32: Remove unreachable do_exit in do_sparc_fault
...

+97 -96
+1 -1
arch/arc/kernel/process.c
··· 294 294 eflags = x->e_flags; 295 295 if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) { 296 296 pr_err("ABI mismatch - you need newer toolchain\n"); 297 - force_sigsegv(SIGSEGV); 297 + force_fatal_sig(SIGSEGV); 298 298 return 0; 299 299 } 300 300
+1 -1
arch/m68k/kernel/traps.c
··· 1145 1145 */ 1146 1146 asmlinkage void fpsp040_die(void) 1147 1147 { 1148 - force_sigsegv(SIGSEGV); 1148 + force_fatal_sig(SIGSEGV); 1149 1149 } 1150 1150 1151 1151 #ifdef CONFIG_M68KFPU_EMU
+2 -2
arch/mips/kernel/r2300_fpu.S
··· 29 29 #define EX2(a,b) \ 30 30 9: a,##b; \ 31 31 .section __ex_table,"a"; \ 32 - PTR 9b,bad_stack; \ 33 - PTR 9b+4,bad_stack; \ 32 + PTR 9b,fault; \ 33 + PTR 9b+4,fault; \ 34 34 .previous 35 35 36 36 .set mips1
-9
arch/mips/kernel/syscall.c
··· 240 240 { 241 241 return -ENOSYS; 242 242 } 243 - 244 - /* 245 - * If we ever come here the user sp is bad. Zap the process right away. 246 - * Due to the bad stack signaling wouldn't work. 247 - */ 248 - asmlinkage void bad_stack(void) 249 - { 250 - do_exit(SIGSEGV); 251 - }
+1 -1
arch/nds32/kernel/traps.c
··· 118 118 /* 119 119 * This function is protected against re-entrancy. 120 120 */ 121 - void die(const char *str, struct pt_regs *regs, int err) 121 + void __noreturn die(const char *str, struct pt_regs *regs, int err) 122 122 { 123 123 struct task_struct *tsk = current; 124 124 static int die_counter;
+1 -5
arch/nds32/mm/fault.c
··· 13 13 14 14 #include <asm/tlbflush.h> 15 15 16 - extern void die(const char *str, struct pt_regs *regs, long err); 16 + extern void __noreturn die(const char *str, struct pt_regs *regs, long err); 17 17 18 18 /* 19 19 * This is useful to dump out the page tables associated with ··· 299 299 300 300 show_pte(mm, addr); 301 301 die("Oops", regs, error_code); 302 - bust_spinlocks(0); 303 - do_exit(SIGKILL); 304 - 305 - return; 306 302 307 303 /* 308 304 * We ran out of memory, or some other thing happened to us that made
+1 -1
arch/openrisc/kernel/traps.c
··· 197 197 } 198 198 199 199 /* This is normally the 'Oops' routine */ 200 - void die(const char *str, struct pt_regs *regs, long err) 200 + void __noreturn die(const char *str, struct pt_regs *regs, long err) 201 201 { 202 202 203 203 console_verbose();
+1 -3
arch/openrisc/mm/fault.c
··· 32 32 */ 33 33 volatile pgd_t *current_pgd[NR_CPUS]; 34 34 35 - extern void die(char *, struct pt_regs *, long); 35 + extern void __noreturn die(char *, struct pt_regs *, long); 36 36 37 37 /* 38 38 * This routine handles page faults. It determines the address, ··· 247 247 printk(" at virtual address 0x%08lx\n", address); 248 248 249 249 die("Oops", regs, write_acc); 250 - 251 - do_exit(SIGKILL); 252 250 253 251 /* 254 252 * We ran out of memory, or some other thing happened to us that made
+4 -2
arch/powerpc/kernel/signal_32.c
··· 1062 1062 * or if another thread unmaps the region containing the context. 1063 1063 * We kill the task with a SIGSEGV in this situation. 1064 1064 */ 1065 - if (do_setcontext(new_ctx, regs, 0)) 1066 - do_exit(SIGSEGV); 1065 + if (do_setcontext(new_ctx, regs, 0)) { 1066 + force_fatal_sig(SIGSEGV); 1067 + return -EFAULT; 1068 + } 1067 1069 1068 1070 set_thread_flag(TIF_RESTOREALL); 1069 1071 return 0;
+6 -3
arch/powerpc/kernel/signal_64.c
··· 703 703 * We kill the task with a SIGSEGV in this situation. 704 704 */ 705 705 706 - if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) 707 - do_exit(SIGSEGV); 706 + if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) { 707 + force_fatal_sig(SIGSEGV); 708 + return -EFAULT; 709 + } 708 710 set_current_blocked(&set); 709 711 710 712 if (!user_read_access_begin(new_ctx, ctx_size)) 711 713 return -EFAULT; 712 714 if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) { 713 715 user_read_access_end(); 714 - do_exit(SIGSEGV); 716 + force_fatal_sig(SIGSEGV); 717 + return -EFAULT; 715 718 } 716 719 user_read_access_end(); 717 720
+1 -1
arch/s390/include/asm/kdebug.h
··· 23 23 DIE_NMI_IPI, 24 24 }; 25 25 26 - extern void die(struct pt_regs *, const char *); 26 + extern void __noreturn die(struct pt_regs *, const char *); 27 27 28 28 #endif
+1 -1
arch/s390/kernel/dumpstack.c
··· 192 192 193 193 static DEFINE_SPINLOCK(die_lock); 194 194 195 - void die(struct pt_regs *regs, const char *str) 195 + void __noreturn die(struct pt_regs *regs, const char *str) 196 196 { 197 197 static int die_counter; 198 198
+1 -1
arch/s390/kernel/traps.c
··· 84 84 { 85 85 if (user_mode(regs)) { 86 86 report_user_fault(regs, SIGSEGV, 0); 87 - do_exit(SIGSEGV); 87 + force_fatal_sig(SIGSEGV); 88 88 } else 89 89 die(regs, "Unknown program exception"); 90 90 }
-2
arch/s390/mm/fault.c
··· 260 260 " in virtual user address space\n"); 261 261 dump_fault_info(regs); 262 262 die(regs, "Oops"); 263 - do_exit(SIGKILL); 264 263 } 265 264 266 265 static noinline void do_low_address(struct pt_regs *regs) ··· 269 270 if (regs->psw.mask & PSW_MASK_PSTATE) { 270 271 /* Low-address protection hit in user mode 'cannot happen'. */ 271 272 die (regs, "Low-address protection"); 272 - do_exit(SIGKILL); 273 273 } 274 274 275 275 do_no_context(regs);
+6 -4
arch/sh/kernel/cpu/fpu.c
··· 62 62 } 63 63 64 64 if (!tsk_used_math(tsk)) { 65 - local_irq_enable(); 65 + int ret; 66 66 /* 67 67 * does a slab alloc which can sleep 68 68 */ 69 - if (init_fpu(tsk)) { 69 + local_irq_enable(); 70 + ret = init_fpu(tsk); 71 + local_irq_disable(); 72 + if (ret) { 70 73 /* 71 74 * ran out of memory! 72 75 */ 73 - do_group_exit(SIGKILL); 76 + force_sig(SIGKILL); 74 77 return; 75 78 } 76 - local_irq_disable(); 77 79 } 78 80 79 81 grab_fpu(regs);
+1 -1
arch/sh/kernel/traps.c
··· 20 20 21 21 static DEFINE_SPINLOCK(die_lock); 22 22 23 - void die(const char *str, struct pt_regs *regs, long err) 23 + void __noreturn die(const char *str, struct pt_regs *regs, long err) 24 24 { 25 25 static int die_counter; 26 26
-2
arch/sh/mm/fault.c
··· 238 238 show_fault_oops(regs, address); 239 239 240 240 die("Oops", regs, error_code); 241 - bust_spinlocks(0); 242 - do_exit(SIGKILL); 243 241 } 244 242 245 243 static void
+2 -2
arch/sparc/kernel/signal_32.c
··· 244 244 get_sigframe(ksig, regs, sigframe_size); 245 245 246 246 if (invalid_frame_pointer(sf, sigframe_size)) { 247 - do_exit(SIGILL); 247 + force_fatal_sig(SIGILL); 248 248 return -EINVAL; 249 249 } 250 250 ··· 336 336 sf = (struct rt_signal_frame __user *) 337 337 get_sigframe(ksig, regs, sigframe_size); 338 338 if (invalid_frame_pointer(sf, sigframe_size)) { 339 - do_exit(SIGILL); 339 + force_fatal_sig(SIGILL); 340 340 return -EINVAL; 341 341 } 342 342
+4 -2
arch/sparc/kernel/windows.c
··· 121 121 122 122 if ((sp & 7) || 123 123 copy_to_user((char __user *) sp, &tp->reg_window[window], 124 - sizeof(struct reg_window32))) 125 - do_exit(SIGILL); 124 + sizeof(struct reg_window32))) { 125 + force_fatal_sig(SIGILL); 126 + return; 127 + } 126 128 } 127 129 tp->w_saved = 0; 128 130 }
-1
arch/sparc/mm/fault_32.c
··· 248 248 } 249 249 250 250 unhandled_fault(address, tsk, regs); 251 - do_exit(SIGKILL); 252 251 253 252 /* 254 253 * We ran out of memory, or some other thing happened to us that made
+1 -1
arch/sparc/mm/tsb.c
··· 266 266 default: 267 267 printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", 268 268 current->comm, current->pid, tsb_bytes); 269 - do_exit(SIGSEGV); 269 + BUG(); 270 270 } 271 271 tte |= pte_sz_bits(page_sz); 272 272
+1 -1
arch/um/kernel/trap.c
··· 158 158 159 159 void fatal_sigsegv(void) 160 160 { 161 - force_sigsegv(SIGSEGV); 161 + force_fatal_sig(SIGSEGV); 162 162 do_signal(&current->thread.regs); 163 163 /* 164 164 * This is to tell gcc that we're not returning - do_signal
+2 -1
arch/x86/entry/vsyscall/vsyscall_64.c
··· 226 226 if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) { 227 227 warn_bad_vsyscall(KERN_DEBUG, regs, 228 228 "seccomp tried to change syscall nr or ip"); 229 - do_exit(SIGSYS); 229 + force_fatal_sig(SIGSYS); 230 + return true; 230 231 } 231 232 regs->orig_ax = -1; 232 233 if (tmp)
-3
arch/x86/kernel/doublefault_32.c
··· 77 77 * some way to reconstruct CR3. We could make a credible guess based 78 78 * on cpu_tlbstate, but that would be racy and would not account for 79 79 * PTI. 80 - * 81 - * Instead, don't bother. We can return through 82 - * rewind_stack_do_exit() instead. 83 80 */ 84 81 panic("cannot return from double fault\n"); 85 82 }
+5 -5
arch/x86/kernel/vm86_32.c
··· 106 106 */ 107 107 local_irq_enable(); 108 108 109 - if (!vm86 || !vm86->user_vm86) { 110 - pr_alert("no user_vm86: BAD\n"); 111 - do_exit(SIGSEGV); 112 - } 109 + BUG_ON(!vm86 || !vm86->user_vm86); 110 + 113 111 set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->veflags_mask); 114 112 user = vm86->user_vm86; 115 113 ··· 140 142 141 143 user_access_end(); 142 144 145 + exit_vm86: 143 146 preempt_disable(); 144 147 tsk->thread.sp0 = vm86->saved_sp0; 145 148 tsk->thread.sysenter_cs = __KERNEL_CS; ··· 160 161 user_access_end(); 161 162 Efault: 162 163 pr_alert("could not access userspace vm86 info\n"); 163 - do_exit(SIGSEGV); 164 + force_fatal_sig(SIGSEGV); 165 + goto exit_vm86; 164 166 } 165 167 166 168 static int do_vm86_irq_handling(int subfunction, int irqnumber);
+1 -1
arch/xtensa/kernel/traps.c
··· 527 527 528 528 DEFINE_SPINLOCK(die_lock); 529 529 530 - void die(const char * str, struct pt_regs * regs, long err) 530 + void __noreturn die(const char * str, struct pt_regs * regs, long err) 531 531 { 532 532 static int die_counter; 533 533 const char *pr = "";
+1 -2
arch/xtensa/mm/fault.c
··· 238 238 void 239 239 bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) 240 240 { 241 - extern void die(const char*, struct pt_regs*, long); 241 + extern void __noreturn die(const char*, struct pt_regs*, long); 242 242 const struct exception_table_entry *entry; 243 243 244 244 /* Are we prepared to handle this kernel fault? */ ··· 257 257 "address %08lx\n pc = %08lx, ra = %08lx\n", 258 258 address, regs->pc, regs->areg[0]); 259 259 die("Oops", regs, sig); 260 - do_exit(sig); 261 260 }
+2 -2
drivers/firmware/stratix10-svc.c
··· 520 520 * physical address of memory block reserved by secure monitor software at 521 521 * secure world. 522 522 * 523 - * svc_normal_to_secure_shm_thread() calls do_exit() directly since it is a 523 + * svc_normal_to_secure_shm_thread() terminates directly since it is a 524 524 * standlone thread for which no one will call kthread_stop() or return when 525 525 * 'kthread_should_stop()' is true. 526 526 */ ··· 544 544 } 545 545 546 546 complete(&sh_mem->sync_complete); 547 - do_exit(0); 547 + return 0; 548 548 } 549 549 550 550 /**
+4 -3
drivers/soc/ti/wkup_m3_ipc.c
··· 413 413 } 414 414 EXPORT_SYMBOL_GPL(wkup_m3_ipc_put); 415 415 416 - static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc) 416 + static int wkup_m3_rproc_boot_thread(void *arg) 417 417 { 418 + struct wkup_m3_ipc *m3_ipc = arg; 418 419 struct device *dev = m3_ipc->dev; 419 420 int ret; 420 421 ··· 427 426 else 428 427 m3_ipc_state = m3_ipc; 429 428 430 - do_exit(0); 429 + return 0; 431 430 } 432 431 433 432 static int wkup_m3_ipc_probe(struct platform_device *pdev) ··· 501 500 * can boot the wkup_m3 as soon as it's ready without holding 502 501 * up kernel boot 503 502 */ 504 - task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc, 503 + task = kthread_run(wkup_m3_rproc_boot_thread, m3_ipc, 505 504 "wkup_m3_rproc_loader"); 506 505 507 506 if (IS_ERR(task)) {
+1 -1
drivers/staging/r8188eu/core/rtw_cmd.c
··· 323 323 324 324 complete(&pcmdpriv->stop_cmd_thread); 325 325 326 - thread_exit(); 326 + return 0; 327 327 } 328 328 329 329 /*
-2
drivers/staging/r8188eu/include/osdep_service.h
··· 49 49 spinlock_t lock; 50 50 }; 51 51 52 - #define thread_exit() complete_and_exit(NULL, 0) 53 - 54 52 static inline struct list_head *get_list_head(struct __queue *queue) 55 53 { 56 54 return (&(queue->queue));
-1
drivers/staging/rtl8712/osdep_service.h
··· 37 37 38 38 #define _pkt struct sk_buff 39 39 #define _buffer unsigned char 40 - #define thread_exit() complete_and_exit(NULL, 0) 41 40 42 41 #define _init_queue(pqueue) \ 43 42 do { \
+1 -1
drivers/staging/rtl8712/rtl8712_cmd.c
··· 393 393 r8712_free_cmd_obj(pcmd); 394 394 } while (1); 395 395 complete(&pcmdpriv->terminate_cmdthread_comp); 396 - thread_exit(); 396 + return 0; 397 397 } 398 398 399 399 void r8712_event_handle(struct _adapter *padapter, __le32 *peventbuf)
+1 -1
drivers/staging/rtl8723bs/core/rtw_cmd.c
··· 518 518 complete(&pcmdpriv->terminate_cmdthread_comp); 519 519 atomic_set(&pcmdpriv->cmdthd_running, false); 520 520 521 - thread_exit(); 521 + return 0; 522 522 } 523 523 524 524 /*
+1 -1
drivers/staging/rtl8723bs/core/rtw_xmit.c
··· 2500 2500 2501 2501 complete(&padapter->xmitpriv.terminate_xmitthread_comp); 2502 2502 2503 - thread_exit(); 2503 + return 0; 2504 2504 } 2505 2505 2506 2506 void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
+1 -1
drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
··· 435 435 436 436 complete(&pxmitpriv->SdioXmitTerminate); 437 437 438 - thread_exit(); 438 + return 0; 439 439 } 440 440 441 441 s32 rtl8723bs_mgnt_xmit(
-2
drivers/staging/rtl8723bs/include/osdep_service_linux.h
··· 45 45 spinlock_t lock; 46 46 }; 47 47 48 - #define thread_exit() complete_and_exit(NULL, 0) 49 - 50 48 static inline struct list_head *get_next(struct list_head *list) 51 49 { 52 50 return list->next;
+1 -1
fs/exec.c
··· 1850 1850 * SIGSEGV. 1851 1851 */ 1852 1852 if (bprm->point_of_no_return && !fatal_signal_pending(current)) 1853 - force_sigsegv(SIGSEGV); 1853 + force_fatal_sig(SIGSEGV); 1854 1854 1855 1855 out_unmark: 1856 1856 current->fs->in_exec = 0;
+1 -4
fs/ocfs2/journal.c
··· 1513 1513 if (quota_enabled) 1514 1514 kfree(rm_quota); 1515 1515 1516 - /* no one is callint kthread_stop() for us so the kthread() api 1517 - * requires that we call do_exit(). And it isn't exported, but 1518 - * complete_and_exit() seems to be a minimal wrapper around it. */ 1519 - complete_and_exit(NULL, status); 1516 + return status; 1520 1517 } 1521 1518 1522 1519 void ocfs2_recovery_thread(struct ocfs2_super *osb, int node_num)
+1
include/linux/sched/signal.h
··· 351 351 extern __must_check bool do_notify_parent(struct task_struct *, int); 352 352 extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); 353 353 extern void force_sig(int); 354 + extern void force_fatal_sig(int); 354 355 extern int send_sig(int, struct task_struct *, int); 355 356 extern int zap_other_threads(struct task_struct *p); 356 357 extern struct sigqueue *sigqueue_alloc(void);
+3
include/linux/signal_types.h
··· 70 70 int sig; 71 71 }; 72 72 73 + /* Used to kill the race between sigaction and forced signals */ 74 + #define SA_IMMUTABLE 0x00800000 75 + 73 76 #ifndef __ARCH_UAPI_SA_FLAGS 74 77 #ifdef SA_RESTORER 75 78 #define __ARCH_UAPI_SA_FLAGS SA_RESTORER
+1
include/uapi/asm-generic/signal-defs.h
··· 45 45 #define SA_UNSUPPORTED 0x00000400 46 46 #define SA_EXPOSE_TAGBITS 0x00000800 47 47 /* 0x00010000 used on mips */ 48 + /* 0x00800000 used for internal SA_IMMUTABLE */ 48 49 /* 0x01000000 used on x86 */ 49 50 /* 0x02000000 used on x86 */ 50 51 /*
+8 -4
kernel/entry/syscall_user_dispatch.c
··· 47 47 * access_ok() is performed once, at prctl time, when 48 48 * the selector is loaded by userspace. 49 49 */ 50 - if (unlikely(__get_user(state, sd->selector))) 51 - do_exit(SIGSEGV); 50 + if (unlikely(__get_user(state, sd->selector))) { 51 + force_fatal_sig(SIGSEGV); 52 + return true; 53 + } 52 54 53 55 if (likely(state == SYSCALL_DISPATCH_FILTER_ALLOW)) 54 56 return false; 55 57 56 - if (state != SYSCALL_DISPATCH_FILTER_BLOCK) 57 - do_exit(SIGSYS); 58 + if (state != SYSCALL_DISPATCH_FILTER_BLOCK) { 59 + force_fatal_sig(SIGSYS); 60 + return true; 61 + } 58 62 } 59 63 60 64 sd->on_dispatch = true;
+1 -1
kernel/kthread.c
··· 433 433 * If thread is going to be bound on a particular cpu, give its node 434 434 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE. 435 435 * When woken, the thread will run @threadfn() with @data as its 436 - * argument. @threadfn() can either call do_exit() directly if it is a 436 + * argument. @threadfn() can either return directly if it is a 437 437 * standalone thread for which no one will call kthread_stop(), or 438 438 * return when 'kthread_should_stop()' is true (which means 439 439 * kthread_stop() has been called). The return value should be zero
-1
kernel/reboot.c
··· 359 359 case LINUX_REBOOT_CMD_HALT: 360 360 kernel_halt(); 361 361 do_exit(0); 362 - panic("cannot halt"); 363 362 364 363 case LINUX_REBOOT_CMD_POWER_OFF: 365 364 kernel_power_off();
+24 -10
kernel/signal.c
··· 1323 1323 blocked = sigismember(&t->blocked, sig); 1324 1324 if (blocked || ignored || sigdfl) { 1325 1325 action->sa.sa_handler = SIG_DFL; 1326 + action->sa.sa_flags |= SA_IMMUTABLE; 1326 1327 if (blocked) { 1327 1328 sigdelset(&t->blocked, sig); 1328 1329 recalc_sigpending_and_wake(t); ··· 1650 1649 } 1651 1650 EXPORT_SYMBOL(force_sig); 1652 1651 1652 + void force_fatal_sig(int sig) 1653 + { 1654 + struct kernel_siginfo info; 1655 + 1656 + clear_siginfo(&info); 1657 + info.si_signo = sig; 1658 + info.si_errno = 0; 1659 + info.si_code = SI_KERNEL; 1660 + info.si_pid = 0; 1661 + info.si_uid = 0; 1662 + force_sig_info_to_task(&info, current, true); 1663 + } 1664 + 1653 1665 /* 1654 1666 * When things go south during signal handling, we 1655 1667 * will force a SIGSEGV. And if the signal that caused ··· 1671 1657 */ 1672 1658 void force_sigsegv(int sig) 1673 1659 { 1674 - struct task_struct *p = current; 1675 - 1676 - if (sig == SIGSEGV) { 1677 - unsigned long flags; 1678 - spin_lock_irqsave(&p->sighand->siglock, flags); 1679 - p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; 1680 - spin_unlock_irqrestore(&p->sighand->siglock, flags); 1681 - } 1682 - force_sig(SIGSEGV); 1660 + if (sig == SIGSEGV) 1661 + force_fatal_sig(SIGSEGV); 1662 + else 1663 + force_sig(SIGSEGV); 1683 1664 } 1684 1665 1685 1666 int force_sig_fault_to_task(int sig, int code, void __user *addr ··· 2713 2704 if (!signr) 2714 2705 break; /* will return 0 */ 2715 2706 2716 - if (unlikely(current->ptrace) && signr != SIGKILL) { 2707 + if (unlikely(current->ptrace) && (signr != SIGKILL) && 2708 + !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) { 2717 2709 signr = ptrace_signal(signr, &ksig->info); 2718 2710 if (!signr) 2719 2711 continue; ··· 4064 4054 k = &p->sighand->action[sig-1]; 4065 4055 4066 4056 spin_lock_irq(&p->sighand->siglock); 4057 + if (k->sa.sa_flags & SA_IMMUTABLE) { 4058 + spin_unlock_irq(&p->sighand->siglock); 4059 + return -EINVAL; 4060 + } 4067 4061 if (oact) 4068 
4062 *oact = *k; 4069 4063
+1 -1
net/batman-adv/tp_meter.c
··· 890 890 891 891 batadv_tp_vars_put(tp_vars); 892 892 893 - do_exit(0); 893 + return 0; 894 894 } 895 895 896 896 /**