Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

sched/wait: Fix the signal handling fix

Jan Stancek reported that I wrecked things for him by fixing things for
Vladimir :/

His report was due to an UNINTERRUPTIBLE wait getting -EINTR, which
should not be possible, however my previous patch made this possible by
unconditionally checking signal_pending().

We cannot use current->state as was done previously, because it can be
changed by the very next instruction after the store to that variable
(e.g. by a concurrent wakeup). We must instead pass the initial state
along and use that.

Fixes: 68985633bccb ("sched/wait: Fix signal handling in bit wait helpers")
Reported-by: Jan Stancek <jstancek@redhat.com>
Reported-by: Chris Mason <clm@fb.com>
Tested-by: Jan Stancek <jstancek@redhat.com>
Tested-by: Vladimir Murzin <vladimir.murzin@arm.com>
Tested-by: Chris Mason <clm@fb.com>
Reviewed-by: Paul Turner <pjt@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: tglx@linutronix.de
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: hpa@zytor.com
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Peter Zijlstra and committed by
Linus Torvalds
dfd01f02 fc891828

+28 -28
+3 -3
fs/cifs/inode.c
··· 1831 1831 * @word: long word containing the bit lock 1832 1832 */ 1833 1833 static int 1834 - cifs_wait_bit_killable(struct wait_bit_key *key) 1834 + cifs_wait_bit_killable(struct wait_bit_key *key, int mode) 1835 1835 { 1836 - if (fatal_signal_pending(current)) 1837 - return -ERESTARTSYS; 1838 1836 freezable_schedule_unsafe(); 1837 + if (signal_pending_state(mode, current)) 1838 + return -ERESTARTSYS; 1839 1839 return 0; 1840 1840 } 1841 1841
+3 -3
fs/nfs/inode.c
··· 75 75 * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks 76 76 * @word: long word containing the bit lock 77 77 */ 78 - int nfs_wait_bit_killable(struct wait_bit_key *key) 78 + int nfs_wait_bit_killable(struct wait_bit_key *key, int mode) 79 79 { 80 - if (fatal_signal_pending(current)) 81 - return -ERESTARTSYS; 82 80 freezable_schedule_unsafe(); 81 + if (signal_pending_state(mode, current)) 82 + return -ERESTARTSYS; 83 83 return 0; 84 84 } 85 85 EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
+1 -1
fs/nfs/internal.h
··· 379 379 extern void nfs_clear_inode(struct inode *); 380 380 extern void nfs_evict_inode(struct inode *); 381 381 void nfs_zap_acl_cache(struct inode *inode); 382 - extern int nfs_wait_bit_killable(struct wait_bit_key *key); 382 + extern int nfs_wait_bit_killable(struct wait_bit_key *key, int mode); 383 383 384 384 /* super.c */ 385 385 extern const struct super_operations nfs_sops;
+1 -1
fs/nfs/pagelist.c
··· 129 129 set_bit(NFS_IO_INPROGRESS, &c->flags); 130 130 if (atomic_read(&c->io_count) == 0) 131 131 break; 132 - ret = nfs_wait_bit_killable(&q.key); 132 + ret = nfs_wait_bit_killable(&q.key, TASK_KILLABLE); 133 133 } while (atomic_read(&c->io_count) != 0 && !ret); 134 134 finish_wait(wq, &q.wait); 135 135 return ret;
+2 -2
fs/nfs/pnfs.c
··· 1466 1466 } 1467 1467 1468 1468 /* stop waiting if someone clears NFS_LAYOUT_RETRY_LAYOUTGET bit. */ 1469 - static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key) 1469 + static int pnfs_layoutget_retry_bit_wait(struct wait_bit_key *key, int mode) 1470 1470 { 1471 1471 if (!test_bit(NFS_LAYOUT_RETRY_LAYOUTGET, key->flags)) 1472 1472 return 1; 1473 - return nfs_wait_bit_killable(key); 1473 + return nfs_wait_bit_killable(key, mode); 1474 1474 } 1475 1475 1476 1476 static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
+5 -5
include/linux/wait.h
··· 145 145 list_del(&old->task_list); 146 146 } 147 147 148 - typedef int wait_bit_action_f(struct wait_bit_key *); 148 + typedef int wait_bit_action_f(struct wait_bit_key *, int mode); 149 149 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); 150 150 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); 151 151 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); ··· 960 960 } while (0) 961 961 962 962 963 - extern int bit_wait(struct wait_bit_key *); 964 - extern int bit_wait_io(struct wait_bit_key *); 965 - extern int bit_wait_timeout(struct wait_bit_key *); 966 - extern int bit_wait_io_timeout(struct wait_bit_key *); 963 + extern int bit_wait(struct wait_bit_key *, int); 964 + extern int bit_wait_io(struct wait_bit_key *, int); 965 + extern int bit_wait_timeout(struct wait_bit_key *, int); 966 + extern int bit_wait_io_timeout(struct wait_bit_key *, int); 967 967 968 968 /** 969 969 * wait_on_bit - wait for a bit to be cleared
+10 -10
kernel/sched/wait.c
··· 392 392 do { 393 393 prepare_to_wait(wq, &q->wait, mode); 394 394 if (test_bit(q->key.bit_nr, q->key.flags)) 395 - ret = (*action)(&q->key); 395 + ret = (*action)(&q->key, mode); 396 396 } while (test_bit(q->key.bit_nr, q->key.flags) && !ret); 397 397 finish_wait(wq, &q->wait); 398 398 return ret; ··· 431 431 prepare_to_wait_exclusive(wq, &q->wait, mode); 432 432 if (!test_bit(q->key.bit_nr, q->key.flags)) 433 433 continue; 434 - ret = action(&q->key); 434 + ret = action(&q->key, mode); 435 435 if (!ret) 436 436 continue; 437 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); ··· 581 581 } 582 582 EXPORT_SYMBOL(wake_up_atomic_t); 583 583 584 - __sched int bit_wait(struct wait_bit_key *word) 584 + __sched int bit_wait(struct wait_bit_key *word, int mode) 585 585 { 586 586 schedule(); 587 - if (signal_pending(current)) 587 + if (signal_pending_state(mode, current)) 588 588 return -EINTR; 589 589 return 0; 590 590 } 591 591 EXPORT_SYMBOL(bit_wait); 592 592 593 - __sched int bit_wait_io(struct wait_bit_key *word) 593 + __sched int bit_wait_io(struct wait_bit_key *word, int mode) 594 594 { 595 595 io_schedule(); 596 - if (signal_pending(current)) 596 + if (signal_pending_state(mode, current)) 597 597 return -EINTR; 598 598 return 0; 599 599 } 600 600 EXPORT_SYMBOL(bit_wait_io); 601 601 602 - __sched int bit_wait_timeout(struct wait_bit_key *word) 602 + __sched int bit_wait_timeout(struct wait_bit_key *word, int mode) 603 603 { 604 604 unsigned long now = READ_ONCE(jiffies); 605 605 if (time_after_eq(now, word->timeout)) 606 606 return -EAGAIN; 607 607 schedule_timeout(word->timeout - now); 608 - if (signal_pending(current)) 608 + if (signal_pending_state(mode, current)) 609 609 return -EINTR; 610 610 return 0; 611 611 } 612 612 EXPORT_SYMBOL_GPL(bit_wait_timeout); 613 613 614 - __sched int bit_wait_io_timeout(struct wait_bit_key *word) 614 + __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode) 615 615 { 616 616 unsigned long now = 
READ_ONCE(jiffies); 617 617 if (time_after_eq(now, word->timeout)) 618 618 return -EAGAIN; 619 619 io_schedule_timeout(word->timeout - now); 620 - if (signal_pending(current)) 620 + if (signal_pending_state(mode, current)) 621 621 return -EINTR; 622 622 return 0; 623 623 }
+3 -3
net/sunrpc/sched.c
··· 250 250 } 251 251 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue); 252 252 253 - static int rpc_wait_bit_killable(struct wait_bit_key *key) 253 + static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode) 254 254 { 255 - if (fatal_signal_pending(current)) 256 - return -ERESTARTSYS; 257 255 freezable_schedule_unsafe(); 256 + if (signal_pending_state(mode, current)) 257 + return -ERESTARTSYS; 258 258 return 0; 259 259 } 260 260