Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

sunrpc: allow svc_recv() to return -ETIMEDOUT and -EBUSY

To dynamically adjust the thread count, nfsd requires some information
about how busy things are.

Change svc_recv() to take a timeout value, and then allow the wait for
work to time out if it's set. If a timeout is not defined, then the
schedule timeout will be set to MAX_SCHEDULE_TIMEOUT. If the task waits for the
full timeout, then have it return -ETIMEDOUT to the caller.

If it wakes up, finds that there is more work and that no threads are
available, then attempt to set SP_TASK_STARTING. If it wasn't already set,
have the task return -EBUSY to cue the caller that the service could
use more threads.

Signed-off-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>

authored by

Jeff Layton and committed by
Chuck Lever
a0022a38 7f221b34

+47 -13
+1 -1
fs/lockd/svc.c
··· 141 141 */ 142 142 while (!svc_thread_should_stop(rqstp)) { 143 143 nlmsvc_retry_blocked(rqstp); 144 - svc_recv(rqstp); 144 + svc_recv(rqstp, 0); 145 145 } 146 146 if (nlmsvc_ops) 147 147 nlmsvc_invalidate_all();
+1 -1
fs/nfs/callback.c
··· 81 81 set_freezable(); 82 82 83 83 while (!svc_thread_should_stop(rqstp)) 84 - svc_recv(rqstp); 84 + svc_recv(rqstp, 0); 85 85 86 86 svc_exit_thread(rqstp); 87 87 return 0;
+1 -1
fs/nfsd/nfssvc.c
··· 902 902 * The main request loop 903 903 */ 904 904 while (!svc_thread_should_stop(rqstp)) { 905 - svc_recv(rqstp); 905 + svc_recv(rqstp, 0); 906 906 nfsd_file_net_dispose(nn); 907 907 } 908 908
+1
include/linux/sunrpc/svc.h
··· 55 55 SP_TASK_PENDING, /* still work to do even if no xprt is queued */ 56 56 SP_NEED_VICTIM, /* One thread needs to agree to exit */ 57 57 SP_VICTIM_REMAINS, /* One thread needs to actually exit */ 58 + SP_TASK_STARTING, /* Task has started but not added to idle yet */ 58 59 }; 59 60 60 61
+1 -1
include/linux/sunrpc/svcsock.h
··· 61 61 /* 62 62 * Function prototypes. 63 63 */ 64 - void svc_recv(struct svc_rqst *rqstp); 64 + int svc_recv(struct svc_rqst *rqstp, long timeo); 65 65 void svc_send(struct svc_rqst *rqstp); 66 66 int svc_addsock(struct svc_serv *serv, struct net *net, 67 67 const int fd, char *name_return, const size_t len,
+42 -9
net/sunrpc/svc_xprt.c
··· 714 714 return true; 715 715 } 716 716 717 - static void svc_thread_wait_for_work(struct svc_rqst *rqstp) 717 + static bool svc_schedule_timeout(long timeo) 718 + { 719 + return schedule_timeout(timeo ? timeo : MAX_SCHEDULE_TIMEOUT) == 0; 720 + } 721 + 722 + static bool svc_thread_wait_for_work(struct svc_rqst *rqstp, long timeo) 718 723 { 719 724 struct svc_pool *pool = rqstp->rq_pool; 725 + bool did_timeout = false; 720 726 721 727 if (svc_thread_should_sleep(rqstp)) { 722 728 set_current_state(TASK_IDLE | TASK_FREEZABLE); 723 729 llist_add(&rqstp->rq_idle, &pool->sp_idle_threads); 724 730 if (likely(svc_thread_should_sleep(rqstp))) 725 - schedule(); 731 + did_timeout = svc_schedule_timeout(timeo); 726 732 727 733 while (!llist_del_first_this(&pool->sp_idle_threads, 728 734 &rqstp->rq_idle)) { ··· 740 734 * for this new work. This thread can safely sleep 741 735 * until woken again. 742 736 */ 743 - schedule(); 737 + did_timeout = svc_schedule_timeout(timeo); 744 738 set_current_state(TASK_IDLE | TASK_FREEZABLE); 745 739 } 746 740 __set_current_state(TASK_RUNNING); ··· 748 742 cond_resched(); 749 743 } 750 744 try_to_freeze(); 745 + return did_timeout; 751 746 } 752 747 753 748 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt) ··· 842 835 /** 843 836 * svc_recv - Receive and process the next request on any transport 844 837 * @rqstp: an idle RPC service thread 838 + * @timeo: timeout (in jiffies) (0 means infinite timeout) 845 839 * 846 840 * This code is carefully organised not to touch any cachelines in 847 841 * the shared svc_serv structure, only cachelines in the local 848 842 * svc_pool. 843 + * 844 + * If the timeout is 0, then the sleep will never time out. 845 + * 846 + * Returns -ETIMEDOUT if idle for an extended period 847 + * -EBUSY if there is more work to do than available threads 848 + * 0 otherwise. 
849 849 */ 850 - void svc_recv(struct svc_rqst *rqstp) 850 + int svc_recv(struct svc_rqst *rqstp, long timeo) 851 851 { 852 852 struct svc_pool *pool = rqstp->rq_pool; 853 + bool did_timeout; 854 + int ret = 0; 853 855 854 856 if (!svc_alloc_arg(rqstp)) 855 - return; 857 + return ret; 856 858 857 - svc_thread_wait_for_work(rqstp); 859 + did_timeout = svc_thread_wait_for_work(rqstp, timeo); 860 + 861 + if (did_timeout && svc_thread_should_sleep(rqstp) && 862 + pool->sp_nrthrmin && pool->sp_nrthreads > pool->sp_nrthrmin) 863 + ret = -ETIMEDOUT; 858 864 859 865 clear_bit(SP_TASK_PENDING, &pool->sp_flags); 860 866 861 867 if (svc_thread_should_stop(rqstp)) { 862 868 svc_thread_wake_next(rqstp); 863 - return; 869 + return ret; 864 870 } 865 871 866 872 rqstp->rq_xprt = svc_xprt_dequeue(pool); ··· 885 865 * cache information to be provided. When there are no 886 866 * idle threads, we reduce the wait time. 887 867 */ 888 - if (pool->sp_idle_threads.first) 868 + if (pool->sp_idle_threads.first) { 889 869 rqstp->rq_chandle.thread_wait = 5 * HZ; 890 - else 870 + } else { 891 871 rqstp->rq_chandle.thread_wait = 1 * HZ; 872 + /* 873 + * No idle threads: signal -EBUSY so the caller 874 + * can consider spawning another thread. Use 875 + * SP_TASK_STARTING to limit this signal to one 876 + * thread at a time; the caller clears this flag 877 + * after starting a new thread. 878 + */ 879 + if (!did_timeout && timeo && 880 + !test_and_set_bit(SP_TASK_STARTING, 881 + &pool->sp_flags)) 882 + ret = -EBUSY; 883 + } 892 884 893 885 trace_svc_xprt_dequeue(rqstp); 894 886 svc_handle_xprt(rqstp, xprt); ··· 919 887 } 920 888 } 921 889 #endif 890 + return ret; 922 891 } 923 892 EXPORT_SYMBOL_GPL(svc_recv); 924 893