/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/include/linux/sunrpc/svc.h
 *
 * RPC server declarations.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef SUNRPC_SVC_H
#define SUNRPC_SVC_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/folio_batch.h>
#include <linux/kthread.h>

/*
 * RPC service thread pool.
 *
 * Pool of threads and temporary sockets. Generally there is only
 * a single one of these per RPC service, but on NUMA machines those
 * services that can benefit from it (i.e. nfs but not lockd) will
 * have one pool per NUMA node. This optimisation reduces cross-
 * node traffic on multi-node NUMA NFS servers.
 */
struct svc_pool {
	unsigned int		sp_id;			/* pool id; also node id on NUMA */
	unsigned int		sp_nrthreads;		/* # of threads currently running in pool */
	unsigned int		sp_nrthrmin;		/* Min number of threads to run per pool */
	unsigned int		sp_nrthrmax;		/* Max requested number of threads in pool */
	struct lwq		sp_xprts;		/* pending transports */
	struct list_head	sp_all_threads;		/* all server threads */
	struct llist_head	sp_idle_threads;	/* idle server threads */

	/* statistics on pool operation */
	struct percpu_counter	sp_messages_arrived;
	struct percpu_counter	sp_sockets_queued;
	struct percpu_counter	sp_threads_woken;

	unsigned long		sp_flags;
} ____cacheline_aligned_in_smp;

/* bits for sp_flags */
enum {
	SP_TASK_PENDING,	/* still work to do even if no xprt is queued */
	SP_NEED_VICTIM,		/* One thread needs to agree to exit */
	SP_VICTIM_REMAINS,	/* One thread needs to actually exit */
	SP_TASK_STARTING,	/* Task has started but not added to idle yet */
};

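/*
 * Illustrative sketch, not part of this header's contract: a transport
 * with work to hand off picks the pool for the current CPU (on a pooled
 * service this is also the local NUMA node's pool) and wakes one idle
 * thread in it. Both helpers are declared later in this file.
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(serv);
 *
 *	svc_pool_wake_idle_thread(pool);
 */
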
/*
 * RPC service.
 *
 * An RPC service is a "daemon", possibly multithreaded, which
 * receives and processes incoming RPC messages.
 * It has one or more transport sockets associated with it, and maintains
 * a list of idle threads waiting for input.
 *
 * We currently do not support more than one RPC program per daemon.
 */
struct svc_serv {
	struct svc_program *	sv_programs;	/* RPC programs */
	struct svc_stat *	sv_stats;	/* RPC statistics */
	spinlock_t		sv_lock;
	unsigned int		sv_nprogs;	/* Number of sv_programs */
	unsigned int		sv_nrthreads;	/* # of running server threads */
	unsigned int		sv_max_payload;	/* datagram payload size */
	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
	unsigned int		sv_xdrsize;	/* XDR buffer size */
	struct list_head	sv_permsocks;	/* all permanent sockets */
	struct list_head	sv_tempsocks;	/* all temporary sockets */
	int			sv_tmpcnt;	/* count of temporary "valid" sockets */
	struct timer_list	sv_temptimer;	/* timer for aging temporary sockets */

	char *			sv_name;	/* service name */

	unsigned int		sv_nrpools;	/* number of thread pools */
	bool			sv_is_pooled;	/* is this a pooled service? */
	struct svc_pool *	sv_pools;	/* array of thread pools */
	int			(*sv_threadfn)(void *data);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	struct lwq		sv_cb_list;	/* queue for callback requests
						 * that arrive over the same
						 * connection */
	bool			sv_bc_enabled;	/* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};

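/*
 * Illustrative sketch of bringing a service up and tearing it down,
 * assuming an nfsd-style caller; "example_program", "example_stats" and
 * "example_thread_fn" are hypothetical names. The helpers used here are
 * declared later in this header.
 *
 *	struct svc_serv *serv;
 *
 *	serv = svc_create_pooled(&example_program, 1, &example_stats,
 *				 RPCSVC_MAXPAYLOAD, example_thread_fn);
 *	if (!serv)
 *		return -ENOMEM;
 *	...
 *	svc_destroy(&serv);
 */
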
/* This is used by pool_stats to find and lock an svc */
struct svc_info {
	struct svc_serv		*serv;
	struct mutex		*mutex;
};

void svc_destroy(struct svc_serv **svcp);

/*
 * Maximum payload size supported by a kernel RPC server.
 * This is used to determine the max number of pages nfsd is
 * willing to return in a single READ operation.
 *
 * These happen to all be powers of 2, which is not strictly
 * necessary but helps enforce the real limitation, which is
 * that they should be multiples of PAGE_SIZE.
 *
 * For UDP transports, a block plus NFS, RPC, and UDP headers
 * has to fit into the IP datagram limit of 64K. The largest
 * feasible number for all known page sizes is probably 48K,
 * but we choose 32K here. This is the same as the historical
 * Linux limit; someone who cares more about NFS/UDP performance
 * can test a larger number.
 *
 * For non-UDP transports we have more freedom. A size of 4MB is
 * chosen to accommodate clients that support larger I/O sizes.
 */
enum {
	RPCSVC_MAXPAYLOAD	= 4 * 1024 * 1024,
	RPCSVC_MAXPAYLOAD_TCP	= RPCSVC_MAXPAYLOAD,
	RPCSVC_MAXPAYLOAD_UDP	= 32 * 1024,
};

extern u32 svc_max_payload(const struct svc_rqst *rqstp);

/*
 * RPC Call and Reply messages each have their own page array.
 * rq_pages holds the incoming Call message; rq_respages holds
 * the outgoing Reply message. Both arrays are sized to
 * svc_serv_maxpages() entries and are allocated dynamically.
 *
 * Pages are sent using ->sendmsg with MSG_SPLICE_PAGES so each
 * server thread needs to allocate more to replace those used in
 * sending.
 *
 * rq_pages request page contract:
 *
 * Transport receive paths that move request data pages out of
 * rq_pages -- TCP multi-fragment reassembly (svc_tcp_save_pages)
 * and RDMA Read I/O (svc_rdma_clear_rqst_pages) -- NULL those
 * entries to prevent svc_rqst_release_pages() from freeing pages
 * still in transport use, and set rq_pages_nfree to the count.
 * svc_alloc_arg() refills only that many rq_pages entries.
 *
 * For rq_respages, svc_rqst_release_pages() NULLs entries in
 * [rq_respages, rq_next_page) after each RPC. svc_alloc_arg()
 * refills only that range.
 *
 * xdr_buf holds responses; the structure fits NFS read responses
 * (header, data pages, optional tail) and enables sharing of
 * client-side routines.
 *
 * The xdr_buf.head kvec always points to the first page in the
 * rq_*pages list. The xdr_buf.pages pointer points to the second
 * page on that list. xdr_buf.tail points to the end of the first
 * page. This assumes that the non-page part of an rpc reply will
 * fit in a page - NFSd ensures this. lockd also has no trouble.
 */

/**
 * svc_serv_maxpages - maximum count of pages needed for one RPC message
 * @serv: RPC service context
 *
 * Returns a count of pages or vectors that can hold the maximum
 * size RPC message for @serv.
 *
 * Each page array can hold at most one payload plus two
 * overhead pages (one for the RPC header, one for tail data).
 * nfsd_splice_actor() might need an extra page when a READ
 * payload is not page-aligned.
 */
static inline unsigned long svc_serv_maxpages(const struct svc_serv *serv)
{
	return DIV_ROUND_UP(serv->sv_max_mesg, PAGE_SIZE) + 2 + 1;
}

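/*
 * Worked example (illustrative only): with a 4KB PAGE_SIZE and a service
 * limited to the 32KB UDP payload, sv_max_mesg is 36KB (the payload plus
 * one page of overhead), so each page array gets
 * DIV_ROUND_UP(36K, 4K) + 2 + 1 = 9 + 2 + 1 = 12 entries.
 */
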
/*
 * The context of a single thread, including the request currently being
 * processed.
 *
 * RPC programs are free to use rq_private to stash thread-local information.
 * The sunrpc layer will not access it.
 */
struct svc_rqst {
	struct list_head	rq_all;		/* all threads list */
	struct llist_node	rq_idle;	/* On the idle list */
	struct rcu_head		rq_rcu_head;	/* for RCU deferred kfree */
	struct svc_xprt *	rq_xprt;	/* transport ptr */

	struct sockaddr_storage	rq_addr;	/* peer address */
	size_t			rq_addrlen;
	struct sockaddr_storage	rq_daddr;	/* dest addr of request
						 * - reply from here */
	size_t			rq_daddrlen;

	struct svc_serv *	rq_server;	/* RPC service definition */
	struct svc_pool *	rq_pool;	/* thread pool */
	const struct svc_procedure *rq_procinfo;/* procedure info */
	struct auth_ops *	rq_authop;	/* authentication flavour */
	struct svc_cred		rq_cred;	/* auth info */
	void *			rq_xprt_ctxt;	/* transport specific context ptr */
	struct svc_deferred_req *rq_deferred;	/* deferred request we are replaying */

	struct xdr_buf		rq_arg;
	struct xdr_stream	rq_arg_stream;
	struct xdr_stream	rq_res_stream;
	struct folio		*rq_scratch_folio;
	struct xdr_buf		rq_res;
	unsigned long		rq_maxpages;	/* entries per page array */
	unsigned long		rq_pages_nfree;	/* rq_pages entries NULLed by transport */
	struct page *		*rq_pages;	/* Call buffer pages */
	struct page *		*rq_respages;	/* Reply buffer pages */
	struct page *		*rq_next_page;	/* next reply page to use */
	struct page *		*rq_page_end;	/* one past the last reply page */

	struct folio_batch	rq_fbatch;
	struct bio_vec		*rq_bvec;

	__be32			rq_xid;		/* transmission id */
	u32			rq_prog;	/* program number */
	u32			rq_vers;	/* program version */
	u32			rq_proc;	/* procedure number */
	u32			rq_prot;	/* IP protocol */
	unsigned long		rq_flags;	/* flags field */
	ktime_t			rq_qtime;	/* enqueue time */

	void *			rq_argp;	/* decoded arguments */
	void *			rq_resp;	/* xdr'd results */
	__be32			*rq_accept_statp;
	void *			rq_auth_data;	/* flavor-specific data */
	__be32			rq_auth_stat;	/* authentication status */
	int			rq_auth_slack;	/* extra space xdr code
						 * should leave in head
						 * for krb5i, krb5p.
						 */
	int			rq_reserved;	/* space on socket outq
						 * reserved for this request
						 */
	ktime_t			rq_stime;	/* start time */

	struct cache_req	rq_chandle;	/* handle passed to caches for
						 * request delaying
						 */
	/* Catering to nfsd */
	struct auth_domain *	rq_client;	/* RPC peer info */
	struct auth_domain *	rq_gssclient;	/* "gss/"-style peer info */
	struct task_struct	*rq_task;	/* service thread */
	struct net		*rq_bc_net;	/* pointer to backchannel's
						 * net namespace
						 */

	int			rq_err;		/* Thread sets this to indicate
						 * initialisation success.
						 */

	unsigned long		bc_to_initval;
	unsigned int		bc_to_retries;
	unsigned int		rq_status_counter; /* RPC processing counter */
	void			*rq_private;	/* For use by the service thread */
};

/* bits for rq_flags */
enum {
	RQ_SECURE,		/* secure port */
	RQ_LOCAL,		/* local request */
	RQ_USEDEFERRAL,		/* use deferral */
	RQ_DROPME,		/* drop current reply */
	RQ_VICTIM,		/* Have agreed to shut down */
	RQ_DATA,		/* request has data */
};

#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)

/*
 * Rigorous type checking on sockaddr type conversions
 */
static inline struct sockaddr_in *svc_addr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_addr;
}

static inline struct sockaddr_in6 *svc_addr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_addr;
}

static inline struct sockaddr *svc_addr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_addr;
}

static inline struct sockaddr_in *svc_daddr_in(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in *) &rqst->rq_daddr;
}

static inline struct sockaddr_in6 *svc_daddr_in6(const struct svc_rqst *rqst)
{
	return (struct sockaddr_in6 *) &rqst->rq_daddr;
}

static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
{
	return (struct sockaddr *) &rqst->rq_daddr;
}

/**
 * svc_thread_should_stop - check if this thread should stop
 * @rqstp: the thread that might need to stop
 *
 * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
 * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
 * the victim using this function. It should then promptly call
 * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
 * so the task waiting for a thread to exit can wake and continue.
 *
 * Return values:
 *   %true: caller should invoke svc_exit_thread()
 *   %false: caller should do nothing
 */
static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
{
	if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
		set_bit(RQ_VICTIM, &rqstp->rq_flags);

	return test_bit(RQ_VICTIM, &rqstp->rq_flags);
}

/**
 * svc_thread_init_status - report whether thread has initialised successfully
 * @rqstp: the thread in question
 * @err: errno code
 *
 * After performing any initialisation that could fail, and before starting
 * normal work, each sunrpc svc_thread must call svc_thread_init_status()
 * with an appropriate error, or zero.
 *
 * If zero is passed, the thread is ready and must continue until
 * svc_thread_should_stop() returns true. If a non-zero error is passed
 * the call will not return - the thread will exit.
 */
static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err)
{
	store_release_wake_up(&rqstp->rq_err, err);
	if (err)
		kthread_exit(1);
}

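/*
 * Taken together, the two helpers above give the usual shape of a sunrpc
 * service thread. This is an illustrative sketch only; "example_thread"
 * is a hypothetical name, and svc_recv() comes from the svc transport
 * layer rather than this header.
 *
 *	static int example_thread(void *data)
 *	{
 *		struct svc_rqst *rqstp = data;
 *
 *		svc_thread_init_status(rqstp, 0);
 *		while (!svc_thread_should_stop(rqstp))
 *			svc_recv(rqstp);
 *		svc_exit_thread(rqstp);
 *		return 0;
 *	}
 */
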
struct svc_deferred_req {
	u32			prot;	/* protocol (UDP or TCP) */
	struct svc_xprt		*xprt;
	struct sockaddr_storage	addr;	/* where reply must go */
	size_t			addrlen;
	struct sockaddr_storage	daddr;	/* where reply must come from */
	size_t			daddrlen;
	void			*xprt_ctxt;
	struct cache_deferred_req handle;
	int			argslen;
	__be32			args[];
};

struct svc_process_info {
	union {
		int  (*dispatch)(struct svc_rqst *rqstp);
		struct {
			unsigned int lovers;
			unsigned int hivers;
		} mismatch;
	};
};

/*
 * RPC program - an array of these can use the same transport endpoint
 */
struct svc_program {
	u32			pg_prog;	/* program number */
	unsigned int		pg_lovers;	/* lowest version */
	unsigned int		pg_hivers;	/* highest version */
	unsigned int		pg_nvers;	/* number of versions */
	const struct svc_version **pg_vers;	/* version array */
	char *			pg_name;	/* service name */
	char *			pg_class;	/* class name: services sharing authentication */
	enum svc_auth_status	(*pg_authenticate)(struct svc_rqst *rqstp);
	__be32			(*pg_init_request)(struct svc_rqst *,
						   const struct svc_program *,
						   struct svc_process_info *);
	int			(*pg_rpcbind_set)(struct net *net,
						  const struct svc_program *,
						  u32 version, int family,
						  unsigned short proto,
						  unsigned short port);
};

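/*
 * Illustrative sketch of a minimal program definition; every name with an
 * "example_" prefix, and the program number, are hypothetical.
 * svc_generic_init_request() and svc_generic_rpcbind_set() are declared
 * later in this header.
 *
 *	static const struct svc_version *example_versions[] = {
 *		[2] = &example_version2,
 *	};
 *
 *	static struct svc_program example_program = {
 *		.pg_prog		= 400999,
 *		.pg_nvers		= ARRAY_SIZE(example_versions),
 *		.pg_vers		= example_versions,
 *		.pg_name		= "example",
 *		.pg_class		= "example",
 *		.pg_init_request	= svc_generic_init_request,
 *		.pg_rpcbind_set		= svc_generic_rpcbind_set,
 *	};
 */
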
/*
 * RPC program version
 */
struct svc_version {
	u32			vs_vers;	/* version number */
	u32			vs_nproc;	/* number of procedures */
	const struct svc_procedure *vs_proc;	/* per-procedure info */
	unsigned long __percpu	*vs_count;	/* call counts */
	u32			vs_xdrsize;	/* xdrsize needed for this version */

	/* Don't register with rpcbind */
	bool			vs_hidden;

	/* Don't care if the rpcbind registration fails */
	bool			vs_rpcb_optnl;

	/* Need xprt with congestion control */
	bool			vs_need_cong_ctrl;

	/* Dispatch function */
	int			(*vs_dispatch)(struct svc_rqst *rqstp);
};

/*
 * RPC procedure info
 */
struct svc_procedure {
	/* process the request: */
	__be32			(*pc_func)(struct svc_rqst *);
	/* XDR decode args: */
	bool			(*pc_decode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR encode result: */
	bool			(*pc_encode)(struct svc_rqst *rqstp,
					     struct xdr_stream *xdr);
	/* XDR free result: */
	void			(*pc_release)(struct svc_rqst *);
	unsigned int		pc_argsize;	/* argument struct size */
	unsigned int		pc_argzero;	/* how much of argument to clear */
	unsigned int		pc_ressize;	/* result struct size */
	unsigned int		pc_cachetype;	/* cache info (NFS) */
	unsigned int		pc_xdrressize;	/* maximum size of XDR reply */
	const char *		pc_name;	/* for display */
};

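/*
 * Illustrative sketch of one procedure table entry; the "example_" names
 * are hypothetical. pc_argzero may be smaller than pc_argsize when the
 * decoder always overwrites the trailing fields of the argument struct.
 *
 *	[EXAMPLE_PROC_READ] = {
 *		.pc_func	= example_proc_read,
 *		.pc_decode	= example_xdr_decode_read_args,
 *		.pc_encode	= example_xdr_encode_read_res,
 *		.pc_release	= example_xdr_release_read_res,
 *		.pc_argsize	= sizeof(struct example_read_args),
 *		.pc_argzero	= sizeof(struct example_read_args),
 *		.pc_ressize	= sizeof(struct example_read_res),
 *		.pc_name	= "READ",
 *	},
 */
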
/*
 * Function prototypes.
 */
int sunrpc_set_pool_mode(const char *val);
int sunrpc_get_pool_mode(char *val, size_t size);
void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
int svc_bind(struct svc_serv *serv, struct net *net);
struct svc_serv *svc_create(struct svc_program *, unsigned int,
			    int (*threadfn)(void *data));
bool		   svc_rqst_replace_page(struct svc_rqst *rqstp,
					 struct page *page);
void		   svc_rqst_release_pages(struct svc_rqst *rqstp);
int		   svc_new_thread(struct svc_serv *serv, struct svc_pool *pool);
void		   svc_exit_thread(struct svc_rqst *);
struct svc_serv *  svc_create_pooled(struct svc_program *prog,
				     unsigned int nprog,
				     struct svc_stat *stats,
				     unsigned int bufsize,
				     int (*threadfn)(void *data));
int		   svc_set_pool_threads(struct svc_serv *serv, struct svc_pool *pool,
					unsigned int min_threads, unsigned int max_threads);
int		   svc_set_num_threads(struct svc_serv *serv, unsigned int min_threads,
				       unsigned int nrservs);
int		   svc_pool_stats_open(struct svc_info *si, struct file *file);
void		   svc_process(struct svc_rqst *rqstp);
void		   svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int		   svc_register(const struct svc_serv *, struct net *, const int,
				const unsigned short, const unsigned short);

void		   svc_wake_up(struct svc_serv *);
void		   svc_reserve(struct svc_rqst *rqstp, int space);
void		   svc_pool_wake_idle_thread(struct svc_pool *pool);
struct svc_pool   *svc_pool_for_cpu(struct svc_serv *serv);
char *		   svc_print_addr(struct svc_rqst *, char *, size_t);
const char *	   svc_proc_name(const struct svc_rqst *rqstp);
int		   svc_encode_result_payload(struct svc_rqst *rqstp,
					     unsigned int offset,
					     unsigned int length);
char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
					     struct kvec *first, void *p,
					     size_t total);
__be32		   svc_generic_init_request(struct svc_rqst *rqstp,
					    const struct svc_program *progp,
					    struct svc_process_info *procinfo);
int		   svc_generic_rpcbind_set(struct net *net,
					   const struct svc_program *progp,
					   u32 version, int family,
					   unsigned short proto,
					   unsigned short port);

#define	RPC_MAX_ADDRBUFLEN	(63U)

/**
 * svc_rqst_page_release - release a page associated with an RPC transaction
 * @rqstp: RPC transaction context
 * @page: page to release
 *
 * Released pages are batched and freed together, reducing
 * allocator pressure under heavy RPC workloads.
 */
static inline void svc_rqst_page_release(struct svc_rqst *rqstp,
					 struct page *page)
{
	if (!folio_batch_add(&rqstp->rq_fbatch, page_folio(page)))
		__folio_batch_release(&rqstp->rq_fbatch);
}

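/*
 * Illustrative sketch only: a caller that is finished with one reply page
 * hands it to the batch and clears its slot so the page cannot be
 * released a second time.
 *
 *	svc_rqst_page_release(rqstp, *rqstp->rq_respages);
 *	*rqstp->rq_respages = NULL;
 */
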
/*
 * When we want to reduce the size of the reserved space in the response
 * buffer, we need to take into account the size of any checksum data that
 * may be at the end of the packet. This is difficult to determine exactly
 * for all cases without actually generating the checksum, so we just use a
 * static value.
 */
static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
{
	svc_reserve(rqstp, space + rqstp->rq_auth_slack);
}

/**
 * svcxdr_init_decode - Prepare an xdr_stream for Call decoding
 * @rqstp: controlling server RPC transaction context
 */
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct xdr_buf *buf = &rqstp->rq_arg;
	struct kvec *argv = buf->head;

	WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
	buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;

	xdr_init_decode(xdr, buf, argv->iov_base, NULL);
	xdr_set_scratch_folio(xdr, rqstp->rq_scratch_folio);
}

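/*
 * A decoder then consumes the Call arguments through rq_arg_stream. This
 * is an illustrative sketch; the "example_" names are hypothetical, and
 * xdr_stream_decode_u32() comes from linux/sunrpc/xdr.h.
 *
 *	static bool example_xdr_decode_args(struct svc_rqst *rqstp,
 *					    struct xdr_stream *xdr)
 *	{
 *		struct example_args *args = rqstp->rq_argp;
 *
 *		return xdr_stream_decode_u32(xdr, &args->count) == 0;
 *	}
 */
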
/**
 * svcxdr_init_encode - Prepare an xdr_stream for svc Reply encoding
 * @rqstp: controlling server RPC transaction context
 */
static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *resv = buf->head;

	xdr_reset_scratch_buffer(xdr);

	xdr->buf = buf;
	xdr->iov = resv;
	xdr->p = resv->iov_base + resv->iov_len;
	xdr->end = resv->iov_base + PAGE_SIZE;
	buf->len = resv->iov_len;
	xdr->page_ptr = buf->pages - 1;
	buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
	xdr->rqst = NULL;
}

/**
 * svcxdr_encode_opaque_pages - Insert pages into an xdr_stream
 * @rqstp: RPC transaction context
 * @xdr: xdr_stream to be updated
 * @pages: array of pages to insert
 * @base: starting offset of first data byte in @pages
 * @len: number of data bytes in @pages to insert
 *
 * After the @pages are added, the tail iovec is instantiated pointing
 * to end of the head buffer, and the stream is set up to encode
 * subsequent items into the tail.
 */
static inline void svcxdr_encode_opaque_pages(struct svc_rqst *rqstp,
					      struct xdr_stream *xdr,
					      struct page **pages,
					      unsigned int base,
					      unsigned int len)
{
	xdr_write_pages(xdr, pages, base, len);
	xdr->page_ptr = rqstp->rq_next_page - 1;
}

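/*
 * Illustrative sketch of a read-style result encoder; "resp" and its
 * fields are hypothetical. The payload pages are already in place, so
 * only their location and length are handed to the stream.
 * xdr_stream_encode_u32() comes from linux/sunrpc/xdr.h.
 *
 *	if (xdr_stream_encode_u32(xdr, resp->count) < 0)
 *		return false;
 *	svcxdr_encode_opaque_pages(rqstp, xdr, resp->pages, resp->base,
 *				   resp->count);
 */
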
/**
 * svcxdr_set_auth_slack - reserve response buffer space for the auth flavor
 * @rqstp: RPC transaction
 * @slack: buffer space to reserve for the transaction's security flavor
 *
 * Set the request's slack space requirement, and set aside that much
 * space in the rqstp's rq_res.head for use when the auth wraps the Reply.
 */
static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;
	struct xdr_buf *buf = &rqstp->rq_res;
	struct kvec *resv = buf->head;

	rqstp->rq_auth_slack = slack;

	xdr->end -= XDR_QUADLEN(slack);
	buf->buflen -= rqstp->rq_auth_slack;

	WARN_ON(xdr->iov != resv);
	WARN_ON(xdr->p > xdr->end);
}

/**
 * svcxdr_set_accept_stat - Reserve space for the accept_stat field
 * @rqstp: RPC transaction context
 *
 * Return values:
 *   %true: Success
 *   %false: No response buffer space was available
 */
static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_res_stream;

	rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
	if (unlikely(!rqstp->rq_accept_statp))
		return false;
	*rqstp->rq_accept_statp = rpc_success;
	return true;
}

#endif /* SUNRPC_SVC_H */