/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY	RPCDBG_SVCDSP
#define RPC_PARANOIA 1
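/*
 * RPC_PARANOIA gates the extra sanity printk's in svc_process()
 * below (short requests, bad direction/version/procedure).
 */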
/*
 * Mode for mapping cpus to pools.
 */
enum {
	SVC_POOL_NONE = -1,	/* uninitialised, choose one of the others */
	SVC_POOL_GLOBAL,	/* no mapping, just a single global pool
				 * (legacy & UP mode) */
	SVC_POOL_PERCPU,	/* one pool per cpu */
	SVC_POOL_PERNODE	/* one pool per numa node */
};
/*
 * Structure for mapping cpus to pools and vice versa.
 * Setup once during sunrpc initialisation.
 */
static struct svc_pool_map {
	int mode;			/* Note: int not enum to avoid
					 * warnings about "enumeration value
					 * not handled in switch" */
	unsigned int npools;
	unsigned int *pool_to;		/* maps pool id to cpu or node */
	unsigned int *to_pool;		/* maps cpu or node to pool id */
} svc_pool_map = {
	.mode = SVC_POOL_NONE
};
/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
	unsigned int node;

	if (num_online_nodes() > 1) {
		/*
		 * Actually have multiple NUMA nodes,
		 * so split pools on NUMA node boundaries
		 */
		return SVC_POOL_PERNODE;
	}

	node = any_online_node(node_online_map);
	if (nr_cpus_node(node) > 2) {
		/*
		 * Non-trivial SMP, or CONFIG_NUMA on
		 * non-NUMA hardware, e.g. with a generic
		 * x86_64 kernel on Xeons.  In this case we
		 * want to divide the pools on cpu boundaries.
		 */
		return SVC_POOL_PERCPU;
	}

	/* default: one global pool */
	return SVC_POOL_GLOBAL;
}
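/*
 * Concretely: a machine with two or more online NUMA nodes gets one
 * pool per node; a single-node box with more than two cpus in that
 * node gets one pool per cpu; anything smaller (UP or 2-way) keeps
 * the single global pool.
 */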
/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
	m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->to_pool)
		goto fail;
	m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
	if (!m->pool_to)
		goto fail_free;

	return 0;

fail_free:
	kfree(m->to_pool);
fail:
	return -ENOMEM;
}
/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_processor_id()+1;
	unsigned int pidx = 0;
	unsigned int cpu;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_online_cpu(cpu) {
		BUG_ON(pidx > maxpools);
		m->to_pool[cpu] = pidx;
		m->pool_to[pidx] = cpu;
		pidx++;
	}
	/* cpus brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
	unsigned int maxpools = highest_possible_node_id()+1;
	unsigned int pidx = 0;
	unsigned int node;
	int err;

	err = svc_pool_map_alloc_arrays(m, maxpools);
	if (err)
		return err;

	for_each_node_with_cpus(node) {
		/* some architectures (e.g. SN2) have cpuless nodes */
		BUG_ON(pidx > maxpools);
		m->to_pool[node] = pidx;
		m->pool_to[pidx] = node;
		pidx++;
	}
	/* nodes brought online later all get mapped to pool0, sorry */

	return pidx;
}
/*
 * Build the global map of cpus to pools and vice versa.
 */
static unsigned int
svc_pool_map_init(void)
{
	struct svc_pool_map *m = &svc_pool_map;
	int npools = -1;

	if (m->mode != SVC_POOL_NONE)
		return m->npools;	/* already initialised */

	m->mode = svc_pool_map_choose_mode();

	switch (m->mode) {
	case SVC_POOL_PERCPU:
		npools = svc_pool_map_init_percpu(m);
		break;
	case SVC_POOL_PERNODE:
		npools = svc_pool_map_init_pernode(m);
		break;
	}

	if (npools < 0) {
		/* default, or memory allocation failure */
		npools = 1;
		m->mode = SVC_POOL_GLOBAL;
	}
	m->npools = npools;

	return m->npools;
}
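/*
 * Note the fallback above: if the per-cpu or per-node setup fails
 * (e.g. array allocation failure), the map silently degrades to a
 * single global pool rather than failing service creation.
 */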
/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int node;		/* or cpu */

	/*
	 * The caller checks for sv_nrpools > 1, which
	 * implies that we've been initialized and the
	 * map mode is not NONE.
	 */
	BUG_ON(m->mode == SVC_POOL_NONE);

	switch (m->mode) {
	default:
		return 0;
	case SVC_POOL_PERCPU:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, cpumask_of_cpu(node));
		return 1;
	case SVC_POOL_PERNODE:
		node = m->pool_to[pidx];
		*oldmask = current->cpus_allowed;
		set_cpus_allowed(current, node_to_cpumask(node));
		return 1;
	}
}
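/*
 * Since *oldmask is written only when 1 is returned, callers must
 * use the return value to decide whether to restore the mask (see
 * __svc_create_thread() below).
 */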
/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
	struct svc_pool_map *m = &svc_pool_map;
	unsigned int pidx = 0;

	/*
	 * SVC_POOL_NONE happens in a pure client when
	 * lockd is brought up, so silently treat it the
	 * same as SVC_POOL_GLOBAL.
	 */
	switch (m->mode) {
	case SVC_POOL_PERCPU:
		pidx = m->to_pool[cpu];
		break;
	case SVC_POOL_PERNODE:
		pidx = m->to_pool[cpu_to_node(cpu)];
		break;
	}
	return &serv->sv_pools[pidx % serv->sv_nrpools];
}
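/*
 * For illustration, a transport enqueueing a request would do
 * something like (sketch only; the real caller lives in svcsock.c):
 *
 *	struct svc_pool *pool = svc_pool_for_cpu(serv, smp_processor_id());
 *
 * The final modulo keeps the lookup safe even if the global map
 * claims more pools than this particular serv was created with.
 */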
/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
	   void (*shutdown)(struct svc_serv *serv))
{
	struct svc_serv	*serv;
	int vers;
	unsigned int xdrsize;
	unsigned int i;

	if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
		return NULL;
	serv->sv_name      = prog->pg_name;
	serv->sv_program   = prog;
	serv->sv_nrthreads = 1;
	serv->sv_stats     = prog->pg_stats;
	serv->sv_bufsz     = bufsize ? bufsize : 4096;
	serv->sv_shutdown  = shutdown;
	xdrsize = 0;
	while (prog) {
		prog->pg_lovers = prog->pg_nvers-1;
		for (vers = 0; vers < prog->pg_nvers; vers++)
			if (prog->pg_vers[vers]) {
				prog->pg_hivers = vers;
				if (prog->pg_lovers > vers)
					prog->pg_lovers = vers;
				if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
					xdrsize = prog->pg_vers[vers]->vs_xdrsize;
			}
		prog = prog->pg_next;
	}
	serv->sv_xdrsize = xdrsize;
	INIT_LIST_HEAD(&serv->sv_tempsocks);
	INIT_LIST_HEAD(&serv->sv_permsocks);
	init_timer(&serv->sv_temptimer);
	spin_lock_init(&serv->sv_lock);

	serv->sv_nrpools = npools;
	serv->sv_pools =
		kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
			GFP_KERNEL);
	if (!serv->sv_pools) {
		kfree(serv);
		return NULL;
	}

	for (i = 0; i < serv->sv_nrpools; i++) {
		struct svc_pool *pool = &serv->sv_pools[i];

		dprintk("initialising pool %u for %s\n",
				i, serv->sv_name);

		pool->sp_id = i;
		INIT_LIST_HEAD(&pool->sp_threads);
		INIT_LIST_HEAD(&pool->sp_sockets);
		INIT_LIST_HEAD(&pool->sp_all_threads);
		spin_lock_init(&pool->sp_lock);
	}

	/* Remove any stale portmap registrations */
	svc_register(serv, 0, 0);

	return serv;
}
struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv))
{
	return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
		void (*shutdown)(struct svc_serv *serv),
		svc_thread_fn func, int sig, struct module *mod)
{
	struct svc_serv *serv;
	unsigned int npools = svc_pool_map_init();

	serv = __svc_create(prog, bufsize, npools, shutdown);

	if (serv != NULL) {
		serv->sv_function = func;
		serv->sv_kill_signal = sig;
		serv->sv_module = mod;
	}

	return serv;
}
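/*
 * For illustration, nfsd creates its pooled service along these
 * lines (sketch from memory; see fs/nfsd/nfssvc.c for the real
 * call site and buffer-size logic):
 *
 *	serv = svc_create_pooled(&nfsd_program, bufsize,
 *				 nfsd_last_thread, nfsd,
 *				 SIGINT, THIS_MODULE);
 */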
/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
	struct svc_sock	*svsk;

	dprintk("RPC: svc_destroy(%s, %d)\n",
				serv->sv_program->pg_name,
				serv->sv_nrthreads);

	if (serv->sv_nrthreads) {
		if (--(serv->sv_nrthreads) != 0) {
			svc_sock_update_bufs(serv);
			return;
		}
	} else
		printk("svc_destroy: no threads for serv=%p!\n", serv);

	del_timer_sync(&serv->sv_temptimer);

	while (!list_empty(&serv->sv_tempsocks)) {
		svsk = list_entry(serv->sv_tempsocks.next,
				  struct svc_sock,
				  sk_list);
		svc_delete_socket(svsk);
	}
	if (serv->sv_shutdown)
		serv->sv_shutdown(serv);

	while (!list_empty(&serv->sv_permsocks)) {
		svsk = list_entry(serv->sv_permsocks.next,
				  struct svc_sock,
				  sk_list);
		svc_delete_socket(svsk);
	}

	cache_clean_deferred(serv);

	/* Unregister service with the portmapper */
	svc_register(serv, 0, 0);
	kfree(serv->sv_pools);
	kfree(serv);
}
/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_pages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
	int pages;
	int arghi;

	if (size > RPCSVC_MAXPAYLOAD)
		size = RPCSVC_MAXPAYLOAD;
	pages = 2 + (size + PAGE_SIZE - 1) / PAGE_SIZE;
	arghi = 0;
	BUG_ON(pages > RPCSVC_MAXPAGES);
	while (pages) {
		struct page *p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		rqstp->rq_pages[arghi++] = p;
		pages--;
	}
	return !pages;
}
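/*
 * Note the return convention above: nonzero (true) means every page
 * was allocated; a partial allocation returns 0 and relies on
 * svc_release_buffer() to free whatever was obtained.
 */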
/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
		if (rqstp->rq_pages[i])
			put_page(rqstp->rq_pages[i]);
}
/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
		    struct svc_pool *pool)
{
	struct svc_rqst	*rqstp;
	int		error = -ENOMEM;
	int		have_oldmask = 0;
	cpumask_t	oldmask;

	rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
	if (!rqstp)
		goto out;

	init_waitqueue_head(&rqstp->rq_wait);

	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
	 || !svc_init_buffer(rqstp, serv->sv_bufsz))
		goto out_thread;

	serv->sv_nrthreads++;
	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads++;
	list_add(&rqstp->rq_all, &pool->sp_all_threads);
	spin_unlock_bh(&pool->sp_lock);
	rqstp->rq_server = serv;
	rqstp->rq_pool = pool;

	if (serv->sv_nrpools > 1)
		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

	error = kernel_thread((int (*)(void *)) func, rqstp, 0);

	if (have_oldmask)
		set_cpus_allowed(current, oldmask);

	if (error < 0)
		goto out_thread;
	svc_sock_update_bufs(serv);
	error = 0;
out:
	return error;

out_thread:
	svc_exit_thread(rqstp);
	goto out;
}
/*
 * Create a thread in the default pool.  Caller must hold BKL.
 */
int
svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
{
	return __svc_create_thread(func, serv, &serv->sv_pools[0]);
}
/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	if (pool != NULL)
		return pool;

	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
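/*
 * choose_pool() and choose_victim() share the caller's *state as a
 * round-robin cursor: creation post-increments it while destruction
 * pre-decrements it, so a create/destroy pair tends to hit the same
 * pool.  Unsigned wraparound is harmless under the modulo.
 */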
/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
	unsigned int i;
	struct task_struct *task = NULL;

	if (pool != NULL) {
		spin_lock_bh(&pool->sp_lock);
	} else {
		/* choose a pool in round-robin fashion */
		for (i = 0; i < serv->sv_nrpools; i++) {
			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
			spin_lock_bh(&pool->sp_lock);
			if (!list_empty(&pool->sp_all_threads))
				goto found_pool;
			spin_unlock_bh(&pool->sp_lock);
		}
		return NULL;
	}

found_pool:
	if (!list_empty(&pool->sp_all_threads)) {
		struct svc_rqst *rqstp;

		/*
		 * Remove from the pool->sp_all_threads list
		 * so we don't try to kill it again.
		 */
		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
		list_del_init(&rqstp->rq_all);
		task = rqstp->rq_task;
	}
	spin_unlock_bh(&pool->sp_lock);

	return task;
}
/*
 * Create or destroy enough new threads to make the number
 * of threads the given number.  If `pool' is non-NULL, applies
 * only to threads in that pool, otherwise round-robins between
 * all pools.  Must be called with a svc_get() reference and
 * the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
	struct task_struct *victim;
	int error = 0;
	unsigned int state = serv->sv_nrthreads-1;

	if (pool == NULL) {
		/* The -1 assumes caller has done a svc_get() */
		nrservs -= (serv->sv_nrthreads-1);
	} else {
		spin_lock_bh(&pool->sp_lock);
		nrservs -= pool->sp_nrthreads;
		spin_unlock_bh(&pool->sp_lock);
	}

	/* create new threads */
	while (nrservs > 0) {
		nrservs--;
		__module_get(serv->sv_module);
		error = __svc_create_thread(serv->sv_function, serv,
					    choose_pool(serv, pool, &state));
		if (error < 0) {
			module_put(serv->sv_module);
			break;
		}
	}
	/* destroy old threads */
	while (nrservs < 0 &&
	       (victim = choose_victim(serv, pool, &state)) != NULL) {
		send_sig(serv->sv_kill_signal, victim, 1);
		nrservs++;
	}

	return error;
}
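/*
 * For illustration, nfsd drives this from its "threads" knob,
 * roughly (sketch; see fs/nfsd/nfssvc.c):
 *
 *	error = svc_set_num_threads(nfsd_serv, NULL, nthreads);
 *
 * Passing a NULL pool balances across all pools round-robin.
 */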
/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
	struct svc_serv	*serv = rqstp->rq_server;
	struct svc_pool	*pool = rqstp->rq_pool;

	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);

	spin_lock_bh(&pool->sp_lock);
	pool->sp_nrthreads--;
	list_del(&rqstp->rq_all);
	spin_unlock_bh(&pool->sp_lock);

	kfree(rqstp);

	/* Release the server */
	if (serv)
		svc_destroy(serv);
}
/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
	struct svc_program	*progp;
	unsigned long		flags;
	int			i, error = 0, dummy;

	if (!port)
		clear_thread_flag(TIF_SIGPENDING);

	for (progp = serv->sv_program; progp; progp = progp->pg_next) {
		for (i = 0; i < progp->pg_nvers; i++) {
			if (progp->pg_vers[i] == NULL)
				continue;

			dprintk("RPC: svc_register(%s, %s, %d, %d)%s\n",
					progp->pg_name,
					proto == IPPROTO_UDP ? "udp" : "tcp",
					port,
					i,
					progp->pg_vers[i]->vs_hidden ?
						" (but not telling portmap)" : "");

			if (progp->pg_vers[i]->vs_hidden)
				continue;

			error = rpc_register(progp->pg_prog, i, proto, port, &dummy);
			if (error < 0)
				break;
			if (port && !dummy) {
				error = -EACCES;
				break;
			}
		}
	}

	if (!port) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return error;
}
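/*
 * The TIF_SIGPENDING dance above exists because unregistration
 * typically runs from a signalled (dying) thread: a pending signal
 * would make the portmap RPCs bail out early, so it is masked for
 * the duration and recalc_sigpending() restores the truth after.
 */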
/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
	struct svc_program	*progp;
	struct svc_version	*versp = NULL;	/* compiler food */
	struct svc_procedure	*procp = NULL;
	struct kvec *		argv = &rqstp->rq_arg.head[0];
	struct kvec *		resv = &rqstp->rq_res.head[0];
	struct svc_serv		*serv = rqstp->rq_server;
	kxdrproc_t		xdr;
	__be32			*statp;
	u32			dir, prog, vers, proc;
	__be32			auth_stat, rpc_stat;
	int			auth_res;
	__be32			*reply_statp;
	rpc_stat = rpc_success;

	if (argv->iov_len < 6*4)
		goto err_short_len;

	/* setup response xdr_buf.
	 * Initially it has just one page
	 */
	rqstp->rq_resused = 1;
	resv->iov_base = page_address(rqstp->rq_respages[0]);
	resv->iov_len = 0;
	rqstp->rq_res.pages = rqstp->rq_respages + 1;
	rqstp->rq_res.len = 0;
	rqstp->rq_res.page_base = 0;
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.buflen = PAGE_SIZE;
	rqstp->rq_res.tail[0].iov_base = NULL;
	rqstp->rq_res.tail[0].iov_len = 0;
	/* Will be turned off only in gss privacy case: */
	rqstp->rq_sendfile_ok = 1;
	/* tcp needs a space for the record length... */
	if (rqstp->rq_prot == IPPROTO_TCP)
		svc_putnl(resv, 0);

	rqstp->rq_xid = svc_getu32(argv);
	svc_putu32(resv, rqstp->rq_xid);
	dir  = svc_getnl(argv);
	vers = svc_getnl(argv);

	/* First words of reply: */
	svc_putnl(resv, 1);		/* REPLY */

	if (dir != 0)		/* direction != CALL */
		goto err_bad_dir;
	if (vers != 2)		/* RPC version number */
		goto err_bad_rpc;

	/* Save position in case we later decide to reject: */
	reply_statp = resv->iov_base + resv->iov_len;

	svc_putnl(resv, 0);		/* ACCEPT */

	rqstp->rq_prog = prog = svc_getnl(argv);	/* program number */
	rqstp->rq_vers = vers = svc_getnl(argv);	/* version number */
	rqstp->rq_proc = proc = svc_getnl(argv);	/* procedure number */

	for (progp = serv->sv_program; progp; progp = progp->pg_next)
		if (prog == progp->pg_prog)
			break;
	/*
	 * Decode auth data, and add verifier to reply buffer.
	 * We do this before anything else in order to get a decent
	 * auth verifier.
	 */
	auth_res = svc_authenticate(rqstp, &auth_stat);
	/* Also give the program a chance to reject this call: */
	if (auth_res == SVC_OK && progp) {
		auth_stat = rpc_autherr_badcred;
		auth_res = progp->pg_authenticate(rqstp);
	}
	switch (auth_res) {
	case SVC_OK:
		break;
	case SVC_GARBAGE:
		rpc_stat = rpc_garbage_args;
		goto err_bad;
	case SVC_SYSERR:
		rpc_stat = rpc_system_err;
		goto err_bad;
	case SVC_DENIED:
		goto err_bad_auth;
	case SVC_DROP:
		goto dropit;
	case SVC_COMPLETE:
		goto sendit;
	}
	if (progp == NULL)
		goto err_bad_prog;

	if (vers >= progp->pg_nvers ||
	  !(versp = progp->pg_vers[vers]))
		goto err_bad_vers;

	procp = versp->vs_proc + proc;
	if (proc >= versp->vs_nproc || !procp->pc_func)
		goto err_bad_proc;
	rqstp->rq_server   = serv;
	rqstp->rq_procinfo = procp;
799 serv
->sv_stats
->rpccnt
++;
801 /* Build the reply header. */
802 statp
= resv
->iov_base
+resv
->iov_len
;
803 svc_putnl(resv
, RPC_SUCCESS
);
805 /* Bump per-procedure stats counter */
808 /* Initialize storage for argp and resp */
809 memset(rqstp
->rq_argp
, 0, procp
->pc_argsize
);
810 memset(rqstp
->rq_resp
, 0, procp
->pc_ressize
);
812 /* un-reserve some of the out-queue now that we have a
813 * better idea of reply size
815 if (procp
->pc_xdrressize
)
816 svc_reserve(rqstp
, procp
->pc_xdrressize
<<2);
	/* Call the function that processes the request. */
	if (!versp->vs_dispatch) {
		/* Decode arguments */
		xdr = procp->pc_decode;
		if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
			goto err_garbage;

		*statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

		/* Encode reply */
		if (*statp == rpc_success && (xdr = procp->pc_encode)
		 && !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
			dprintk("svc: failed to encode reply\n");
			/* serv->sv_stats->rpcsystemerr++; */
			*statp = rpc_system_err;
		}
	} else {
		dprintk("svc: calling dispatcher\n");
		if (!versp->vs_dispatch(rqstp, statp)) {
			/* Release reply info */
			if (procp->pc_release)
				procp->pc_release(rqstp, NULL, rqstp->rq_resp);
			goto dropit;
		}
	}
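	/*
	 * A NULL vs_dispatch above selects the common path: decode
	 * arguments, call pc_func, encode the reply.  A version that
	 * supplies its own vs_dispatch takes over those steps itself
	 * and returns 0 to have the request silently dropped.
	 */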
	/* Check RPC status result */
	if (*statp != rpc_success)
		resv->iov_len = ((void*)statp) - resv->iov_base + 4;

	/* Release reply info */
	if (procp->pc_release)
		procp->pc_release(rqstp, NULL, rqstp->rq_resp);

	if (procp->pc_encode == NULL)
		goto dropit;

 sendit:
	if (svc_authorise(rqstp))
		goto dropit;
	return svc_send(rqstp);

 dropit:
	svc_authorise(rqstp);	/* doesn't hurt to call this twice */
	dprintk("svc: svc_process dropit\n");
	svc_drop(rqstp);
	return 0;
868 printk("svc: short len %Zd, dropping request\n", argv
->iov_len
);
870 goto dropit
; /* drop request */
874 printk("svc: bad direction %d, dropping request\n", dir
);
876 serv
->sv_stats
->rpcbadfmt
++;
877 goto dropit
; /* drop request */
880 serv
->sv_stats
->rpcbadfmt
++;
881 svc_putnl(resv
, 1); /* REJECT */
882 svc_putnl(resv
, 0); /* RPC_MISMATCH */
883 svc_putnl(resv
, 2); /* Only RPCv2 supported */
888 dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat
));
889 serv
->sv_stats
->rpcbadauth
++;
890 /* Restore write pointer to location of accept status: */
891 xdr_ressize_check(rqstp
, reply_statp
);
892 svc_putnl(resv
, 1); /* REJECT */
893 svc_putnl(resv
, 1); /* AUTH_ERROR */
894 svc_putnl(resv
, ntohl(auth_stat
)); /* status */
898 dprintk("svc: unknown program %d\n", prog
);
899 serv
->sv_stats
->rpcbadfmt
++;
900 svc_putnl(resv
, RPC_PROG_UNAVAIL
);
905 printk("svc: unknown version (%d)\n", vers
);
907 serv
->sv_stats
->rpcbadfmt
++;
908 svc_putnl(resv
, RPC_PROG_MISMATCH
);
909 svc_putnl(resv
, progp
->pg_lovers
);
910 svc_putnl(resv
, progp
->pg_hivers
);
915 printk("svc: unknown procedure (%d)\n", proc
);
917 serv
->sv_stats
->rpcbadfmt
++;
918 svc_putnl(resv
, RPC_PROC_UNAVAIL
);
923 printk("svc: failed to decode args\n");
925 rpc_stat
= rpc_garbage_args
;
927 serv
->sv_stats
->rpcbadfmt
++;
928 svc_putnl(resv
, ntohl(rpc_stat
));
/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
	int max = RPCSVC_MAXPAYLOAD_TCP;

	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
		max = RPCSVC_MAXPAYLOAD_UDP;
	if (rqstp->rq_server->sv_bufsz < max)
		max = rqstp->rq_server->sv_bufsz;
	return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);
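/*
 * Callers use this to clamp client-visible limits; e.g. nfsd bounds
 * its advertised rsize/wsize by svc_max_payload(rqstp) (sketch of
 * the idea, see fs/nfsd for the real logic).
 */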