/*
 * linux/net/sunrpc/svc.c
 *
 * High-level RPC service routines
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * Multiple thread pools and NUMAisation
 * Copyright (c) 2006 Silicon Graphics, Inc.
 * by Greg Banks <gnb@melbourne.sgi.com>
 */

#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/module.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/clnt.h>

#define RPCDBG_FACILITY RPCDBG_SVCDSP

#define svc_serv_is_pooled(serv)    ((serv)->sv_function)

/*
 * Mode for mapping cpus to pools.
 */
enum {
        SVC_POOL_AUTO = -1,     /* choose one of the others */
        SVC_POOL_GLOBAL,        /* no mapping, just a single global pool
                                 * (legacy & UP mode) */
        SVC_POOL_PERCPU,        /* one pool per cpu */
        SVC_POOL_PERNODE        /* one pool per numa node */
};
#define SVC_POOL_DEFAULT SVC_POOL_GLOBAL

/*
 * Structure for mapping cpus to pools and vice versa.
 * Set up once during sunrpc initialisation.
 */
static struct svc_pool_map {
        int count;                      /* How many svc_servs use us */
        int mode;                       /* Note: int not enum to avoid
                                         * warnings about "enumeration value
                                         * not handled in switch" */
        unsigned int npools;
        unsigned int *pool_to;          /* maps pool id to cpu or node */
        unsigned int *to_pool;          /* maps cpu or node to pool id */
} svc_pool_map = {
        .count = 0,
        .mode = SVC_POOL_DEFAULT
};
static DEFINE_MUTEX(svc_pool_map_mutex);        /* protects svc_pool_map.count only */
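
/*
 * Illustrative example: in SVC_POOL_PERCPU mode on a machine whose four
 * cpus 0..3 are all online, to_pool[] and pool_to[] are both the
 * identity map; in SVC_POOL_PERNODE mode on a two-node machine there
 * are two pools, and the arrays translate between node ids and pool ids
 * instead.
 */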

static int
param_set_pool_mode(const char *val, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;
        struct svc_pool_map *m = &svc_pool_map;
        int err;

        mutex_lock(&svc_pool_map_mutex);

        err = -EBUSY;
        if (m->count)
                goto out;

        err = 0;
        if (!strncmp(val, "auto", 4))
                *ip = SVC_POOL_AUTO;
        else if (!strncmp(val, "global", 6))
                *ip = SVC_POOL_GLOBAL;
        else if (!strncmp(val, "percpu", 6))
                *ip = SVC_POOL_PERCPU;
        else if (!strncmp(val, "pernode", 7))
                *ip = SVC_POOL_PERNODE;
        else
                err = -EINVAL;

out:
        mutex_unlock(&svc_pool_map_mutex);
        return err;
}

static int
param_get_pool_mode(char *buf, struct kernel_param *kp)
{
        int *ip = (int *)kp->arg;

        switch (*ip)
        {
        case SVC_POOL_AUTO:
                return strlcpy(buf, "auto", 20);
        case SVC_POOL_GLOBAL:
                return strlcpy(buf, "global", 20);
        case SVC_POOL_PERCPU:
                return strlcpy(buf, "percpu", 20);
        case SVC_POOL_PERNODE:
                return strlcpy(buf, "pernode", 20);
        default:
                return sprintf(buf, "%d", *ip);
        }
}

module_param_call(pool_mode, param_set_pool_mode, param_get_pool_mode,
                  &svc_pool_map.mode, 0644);
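
/*
 * The pool mode can thus be chosen at module load time, e.g.
 * "modprobe sunrpc pool_mode=pernode", or (the parameter is 0644)
 * written via /sys/module/sunrpc/parameters/pool_mode.  Changing it is
 * refused with -EBUSY while any pooled service is still using the map
 * (m->count != 0).
 */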

/*
 * Detect best pool mapping mode heuristically,
 * according to the machine's topology.
 */
static int
svc_pool_map_choose_mode(void)
{
        unsigned int node;

        if (num_online_nodes() > 1) {
                /*
                 * Actually have multiple NUMA nodes,
                 * so split pools on NUMA node boundaries
                 */
                return SVC_POOL_PERNODE;
        }

        node = any_online_node(node_online_map);
        if (nr_cpus_node(node) > 2) {
                /*
                 * Non-trivial SMP, or CONFIG_NUMA on
                 * non-NUMA hardware, e.g. with a generic
                 * x86_64 kernel on Xeons.  In this case we
                 * want to divide the pools on cpu boundaries.
                 */
                return SVC_POOL_PERCPU;
        }

        /* default: one global pool */
        return SVC_POOL_GLOBAL;
}
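
/*
 * In short: more than one online NUMA node gives pernode; a single node
 * with more than two cpus gives percpu; anything smaller (UP or two-way
 * SMP) keeps the single global pool.
 */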

/*
 * Allocate the to_pool[] and pool_to[] arrays.
 * Returns 0 on success or an errno.
 */
static int
svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
{
        m->to_pool = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->to_pool)
                goto fail;
        m->pool_to = kcalloc(maxpools, sizeof(unsigned int), GFP_KERNEL);
        if (!m->pool_to)
                goto fail_free;

        return 0;

fail_free:
        kfree(m->to_pool);
fail:
        return -ENOMEM;
}

/*
 * Initialise the pool map for SVC_POOL_PERCPU mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_percpu(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_cpu_ids;
        unsigned int pidx = 0;
        unsigned int cpu;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_online_cpu(cpu) {
                BUG_ON(pidx > maxpools);
                m->to_pool[cpu] = pidx;
                m->pool_to[pidx] = cpu;
                pidx++;
        }
        /* cpus brought online later all get mapped to pool0, sorry */

        return pidx;
}


/*
 * Initialise the pool map for SVC_POOL_PERNODE mode.
 * Returns number of pools or <0 on error.
 */
static int
svc_pool_map_init_pernode(struct svc_pool_map *m)
{
        unsigned int maxpools = nr_node_ids;
        unsigned int pidx = 0;
        unsigned int node;
        int err;

        err = svc_pool_map_alloc_arrays(m, maxpools);
        if (err)
                return err;

        for_each_node_with_cpus(node) {
                /* some architectures (e.g. SN2) have cpuless nodes */
                BUG_ON(pidx > maxpools);
                m->to_pool[node] = pidx;
                m->pool_to[pidx] = node;
                pidx++;
        }
        /* nodes brought online later all get mapped to pool0, sorry */

        return pidx;
}


/*
 * Add a reference to the global map of cpus to pools (and
 * vice versa).  Initialise the map if we're the first user.
 * Returns the number of pools.
 */
static unsigned int
svc_pool_map_get(void)
{
        struct svc_pool_map *m = &svc_pool_map;
        int npools = -1;

        mutex_lock(&svc_pool_map_mutex);

        if (m->count++) {
                mutex_unlock(&svc_pool_map_mutex);
                return m->npools;
        }

        if (m->mode == SVC_POOL_AUTO)
                m->mode = svc_pool_map_choose_mode();

        switch (m->mode) {
        case SVC_POOL_PERCPU:
                npools = svc_pool_map_init_percpu(m);
                break;
        case SVC_POOL_PERNODE:
                npools = svc_pool_map_init_pernode(m);
                break;
        }

        if (npools < 0) {
                /* default, or memory allocation failure */
                npools = 1;
                m->mode = SVC_POOL_GLOBAL;
        }
        m->npools = npools;

        mutex_unlock(&svc_pool_map_mutex);
        return m->npools;
}


/*
 * Drop a reference to the global map of cpus to pools.
 * When the last reference is dropped, the map data is
 * freed; this allows the sysadmin to change the pool
 * mode using the pool_mode module option without
 * rebooting or re-loading sunrpc.ko.
 */
static void
svc_pool_map_put(void)
{
        struct svc_pool_map *m = &svc_pool_map;

        mutex_lock(&svc_pool_map_mutex);

        if (!--m->count) {
                m->mode = SVC_POOL_DEFAULT;
                kfree(m->to_pool);
                kfree(m->pool_to);
                m->npools = 0;
        }

        mutex_unlock(&svc_pool_map_mutex);
}
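
/*
 * Note the pairing: svc_create_pooled() takes the map reference via
 * svc_pool_map_get(), and svc_destroy() drops it via svc_pool_map_put()
 * for pooled services, so the map persists exactly as long as some
 * pooled svc_serv is using it.
 */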

/*
 * Set the current thread's cpus_allowed mask so that it
 * will only run on cpus in the given pool.
 *
 * Returns 1 and fills in oldmask iff a cpumask was applied.
 */
static inline int
svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
{
        struct svc_pool_map *m = &svc_pool_map;

        /*
         * The caller checks for sv_nrpools > 1, which
         * implies that we've been initialized.
         */
        BUG_ON(m->count == 0);

        switch (m->mode)
        {
        default:
                return 0;
        case SVC_POOL_PERCPU:
        {
                unsigned int cpu = m->pool_to[pidx];

                *oldmask = current->cpus_allowed;
                set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
                return 1;
        }
        case SVC_POOL_PERNODE:
        {
                unsigned int node = m->pool_to[pidx];
                node_to_cpumask_ptr(nodecpumask, node);

                *oldmask = current->cpus_allowed;
                set_cpus_allowed_ptr(current, nodecpumask);
                return 1;
        }
        }
}

/*
 * Use the mapping mode to choose a pool for a given CPU.
 * Used when enqueueing an incoming RPC.  Always returns
 * a non-NULL pool pointer.
 */
struct svc_pool *
svc_pool_for_cpu(struct svc_serv *serv, int cpu)
{
        struct svc_pool_map *m = &svc_pool_map;
        unsigned int pidx = 0;

        /*
         * An uninitialised map happens in a pure client when
         * lockd is brought up, so silently treat it the
         * same as SVC_POOL_GLOBAL.
         */
        if (svc_serv_is_pooled(serv)) {
                switch (m->mode) {
                case SVC_POOL_PERCPU:
                        pidx = m->to_pool[cpu];
                        break;
                case SVC_POOL_PERNODE:
                        pidx = m->to_pool[cpu_to_node(cpu)];
                        break;
                }
        }
        return &serv->sv_pools[pidx % serv->sv_nrpools];
}
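
/*
 * For example, in SVC_POOL_PERCPU mode a request arriving on cpu 3 is
 * queued to pool 3, and in SVC_POOL_PERNODE mode to the pool of cpu 3's
 * NUMA node; the final modulo guards against a pool index that is out
 * of range for this particular serv.
 */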

/*
 * Create an RPC service
 */
static struct svc_serv *
__svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
             void (*shutdown)(struct svc_serv *serv))
{
        struct svc_serv *serv;
        unsigned int vers;
        unsigned int xdrsize;
        unsigned int i;

        if (!(serv = kzalloc(sizeof(*serv), GFP_KERNEL)))
                return NULL;
        serv->sv_name = prog->pg_name;
        serv->sv_program = prog;
        serv->sv_nrthreads = 1;
        serv->sv_stats = prog->pg_stats;
        if (bufsize > RPCSVC_MAXPAYLOAD)
                bufsize = RPCSVC_MAXPAYLOAD;
        serv->sv_max_payload = bufsize ? bufsize : 4096;
        serv->sv_max_mesg = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
        serv->sv_shutdown = shutdown;
        xdrsize = 0;
        while (prog) {
                prog->pg_lovers = prog->pg_nvers - 1;
                for (vers = 0; vers < prog->pg_nvers; vers++)
                        if (prog->pg_vers[vers]) {
                                prog->pg_hivers = vers;
                                if (prog->pg_lovers > vers)
                                        prog->pg_lovers = vers;
                                if (prog->pg_vers[vers]->vs_xdrsize > xdrsize)
                                        xdrsize = prog->pg_vers[vers]->vs_xdrsize;
                        }
                prog = prog->pg_next;
        }
        serv->sv_xdrsize = xdrsize;
        INIT_LIST_HEAD(&serv->sv_tempsocks);
        INIT_LIST_HEAD(&serv->sv_permsocks);
        init_timer(&serv->sv_temptimer);
        spin_lock_init(&serv->sv_lock);

        serv->sv_nrpools = npools;
        serv->sv_pools =
                kcalloc(serv->sv_nrpools, sizeof(struct svc_pool),
                        GFP_KERNEL);
        if (!serv->sv_pools) {
                kfree(serv);
                return NULL;
        }

        for (i = 0; i < serv->sv_nrpools; i++) {
                struct svc_pool *pool = &serv->sv_pools[i];

                dprintk("svc: initialising pool %u for %s\n",
                        i, serv->sv_name);

                pool->sp_id = i;
                INIT_LIST_HEAD(&pool->sp_threads);
                INIT_LIST_HEAD(&pool->sp_sockets);
                INIT_LIST_HEAD(&pool->sp_all_threads);
                spin_lock_init(&pool->sp_lock);
        }

        /* Remove any stale portmap registrations */
        svc_register(serv, 0, 0);

        return serv;
}

struct svc_serv *
svc_create(struct svc_program *prog, unsigned int bufsize,
           void (*shutdown)(struct svc_serv *serv))
{
        return __svc_create(prog, bufsize, /*npools*/1, shutdown);
}
EXPORT_SYMBOL(svc_create);

struct svc_serv *
svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
                  void (*shutdown)(struct svc_serv *serv),
                  svc_thread_fn func, int sig, struct module *mod)
{
        struct svc_serv *serv;
        unsigned int npools = svc_pool_map_get();

        serv = __svc_create(prog, bufsize, npools, shutdown);

        if (serv != NULL) {
                serv->sv_function = func;
                serv->sv_kill_signal = sig;
                serv->sv_module = mod;
        }

        return serv;
}
EXPORT_SYMBOL(svc_create_pooled);
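
/*
 * A hypothetical pooled caller (nfsd-style; the my_* names below are
 * illustrative, not part of this file) would create its service and
 * then size the thread count:
 *
 *      serv = svc_create_pooled(&my_program, my_bufsize, my_shutdown,
 *                               my_thread_fn, SIGINT, THIS_MODULE);
 *      if (!serv)
 *              return -ENOMEM;
 *      error = svc_set_num_threads(serv, NULL, nrservs);
 *
 * where my_thread_fn is the svc_thread_fn each created kernel thread
 * runs and SIGINT is the signal later used to kill those threads.
 */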

/*
 * Destroy an RPC service.  Should be called with the BKL held
 */
void
svc_destroy(struct svc_serv *serv)
{
        dprintk("svc: svc_destroy(%s, %d)\n",
                serv->sv_program->pg_name,
                serv->sv_nrthreads);

        if (serv->sv_nrthreads) {
                if (--(serv->sv_nrthreads) != 0) {
                        svc_sock_update_bufs(serv);
                        return;
                }
        } else
                printk("svc_destroy: no threads for serv=%p!\n", serv);

        del_timer_sync(&serv->sv_temptimer);

        svc_close_all(&serv->sv_tempsocks);

        if (serv->sv_shutdown)
                serv->sv_shutdown(serv);

        svc_close_all(&serv->sv_permsocks);

        BUG_ON(!list_empty(&serv->sv_permsocks));
        BUG_ON(!list_empty(&serv->sv_tempsocks));

        cache_clean_deferred(serv);

        if (svc_serv_is_pooled(serv))
                svc_pool_map_put();

        /* Unregister service with the portmapper */
        svc_register(serv, 0, 0);
        kfree(serv->sv_pools);
        kfree(serv);
}
EXPORT_SYMBOL(svc_destroy);

/*
 * Allocate an RPC server's buffer space.
 * We allocate pages and place them in rq_argpages.
 */
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
        unsigned int pages, arghi;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request
                                       * and reply.  We assume that each is
                                       * at most one page.
                                       */
        arghi = 0;
        BUG_ON(pages > RPCSVC_MAXPAGES);
        while (pages) {
                struct page *p = alloc_page(GFP_KERNEL);
                if (!p)
                        break;
                rqstp->rq_pages[arghi++] = p;
                pages--;
        }
        return pages == 0;
}
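
/*
 * Worked example: with PAGE_SIZE == 4096 and a 32K maximum payload,
 * __svc_create() sets sv_max_mesg = roundup(32768 + 4096, 4096) = 36864,
 * so svc_init_buffer() allocates 36864/4096 + 1 = 10 pages per thread.
 */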

/*
 * Release an RPC server buffer
 */
static void
svc_release_buffer(struct svc_rqst *rqstp)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
                if (rqstp->rq_pages[i])
                        put_page(rqstp->rq_pages[i]);
}

struct svc_rqst *
svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool)
{
        struct svc_rqst *rqstp;

        rqstp = kzalloc(sizeof(*rqstp), GFP_KERNEL);
        if (!rqstp)
                goto out_enomem;

        init_waitqueue_head(&rqstp->rq_wait);

        serv->sv_nrthreads++;
        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads++;
        list_add(&rqstp->rq_all, &pool->sp_all_threads);
        spin_unlock_bh(&pool->sp_lock);
        rqstp->rq_server = serv;
        rqstp->rq_pool = pool;

        rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
        if (!rqstp->rq_argp)
                goto out_thread;

        rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL);
        if (!rqstp->rq_resp)
                goto out_thread;

        if (!svc_init_buffer(rqstp, serv->sv_max_mesg))
                goto out_thread;

        return rqstp;
out_thread:
        svc_exit_thread(rqstp);
out_enomem:
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(svc_prepare_thread);

/*
 * Create a thread in the given pool.  Caller must hold BKL.
 * On a NUMA or SMP machine, with a multi-pool serv, the thread
 * will be restricted to run on the cpus belonging to the pool.
 */
static int
__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
                    struct svc_pool *pool)
{
        struct svc_rqst *rqstp;
        int error = -ENOMEM;
        int have_oldmask = 0;
        cpumask_t uninitialized_var(oldmask);

        rqstp = svc_prepare_thread(serv, pool);
        if (IS_ERR(rqstp)) {
                error = PTR_ERR(rqstp);
                goto out;
        }

        if (serv->sv_nrpools > 1)
                have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);

        error = kernel_thread((int (*)(void *)) func, rqstp, 0);

        if (have_oldmask)
                set_cpus_allowed_ptr(current, &oldmask);

        if (error < 0)
                goto out_thread;
        svc_sock_update_bufs(serv);
        error = 0;
out:
        return error;

out_thread:
        svc_exit_thread(rqstp);
        goto out;
}
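
/*
 * Note the trick above: the parent temporarily adopts the pool's
 * cpumask before kernel_thread(), so the child inherits it, and then
 * the parent restores its own mask from oldmask.
 */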

/*
 * Choose a pool in which to create a new thread, for svc_set_num_threads
 */
static inline struct svc_pool *
choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        if (pool != NULL)
                return pool;

        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}
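
/*
 * When pool == NULL, successive calls walk sv_pools[] round-robin via
 * the shared *state counter, spreading new threads evenly across the
 * pools.
 */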

/*
 * Choose a thread to kill, for svc_set_num_threads
 */
static inline struct task_struct *
choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
{
        unsigned int i;
        struct task_struct *task = NULL;

        if (pool != NULL) {
                spin_lock_bh(&pool->sp_lock);
        } else {
                /* choose a pool in round-robin fashion */
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        spin_lock_bh(&pool->sp_lock);
                        if (!list_empty(&pool->sp_all_threads))
                                goto found_pool;
                        spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }

found_pool:
        if (!list_empty(&pool->sp_all_threads)) {
                struct svc_rqst *rqstp;

                /*
                 * Remove from the pool->sp_all_threads list
                 * so we don't try to kill it again.
                 */
                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
                list_del_init(&rqstp->rq_all);
                task = rqstp->rq_task;
        }
        spin_unlock_bh(&pool->sp_lock);

        return task;
}

/*
 * Create or destroy enough threads to bring the number of threads
 * to the given value.  If `pool' is non-NULL, applies only to threads
 * in that pool, otherwise round-robins between all pools.  Must be
 * called with a svc_get() reference and the BKL held.
 *
 * Destroying threads relies on the service threads filling in
 * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
 * has been created using svc_create_pooled().
 *
 * Based on code that used to be in nfsd_svc() but tweaked
 * to be pool-aware.
 */
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
        struct task_struct *victim;
        int error = 0;
        unsigned int state = serv->sv_nrthreads - 1;

        if (pool == NULL) {
                /* The -1 assumes caller has done a svc_get() */
                nrservs -= (serv->sv_nrthreads - 1);
        } else {
                spin_lock_bh(&pool->sp_lock);
                nrservs -= pool->sp_nrthreads;
                spin_unlock_bh(&pool->sp_lock);
        }

        /* create new threads */
        while (nrservs > 0) {
                nrservs--;
                __module_get(serv->sv_module);
                error = __svc_create_thread(serv->sv_function, serv,
                                            choose_pool(serv, pool, &state));
                if (error < 0) {
                        module_put(serv->sv_module);
                        break;
                }
        }
        /* destroy old threads */
        while (nrservs < 0 &&
               (victim = choose_victim(serv, pool, &state)) != NULL) {
                send_sig(serv->sv_kill_signal, victim, 1);
                nrservs++;
        }

        return error;
}
EXPORT_SYMBOL(svc_set_num_threads);
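
/*
 * For example, svc_set_num_threads(serv, NULL, 8) on a serv currently
 * running three threads computes nrservs = 8 - 3 = 5 and spawns five
 * new threads round-robin across the pools; a target below the current
 * count makes nrservs negative, and that many victims are signalled
 * with sv_kill_signal instead.
 */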

/*
 * Called from a server thread as it's exiting.  Caller must hold BKL.
 */
void
svc_exit_thread(struct svc_rqst *rqstp)
{
        struct svc_serv *serv = rqstp->rq_server;
        struct svc_pool *pool = rqstp->rq_pool;

        svc_release_buffer(rqstp);
        kfree(rqstp->rq_resp);
        kfree(rqstp->rq_argp);
        kfree(rqstp->rq_auth_data);

        spin_lock_bh(&pool->sp_lock);
        pool->sp_nrthreads--;
        list_del(&rqstp->rq_all);
        spin_unlock_bh(&pool->sp_lock);

        kfree(rqstp);

        /* Release the server */
        if (serv)
                svc_destroy(serv);
}
EXPORT_SYMBOL(svc_exit_thread);

/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
svc_register(struct svc_serv *serv, int proto, unsigned short port)
{
        struct svc_program *progp;
        unsigned long flags;
        unsigned int i;
        int error = 0, dummy;

        if (!port)
                clear_thread_flag(TIF_SIGPENDING);

        for (progp = serv->sv_program; progp; progp = progp->pg_next) {
                for (i = 0; i < progp->pg_nvers; i++) {
                        if (progp->pg_vers[i] == NULL)
                                continue;

                        dprintk("svc: svc_register(%s, %s, %d, %d)%s\n",
                                progp->pg_name,
                                proto == IPPROTO_UDP ? "udp" : "tcp",
                                port,
                                i,
                                progp->pg_vers[i]->vs_hidden ?
                                        " (but not telling portmap)" : "");

                        if (progp->pg_vers[i]->vs_hidden)
                                continue;

                        error = rpcb_register(progp->pg_prog, i, proto, port, &dummy);
                        if (error < 0)
                                break;
                        if (port && !dummy) {
                                error = -EACCES;
                                break;
                        }
                }
        }

        if (!port) {
                spin_lock_irqsave(&current->sighand->siglock, flags);
                recalc_sigpending();
                spin_unlock_irqrestore(&current->sighand->siglock, flags);
        }

        return error;
}
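
/*
 * Each non-hidden version of each program maps to one portmap entry;
 * e.g. a server listening on TCP port 2049 would call
 * svc_register(serv, IPPROTO_TCP, 2049), and passing proto == 0 and
 * port == 0 walks the same loops to unregister everything.
 */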

/*
 * Printk the given error with the address of the client that caused it.
 */
static int
__attribute__ ((format (printf, 2, 3)))
svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
{
        va_list args;
        int r;
        char buf[RPC_MAX_ADDRBUFLEN];

        if (!net_ratelimit())
                return 0;

        printk(KERN_WARNING "svc: %s: ",
               svc_print_addr(rqstp, buf, sizeof(buf)));

        va_start(args, fmt);
        r = vprintk(fmt, args);
        va_end(args);

        return r;
}

/*
 * Process the RPC request.
 */
int
svc_process(struct svc_rqst *rqstp)
{
        struct svc_program *progp;
        struct svc_version *versp = NULL;       /* compiler food */
        struct svc_procedure *procp = NULL;
        struct kvec *argv = &rqstp->rq_arg.head[0];
        struct kvec *resv = &rqstp->rq_res.head[0];
        struct svc_serv *serv = rqstp->rq_server;
        kxdrproc_t xdr;
        __be32 *statp;
        u32 dir, prog, vers, proc;
        __be32 auth_stat, rpc_stat;
        int auth_res;
        __be32 *reply_statp;

        rpc_stat = rpc_success;

        if (argv->iov_len < 6*4)
                goto err_short_len;

        /* setup response xdr_buf.
         * Initially it has just one page
         */
        rqstp->rq_resused = 1;
        resv->iov_base = page_address(rqstp->rq_respages[0]);
        resv->iov_len = 0;
        rqstp->rq_res.pages = rqstp->rq_respages + 1;
        rqstp->rq_res.len = 0;
        rqstp->rq_res.page_base = 0;
        rqstp->rq_res.page_len = 0;
        rqstp->rq_res.buflen = PAGE_SIZE;
        rqstp->rq_res.tail[0].iov_base = NULL;
        rqstp->rq_res.tail[0].iov_len = 0;
        /* Will be turned off only in gss privacy case: */
        rqstp->rq_splice_ok = 1;

        /* Setup reply header */
        rqstp->rq_xprt->xpt_ops->xpo_prep_reply_hdr(rqstp);

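        /*
         * The call header now being decoded is, per RFC 1831, a
         * sequence of 32-bit big-endian words: xid, msg_type
         * (0 == CALL), rpcvers (must be 2), prog, vers, proc, then
         * the credential and verifier; hence the minimum length of
         * 6*4 bytes checked above.
         */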
        rqstp->rq_xid = svc_getu32(argv);
        svc_putu32(resv, rqstp->rq_xid);

        dir  = svc_getnl(argv);
        vers = svc_getnl(argv);

        /* First words of reply: */
        svc_putnl(resv, 1);             /* REPLY */

        if (dir != 0)                   /* direction != CALL */
                goto err_bad_dir;
        if (vers != 2)                  /* RPC version number */
                goto err_bad_rpc;

        /* Save position in case we later decide to reject: */
        reply_statp = resv->iov_base + resv->iov_len;

        svc_putnl(resv, 0);             /* ACCEPT */

        rqstp->rq_prog = prog = svc_getnl(argv);        /* program number */
        rqstp->rq_vers = vers = svc_getnl(argv);        /* version number */
        rqstp->rq_proc = proc = svc_getnl(argv);        /* procedure number */

        for (progp = serv->sv_program; progp; progp = progp->pg_next)
                if (prog == progp->pg_prog)
                        break;

        /*
         * Decode auth data, and add verifier to reply buffer.
         * We do this before anything else in order to get a decent
         * auth verifier.
         */
        auth_res = svc_authenticate(rqstp, &auth_stat);
        /* Also give the program a chance to reject this call: */
        if (auth_res == SVC_OK && progp) {
                auth_stat = rpc_autherr_badcred;
                auth_res = progp->pg_authenticate(rqstp);
        }
        switch (auth_res) {
        case SVC_OK:
                break;
        case SVC_GARBAGE:
                goto err_garbage;
        case SVC_SYSERR:
                rpc_stat = rpc_system_err;
                goto err_bad;
        case SVC_DENIED:
                goto err_bad_auth;
        case SVC_DROP:
                goto dropit;
        case SVC_COMPLETE:
                goto sendit;
        }

        if (progp == NULL)
                goto err_bad_prog;

        if (vers >= progp->pg_nvers ||
            !(versp = progp->pg_vers[vers]))
                goto err_bad_vers;

        procp = versp->vs_proc + proc;
        if (proc >= versp->vs_nproc || !procp->pc_func)
                goto err_bad_proc;
        rqstp->rq_server = serv;
        rqstp->rq_procinfo = procp;

        /* Syntactic check complete */
        serv->sv_stats->rpccnt++;

        /* Build the reply header. */
        statp = resv->iov_base + resv->iov_len;
        svc_putnl(resv, RPC_SUCCESS);

        /* Bump per-procedure stats counter */
        procp->pc_count++;

        /* Initialize storage for argp and resp */
        memset(rqstp->rq_argp, 0, procp->pc_argsize);
        memset(rqstp->rq_resp, 0, procp->pc_ressize);

        /* un-reserve some of the out-queue now that we have a
         * better idea of reply size
         */
        if (procp->pc_xdrressize)
                svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);

        /* Call the function that processes the request. */
        if (!versp->vs_dispatch) {
                /* Decode arguments */
                xdr = procp->pc_decode;
                if (xdr && !xdr(rqstp, argv->iov_base, rqstp->rq_argp))
                        goto err_garbage;

                *statp = procp->pc_func(rqstp, rqstp->rq_argp, rqstp->rq_resp);

                /* Encode reply */
                if (*statp == rpc_drop_reply) {
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
                if (*statp == rpc_success && (xdr = procp->pc_encode) != NULL &&
                    !xdr(rqstp, resv->iov_base + resv->iov_len, rqstp->rq_resp)) {
                        dprintk("svc: failed to encode reply\n");
                        /* serv->sv_stats->rpcsystemerr++; */
                        *statp = rpc_system_err;
                }
        } else {
                dprintk("svc: calling dispatcher\n");
                if (!versp->vs_dispatch(rqstp, statp)) {
                        /* Release reply info */
                        if (procp->pc_release)
                                procp->pc_release(rqstp, NULL, rqstp->rq_resp);
                        goto dropit;
                }
        }

        /* Check RPC status result */
        if (*statp != rpc_success)
                resv->iov_len = ((void *)statp) - resv->iov_base + 4;

        /* Release reply info */
        if (procp->pc_release)
                procp->pc_release(rqstp, NULL, rqstp->rq_resp);

        if (procp->pc_encode == NULL)
                goto dropit;

sendit:
        if (svc_authorise(rqstp))
                goto dropit;
        return svc_send(rqstp);

dropit:
        svc_authorise(rqstp);   /* doesn't hurt to call this twice */
        dprintk("svc: svc_process dropit\n");
        svc_drop(rqstp);
        return 0;

err_short_len:
        svc_printk(rqstp, "short len %Zd, dropping request\n",
                   argv->iov_len);

        goto dropit;                    /* drop request */

err_bad_dir:
        svc_printk(rqstp, "bad direction %d, dropping request\n", dir);

        serv->sv_stats->rpcbadfmt++;
        goto dropit;                    /* drop request */

err_bad_rpc:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 0);     /* RPC_MISMATCH */
        svc_putnl(resv, 2);     /* Only RPCv2 supported */
        svc_putnl(resv, 2);
        goto sendit;

err_bad_auth:
        dprintk("svc: authentication failed (%d)\n", ntohl(auth_stat));
        serv->sv_stats->rpcbadauth++;
        /* Restore write pointer to location of accept status: */
        xdr_ressize_check(rqstp, reply_statp);
        svc_putnl(resv, 1);     /* REJECT */
        svc_putnl(resv, 1);     /* AUTH_ERROR */
        svc_putnl(resv, ntohl(auth_stat));      /* status */
        goto sendit;

err_bad_prog:
        dprintk("svc: unknown program %d\n", prog);
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_UNAVAIL);
        goto sendit;

err_bad_vers:
        svc_printk(rqstp, "unknown version (%d for prog %d, %s)\n",
                   vers, prog, progp->pg_name);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROG_MISMATCH);
        svc_putnl(resv, progp->pg_lovers);
        svc_putnl(resv, progp->pg_hivers);
        goto sendit;

err_bad_proc:
        svc_printk(rqstp, "unknown procedure (%d)\n", proc);

        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, RPC_PROC_UNAVAIL);
        goto sendit;

err_garbage:
        svc_printk(rqstp, "failed to decode args\n");

        rpc_stat = rpc_garbage_args;
err_bad:
        serv->sv_stats->rpcbadfmt++;
        svc_putnl(resv, ntohl(rpc_stat));
        goto sendit;
}
EXPORT_SYMBOL(svc_process);

/*
 * Return (transport-specific) limit on the rpc payload.
 */
u32 svc_max_payload(const struct svc_rqst *rqstp)
{
        u32 max = rqstp->rq_xprt->xpt_class->xcl_max_payload;

        if (rqstp->rq_server->sv_max_payload < max)
                max = rqstp->rq_server->sv_max_payload;
        return max;
}
EXPORT_SYMBOL_GPL(svc_max_payload);