/*
 * linux/fs/nfs/callback.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFSv4 callback handling
 */

#include <linux/completion.h>
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/nfs_fs.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/kthread.h>

#include <net/inet_sock.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "internal.h"

#define NFSDBG_FACILITY NFSDBG_CALLBACK

struct nfs_callback_data {
        unsigned int users;
        struct svc_rqst *rqst;
        struct task_struct *task;
};

static struct nfs_callback_data nfs_callback_info;
static DEFINE_MUTEX(nfs_callback_mutex);
static struct svc_program nfs4_callback_program;

unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;

static int param_set_port(const char *val, struct kernel_param *kp)
{
        char *endp;
        int num = simple_strtol(val, &endp, 0);
        if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max)
                return -EINVAL;
        *((int *)kp->arg) = num;
        return 0;
}

module_param_call(callback_tcpport, param_set_port, param_get_int,
                  &nfs_callback_set_tcpport, 0644);

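/*
 * Illustrative example only (values assumed, not taken from this file):
 * with the 0644 permissions above, the callback listener port can be
 * pinned from userspace, e.g.
 *
 *        modprobe nfs callback_tcpport=4049
 *
 * or, typically, by writing to /sys/module/nfs/parameters/callback_tcpport
 * before the first NFSv4 mount.  The port 4049 is just an example; any
 * value in [0, 65535] passes param_set_port(), and the default of 0 is
 * expected to let the kernel choose an anonymous port.
 */
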
/*
 * This is the callback kernel thread.
 */
static int
nfs_callback_svc(void *vrqstp)
{
        int err, preverr = 0;
        struct svc_rqst *rqstp = vrqstp;

        set_freezable();

        /*
         * FIXME: do we really need to run this under the BKL? If so, please
         * add a comment about what it's intended to protect.
         */
        lock_kernel();
        while (!kthread_should_stop()) {
                /*
                 * Listen for a request on the socket
                 */
                err = svc_recv(rqstp, MAX_SCHEDULE_TIMEOUT);
                if (err == -EAGAIN || err == -EINTR) {
                        preverr = err;
                        continue;
                }
                if (err < 0) {
                        if (err != preverr) {
                                printk(KERN_WARNING "%s: unexpected error "
                                       "from svc_recv (%d)\n", __func__, err);
                                preverr = err;
                        }
                        schedule_timeout_uninterruptible(HZ);
                        continue;
                }
                preverr = err;
                svc_process(rqstp);
        }
        unlock_kernel();
        return 0;
}

/*
 * Bring up the callback thread if it is not already up.
 */
int nfs_callback_up(void)
{
        struct svc_serv *serv = NULL;
        int ret = 0;

        mutex_lock(&nfs_callback_mutex);
        if (nfs_callback_info.users++ || nfs_callback_info.task != NULL)
                goto out;
        serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, NULL);
        ret = -ENOMEM;
        if (!serv)
                goto out_err;

        ret = svc_create_xprt(serv, "tcp", nfs_callback_set_tcpport,
                              SVC_SOCK_ANONYMOUS);
        if (ret <= 0)
                goto out_err;
        nfs_callback_tcpport = ret;
        dprintk("Callback port = 0x%x\n", nfs_callback_tcpport);

        nfs_callback_info.rqst = svc_prepare_thread(serv, &serv->sv_pools[0]);
        if (IS_ERR(nfs_callback_info.rqst)) {
                ret = PTR_ERR(nfs_callback_info.rqst);
                nfs_callback_info.rqst = NULL;
                goto out_err;
        }

        svc_sock_update_bufs(serv);

        nfs_callback_info.task = kthread_run(nfs_callback_svc,
                                             nfs_callback_info.rqst,
                                             "nfsv4-svc");
        if (IS_ERR(nfs_callback_info.task)) {
                ret = PTR_ERR(nfs_callback_info.task);
                svc_exit_thread(nfs_callback_info.rqst);
                nfs_callback_info.rqst = NULL;
                nfs_callback_info.task = NULL;
                goto out_err;
        }
out:
        /*
         * svc_create creates the svc_serv with sv_nrthreads == 1, and then
         * svc_prepare_thread increments that. So we need to call svc_destroy
         * on both success and failure so that the refcount is 1 when the
         * thread exits.
         */
        if (serv)
                svc_destroy(serv);
        mutex_unlock(&nfs_callback_mutex);
        return ret;
out_err:
        dprintk("Couldn't create callback socket or server thread; err = %d\n",
                ret);
        nfs_callback_info.users--;
        goto out;
}

/*
 * Kill the callback thread if it's no longer being used.
 */
void nfs_callback_down(void)
{
        mutex_lock(&nfs_callback_mutex);
        nfs_callback_info.users--;
        if (nfs_callback_info.users == 0 && nfs_callback_info.task != NULL) {
                kthread_stop(nfs_callback_info.task);
                svc_exit_thread(nfs_callback_info.rqst);
                nfs_callback_info.rqst = NULL;
                nfs_callback_info.task = NULL;
        }
        mutex_unlock(&nfs_callback_mutex);
}

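/*
 * Sanity-check an incoming callback request before it is processed:
 * requests are silently dropped unless they come from an address we
 * recognise as an NFSv4 server we are currently talking to, and only
 * AUTH_NULL (for CB_NULL) and AUTH_UNIX are accepted; RPCSEC_GSS is
 * not handled yet and falls through to SVC_DENIED.
 */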
static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
        struct nfs_client *clp;
        RPC_IFDEBUG(char buf[RPC_MAX_ADDRBUFLEN]);

        /* Don't talk to strangers */
        clp = nfs_find_client(svc_addr(rqstp), 4);
        if (clp == NULL)
                return SVC_DROP;

        dprintk("%s: %s NFSv4 callback!\n", __func__,
                svc_print_addr(rqstp, buf, sizeof(buf)));
        nfs_put_client(clp);

        switch (rqstp->rq_authop->flavour) {
        case RPC_AUTH_NULL:
                if (rqstp->rq_proc != CB_NULL)
                        return SVC_DENIED;
                break;
        case RPC_AUTH_UNIX:
                break;
        case RPC_AUTH_GSS:
                /* FIXME: RPCSEC_GSS handling? */
        default:
                return SVC_DENIED;
        }
        return SVC_OK;
}

/*
 * Define NFS4 callback program
 */
static struct svc_version *nfs4_callback_version[] = {
        [1] = &nfs4_callback_version1,
};

static struct svc_stat nfs4_callback_stats;

static struct svc_program nfs4_callback_program = {
        .pg_prog = NFS4_CALLBACK,                      /* RPC service number */
        .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
        .pg_vers = nfs4_callback_version,              /* version table */
        .pg_name = "NFSv4 callback",                   /* service name */
        .pg_class = "nfs",                             /* authentication class */
        .pg_stats = &nfs4_callback_stats,
        .pg_authenticate = nfs_callback_authenticate,
};
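
/*
 * Notes:
 *
 * - nfs4_callback_version1 is declared in "callback.h"; in this tree it is
 *   expected to be implemented in callback_xdr.c.  NFS4_CALLBACK and
 *   NFS4_CALLBACK_BUFSIZE likewise come from "callback.h".
 *
 * - Assumed usage sketch for a caller (e.g. NFSv4 client initialisation,
 *   which lives outside this file): each successful nfs_callback_up() must
 *   be balanced by a matching nfs_callback_down(), since the shared callback
 *   thread is refcounted through nfs_callback_info.users and is only stopped
 *   once that count drops back to zero:
 *
 *        error = nfs_callback_up();
 *        if (error < 0)
 *                return error;
 *        ...
 *        nfs_callback_down();
 */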