1 /*
2 * fs/nfs/nfs4proc.c
3 *
4 * Client-side procedure declarations for NFSv4.
5 *
6 * Copyright (c) 2002 The Regents of the University of Michigan.
7 * All rights reserved.
8 *
9 * Kendrick Smith <kmsmith@umich.edu>
10 * Andy Adamson <andros@umich.edu>
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 *
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. Neither the name of the University nor the names of its
22 * contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
26 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
27 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
28 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
32 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 */
37
38 #include <linux/mm.h>
39 #include <linux/delay.h>
40 #include <linux/errno.h>
41 #include <linux/file.h>
42 #include <linux/string.h>
43 #include <linux/ratelimit.h>
44 #include <linux/printk.h>
45 #include <linux/slab.h>
46 #include <linux/sunrpc/clnt.h>
47 #include <linux/nfs.h>
48 #include <linux/nfs4.h>
49 #include <linux/nfs_fs.h>
50 #include <linux/nfs_page.h>
51 #include <linux/nfs_mount.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/module.h>
55 #include <linux/xattr.h>
56 #include <linux/utsname.h>
57 #include <linux/freezer.h>
58
59 #include "nfs4_fs.h"
60 #include "delegation.h"
61 #include "internal.h"
62 #include "iostat.h"
63 #include "callback.h"
64 #include "pnfs.h"
65 #include "netns.h"
66 #include "nfs4idmap.h"
67 #include "nfs4session.h"
68 #include "fscache.h"
69
70 #include "nfs4trace.h"
71
72 #define NFSDBG_FACILITY NFSDBG_PROC
73
74 #define NFS4_POLL_RETRY_MIN (HZ/10)
75 #define NFS4_POLL_RETRY_MAX (15*HZ)
76
77 /* file attributes which can be mapped to nfs attributes */
78 #define NFS4_VALID_ATTRS (ATTR_MODE \
79 | ATTR_UID \
80 | ATTR_GID \
81 | ATTR_SIZE \
82 | ATTR_ATIME \
83 | ATTR_MTIME \
84 | ATTR_CTIME \
85 | ATTR_ATIME_SET \
86 | ATTR_MTIME_SET)
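/*
 * Illustrative note (not additional behaviour): this mask is intended for
 * filtering an iattr before it is translated into NFSv4 attributes, e.g. a
 * hypothetical caller might test
 *
 *	if (sattr->ia_valid & ~NFS4_VALID_ATTRS)
 *		... handle bits that cannot be mapped onto the wire ...
 *
 * since only the bits listed above have NFSv4 attribute equivalents.
 */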
87
88 struct nfs4_opendata;
89 static int _nfs4_proc_open(struct nfs4_opendata *data);
90 static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
91 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
92 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
93 static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *, struct nfs4_label *label);
94 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct nfs4_label *label);
95 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
96 struct nfs_fattr *fattr, struct iattr *sattr,
97 struct nfs4_state *state, struct nfs4_label *ilabel,
98 struct nfs4_label *olabel);
99 #ifdef CONFIG_NFS_V4_1
100 static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
101 struct rpc_cred *);
102 static int nfs41_free_stateid(struct nfs_server *, nfs4_stateid *,
103 struct rpc_cred *);
104 #endif
105
106 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
107 static inline struct nfs4_label *
108 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
109 struct iattr *sattr, struct nfs4_label *label)
110 {
111 int err;
112
113 if (label == NULL)
114 return NULL;
115
116 if (nfs_server_capable(dir, NFS_CAP_SECURITY_LABEL) == 0)
117 return NULL;
118
119 err = security_dentry_init_security(dentry, sattr->ia_mode,
120 &dentry->d_name, (void **)&label->label, &label->len);
121 if (err == 0)
122 return label;
123
124 return NULL;
125 }
126 static inline void
127 nfs4_label_release_security(struct nfs4_label *label)
128 {
129 if (label)
130 security_release_secctx(label->label, label->len);
131 }
132 static inline u32 *nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
133 {
134 if (label)
135 return server->attr_bitmask;
136
137 return server->attr_bitmask_nl;
138 }
139 #else
140 static inline struct nfs4_label *
141 nfs4_label_init_security(struct inode *dir, struct dentry *dentry,
142 struct iattr *sattr, struct nfs4_label *l)
143 { return NULL; }
144 static inline void
145 nfs4_label_release_security(struct nfs4_label *label)
146 { return; }
147 static inline u32 *
148 nfs4_bitmask(struct nfs_server *server, struct nfs4_label *label)
149 { return server->attr_bitmask; }
150 #endif
151
152 /* Prevent leaks of NFSv4 errors into userland */
153 static int nfs4_map_errors(int err)
154 {
155 if (err >= -1000)
156 return err;
157 switch (err) {
158 case -NFS4ERR_RESOURCE:
159 case -NFS4ERR_LAYOUTTRYLATER:
160 case -NFS4ERR_RECALLCONFLICT:
161 return -EREMOTEIO;
162 case -NFS4ERR_WRONGSEC:
163 case -NFS4ERR_WRONG_CRED:
164 return -EPERM;
165 case -NFS4ERR_BADOWNER:
166 case -NFS4ERR_BADNAME:
167 return -EINVAL;
168 case -NFS4ERR_SHARE_DENIED:
169 return -EACCES;
170 case -NFS4ERR_MINOR_VERS_MISMATCH:
171 return -EPROTONOSUPPORT;
172 case -NFS4ERR_FILE_OPEN:
173 return -EBUSY;
174 default:
175 dprintk("%s could not handle NFSv4 error %d\n",
176 __func__, -err);
177 break;
178 }
179 return -EIO;
180 }
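/*
 * For illustration: errors already in the local errno range (>= -1000) pass
 * through untouched, while NFSv4-specific codes collapse onto a small errno
 * set. A sketch of the mapping implemented above:
 *
 *	nfs4_map_errors(-EIO)                  -> -EIO (passed through)
 *	nfs4_map_errors(-NFS4ERR_WRONGSEC)     -> -EPERM
 *	nfs4_map_errors(-NFS4ERR_SHARE_DENIED) -> -EACCES
 *	nfs4_map_errors(-NFS4ERR_GRACE)        -> -EIO (default case)
 */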
181
182 /*
183 * This is our standard bitmap for GETATTR requests.
184 */
185 const u32 nfs4_fattr_bitmap[3] = {
186 FATTR4_WORD0_TYPE
187 | FATTR4_WORD0_CHANGE
188 | FATTR4_WORD0_SIZE
189 | FATTR4_WORD0_FSID
190 | FATTR4_WORD0_FILEID,
191 FATTR4_WORD1_MODE
192 | FATTR4_WORD1_NUMLINKS
193 | FATTR4_WORD1_OWNER
194 | FATTR4_WORD1_OWNER_GROUP
195 | FATTR4_WORD1_RAWDEV
196 | FATTR4_WORD1_SPACE_USED
197 | FATTR4_WORD1_TIME_ACCESS
198 | FATTR4_WORD1_TIME_METADATA
199 | FATTR4_WORD1_TIME_MODIFY
200 | FATTR4_WORD1_MOUNTED_ON_FILEID,
201 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
202 FATTR4_WORD2_SECURITY_LABEL
203 #endif
204 };
205
206 static const u32 nfs4_pnfs_open_bitmap[3] = {
207 FATTR4_WORD0_TYPE
208 | FATTR4_WORD0_CHANGE
209 | FATTR4_WORD0_SIZE
210 | FATTR4_WORD0_FSID
211 | FATTR4_WORD0_FILEID,
212 FATTR4_WORD1_MODE
213 | FATTR4_WORD1_NUMLINKS
214 | FATTR4_WORD1_OWNER
215 | FATTR4_WORD1_OWNER_GROUP
216 | FATTR4_WORD1_RAWDEV
217 | FATTR4_WORD1_SPACE_USED
218 | FATTR4_WORD1_TIME_ACCESS
219 | FATTR4_WORD1_TIME_METADATA
220 | FATTR4_WORD1_TIME_MODIFY,
221 FATTR4_WORD2_MDSTHRESHOLD
222 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
223 | FATTR4_WORD2_SECURITY_LABEL
224 #endif
225 };
226
227 static const u32 nfs4_open_noattr_bitmap[3] = {
228 FATTR4_WORD0_TYPE
229 | FATTR4_WORD0_CHANGE
230 | FATTR4_WORD0_FILEID,
231 };
232
233 const u32 nfs4_statfs_bitmap[3] = {
234 FATTR4_WORD0_FILES_AVAIL
235 | FATTR4_WORD0_FILES_FREE
236 | FATTR4_WORD0_FILES_TOTAL,
237 FATTR4_WORD1_SPACE_AVAIL
238 | FATTR4_WORD1_SPACE_FREE
239 | FATTR4_WORD1_SPACE_TOTAL
240 };
241
242 const u32 nfs4_pathconf_bitmap[3] = {
243 FATTR4_WORD0_MAXLINK
244 | FATTR4_WORD0_MAXNAME,
245 0
246 };
247
248 const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
249 | FATTR4_WORD0_MAXREAD
250 | FATTR4_WORD0_MAXWRITE
251 | FATTR4_WORD0_LEASE_TIME,
252 FATTR4_WORD1_TIME_DELTA
253 | FATTR4_WORD1_FS_LAYOUT_TYPES,
254 FATTR4_WORD2_LAYOUT_BLKSIZE
255 | FATTR4_WORD2_CLONE_BLKSIZE
256 };
257
258 const u32 nfs4_fs_locations_bitmap[3] = {
259 FATTR4_WORD0_TYPE
260 | FATTR4_WORD0_CHANGE
261 | FATTR4_WORD0_SIZE
262 | FATTR4_WORD0_FSID
263 | FATTR4_WORD0_FILEID
264 | FATTR4_WORD0_FS_LOCATIONS,
265 FATTR4_WORD1_MODE
266 | FATTR4_WORD1_NUMLINKS
267 | FATTR4_WORD1_OWNER
268 | FATTR4_WORD1_OWNER_GROUP
269 | FATTR4_WORD1_RAWDEV
270 | FATTR4_WORD1_SPACE_USED
271 | FATTR4_WORD1_TIME_ACCESS
272 | FATTR4_WORD1_TIME_METADATA
273 | FATTR4_WORD1_TIME_MODIFY
274 | FATTR4_WORD1_MOUNTED_ON_FILEID,
275 };
276
277 static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dentry,
278 struct nfs4_readdir_arg *readdir)
279 {
280 __be32 *start, *p;
281
282 if (cookie > 2) {
283 readdir->cookie = cookie;
284 memcpy(&readdir->verifier, verifier, sizeof(readdir->verifier));
285 return;
286 }
287
288 readdir->cookie = 0;
289 memset(&readdir->verifier, 0, sizeof(readdir->verifier));
290 if (cookie == 2)
291 return;
292
293 /*
294 * NFSv4 servers do not return entries for '.' and '..'
295 * Therefore, we fake these entries here. We let '.'
296 * have cookie 0 and '..' have cookie 1. Note that
297 * when talking to the server, we always send cookie 0
298 * instead of 1 or 2.
299 */
300 start = p = kmap_atomic(*readdir->pages);
301
302 if (cookie == 0) {
303 *p++ = xdr_one; /* next */
304 *p++ = xdr_zero; /* cookie, first word */
305 *p++ = xdr_one; /* cookie, second word */
306 *p++ = xdr_one; /* entry len */
307 memcpy(p, ".\0\0\0", 4); /* entry */
308 p++;
309 *p++ = xdr_one; /* bitmap length */
310 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
311 *p++ = htonl(8); /* attribute buffer length */
312 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry)));
313 }
314
315 *p++ = xdr_one; /* next */
316 *p++ = xdr_zero; /* cookie, first word */
317 *p++ = xdr_two; /* cookie, second word */
318 *p++ = xdr_two; /* entry len */
319 memcpy(p, "..\0\0", 4); /* entry */
320 p++;
321 *p++ = xdr_one; /* bitmap length */
322 *p++ = htonl(FATTR4_WORD0_FILEID); /* bitmap */
323 *p++ = htonl(8); /* attribute buffer length */
324 p = xdr_encode_hyper(p, NFS_FILEID(d_inode(dentry->d_parent)));
325
326 readdir->pgbase = (char *)p - (char *)start;
327 readdir->count -= readdir->pgbase;
328 kunmap_atomic(start);
329 }
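/*
 * Sketch of the fake READDIR prefix built above (the cookie == 0 case): two
 * locally generated XDR entries precede the server's data,
 *
 *	entry ".":  cookie 1, fileid of the directory itself
 *	entry "..": cookie 2, fileid of the parent directory
 *
 * and readdir->pgbase/count are adjusted so that, roughly speaking, the
 * decoded server reply lands after this locally built prefix in the page.
 */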
330
331 static long nfs4_update_delay(long *timeout)
332 {
333 long ret;
334 if (!timeout)
335 return NFS4_POLL_RETRY_MAX;
336 if (*timeout <= 0)
337 *timeout = NFS4_POLL_RETRY_MIN;
338 if (*timeout > NFS4_POLL_RETRY_MAX)
339 *timeout = NFS4_POLL_RETRY_MAX;
340 ret = *timeout;
341 *timeout <<= 1;
342 return ret;
343 }
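/*
 * Illustrative backoff sequence: with *timeout starting at 0, successive
 * calls return HZ/10, HZ/5, 2*HZ/5, ... doubling each time and clamped to
 * NFS4_POLL_RETRY_MAX (15*HZ); a NULL timeout always yields the maximum.
 */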
344
345 static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
346 {
347 int res = 0;
348
349 might_sleep();
350
351 freezable_schedule_timeout_killable_unsafe(
352 nfs4_update_delay(timeout));
353 if (fatal_signal_pending(current))
354 res = -ERESTARTSYS;
355 return res;
356 }
357
358 /* This is the error handling routine for processes that are allowed
359 * to sleep.
360 */
361 static int nfs4_do_handle_exception(struct nfs_server *server,
362 int errorcode, struct nfs4_exception *exception)
363 {
364 struct nfs_client *clp = server->nfs_client;
365 struct nfs4_state *state = exception->state;
366 const nfs4_stateid *stateid = exception->stateid;
367 struct inode *inode = exception->inode;
368 int ret = errorcode;
369
370 exception->delay = 0;
371 exception->recovering = 0;
372 exception->retry = 0;
373 switch(errorcode) {
374 case 0:
375 return 0;
376 case -NFS4ERR_OPENMODE:
377 case -NFS4ERR_DELEG_REVOKED:
378 case -NFS4ERR_ADMIN_REVOKED:
379 case -NFS4ERR_BAD_STATEID:
380 if (inode) {
381 int err;
382
383 err = nfs_async_inode_return_delegation(inode,
384 stateid);
385 if (err == 0)
386 goto wait_on_recovery;
387 if (stateid != NULL && stateid->type == NFS4_DELEGATION_STATEID_TYPE) {
388 exception->retry = 1;
389 break;
390 }
391 }
392 if (state == NULL)
393 break;
394 ret = nfs4_schedule_stateid_recovery(server, state);
395 if (ret < 0)
396 break;
397 goto wait_on_recovery;
398 case -NFS4ERR_EXPIRED:
399 if (state != NULL) {
400 ret = nfs4_schedule_stateid_recovery(server, state);
401 if (ret < 0)
402 break;
403 }
404 case -NFS4ERR_STALE_STATEID:
405 case -NFS4ERR_STALE_CLIENTID:
406 nfs4_schedule_lease_recovery(clp);
407 goto wait_on_recovery;
408 case -NFS4ERR_MOVED:
409 ret = nfs4_schedule_migration_recovery(server);
410 if (ret < 0)
411 break;
412 goto wait_on_recovery;
413 case -NFS4ERR_LEASE_MOVED:
414 nfs4_schedule_lease_moved_recovery(clp);
415 goto wait_on_recovery;
416 #if defined(CONFIG_NFS_V4_1)
417 case -NFS4ERR_BADSESSION:
418 case -NFS4ERR_BADSLOT:
419 case -NFS4ERR_BAD_HIGH_SLOT:
420 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
421 case -NFS4ERR_DEADSESSION:
422 case -NFS4ERR_SEQ_FALSE_RETRY:
423 case -NFS4ERR_SEQ_MISORDERED:
424 dprintk("%s ERROR: %d Reset session\n", __func__,
425 errorcode);
426 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
427 goto wait_on_recovery;
428 #endif /* defined(CONFIG_NFS_V4_1) */
429 case -NFS4ERR_FILE_OPEN:
430 if (exception->timeout > HZ) {
431 /* We have retried a decent amount, time to
432 * fail
433 */
434 ret = -EBUSY;
435 break;
436 }
437 case -NFS4ERR_DELAY:
438 nfs_inc_server_stats(server, NFSIOS_DELAY);
439 case -NFS4ERR_GRACE:
440 case -NFS4ERR_LAYOUTTRYLATER:
441 case -NFS4ERR_RECALLCONFLICT:
442 exception->delay = 1;
443 return 0;
444
445 case -NFS4ERR_RETRY_UNCACHED_REP:
446 case -NFS4ERR_OLD_STATEID:
447 exception->retry = 1;
448 break;
449 case -NFS4ERR_BADOWNER:
450 /* The following works around a Linux server bug! */
451 case -NFS4ERR_BADNAME:
452 if (server->caps & NFS_CAP_UIDGID_NOMAP) {
453 server->caps &= ~NFS_CAP_UIDGID_NOMAP;
454 exception->retry = 1;
455 printk(KERN_WARNING "NFS: v4 server %s "
456 "does not accept raw "
457 "uid/gids. "
458 "Reenabling the idmapper.\n",
459 server->nfs_client->cl_hostname);
460 }
461 }
462 /* We failed to handle the error */
463 return nfs4_map_errors(ret);
464 wait_on_recovery:
465 exception->recovering = 1;
466 return 0;
467 }
468
469 /* This is the error handling routine for processes that are allowed
470 * to sleep.
471 */
472 int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
473 {
474 struct nfs_client *clp = server->nfs_client;
475 int ret;
476
477 ret = nfs4_do_handle_exception(server, errorcode, exception);
478 if (exception->delay) {
479 ret = nfs4_delay(server->client, &exception->timeout);
480 goto out_retry;
481 }
482 if (exception->recovering) {
483 ret = nfs4_wait_clnt_recover(clp);
484 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
485 return -EIO;
486 goto out_retry;
487 }
488 return ret;
489 out_retry:
490 if (ret == 0)
491 exception->retry = 1;
492 return ret;
493 }
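/*
 * Typical calling pattern (a sketch, mirroring the retry loops used further
 * down in this file; _nfs4_proc_something() is a hypothetical worker):
 *
 *	struct nfs4_exception exception = { };
 *	int err;
 *	do {
 *		err = _nfs4_proc_something(...);
 *		err = nfs4_handle_exception(server, err, &exception);
 *	} while (exception.retry);
 *
 * The helper sleeps or waits for state recovery as needed and sets
 * exception.retry when the operation should simply be resent.
 */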
494
495 static int
496 nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server,
497 int errorcode, struct nfs4_exception *exception)
498 {
499 struct nfs_client *clp = server->nfs_client;
500 int ret;
501
502 ret = nfs4_do_handle_exception(server, errorcode, exception);
503 if (exception->delay) {
504 rpc_delay(task, nfs4_update_delay(&exception->timeout));
505 goto out_retry;
506 }
507 if (exception->recovering) {
508 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
509 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
510 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
511 goto out_retry;
512 }
513 if (test_bit(NFS_MIG_FAILED, &server->mig_status))
514 ret = -EIO;
515 return ret;
516 out_retry:
517 if (ret == 0)
518 exception->retry = 1;
519 return ret;
520 }
521
522 static int
523 nfs4_async_handle_error(struct rpc_task *task, struct nfs_server *server,
524 struct nfs4_state *state, long *timeout)
525 {
526 struct nfs4_exception exception = {
527 .state = state,
528 };
529
530 if (task->tk_status >= 0)
531 return 0;
532 if (timeout)
533 exception.timeout = *timeout;
534 task->tk_status = nfs4_async_handle_exception(task, server,
535 task->tk_status,
536 &exception);
537 if (exception.delay && timeout)
538 *timeout = exception.timeout;
539 if (exception.retry)
540 return -EAGAIN;
541 return 0;
542 }
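/*
 * Note for callers: a return value of -EAGAIN means the exception logic has
 * already queued a delay or a wait for state recovery on the task; the usual
 * response in an rpc_call_done callback is to restart the call (for example
 * via rpc_restart_call_prepare()) rather than to fail the request.
 */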
543
544 /*
545 * Return 'true' if 'clp' is using an rpc_client that is integrity protected
546 * or 'false' otherwise.
547 */
548 static bool _nfs4_is_integrity_protected(struct nfs_client *clp)
549 {
550 rpc_authflavor_t flavor = clp->cl_rpcclient->cl_auth->au_flavor;
551
552 if (flavor == RPC_AUTH_GSS_KRB5I ||
553 flavor == RPC_AUTH_GSS_KRB5P)
554 return true;
555
556 return false;
557 }
558
559 static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
560 {
561 spin_lock(&clp->cl_lock);
562 if (time_before(clp->cl_last_renewal,timestamp))
563 clp->cl_last_renewal = timestamp;
564 spin_unlock(&clp->cl_lock);
565 }
566
567 static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
568 {
569 struct nfs_client *clp = server->nfs_client;
570
571 if (!nfs4_has_session(clp))
572 do_renew_lease(clp, timestamp);
573 }
574
575 struct nfs4_call_sync_data {
576 const struct nfs_server *seq_server;
577 struct nfs4_sequence_args *seq_args;
578 struct nfs4_sequence_res *seq_res;
579 };
580
581 void nfs4_init_sequence(struct nfs4_sequence_args *args,
582 struct nfs4_sequence_res *res, int cache_reply)
583 {
584 args->sa_slot = NULL;
585 args->sa_cache_this = cache_reply;
586 args->sa_privileged = 0;
587
588 res->sr_slot = NULL;
589 }
590
591 static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args)
592 {
593 args->sa_privileged = 1;
594 }
595
596 int nfs40_setup_sequence(struct nfs4_slot_table *tbl,
597 struct nfs4_sequence_args *args,
598 struct nfs4_sequence_res *res,
599 struct rpc_task *task)
600 {
601 struct nfs4_slot *slot;
602
603 /* slot already allocated? */
604 if (res->sr_slot != NULL)
605 goto out_start;
606
607 spin_lock(&tbl->slot_tbl_lock);
608 if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
609 goto out_sleep;
610
611 slot = nfs4_alloc_slot(tbl);
612 if (IS_ERR(slot)) {
613 if (slot == ERR_PTR(-ENOMEM))
614 task->tk_timeout = HZ >> 2;
615 goto out_sleep;
616 }
617 spin_unlock(&tbl->slot_tbl_lock);
618
619 args->sa_slot = slot;
620 res->sr_slot = slot;
621
622 out_start:
623 rpc_call_start(task);
624 return 0;
625
626 out_sleep:
627 if (args->sa_privileged)
628 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
629 NULL, RPC_PRIORITY_PRIVILEGED);
630 else
631 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
632 spin_unlock(&tbl->slot_tbl_lock);
633 return -EAGAIN;
634 }
635 EXPORT_SYMBOL_GPL(nfs40_setup_sequence);
636
637 static void nfs40_sequence_free_slot(struct nfs4_sequence_res *res)
638 {
639 struct nfs4_slot *slot = res->sr_slot;
640 struct nfs4_slot_table *tbl;
641
642 tbl = slot->table;
643 spin_lock(&tbl->slot_tbl_lock);
644 if (!nfs41_wake_and_assign_slot(tbl, slot))
645 nfs4_free_slot(tbl, slot);
646 spin_unlock(&tbl->slot_tbl_lock);
647
648 res->sr_slot = NULL;
649 }
650
651 static int nfs40_sequence_done(struct rpc_task *task,
652 struct nfs4_sequence_res *res)
653 {
654 if (res->sr_slot != NULL)
655 nfs40_sequence_free_slot(res);
656 return 1;
657 }
658
659 #if defined(CONFIG_NFS_V4_1)
660
661 static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
662 {
663 struct nfs4_session *session;
664 struct nfs4_slot_table *tbl;
665 struct nfs4_slot *slot = res->sr_slot;
666 bool send_new_highest_used_slotid = false;
667
668 tbl = slot->table;
669 session = tbl->session;
670
671 /* Bump the slot sequence number */
672 if (slot->seq_done)
673 slot->seq_nr++;
674 slot->seq_done = 0;
675
676 spin_lock(&tbl->slot_tbl_lock);
677 /* Be nice to the server: try to ensure that the last transmitted
678 * value for highest_used_slotid <= target_highest_slotid
679 */
680 if (tbl->highest_used_slotid > tbl->target_highest_slotid)
681 send_new_highest_used_slotid = true;
682
683 if (nfs41_wake_and_assign_slot(tbl, slot)) {
684 send_new_highest_used_slotid = false;
685 goto out_unlock;
686 }
687 nfs4_free_slot(tbl, slot);
688
689 if (tbl->highest_used_slotid != NFS4_NO_SLOT)
690 send_new_highest_used_slotid = false;
691 out_unlock:
692 spin_unlock(&tbl->slot_tbl_lock);
693 res->sr_slot = NULL;
694 if (send_new_highest_used_slotid)
695 nfs41_notify_server(session->clp);
696 if (waitqueue_active(&tbl->slot_waitq))
697 wake_up_all(&tbl->slot_waitq);
698 }
699
700 static int nfs41_sequence_process(struct rpc_task *task,
701 struct nfs4_sequence_res *res)
702 {
703 struct nfs4_session *session;
704 struct nfs4_slot *slot = res->sr_slot;
705 struct nfs_client *clp;
706 bool interrupted = false;
707 int ret = 1;
708
709 if (slot == NULL)
710 goto out_noaction;
711 /* don't increment the sequence number if the task wasn't sent */
712 if (!RPC_WAS_SENT(task))
713 goto out;
714
715 session = slot->table->session;
716
717 if (slot->interrupted) {
718 slot->interrupted = 0;
719 interrupted = true;
720 }
721
722 trace_nfs4_sequence_done(session, res);
723 /* Check the SEQUENCE operation status */
724 switch (res->sr_status) {
725 case 0:
726 /* Update the slot's sequence and clientid lease timer */
727 slot->seq_done = 1;
728 clp = session->clp;
729 do_renew_lease(clp, res->sr_timestamp);
730 /* Check sequence flags */
731 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
732 nfs41_update_target_slotid(slot->table, slot, res);
733 break;
734 case 1:
735 /*
736 * sr_status remains 1 if an RPC level error occurred.
737 * The server may or may not have processed the sequence
738 * operation.
739 * Mark the slot as having hosted an interrupted RPC call.
740 */
741 slot->interrupted = 1;
742 goto out;
743 case -NFS4ERR_DELAY:
744 /* The server detected a resend of the RPC call and
745 * returned NFS4ERR_DELAY as per Section 2.10.6.2
746 * of RFC5661.
747 */
748 dprintk("%s: slot=%u seq=%u: Operation in progress\n",
749 __func__,
750 slot->slot_nr,
751 slot->seq_nr);
752 goto out_retry;
753 case -NFS4ERR_BADSLOT:
754 /*
755 * The slot id we used was probably retired. Try again
756 * using a different slot id.
757 */
758 goto retry_nowait;
759 case -NFS4ERR_SEQ_MISORDERED:
760 /*
761 * Was the last operation on this sequence interrupted?
762 * If so, retry after bumping the sequence number.
763 */
764 if (interrupted) {
765 ++slot->seq_nr;
766 goto retry_nowait;
767 }
768 /*
769 * Could this slot have been previously retired?
770 * If so, then the server may be expecting seq_nr = 1!
771 */
772 if (slot->seq_nr != 1) {
773 slot->seq_nr = 1;
774 goto retry_nowait;
775 }
776 break;
777 case -NFS4ERR_SEQ_FALSE_RETRY:
778 ++slot->seq_nr;
779 goto retry_nowait;
780 default:
781 /* Just update the slot sequence no. */
782 slot->seq_done = 1;
783 }
784 out:
785 /* The session may be reset by one of the error handlers. */
786 dprintk("%s: Error %d free the slot \n", __func__, res->sr_status);
787 out_noaction:
788 return ret;
789 retry_nowait:
790 if (rpc_restart_call_prepare(task)) {
791 nfs41_sequence_free_slot(res);
792 task->tk_status = 0;
793 ret = 0;
794 }
795 goto out;
796 out_retry:
797 if (!rpc_restart_call(task))
798 goto out;
799 rpc_delay(task, NFS4_POLL_RETRY_MAX);
800 return 0;
801 }
802
803 int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
804 {
805 if (!nfs41_sequence_process(task, res))
806 return 0;
807 if (res->sr_slot != NULL)
808 nfs41_sequence_free_slot(res);
809 return 1;
810
811 }
812 EXPORT_SYMBOL_GPL(nfs41_sequence_done);
813
814 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
815 {
816 if (res->sr_slot == NULL)
817 return 1;
818 if (res->sr_slot->table->session != NULL)
819 return nfs41_sequence_process(task, res);
820 return nfs40_sequence_done(task, res);
821 }
822
823 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
824 {
825 if (res->sr_slot != NULL) {
826 if (res->sr_slot->table->session != NULL)
827 nfs41_sequence_free_slot(res);
828 else
829 nfs40_sequence_free_slot(res);
830 }
831 }
832
833 int nfs4_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
834 {
835 if (res->sr_slot == NULL)
836 return 1;
837 if (!res->sr_slot->table->session)
838 return nfs40_sequence_done(task, res);
839 return nfs41_sequence_done(task, res);
840 }
841 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
842
843 int nfs41_setup_sequence(struct nfs4_session *session,
844 struct nfs4_sequence_args *args,
845 struct nfs4_sequence_res *res,
846 struct rpc_task *task)
847 {
848 struct nfs4_slot *slot;
849 struct nfs4_slot_table *tbl;
850
851 dprintk("--> %s\n", __func__);
852 /* slot already allocated? */
853 if (res->sr_slot != NULL)
854 goto out_success;
855
856 tbl = &session->fc_slot_table;
857
858 task->tk_timeout = 0;
859
860 spin_lock(&tbl->slot_tbl_lock);
861 if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
862 !args->sa_privileged) {
863 /* The state manager will wait until the slot table is empty */
864 dprintk("%s session is draining\n", __func__);
865 goto out_sleep;
866 }
867
868 slot = nfs4_alloc_slot(tbl);
869 if (IS_ERR(slot)) {
870 /* If out of memory, try again in 1/4 second */
871 if (slot == ERR_PTR(-ENOMEM))
872 task->tk_timeout = HZ >> 2;
873 dprintk("<-- %s: no free slots\n", __func__);
874 goto out_sleep;
875 }
876 spin_unlock(&tbl->slot_tbl_lock);
877
878 args->sa_slot = slot;
879
880 dprintk("<-- %s slotid=%u seqid=%u\n", __func__,
881 slot->slot_nr, slot->seq_nr);
882
883 res->sr_slot = slot;
884 res->sr_timestamp = jiffies;
885 res->sr_status_flags = 0;
886 /*
887 * sr_status is only set in decode_sequence, and so will remain
888 * set to 1 if an rpc level failure occurs.
889 */
890 res->sr_status = 1;
891 trace_nfs4_setup_sequence(session, args);
892 out_success:
893 rpc_call_start(task);
894 return 0;
895 out_sleep:
896 /* Privileged tasks are queued with top priority */
897 if (args->sa_privileged)
898 rpc_sleep_on_priority(&tbl->slot_tbl_waitq, task,
899 NULL, RPC_PRIORITY_PRIVILEGED);
900 else
901 rpc_sleep_on(&tbl->slot_tbl_waitq, task, NULL);
902 spin_unlock(&tbl->slot_tbl_lock);
903 return -EAGAIN;
904 }
905 EXPORT_SYMBOL_GPL(nfs41_setup_sequence);
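/*
 * Rough lifecycle of an NFSv4.1 slot, as implemented above and in
 * nfs41_sequence_free_slot(): a slot is taken from the fore-channel table at
 * rpc_call_prepare time, travels with the request as sa_slot/sr_slot, and is
 * released (or handed directly to a waiting task) once the SEQUENCE result
 * has been processed. Privileged requests may still obtain slots while the
 * state manager is draining the table.
 */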
906
907 static int nfs4_setup_sequence(const struct nfs_server *server,
908 struct nfs4_sequence_args *args,
909 struct nfs4_sequence_res *res,
910 struct rpc_task *task)
911 {
912 struct nfs4_session *session = nfs4_get_session(server);
913 int ret = 0;
914
915 if (!session)
916 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
917 args, res, task);
918
919 dprintk("--> %s clp %p session %p sr_slot %u\n",
920 __func__, session->clp, session, res->sr_slot ?
921 res->sr_slot->slot_nr : NFS4_NO_SLOT);
922
923 ret = nfs41_setup_sequence(session, args, res, task);
924
925 dprintk("<-- %s status=%d\n", __func__, ret);
926 return ret;
927 }
928
929 static void nfs41_call_sync_prepare(struct rpc_task *task, void *calldata)
930 {
931 struct nfs4_call_sync_data *data = calldata;
932 struct nfs4_session *session = nfs4_get_session(data->seq_server);
933
934 dprintk("--> %s data->seq_server %p\n", __func__, data->seq_server);
935
936 nfs41_setup_sequence(session, data->seq_args, data->seq_res, task);
937 }
938
939 static void nfs41_call_sync_done(struct rpc_task *task, void *calldata)
940 {
941 struct nfs4_call_sync_data *data = calldata;
942
943 nfs41_sequence_done(task, data->seq_res);
944 }
945
946 static const struct rpc_call_ops nfs41_call_sync_ops = {
947 .rpc_call_prepare = nfs41_call_sync_prepare,
948 .rpc_call_done = nfs41_call_sync_done,
949 };
950
951 #else /* !CONFIG_NFS_V4_1 */
952
953 static int nfs4_setup_sequence(const struct nfs_server *server,
954 struct nfs4_sequence_args *args,
955 struct nfs4_sequence_res *res,
956 struct rpc_task *task)
957 {
958 return nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
959 args, res, task);
960 }
961
962 static int nfs4_sequence_process(struct rpc_task *task, struct nfs4_sequence_res *res)
963 {
964 return nfs40_sequence_done(task, res);
965 }
966
967 static void nfs4_sequence_free_slot(struct nfs4_sequence_res *res)
968 {
969 if (res->sr_slot != NULL)
970 nfs40_sequence_free_slot(res);
971 }
972
973 int nfs4_sequence_done(struct rpc_task *task,
974 struct nfs4_sequence_res *res)
975 {
976 return nfs40_sequence_done(task, res);
977 }
978 EXPORT_SYMBOL_GPL(nfs4_sequence_done);
979
980 #endif /* !CONFIG_NFS_V4_1 */
981
982 static void nfs40_call_sync_prepare(struct rpc_task *task, void *calldata)
983 {
984 struct nfs4_call_sync_data *data = calldata;
985 nfs4_setup_sequence(data->seq_server,
986 data->seq_args, data->seq_res, task);
987 }
988
989 static void nfs40_call_sync_done(struct rpc_task *task, void *calldata)
990 {
991 struct nfs4_call_sync_data *data = calldata;
992 nfs4_sequence_done(task, data->seq_res);
993 }
994
995 static const struct rpc_call_ops nfs40_call_sync_ops = {
996 .rpc_call_prepare = nfs40_call_sync_prepare,
997 .rpc_call_done = nfs40_call_sync_done,
998 };
999
1000 static int nfs4_call_sync_sequence(struct rpc_clnt *clnt,
1001 struct nfs_server *server,
1002 struct rpc_message *msg,
1003 struct nfs4_sequence_args *args,
1004 struct nfs4_sequence_res *res)
1005 {
1006 int ret;
1007 struct rpc_task *task;
1008 struct nfs_client *clp = server->nfs_client;
1009 struct nfs4_call_sync_data data = {
1010 .seq_server = server,
1011 .seq_args = args,
1012 .seq_res = res,
1013 };
1014 struct rpc_task_setup task_setup = {
1015 .rpc_client = clnt,
1016 .rpc_message = msg,
1017 .callback_ops = clp->cl_mvops->call_sync_ops,
1018 .callback_data = &data
1019 };
1020
1021 task = rpc_run_task(&task_setup);
1022 if (IS_ERR(task))
1023 ret = PTR_ERR(task);
1024 else {
1025 ret = task->tk_status;
1026 rpc_put_task(task);
1027 }
1028 return ret;
1029 }
1030
1031 int nfs4_call_sync(struct rpc_clnt *clnt,
1032 struct nfs_server *server,
1033 struct rpc_message *msg,
1034 struct nfs4_sequence_args *args,
1035 struct nfs4_sequence_res *res,
1036 int cache_reply)
1037 {
1038 nfs4_init_sequence(args, res, cache_reply);
1039 return nfs4_call_sync_sequence(clnt, server, msg, args, res);
1040 }
1041
1042 static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
1043 {
1044 struct nfs_inode *nfsi = NFS_I(dir);
1045
1046 spin_lock(&dir->i_lock);
1047 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1048 if (!cinfo->atomic || cinfo->before != dir->i_version)
1049 nfs_force_lookup_revalidate(dir);
1050 dir->i_version = cinfo->after;
1051 nfsi->attr_gencount = nfs_inc_attr_generation_counter();
1052 nfs_fscache_invalidate(dir);
1053 spin_unlock(&dir->i_lock);
1054 }
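/*
 * In other words (informal summary): the directory's cached change attribute
 * is advanced to cinfo->after, and unless the server reported the update as
 * atomic with a matching "before" value, cached dentries under this
 * directory are forced through lookup revalidation.
 */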
1055
1056 struct nfs4_opendata {
1057 struct kref kref;
1058 struct nfs_openargs o_arg;
1059 struct nfs_openres o_res;
1060 struct nfs_open_confirmargs c_arg;
1061 struct nfs_open_confirmres c_res;
1062 struct nfs4_string owner_name;
1063 struct nfs4_string group_name;
1064 struct nfs4_label *a_label;
1065 struct nfs_fattr f_attr;
1066 struct nfs4_label *f_label;
1067 struct dentry *dir;
1068 struct dentry *dentry;
1069 struct nfs4_state_owner *owner;
1070 struct nfs4_state *state;
1071 struct iattr attrs;
1072 unsigned long timestamp;
1073 unsigned int rpc_done : 1;
1074 unsigned int file_created : 1;
1075 unsigned int is_recover : 1;
1076 int rpc_status;
1077 int cancelled;
1078 };
1079
1080 static bool nfs4_clear_cap_atomic_open_v1(struct nfs_server *server,
1081 int err, struct nfs4_exception *exception)
1082 {
1083 if (err != -EINVAL)
1084 return false;
1085 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1086 return false;
1087 server->caps &= ~NFS_CAP_ATOMIC_OPEN_V1;
1088 exception->retry = 1;
1089 return true;
1090 }
1091
1092 static u32
1093 nfs4_map_atomic_open_share(struct nfs_server *server,
1094 fmode_t fmode, int openflags)
1095 {
1096 u32 res = 0;
1097
1098 switch (fmode & (FMODE_READ | FMODE_WRITE)) {
1099 case FMODE_READ:
1100 res = NFS4_SHARE_ACCESS_READ;
1101 break;
1102 case FMODE_WRITE:
1103 res = NFS4_SHARE_ACCESS_WRITE;
1104 break;
1105 case FMODE_READ|FMODE_WRITE:
1106 res = NFS4_SHARE_ACCESS_BOTH;
1107 }
1108 if (!(server->caps & NFS_CAP_ATOMIC_OPEN_V1))
1109 goto out;
1110 /* Want no delegation if we're using O_DIRECT */
1111 if (openflags & O_DIRECT)
1112 res |= NFS4_SHARE_WANT_NO_DELEG;
1113 out:
1114 return res;
1115 }
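/*
 * Example mappings produced above (illustrative):
 *
 *	FMODE_READ              -> NFS4_SHARE_ACCESS_READ
 *	FMODE_READ|FMODE_WRITE  -> NFS4_SHARE_ACCESS_BOTH
 *	FMODE_WRITE + O_DIRECT  -> NFS4_SHARE_ACCESS_WRITE, plus
 *	                           NFS4_SHARE_WANT_NO_DELEG when the server
 *	                           supports NFSv4.1 atomic open (v4.0 servers
 *	                           never see the "want" bits).
 */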
1116
1117 static enum open_claim_type4
1118 nfs4_map_atomic_open_claim(struct nfs_server *server,
1119 enum open_claim_type4 claim)
1120 {
1121 if (server->caps & NFS_CAP_ATOMIC_OPEN_V1)
1122 return claim;
1123 switch (claim) {
1124 default:
1125 return claim;
1126 case NFS4_OPEN_CLAIM_FH:
1127 return NFS4_OPEN_CLAIM_NULL;
1128 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1129 return NFS4_OPEN_CLAIM_DELEGATE_CUR;
1130 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1131 return NFS4_OPEN_CLAIM_DELEGATE_PREV;
1132 }
1133 }
1134
1135 static void nfs4_init_opendata_res(struct nfs4_opendata *p)
1136 {
1137 p->o_res.f_attr = &p->f_attr;
1138 p->o_res.f_label = p->f_label;
1139 p->o_res.seqid = p->o_arg.seqid;
1140 p->c_res.seqid = p->c_arg.seqid;
1141 p->o_res.server = p->o_arg.server;
1142 p->o_res.access_request = p->o_arg.access;
1143 nfs_fattr_init(&p->f_attr);
1144 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
1145 }
1146
1147 static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1148 struct nfs4_state_owner *sp, fmode_t fmode, int flags,
1149 const struct iattr *attrs,
1150 struct nfs4_label *label,
1151 enum open_claim_type4 claim,
1152 gfp_t gfp_mask)
1153 {
1154 struct dentry *parent = dget_parent(dentry);
1155 struct inode *dir = d_inode(parent);
1156 struct nfs_server *server = NFS_SERVER(dir);
1157 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
1158 struct nfs4_opendata *p;
1159
1160 p = kzalloc(sizeof(*p), gfp_mask);
1161 if (p == NULL)
1162 goto err;
1163
1164 p->f_label = nfs4_label_alloc(server, gfp_mask);
1165 if (IS_ERR(p->f_label))
1166 goto err_free_p;
1167
1168 p->a_label = nfs4_label_alloc(server, gfp_mask);
1169 if (IS_ERR(p->a_label))
1170 goto err_free_f;
1171
1172 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1173 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1174 if (IS_ERR(p->o_arg.seqid))
1175 goto err_free_label;
1176 nfs_sb_active(dentry->d_sb);
1177 p->dentry = dget(dentry);
1178 p->dir = parent;
1179 p->owner = sp;
1180 atomic_inc(&sp->so_count);
1181 p->o_arg.open_flags = flags;
1182 p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE);
1183 p->o_arg.share_access = nfs4_map_atomic_open_share(server,
1184 fmode, flags);
1185 /* don't put an ACCESS op in OPEN compound if O_EXCL, because ACCESS
1186 * will return permission denied for all bits until close */
1187 if (!(flags & O_EXCL)) {
1188 /* ask server to check for all possible rights as results
1189 * are cached */
1190 p->o_arg.access = NFS4_ACCESS_READ | NFS4_ACCESS_MODIFY |
1191 NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE;
1192 }
1193 p->o_arg.clientid = server->nfs_client->cl_clientid;
1194 p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time);
1195 p->o_arg.id.uniquifier = sp->so_seqid.owner_id;
1196 p->o_arg.name = &dentry->d_name;
1197 p->o_arg.server = server;
1198 p->o_arg.bitmask = nfs4_bitmask(server, label);
1199 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1200 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1201 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1202 switch (p->o_arg.claim) {
1203 case NFS4_OPEN_CLAIM_NULL:
1204 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1205 case NFS4_OPEN_CLAIM_DELEGATE_PREV:
1206 p->o_arg.fh = NFS_FH(dir);
1207 break;
1208 case NFS4_OPEN_CLAIM_PREVIOUS:
1209 case NFS4_OPEN_CLAIM_FH:
1210 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1211 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
1212 p->o_arg.fh = NFS_FH(d_inode(dentry));
1213 }
1214 if (attrs != NULL && attrs->ia_valid != 0) {
1215 __u32 verf[2];
1216
1217 p->o_arg.u.attrs = &p->attrs;
1218 memcpy(&p->attrs, attrs, sizeof(p->attrs));
1219
1220 verf[0] = jiffies;
1221 verf[1] = current->pid;
1222 memcpy(p->o_arg.u.verifier.data, verf,
1223 sizeof(p->o_arg.u.verifier.data));
1224 }
1225 p->c_arg.fh = &p->o_res.fh;
1226 p->c_arg.stateid = &p->o_res.stateid;
1227 p->c_arg.seqid = p->o_arg.seqid;
1228 nfs4_init_opendata_res(p);
1229 kref_init(&p->kref);
1230 return p;
1231
1232 err_free_label:
1233 nfs4_label_free(p->a_label);
1234 err_free_f:
1235 nfs4_label_free(p->f_label);
1236 err_free_p:
1237 kfree(p);
1238 err:
1239 dput(parent);
1240 return NULL;
1241 }
1242
1243 static void nfs4_opendata_free(struct kref *kref)
1244 {
1245 struct nfs4_opendata *p = container_of(kref,
1246 struct nfs4_opendata, kref);
1247 struct super_block *sb = p->dentry->d_sb;
1248
1249 nfs_free_seqid(p->o_arg.seqid);
1250 nfs4_sequence_free_slot(&p->o_res.seq_res);
1251 if (p->state != NULL)
1252 nfs4_put_open_state(p->state);
1253 nfs4_put_state_owner(p->owner);
1254
1255 nfs4_label_free(p->a_label);
1256 nfs4_label_free(p->f_label);
1257
1258 dput(p->dir);
1259 dput(p->dentry);
1260 nfs_sb_deactive(sb);
1261 nfs_fattr_free_names(&p->f_attr);
1262 kfree(p->f_attr.mdsthreshold);
1263 kfree(p);
1264 }
1265
1266 static void nfs4_opendata_put(struct nfs4_opendata *p)
1267 {
1268 if (p != NULL)
1269 kref_put(&p->kref, nfs4_opendata_free);
1270 }
1271
1272 static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task)
1273 {
1274 int ret;
1275
1276 ret = rpc_wait_for_completion_task(task);
1277 return ret;
1278 }
1279
1280 static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
1281 fmode_t fmode)
1282 {
1283 switch(fmode & (FMODE_READ|FMODE_WRITE)) {
1284 case FMODE_READ|FMODE_WRITE:
1285 return state->n_rdwr != 0;
1286 case FMODE_WRITE:
1287 return state->n_wronly != 0;
1288 case FMODE_READ:
1289 return state->n_rdonly != 0;
1290 }
1291 WARN_ON_ONCE(1);
1292 return false;
1293 }
1294
1295 static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
1296 {
1297 int ret = 0;
1298
1299 if (open_mode & (O_EXCL|O_TRUNC))
1300 goto out;
1301 switch (mode & (FMODE_READ|FMODE_WRITE)) {
1302 case FMODE_READ:
1303 ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
1304 && state->n_rdonly != 0;
1305 break;
1306 case FMODE_WRITE:
1307 ret |= test_bit(NFS_O_WRONLY_STATE, &state->flags) != 0
1308 && state->n_wronly != 0;
1309 break;
1310 case FMODE_READ|FMODE_WRITE:
1311 ret |= test_bit(NFS_O_RDWR_STATE, &state->flags) != 0
1312 && state->n_rdwr != 0;
1313 }
1314 out:
1315 return ret;
1316 }
1317
1318 static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
1319 enum open_claim_type4 claim)
1320 {
1321 if (delegation == NULL)
1322 return 0;
1323 if ((delegation->type & fmode) != fmode)
1324 return 0;
1325 if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
1326 return 0;
1327 switch (claim) {
1328 case NFS4_OPEN_CLAIM_NULL:
1329 case NFS4_OPEN_CLAIM_FH:
1330 break;
1331 case NFS4_OPEN_CLAIM_PREVIOUS:
1332 if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
1333 break;
1334 default:
1335 return 0;
1336 }
1337 nfs_mark_delegation_referenced(delegation);
1338 return 1;
1339 }
1340
1341 static void update_open_stateflags(struct nfs4_state *state, fmode_t fmode)
1342 {
1343 switch (fmode) {
1344 case FMODE_WRITE:
1345 state->n_wronly++;
1346 break;
1347 case FMODE_READ:
1348 state->n_rdonly++;
1349 break;
1350 case FMODE_READ|FMODE_WRITE:
1351 state->n_rdwr++;
1352 }
1353 nfs4_state_set_mode_locked(state, state->state | fmode);
1354 }
1355
1356 static void nfs_test_and_clear_all_open_stateid(struct nfs4_state *state)
1357 {
1358 struct nfs_client *clp = state->owner->so_server->nfs_client;
1359 bool need_recover = false;
1360
1361 if (test_and_clear_bit(NFS_O_RDONLY_STATE, &state->flags) && state->n_rdonly)
1362 need_recover = true;
1363 if (test_and_clear_bit(NFS_O_WRONLY_STATE, &state->flags) && state->n_wronly)
1364 need_recover = true;
1365 if (test_and_clear_bit(NFS_O_RDWR_STATE, &state->flags) && state->n_rdwr)
1366 need_recover = true;
1367 if (need_recover)
1368 nfs4_state_mark_reclaim_nograce(clp, state);
1369 }
1370
1371 static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1372 nfs4_stateid *stateid)
1373 {
1374 if (test_and_set_bit(NFS_OPEN_STATE, &state->flags) == 0)
1375 return true;
1376 if (!nfs4_stateid_match_other(stateid, &state->open_stateid)) {
1377 nfs_test_and_clear_all_open_stateid(state);
1378 return true;
1379 }
1380 if (nfs4_stateid_is_newer(stateid, &state->open_stateid))
1381 return true;
1382 return false;
1383 }
1384
1385 static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1386 {
1387 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1388 return;
1389 if (state->n_wronly)
1390 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1391 if (state->n_rdonly)
1392 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1393 if (state->n_rdwr)
1394 set_bit(NFS_O_RDWR_STATE, &state->flags);
1395 set_bit(NFS_OPEN_STATE, &state->flags);
1396 }
1397
1398 static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
1399 nfs4_stateid *arg_stateid,
1400 nfs4_stateid *stateid, fmode_t fmode)
1401 {
1402 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1403 switch (fmode & (FMODE_READ|FMODE_WRITE)) {
1404 case FMODE_WRITE:
1405 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1406 break;
1407 case FMODE_READ:
1408 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1409 break;
1410 case 0:
1411 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1412 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1413 clear_bit(NFS_OPEN_STATE, &state->flags);
1414 }
1415 if (stateid == NULL)
1416 return;
1417 /* Handle races with OPEN */
1418 if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
1419 (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
1420 !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
1421 nfs_resync_open_stateid_locked(state);
1422 return;
1423 }
1424 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1425 nfs4_stateid_copy(&state->stateid, stateid);
1426 nfs4_stateid_copy(&state->open_stateid, stateid);
1427 }
1428
1429 static void nfs_clear_open_stateid(struct nfs4_state *state,
1430 nfs4_stateid *arg_stateid,
1431 nfs4_stateid *stateid, fmode_t fmode)
1432 {
1433 write_seqlock(&state->seqlock);
1434 nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
1435 write_sequnlock(&state->seqlock);
1436 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1437 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1438 }
1439
1440 static void nfs_set_open_stateid_locked(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode)
1441 {
1442 switch (fmode) {
1443 case FMODE_READ:
1444 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1445 break;
1446 case FMODE_WRITE:
1447 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1448 break;
1449 case FMODE_READ|FMODE_WRITE:
1450 set_bit(NFS_O_RDWR_STATE, &state->flags);
1451 }
1452 if (!nfs_need_update_open_stateid(state, stateid))
1453 return;
1454 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1455 nfs4_stateid_copy(&state->stateid, stateid);
1456 nfs4_stateid_copy(&state->open_stateid, stateid);
1457 }
1458
1459 static void __update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, const nfs4_stateid *deleg_stateid, fmode_t fmode)
1460 {
1461 /*
1462 * Protect the call to nfs4_state_set_mode_locked and
1463 * serialise the stateid update
1464 */
1465 spin_lock(&state->owner->so_lock);
1466 write_seqlock(&state->seqlock);
1467 if (deleg_stateid != NULL) {
1468 nfs4_stateid_copy(&state->stateid, deleg_stateid);
1469 set_bit(NFS_DELEGATED_STATE, &state->flags);
1470 }
1471 if (open_stateid != NULL)
1472 nfs_set_open_stateid_locked(state, open_stateid, fmode);
1473 write_sequnlock(&state->seqlock);
1474 update_open_stateflags(state, fmode);
1475 spin_unlock(&state->owner->so_lock);
1476 }
1477
1478 static int update_open_stateid(struct nfs4_state *state, nfs4_stateid *open_stateid, nfs4_stateid *delegation, fmode_t fmode)
1479 {
1480 struct nfs_inode *nfsi = NFS_I(state->inode);
1481 struct nfs_delegation *deleg_cur;
1482 int ret = 0;
1483
1484 fmode &= (FMODE_READ|FMODE_WRITE);
1485
1486 rcu_read_lock();
1487 deleg_cur = rcu_dereference(nfsi->delegation);
1488 if (deleg_cur == NULL)
1489 goto no_delegation;
1490
1491 spin_lock(&deleg_cur->lock);
1492 if (rcu_dereference(nfsi->delegation) != deleg_cur ||
1493 test_bit(NFS_DELEGATION_RETURNING, &deleg_cur->flags) ||
1494 (deleg_cur->type & fmode) != fmode)
1495 goto no_delegation_unlock;
1496
1497 if (delegation == NULL)
1498 delegation = &deleg_cur->stateid;
1499 else if (!nfs4_stateid_match(&deleg_cur->stateid, delegation))
1500 goto no_delegation_unlock;
1501
1502 nfs_mark_delegation_referenced(deleg_cur);
1503 __update_open_stateid(state, open_stateid, &deleg_cur->stateid, fmode);
1504 ret = 1;
1505 no_delegation_unlock:
1506 spin_unlock(&deleg_cur->lock);
1507 no_delegation:
1508 rcu_read_unlock();
1509
1510 if (!ret && open_stateid != NULL) {
1511 __update_open_stateid(state, open_stateid, NULL, fmode);
1512 ret = 1;
1513 }
1514 if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
1515 nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
1516
1517 return ret;
1518 }
1519
1520 static bool nfs4_update_lock_stateid(struct nfs4_lock_state *lsp,
1521 const nfs4_stateid *stateid)
1522 {
1523 struct nfs4_state *state = lsp->ls_state;
1524 bool ret = false;
1525
1526 spin_lock(&state->state_lock);
1527 if (!nfs4_stateid_match_other(stateid, &lsp->ls_stateid))
1528 goto out_noupdate;
1529 if (!nfs4_stateid_is_newer(stateid, &lsp->ls_stateid))
1530 goto out_noupdate;
1531 nfs4_stateid_copy(&lsp->ls_stateid, stateid);
1532 ret = true;
1533 out_noupdate:
1534 spin_unlock(&state->state_lock);
1535 return ret;
1536 }
1537
1538 static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmode)
1539 {
1540 struct nfs_delegation *delegation;
1541
1542 rcu_read_lock();
1543 delegation = rcu_dereference(NFS_I(inode)->delegation);
1544 if (delegation == NULL || (delegation->type & fmode) == fmode) {
1545 rcu_read_unlock();
1546 return;
1547 }
1548 rcu_read_unlock();
1549 nfs4_inode_return_delegation(inode);
1550 }
1551
1552 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
1553 {
1554 struct nfs4_state *state = opendata->state;
1555 struct nfs_inode *nfsi = NFS_I(state->inode);
1556 struct nfs_delegation *delegation;
1557 int open_mode = opendata->o_arg.open_flags;
1558 fmode_t fmode = opendata->o_arg.fmode;
1559 enum open_claim_type4 claim = opendata->o_arg.claim;
1560 nfs4_stateid stateid;
1561 int ret = -EAGAIN;
1562
1563 for (;;) {
1564 spin_lock(&state->owner->so_lock);
1565 if (can_open_cached(state, fmode, open_mode)) {
1566 update_open_stateflags(state, fmode);
1567 spin_unlock(&state->owner->so_lock);
1568 goto out_return_state;
1569 }
1570 spin_unlock(&state->owner->so_lock);
1571 rcu_read_lock();
1572 delegation = rcu_dereference(nfsi->delegation);
1573 if (!can_open_delegated(delegation, fmode, claim)) {
1574 rcu_read_unlock();
1575 break;
1576 }
1577 /* Save the delegation */
1578 nfs4_stateid_copy(&stateid, &delegation->stateid);
1579 rcu_read_unlock();
1580 nfs_release_seqid(opendata->o_arg.seqid);
1581 if (!opendata->is_recover) {
1582 ret = nfs_may_open(state->inode, state->owner->so_cred, open_mode);
1583 if (ret != 0)
1584 goto out;
1585 }
1586 ret = -EAGAIN;
1587
1588 /* Try to update the stateid using the delegation */
1589 if (update_open_stateid(state, NULL, &stateid, fmode))
1590 goto out_return_state;
1591 }
1592 out:
1593 return ERR_PTR(ret);
1594 out_return_state:
1595 atomic_inc(&state->count);
1596 return state;
1597 }
1598
1599 static void
1600 nfs4_opendata_check_deleg(struct nfs4_opendata *data, struct nfs4_state *state)
1601 {
1602 struct nfs_client *clp = NFS_SERVER(state->inode)->nfs_client;
1603 struct nfs_delegation *delegation;
1604 int delegation_flags = 0;
1605
1606 rcu_read_lock();
1607 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1608 if (delegation)
1609 delegation_flags = delegation->flags;
1610 rcu_read_unlock();
1611 switch (data->o_arg.claim) {
1612 default:
1613 break;
1614 case NFS4_OPEN_CLAIM_DELEGATE_CUR:
1615 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
1616 pr_err_ratelimited("NFS: Broken NFSv4 server %s is "
1617 "returning a delegation for "
1618 "OPEN(CLAIM_DELEGATE_CUR)\n",
1619 clp->cl_hostname);
1620 return;
1621 }
1622 if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0)
1623 nfs_inode_set_delegation(state->inode,
1624 data->owner->so_cred,
1625 &data->o_res);
1626 else
1627 nfs_inode_reclaim_delegation(state->inode,
1628 data->owner->so_cred,
1629 &data->o_res);
1630 }
1631
1632 /*
1633 * Check the inode attributes against the CLAIM_PREVIOUS returned attributes
1634 * and update the nfs4_state.
1635 */
1636 static struct nfs4_state *
1637 _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
1638 {
1639 struct inode *inode = data->state->inode;
1640 struct nfs4_state *state = data->state;
1641 int ret;
1642
1643 if (!data->rpc_done) {
1644 if (data->rpc_status) {
1645 ret = data->rpc_status;
1646 goto err;
1647 }
1648 /* cached opens have already been processed */
1649 goto update;
1650 }
1651
1652 ret = nfs_refresh_inode(inode, &data->f_attr);
1653 if (ret)
1654 goto err;
1655
1656 if (data->o_res.delegation_type != 0)
1657 nfs4_opendata_check_deleg(data, state);
1658 update:
1659 update_open_stateid(state, &data->o_res.stateid, NULL,
1660 data->o_arg.fmode);
1661 atomic_inc(&state->count);
1662
1663 return state;
1664 err:
1665 return ERR_PTR(ret);
1666
1667 }
1668
1669 static struct nfs4_state *
1670 _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1671 {
1672 struct inode *inode;
1673 struct nfs4_state *state = NULL;
1674 int ret;
1675
1676 if (!data->rpc_done) {
1677 state = nfs4_try_open_cached(data);
1678 trace_nfs4_cached_open(data->state);
1679 goto out;
1680 }
1681
1682 ret = -EAGAIN;
1683 if (!(data->f_attr.valid & NFS_ATTR_FATTR))
1684 goto err;
1685 inode = nfs_fhget(data->dir->d_sb, &data->o_res.fh, &data->f_attr, data->f_label);
1686 ret = PTR_ERR(inode);
1687 if (IS_ERR(inode))
1688 goto err;
1689 ret = -ENOMEM;
1690 state = nfs4_get_open_state(inode, data->owner);
1691 if (state == NULL)
1692 goto err_put_inode;
1693 if (data->o_res.delegation_type != 0)
1694 nfs4_opendata_check_deleg(data, state);
1695 update_open_stateid(state, &data->o_res.stateid, NULL,
1696 data->o_arg.fmode);
1697 iput(inode);
1698 out:
1699 nfs_release_seqid(data->o_arg.seqid);
1700 return state;
1701 err_put_inode:
1702 iput(inode);
1703 err:
1704 return ERR_PTR(ret);
1705 }
1706
1707 static struct nfs4_state *
1708 nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
1709 {
1710 struct nfs4_state *ret;
1711
1712 if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS)
1713 ret = _nfs4_opendata_reclaim_to_nfs4_state(data);
1714 else
1715 ret = _nfs4_opendata_to_nfs4_state(data);
1716 nfs4_sequence_free_slot(&data->o_res.seq_res);
1717 return ret;
1718 }
1719
1720 static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state)
1721 {
1722 struct nfs_inode *nfsi = NFS_I(state->inode);
1723 struct nfs_open_context *ctx;
1724
1725 spin_lock(&state->inode->i_lock);
1726 list_for_each_entry(ctx, &nfsi->open_files, list) {
1727 if (ctx->state != state)
1728 continue;
1729 get_nfs_open_context(ctx);
1730 spin_unlock(&state->inode->i_lock);
1731 return ctx;
1732 }
1733 spin_unlock(&state->inode->i_lock);
1734 return ERR_PTR(-ENOENT);
1735 }
1736
1737 static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx,
1738 struct nfs4_state *state, enum open_claim_type4 claim)
1739 {
1740 struct nfs4_opendata *opendata;
1741
1742 opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0, 0,
1743 NULL, NULL, claim, GFP_NOFS);
1744 if (opendata == NULL)
1745 return ERR_PTR(-ENOMEM);
1746 opendata->state = state;
1747 atomic_inc(&state->count);
1748 return opendata;
1749 }
1750
1751 static int nfs4_open_recover_helper(struct nfs4_opendata *opendata,
1752 fmode_t fmode)
1753 {
1754 struct nfs4_state *newstate;
1755 int ret;
1756
1757 if (!nfs4_mode_match_open_stateid(opendata->state, fmode))
1758 return 0;
1759 opendata->o_arg.open_flags = 0;
1760 opendata->o_arg.fmode = fmode;
1761 opendata->o_arg.share_access = nfs4_map_atomic_open_share(
1762 NFS_SB(opendata->dentry->d_sb),
1763 fmode, 0);
1764 memset(&opendata->o_res, 0, sizeof(opendata->o_res));
1765 memset(&opendata->c_res, 0, sizeof(opendata->c_res));
1766 nfs4_init_opendata_res(opendata);
1767 ret = _nfs4_recover_proc_open(opendata);
1768 if (ret != 0)
1769 return ret;
1770 newstate = nfs4_opendata_to_nfs4_state(opendata);
1771 if (IS_ERR(newstate))
1772 return PTR_ERR(newstate);
1773 if (newstate != opendata->state)
1774 ret = -ESTALE;
1775 nfs4_close_state(newstate, fmode);
1776 return ret;
1777 }
1778
1779 static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *state)
1780 {
1781 int ret;
1782
1783 /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
1784 clear_bit(NFS_O_RDWR_STATE, &state->flags);
1785 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
1786 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
1787 /* memory barrier prior to reading state->n_* */
1788 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1789 clear_bit(NFS_OPEN_STATE, &state->flags);
1790 smp_rmb();
1791 ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1792 if (ret != 0)
1793 return ret;
1794 ret = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1795 if (ret != 0)
1796 return ret;
1797 ret = nfs4_open_recover_helper(opendata, FMODE_READ);
1798 if (ret != 0)
1799 return ret;
1800 /*
1801 * We may have performed cached opens for all three recoveries.
1802 * Check if we need to update the current stateid.
1803 */
1804 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0 &&
1805 !nfs4_stateid_match(&state->stateid, &state->open_stateid)) {
1806 write_seqlock(&state->seqlock);
1807 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
1808 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1809 write_sequnlock(&state->seqlock);
1810 }
1811 return 0;
1812 }
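/*
 * Summary of the recovery pass above: each open mode that is actually in use
 * is reclaimed in the order READ|WRITE, WRITE, READ (nfs4_open_recover_helper()
 * skips modes with no matching opens); afterwards state->stateid is
 * resynchronised with the recovered open stateid unless a delegation is
 * still in place.
 */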
1813
1814 /*
1815 * OPEN_RECLAIM:
1816 * reclaim state on the server after a reboot.
1817 */
1818 static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1819 {
1820 struct nfs_delegation *delegation;
1821 struct nfs4_opendata *opendata;
1822 fmode_t delegation_type = 0;
1823 int status;
1824
1825 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1826 NFS4_OPEN_CLAIM_PREVIOUS);
1827 if (IS_ERR(opendata))
1828 return PTR_ERR(opendata);
1829 rcu_read_lock();
1830 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
1831 if (delegation != NULL && test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags) != 0)
1832 delegation_type = delegation->type;
1833 rcu_read_unlock();
1834 opendata->o_arg.u.delegation_type = delegation_type;
1835 status = nfs4_open_recover(opendata, state);
1836 nfs4_opendata_put(opendata);
1837 return status;
1838 }
1839
1840 static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state)
1841 {
1842 struct nfs_server *server = NFS_SERVER(state->inode);
1843 struct nfs4_exception exception = { };
1844 int err;
1845 do {
1846 err = _nfs4_do_open_reclaim(ctx, state);
1847 trace_nfs4_open_reclaim(ctx, 0, err);
1848 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
1849 continue;
1850 if (err != -NFS4ERR_DELAY)
1851 break;
1852 nfs4_handle_exception(server, err, &exception);
1853 } while (exception.retry);
1854 return err;
1855 }
1856
1857 static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
1858 {
1859 struct nfs_open_context *ctx;
1860 int ret;
1861
1862 ctx = nfs4_state_find_open_context(state);
1863 if (IS_ERR(ctx))
1864 return -EAGAIN;
1865 ret = nfs4_do_open_reclaim(ctx, state);
1866 put_nfs_open_context(ctx);
1867 return ret;
1868 }
1869
1870 static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
1871 {
1872 switch (err) {
1873 default:
1874 printk(KERN_ERR "NFS: %s: unhandled error "
1875 "%d.\n", __func__, err);
1876 case 0:
1877 case -ENOENT:
1878 case -EAGAIN:
1879 case -ESTALE:
1880 break;
1881 case -NFS4ERR_BADSESSION:
1882 case -NFS4ERR_BADSLOT:
1883 case -NFS4ERR_BAD_HIGH_SLOT:
1884 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1885 case -NFS4ERR_DEADSESSION:
1886 set_bit(NFS_DELEGATED_STATE, &state->flags);
1887 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1888 return -EAGAIN;
1889 case -NFS4ERR_STALE_CLIENTID:
1890 case -NFS4ERR_STALE_STATEID:
1891 set_bit(NFS_DELEGATED_STATE, &state->flags);
1892 case -NFS4ERR_EXPIRED:
1893 /* Don't recall a delegation if it was lost */
1894 nfs4_schedule_lease_recovery(server->nfs_client);
1895 return -EAGAIN;
1896 case -NFS4ERR_MOVED:
1897 nfs4_schedule_migration_recovery(server);
1898 return -EAGAIN;
1899 case -NFS4ERR_LEASE_MOVED:
1900 nfs4_schedule_lease_moved_recovery(server->nfs_client);
1901 return -EAGAIN;
1902 case -NFS4ERR_DELEG_REVOKED:
1903 case -NFS4ERR_ADMIN_REVOKED:
1904 case -NFS4ERR_BAD_STATEID:
1905 case -NFS4ERR_OPENMODE:
1906 nfs_inode_find_state_and_recover(state->inode,
1907 stateid);
1908 nfs4_schedule_stateid_recovery(server, state);
1909 return -EAGAIN;
1910 case -NFS4ERR_DELAY:
1911 case -NFS4ERR_GRACE:
1912 set_bit(NFS_DELEGATED_STATE, &state->flags);
1913 ssleep(1);
1914 return -EAGAIN;
1915 case -ENOMEM:
1916 case -NFS4ERR_DENIED:
1917 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
1918 return 0;
1919 }
1920 return err;
1921 }
1922
1923 int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
1924 struct nfs4_state *state, const nfs4_stateid *stateid,
1925 fmode_t type)
1926 {
1927 struct nfs_server *server = NFS_SERVER(state->inode);
1928 struct nfs4_opendata *opendata;
1929 int err = 0;
1930
1931 opendata = nfs4_open_recoverdata_alloc(ctx, state,
1932 NFS4_OPEN_CLAIM_DELEG_CUR_FH);
1933 if (IS_ERR(opendata))
1934 return PTR_ERR(opendata);
1935 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
1936 write_seqlock(&state->seqlock);
1937 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
1938 write_sequnlock(&state->seqlock);
1939 clear_bit(NFS_DELEGATED_STATE, &state->flags);
1940 switch (type & (FMODE_READ|FMODE_WRITE)) {
1941 case FMODE_READ|FMODE_WRITE:
1942 case FMODE_WRITE:
1943 err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
1944 if (err)
1945 break;
1946 err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
1947 if (err)
1948 break;
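/* Fall through: also recover the read-only open */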
1949 case FMODE_READ:
1950 err = nfs4_open_recover_helper(opendata, FMODE_READ);
1951 }
1952 nfs4_opendata_put(opendata);
1953 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
1954 }
1955
1956 static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
1957 {
1958 struct nfs4_opendata *data = calldata;
1959
1960 nfs40_setup_sequence(data->o_arg.server->nfs_client->cl_slot_tbl,
1961 &data->c_arg.seq_args, &data->c_res.seq_res, task);
1962 }
1963
1964 static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
1965 {
1966 struct nfs4_opendata *data = calldata;
1967
1968 nfs40_sequence_done(task, &data->c_res.seq_res);
1969
1970 data->rpc_status = task->tk_status;
1971 if (data->rpc_status == 0) {
1972 nfs4_stateid_copy(&data->o_res.stateid, &data->c_res.stateid);
1973 nfs_confirm_seqid(&data->owner->so_seqid, 0);
1974 renew_lease(data->o_res.server, data->timestamp);
1975 data->rpc_done = 1;
1976 }
1977 }
1978
1979 static void nfs4_open_confirm_release(void *calldata)
1980 {
1981 struct nfs4_opendata *data = calldata;
1982 struct nfs4_state *state = NULL;
1983
1984 /* If this request hasn't been cancelled, do nothing */
1985 if (data->cancelled == 0)
1986 goto out_free;
1987 /* In case of error, no cleanup! */
1988 if (!data->rpc_done)
1989 goto out_free;
1990 state = nfs4_opendata_to_nfs4_state(data);
1991 if (!IS_ERR(state))
1992 nfs4_close_state(state, data->o_arg.fmode);
1993 out_free:
1994 nfs4_opendata_put(data);
1995 }
1996
1997 static const struct rpc_call_ops nfs4_open_confirm_ops = {
1998 .rpc_call_prepare = nfs4_open_confirm_prepare,
1999 .rpc_call_done = nfs4_open_confirm_done,
2000 .rpc_release = nfs4_open_confirm_release,
2001 };
2002
2003 /*
2004 * Note: On error, nfs4_proc_open_confirm will free the struct nfs4_opendata
2005 */
2006 static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
2007 {
2008 struct nfs_server *server = NFS_SERVER(d_inode(data->dir));
2009 struct rpc_task *task;
2010 struct rpc_message msg = {
2011 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
2012 .rpc_argp = &data->c_arg,
2013 .rpc_resp = &data->c_res,
2014 .rpc_cred = data->owner->so_cred,
2015 };
2016 struct rpc_task_setup task_setup_data = {
2017 .rpc_client = server->client,
2018 .rpc_message = &msg,
2019 .callback_ops = &nfs4_open_confirm_ops,
2020 .callback_data = data,
2021 .workqueue = nfsiod_workqueue,
2022 .flags = RPC_TASK_ASYNC,
2023 };
2024 int status;
2025
2026 nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
2027 kref_get(&data->kref);
2028 data->rpc_done = 0;
2029 data->rpc_status = 0;
2030 data->timestamp = jiffies;
2031 if (data->is_recover)
2032 nfs4_set_sequence_privileged(&data->c_arg.seq_args);
2033 task = rpc_run_task(&task_setup_data);
2034 if (IS_ERR(task))
2035 return PTR_ERR(task);
2036 status = nfs4_wait_for_completion_rpc_task(task);
2037 if (status != 0) {
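/*
* The wait for completion was interrupted: mark the call as cancelled
* so that nfs4_open_confirm_release() cleans up any server-side state.
*/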
2038 data->cancelled = 1;
2039 smp_wmb();
2040 } else
2041 status = data->rpc_status;
2042 rpc_put_task(task);
2043 return status;
2044 }
2045
2046 static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
2047 {
2048 struct nfs4_opendata *data = calldata;
2049 struct nfs4_state_owner *sp = data->owner;
2050 struct nfs_client *clp = sp->so_server->nfs_client;
2051 enum open_claim_type4 claim = data->o_arg.claim;
2052
2053 if (nfs_wait_on_sequence(data->o_arg.seqid, task) != 0)
2054 goto out_wait;
2055 /*
2056 * Check if we still need to send an OPEN call, or if we can use
2057 * a delegation instead.
2058 */
2059 if (data->state != NULL) {
2060 struct nfs_delegation *delegation;
2061
2062 if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
2063 goto out_no_action;
2064 rcu_read_lock();
2065 delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
2066 if (can_open_delegated(delegation, data->o_arg.fmode, claim))
2067 goto unlock_no_action;
2068 rcu_read_unlock();
2069 }
2070 /* Update client id. */
2071 data->o_arg.clientid = clp->cl_clientid;
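/*
* Reclaim and by-filehandle claims reuse an existing filehandle, so use
* the OPEN_NOATTR procedure; the reclaim/delegation cases also trim the
* attribute bitmap (note the intentional fall-through below).
*/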
2072 switch (claim) {
2073 default:
2074 break;
2075 case NFS4_OPEN_CLAIM_PREVIOUS:
2076 case NFS4_OPEN_CLAIM_DELEG_CUR_FH:
2077 case NFS4_OPEN_CLAIM_DELEG_PREV_FH:
2078 data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0];
2079 case NFS4_OPEN_CLAIM_FH:
2080 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR];
2081 nfs_copy_fh(&data->o_res.fh, data->o_arg.fh);
2082 }
2083 data->timestamp = jiffies;
2084 if (nfs4_setup_sequence(data->o_arg.server,
2085 &data->o_arg.seq_args,
2086 &data->o_res.seq_res,
2087 task) != 0)
2088 nfs_release_seqid(data->o_arg.seqid);
2089
2090 /* Set the create mode (note dependency on the session type) */
2091 data->o_arg.createmode = NFS4_CREATE_UNCHECKED;
2092 if (data->o_arg.open_flags & O_EXCL) {
2093 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE;
2094 if (nfs4_has_persistent_session(clp))
2095 data->o_arg.createmode = NFS4_CREATE_GUARDED;
2096 else if (clp->cl_mvops->minor_version > 0)
2097 data->o_arg.createmode = NFS4_CREATE_EXCLUSIVE4_1;
2098 }
2099 return;
2100 unlock_no_action:
2101 trace_nfs4_cached_open(data->state);
2102 rcu_read_unlock();
2103 out_no_action:
2104 task->tk_action = NULL;
2105 out_wait:
2106 nfs4_sequence_done(task, &data->o_res.seq_res);
2107 }
2108
2109 static void nfs4_open_done(struct rpc_task *task, void *calldata)
2110 {
2111 struct nfs4_opendata *data = calldata;
2112
2113 data->rpc_status = task->tk_status;
2114
2115 if (!nfs4_sequence_process(task, &data->o_res.seq_res))
2116 return;
2117
2118 if (task->tk_status == 0) {
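/*
* The OPEN may have succeeded on something other than a regular
* file; translate the returned file type into an appropriate error.
*/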
2119 if (data->o_res.f_attr->valid & NFS_ATTR_FATTR_TYPE) {
2120 switch (data->o_res.f_attr->mode & S_IFMT) {
2121 case S_IFREG:
2122 break;
2123 case S_IFLNK:
2124 data->rpc_status = -ELOOP;
2125 break;
2126 case S_IFDIR:
2127 data->rpc_status = -EISDIR;
2128 break;
2129 default:
2130 data->rpc_status = -ENOTDIR;
2131 }
2132 }
2133 renew_lease(data->o_res.server, data->timestamp);
2134 if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
2135 nfs_confirm_seqid(&data->owner->so_seqid, 0);
2136 }
2137 data->rpc_done = 1;
2138 }
2139
2140 static void nfs4_open_release(void *calldata)
2141 {
2142 struct nfs4_opendata *data = calldata;
2143 struct nfs4_state *state = NULL;
2144
2145 /* If this request hasn't been cancelled, do nothing */
2146 if (data->cancelled == 0)
2147 goto out_free;
2148 /* In case of error, no cleanup! */
2149 if (data->rpc_status != 0 || !data->rpc_done)
2150 goto out_free;
2151 /* In case we need an open_confirm, no cleanup! */
2152 if (data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM)
2153 goto out_free;
2154 state = nfs4_opendata_to_nfs4_state(data);
2155 if (!IS_ERR(state))
2156 nfs4_close_state(state, data->o_arg.fmode);
2157 out_free:
2158 nfs4_opendata_put(data);
2159 }
2160
2161 static const struct rpc_call_ops nfs4_open_ops = {
2162 .rpc_call_prepare = nfs4_open_prepare,
2163 .rpc_call_done = nfs4_open_done,
2164 .rpc_release = nfs4_open_release,
2165 };
2166
2167 static int nfs4_run_open_task(struct nfs4_opendata *data, int isrecover)
2168 {
2169 struct inode *dir = d_inode(data->dir);
2170 struct nfs_server *server = NFS_SERVER(dir);
2171 struct nfs_openargs *o_arg = &data->o_arg;
2172 struct nfs_openres *o_res = &data->o_res;
2173 struct rpc_task *task;
2174 struct rpc_message msg = {
2175 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN],
2176 .rpc_argp = o_arg,
2177 .rpc_resp = o_res,
2178 .rpc_cred = data->owner->so_cred,
2179 };
2180 struct rpc_task_setup task_setup_data = {
2181 .rpc_client = server->client,
2182 .rpc_message = &msg,
2183 .callback_ops = &nfs4_open_ops,
2184 .callback_data = data,
2185 .workqueue = nfsiod_workqueue,
2186 .flags = RPC_TASK_ASYNC,
2187 };
2188 int status;
2189
2190 nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1);
2191 kref_get(&data->kref);
2192 data->rpc_done = 0;
2193 data->rpc_status = 0;
2194 data->cancelled = 0;
2195 data->is_recover = 0;
2196 if (isrecover) {
2197 nfs4_set_sequence_privileged(&o_arg->seq_args);
2198 data->is_recover = 1;
2199 }
2200 task = rpc_run_task(&task_setup_data);
2201 if (IS_ERR(task))
2202 return PTR_ERR(task);
2203 status = nfs4_wait_for_completion_rpc_task(task);
2204 if (status != 0) {
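/* Interrupted: flag cancellation so nfs4_open_release() can close any state the OPEN created */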
2205 data->cancelled = 1;
2206 smp_wmb();
2207 } else
2208 status = data->rpc_status;
2209 rpc_put_task(task);
2210
2211 return status;
2212 }
2213
2214 static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
2215 {
2216 struct inode *dir = d_inode(data->dir);
2217 struct nfs_openres *o_res = &data->o_res;
2218 int status;
2219
2220 status = nfs4_run_open_task(data, 1);
2221 if (status != 0 || !data->rpc_done)
2222 return status;
2223
2224 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
2225
2226 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2227 status = _nfs4_proc_open_confirm(data);
2228 if (status != 0)
2229 return status;
2230 }
2231
2232 return status;
2233 }
2234
2235 /*
2236 * Additional permission checks in order to distinguish between an
2237 * open for read and an open for execute. This works around the
2238 * fact that NFSv4 OPEN treats read and execute permissions as being
2239 * the same.
2240 * Note that in the non-execute case, we want to turn off permission
2241 * checking if we just created a new file (POSIX open() semantics).
2242 */
2243 static int nfs4_opendata_access(struct rpc_cred *cred,
2244 struct nfs4_opendata *opendata,
2245 struct nfs4_state *state, fmode_t fmode,
2246 int openflags)
2247 {
2248 struct nfs_access_entry cache;
2249 u32 mask;
2250
2251 /* access call failed or for some reason the server doesn't
2252 * support any access modes -- defer access call until later */
2253 if (opendata->o_res.access_supported == 0)
2254 return 0;
2255
2256 mask = 0;
2257 /*
2258 * Use openflags to check for exec, because fmode won't
2259 * always have FMODE_EXEC set when a file is opened for execute.
2260 */
2261 if (openflags & __FMODE_EXEC) {
2262 /* ONLY check for exec rights */
2263 mask = MAY_EXEC;
2264 } else if ((fmode & FMODE_READ) && !opendata->file_created)
2265 mask = MAY_READ;
2266
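/*
* Cache the access results that were returned along with the OPEN so
* that later access() checks can be answered from the access cache.
*/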
2267 cache.cred = cred;
2268 cache.jiffies = jiffies;
2269 nfs_access_set_mask(&cache, opendata->o_res.access_result);
2270 nfs_access_add_cache(state->inode, &cache);
2271
2272 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
2273 return 0;
2274
2275 /* Even though the OPEN succeeded, access is denied. Close the file */
2276 nfs4_close_state(state, fmode);
2277 return -EACCES;
2278 }
2279
2280 /*
2281 * Note: On error, nfs4_proc_open will free the struct nfs4_opendata
2282 */
2283 static int _nfs4_proc_open(struct nfs4_opendata *data)
2284 {
2285 struct inode *dir = d_inode(data->dir);
2286 struct nfs_server *server = NFS_SERVER(dir);
2287 struct nfs_openargs *o_arg = &data->o_arg;
2288 struct nfs_openres *o_res = &data->o_res;
2289 int status;
2290
2291 status = nfs4_run_open_task(data, 0);
2292 if (!data->rpc_done)
2293 return status;
2294 if (status != 0) {
2295 if (status == -NFS4ERR_BADNAME &&
2296 !(o_arg->open_flags & O_CREAT))
2297 return -ENOENT;
2298 return status;
2299 }
2300
2301 nfs_fattr_map_and_free_names(server, &data->f_attr);
2302
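/*
* For creates, update the cached directory change attribute and work out
* whether the file was newly created: guaranteed by O_EXCL, otherwise
* inferred from the before/after change_info.
*/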
2303 if (o_arg->open_flags & O_CREAT) {
2304 update_changeattr(dir, &o_res->cinfo);
2305 if (o_arg->open_flags & O_EXCL)
2306 data->file_created = 1;
2307 else if (o_res->cinfo.before != o_res->cinfo.after)
2308 data->file_created = 1;
2309 }
2310 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
2311 server->caps &= ~NFS_CAP_POSIX_LOCK;
2312 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
2313 status = _nfs4_proc_open_confirm(data);
2314 if (status != 0)
2315 return status;
2316 }
2317 if (!(o_res->f_attr->valid & NFS_ATTR_FATTR))
2318 nfs4_proc_getattr(server, &o_res->fh, o_res->f_attr, o_res->f_label);
2319 return 0;
2320 }
2321
2322 static int nfs4_recover_expired_lease(struct nfs_server *server)
2323 {
2324 return nfs4_client_recover_expired_lease(server->nfs_client);
2325 }
2326
2327 /*
2328 * OPEN_EXPIRED:
2329 * reclaim state on the server after a network partition.
2330 * Assumes caller holds the appropriate lock
2331 */
2332 static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2333 {
2334 struct nfs4_opendata *opendata;
2335 int ret;
2336
2337 opendata = nfs4_open_recoverdata_alloc(ctx, state,
2338 NFS4_OPEN_CLAIM_FH);
2339 if (IS_ERR(opendata))
2340 return PTR_ERR(opendata);
2341 ret = nfs4_open_recover(opendata, state);
2342 if (ret == -ESTALE)
2343 d_drop(ctx->dentry);
2344 nfs4_opendata_put(opendata);
2345 return ret;
2346 }
2347
2348 static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state)
2349 {
2350 struct nfs_server *server = NFS_SERVER(state->inode);
2351 struct nfs4_exception exception = { };
2352 int err;
2353
2354 do {
2355 err = _nfs4_open_expired(ctx, state);
2356 trace_nfs4_open_expired(ctx, 0, err);
2357 if (nfs4_clear_cap_atomic_open_v1(server, err, &exception))
2358 continue;
2359 switch (err) {
2360 default:
2361 goto out;
2362 case -NFS4ERR_GRACE:
2363 case -NFS4ERR_DELAY:
2364 nfs4_handle_exception(server, err, &exception);
2365 err = 0;
2366 }
2367 } while (exception.retry);
2368 out:
2369 return err;
2370 }
2371
2372 static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2373 {
2374 struct nfs_open_context *ctx;
2375 int ret;
2376
2377 ctx = nfs4_state_find_open_context(state);
2378 if (IS_ERR(ctx))
2379 return -EAGAIN;
2380 ret = nfs4_do_open_expired(ctx, state);
2381 put_nfs_open_context(ctx);
2382 return ret;
2383 }
2384
2385 static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2386 {
2387 nfs_remove_bad_delegation(state->inode);
2388 write_seqlock(&state->seqlock);
2389 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2390 write_sequnlock(&state->seqlock);
2391 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2392 }
2393
2394 static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2395 {
2396 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2397 nfs_finish_clear_delegation_stateid(state);
2398 }
2399
2400 static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2401 {
2402 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2403 nfs40_clear_delegation_stateid(state);
2404 return nfs4_open_expired(sp, state);
2405 }
2406
2407 #if defined(CONFIG_NFS_V4_1)
2408 static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2409 {
2410 struct nfs_server *server = NFS_SERVER(state->inode);
2411 nfs4_stateid stateid;
2412 struct nfs_delegation *delegation;
2413 struct rpc_cred *cred;
2414 int status;
2415
2416 /* Get the delegation credential for use by test/free_stateid */
2417 rcu_read_lock();
2418 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2419 if (delegation == NULL) {
2420 rcu_read_unlock();
2421 return;
2422 }
2423
2424 nfs4_stateid_copy(&stateid, &delegation->stateid);
2425 cred = get_rpccred(delegation->cred);
2426 rcu_read_unlock();
2427 status = nfs41_test_stateid(server, &stateid, cred);
2428 trace_nfs4_test_delegation_stateid(state, NULL, status);
2429
2430 if (status != NFS_OK) {
2431 /* Free the stateid unless the server explicitly
2432 * informs us the stateid is unrecognized. */
2433 if (status != -NFS4ERR_BAD_STATEID)
2434 nfs41_free_stateid(server, &stateid, cred);
2435 nfs_finish_clear_delegation_stateid(state);
2436 }
2437
2438 put_rpccred(cred);
2439 }
2440
2441 /**
2442 * nfs41_check_open_stateid - possibly free an open stateid
2443 *
2444 * @state: NFSv4 state for an inode
2445 *
2446 * Returns NFS_OK if recovery for this stateid is now finished.
2447 * Otherwise a negative NFS4ERR value is returned.
2448 */
2449 static int nfs41_check_open_stateid(struct nfs4_state *state)
2450 {
2451 struct nfs_server *server = NFS_SERVER(state->inode);
2452 nfs4_stateid *stateid = &state->open_stateid;
2453 struct rpc_cred *cred = state->owner->so_cred;
2454 int status;
2455
2456 /* If a state reset has been done, test_stateid is unneeded */
2457 if ((test_bit(NFS_O_RDONLY_STATE, &state->flags) == 0) &&
2458 (test_bit(NFS_O_WRONLY_STATE, &state->flags) == 0) &&
2459 (test_bit(NFS_O_RDWR_STATE, &state->flags) == 0))
2460 return -NFS4ERR_BAD_STATEID;
2461
2462 status = nfs41_test_stateid(server, stateid, cred);
2463 trace_nfs4_test_open_stateid(state, NULL, status);
2464 if (status != NFS_OK) {
2465 /* Free the stateid unless the server explicitly
2466 * informs us the stateid is unrecognized. */
2467 if (status != -NFS4ERR_BAD_STATEID)
2468 nfs41_free_stateid(server, stateid, cred);
2469
2470 clear_bit(NFS_O_RDONLY_STATE, &state->flags);
2471 clear_bit(NFS_O_WRONLY_STATE, &state->flags);
2472 clear_bit(NFS_O_RDWR_STATE, &state->flags);
2473 clear_bit(NFS_OPEN_STATE, &state->flags);
2474 }
2475 return status;
2476 }
2477
2478 static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2479 {
2480 int status;
2481
2482 nfs41_check_delegation_stateid(state);
2483 status = nfs41_check_open_stateid(state);
2484 if (status != NFS_OK)
2485 status = nfs4_open_expired(sp, state);
2486 return status;
2487 }
2488 #endif
2489
2490 /*
2491 * On an EXCLUSIVE create, the server should send back a bitmask with FATTR4_*
2492 * fields corresponding to the attributes that were used to store the verifier.
2493 * Make sure we clobber those fields in the later setattr call.
2494 */
2495 static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata,
2496 struct iattr *sattr, struct nfs4_label **label)
2497 {
2498 const u32 *attrset = opendata->o_res.attrset;
2499
2500 if ((attrset[1] & FATTR4_WORD1_TIME_ACCESS) &&
2501 !(sattr->ia_valid & ATTR_ATIME_SET))
2502 sattr->ia_valid |= ATTR_ATIME;
2503
2504 if ((attrset[1] & FATTR4_WORD1_TIME_MODIFY) &&
2505 !(sattr->ia_valid & ATTR_MTIME_SET))
2506 sattr->ia_valid |= ATTR_MTIME;
2507
2508 /* Except for MODE, it is harmless to set these attributes twice. */
2509 if ((attrset[1] & FATTR4_WORD1_MODE))
2510 sattr->ia_valid &= ~ATTR_MODE;
2511
2512 if (attrset[2] & FATTR4_WORD2_SECURITY_LABEL)
2513 *label = NULL;
2514 }
2515
2516 static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2517 fmode_t fmode,
2518 int flags,
2519 struct nfs_open_context *ctx)
2520 {
2521 struct nfs4_state_owner *sp = opendata->owner;
2522 struct nfs_server *server = sp->so_server;
2523 struct dentry *dentry;
2524 struct nfs4_state *state;
2525 unsigned int seq;
2526 int ret;
2527
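/*
* Sample the state owner's reclaim seqcount; if state recovery runs while
* the OPEN is in flight, the recheck below schedules stateid recovery.
*/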
2528 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2529
2530 ret = _nfs4_proc_open(opendata);
2531 if (ret != 0)
2532 goto out;
2533
2534 state = nfs4_opendata_to_nfs4_state(opendata);
2535 ret = PTR_ERR(state);
2536 if (IS_ERR(state))
2537 goto out;
2538 if (server->caps & NFS_CAP_POSIX_LOCK)
2539 set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
2540
2541 dentry = opendata->dentry;
2542 if (d_really_is_negative(dentry)) {
2543 struct dentry *alias;
2544 d_drop(dentry);
2545 alias = d_exact_alias(dentry, state->inode);
2546 if (!alias)
2547 alias = d_splice_alias(igrab(state->inode), dentry);
2548 /* d_splice_alias() can't fail here - it's a non-directory */
2549 if (alias) {
2550 dput(ctx->dentry);
2551 ctx->dentry = dentry = alias;
2552 }
2553 nfs_set_verifier(dentry,
2554 nfs_save_change_attribute(d_inode(opendata->dir)));
2555 }
2556
2557 ret = nfs4_opendata_access(sp->so_cred, opendata, state, fmode, flags);
2558 if (ret != 0)
2559 goto out;
2560
2561 ctx->state = state;
2562 if (d_inode(dentry) == state->inode) {
2563 nfs_inode_attach_open_context(ctx);
2564 if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
2565 nfs4_schedule_stateid_recovery(server, state);
2566 }
2567 out:
2568 return ret;
2569 }
2570
2571 /*
2572 * Returns a referenced nfs4_state
2573 */
2574 static int _nfs4_do_open(struct inode *dir,
2575 struct nfs_open_context *ctx,
2576 int flags,
2577 struct iattr *sattr,
2578 struct nfs4_label *label,
2579 int *opened)
2580 {
2581 struct nfs4_state_owner *sp;
2582 struct nfs4_state *state = NULL;
2583 struct nfs_server *server = NFS_SERVER(dir);
2584 struct nfs4_opendata *opendata;
2585 struct dentry *dentry = ctx->dentry;
2586 struct rpc_cred *cred = ctx->cred;
2587 struct nfs4_threshold **ctx_th = &ctx->mdsthreshold;
2588 fmode_t fmode = ctx->mode & (FMODE_READ|FMODE_WRITE|FMODE_EXEC);
2589 enum open_claim_type4 claim = NFS4_OPEN_CLAIM_NULL;
2590 struct nfs4_label *olabel = NULL;
2591 int status;
2592
2593 /* Protect against reboot recovery conflicts */
2594 status = -ENOMEM;
2595 sp = nfs4_get_state_owner(server, cred, GFP_KERNEL);
2596 if (sp == NULL) {
2597 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
2598 goto out_err;
2599 }
2600 status = nfs4_recover_expired_lease(server);
2601 if (status != 0)
2602 goto err_put_state_owner;
2603 if (d_really_is_positive(dentry))
2604 nfs4_return_incompatible_delegation(d_inode(dentry), fmode);
2605 status = -ENOMEM;
2606 if (d_really_is_positive(dentry))
2607 claim = NFS4_OPEN_CLAIM_FH;
2608 opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, sattr,
2609 label, claim, GFP_KERNEL);
2610 if (opendata == NULL)
2611 goto err_put_state_owner;
2612
2613 if (label) {
2614 olabel = nfs4_label_alloc(server, GFP_KERNEL);
2615 if (IS_ERR(olabel)) {
2616 status = PTR_ERR(olabel);
2617 goto err_opendata_put;
2618 }
2619 }
2620
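/*
* If the server supports the mdsthreshold attribute, allocate a threshold
* structure and request the pNFS open bitmap so that the thresholds are
* returned with the OPEN reply.
*/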
2621 if (server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
2622 if (!opendata->f_attr.mdsthreshold) {
2623 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
2624 if (!opendata->f_attr.mdsthreshold)
2625 goto err_free_label;
2626 }
2627 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
2628 }
2629 if (d_really_is_positive(dentry))
2630 opendata->state = nfs4_get_open_state(d_inode(dentry), sp);
2631
2632 status = _nfs4_open_and_get_state(opendata, fmode, flags, ctx);
2633 if (status != 0)
2634 goto err_free_label;
2635 state = ctx->state;
2636
2637 if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) &&
2638 (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) {
2639 nfs4_exclusive_attrset(opendata, sattr, &label);
2640 /*
2641 * Send any create attributes that were not set by the OPEN
2642 * call with an extra SETATTR.
2643 */
2644 if (sattr->ia_valid & NFS4_VALID_ATTRS) {
2645 nfs_fattr_init(opendata->o_res.f_attr);
2646 status = nfs4_do_setattr(state->inode, cred,
2647 opendata->o_res.f_attr, sattr,
2648 state, label, olabel);
2649 if (status == 0) {
2650 nfs_setattr_update_inode(state->inode, sattr,
2651 opendata->o_res.f_attr);
2652 nfs_setsecurity(state->inode, opendata->o_res.f_attr, olabel);
2653 }
2654 }
2655 }
2656 if (opened && opendata->file_created)
2657 *opened |= FILE_CREATED;
2658
2659 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server)) {
2660 *ctx_th = opendata->f_attr.mdsthreshold;
2661 opendata->f_attr.mdsthreshold = NULL;
2662 }
2663
2664 nfs4_label_free(olabel);
2665
2666 nfs4_opendata_put(opendata);
2667 nfs4_put_state_owner(sp);
2668 return 0;
2669 err_free_label:
2670 nfs4_label_free(olabel);
2671 err_opendata_put:
2672 nfs4_opendata_put(opendata);
2673 err_put_state_owner:
2674 nfs4_put_state_owner(sp);
2675 out_err:
2676 return status;
2677 }
2678
2679
2680 static struct nfs4_state *nfs4_do_open(struct inode *dir,
2681 struct nfs_open_context *ctx,
2682 int flags,
2683 struct iattr *sattr,
2684 struct nfs4_label *label,
2685 int *opened)
2686 {
2687 struct nfs_server *server = NFS_SERVER(dir);
2688 struct nfs4_exception exception = { };
2689 struct nfs4_state *res;
2690 int status;
2691
2692 do {
2693 status = _nfs4_do_open(dir, ctx, flags, sattr, label, opened);
2694 res = ctx->state;
2695 trace_nfs4_open_file(ctx, flags, status);
2696 if (status == 0)
2697 break;
2698 /* NOTE: BAD_SEQID means the server and client disagree about the
2699 * book-keeping w.r.t. state-changing operations
2700 * (OPEN/CLOSE/LOCK/LOCKU...)
2701 * It is actually a sign of a bug on the client or on the server.
2702 *
2703 * If we receive a BAD_SEQID error in the particular case of
2704 * doing an OPEN, we assume that nfs_increment_open_seqid() will
2705 * have unhashed the old state_owner for us, and that we can
2706 * therefore safely retry using a new one. We should still warn
2707 * the user though...
2708 */
2709 if (status == -NFS4ERR_BAD_SEQID) {
2710 pr_warn_ratelimited("NFS: v4 server %s"
2711 " returned a bad sequence-id error!\n",
2712 NFS_SERVER(dir)->nfs_client->cl_hostname);
2713 exception.retry = 1;
2714 continue;
2715 }
2716 /*
2717 * BAD_STATEID on OPEN means that the server cancelled our
2718 * state before it received the OPEN_CONFIRM.
2719 * Recover by retrying the request as per the discussion
2720 * on Page 181 of RFC3530.
2721 */
2722 if (status == -NFS4ERR_BAD_STATEID) {
2723 exception.retry = 1;
2724 continue;
2725 }
2726 if (status == -EAGAIN) {
2727 /* We must have found a delegation */
2728 exception.retry = 1;
2729 continue;
2730 }
2731 if (nfs4_clear_cap_atomic_open_v1(server, status, &exception))
2732 continue;
2733 res = ERR_PTR(nfs4_handle_exception(server,
2734 status, &exception));
2735 } while (exception.retry);
2736 return res;
2737 }
2738
2739 static int _nfs4_do_setattr(struct inode *inode,
2740 struct nfs_setattrargs *arg,
2741 struct nfs_setattrres *res,
2742 struct rpc_cred *cred,
2743 struct nfs4_state *state)
2744 {
2745 struct nfs_server *server = NFS_SERVER(inode);
2746 struct rpc_message msg = {
2747 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
2748 .rpc_argp = arg,
2749 .rpc_resp = res,
2750 .rpc_cred = cred,
2751 };
2752 struct rpc_cred *delegation_cred = NULL;
2753 unsigned long timestamp = jiffies;
2754 fmode_t fmode;
2755 bool truncate;
2756 int status;
2757
2758 nfs_fattr_init(res->fattr);
2759
2760 /* Servers should only apply open mode checks for file size changes */
2761 truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false;
2762 fmode = truncate ? FMODE_WRITE : FMODE_READ;
2763
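/*
* Pick a stateid for the SETATTR: prefer a delegation stateid, fall back
* to an open/lock stateid when truncating, and use the zero stateid
* otherwise.
*/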
2764 if (nfs4_copy_delegation_stateid(inode, fmode, &arg->stateid, &delegation_cred)) {
2765 /* Use that stateid */
2766 } else if (truncate && state != NULL) {
2767 struct nfs_lockowner lockowner = {
2768 .l_owner = current->files,
2769 .l_pid = current->tgid,
2770 };
2771 if (!nfs4_valid_open_stateid(state))
2772 return -EBADF;
2773 if (nfs4_select_rw_stateid(state, FMODE_WRITE, &lockowner,
2774 &arg->stateid, &delegation_cred) == -EIO)
2775 return -EBADF;
2776 } else
2777 nfs4_stateid_copy(&arg->stateid, &zero_stateid);
2778 if (delegation_cred)
2779 msg.rpc_cred = delegation_cred;
2780
2781 status = nfs4_call_sync(server->client, server, &msg, &arg->seq_args, &res->seq_res, 1);
2782
2783 put_rpccred(delegation_cred);
2784 if (status == 0 && state != NULL)
2785 renew_lease(server, timestamp);
2786 trace_nfs4_setattr(inode, &arg->stateid, status);
2787 return status;
2788 }
2789
2790 static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
2791 struct nfs_fattr *fattr, struct iattr *sattr,
2792 struct nfs4_state *state, struct nfs4_label *ilabel,
2793 struct nfs4_label *olabel)
2794 {
2795 struct nfs_server *server = NFS_SERVER(inode);
2796 struct nfs_setattrargs arg = {
2797 .fh = NFS_FH(inode),
2798 .iap = sattr,
2799 .server = server,
2800 .bitmask = server->attr_bitmask,
2801 .label = ilabel,
2802 };
2803 struct nfs_setattrres res = {
2804 .fattr = fattr,
2805 .label = olabel,
2806 .server = server,
2807 };
2808 struct nfs4_exception exception = {
2809 .state = state,
2810 .inode = inode,
2811 .stateid = &arg.stateid,
2812 };
2813 int err;
2814
2815 arg.bitmask = nfs4_bitmask(server, ilabel);
2816 if (ilabel)
2817 arg.bitmask = nfs4_bitmask(server, olabel);
2818
2819 do {
2820 err = _nfs4_do_setattr(inode, &arg, &res, cred, state);
2821 switch (err) {
2822 case -NFS4ERR_OPENMODE:
2823 if (!(sattr->ia_valid & ATTR_SIZE)) {
2824 pr_warn_once("NFSv4: server %s is incorrectly "
2825 "applying open mode checks to "
2826 "a SETATTR that is not "
2827 "changing file size.\n",
2828 server->nfs_client->cl_hostname);
2829 }
2830 if (state && !(state->state & FMODE_WRITE)) {
2831 err = -EBADF;
2832 if (sattr->ia_valid & ATTR_OPEN)
2833 err = -EACCES;
2834 goto out;
2835 }
2836 }
2837 err = nfs4_handle_exception(server, err, &exception);
2838 } while (exception.retry);
2839 out:
2840 return err;
2841 }
2842
2843 static bool
2844 nfs4_wait_on_layoutreturn(struct inode *inode, struct rpc_task *task)
2845 {
2846 if (inode == NULL || !nfs_have_layout(inode))
2847 return false;
2848
2849 return pnfs_wait_on_layoutreturn(inode, task);
2850 }
2851
2852 struct nfs4_closedata {
2853 struct inode *inode;
2854 struct nfs4_state *state;
2855 struct nfs_closeargs arg;
2856 struct nfs_closeres res;
2857 struct nfs_fattr fattr;
2858 unsigned long timestamp;
2859 bool roc;
2860 u32 roc_barrier;
2861 };
2862
2863 static void nfs4_free_closedata(void *data)
2864 {
2865 struct nfs4_closedata *calldata = data;
2866 struct nfs4_state_owner *sp = calldata->state->owner;
2867 struct super_block *sb = calldata->state->inode->i_sb;
2868
2869 if (calldata->roc)
2870 pnfs_roc_release(calldata->state->inode);
2871 nfs4_put_open_state(calldata->state);
2872 nfs_free_seqid(calldata->arg.seqid);
2873 nfs4_put_state_owner(sp);
2874 nfs_sb_deactive(sb);
2875 kfree(calldata);
2876 }
2877
2878 static void nfs4_close_done(struct rpc_task *task, void *data)
2879 {
2880 struct nfs4_closedata *calldata = data;
2881 struct nfs4_state *state = calldata->state;
2882 struct nfs_server *server = NFS_SERVER(calldata->inode);
2883 nfs4_stateid *res_stateid = NULL;
2884
2885 dprintk("%s: begin!\n", __func__);
2886 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
2887 return;
2888 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
2889 /* We are done with the inode and in the process of freeing the
2890 * state_owner. Keep this data around so that errors can be processed.
2891 */
2892 switch (task->tk_status) {
2893 case 0:
2894 res_stateid = &calldata->res.stateid;
2895 if (calldata->roc)
2896 pnfs_roc_set_barrier(state->inode,
2897 calldata->roc_barrier);
2898 renew_lease(server, calldata->timestamp);
2899 break;
2900 case -NFS4ERR_ADMIN_REVOKED:
2901 case -NFS4ERR_STALE_STATEID:
2902 case -NFS4ERR_OLD_STATEID:
2903 case -NFS4ERR_BAD_STATEID:
2904 case -NFS4ERR_EXPIRED:
2905 if (!nfs4_stateid_match(&calldata->arg.stateid,
2906 &state->open_stateid)) {
2907 rpc_restart_call_prepare(task);
2908 goto out_release;
2909 }
2910 if (calldata->arg.fmode == 0)
2911 break;
2912 default:
2913 if (nfs4_async_handle_error(task, server, state, NULL) == -EAGAIN) {
2914 rpc_restart_call_prepare(task);
2915 goto out_release;
2916 }
2917 }
2918 nfs_clear_open_stateid(state, &calldata->arg.stateid,
2919 res_stateid, calldata->arg.fmode);
2920 out_release:
2921 nfs_release_seqid(calldata->arg.seqid);
2922 nfs_refresh_inode(calldata->inode, calldata->res.fattr);
2923 dprintk("%s: done, ret = %d!\n", __func__, task->tk_status);
2924 }
2925
2926 static void nfs4_close_prepare(struct rpc_task *task, void *data)
2927 {
2928 struct nfs4_closedata *calldata = data;
2929 struct nfs4_state *state = calldata->state;
2930 struct inode *inode = calldata->inode;
2931 bool is_rdonly, is_wronly, is_rdwr;
2932 int call_close = 0;
2933
2934 dprintk("%s: begin!\n", __func__);
2935 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
2936 goto out_wait;
2937
2938 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE];
2939 spin_lock(&state->owner->so_lock);
2940 is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags);
2941 is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags);
2942 is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags);
2943 nfs4_stateid_copy(&calldata->arg.stateid, &state->open_stateid);
2944 /* Calculate the change in open mode */
2945 calldata->arg.fmode = 0;
2946 if (state->n_rdwr == 0) {
2947 if (state->n_rdonly == 0)
2948 call_close |= is_rdonly;
2949 else if (is_rdonly)
2950 calldata->arg.fmode |= FMODE_READ;
2951 if (state->n_wronly == 0)
2952 call_close |= is_wronly;
2953 else if (is_wronly)
2954 calldata->arg.fmode |= FMODE_WRITE;
2955 if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
2956 call_close |= is_rdwr;
2957 } else if (is_rdwr)
2958 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
2959
2960 if (!nfs4_valid_open_stateid(state))
2961 call_close = 0;
2962 spin_unlock(&state->owner->so_lock);
2963
2964 if (!call_close) {
2965 /* Note: exit _without_ calling nfs4_close_done */
2966 goto out_no_action;
2967 }
2968
2969 if (nfs4_wait_on_layoutreturn(inode, task)) {
2970 nfs_release_seqid(calldata->arg.seqid);
2971 goto out_wait;
2972 }
2973
2974 if (calldata->arg.fmode == 0)
2975 task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
2976 if (calldata->roc)
2977 pnfs_roc_get_barrier(inode, &calldata->roc_barrier);
2978
2979 calldata->arg.share_access =
2980 nfs4_map_atomic_open_share(NFS_SERVER(inode),
2981 calldata->arg.fmode, 0);
2982
2983 nfs_fattr_init(calldata->res.fattr);
2984 calldata->timestamp = jiffies;
2985 if (nfs4_setup_sequence(NFS_SERVER(inode),
2986 &calldata->arg.seq_args,
2987 &calldata->res.seq_res,
2988 task) != 0)
2989 nfs_release_seqid(calldata->arg.seqid);
2990 dprintk("%s: done!\n", __func__);
2991 return;
2992 out_no_action:
2993 task->tk_action = NULL;
2994 out_wait:
2995 nfs4_sequence_done(task, &calldata->res.seq_res);
2996 }
2997
2998 static const struct rpc_call_ops nfs4_close_ops = {
2999 .rpc_call_prepare = nfs4_close_prepare,
3000 .rpc_call_done = nfs4_close_done,
3001 .rpc_release = nfs4_free_closedata,
3002 };
3003
3004 static bool nfs4_roc(struct inode *inode)
3005 {
3006 if (!nfs_have_layout(inode))
3007 return false;
3008 return pnfs_roc(inode);
3009 }
3010
3011 /*
3012 * It is possible for data to be read/written from a mem-mapped file
3013 * after the sys_close call (which hits the vfs layer as a flush).
3014 * This means that we can't safely call nfsv4 close on a file until
3015 * the inode is cleared. This in turn means that we are not good
3016 * NFSv4 citizens - we do not indicate to the server to update the file's
3017 * share state even when we are done with one of the three share
3018 * stateid's in the inode.
3019 *
3020 * NOTE: Caller must be holding the sp->so_owner semaphore!
3021 */
3022 int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait)
3023 {
3024 struct nfs_server *server = NFS_SERVER(state->inode);
3025 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
3026 struct nfs4_closedata *calldata;
3027 struct nfs4_state_owner *sp = state->owner;
3028 struct rpc_task *task;
3029 struct rpc_message msg = {
3030 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE],
3031 .rpc_cred = state->owner->so_cred,
3032 };
3033 struct rpc_task_setup task_setup_data = {
3034 .rpc_client = server->client,
3035 .rpc_message = &msg,
3036 .callback_ops = &nfs4_close_ops,
3037 .workqueue = nfsiod_workqueue,
3038 .flags = RPC_TASK_ASYNC,
3039 };
3040 int status = -ENOMEM;
3041
3042 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_CLEANUP,
3043 &task_setup_data.rpc_client, &msg);
3044
3045 calldata = kzalloc(sizeof(*calldata), gfp_mask);
3046 if (calldata == NULL)
3047 goto out;
3048 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 1);
3049 calldata->inode = state->inode;
3050 calldata->state = state;
3051 calldata->arg.fh = NFS_FH(state->inode);
3052 /* Serialization for the sequence id */
3053 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
3054 calldata->arg.seqid = alloc_seqid(&state->owner->so_seqid, gfp_mask);
3055 if (IS_ERR(calldata->arg.seqid))
3056 goto out_free_calldata;
3057 calldata->arg.fmode = 0;
3058 calldata->arg.bitmask = server->cache_consistency_bitmask;
3059 calldata->res.fattr = &calldata->fattr;
3060 calldata->res.seqid = calldata->arg.seqid;
3061 calldata->res.server = server;
3062 calldata->roc = nfs4_roc(state->inode);
3063 nfs_sb_active(calldata->inode->i_sb);
3064
3065 msg.rpc_argp = &calldata->arg;
3066 msg.rpc_resp = &calldata->res;
3067 task_setup_data.callback_data = calldata;
3068 task = rpc_run_task(&task_setup_data);
3069 if (IS_ERR(task))
3070 return PTR_ERR(task);
3071 status = 0;
3072 if (wait)
3073 status = rpc_wait_for_completion_task(task);
3074 rpc_put_task(task);
3075 return status;
3076 out_free_calldata:
3077 kfree(calldata);
3078 out:
3079 nfs4_put_open_state(state);
3080 nfs4_put_state_owner(sp);
3081 return status;
3082 }
3083
3084 static struct inode *
3085 nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx,
3086 int open_flags, struct iattr *attr, int *opened)
3087 {
3088 struct nfs4_state *state;
3089 struct nfs4_label l = {0, 0, 0, NULL}, *label = NULL;
3090
3091 label = nfs4_label_init_security(dir, ctx->dentry, attr, &l);
3092
3093 /* Protect against concurrent sillydeletes */
3094 state = nfs4_do_open(dir, ctx, open_flags, attr, label, opened);
3095
3096 nfs4_label_release_security(label);
3097
3098 if (IS_ERR(state))
3099 return ERR_CAST(state);
3100 return state->inode;
3101 }
3102
3103 static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync)
3104 {
3105 if (ctx->state == NULL)
3106 return;
3107 if (is_sync)
3108 nfs4_close_sync(ctx->state, ctx->mode);
3109 else
3110 nfs4_close_state(ctx->state, ctx->mode);
3111 }
3112
3113 #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL)
3114 #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL)
3115 #define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_SECURITY_LABEL - 1UL)
3116
3117 static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3118 {
3119 u32 bitmask[3] = {}, minorversion = server->nfs_client->cl_minorversion;
3120 struct nfs4_server_caps_arg args = {
3121 .fhandle = fhandle,
3122 .bitmask = bitmask,
3123 };
3124 struct nfs4_server_caps_res res = {};
3125 struct rpc_message msg = {
3126 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SERVER_CAPS],
3127 .rpc_argp = &args,
3128 .rpc_resp = &res,
3129 };
3130 int status;
3131
3132 bitmask[0] = FATTR4_WORD0_SUPPORTED_ATTRS |
3133 FATTR4_WORD0_FH_EXPIRE_TYPE |
3134 FATTR4_WORD0_LINK_SUPPORT |
3135 FATTR4_WORD0_SYMLINK_SUPPORT |
3136 FATTR4_WORD0_ACLSUPPORT;
3137 if (minorversion)
3138 bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
3139
3140 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3141 if (status == 0) {
3142 /* Sanity check the server's answers */
3143 switch (minorversion) {
3144 case 0:
3145 res.attr_bitmask[1] &= FATTR4_WORD1_NFS40_MASK;
3146 res.attr_bitmask[2] = 0;
3147 break;
3148 case 1:
3149 res.attr_bitmask[2] &= FATTR4_WORD2_NFS41_MASK;
3150 break;
3151 case 2:
3152 res.attr_bitmask[2] &= FATTR4_WORD2_NFS42_MASK;
3153 }
3154 memcpy(server->attr_bitmask, res.attr_bitmask, sizeof(server->attr_bitmask));
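/* Reset the attribute-dependent capabilities, then re-enable those the server actually advertises */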
3155 server->caps &= ~(NFS_CAP_ACLS|NFS_CAP_HARDLINKS|
3156 NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
3157 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|
3158 NFS_CAP_OWNER_GROUP|NFS_CAP_ATIME|
3159 NFS_CAP_CTIME|NFS_CAP_MTIME|
3160 NFS_CAP_SECURITY_LABEL);
3161 if (res.attr_bitmask[0] & FATTR4_WORD0_ACL &&
3162 res.acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
3163 server->caps |= NFS_CAP_ACLS;
3164 if (res.has_links != 0)
3165 server->caps |= NFS_CAP_HARDLINKS;
3166 if (res.has_symlinks != 0)
3167 server->caps |= NFS_CAP_SYMLINKS;
3168 if (res.attr_bitmask[0] & FATTR4_WORD0_FILEID)
3169 server->caps |= NFS_CAP_FILEID;
3170 if (res.attr_bitmask[1] & FATTR4_WORD1_MODE)
3171 server->caps |= NFS_CAP_MODE;
3172 if (res.attr_bitmask[1] & FATTR4_WORD1_NUMLINKS)
3173 server->caps |= NFS_CAP_NLINK;
3174 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER)
3175 server->caps |= NFS_CAP_OWNER;
3176 if (res.attr_bitmask[1] & FATTR4_WORD1_OWNER_GROUP)
3177 server->caps |= NFS_CAP_OWNER_GROUP;
3178 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_ACCESS)
3179 server->caps |= NFS_CAP_ATIME;
3180 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_METADATA)
3181 server->caps |= NFS_CAP_CTIME;
3182 if (res.attr_bitmask[1] & FATTR4_WORD1_TIME_MODIFY)
3183 server->caps |= NFS_CAP_MTIME;
3184 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
3185 if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
3186 server->caps |= NFS_CAP_SECURITY_LABEL;
3187 #endif
3188 memcpy(server->attr_bitmask_nl, res.attr_bitmask,
3189 sizeof(server->attr_bitmask));
3190 server->attr_bitmask_nl[2] &= ~FATTR4_WORD2_SECURITY_LABEL;
3191
3192 memcpy(server->cache_consistency_bitmask, res.attr_bitmask, sizeof(server->cache_consistency_bitmask));
3193 server->cache_consistency_bitmask[0] &= FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE;
3194 server->cache_consistency_bitmask[1] &= FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY;
3195 server->cache_consistency_bitmask[2] = 0;
3196 memcpy(server->exclcreat_bitmask, res.exclcreat_bitmask,
3197 sizeof(server->exclcreat_bitmask));
3198 server->acl_bitmask = res.acl_bitmask;
3199 server->fh_expire_type = res.fh_expire_type;
3200 }
3201
3202 return status;
3203 }
3204
3205 int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
3206 {
3207 struct nfs4_exception exception = { };
3208 int err;
3209 do {
3210 err = nfs4_handle_exception(server,
3211 _nfs4_server_capabilities(server, fhandle),
3212 &exception);
3213 } while (exception.retry);
3214 return err;
3215 }
3216
3217 static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3218 struct nfs_fsinfo *info)
3219 {
3220 u32 bitmask[3];
3221 struct nfs4_lookup_root_arg args = {
3222 .bitmask = bitmask,
3223 };
3224 struct nfs4_lookup_res res = {
3225 .server = server,
3226 .fattr = info->fattr,
3227 .fh = fhandle,
3228 };
3229 struct rpc_message msg = {
3230 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP_ROOT],
3231 .rpc_argp = &args,
3232 .rpc_resp = &res,
3233 };
3234
3235 bitmask[0] = nfs4_fattr_bitmap[0];
3236 bitmask[1] = nfs4_fattr_bitmap[1];
3237 /*
3238 * Don't request the security label here; it will be processed in the upcoming getfattr.
3239 */
3240 bitmask[2] = nfs4_fattr_bitmap[2] & ~FATTR4_WORD2_SECURITY_LABEL;
3241
3242 nfs_fattr_init(info->fattr);
3243 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3244 }
3245
3246 static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
3247 struct nfs_fsinfo *info)
3248 {
3249 struct nfs4_exception exception = { };
3250 int err;
3251 do {
3252 err = _nfs4_lookup_root(server, fhandle, info);
3253 trace_nfs4_lookup_root(server, fhandle, info->fattr, err);
3254 switch (err) {
3255 case 0:
3256 case -NFS4ERR_WRONGSEC:
3257 goto out;
3258 default:
3259 err = nfs4_handle_exception(server, err, &exception);
3260 }
3261 } while (exception.retry);
3262 out:
3263 return err;
3264 }
3265
3266 static int nfs4_lookup_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3267 struct nfs_fsinfo *info, rpc_authflavor_t flavor)
3268 {
3269 struct rpc_auth_create_args auth_args = {
3270 .pseudoflavor = flavor,
3271 };
3272 struct rpc_auth *auth;
3273 int ret;
3274
3275 auth = rpcauth_create(&auth_args, server->client);
3276 if (IS_ERR(auth)) {
3277 ret = -EACCES;
3278 goto out;
3279 }
3280 ret = nfs4_lookup_root(server, fhandle, info);
3281 out:
3282 return ret;
3283 }
3284
3285 /*
3286 * Retry pseudoroot lookup with various security flavors. We do this when:
3287 *
3288 * NFSv4.0: the PUTROOTFH operation returns NFS4ERR_WRONGSEC
3289 * NFSv4.1: the server does not support the SECINFO_NO_NAME operation
3290 *
3291 * Returns zero on success, or a negative NFS4ERR value, or a
3292 * negative errno value.
3293 */
3294 static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
3295 struct nfs_fsinfo *info)
3296 {
3297 /* Per 3530bis 15.33.5 */
3298 static const rpc_authflavor_t flav_array[] = {
3299 RPC_AUTH_GSS_KRB5P,
3300 RPC_AUTH_GSS_KRB5I,
3301 RPC_AUTH_GSS_KRB5,
3302 RPC_AUTH_UNIX, /* courtesy */
3303 RPC_AUTH_NULL,
3304 };
3305 int status = -EPERM;
3306 size_t i;
3307
3308 if (server->auth_info.flavor_len > 0) {
3309 /* try each flavor specified by user */
3310 for (i = 0; i < server->auth_info.flavor_len; i++) {
3311 status = nfs4_lookup_root_sec(server, fhandle, info,
3312 server->auth_info.flavors[i]);
3313 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3314 continue;
3315 break;
3316 }
3317 } else {
3318 /* no flavors specified by user, try default list */
3319 for (i = 0; i < ARRAY_SIZE(flav_array); i++) {
3320 status = nfs4_lookup_root_sec(server, fhandle, info,
3321 flav_array[i]);
3322 if (status == -NFS4ERR_WRONGSEC || status == -EACCES)
3323 continue;
3324 break;
3325 }
3326 }
3327
3328 /*
3329 * -EACCES could mean that the user doesn't have correct permissions
3330 * to access the mount. It could also mean that we tried to mount
3331 * with a gss auth flavor, but rpc.gssd isn't running. Either way,
3332 * existing mount programs don't handle -EACCES very well so it should
3333 * be mapped to -EPERM instead.
3334 */
3335 if (status == -EACCES)
3336 status = -EPERM;
3337 return status;
3338 }
3339
3340 /**
3341 * nfs4_proc_get_rootfh - get file handle for server's pseudoroot
3342 * @server: initialized nfs_server handle
3343 * @fhandle: we fill in the pseudo-fs root file handle
3344 * @info: we fill in an FSINFO struct
3345 * @auth_probe: probe the auth flavours
3346 *
3347 * Returns zero on success, or a negative errno.
3348 */
3349 int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
3350 struct nfs_fsinfo *info,
3351 bool auth_probe)
3352 {
3353 int status = 0;
3354
3355 if (!auth_probe)
3356 status = nfs4_lookup_root(server, fhandle, info);
3357
3358 if (auth_probe || status == -NFS4ERR_WRONGSEC)
3359 status = server->nfs_client->cl_mvops->find_root_sec(server,
3360 fhandle, info);
3361
3362 if (status == 0)
3363 status = nfs4_server_capabilities(server, fhandle);
3364 if (status == 0)
3365 status = nfs4_do_fsinfo(server, fhandle, info);
3366
3367 return nfs4_map_errors(status);
3368 }
3369
3370 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
3371 struct nfs_fsinfo *info)
3372 {
3373 int error;
3374 struct nfs_fattr *fattr = info->fattr;
3375 struct nfs4_label *label = NULL;
3376
3377 error = nfs4_server_capabilities(server, mntfh);
3378 if (error < 0) {
3379 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
3380 return error;
3381 }
3382
3383 label = nfs4_label_alloc(server, GFP_KERNEL);
3384 if (IS_ERR(label))
3385 return PTR_ERR(label);
3386
3387 error = nfs4_proc_getattr(server, mntfh, fattr, label);
3388 if (error < 0) {
3389 dprintk("nfs4_get_root: getattr error = %d\n", -error);
3390 goto err_free_label;
3391 }
3392
3393 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
3394 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
3395 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
3396
3397 err_free_label:
3398 nfs4_label_free(label);
3399
3400 return error;
3401 }
3402
3403 /*
3404 * Get locations and (maybe) other attributes of a referral.
3405 * Note that we'll actually follow the referral later when
3406 * we detect fsid mismatch in inode revalidation
3407 */
3408 static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir,
3409 const struct qstr *name, struct nfs_fattr *fattr,
3410 struct nfs_fh *fhandle)
3411 {
3412 int status = -ENOMEM;
3413 struct page *page = NULL;
3414 struct nfs4_fs_locations *locations = NULL;
3415
3416 page = alloc_page(GFP_KERNEL);
3417 if (page == NULL)
3418 goto out;
3419 locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
3420 if (locations == NULL)
3421 goto out;
3422
3423 status = nfs4_proc_fs_locations(client, dir, name, locations, page);
3424 if (status != 0)
3425 goto out;
3426
3427 /*
3428 * If the fsid didn't change, this is a migration event, not a
3429 * referral. Cause us to drop into the exception handler, which
3430 * will kick off migration recovery.
3431 */
3432 if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
3433 dprintk("%s: server did not return a different fsid for"
3434 " a referral at %s\n", __func__, name->name);
3435 status = -NFS4ERR_MOVED;
3436 goto out;
3437 }
3438 /* Fixup attributes for the nfs_lookup() call to nfs_fhget() */
3439 nfs_fixup_referral_attributes(&locations->fattr);
3440
3441 /* replace the lookup nfs_fattr with the locations nfs_fattr */
3442 memcpy(fattr, &locations->fattr, sizeof(struct nfs_fattr));
3443 memset(fhandle, 0, sizeof(struct nfs_fh));
3444 out:
3445 if (page)
3446 __free_page(page);
3447 kfree(locations);
3448 return status;
3449 }
3450
3451 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3452 struct nfs_fattr *fattr, struct nfs4_label *label)
3453 {
3454 struct nfs4_getattr_arg args = {
3455 .fh = fhandle,
3456 .bitmask = server->attr_bitmask,
3457 };
3458 struct nfs4_getattr_res res = {
3459 .fattr = fattr,
3460 .label = label,
3461 .server = server,
3462 };
3463 struct rpc_message msg = {
3464 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
3465 .rpc_argp = &args,
3466 .rpc_resp = &res,
3467 };
3468
3469 args.bitmask = nfs4_bitmask(server, label);
3470
3471 nfs_fattr_init(fattr);
3472 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3473 }
3474
3475 static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
3476 struct nfs_fattr *fattr, struct nfs4_label *label)
3477 {
3478 struct nfs4_exception exception = { };
3479 int err;
3480 do {
3481 err = _nfs4_proc_getattr(server, fhandle, fattr, label);
3482 trace_nfs4_getattr(server, fhandle, fattr, err);
3483 err = nfs4_handle_exception(server, err,
3484 &exception);
3485 } while (exception.retry);
3486 return err;
3487 }
3488
3489 /*
3490 * The file is not closed if it is opened due to a request to change
3491 * the size of the file. The open call will not be needed once the
3492 * VFS layer lookup-intents are implemented.
3493 *
3494 * Close is called when the inode is destroyed.
3495 * If we haven't opened the file for O_WRONLY, we
3496 * need to do so in the size_change case to obtain a stateid.
3497 *
3498 * Got race?
3499 * Because OPEN is always done by name in nfsv4, it is
3500 * possible that we opened a different file by the same
3501 * name. We can recognize this race condition, but we
3502 * can't do anything about it besides returning an error.
3503 *
3504 * This will be fixed with VFS changes (lookup-intent).
3505 */
3506 static int
3507 nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
3508 struct iattr *sattr)
3509 {
3510 struct inode *inode = d_inode(dentry);
3511 struct rpc_cred *cred = NULL;
3512 struct nfs4_state *state = NULL;
3513 struct nfs4_label *label = NULL;
3514 int status;
3515
3516 if (pnfs_ld_layoutret_on_setattr(inode) &&
3517 sattr->ia_valid & ATTR_SIZE &&
3518 sattr->ia_size < i_size_read(inode))
3519 pnfs_commit_and_return_layout(inode);
3520
3521 nfs_fattr_init(fattr);
3522
3523 /* Deal with open(O_TRUNC) */
3524 if (sattr->ia_valid & ATTR_OPEN)
3525 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME);
3526
3527 /* Optimization: if the end result is no change, don't RPC */
3528 if ((sattr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
3529 return 0;
3530
3531 /* Search for an existing open(O_WRITE) file */
3532 if (sattr->ia_valid & ATTR_FILE) {
3533 struct nfs_open_context *ctx;
3534
3535 ctx = nfs_file_open_context(sattr->ia_file);
3536 if (ctx) {
3537 cred = ctx->cred;
3538 state = ctx->state;
3539 }
3540 }
3541
3542 label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
3543 if (IS_ERR(label))
3544 return PTR_ERR(label);
3545
3546 status = nfs4_do_setattr(inode, cred, fattr, sattr, state, NULL, label);
3547 if (status == 0) {
3548 nfs_setattr_update_inode(inode, sattr, fattr);
3549 nfs_setsecurity(inode, fattr, label);
3550 }
3551 nfs4_label_free(label);
3552 return status;
3553 }
3554
3555 static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir,
3556 const struct qstr *name, struct nfs_fh *fhandle,
3557 struct nfs_fattr *fattr, struct nfs4_label *label)
3558 {
3559 struct nfs_server *server = NFS_SERVER(dir);
3560 int status;
3561 struct nfs4_lookup_arg args = {
3562 .bitmask = server->attr_bitmask,
3563 .dir_fh = NFS_FH(dir),
3564 .name = name,
3565 };
3566 struct nfs4_lookup_res res = {
3567 .server = server,
3568 .fattr = fattr,
3569 .label = label,
3570 .fh = fhandle,
3571 };
3572 struct rpc_message msg = {
3573 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOOKUP],
3574 .rpc_argp = &args,
3575 .rpc_resp = &res,
3576 };
3577
3578 args.bitmask = nfs4_bitmask(server, label);
3579
3580 nfs_fattr_init(fattr);
3581
3582 dprintk("NFS call lookup %s\n", name->name);
3583 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args, &res.seq_res, 0);
3584 dprintk("NFS reply lookup: %d\n", status);
3585 return status;
3586 }
3587
3588 static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
3589 {
3590 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
3591 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT;
3592 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
3593 fattr->nlink = 2;
3594 }
3595
3596 static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3597 const struct qstr *name, struct nfs_fh *fhandle,
3598 struct nfs_fattr *fattr, struct nfs4_label *label)
3599 {
3600 struct nfs4_exception exception = { };
3601 struct rpc_clnt *client = *clnt;
3602 int err;
3603 do {
3604 err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr, label);
3605 trace_nfs4_lookup(dir, name, err);
3606 switch (err) {
3607 case -NFS4ERR_BADNAME:
3608 err = -ENOENT;
3609 goto out;
3610 case -NFS4ERR_MOVED:
3611 err = nfs4_get_referral(client, dir, name, fattr, fhandle);
3612 if (err == -NFS4ERR_MOVED)
3613 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3614 goto out;
3615 case -NFS4ERR_WRONGSEC:
3616 err = -EPERM;
3617 if (client != *clnt)
3618 goto out;
3619 client = nfs4_negotiate_security(client, dir, name);
3620 if (IS_ERR(client))
3621 return PTR_ERR(client);
3622
3623 exception.retry = 1;
3624 break;
3625 default:
3626 err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception);
3627 }
3628 } while (exception.retry);
3629
3630 out:
3631 if (err == 0)
3632 *clnt = client;
3633 else if (client != *clnt)
3634 rpc_shutdown_client(client);
3635
3636 return err;
3637 }
3638
3639 static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
3640 struct nfs_fh *fhandle, struct nfs_fattr *fattr,
3641 struct nfs4_label *label)
3642 {
3643 int status;
3644 struct rpc_clnt *client = NFS_CLIENT(dir);
3645
3646 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, label);
3647 if (client != NFS_CLIENT(dir)) {
3648 rpc_shutdown_client(client);
3649 nfs_fixup_secinfo_attributes(fattr);
3650 }
3651 return status;
3652 }
3653
3654 struct rpc_clnt *
3655 nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
3656 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
3657 {
3658 struct rpc_clnt *client = NFS_CLIENT(dir);
3659 int status;
3660
3661 status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
3662 if (status < 0)
3663 return ERR_PTR(status);
3664 return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
3665 }
3666
3667 static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3668 {
3669 struct nfs_server *server = NFS_SERVER(inode);
3670 struct nfs4_accessargs args = {
3671 .fh = NFS_FH(inode),
3672 .bitmask = server->cache_consistency_bitmask,
3673 };
3674 struct nfs4_accessres res = {
3675 .server = server,
3676 };
3677 struct rpc_message msg = {
3678 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
3679 .rpc_argp = &args,
3680 .rpc_resp = &res,
3681 .rpc_cred = entry->cred,
3682 };
3683 int mode = entry->mask;
3684 int status = 0;
3685
3686 /*
3687 * Determine which access bits we want to ask for...
3688 */
3689 if (mode & MAY_READ)
3690 args.access |= NFS4_ACCESS_READ;
3691 if (S_ISDIR(inode->i_mode)) {
3692 if (mode & MAY_WRITE)
3693 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
3694 if (mode & MAY_EXEC)
3695 args.access |= NFS4_ACCESS_LOOKUP;
3696 } else {
3697 if (mode & MAY_WRITE)
3698 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
3699 if (mode & MAY_EXEC)
3700 args.access |= NFS4_ACCESS_EXECUTE;
3701 }
3702
3703 res.fattr = nfs_alloc_fattr();
3704 if (res.fattr == NULL)
3705 return -ENOMEM;
3706
3707 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
3708 if (!status) {
3709 nfs_access_set_mask(entry, res.access);
3710 nfs_refresh_inode(inode, res.fattr);
3711 }
3712 nfs_free_fattr(res.fattr);
3713 return status;
3714 }
3715
3716 static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3717 {
3718 struct nfs4_exception exception = { };
3719 int err;
3720 do {
3721 err = _nfs4_proc_access(inode, entry);
3722 trace_nfs4_access(inode, err);
3723 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3724 &exception);
3725 } while (exception.retry);
3726 return err;
3727 }
3728
3729 /*
3730 * TODO: For the time being, we don't try to get any attributes
3731 * along with any of the zero-copy operations READ, READDIR,
3732 * READLINK, WRITE.
3733 *
3734 * In the case of the first three, we want to put the GETATTR
3735 * after the read-type operation -- this is because it is hard
3736 * to predict the length of a GETATTR response in v4, and thus
3737  * hard to align the READ data correctly. This means that the GETATTR
3738 * may end up partially falling into the page cache, and we should
3739 * shift it into the 'tail' of the xdr_buf before processing.
3740 * To do this efficiently, we need to know the total length
3741 * of data received, which doesn't seem to be available outside
3742 * of the RPC layer.
3743 *
3744 * In the case of WRITE, we also want to put the GETATTR after
3745 * the operation -- in this case because we want to make sure
3746 * we get the post-operation mtime and size.
3747 *
3748 * Both of these changes to the XDR layer would in fact be quite
3749 * minor, but I decided to leave them for a subsequent patch.
3750 */
3751 static int _nfs4_proc_readlink(struct inode *inode, struct page *page,
3752 unsigned int pgbase, unsigned int pglen)
3753 {
3754 struct nfs4_readlink args = {
3755 .fh = NFS_FH(inode),
3756 .pgbase = pgbase,
3757 .pglen = pglen,
3758 .pages = &page,
3759 };
3760 struct nfs4_readlink_res res;
3761 struct rpc_message msg = {
3762 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READLINK],
3763 .rpc_argp = &args,
3764 .rpc_resp = &res,
3765 };
3766
3767 return nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), &msg, &args.seq_args, &res.seq_res, 0);
3768 }
3769
3770 static int nfs4_proc_readlink(struct inode *inode, struct page *page,
3771 unsigned int pgbase, unsigned int pglen)
3772 {
3773 struct nfs4_exception exception = { };
3774 int err;
3775 do {
3776 err = _nfs4_proc_readlink(inode, page, pgbase, pglen);
3777 trace_nfs4_readlink(inode, err);
3778 err = nfs4_handle_exception(NFS_SERVER(inode), err,
3779 &exception);
3780 } while (exception.retry);
3781 return err;
3782 }
3783
3784 /*
3785  * This is only reached via mknod(2) of a regular file; open(O_CREAT) will always go through ->open_context().
3786 */
3787 static int
3788 nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
3789 int flags)
3790 {
3791 struct nfs4_label l, *ilabel = NULL;
3792 struct nfs_open_context *ctx;
3793 struct nfs4_state *state;
3794 int status = 0;
3795
3796 ctx = alloc_nfs_open_context(dentry, FMODE_READ);
3797 if (IS_ERR(ctx))
3798 return PTR_ERR(ctx);
3799
3800 ilabel = nfs4_label_init_security(dir, dentry, sattr, &l);
3801
3802 sattr->ia_mode &= ~current_umask();
3803 state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL);
3804 if (IS_ERR(state)) {
3805 status = PTR_ERR(state);
3806 goto out;
3807 }
3808 out:
3809 nfs4_label_release_security(ilabel);
3810 put_nfs_open_context(ctx);
3811 return status;
3812 }
3813
3814 static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
3815 {
3816 struct nfs_server *server = NFS_SERVER(dir);
3817 struct nfs_removeargs args = {
3818 .fh = NFS_FH(dir),
3819 .name = *name,
3820 };
3821 struct nfs_removeres res = {
3822 .server = server,
3823 };
3824 struct rpc_message msg = {
3825 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE],
3826 .rpc_argp = &args,
3827 .rpc_resp = &res,
3828 };
3829 int status;
3830
3831 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
3832 if (status == 0)
3833 update_changeattr(dir, &res.cinfo);
3834 return status;
3835 }
3836
3837 static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
3838 {
3839 struct nfs4_exception exception = { };
3840 int err;
3841 do {
3842 err = _nfs4_proc_remove(dir, name);
3843 trace_nfs4_remove(dir, name, err);
3844 err = nfs4_handle_exception(NFS_SERVER(dir), err,
3845 &exception);
3846 } while (exception.retry);
3847 return err;
3848 }
3849
3850 static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
3851 {
3852 struct nfs_server *server = NFS_SERVER(dir);
3853 struct nfs_removeargs *args = msg->rpc_argp;
3854 struct nfs_removeres *res = msg->rpc_resp;
3855
3856 res->server = server;
3857 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
3858 nfs4_init_sequence(&args->seq_args, &res->seq_res, 1);
3859
3860 nfs_fattr_init(res->dir_attr);
3861 }
3862
3863 static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
3864 {
3865 nfs4_setup_sequence(NFS_SB(data->dentry->d_sb),
3866 &data->args.seq_args,
3867 &data->res.seq_res,
3868 task);
3869 }
3870
3871 static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
3872 {
3873 struct nfs_unlinkdata *data = task->tk_calldata;
3874 struct nfs_removeres *res = &data->res;
3875
3876 if (!nfs4_sequence_done(task, &res->seq_res))
3877 return 0;
3878 if (nfs4_async_handle_error(task, res->server, NULL,
3879 &data->timeout) == -EAGAIN)
3880 return 0;
3881 update_changeattr(dir, &res->cinfo);
3882 return 1;
3883 }
3884
3885 static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
3886 {
3887 struct nfs_server *server = NFS_SERVER(dir);
3888 struct nfs_renameargs *arg = msg->rpc_argp;
3889 struct nfs_renameres *res = msg->rpc_resp;
3890
3891 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
3892 res->server = server;
3893 nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1);
3894 }
3895
3896 static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task, struct nfs_renamedata *data)
3897 {
3898 nfs4_setup_sequence(NFS_SERVER(data->old_dir),
3899 &data->args.seq_args,
3900 &data->res.seq_res,
3901 task);
3902 }
3903
3904 static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
3905 struct inode *new_dir)
3906 {
3907 struct nfs_renamedata *data = task->tk_calldata;
3908 struct nfs_renameres *res = &data->res;
3909
3910 if (!nfs4_sequence_done(task, &res->seq_res))
3911 return 0;
3912 if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
3913 return 0;
3914
3915 update_changeattr(old_dir, &res->old_cinfo);
3916 update_changeattr(new_dir, &res->new_cinfo);
3917 return 1;
3918 }
3919
3920 static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
3921 {
3922 struct nfs_server *server = NFS_SERVER(inode);
3923 struct nfs4_link_arg arg = {
3924 .fh = NFS_FH(inode),
3925 .dir_fh = NFS_FH(dir),
3926 .name = name,
3927 .bitmask = server->attr_bitmask,
3928 };
3929 struct nfs4_link_res res = {
3930 .server = server,
3931 .label = NULL,
3932 };
3933 struct rpc_message msg = {
3934 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LINK],
3935 .rpc_argp = &arg,
3936 .rpc_resp = &res,
3937 };
3938 int status = -ENOMEM;
3939
3940 res.fattr = nfs_alloc_fattr();
3941 if (res.fattr == NULL)
3942 goto out;
3943
3944 res.label = nfs4_label_alloc(server, GFP_KERNEL);
3945 if (IS_ERR(res.label)) {
3946 status = PTR_ERR(res.label);
3947 goto out;
3948 }
3949 arg.bitmask = nfs4_bitmask(server, res.label);
3950
3951 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
3952 if (!status) {
3953 update_changeattr(dir, &res.cinfo);
3954 status = nfs_post_op_update_inode(inode, res.fattr);
3955 if (!status)
3956 nfs_setsecurity(inode, res.fattr, res.label);
3957 }
3958
3960 nfs4_label_free(res.label);
3961
3962 out:
3963 nfs_free_fattr(res.fattr);
3964 return status;
3965 }
3966
3967 static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
3968 {
3969 struct nfs4_exception exception = { };
3970 int err;
3971 do {
3972 err = nfs4_handle_exception(NFS_SERVER(inode),
3973 _nfs4_proc_link(inode, dir, name),
3974 &exception);
3975 } while (exception.retry);
3976 return err;
3977 }
3978
3979 struct nfs4_createdata {
3980 struct rpc_message msg;
3981 struct nfs4_create_arg arg;
3982 struct nfs4_create_res res;
3983 struct nfs_fh fh;
3984 struct nfs_fattr fattr;
3985 struct nfs4_label *label;
3986 };
3987
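/*
 * Allocate and initialise the argument/result container shared by the
 * CREATE-based operations (symlink, mkdir, mknod).  Returns NULL if the
 * allocation of either the container or its security label fails.
 */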
3988 static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3989 const struct qstr *name, struct iattr *sattr, u32 ftype)
3990 {
3991 struct nfs4_createdata *data;
3992
3993 data = kzalloc(sizeof(*data), GFP_KERNEL);
3994 if (data != NULL) {
3995 struct nfs_server *server = NFS_SERVER(dir);
3996
3997 data->label = nfs4_label_alloc(server, GFP_KERNEL);
3998 if (IS_ERR(data->label))
3999 goto out_free;
4000
4001 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE];
4002 data->msg.rpc_argp = &data->arg;
4003 data->msg.rpc_resp = &data->res;
4004 data->arg.dir_fh = NFS_FH(dir);
4005 data->arg.server = server;
4006 data->arg.name = name;
4007 data->arg.attrs = sattr;
4008 data->arg.ftype = ftype;
4009 data->arg.bitmask = nfs4_bitmask(server, data->label);
4010 data->res.server = server;
4011 data->res.fh = &data->fh;
4012 data->res.fattr = &data->fattr;
4013 data->res.label = data->label;
4014 nfs_fattr_init(data->res.fattr);
4015 }
4016 return data;
4017 out_free:
4018 kfree(data);
4019 return NULL;
4020 }
4021
4022 static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_createdata *data)
4023 {
4024 int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
4025 &data->arg.seq_args, &data->res.seq_res, 1);
4026 if (status == 0) {
4027 update_changeattr(dir, &data->res.dir_cinfo);
4028 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
4029 }
4030 return status;
4031 }
4032
4033 static void nfs4_free_createdata(struct nfs4_createdata *data)
4034 {
4035 nfs4_label_free(data->label);
4036 kfree(data);
4037 }
4038
4039 static int _nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4040 struct page *page, unsigned int len, struct iattr *sattr,
4041 struct nfs4_label *label)
4042 {
4043 struct nfs4_createdata *data;
4044 int status = -ENAMETOOLONG;
4045
4046 if (len > NFS4_MAXPATHLEN)
4047 goto out;
4048
4049 status = -ENOMEM;
4050 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4LNK);
4051 if (data == NULL)
4052 goto out;
4053
4054 data->msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK];
4055 data->arg.u.symlink.pages = &page;
4056 data->arg.u.symlink.len = len;
4057 data->arg.label = label;
4058
4059 status = nfs4_do_create(dir, dentry, data);
4060
4061 nfs4_free_createdata(data);
4062 out:
4063 return status;
4064 }
4065
4066 static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry,
4067 struct page *page, unsigned int len, struct iattr *sattr)
4068 {
4069 struct nfs4_exception exception = { };
4070 struct nfs4_label l, *label = NULL;
4071 int err;
4072
4073 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4074
4075 do {
4076 err = _nfs4_proc_symlink(dir, dentry, page, len, sattr, label);
4077 trace_nfs4_symlink(dir, &dentry->d_name, err);
4078 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4079 &exception);
4080 } while (exception.retry);
4081
4082 nfs4_label_release_security(label);
4083 return err;
4084 }
4085
4086 static int _nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4087 struct iattr *sattr, struct nfs4_label *label)
4088 {
4089 struct nfs4_createdata *data;
4090 int status = -ENOMEM;
4091
4092 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4DIR);
4093 if (data == NULL)
4094 goto out;
4095
4096 data->arg.label = label;
4097 status = nfs4_do_create(dir, dentry, data);
4098
4099 nfs4_free_createdata(data);
4100 out:
4101 return status;
4102 }
4103
4104 static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry,
4105 struct iattr *sattr)
4106 {
4107 struct nfs4_exception exception = { };
4108 struct nfs4_label l, *label = NULL;
4109 int err;
4110
4111 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4112
4113 sattr->ia_mode &= ~current_umask();
4114 do {
4115 err = _nfs4_proc_mkdir(dir, dentry, sattr, label);
4116 trace_nfs4_mkdir(dir, &dentry->d_name, err);
4117 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4118 &exception);
4119 } while (exception.retry);
4120 nfs4_label_release_security(label);
4121
4122 return err;
4123 }
4124
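/*
 * Issue a READDIR for up to @count bytes of entries starting at @cookie.
 * On success the returned cookie verifier is cached in the directory's
 * cookieverf and args.pgbase is added to the status before it is
 * returned; the directory's cached atime is invalidated either way.
 */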
4125 static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4126 u64 cookie, struct page **pages, unsigned int count, int plus)
4127 {
4128 struct inode *dir = d_inode(dentry);
4129 struct nfs4_readdir_arg args = {
4130 .fh = NFS_FH(dir),
4131 .pages = pages,
4132 .pgbase = 0,
4133 .count = count,
4134 .bitmask = NFS_SERVER(d_inode(dentry))->attr_bitmask,
4135 .plus = plus,
4136 };
4137 struct nfs4_readdir_res res;
4138 struct rpc_message msg = {
4139 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READDIR],
4140 .rpc_argp = &args,
4141 .rpc_resp = &res,
4142 .rpc_cred = cred,
4143 };
4144 int status;
4145
4146 dprintk("%s: dentry = %pd2, cookie = %Lu\n", __func__,
4147 dentry,
4148 (unsigned long long)cookie);
4149 nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
4150 res.pgbase = args.pgbase;
4151 status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
4152 if (status >= 0) {
4153 memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
4154 status += args.pgbase;
4155 }
4156
4157 nfs_invalidate_atime(dir);
4158
4159 dprintk("%s: returns %d\n", __func__, status);
4160 return status;
4161 }
4162
4163 static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
4164 u64 cookie, struct page **pages, unsigned int count, int plus)
4165 {
4166 struct nfs4_exception exception = { };
4167 int err;
4168 do {
4169 err = _nfs4_proc_readdir(dentry, cred, cookie,
4170 pages, count, plus);
4171 trace_nfs4_readdir(d_inode(dentry), err);
4172 err = nfs4_handle_exception(NFS_SERVER(d_inode(dentry)), err,
4173 &exception);
4174 } while (exception.retry);
4175 return err;
4176 }
4177
4178 static int _nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4179 struct iattr *sattr, struct nfs4_label *label, dev_t rdev)
4180 {
4181 struct nfs4_createdata *data;
4182 int mode = sattr->ia_mode;
4183 int status = -ENOMEM;
4184
4185 data = nfs4_alloc_createdata(dir, &dentry->d_name, sattr, NF4SOCK);
4186 if (data == NULL)
4187 goto out;
4188
4189 if (S_ISFIFO(mode))
4190 data->arg.ftype = NF4FIFO;
4191 else if (S_ISBLK(mode)) {
4192 data->arg.ftype = NF4BLK;
4193 data->arg.u.device.specdata1 = MAJOR(rdev);
4194 data->arg.u.device.specdata2 = MINOR(rdev);
4195 	} else if (S_ISCHR(mode)) {
4197 data->arg.ftype = NF4CHR;
4198 data->arg.u.device.specdata1 = MAJOR(rdev);
4199 data->arg.u.device.specdata2 = MINOR(rdev);
4200 } else if (!S_ISSOCK(mode)) {
4201 status = -EINVAL;
4202 goto out_free;
4203 }
4204
4205 data->arg.label = label;
4206 status = nfs4_do_create(dir, dentry, data);
4207 out_free:
4208 nfs4_free_createdata(data);
4209 out:
4210 return status;
4211 }
4212
4213 static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry,
4214 struct iattr *sattr, dev_t rdev)
4215 {
4216 struct nfs4_exception exception = { };
4217 struct nfs4_label l, *label = NULL;
4218 int err;
4219
4220 label = nfs4_label_init_security(dir, dentry, sattr, &l);
4221
4222 sattr->ia_mode &= ~current_umask();
4223 do {
4224 err = _nfs4_proc_mknod(dir, dentry, sattr, label, rdev);
4225 trace_nfs4_mknod(dir, &dentry->d_name, err);
4226 err = nfs4_handle_exception(NFS_SERVER(dir), err,
4227 &exception);
4228 } while (exception.retry);
4229
4230 nfs4_label_release_security(label);
4231
4232 return err;
4233 }
4234
4235 static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
4236 struct nfs_fsstat *fsstat)
4237 {
4238 struct nfs4_statfs_arg args = {
4239 .fh = fhandle,
4240 .bitmask = server->attr_bitmask,
4241 };
4242 struct nfs4_statfs_res res = {
4243 .fsstat = fsstat,
4244 };
4245 struct rpc_message msg = {
4246 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_STATFS],
4247 .rpc_argp = &args,
4248 .rpc_resp = &res,
4249 };
4250
4251 nfs_fattr_init(fsstat->fattr);
4252 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4253 }
4254
4255 static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
4256 {
4257 struct nfs4_exception exception = { };
4258 int err;
4259 do {
4260 err = nfs4_handle_exception(server,
4261 _nfs4_proc_statfs(server, fhandle, fsstat),
4262 &exception);
4263 } while (exception.retry);
4264 return err;
4265 }
4266
4267 static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
4268 struct nfs_fsinfo *fsinfo)
4269 {
4270 struct nfs4_fsinfo_arg args = {
4271 .fh = fhandle,
4272 .bitmask = server->attr_bitmask,
4273 };
4274 struct nfs4_fsinfo_res res = {
4275 .fsinfo = fsinfo,
4276 };
4277 struct rpc_message msg = {
4278 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSINFO],
4279 .rpc_argp = &args,
4280 .rpc_resp = &res,
4281 };
4282
4283 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4284 }
4285
4286 static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4287 {
4288 struct nfs4_exception exception = { };
4289 unsigned long now = jiffies;
4290 int err;
4291
4292 do {
4293 err = _nfs4_do_fsinfo(server, fhandle, fsinfo);
4294 trace_nfs4_fsinfo(server, fhandle, fsinfo->fattr, err);
4295 if (err == 0) {
4296 nfs4_set_lease_period(server->nfs_client,
4297 fsinfo->lease_time * HZ,
4298 now);
4299 break;
4300 }
4301 err = nfs4_handle_exception(server, err, &exception);
4302 } while (exception.retry);
4303 return err;
4304 }
4305
4306 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
4307 {
4308 int error;
4309
4310 nfs_fattr_init(fsinfo->fattr);
4311 error = nfs4_do_fsinfo(server, fhandle, fsinfo);
4312 if (error == 0) {
4313 /* block layout checks this! */
4314 server->pnfs_blksize = fsinfo->blksize;
4315 set_pnfs_layoutdriver(server, fhandle, fsinfo->layouttype);
4316 }
4317
4318 return error;
4319 }
4320
4321 static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4322 struct nfs_pathconf *pathconf)
4323 {
4324 struct nfs4_pathconf_arg args = {
4325 .fh = fhandle,
4326 .bitmask = server->attr_bitmask,
4327 };
4328 struct nfs4_pathconf_res res = {
4329 .pathconf = pathconf,
4330 };
4331 struct rpc_message msg = {
4332 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_PATHCONF],
4333 .rpc_argp = &args,
4334 .rpc_resp = &res,
4335 };
4336
4337 /* None of the pathconf attributes are mandatory to implement */
4338 if ((args.bitmask[0] & nfs4_pathconf_bitmap[0]) == 0) {
4339 memset(pathconf, 0, sizeof(*pathconf));
4340 return 0;
4341 }
4342
4343 nfs_fattr_init(pathconf->fattr);
4344 return nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
4345 }
4346
4347 static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
4348 struct nfs_pathconf *pathconf)
4349 {
4350 struct nfs4_exception exception = { };
4351 int err;
4352
4353 do {
4354 err = nfs4_handle_exception(server,
4355 _nfs4_proc_pathconf(server, fhandle, pathconf),
4356 &exception);
4357 } while (exception.retry);
4358 return err;
4359 }
4360
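/**
 * nfs4_set_rw_stateid - select the stateid to use for a READ or WRITE
 * @stateid: where to store the selected stateid
 * @ctx: open context for the file
 * @l_ctx: lock context, or NULL if no byte-range lock is held
 * @fmode: FMODE_READ or FMODE_WRITE
 *
 * Selects an appropriate stateid (lock, open or delegation) for the I/O
 * described by @ctx/@l_ctx and copies it into @stateid.  Returns the
 * result of nfs4_select_rw_stateid().
 */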
4361 int nfs4_set_rw_stateid(nfs4_stateid *stateid,
4362 const struct nfs_open_context *ctx,
4363 const struct nfs_lock_context *l_ctx,
4364 fmode_t fmode)
4365 {
4366 const struct nfs_lockowner *lockowner = NULL;
4367
4368 if (l_ctx != NULL)
4369 lockowner = &l_ctx->lockowner;
4370 return nfs4_select_rw_stateid(ctx->state, fmode, lockowner, stateid, NULL);
4371 }
4372 EXPORT_SYMBOL_GPL(nfs4_set_rw_stateid);
4373
4374 static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
4375 const struct nfs_open_context *ctx,
4376 const struct nfs_lock_context *l_ctx,
4377 fmode_t fmode)
4378 {
4379 nfs4_stateid current_stateid;
4380
4381 /* If the current stateid represents a lost lock, then exit */
4382 if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
4383 return true;
4384 return nfs4_stateid_match(stateid, &current_stateid);
4385 }
4386
4387 static bool nfs4_error_stateid_expired(int err)
4388 {
4389 switch (err) {
4390 case -NFS4ERR_DELEG_REVOKED:
4391 case -NFS4ERR_ADMIN_REVOKED:
4392 case -NFS4ERR_BAD_STATEID:
4393 case -NFS4ERR_STALE_STATEID:
4394 case -NFS4ERR_OLD_STATEID:
4395 case -NFS4ERR_OPENMODE:
4396 case -NFS4ERR_EXPIRED:
4397 return true;
4398 }
4399 return false;
4400 }
4401
4402 void __nfs4_read_done_cb(struct nfs_pgio_header *hdr)
4403 {
4404 nfs_invalidate_atime(hdr->inode);
4405 }
4406
4407 static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr)
4408 {
4409 struct nfs_server *server = NFS_SERVER(hdr->inode);
4410
4411 trace_nfs4_read(hdr, task->tk_status);
4412 if (nfs4_async_handle_error(task, server,
4413 hdr->args.context->state,
4414 NULL) == -EAGAIN) {
4415 rpc_restart_call_prepare(task);
4416 return -EAGAIN;
4417 }
4418
4419 __nfs4_read_done_cb(hdr);
4420 if (task->tk_status > 0)
4421 renew_lease(server, hdr->timestamp);
4422 return 0;
4423 }
4424
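/*
 * If a READ failed because its stateid has been revoked or has expired,
 * and that stateid no longer matches the current one for this open/lock
 * context, restart the RPC so that it is re-encoded with the up-to-date
 * stateid.  Returns true when the call has been restarted.
 */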
4425 static bool nfs4_read_stateid_changed(struct rpc_task *task,
4426 struct nfs_pgio_args *args)
4427 {
4429 if (!nfs4_error_stateid_expired(task->tk_status) ||
4430 nfs4_stateid_is_current(&args->stateid,
4431 args->context,
4432 args->lock_context,
4433 FMODE_READ))
4434 return false;
4435 rpc_restart_call_prepare(task);
4436 return true;
4437 }
4438
4439 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4440 {
4442 dprintk("--> %s\n", __func__);
4443
4444 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4445 return -EAGAIN;
4446 if (nfs4_read_stateid_changed(task, &hdr->args))
4447 return -EAGAIN;
4448 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4449 nfs4_read_done_cb(task, hdr);
4450 }
4451
4452 static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
4453 struct rpc_message *msg)
4454 {
4455 hdr->timestamp = jiffies;
4456 if (!hdr->pgio_done_cb)
4457 hdr->pgio_done_cb = nfs4_read_done_cb;
4458 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
4459 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0);
4460 }
4461
4462 static int nfs4_proc_pgio_rpc_prepare(struct rpc_task *task,
4463 struct nfs_pgio_header *hdr)
4464 {
4465 if (nfs4_setup_sequence(NFS_SERVER(hdr->inode),
4466 &hdr->args.seq_args,
4467 &hdr->res.seq_res,
4468 task))
4469 return 0;
4470 if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
4471 hdr->args.lock_context,
4472 hdr->rw_ops->rw_mode) == -EIO)
4473 return -EIO;
4474 if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags)))
4475 return -EIO;
4476 return 0;
4477 }
4478
4479 static int nfs4_write_done_cb(struct rpc_task *task,
4480 struct nfs_pgio_header *hdr)
4481 {
4482 struct inode *inode = hdr->inode;
4483
4484 trace_nfs4_write(hdr, task->tk_status);
4485 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4486 hdr->args.context->state,
4487 NULL) == -EAGAIN) {
4488 rpc_restart_call_prepare(task);
4489 return -EAGAIN;
4490 }
4491 if (task->tk_status >= 0) {
4492 renew_lease(NFS_SERVER(inode), hdr->timestamp);
4493 nfs_writeback_update_inode(hdr);
4494 }
4495 return 0;
4496 }
4497
4498 static bool nfs4_write_stateid_changed(struct rpc_task *task,
4499 struct nfs_pgio_args *args)
4500 {
4502 if (!nfs4_error_stateid_expired(task->tk_status) ||
4503 nfs4_stateid_is_current(&args->stateid,
4504 args->context,
4505 args->lock_context,
4506 FMODE_WRITE))
4507 return false;
4508 rpc_restart_call_prepare(task);
4509 return true;
4510 }
4511
4512 static int nfs4_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
4513 {
4514 if (!nfs4_sequence_done(task, &hdr->res.seq_res))
4515 return -EAGAIN;
4516 if (nfs4_write_stateid_changed(task, &hdr->args))
4517 return -EAGAIN;
4518 return hdr->pgio_done_cb ? hdr->pgio_done_cb(task, hdr) :
4519 nfs4_write_done_cb(task, hdr);
4520 }
4521
4522 static
4523 bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
4524 {
4525 /* Don't request attributes for pNFS or O_DIRECT writes */
4526 if (hdr->ds_clp != NULL || hdr->dreq != NULL)
4527 return false;
4528 /* Otherwise, request attributes if and only if we don't hold
4529 * a delegation
4530 */
4531 return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
4532 }
4533
4534 static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
4535 struct rpc_message *msg)
4536 {
4537 struct nfs_server *server = NFS_SERVER(hdr->inode);
4538
4539 if (!nfs4_write_need_cache_consistency_data(hdr)) {
4540 hdr->args.bitmask = NULL;
4541 hdr->res.fattr = NULL;
4542 } else
4543 hdr->args.bitmask = server->cache_consistency_bitmask;
4544
4545 if (!hdr->pgio_done_cb)
4546 hdr->pgio_done_cb = nfs4_write_done_cb;
4547 hdr->res.server = server;
4548 hdr->timestamp = jiffies;
4549
4550 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
4551 nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1);
4552 }
4553
4554 static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
4555 {
4556 nfs4_setup_sequence(NFS_SERVER(data->inode),
4557 &data->args.seq_args,
4558 &data->res.seq_res,
4559 task);
4560 }
4561
4562 static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
4563 {
4564 struct inode *inode = data->inode;
4565
4566 trace_nfs4_commit(data, task->tk_status);
4567 if (nfs4_async_handle_error(task, NFS_SERVER(inode),
4568 NULL, NULL) == -EAGAIN) {
4569 rpc_restart_call_prepare(task);
4570 return -EAGAIN;
4571 }
4572 return 0;
4573 }
4574
4575 static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
4576 {
4577 if (!nfs4_sequence_done(task, &data->res.seq_res))
4578 return -EAGAIN;
4579 return data->commit_done_cb(task, data);
4580 }
4581
4582 static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
4583 {
4584 struct nfs_server *server = NFS_SERVER(data->inode);
4585
4586 if (data->commit_done_cb == NULL)
4587 data->commit_done_cb = nfs4_commit_done_cb;
4588 data->res.server = server;
4589 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
4590 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4591 }
4592
4593 struct nfs4_renewdata {
4594 struct nfs_client *client;
4595 unsigned long timestamp;
4596 };
4597
4598 /*
4599 * nfs4_proc_async_renew(): This is not one of the nfs_rpc_ops; it is a special
4600 * standalone procedure for queueing an asynchronous RENEW.
4601 */
4602 static void nfs4_renew_release(void *calldata)
4603 {
4604 struct nfs4_renewdata *data = calldata;
4605 struct nfs_client *clp = data->client;
4606
4607 if (atomic_read(&clp->cl_count) > 1)
4608 nfs4_schedule_state_renewal(clp);
4609 nfs_put_client(clp);
4610 kfree(data);
4611 }
4612
4613 static void nfs4_renew_done(struct rpc_task *task, void *calldata)
4614 {
4615 struct nfs4_renewdata *data = calldata;
4616 struct nfs_client *clp = data->client;
4617 unsigned long timestamp = data->timestamp;
4618
4619 trace_nfs4_renew_async(clp, task->tk_status);
4620 switch (task->tk_status) {
4621 case 0:
4622 break;
4623 case -NFS4ERR_LEASE_MOVED:
4624 nfs4_schedule_lease_moved_recovery(clp);
4625 break;
4626 default:
4627 /* Unless we're shutting down, schedule state recovery! */
4628 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) == 0)
4629 return;
4630 if (task->tk_status != NFS4ERR_CB_PATH_DOWN) {
4631 nfs4_schedule_lease_recovery(clp);
4632 return;
4633 }
4634 nfs4_schedule_path_down_recovery(clp);
4635 }
4636 do_renew_lease(clp, timestamp);
4637 }
4638
4639 static const struct rpc_call_ops nfs4_renew_ops = {
4640 .rpc_call_done = nfs4_renew_done,
4641 .rpc_release = nfs4_renew_release,
4642 };
4643
4644 static int nfs4_proc_async_renew(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
4645 {
4646 struct rpc_message msg = {
4647 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4648 .rpc_argp = clp,
4649 .rpc_cred = cred,
4650 };
4651 struct nfs4_renewdata *data;
4652
4653 if (renew_flags == 0)
4654 return 0;
4655 if (!atomic_inc_not_zero(&clp->cl_count))
4656 return -EIO;
4657 data = kmalloc(sizeof(*data), GFP_NOFS);
4658 if (data == NULL)
4659 return -ENOMEM;
4660 data->client = clp;
4661 data->timestamp = jiffies;
4662 return rpc_call_async(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT,
4663 &nfs4_renew_ops, data);
4664 }
4665
4666 static int nfs4_proc_renew(struct nfs_client *clp, struct rpc_cred *cred)
4667 {
4668 struct rpc_message msg = {
4669 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENEW],
4670 .rpc_argp = clp,
4671 .rpc_cred = cred,
4672 };
4673 unsigned long now = jiffies;
4674 int status;
4675
4676 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
4677 if (status < 0)
4678 return status;
4679 do_renew_lease(clp, now);
4680 return 0;
4681 }
4682
4683 static inline int nfs4_server_supports_acls(struct nfs_server *server)
4684 {
4685 return server->caps & NFS_CAP_ACLS;
4686 }
4687
4688 /* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
4689  * it's OK to put sizeof(void *) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
4690 * the stack.
4691 */
4692 #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
4693
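/*
 * Copy @buflen bytes from @buf into freshly allocated pages.  Returns
 * the number of pages used, or -ENOMEM (after freeing any pages already
 * allocated) if an allocation fails.
 */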
4694 static int buf_to_pages_noslab(const void *buf, size_t buflen,
4695 struct page **pages)
4696 {
4697 struct page *newpage, **spages;
4698 int rc = 0;
4699 size_t len;
4700 spages = pages;
4701
4702 do {
4703 len = min_t(size_t, PAGE_SIZE, buflen);
4704 newpage = alloc_page(GFP_KERNEL);
4705
4706 if (newpage == NULL)
4707 goto unwind;
4708 memcpy(page_address(newpage), buf, len);
4709 buf += len;
4710 buflen -= len;
4711 *pages++ = newpage;
4712 rc++;
4713 } while (buflen != 0);
4714
4715 return rc;
4716
4717 unwind:
4718 	for (; rc > 0; rc--)
4719 __free_page(spages[rc-1]);
4720 return -ENOMEM;
4721 }
4722
4723 struct nfs4_cached_acl {
4724 int cached;
4725 size_t len;
4726 char data[0];
4727 };
4728
4729 static void nfs4_set_cached_acl(struct inode *inode, struct nfs4_cached_acl *acl)
4730 {
4731 struct nfs_inode *nfsi = NFS_I(inode);
4732
4733 spin_lock(&inode->i_lock);
4734 kfree(nfsi->nfs4_acl);
4735 nfsi->nfs4_acl = acl;
4736 spin_unlock(&inode->i_lock);
4737 }
4738
4739 static void nfs4_zap_acl_attr(struct inode *inode)
4740 {
4741 nfs4_set_cached_acl(inode, NULL);
4742 }
4743
4744 static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_t buflen)
4745 {
4746 struct nfs_inode *nfsi = NFS_I(inode);
4747 struct nfs4_cached_acl *acl;
4748 int ret = -ENOENT;
4749
4750 spin_lock(&inode->i_lock);
4751 acl = nfsi->nfs4_acl;
4752 if (acl == NULL)
4753 goto out;
4754 if (buf == NULL) /* user is just asking for length */
4755 goto out_len;
4756 if (acl->cached == 0)
4757 goto out;
4758 ret = -ERANGE; /* see getxattr(2) man page */
4759 if (acl->len > buflen)
4760 goto out;
4761 memcpy(buf, acl->data, acl->len);
4762 out_len:
4763 ret = acl->len;
4764 out:
4765 spin_unlock(&inode->i_lock);
4766 return ret;
4767 }
4768
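/*
 * Cache the ACL that was just read from the server.  If the ACL plus its
 * header fits in a single page, the data itself is cached; otherwise only
 * the length is recorded (acl->cached == 0) so that a later getxattr call
 * can size its buffer correctly.
 */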
4769 static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len)
4770 {
4771 struct nfs4_cached_acl *acl;
4772 size_t buflen = sizeof(*acl) + acl_len;
4773
4774 if (buflen <= PAGE_SIZE) {
4775 acl = kmalloc(buflen, GFP_KERNEL);
4776 if (acl == NULL)
4777 goto out;
4778 acl->cached = 1;
4779 _copy_from_pages(acl->data, pages, pgbase, acl_len);
4780 } else {
4781 acl = kmalloc(sizeof(*acl), GFP_KERNEL);
4782 if (acl == NULL)
4783 goto out;
4784 acl->cached = 0;
4785 }
4786 acl->len = acl_len;
4787 out:
4788 nfs4_set_cached_acl(inode, acl);
4789 }
4790
4791 /*
4792 * The getxattr API returns the required buffer length when called with a
4793 * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
4794  * the required buf. On a NULL buf, we ask the server for a page of ACL
4795  * data, guessing that the ACL can be serviced by a single page. If so,
4796  * we cache up to that page of ACL data, and the second getxattr call is
4797  * serviced from the cache. If not, we throw away the page and cache only
4798  * the required length. The next getxattr call then makes another round
4799  * trip to the server, this time with an input buf of the required size.
4800 */
4801 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4802 {
4803 struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
4804 struct nfs_getaclargs args = {
4805 .fh = NFS_FH(inode),
4806 .acl_pages = pages,
4807 .acl_len = buflen,
4808 };
4809 struct nfs_getaclres res = {
4810 .acl_len = buflen,
4811 };
4812 struct rpc_message msg = {
4813 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
4814 .rpc_argp = &args,
4815 .rpc_resp = &res,
4816 };
4817 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4818 int ret = -ENOMEM, i;
4819
4820 /* As long as we're doing a round trip to the server anyway,
4821 * let's be prepared for a page of acl data. */
4822 if (npages == 0)
4823 npages = 1;
4824 if (npages > ARRAY_SIZE(pages))
4825 return -ERANGE;
4826
4827 for (i = 0; i < npages; i++) {
4828 pages[i] = alloc_page(GFP_KERNEL);
4829 if (!pages[i])
4830 goto out_free;
4831 }
4832
4833 /* for decoding across pages */
4834 res.acl_scratch = alloc_page(GFP_KERNEL);
4835 if (!res.acl_scratch)
4836 goto out_free;
4837
4838 args.acl_len = npages * PAGE_SIZE;
4839
4840 dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n",
4841 __func__, buf, buflen, npages, args.acl_len);
4842 ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
4843 &msg, &args.seq_args, &res.seq_res, 0);
4844 if (ret)
4845 goto out_free;
4846
4847 /* Handle the case where the passed-in buffer is too short */
4848 if (res.acl_flags & NFS4_ACL_TRUNC) {
4849 /* Did the user only issue a request for the acl length? */
4850 if (buf == NULL)
4851 goto out_ok;
4852 ret = -ERANGE;
4853 goto out_free;
4854 }
4855 nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
4856 if (buf) {
4857 if (res.acl_len > buflen) {
4858 ret = -ERANGE;
4859 goto out_free;
4860 }
4861 _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
4862 }
4863 out_ok:
4864 ret = res.acl_len;
4865 out_free:
4866 for (i = 0; i < npages; i++)
4867 if (pages[i])
4868 __free_page(pages[i]);
4869 if (res.acl_scratch)
4870 __free_page(res.acl_scratch);
4871 return ret;
4872 }
4873
4874 static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
4875 {
4876 struct nfs4_exception exception = { };
4877 ssize_t ret;
4878 do {
4879 ret = __nfs4_get_acl_uncached(inode, buf, buflen);
4880 trace_nfs4_get_acl(inode, ret);
4881 if (ret >= 0)
4882 break;
4883 ret = nfs4_handle_exception(NFS_SERVER(inode), ret, &exception);
4884 } while (exception.retry);
4885 return ret;
4886 }
4887
4888 static ssize_t nfs4_proc_get_acl(struct inode *inode, void *buf, size_t buflen)
4889 {
4890 struct nfs_server *server = NFS_SERVER(inode);
4891 int ret;
4892
4893 if (!nfs4_server_supports_acls(server))
4894 return -EOPNOTSUPP;
4895 ret = nfs_revalidate_inode(server, inode);
4896 if (ret < 0)
4897 return ret;
4898 if (NFS_I(inode)->cache_validity & NFS_INO_INVALID_ACL)
4899 nfs_zap_acl_cache(inode);
4900 ret = nfs4_read_cached_acl(inode, buf, buflen);
4901 if (ret != -ENOENT)
4902 /* -ENOENT is returned if there is no ACL or if there is an ACL
4903 * but no cached acl data, just the acl length */
4904 return ret;
4905 return nfs4_get_acl_uncached(inode, buf, buflen);
4906 }
4907
4908 static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4909 {
4910 struct nfs_server *server = NFS_SERVER(inode);
4911 struct page *pages[NFS4ACL_MAXPAGES];
4912 struct nfs_setaclargs arg = {
4913 .fh = NFS_FH(inode),
4914 .acl_pages = pages,
4915 .acl_len = buflen,
4916 };
4917 struct nfs_setaclres res;
4918 struct rpc_message msg = {
4919 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
4920 .rpc_argp = &arg,
4921 .rpc_resp = &res,
4922 };
4923 unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
4924 int ret, i;
4925
4926 if (!nfs4_server_supports_acls(server))
4927 return -EOPNOTSUPP;
4928 if (npages > ARRAY_SIZE(pages))
4929 return -ERANGE;
4930 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages);
4931 if (i < 0)
4932 return i;
4933 nfs4_inode_return_delegation(inode);
4934 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
4935
4936 /*
4937 * Free each page after tx, so the only ref left is
4938 * held by the network stack
4939 */
4940 for (; i > 0; i--)
4941 put_page(pages[i-1]);
4942
4943 /*
4944 * Acl update can result in inode attribute update.
4945 * so mark the attribute cache invalid.
4946 */
4947 spin_lock(&inode->i_lock);
4948 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
4949 spin_unlock(&inode->i_lock);
4950 nfs_access_zap_cache(inode);
4951 nfs_zap_acl_cache(inode);
4952 return ret;
4953 }
4954
4955 static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen)
4956 {
4957 struct nfs4_exception exception = { };
4958 int err;
4959 do {
4960 err = __nfs4_proc_set_acl(inode, buf, buflen);
4961 trace_nfs4_set_acl(inode, err);
4962 err = nfs4_handle_exception(NFS_SERVER(inode), err,
4963 &exception);
4964 } while (exception.retry);
4965 return err;
4966 }
4967
4968 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4969 static int _nfs4_get_security_label(struct inode *inode, void *buf,
4970 size_t buflen)
4971 {
4972 struct nfs_server *server = NFS_SERVER(inode);
4973 struct nfs_fattr fattr;
4974 struct nfs4_label label = {0, 0, buflen, buf};
4975
4976 u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
4977 struct nfs4_getattr_arg arg = {
4978 .fh = NFS_FH(inode),
4979 .bitmask = bitmask,
4980 };
4981 struct nfs4_getattr_res res = {
4982 .fattr = &fattr,
4983 .label = &label,
4984 .server = server,
4985 };
4986 struct rpc_message msg = {
4987 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETATTR],
4988 .rpc_argp = &arg,
4989 .rpc_resp = &res,
4990 };
4991 int ret;
4992
4993 nfs_fattr_init(&fattr);
4994
4995 ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 0);
4996 if (ret)
4997 return ret;
4998 if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
4999 return -ENOENT;
5000 if (buflen < label.len)
5001 return -ERANGE;
5002 return 0;
5003 }
5004
5005 static int nfs4_get_security_label(struct inode *inode, void *buf,
5006 size_t buflen)
5007 {
5008 struct nfs4_exception exception = { };
5009 int err;
5010
5011 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5012 return -EOPNOTSUPP;
5013
5014 do {
5015 err = _nfs4_get_security_label(inode, buf, buflen);
5016 trace_nfs4_get_security_label(inode, err);
5017 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5018 &exception);
5019 } while (exception.retry);
5020 return err;
5021 }
5022
5023 static int _nfs4_do_set_security_label(struct inode *inode,
5024 struct nfs4_label *ilabel,
5025 struct nfs_fattr *fattr,
5026 struct nfs4_label *olabel)
5027 {
5029 struct iattr sattr = {0};
5030 struct nfs_server *server = NFS_SERVER(inode);
5031 const u32 bitmask[3] = { 0, 0, FATTR4_WORD2_SECURITY_LABEL };
5032 struct nfs_setattrargs arg = {
5033 .fh = NFS_FH(inode),
5034 .iap = &sattr,
5035 .server = server,
5036 .bitmask = bitmask,
5037 .label = ilabel,
5038 };
5039 struct nfs_setattrres res = {
5040 .fattr = fattr,
5041 .label = olabel,
5042 .server = server,
5043 };
5044 struct rpc_message msg = {
5045 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETATTR],
5046 .rpc_argp = &arg,
5047 .rpc_resp = &res,
5048 };
5049 int status;
5050
5051 nfs4_stateid_copy(&arg.stateid, &zero_stateid);
5052
5053 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5054 if (status)
5055 dprintk("%s failed: %d\n", __func__, status);
5056
5057 return status;
5058 }
5059
5060 static int nfs4_do_set_security_label(struct inode *inode,
5061 struct nfs4_label *ilabel,
5062 struct nfs_fattr *fattr,
5063 struct nfs4_label *olabel)
5064 {
5065 struct nfs4_exception exception = { };
5066 int err;
5067
5068 do {
5069 err = _nfs4_do_set_security_label(inode, ilabel,
5070 fattr, olabel);
5071 trace_nfs4_set_security_label(inode, err);
5072 err = nfs4_handle_exception(NFS_SERVER(inode), err,
5073 &exception);
5074 } while (exception.retry);
5075 return err;
5076 }
5077
5078 static int
5079 nfs4_set_security_label(struct inode *inode, const void *buf, size_t buflen)
5080 {
5081 struct nfs4_label ilabel, *olabel = NULL;
5082 struct nfs_fattr fattr;
5083 struct rpc_cred *cred;
5084 int status;
5085
5086 if (!nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL))
5087 return -EOPNOTSUPP;
5088
5089 nfs_fattr_init(&fattr);
5090
5091 ilabel.pi = 0;
5092 ilabel.lfs = 0;
5093 ilabel.label = (char *)buf;
5094 ilabel.len = buflen;
5095
5096 cred = rpc_lookup_cred();
5097 if (IS_ERR(cred))
5098 return PTR_ERR(cred);
5099
5100 olabel = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
5101 if (IS_ERR(olabel)) {
5102 		status = PTR_ERR(olabel);
5103 goto out;
5104 }
5105
5106 status = nfs4_do_set_security_label(inode, &ilabel, &fattr, olabel);
5107 if (status == 0)
5108 nfs_setsecurity(inode, &fattr, olabel);
5109
5110 nfs4_label_free(olabel);
5111 out:
5112 put_rpccred(cred);
5113 return status;
5114 }
5115 #endif /* CONFIG_NFS_V4_SECURITY_LABEL */
5116
5117
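/*
 * Build the boot verifier used when establishing a client ID with the
 * server.  Normally it is derived from the recorded boot time; when
 * client state is being purged, an impossible timestamp is used instead
 * so that the server sees a verifier that cannot match any real boot time.
 */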
5118 static void nfs4_init_boot_verifier(const struct nfs_client *clp,
5119 nfs4_verifier *bootverf)
5120 {
5121 __be32 verf[2];
5122
5123 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
5124 /* An impossible timestamp guarantees this value
5125 * will never match a generated boot time. */
5126 verf[0] = 0;
5127 verf[1] = cpu_to_be32(NSEC_PER_SEC + 1);
5128 } else {
5129 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
5130 verf[0] = cpu_to_be32(nn->boot_time.tv_sec);
5131 verf[1] = cpu_to_be32(nn->boot_time.tv_nsec);
5132 }
5133 memcpy(bootverf->data, verf, sizeof(bootverf->data));
5134 }
5135
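/*
 * Construct the nfs_client_id4 co_ownerid string in the traditional
 * non-uniform form "Linux NFSv4.0 <clientaddr>/<serveraddr> <proto>".
 * The string is allocated once and kept in clp->cl_owner_id for the
 * lifetime of the nfs_client.
 */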
5136 static int
5137 nfs4_init_nonuniform_client_string(struct nfs_client *clp)
5138 {
5139 size_t len;
5140 char *str;
5141
5142 if (clp->cl_owner_id != NULL)
5143 return 0;
5144
5145 rcu_read_lock();
5146 len = 14 + strlen(clp->cl_ipaddr) + 1 +
5147 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR)) +
5148 1 +
5149 strlen(rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)) +
5150 1;
5151 rcu_read_unlock();
5152
5153 if (len > NFS4_OPAQUE_LIMIT + 1)
5154 return -EINVAL;
5155
5156 /*
5157 * Since this string is allocated at mount time, and held until the
5158 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5159 * about a memory-reclaim deadlock.
5160 */
5161 str = kmalloc(len, GFP_KERNEL);
5162 if (!str)
5163 return -ENOMEM;
5164
5165 rcu_read_lock();
5166 scnprintf(str, len, "Linux NFSv4.0 %s/%s %s",
5167 clp->cl_ipaddr,
5168 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR),
5169 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO));
5170 rcu_read_unlock();
5171
5172 clp->cl_owner_id = str;
5173 return 0;
5174 }
5175
5176 static int
5177 nfs4_init_uniquifier_client_string(struct nfs_client *clp)
5178 {
5179 size_t len;
5180 char *str;
5181
5182 len = 10 + 10 + 1 + 10 + 1 +
5183 strlen(nfs4_client_id_uniquifier) + 1 +
5184 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5185
5186 if (len > NFS4_OPAQUE_LIMIT + 1)
5187 return -EINVAL;
5188
5189 /*
5190 * Since this string is allocated at mount time, and held until the
5191 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5192 * about a memory-reclaim deadlock.
5193 */
5194 str = kmalloc(len, GFP_KERNEL);
5195 if (!str)
5196 return -ENOMEM;
5197
5198 scnprintf(str, len, "Linux NFSv%u.%u %s/%s",
5199 clp->rpc_ops->version, clp->cl_minorversion,
5200 nfs4_client_id_uniquifier,
5201 clp->cl_rpcclient->cl_nodename);
5202 clp->cl_owner_id = str;
5203 return 0;
5204 }
5205
5206 static int
5207 nfs4_init_uniform_client_string(struct nfs_client *clp)
5208 {
5209 size_t len;
5210 char *str;
5211
5212 if (clp->cl_owner_id != NULL)
5213 return 0;
5214
5215 if (nfs4_client_id_uniquifier[0] != '\0')
5216 return nfs4_init_uniquifier_client_string(clp);
5217
5218 len = 10 + 10 + 1 + 10 + 1 +
5219 strlen(clp->cl_rpcclient->cl_nodename) + 1;
5220
5221 if (len > NFS4_OPAQUE_LIMIT + 1)
5222 return -EINVAL;
5223
5224 /*
5225 * Since this string is allocated at mount time, and held until the
5226 * nfs_client is destroyed, we can use GFP_KERNEL here w/o worrying
5227 * about a memory-reclaim deadlock.
5228 */
5229 str = kmalloc(len, GFP_KERNEL);
5230 if (!str)
5231 return -ENOMEM;
5232
5233 scnprintf(str, len, "Linux NFSv%u.%u %s",
5234 clp->rpc_ops->version, clp->cl_minorversion,
5235 clp->cl_rpcclient->cl_nodename);
5236 clp->cl_owner_id = str;
5237 return 0;
5238 }
5239
5240 /*
5241 * nfs4_callback_up_net() starts only "tcp" and "tcp6" callback
5242 * services. Advertise one based on the address family of the
5243 * clientaddr.
5244 */
5245 static unsigned int
5246 nfs4_init_callback_netid(const struct nfs_client *clp, char *buf, size_t len)
5247 {
5248 if (strchr(clp->cl_ipaddr, ':') != NULL)
5249 return scnprintf(buf, len, "tcp6");
5250 else
5251 return scnprintf(buf, len, "tcp");
5252 }
5253
5254 static void nfs4_setclientid_done(struct rpc_task *task, void *calldata)
5255 {
5256 struct nfs4_setclientid *sc = calldata;
5257
5258 if (task->tk_status == 0)
5259 sc->sc_cred = get_rpccred(task->tk_rqstp->rq_cred);
5260 }
5261
5262 static const struct rpc_call_ops nfs4_setclientid_ops = {
5263 .rpc_call_done = nfs4_setclientid_done,
5264 };
5265
5266 /**
5267 * nfs4_proc_setclientid - Negotiate client ID
5268 * @clp: state data structure
5269 * @program: RPC program for NFSv4 callback service
5270 * @port: IP port number for NFS4 callback service
5271 * @cred: RPC credential to use for this call
5272 * @res: where to place the result
5273 *
5274 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5275 */
5276 int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
5277 unsigned short port, struct rpc_cred *cred,
5278 struct nfs4_setclientid_res *res)
5279 {
5280 nfs4_verifier sc_verifier;
5281 struct nfs4_setclientid setclientid = {
5282 .sc_verifier = &sc_verifier,
5283 .sc_prog = program,
5284 .sc_clnt = clp,
5285 };
5286 struct rpc_message msg = {
5287 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
5288 .rpc_argp = &setclientid,
5289 .rpc_resp = res,
5290 .rpc_cred = cred,
5291 };
5292 struct rpc_task *task;
5293 struct rpc_task_setup task_setup_data = {
5294 .rpc_client = clp->cl_rpcclient,
5295 .rpc_message = &msg,
5296 .callback_ops = &nfs4_setclientid_ops,
5297 .callback_data = &setclientid,
5298 .flags = RPC_TASK_TIMEOUT,
5299 };
5300 int status;
5301
5302 /* nfs_client_id4 */
5303 nfs4_init_boot_verifier(clp, &sc_verifier);
5304
5305 if (test_bit(NFS_CS_MIGRATION, &clp->cl_flags))
5306 status = nfs4_init_uniform_client_string(clp);
5307 else
5308 status = nfs4_init_nonuniform_client_string(clp);
5309
5310 if (status)
5311 goto out;
5312
5313 /* cb_client4 */
5314 setclientid.sc_netid_len =
5315 nfs4_init_callback_netid(clp,
5316 setclientid.sc_netid,
5317 sizeof(setclientid.sc_netid));
5318 setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
5319 sizeof(setclientid.sc_uaddr), "%s.%u.%u",
5320 clp->cl_ipaddr, port >> 8, port & 255);
5321
5322 dprintk("NFS call setclientid auth=%s, '%s'\n",
5323 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5324 clp->cl_owner_id);
5325 task = rpc_run_task(&task_setup_data);
5326 if (IS_ERR(task)) {
5327 status = PTR_ERR(task);
5328 goto out;
5329 }
5330 status = task->tk_status;
5331 if (setclientid.sc_cred) {
5332 clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
5333 put_rpccred(setclientid.sc_cred);
5334 }
5335 rpc_put_task(task);
5336 out:
5337 trace_nfs4_setclientid(clp, status);
5338 dprintk("NFS reply setclientid: %d\n", status);
5339 return status;
5340 }
5341
5342 /**
5343 * nfs4_proc_setclientid_confirm - Confirm client ID
5344 * @clp: state data structure
5345 * @res: result of a previous SETCLIENTID
5346 * @cred: RPC credential to use for this call
5347 *
5348 * Returns zero, a negative errno, or a negative NFS4ERR status code.
5349 */
5350 int nfs4_proc_setclientid_confirm(struct nfs_client *clp,
5351 struct nfs4_setclientid_res *arg,
5352 struct rpc_cred *cred)
5353 {
5354 struct rpc_message msg = {
5355 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID_CONFIRM],
5356 .rpc_argp = arg,
5357 .rpc_cred = cred,
5358 };
5359 int status;
5360
5361 dprintk("NFS call setclientid_confirm auth=%s, (client ID %llx)\n",
5362 clp->cl_rpcclient->cl_auth->au_ops->au_name,
5363 clp->cl_clientid);
5364 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5365 trace_nfs4_setclientid_confirm(clp, status);
5366 dprintk("NFS reply setclientid_confirm: %d\n", status);
5367 return status;
5368 }
5369
5370 struct nfs4_delegreturndata {
5371 struct nfs4_delegreturnargs args;
5372 struct nfs4_delegreturnres res;
5373 struct nfs_fh fh;
5374 nfs4_stateid stateid;
5375 unsigned long timestamp;
5376 struct nfs_fattr fattr;
5377 int rpc_status;
5378 struct inode *inode;
5379 bool roc;
5380 u32 roc_barrier;
5381 };
5382
5383 static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
5384 {
5385 struct nfs4_delegreturndata *data = calldata;
5386
5387 if (!nfs4_sequence_done(task, &data->res.seq_res))
5388 return;
5389
5390 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
5391 switch (task->tk_status) {
5392 case 0:
5393 renew_lease(data->res.server, data->timestamp);
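		/* Fall through: share the return-on-close barrier handling
		 * with the stateid-already-gone cases below. */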
5394 case -NFS4ERR_ADMIN_REVOKED:
5395 case -NFS4ERR_DELEG_REVOKED:
5396 case -NFS4ERR_BAD_STATEID:
5397 case -NFS4ERR_OLD_STATEID:
5398 case -NFS4ERR_STALE_STATEID:
5399 case -NFS4ERR_EXPIRED:
5400 task->tk_status = 0;
5401 if (data->roc)
5402 pnfs_roc_set_barrier(data->inode, data->roc_barrier);
5403 break;
5404 default:
5405 if (nfs4_async_handle_error(task, data->res.server,
5406 NULL, NULL) == -EAGAIN) {
5407 rpc_restart_call_prepare(task);
5408 return;
5409 }
5410 }
5411 data->rpc_status = task->tk_status;
5412 }
5413
5414 static void nfs4_delegreturn_release(void *calldata)
5415 {
5416 struct nfs4_delegreturndata *data = calldata;
5417 struct inode *inode = data->inode;
5418
5419 if (inode) {
5420 if (data->roc)
5421 pnfs_roc_release(inode);
5422 nfs_iput_and_deactive(inode);
5423 }
5424 kfree(calldata);
5425 }
5426
5427 static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
5428 {
5429 struct nfs4_delegreturndata *d_data;
5430
5431 d_data = (struct nfs4_delegreturndata *)data;
5432
5433 if (nfs4_wait_on_layoutreturn(d_data->inode, task))
5434 return;
5435
5436 if (d_data->roc)
5437 pnfs_roc_get_barrier(d_data->inode, &d_data->roc_barrier);
5438
5439 nfs4_setup_sequence(d_data->res.server,
5440 &d_data->args.seq_args,
5441 &d_data->res.seq_res,
5442 task);
5443 }
5444
5445 static const struct rpc_call_ops nfs4_delegreturn_ops = {
5446 .rpc_call_prepare = nfs4_delegreturn_prepare,
5447 .rpc_call_done = nfs4_delegreturn_done,
5448 .rpc_release = nfs4_delegreturn_release,
5449 };
5450
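/*
 * Issue an asynchronous DELEGRETURN for @stateid.  When @issync is set,
 * wait for the RPC to complete and fold the post-operation attributes
 * returned by the server back into the inode.
 */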
5451 static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5452 {
5453 struct nfs4_delegreturndata *data;
5454 struct nfs_server *server = NFS_SERVER(inode);
5455 struct rpc_task *task;
5456 struct rpc_message msg = {
5457 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
5458 .rpc_cred = cred,
5459 };
5460 struct rpc_task_setup task_setup_data = {
5461 .rpc_client = server->client,
5462 .rpc_message = &msg,
5463 .callback_ops = &nfs4_delegreturn_ops,
5464 .flags = RPC_TASK_ASYNC,
5465 };
5466 int status = 0;
5467
5468 data = kzalloc(sizeof(*data), GFP_NOFS);
5469 if (data == NULL)
5470 return -ENOMEM;
5471 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
5472
5473 nfs4_state_protect(server->nfs_client,
5474 NFS_SP4_MACH_CRED_CLEANUP,
5475 &task_setup_data.rpc_client, &msg);
5476
5477 data->args.fhandle = &data->fh;
5478 data->args.stateid = &data->stateid;
5479 data->args.bitmask = server->cache_consistency_bitmask;
5480 nfs_copy_fh(&data->fh, NFS_FH(inode));
5481 nfs4_stateid_copy(&data->stateid, stateid);
5482 data->res.fattr = &data->fattr;
5483 data->res.server = server;
5484 nfs_fattr_init(data->res.fattr);
5485 data->timestamp = jiffies;
5486 data->rpc_status = 0;
5487 data->inode = nfs_igrab_and_active(inode);
5488 if (data->inode)
5489 data->roc = nfs4_roc(inode);
5490
5491 task_setup_data.callback_data = data;
5492 msg.rpc_argp = &data->args;
5493 msg.rpc_resp = &data->res;
5494 task = rpc_run_task(&task_setup_data);
5495 if (IS_ERR(task))
5496 return PTR_ERR(task);
5497 if (!issync)
5498 goto out;
5499 status = nfs4_wait_for_completion_rpc_task(task);
5500 if (status != 0)
5501 goto out;
5502 status = data->rpc_status;
5503 if (status == 0)
5504 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
5505 else
5506 nfs_refresh_inode(inode, &data->fattr);
5507 out:
5508 rpc_put_task(task);
5509 return status;
5510 }
5511
5512 int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync)
5513 {
5514 struct nfs_server *server = NFS_SERVER(inode);
5515 struct nfs4_exception exception = { };
5516 int err;
5517 do {
5518 err = _nfs4_proc_delegreturn(inode, cred, stateid, issync);
5519 trace_nfs4_delegreturn(inode, stateid, err);
5520 switch (err) {
5521 case -NFS4ERR_STALE_STATEID:
5522 case -NFS4ERR_EXPIRED:
5523 case 0:
5524 return 0;
5525 }
5526 err = nfs4_handle_exception(server, err, &exception);
5527 } while (exception.retry);
5528 return err;
5529 }
5530
5531 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
5532 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
5533
5534 /*
5535  * Sleep, with exponential backoff, and return the next timeout to use
5535  * when retrying the LOCK operation.
5536 */
5537 static unsigned long
5538 nfs4_set_lock_task_retry(unsigned long timeout)
5539 {
5540 freezable_schedule_timeout_killable_unsafe(timeout);
5541 timeout <<= 1;
5542 if (timeout > NFS4_LOCK_MAXTIMEOUT)
5543 return NFS4_LOCK_MAXTIMEOUT;
5544 return timeout;
5545 }
5546
5547 static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5548 {
5549 struct inode *inode = state->inode;
5550 struct nfs_server *server = NFS_SERVER(inode);
5551 struct nfs_client *clp = server->nfs_client;
5552 struct nfs_lockt_args arg = {
5553 .fh = NFS_FH(inode),
5554 .fl = request,
5555 };
5556 struct nfs_lockt_res res = {
5557 .denied = request,
5558 };
5559 struct rpc_message msg = {
5560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKT],
5561 .rpc_argp = &arg,
5562 .rpc_resp = &res,
5563 .rpc_cred = state->owner->so_cred,
5564 };
5565 struct nfs4_lock_state *lsp;
5566 int status;
5567
5568 arg.lock_owner.clientid = clp->cl_clientid;
5569 status = nfs4_set_lock_state(state, request);
5570 if (status != 0)
5571 goto out;
5572 lsp = request->fl_u.nfs4_fl.owner;
5573 arg.lock_owner.id = lsp->ls_seqid.owner_id;
5574 arg.lock_owner.s_dev = server->s_dev;
5575 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
5576 switch (status) {
5577 case 0:
5578 request->fl_type = F_UNLCK;
5579 break;
5580 case -NFS4ERR_DENIED:
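		/* A conflicting lock was found; its details were decoded
		 * into 'request', so report success to the F_GETLK caller. */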
5581 status = 0;
5582 }
5583 request->fl_ops->fl_release_private(request);
5584 request->fl_ops = NULL;
5585 out:
5586 return status;
5587 }
5588
5589 static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
5590 {
5591 struct nfs4_exception exception = { };
5592 int err;
5593
5594 do {
5595 err = _nfs4_proc_getlk(state, cmd, request);
5596 trace_nfs4_get_lock(request, state, cmd, err);
5597 err = nfs4_handle_exception(NFS_SERVER(state->inode), err,
5598 &exception);
5599 } while (exception.retry);
5600 return err;
5601 }
5602
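/* Record or test the lock in the local VFS lock tables (handles both POSIX and flock-style locks). */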
5603 static int do_vfs_lock(struct inode *inode, struct file_lock *fl)
5604 {
5605 return locks_lock_inode_wait(inode, fl);
5606 }
5607
5608 struct nfs4_unlockdata {
5609 struct nfs_locku_args arg;
5610 struct nfs_locku_res res;
5611 struct nfs4_lock_state *lsp;
5612 struct nfs_open_context *ctx;
5613 struct file_lock fl;
5614 struct nfs_server *server;
5615 unsigned long timestamp;
5616 };
5617
5618 static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
5619 struct nfs_open_context *ctx,
5620 struct nfs4_lock_state *lsp,
5621 struct nfs_seqid *seqid)
5622 {
5623 struct nfs4_unlockdata *p;
5624 struct inode *inode = lsp->ls_state->inode;
5625
5626 p = kzalloc(sizeof(*p), GFP_NOFS);
5627 if (p == NULL)
5628 return NULL;
5629 p->arg.fh = NFS_FH(inode);
5630 p->arg.fl = &p->fl;
5631 p->arg.seqid = seqid;
5632 p->res.seqid = seqid;
5633 p->lsp = lsp;
5634 atomic_inc(&lsp->ls_count);
5635 /* Ensure we don't close the file until we're done freeing locks! */
5636 p->ctx = get_nfs_open_context(ctx);
5637 memcpy(&p->fl, fl, sizeof(p->fl));
5638 p->server = NFS_SERVER(inode);
5639 return p;
5640 }
5641
5642 static void nfs4_locku_release_calldata(void *data)
5643 {
5644 struct nfs4_unlockdata *calldata = data;
5645 nfs_free_seqid(calldata->arg.seqid);
5646 nfs4_put_lock_state(calldata->lsp);
5647 put_nfs_open_context(calldata->ctx);
5648 kfree(calldata);
5649 }
5650
5651 static void nfs4_locku_done(struct rpc_task *task, void *data)
5652 {
5653 struct nfs4_unlockdata *calldata = data;
5654
5655 if (!nfs4_sequence_done(task, &calldata->res.seq_res))
5656 return;
5657 switch (task->tk_status) {
5658 case 0:
5659 renew_lease(calldata->server, calldata->timestamp);
5660 do_vfs_lock(calldata->lsp->ls_state->inode, &calldata->fl);
5661 if (nfs4_update_lock_stateid(calldata->lsp,
5662 &calldata->res.stateid))
5663 break;
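/* Fall through: the stateid returned by the server did not update our lock stateid, so handle it as a stateid error. */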
5664 case -NFS4ERR_BAD_STATEID:
5665 case -NFS4ERR_OLD_STATEID:
5666 case -NFS4ERR_STALE_STATEID:
5667 case -NFS4ERR_EXPIRED:
5668 if (!nfs4_stateid_match(&calldata->arg.stateid,
5669 &calldata->lsp->ls_stateid))
5670 rpc_restart_call_prepare(task);
5671 break;
5672 default:
5673 if (nfs4_async_handle_error(task, calldata->server,
5674 NULL, NULL) == -EAGAIN)
5675 rpc_restart_call_prepare(task);
5676 }
5677 nfs_release_seqid(calldata->arg.seqid);
5678 }
5679
5680 static void nfs4_locku_prepare(struct rpc_task *task, void *data)
5681 {
5682 struct nfs4_unlockdata *calldata = data;
5683
5684 if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
5685 goto out_wait;
5686 nfs4_stateid_copy(&calldata->arg.stateid, &calldata->lsp->ls_stateid);
5687 if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
5688 /* Note: exit _without_ running nfs4_locku_done */
5689 goto out_no_action;
5690 }
5691 calldata->timestamp = jiffies;
5692 if (nfs4_setup_sequence(calldata->server,
5693 &calldata->arg.seq_args,
5694 &calldata->res.seq_res,
5695 task) != 0)
5696 nfs_release_seqid(calldata->arg.seqid);
5697 return;
5698 out_no_action:
5699 task->tk_action = NULL;
5700 out_wait:
5701 nfs4_sequence_done(task, &calldata->res.seq_res);
5702 }
5703
5704 static const struct rpc_call_ops nfs4_locku_ops = {
5705 .rpc_call_prepare = nfs4_locku_prepare,
5706 .rpc_call_done = nfs4_locku_done,
5707 .rpc_release = nfs4_locku_release_calldata,
5708 };
5709
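/* Set up and start an asynchronous LOCKU request; the caller is responsible for dropping the returned rpc_task reference. */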
5710 static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
5711 struct nfs_open_context *ctx,
5712 struct nfs4_lock_state *lsp,
5713 struct nfs_seqid *seqid)
5714 {
5715 struct nfs4_unlockdata *data;
5716 struct rpc_message msg = {
5717 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCKU],
5718 .rpc_cred = ctx->cred,
5719 };
5720 struct rpc_task_setup task_setup_data = {
5721 .rpc_client = NFS_CLIENT(lsp->ls_state->inode),
5722 .rpc_message = &msg,
5723 .callback_ops = &nfs4_locku_ops,
5724 .workqueue = nfsiod_workqueue,
5725 .flags = RPC_TASK_ASYNC,
5726 };
5727
5728 nfs4_state_protect(NFS_SERVER(lsp->ls_state->inode)->nfs_client,
5729 NFS_SP4_MACH_CRED_CLEANUP, &task_setup_data.rpc_client, &msg);
5730
5731 /* Ensure this is an unlock - when canceling a lock, the
5732 * canceled lock is passed in, and it won't be an unlock.
5733 */
5734 fl->fl_type = F_UNLCK;
5735
5736 data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid);
5737 if (data == NULL) {
5738 nfs_free_seqid(seqid);
5739 return ERR_PTR(-ENOMEM);
5740 }
5741
5742 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
5743 msg.rpc_argp = &data->arg;
5744 msg.rpc_resp = &data->res;
5745 task_setup_data.callback_data = data;
5746 return rpc_run_task(&task_setup_data);
5747 }
5748
5749 static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
5750 {
5751 struct inode *inode = state->inode;
5752 struct nfs4_state_owner *sp = state->owner;
5753 struct nfs_inode *nfsi = NFS_I(inode);
5754 struct nfs_seqid *seqid;
5755 struct nfs4_lock_state *lsp;
5756 struct rpc_task *task;
5757 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5758 int status = 0;
5759 unsigned char fl_flags = request->fl_flags;
5760
5761 status = nfs4_set_lock_state(state, request);
5762 /* Unlock _before_ we do the RPC call */
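/* FL_EXISTS makes the VFS unlock return -ENOENT when no matching lock was held, letting us skip the LOCKU RPC below. */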
5763 request->fl_flags |= FL_EXISTS;
5764 /* Exclude nfs_delegation_claim_locks() */
5765 mutex_lock(&sp->so_delegreturn_mutex);
5766 /* Exclude nfs4_reclaim_open_stateid() - note nesting! */
5767 down_read(&nfsi->rwsem);
5768 if (do_vfs_lock(inode, request) == -ENOENT) {
5769 up_read(&nfsi->rwsem);
5770 mutex_unlock(&sp->so_delegreturn_mutex);
5771 goto out;
5772 }
5773 up_read(&nfsi->rwsem);
5774 mutex_unlock(&sp->so_delegreturn_mutex);
5775 if (status != 0)
5776 goto out;
5777 /* Is this a delegated lock? */
5778 lsp = request->fl_u.nfs4_fl.owner;
5779 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
5780 goto out;
5781 alloc_seqid = NFS_SERVER(inode)->nfs_client->cl_mvops->alloc_seqid;
5782 seqid = alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
5783 status = -ENOMEM;
5784 if (IS_ERR(seqid))
5785 goto out;
5786 task = nfs4_do_unlck(request, nfs_file_open_context(request->fl_file), lsp, seqid);
5787 status = PTR_ERR(task);
5788 if (IS_ERR(task))
5789 goto out;
5790 status = nfs4_wait_for_completion_rpc_task(task);
5791 rpc_put_task(task);
5792 out:
5793 request->fl_flags = fl_flags;
5794 trace_nfs4_unlock(request, state, F_SETLK, status);
5795 return status;
5796 }
5797
5798 struct nfs4_lockdata {
5799 struct nfs_lock_args arg;
5800 struct nfs_lock_res res;
5801 struct nfs4_lock_state *lsp;
5802 struct nfs_open_context *ctx;
5803 struct file_lock fl;
5804 unsigned long timestamp;
5805 int rpc_status;
5806 int cancelled;
5807 struct nfs_server *server;
5808 };
5809
5810 static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
5811 struct nfs_open_context *ctx, struct nfs4_lock_state *lsp,
5812 gfp_t gfp_mask)
5813 {
5814 struct nfs4_lockdata *p;
5815 struct inode *inode = lsp->ls_state->inode;
5816 struct nfs_server *server = NFS_SERVER(inode);
5817 struct nfs_seqid *(*alloc_seqid)(struct nfs_seqid_counter *, gfp_t);
5818
5819 p = kzalloc(sizeof(*p), gfp_mask);
5820 if (p == NULL)
5821 return NULL;
5822
5823 p->arg.fh = NFS_FH(inode);
5824 p->arg.fl = &p->fl;
5825 p->arg.open_seqid = nfs_alloc_seqid(&lsp->ls_state->owner->so_seqid, gfp_mask);
5826 if (IS_ERR(p->arg.open_seqid))
5827 goto out_free;
5828 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
5829 p->arg.lock_seqid = alloc_seqid(&lsp->ls_seqid, gfp_mask);
5830 if (IS_ERR(p->arg.lock_seqid))
5831 goto out_free_seqid;
5832 p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
5833 p->arg.lock_owner.id = lsp->ls_seqid.owner_id;
5834 p->arg.lock_owner.s_dev = server->s_dev;
5835 p->res.lock_seqid = p->arg.lock_seqid;
5836 p->lsp = lsp;
5837 p->server = server;
5838 atomic_inc(&lsp->ls_count);
5839 p->ctx = get_nfs_open_context(ctx);
5840 get_file(fl->fl_file);
5841 memcpy(&p->fl, fl, sizeof(p->fl));
5842 return p;
5843 out_free_seqid:
5844 nfs_free_seqid(p->arg.open_seqid);
5845 out_free:
5846 kfree(p);
5847 return NULL;
5848 }
5849
5850 static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
5851 {
5852 struct nfs4_lockdata *data = calldata;
5853 struct nfs4_state *state = data->lsp->ls_state;
5854
5855 dprintk("%s: begin!\n", __func__);
5856 if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
5857 goto out_wait;
5858 /* Do we need to do an open_to_lock_owner? */
5859 if (!test_bit(NFS_LOCK_INITIALIZED, &data->lsp->ls_flags)) {
5860 if (nfs_wait_on_sequence(data->arg.open_seqid, task) != 0) {
5861 goto out_release_lock_seqid;
5862 }
5863 nfs4_stateid_copy(&data->arg.open_stateid,
5864 &state->open_stateid);
5865 data->arg.new_lock_owner = 1;
5866 data->res.open_seqid = data->arg.open_seqid;
5867 } else {
5868 data->arg.new_lock_owner = 0;
5869 nfs4_stateid_copy(&data->arg.lock_stateid,
5870 &data->lsp->ls_stateid);
5871 }
5872 if (!nfs4_valid_open_stateid(state)) {
5873 data->rpc_status = -EBADF;
5874 task->tk_action = NULL;
5875 goto out_release_open_seqid;
5876 }
5877 data->timestamp = jiffies;
5878 if (nfs4_setup_sequence(data->server,
5879 &data->arg.seq_args,
5880 &data->res.seq_res,
5881 task) == 0)
5882 return;
5883 out_release_open_seqid:
5884 nfs_release_seqid(data->arg.open_seqid);
5885 out_release_lock_seqid:
5886 nfs_release_seqid(data->arg.lock_seqid);
5887 out_wait:
5888 nfs4_sequence_done(task, &data->res.seq_res);
5889 dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
5890 }
5891
5892 static void nfs4_lock_done(struct rpc_task *task, void *calldata)
5893 {
5894 struct nfs4_lockdata *data = calldata;
5895 struct nfs4_lock_state *lsp = data->lsp;
5896
5897 dprintk("%s: begin!\n", __func__);
5898
5899 if (!nfs4_sequence_done(task, &data->res.seq_res))
5900 return;
5901
5902 data->rpc_status = task->tk_status;
5903 switch (task->tk_status) {
5904 case 0:
5905 renew_lease(NFS_SERVER(d_inode(data->ctx->dentry)),
5906 data->timestamp);
5907 if (data->arg.new_lock) {
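/* The server granted the lock; record it locally. FL_SLEEP and FL_ACCESS are cleared so the VFS applies it immediately rather than testing or blocking. */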
5908 data->fl.fl_flags &= ~(FL_SLEEP | FL_ACCESS);
5909 if (do_vfs_lock(lsp->ls_state->inode, &data->fl) < 0) {
5910 rpc_restart_call_prepare(task);
5911 break;
5912 }
5913 }
5914 if (data->arg.new_lock_owner != 0) {
5915 nfs_confirm_seqid(&lsp->ls_seqid, 0);
5916 nfs4_stateid_copy(&lsp->ls_stateid, &data->res.stateid);
5917 set_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
5918 } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid))
5919 rpc_restart_call_prepare(task);
5920 break;
5921 case -NFS4ERR_BAD_STATEID:
5922 case -NFS4ERR_OLD_STATEID:
5923 case -NFS4ERR_STALE_STATEID:
5924 case -NFS4ERR_EXPIRED:
5925 if (data->arg.new_lock_owner != 0) {
5926 if (!nfs4_stateid_match(&data->arg.open_stateid,
5927 &lsp->ls_state->open_stateid))
5928 rpc_restart_call_prepare(task);
5929 } else if (!nfs4_stateid_match(&data->arg.lock_stateid,
5930 &lsp->ls_stateid))
5931 rpc_restart_call_prepare(task);
5932 }
5933 dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
5934 }
5935
5936 static void nfs4_lock_release(void *calldata)
5937 {
5938 struct nfs4_lockdata *data = calldata;
5939
5940 dprintk("%s: begin!\n", __func__);
5941 nfs_free_seqid(data->arg.open_seqid);
5942 if (data->cancelled != 0) {
5943 struct rpc_task *task;
5944 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
5945 data->arg.lock_seqid);
5946 if (!IS_ERR(task))
5947 rpc_put_task_async(task);
5948 dprintk("%s: cancelling lock!\n", __func__);
5949 } else
5950 nfs_free_seqid(data->arg.lock_seqid);
5951 nfs4_put_lock_state(data->lsp);
5952 put_nfs_open_context(data->ctx);
5953 fput(data->fl.fl_file);
5954 kfree(data);
5955 dprintk("%s: done!\n", __func__);
5956 }
5957
5958 static const struct rpc_call_ops nfs4_lock_ops = {
5959 .rpc_call_prepare = nfs4_lock_prepare,
5960 .rpc_call_done = nfs4_lock_done,
5961 .rpc_release = nfs4_lock_release,
5962 };
5963
5964 static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
5965 {
5966 switch (error) {
5967 case -NFS4ERR_ADMIN_REVOKED:
5968 case -NFS4ERR_BAD_STATEID:
5969 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
5970 if (new_lock_owner != 0 ||
5971 test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0)
5972 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
5973 break;
5974 case -NFS4ERR_STALE_STATEID:
5975 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
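/* Fall through: a stale stateid also requires lease recovery */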
5976 case -NFS4ERR_EXPIRED:
5977 nfs4_schedule_lease_recovery(server->nfs_client);
5978 }
5979 }
5980
5981 static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
5982 {
5983 struct nfs4_lockdata *data;
5984 struct rpc_task *task;
5985 struct rpc_message msg = {
5986 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LOCK],
5987 .rpc_cred = state->owner->so_cred,
5988 };
5989 struct rpc_task_setup task_setup_data = {
5990 .rpc_client = NFS_CLIENT(state->inode),
5991 .rpc_message = &msg,
5992 .callback_ops = &nfs4_lock_ops,
5993 .workqueue = nfsiod_workqueue,
5994 .flags = RPC_TASK_ASYNC,
5995 };
5996 int ret;
5997
5998 dprintk("%s: begin!\n", __func__);
5999 data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
6000 fl->fl_u.nfs4_fl.owner,
6001 recovery_type == NFS_LOCK_NEW ? GFP_KERNEL : GFP_NOFS);
6002 if (data == NULL)
6003 return -ENOMEM;
6004 if (IS_SETLKW(cmd))
6005 data->arg.block = 1;
6006 nfs4_init_sequence(&data->arg.seq_args, &data->res.seq_res, 1);
6007 msg.rpc_argp = &data->arg;
6008 msg.rpc_resp = &data->res;
6009 task_setup_data.callback_data = data;
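/* Reclaim and expired-lock recovery requests are marked privileged so they can proceed while the client is recovering state. */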
6010 if (recovery_type > NFS_LOCK_NEW) {
6011 if (recovery_type == NFS_LOCK_RECLAIM)
6012 data->arg.reclaim = NFS_LOCK_RECLAIM;
6013 nfs4_set_sequence_privileged(&data->arg.seq_args);
6014 } else
6015 data->arg.new_lock = 1;
6016 task = rpc_run_task(&task_setup_data);
6017 if (IS_ERR(task))
6018 return PTR_ERR(task);
6019 ret = nfs4_wait_for_completion_rpc_task(task);
6020 if (ret == 0) {
6021 ret = data->rpc_status;
6022 if (ret)
6023 nfs4_handle_setlk_error(data->server, data->lsp,
6024 data->arg.new_lock_owner, ret);
6025 } else
6026 data->cancelled = 1;
6027 rpc_put_task(task);
6028 dprintk("%s: done, ret = %d!\n", __func__, ret);
6029 trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
6030 return ret;
6031 }
6032
6033 static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
6034 {
6035 struct nfs_server *server = NFS_SERVER(state->inode);
6036 struct nfs4_exception exception = {
6037 .inode = state->inode,
6038 };
6039 int err;
6040
6041 do {
6042 /* Cache the lock if possible... */
6043 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6044 return 0;
6045 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_RECLAIM);
6046 if (err != -NFS4ERR_DELAY)
6047 break;
6048 nfs4_handle_exception(server, err, &exception);
6049 } while (exception.retry);
6050 return err;
6051 }
6052
6053 static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request)
6054 {
6055 struct nfs_server *server = NFS_SERVER(state->inode);
6056 struct nfs4_exception exception = {
6057 .inode = state->inode,
6058 };
6059 int err;
6060
6061 err = nfs4_set_lock_state(state, request);
6062 if (err != 0)
6063 return err;
6064 if (!recover_lost_locks) {
6065 set_bit(NFS_LOCK_LOST, &request->fl_u.nfs4_fl.owner->ls_flags);
6066 return 0;
6067 }
6068 do {
6069 if (test_bit(NFS_DELEGATED_STATE, &state->flags) != 0)
6070 return 0;
6071 err = _nfs4_do_setlk(state, F_SETLK, request, NFS_LOCK_EXPIRED);
6072 switch (err) {
6073 default:
6074 goto out;
6075 case -NFS4ERR_GRACE:
6076 case -NFS4ERR_DELAY:
6077 nfs4_handle_exception(server, err, &exception);
6078 err = 0;
6079 }
6080 } while (exception.retry);
6081 out:
6082 return err;
6083 }
6084
6085 #if defined(CONFIG_NFS_V4_1)
6086 /**
6087 * nfs41_check_expired_locks - possibly free a lock stateid
6088 *
6089 * @state: NFSv4 state for an inode
6090 *
6091 * Returns NFS_OK if recovery for this stateid is now finished.
6092 * Otherwise a negative NFS4ERR value is returned.
6093 */
6094 static int nfs41_check_expired_locks(struct nfs4_state *state)
6095 {
6096 int status, ret = -NFS4ERR_BAD_STATEID;
6097 struct nfs4_lock_state *lsp;
6098 struct nfs_server *server = NFS_SERVER(state->inode);
6099
6100 list_for_each_entry(lsp, &state->lock_states, ls_locks) {
6101 if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
6102 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
6103
6104 status = nfs41_test_stateid(server,
6105 &lsp->ls_stateid,
6106 cred);
6107 trace_nfs4_test_lock_stateid(state, lsp, status);
6108 if (status != NFS_OK) {
6109 /* Free the stateid unless the server
6110 * informs us the stateid is unrecognized. */
6111 if (status != -NFS4ERR_BAD_STATEID)
6112 nfs41_free_stateid(server,
6113 &lsp->ls_stateid,
6114 cred);
6115 clear_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags);
6116 ret = status;
6117 }
6118 }
6119 }
6120
6121 return ret;
6122 }
6123
6124 static int nfs41_lock_expired(struct nfs4_state *state, struct file_lock *request)
6125 {
6126 int status = NFS_OK;
6127
6128 if (test_bit(LK_STATE_IN_USE, &state->flags))
6129 status = nfs41_check_expired_locks(state);
6130 if (status != NFS_OK)
6131 status = nfs4_lock_expired(state, request);
6132 return status;
6133 }
6134 #endif
6135
6136 static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6137 {
6138 struct nfs_inode *nfsi = NFS_I(state->inode);
6139 struct nfs4_state_owner *sp = state->owner;
6140 unsigned char fl_flags = request->fl_flags;
6141 int status = -ENOLCK;
6142
6143 if ((fl_flags & FL_POSIX) &&
6144 !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
6145 goto out;
6146 /* Is this a delegated open? */
6147 status = nfs4_set_lock_state(state, request);
6148 if (status != 0)
6149 goto out;
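/* FL_ACCESS asks the VFS only to test for local conflicts without actually recording the lock. */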
6150 request->fl_flags |= FL_ACCESS;
6151 status = do_vfs_lock(state->inode, request);
6152 if (status < 0)
6153 goto out;
6154 mutex_lock(&sp->so_delegreturn_mutex);
6155 down_read(&nfsi->rwsem);
6156 if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
6157 /* Yes: cache locks! */
6158 /* ...but avoid races with delegation recall... */
6159 request->fl_flags = fl_flags & ~FL_SLEEP;
6160 status = do_vfs_lock(state->inode, request);
6161 up_read(&nfsi->rwsem);
6162 mutex_unlock(&sp->so_delegreturn_mutex);
6163 goto out;
6164 }
6165 up_read(&nfsi->rwsem);
6166 mutex_unlock(&sp->so_delegreturn_mutex);
6167 status = _nfs4_do_setlk(state, cmd, request, NFS_LOCK_NEW);
6168 out:
6169 request->fl_flags = fl_flags;
6170 return status;
6171 }
6172
6173 static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
6174 {
6175 struct nfs4_exception exception = {
6176 .state = state,
6177 .inode = state->inode,
6178 };
6179 int err;
6180
6181 do {
6182 err = _nfs4_proc_setlk(state, cmd, request);
6183 if (err == -NFS4ERR_DENIED)
6184 err = -EAGAIN;
6185 err = nfs4_handle_exception(NFS_SERVER(state->inode),
6186 err, &exception);
6187 } while (exception.retry);
6188 return err;
6189 }
6190
6191 static int
6192 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
6193 {
6194 struct nfs_open_context *ctx;
6195 struct nfs4_state *state;
6196 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
6197 int status;
6198
6199 /* verify open state */
6200 ctx = nfs_file_open_context(filp);
6201 state = ctx->state;
6202
6203 if (request->fl_start < 0 || request->fl_end < 0)
6204 return -EINVAL;
6205
6206 if (IS_GETLK(cmd)) {
6207 if (state != NULL)
6208 return nfs4_proc_getlk(state, F_GETLK, request);
6209 return 0;
6210 }
6211
6212 if (!(IS_SETLK(cmd) || IS_SETLKW(cmd)))
6213 return -EINVAL;
6214
6215 if (request->fl_type == F_UNLCK) {
6216 if (state != NULL)
6217 return nfs4_proc_unlck(state, cmd, request);
6218 return 0;
6219 }
6220
6221 if (state == NULL)
6222 return -ENOLCK;
6223 /*
6224 * Don't rely on the VFS having checked the file open mode,
6225 * since it won't do this for flock() locks.
6226 */
6227 switch (request->fl_type) {
6228 case F_RDLCK:
6229 if (!(filp->f_mode & FMODE_READ))
6230 return -EBADF;
6231 break;
6232 case F_WRLCK:
6233 if (!(filp->f_mode & FMODE_WRITE))
6234 return -EBADF;
6235 }
6236
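/* For blocking (SETLKW) requests, retry a denied lock with exponential backoff; non-blocking SETLK requests give up after the first -EAGAIN. */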
6237 do {
6238 status = nfs4_proc_setlk(state, cmd, request);
6239 if ((status != -EAGAIN) || IS_SETLK(cmd))
6240 break;
6241 timeout = nfs4_set_lock_task_retry(timeout);
6242 status = -ERESTARTSYS;
6243 if (signalled())
6244 break;
6245 } while(status < 0);
6246 return status;
6247 }
6248
6249 int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid)
6250 {
6251 struct nfs_server *server = NFS_SERVER(state->inode);
6252 int err;
6253
6254 err = nfs4_set_lock_state(state, fl);
6255 if (err != 0)
6256 return err;
6257 err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6258 return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6259 }
6260
6261 struct nfs_release_lockowner_data {
6262 struct nfs4_lock_state *lsp;
6263 struct nfs_server *server;
6264 struct nfs_release_lockowner_args args;
6265 struct nfs_release_lockowner_res res;
6266 unsigned long timestamp;
6267 };
6268
6269 static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata)
6270 {
6271 struct nfs_release_lockowner_data *data = calldata;
6272 struct nfs_server *server = data->server;
6273 nfs40_setup_sequence(server->nfs_client->cl_slot_tbl,
6274 &data->args.seq_args, &data->res.seq_res, task);
6275 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6276 data->timestamp = jiffies;
6277 }
6278
6279 static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
6280 {
6281 struct nfs_release_lockowner_data *data = calldata;
6282 struct nfs_server *server = data->server;
6283
6284 nfs40_sequence_done(task, &data->res.seq_res);
6285
6286 switch (task->tk_status) {
6287 case 0:
6288 renew_lease(server, data->timestamp);
6289 break;
6290 case -NFS4ERR_STALE_CLIENTID:
6291 case -NFS4ERR_EXPIRED:
6292 nfs4_schedule_lease_recovery(server->nfs_client);
6293 break;
6294 case -NFS4ERR_LEASE_MOVED:
6295 case -NFS4ERR_DELAY:
6296 if (nfs4_async_handle_error(task, server,
6297 NULL, NULL) == -EAGAIN)
6298 rpc_restart_call_prepare(task);
6299 }
6300 }
6301
6302 static void nfs4_release_lockowner_release(void *calldata)
6303 {
6304 struct nfs_release_lockowner_data *data = calldata;
6305 nfs4_free_lock_state(data->server, data->lsp);
6306 kfree(calldata);
6307 }
6308
6309 static const struct rpc_call_ops nfs4_release_lockowner_ops = {
6310 .rpc_call_prepare = nfs4_release_lockowner_prepare,
6311 .rpc_call_done = nfs4_release_lockowner_done,
6312 .rpc_release = nfs4_release_lockowner_release,
6313 };
6314
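/* RELEASE_LOCKOWNER exists only in NFSv4.0 (minor version 0); later minor versions free lock state via FREE_STATEID instead, hence the minor_version check below. */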
6315 static void
6316 nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_state *lsp)
6317 {
6318 struct nfs_release_lockowner_data *data;
6319 struct rpc_message msg = {
6320 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RELEASE_LOCKOWNER],
6321 };
6322
6323 if (server->nfs_client->cl_mvops->minor_version != 0)
6324 return;
6325
6326 data = kmalloc(sizeof(*data), GFP_NOFS);
6327 if (!data)
6328 return;
6329 data->lsp = lsp;
6330 data->server = server;
6331 data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
6332 data->args.lock_owner.id = lsp->ls_seqid.owner_id;
6333 data->args.lock_owner.s_dev = server->s_dev;
6334
6335 msg.rpc_argp = &data->args;
6336 msg.rpc_resp = &data->res;
6337 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
6338 rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
6339 }
6340
6341 #define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
6342
6343 static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler,
6344 struct dentry *unused, struct inode *inode,
6345 const char *key, const void *buf,
6346 size_t buflen, int flags)
6347 {
6348 return nfs4_proc_set_acl(inode, buf, buflen);
6349 }
6350
6351 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
6352 struct dentry *unused, struct inode *inode,
6353 const char *key, void *buf, size_t buflen)
6354 {
6355 return nfs4_proc_get_acl(inode, buf, buflen);
6356 }
6357
6358 static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry)
6359 {
6360 return nfs4_server_supports_acls(NFS_SERVER(d_inode(dentry)));
6361 }
6362
6363 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
6364
6365 static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler,
6366 struct dentry *unused, struct inode *inode,
6367 const char *key, const void *buf,
6368 size_t buflen, int flags)
6369 {
6370 if (security_ismaclabel(key))
6371 return nfs4_set_security_label(inode, buf, buflen);
6372
6373 return -EOPNOTSUPP;
6374 }
6375
6376 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
6377 struct dentry *unused, struct inode *inode,
6378 const char *key, void *buf, size_t buflen)
6379 {
6380 if (security_ismaclabel(key))
6381 return nfs4_get_security_label(inode, buf, buflen);
6382 return -EOPNOTSUPP;
6383 }
6384
6385 static ssize_t
6386 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6387 {
6388 int len = 0;
6389
6390 if (nfs_server_capable(inode, NFS_CAP_SECURITY_LABEL)) {
6391 len = security_inode_listsecurity(inode, list, list_len);
6392 if (list_len && len > list_len)
6393 return -ERANGE;
6394 }
6395 return len;
6396 }
6397
6398 static const struct xattr_handler nfs4_xattr_nfs4_label_handler = {
6399 .prefix = XATTR_SECURITY_PREFIX,
6400 .get = nfs4_xattr_get_nfs4_label,
6401 .set = nfs4_xattr_set_nfs4_label,
6402 };
6403
6404 #else
6405
6406 static ssize_t
6407 nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len)
6408 {
6409 return 0;
6410 }
6411
6412 #endif
6413
6414 /*
6415 * nfs_fhget will use either the mounted_on_fileid or the fileid
6416 */
6417 static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr)
6418 {
6419 if (!(((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) ||
6420 (fattr->valid & NFS_ATTR_FATTR_FILEID)) &&
6421 (fattr->valid & NFS_ATTR_FATTR_FSID) &&
6422 (fattr->valid & NFS_ATTR_FATTR_V4_LOCATIONS)))
6423 return;
6424
6425 fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE |
6426 NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_V4_REFERRAL;
6427 fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO;
6428 fattr->nlink = 2;
6429 }
6430
6431 static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6432 const struct qstr *name,
6433 struct nfs4_fs_locations *fs_locations,
6434 struct page *page)
6435 {
6436 struct nfs_server *server = NFS_SERVER(dir);
6437 u32 bitmask[3] = {
6438 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6439 };
6440 struct nfs4_fs_locations_arg args = {
6441 .dir_fh = NFS_FH(dir),
6442 .name = name,
6443 .page = page,
6444 .bitmask = bitmask,
6445 };
6446 struct nfs4_fs_locations_res res = {
6447 .fs_locations = fs_locations,
6448 };
6449 struct rpc_message msg = {
6450 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6451 .rpc_argp = &args,
6452 .rpc_resp = &res,
6453 };
6454 int status;
6455
6456 dprintk("%s: start\n", __func__);
6457
6458 /* Ask for the fileid of the absent filesystem if mounted_on_fileid
6459 * is not supported */
6460 if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
6461 bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
6462 else
6463 bitmask[0] |= FATTR4_WORD0_FILEID;
6464
6465 nfs_fattr_init(&fs_locations->fattr);
6466 fs_locations->server = server;
6467 fs_locations->nlocations = 0;
6468 status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0);
6469 dprintk("%s: returned status = %d\n", __func__, status);
6470 return status;
6471 }
6472
6473 int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
6474 const struct qstr *name,
6475 struct nfs4_fs_locations *fs_locations,
6476 struct page *page)
6477 {
6478 struct nfs4_exception exception = { };
6479 int err;
6480 do {
6481 err = _nfs4_proc_fs_locations(client, dir, name,
6482 fs_locations, page);
6483 trace_nfs4_get_fs_locations(dir, name, err);
6484 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6485 &exception);
6486 } while (exception.retry);
6487 return err;
6488 }
6489
6490 /*
6491 * This operation also signals the server that this client is
6492 * performing migration recovery. The server can stop returning
6493 * NFS4ERR_LEASE_MOVED to this client. A RENEW operation is
6494 * appended to this compound to identify the client ID which is
6495 * performing recovery.
6496 */
6497 static int _nfs40_proc_get_locations(struct inode *inode,
6498 struct nfs4_fs_locations *locations,
6499 struct page *page, struct rpc_cred *cred)
6500 {
6501 struct nfs_server *server = NFS_SERVER(inode);
6502 struct rpc_clnt *clnt = server->client;
6503 u32 bitmask[2] = {
6504 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6505 };
6506 struct nfs4_fs_locations_arg args = {
6507 .clientid = server->nfs_client->cl_clientid,
6508 .fh = NFS_FH(inode),
6509 .page = page,
6510 .bitmask = bitmask,
6511 .migration = 1, /* skip LOOKUP */
6512 .renew = 1, /* append RENEW */
6513 };
6514 struct nfs4_fs_locations_res res = {
6515 .fs_locations = locations,
6516 .migration = 1,
6517 .renew = 1,
6518 };
6519 struct rpc_message msg = {
6520 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6521 .rpc_argp = &args,
6522 .rpc_resp = &res,
6523 .rpc_cred = cred,
6524 };
6525 unsigned long now = jiffies;
6526 int status;
6527
6528 nfs_fattr_init(&locations->fattr);
6529 locations->server = server;
6530 locations->nlocations = 0;
6531
6532 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6533 nfs4_set_sequence_privileged(&args.seq_args);
6534 status = nfs4_call_sync_sequence(clnt, server, &msg,
6535 &args.seq_args, &res.seq_res);
6536 if (status)
6537 return status;
6538
6539 renew_lease(server, now);
6540 return 0;
6541 }
6542
6543 #ifdef CONFIG_NFS_V4_1
6544
6545 /*
6546 * This operation also signals the server that this client is
6547 * performing migration recovery. The server can stop asserting
6548 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID
6549 * performing this operation is identified in the SEQUENCE
6550 * operation in this compound.
6551 *
6552 * When the client supports GETATTR(fs_locations_info), it can
6553 * be plumbed in here.
6554 */
6555 static int _nfs41_proc_get_locations(struct inode *inode,
6556 struct nfs4_fs_locations *locations,
6557 struct page *page, struct rpc_cred *cred)
6558 {
6559 struct nfs_server *server = NFS_SERVER(inode);
6560 struct rpc_clnt *clnt = server->client;
6561 u32 bitmask[2] = {
6562 [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
6563 };
6564 struct nfs4_fs_locations_arg args = {
6565 .fh = NFS_FH(inode),
6566 .page = page,
6567 .bitmask = bitmask,
6568 .migration = 1, /* skip LOOKUP */
6569 };
6570 struct nfs4_fs_locations_res res = {
6571 .fs_locations = locations,
6572 .migration = 1,
6573 };
6574 struct rpc_message msg = {
6575 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FS_LOCATIONS],
6576 .rpc_argp = &args,
6577 .rpc_resp = &res,
6578 .rpc_cred = cred,
6579 };
6580 int status;
6581
6582 nfs_fattr_init(&locations->fattr);
6583 locations->server = server;
6584 locations->nlocations = 0;
6585
6586 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6587 nfs4_set_sequence_privileged(&args.seq_args);
6588 status = nfs4_call_sync_sequence(clnt, server, &msg,
6589 &args.seq_args, &res.seq_res);
6590 if (status == NFS4_OK &&
6591 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6592 status = -NFS4ERR_LEASE_MOVED;
6593 return status;
6594 }
6595
6596 #endif /* CONFIG_NFS_V4_1 */
6597
6598 /**
6599 * nfs4_proc_get_locations - discover locations for a migrated FSID
6600 * @inode: inode on FSID that is migrating
6601 * @locations: result of query
6602 * @page: buffer
6603 * @cred: credential to use for this operation
6604 *
6605 * Returns NFS4_OK on success, a negative NFS4ERR status code if the
6606 * operation failed, or a negative errno if a local error occurred.
6607 *
6608 * On success, "locations" is filled in, but if the server has
6609 * no locations information, NFS_ATTR_FATTR_V4_LOCATIONS is not
6610 * asserted.
6611 *
6612 * -NFS4ERR_LEASE_MOVED is returned if the server still has leases
6613 * from this client that require migration recovery.
6614 */
6615 int nfs4_proc_get_locations(struct inode *inode,
6616 struct nfs4_fs_locations *locations,
6617 struct page *page, struct rpc_cred *cred)
6618 {
6619 struct nfs_server *server = NFS_SERVER(inode);
6620 struct nfs_client *clp = server->nfs_client;
6621 const struct nfs4_mig_recovery_ops *ops =
6622 clp->cl_mvops->mig_recovery_ops;
6623 struct nfs4_exception exception = { };
6624 int status;
6625
6626 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6627 (unsigned long long)server->fsid.major,
6628 (unsigned long long)server->fsid.minor,
6629 clp->cl_hostname);
6630 nfs_display_fhandle(NFS_FH(inode), __func__);
6631
6632 do {
6633 status = ops->get_locations(inode, locations, page, cred);
6634 if (status != -NFS4ERR_DELAY)
6635 break;
6636 nfs4_handle_exception(server, status, &exception);
6637 } while (exception.retry);
6638 return status;
6639 }
6640
6641 /*
6642 * This operation also signals the server that this client is
6643 * performing "lease moved" recovery. The server can stop
6644 * returning NFS4ERR_LEASE_MOVED to this client. A RENEW operation
6645 * is appended to this compound to identify the client ID which is
6646 * performing recovery.
6647 */
6648 static int _nfs40_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6649 {
6650 struct nfs_server *server = NFS_SERVER(inode);
6651 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
6652 struct rpc_clnt *clnt = server->client;
6653 struct nfs4_fsid_present_arg args = {
6654 .fh = NFS_FH(inode),
6655 .clientid = clp->cl_clientid,
6656 .renew = 1, /* append RENEW */
6657 };
6658 struct nfs4_fsid_present_res res = {
6659 .renew = 1,
6660 };
6661 struct rpc_message msg = {
6662 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6663 .rpc_argp = &args,
6664 .rpc_resp = &res,
6665 .rpc_cred = cred,
6666 };
6667 unsigned long now = jiffies;
6668 int status;
6669
6670 res.fh = nfs_alloc_fhandle();
6671 if (res.fh == NULL)
6672 return -ENOMEM;
6673
6674 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6675 nfs4_set_sequence_privileged(&args.seq_args);
6676 status = nfs4_call_sync_sequence(clnt, server, &msg,
6677 &args.seq_args, &res.seq_res);
6678 nfs_free_fhandle(res.fh);
6679 if (status)
6680 return status;
6681
6682 do_renew_lease(clp, now);
6683 return 0;
6684 }
6685
6686 #ifdef CONFIG_NFS_V4_1
6687
6688 /*
6689 * This operation also signals the server that this client is
6690 * performing "lease moved" recovery. The server can stop asserting
6691 * SEQ4_STATUS_LEASE_MOVED for this client. The client ID performing
6692 * this operation is identified in the SEQUENCE operation in this
6693 * compound.
6694 */
6695 static int _nfs41_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6696 {
6697 struct nfs_server *server = NFS_SERVER(inode);
6698 struct rpc_clnt *clnt = server->client;
6699 struct nfs4_fsid_present_arg args = {
6700 .fh = NFS_FH(inode),
6701 };
6702 struct nfs4_fsid_present_res res = {
6703 };
6704 struct rpc_message msg = {
6705 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FSID_PRESENT],
6706 .rpc_argp = &args,
6707 .rpc_resp = &res,
6708 .rpc_cred = cred,
6709 };
6710 int status;
6711
6712 res.fh = nfs_alloc_fhandle();
6713 if (res.fh == NULL)
6714 return -ENOMEM;
6715
6716 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
6717 nfs4_set_sequence_privileged(&args.seq_args);
6718 status = nfs4_call_sync_sequence(clnt, server, &msg,
6719 &args.seq_args, &res.seq_res);
6720 nfs_free_fhandle(res.fh);
6721 if (status == NFS4_OK &&
6722 res.seq_res.sr_status_flags & SEQ4_STATUS_LEASE_MOVED)
6723 status = -NFS4ERR_LEASE_MOVED;
6724 return status;
6725 }
6726
6727 #endif /* CONFIG_NFS_V4_1 */
6728
6729 /**
6730 * nfs4_proc_fsid_present - Is this FSID present or absent on server?
6731 * @inode: inode on FSID to check
6732 * @cred: credential to use for this operation
6733 *
6734 * Server indicates whether the FSID is present, moved, or not
6735 * recognized. This operation is necessary to clear a LEASE_MOVED
6736 * condition for this client ID.
6737 *
6738 * Returns NFS4_OK if the FSID is present on this server,
6739 * -NFS4ERR_MOVED if the FSID is no longer present, a negative
6740 * NFS4ERR code if some error occurred on the server, or a
6741 * negative errno if a local failure occurred.
6742 */
6743 int nfs4_proc_fsid_present(struct inode *inode, struct rpc_cred *cred)
6744 {
6745 struct nfs_server *server = NFS_SERVER(inode);
6746 struct nfs_client *clp = server->nfs_client;
6747 const struct nfs4_mig_recovery_ops *ops =
6748 clp->cl_mvops->mig_recovery_ops;
6749 struct nfs4_exception exception = { };
6750 int status;
6751
6752 dprintk("%s: FSID %llx:%llx on \"%s\"\n", __func__,
6753 (unsigned long long)server->fsid.major,
6754 (unsigned long long)server->fsid.minor,
6755 clp->cl_hostname);
6756 nfs_display_fhandle(NFS_FH(inode), __func__);
6757
6758 do {
6759 status = ops->fsid_present(inode, cred);
6760 if (status != -NFS4ERR_DELAY)
6761 break;
6762 nfs4_handle_exception(server, status, &exception);
6763 } while (exception.retry);
6764 return status;
6765 }
6766
6767 /*
6768 * If 'use_integrity' is true and the state management nfs_client
6769 * cl_rpcclient is using krb5i/p, use the integrity protected cl_rpcclient
6770 * and the machine credential as per RFC3530bis and RFC5661 Security
6771 * Considerations sections. Otherwise, just use the user cred with the
6772 * filesystem's rpc_client.
6773 */
6774 static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors, bool use_integrity)
6775 {
6776 int status;
6777 struct nfs4_secinfo_arg args = {
6778 .dir_fh = NFS_FH(dir),
6779 .name = name,
6780 };
6781 struct nfs4_secinfo_res res = {
6782 .flavors = flavors,
6783 };
6784 struct rpc_message msg = {
6785 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO],
6786 .rpc_argp = &args,
6787 .rpc_resp = &res,
6788 };
6789 struct rpc_clnt *clnt = NFS_SERVER(dir)->client;
6790 struct rpc_cred *cred = NULL;
6791
6792 if (use_integrity) {
6793 clnt = NFS_SERVER(dir)->nfs_client->cl_rpcclient;
6794 cred = nfs4_get_clid_cred(NFS_SERVER(dir)->nfs_client);
6795 msg.rpc_cred = cred;
6796 }
6797
6798 dprintk("NFS call secinfo %s\n", name->name);
6799
6800 nfs4_state_protect(NFS_SERVER(dir)->nfs_client,
6801 NFS_SP4_MACH_CRED_SECINFO, &clnt, &msg);
6802
6803 status = nfs4_call_sync(clnt, NFS_SERVER(dir), &msg, &args.seq_args,
6804 &res.seq_res, 0);
6805 dprintk("NFS reply secinfo: %d\n", status);
6806
6807 if (cred)
6808 put_rpccred(cred);
6809
6810 return status;
6811 }
6812
6813 int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name,
6814 struct nfs4_secinfo_flavors *flavors)
6815 {
6816 struct nfs4_exception exception = { };
6817 int err;
6818 do {
6819 err = -NFS4ERR_WRONGSEC;
6820
6821 /* try to use integrity protection with machine cred */
6822 if (_nfs4_is_integrity_protected(NFS_SERVER(dir)->nfs_client))
6823 err = _nfs4_proc_secinfo(dir, name, flavors, true);
6824
6825 /*
6826 * If integrity protection cannot be used, or if SECINFO with
6827 * integrity protection returns NFS4ERR_WRONGSEC (which is
6828 * disallowed by the spec but seen from deployed servers), use
6829 * the current filesystem's rpc_client and the user cred.
6830 */
6831 if (err == -NFS4ERR_WRONGSEC)
6832 err = _nfs4_proc_secinfo(dir, name, flavors, false);
6833
6834 trace_nfs4_secinfo(dir, name, err);
6835 err = nfs4_handle_exception(NFS_SERVER(dir), err,
6836 &exception);
6837 } while (exception.retry);
6838 return err;
6839 }
6840
6841 #ifdef CONFIG_NFS_V4_1
6842 /*
6843 * Check the exchange flags returned by the server for invalid combinations:
6844 * flags outside the allowed mask, both the PNFS and NON_PNFS flags set, or
6845 * none of the NON_PNFS, PNFS, or DS flags set.
6846 */
6847 static int nfs4_check_cl_exchange_flags(u32 flags)
6848 {
6849 if (flags & ~EXCHGID4_FLAG_MASK_R)
6850 goto out_inval;
6851 if ((flags & EXCHGID4_FLAG_USE_PNFS_MDS) &&
6852 (flags & EXCHGID4_FLAG_USE_NON_PNFS))
6853 goto out_inval;
6854 if (!(flags & (EXCHGID4_FLAG_MASK_PNFS)))
6855 goto out_inval;
6856 return NFS_OK;
6857 out_inval:
6858 return -NFS4ERR_INVAL;
6859 }
6860
6861 static bool
6862 nfs41_same_server_scope(struct nfs41_server_scope *a,
6863 struct nfs41_server_scope *b)
6864 {
6865 if (a->server_scope_sz == b->server_scope_sz &&
6866 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
6867 return true;
6868
6869 return false;
6870 }
6871
6872 static void
6873 nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
6874 {
6875 }
6876
6877 static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
6878 .rpc_call_done = &nfs4_bind_one_conn_to_session_done,
6879 };
6880
6881 /*
6882 * nfs4_proc_bind_one_conn_to_session()
6883 *
6884 * The 4.1 client currently uses the same TCP connection for the
6885 * fore and backchannel.
6886 */
6887 static
6888 int nfs4_proc_bind_one_conn_to_session(struct rpc_clnt *clnt,
6889 struct rpc_xprt *xprt,
6890 struct nfs_client *clp,
6891 struct rpc_cred *cred)
6892 {
6893 int status;
6894 struct nfs41_bind_conn_to_session_args args = {
6895 .client = clp,
6896 .dir = NFS4_CDFC4_FORE_OR_BOTH,
6897 };
6898 struct nfs41_bind_conn_to_session_res res;
6899 struct rpc_message msg = {
6900 .rpc_proc =
6901 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
6902 .rpc_argp = &args,
6903 .rpc_resp = &res,
6904 .rpc_cred = cred,
6905 };
6906 struct rpc_task_setup task_setup_data = {
6907 .rpc_client = clnt,
6908 .rpc_xprt = xprt,
6909 .callback_ops = &nfs4_bind_one_conn_to_session_ops,
6910 .rpc_message = &msg,
6911 .flags = RPC_TASK_TIMEOUT,
6912 };
6913 struct rpc_task *task;
6914
6915 dprintk("--> %s\n", __func__);
6916
6917 nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
6918 if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
6919 args.dir = NFS4_CDFC4_FORE;
6920
6921 /* Do not set the backchannel flag unless this is clnt->cl_xprt */
6922 if (xprt != rcu_access_pointer(clnt->cl_xprt))
6923 args.dir = NFS4_CDFC4_FORE;
6924
6925 task = rpc_run_task(&task_setup_data);
6926 if (!IS_ERR(task)) {
6927 status = task->tk_status;
6928 rpc_put_task(task);
6929 } else
6930 status = PTR_ERR(task);
6931 trace_nfs4_bind_conn_to_session(clp, status);
6932 if (status == 0) {
6933 if (memcmp(res.sessionid.data,
6934 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
6935 dprintk("NFS: %s: Session ID mismatch\n", __func__);
6936 status = -EIO;
6937 goto out;
6938 }
6939 if ((res.dir & args.dir) != res.dir || res.dir == 0) {
6940 dprintk("NFS: %s: Unexpected direction from server\n",
6941 __func__);
6942 status = -EIO;
6943 goto out;
6944 }
6945 if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
6946 dprintk("NFS: %s: Server returned RDMA mode = true\n",
6947 __func__);
6948 status = -EIO;
6949 goto out;
6950 }
6951 }
6952 out:
6953 dprintk("<-- %s status= %d\n", __func__, status);
6954 return status;
6955 }
6956
6957 struct rpc_bind_conn_calldata {
6958 struct nfs_client *clp;
6959 struct rpc_cred *cred;
6960 };
6961
6962 static int
6963 nfs4_proc_bind_conn_to_session_callback(struct rpc_clnt *clnt,
6964 struct rpc_xprt *xprt,
6965 void *calldata)
6966 {
6967 struct rpc_bind_conn_calldata *p = calldata;
6968
6969 return nfs4_proc_bind_one_conn_to_session(clnt, xprt, p->clp, p->cred);
6970 }
6971
6972 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
6973 {
6974 struct rpc_bind_conn_calldata data = {
6975 .clp = clp,
6976 .cred = cred,
6977 };
6978 return rpc_clnt_iterate_for_each_xprt(clp->cl_rpcclient,
6979 nfs4_proc_bind_conn_to_session_callback, &data);
6980 }
6981
6982 /*
6983 * Minimum set of SP4_MACH_CRED operations from RFC 5661 in the enforce map,
6984 * plus the operations we'd like to see in the allow map to enable certain features.
6985 */
6986 static const struct nfs41_state_protection nfs4_sp4_mach_cred_request = {
6987 .how = SP4_MACH_CRED,
6988 .enforce.u.words = {
6989 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
6990 1 << (OP_EXCHANGE_ID - 32) |
6991 1 << (OP_CREATE_SESSION - 32) |
6992 1 << (OP_DESTROY_SESSION - 32) |
6993 1 << (OP_DESTROY_CLIENTID - 32)
6994 },
6995 .allow.u.words = {
6996 [0] = 1 << (OP_CLOSE) |
6997 1 << (OP_OPEN_DOWNGRADE) |
6998 1 << (OP_LOCKU) |
6999 1 << (OP_DELEGRETURN) |
7000 1 << (OP_COMMIT),
7001 [1] = 1 << (OP_SECINFO - 32) |
7002 1 << (OP_SECINFO_NO_NAME - 32) |
7003 1 << (OP_LAYOUTRETURN - 32) |
7004 1 << (OP_TEST_STATEID - 32) |
7005 1 << (OP_FREE_STATEID - 32) |
7006 1 << (OP_WRITE - 32)
7007 }
7008 };
7009
7010 /*
7011 * Select the state protection mode for client `clp' given the server results
7012 * from exchange_id in `sp'.
7013 *
7014 * Returns 0 on success, negative errno otherwise.
7015 */
7016 static int nfs4_sp4_select_mode(struct nfs_client *clp,
7017 struct nfs41_state_protection *sp)
7018 {
7019 static const u32 supported_enforce[NFS4_OP_MAP_NUM_WORDS] = {
7020 [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
7021 1 << (OP_EXCHANGE_ID - 32) |
7022 1 << (OP_CREATE_SESSION - 32) |
7023 1 << (OP_DESTROY_SESSION - 32) |
7024 1 << (OP_DESTROY_CLIENTID - 32)
7025 };
7026 unsigned int i;
7027
7028 if (sp->how == SP4_MACH_CRED) {
7029 /* Print state protect result */
7030 dfprintk(MOUNT, "Server SP4_MACH_CRED support:\n");
7031 for (i = 0; i <= LAST_NFS4_OP; i++) {
7032 if (test_bit(i, sp->enforce.u.longs))
7033 dfprintk(MOUNT, " enforce op %d\n", i);
7034 if (test_bit(i, sp->allow.u.longs))
7035 dfprintk(MOUNT, " allow op %d\n", i);
7036 }
7037
7038 /* make sure nothing is on the enforce list that isn't supported */
7039 for (i = 0; i < NFS4_OP_MAP_NUM_WORDS; i++) {
7040 if (sp->enforce.u.words[i] & ~supported_enforce[i]) {
7041 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7042 return -EINVAL;
7043 }
7044 }
7045
7046 /*
7047 * Minimal mode - state operations are allowed to use machine
7048 * credential. Note this already happens by default, so the
7049 * client doesn't have to do anything more than the negotiation.
7050 *
7051 * NOTE: we don't care if EXCHANGE_ID is in the list -
7052 * we're already using the machine cred for exchange_id
7053 * and will never use a different cred.
7054 */
7055 if (test_bit(OP_BIND_CONN_TO_SESSION, sp->enforce.u.longs) &&
7056 test_bit(OP_CREATE_SESSION, sp->enforce.u.longs) &&
7057 test_bit(OP_DESTROY_SESSION, sp->enforce.u.longs) &&
7058 test_bit(OP_DESTROY_CLIENTID, sp->enforce.u.longs)) {
7059 dfprintk(MOUNT, "sp4_mach_cred:\n");
7060 dfprintk(MOUNT, " minimal mode enabled\n");
7061 set_bit(NFS_SP4_MACH_CRED_MINIMAL, &clp->cl_sp4_flags);
7062 } else {
7063 dfprintk(MOUNT, "sp4_mach_cred: disabled\n");
7064 return -EINVAL;
7065 }
7066
7067 if (test_bit(OP_CLOSE, sp->allow.u.longs) &&
7068 test_bit(OP_OPEN_DOWNGRADE, sp->allow.u.longs) &&
7069 test_bit(OP_DELEGRETURN, sp->allow.u.longs) &&
7070 test_bit(OP_LOCKU, sp->allow.u.longs)) {
7071 dfprintk(MOUNT, " cleanup mode enabled\n");
7072 set_bit(NFS_SP4_MACH_CRED_CLEANUP, &clp->cl_sp4_flags);
7073 }
7074
7075 if (test_bit(OP_LAYOUTRETURN, sp->allow.u.longs)) {
7076 dfprintk(MOUNT, " pnfs cleanup mode enabled\n");
7077 set_bit(NFS_SP4_MACH_CRED_PNFS_CLEANUP,
7078 &clp->cl_sp4_flags);
7079 }
7080
7081 if (test_bit(OP_SECINFO, sp->allow.u.longs) &&
7082 test_bit(OP_SECINFO_NO_NAME, sp->allow.u.longs)) {
7083 dfprintk(MOUNT, " secinfo mode enabled\n");
7084 set_bit(NFS_SP4_MACH_CRED_SECINFO, &clp->cl_sp4_flags);
7085 }
7086
7087 if (test_bit(OP_TEST_STATEID, sp->allow.u.longs) &&
7088 test_bit(OP_FREE_STATEID, sp->allow.u.longs)) {
7089 dfprintk(MOUNT, " stateid mode enabled\n");
7090 set_bit(NFS_SP4_MACH_CRED_STATEID, &clp->cl_sp4_flags);
7091 }
7092
7093 if (test_bit(OP_WRITE, sp->allow.u.longs)) {
7094 dfprintk(MOUNT, " write mode enabled\n");
7095 set_bit(NFS_SP4_MACH_CRED_WRITE, &clp->cl_sp4_flags);
7096 }
7097
7098 if (test_bit(OP_COMMIT, sp->allow.u.longs)) {
7099 dfprintk(MOUNT, " commit mode enabled\n");
7100 set_bit(NFS_SP4_MACH_CRED_COMMIT, &clp->cl_sp4_flags);
7101 }
7102 }
7103
7104 return 0;
7105 }
7106
7107 /*
7108 * _nfs4_proc_exchange_id()
7109 *
7110 * Wrapper for EXCHANGE_ID operation.
7111 */
7112 static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
7113 u32 sp4_how)
7114 {
7115 nfs4_verifier verifier;
7116 struct nfs41_exchange_id_args args = {
7117 .verifier = &verifier,
7118 .client = clp,
7119 #ifdef CONFIG_NFS_V4_1_MIGRATION
7120 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7121 EXCHGID4_FLAG_BIND_PRINC_STATEID |
7122 EXCHGID4_FLAG_SUPP_MOVED_MIGR,
7123 #else
7124 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER |
7125 EXCHGID4_FLAG_BIND_PRINC_STATEID,
7126 #endif
7127 };
7128 struct nfs41_exchange_id_res res = {
7129 0
7130 };
7131 int status;
7132 struct rpc_message msg = {
7133 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_EXCHANGE_ID],
7134 .rpc_argp = &args,
7135 .rpc_resp = &res,
7136 .rpc_cred = cred,
7137 };
7138
7139 nfs4_init_boot_verifier(clp, &verifier);
7140
7141 status = nfs4_init_uniform_client_string(clp);
7142 if (status)
7143 goto out;
7144
7145 dprintk("NFS call exchange_id auth=%s, '%s'\n",
7146 clp->cl_rpcclient->cl_auth->au_ops->au_name,
7147 clp->cl_owner_id);
7148
7149 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
7150 GFP_NOFS);
7151 if (unlikely(res.server_owner == NULL)) {
7152 status = -ENOMEM;
7153 goto out;
7154 }
7155
7156 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
7157 GFP_NOFS);
7158 if (unlikely(res.server_scope == NULL)) {
7159 status = -ENOMEM;
7160 goto out_server_owner;
7161 }
7162
7163 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
7164 if (unlikely(res.impl_id == NULL)) {
7165 status = -ENOMEM;
7166 goto out_server_scope;
7167 }
7168
7169 switch (sp4_how) {
7170 case SP4_NONE:
7171 args.state_protect.how = SP4_NONE;
7172 break;
7173
7174 case SP4_MACH_CRED:
7175 args.state_protect = nfs4_sp4_mach_cred_request;
7176 break;
7177
7178 default:
7179 /* unsupported! */
7180 WARN_ON_ONCE(1);
7181 status = -EINVAL;
7182 goto out_impl_id;
7183 }
7184
7185 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7186 trace_nfs4_exchange_id(clp, status);
7187 if (status == 0)
7188 status = nfs4_check_cl_exchange_flags(res.flags);
7189
7190 if (status == 0)
7191 status = nfs4_sp4_select_mode(clp, &res.state_protect);
7192
7193 if (status == 0) {
7194 clp->cl_clientid = res.clientid;
7195 clp->cl_exchange_flags = res.flags;
7196 /* Client ID is not confirmed */
7197 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R)) {
7198 clear_bit(NFS4_SESSION_ESTABLISHED,
7199 &clp->cl_session->session_state);
7200 clp->cl_seqid = res.seqid;
7201 }
7202
7203 kfree(clp->cl_serverowner);
7204 clp->cl_serverowner = res.server_owner;
7205 res.server_owner = NULL;
7206
7207 /* use the most recent implementation id */
7208 kfree(clp->cl_implid);
7209 clp->cl_implid = res.impl_id;
7210 res.impl_id = NULL;
7211
7212 if (clp->cl_serverscope != NULL &&
7213 !nfs41_same_server_scope(clp->cl_serverscope,
7214 res.server_scope)) {
7215 dprintk("%s: server_scope mismatch detected\n",
7216 __func__);
7217 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
7218 kfree(clp->cl_serverscope);
7219 clp->cl_serverscope = NULL;
7220 }
7221
7222 if (clp->cl_serverscope == NULL) {
7223 clp->cl_serverscope = res.server_scope;
7224 res.server_scope = NULL;
7225 }
7226 }
7227
7228 out_impl_id:
7229 kfree(res.impl_id);
7230 out_server_scope:
7231 kfree(res.server_scope);
7232 out_server_owner:
7233 kfree(res.server_owner);
7234 out:
7235 if (clp->cl_implid != NULL)
7236 dprintk("NFS reply exchange_id: Server Implementation ID: "
7237 "domain: %s, name: %s, date: %llu,%u\n",
7238 clp->cl_implid->domain, clp->cl_implid->name,
7239 clp->cl_implid->date.seconds,
7240 clp->cl_implid->date.nseconds);
7241 dprintk("NFS reply exchange_id: %d\n", status);
7242 return status;
7243 }
7244
7245 /*
7246 * nfs4_proc_exchange_id()
7247 *
7248 * Returns zero, a negative errno, or a negative NFS4ERR status code.
7249 *
7250 * Since the clientid has expired, all compounds using sessions
7251 * associated with the stale clientid will be returning
7252 * NFS4ERR_BADSESSION in the sequence operation, and will therefore
7253 * be in some phase of session reset.
7254 *
7255 * Will attempt to negotiate SP4_MACH_CRED if krb5i / krb5p auth is used.
7256 */
7257 int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
7258 {
7259 rpc_authflavor_t authflavor = clp->cl_rpcclient->cl_auth->au_flavor;
7260 int status;
7261
7262 /* try SP4_MACH_CRED if krb5i/p */
7263 if (authflavor == RPC_AUTH_GSS_KRB5I ||
7264 authflavor == RPC_AUTH_GSS_KRB5P) {
7265 status = _nfs4_proc_exchange_id(clp, cred, SP4_MACH_CRED);
7266 if (!status)
7267 return 0;
7268 }
7269
7270 /* try SP4_NONE */
7271 return _nfs4_proc_exchange_id(clp, cred, SP4_NONE);
7272 }
7273
7274 static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
7275 struct rpc_cred *cred)
7276 {
7277 struct rpc_message msg = {
7278 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
7279 .rpc_argp = clp,
7280 .rpc_cred = cred,
7281 };
7282 int status;
7283
7284 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7285 trace_nfs4_destroy_clientid(clp, status);
7286 if (status)
7287 dprintk("NFS: Got error %d from the server %s on "
7288 "DESTROY_CLIENTID.", status, clp->cl_hostname);
7289 return status;
7290 }
7291
7292 static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
7293 struct rpc_cred *cred)
7294 {
7295 unsigned int loop;
7296 int ret;
7297
7298 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
7299 ret = _nfs4_proc_destroy_clientid(clp, cred);
7300 switch (ret) {
7301 case -NFS4ERR_DELAY:
7302 case -NFS4ERR_CLIENTID_BUSY:
7303 ssleep(1);
7304 break;
7305 default:
7306 return ret;
7307 }
7308 }
7309 return 0;
7310 }
7311
7312 int nfs4_destroy_clientid(struct nfs_client *clp)
7313 {
7314 struct rpc_cred *cred;
7315 int ret = 0;
7316
7317 if (clp->cl_mvops->minor_version < 1)
7318 goto out;
7319 if (clp->cl_exchange_flags == 0)
7320 goto out;
7321 if (clp->cl_preserve_clid)
7322 goto out;
7323 cred = nfs4_get_clid_cred(clp);
7324 ret = nfs4_proc_destroy_clientid(clp, cred);
7325 if (cred)
7326 put_rpccred(cred);
7327 switch (ret) {
7328 case 0:
7329 case -NFS4ERR_STALE_CLIENTID:
7330 clp->cl_exchange_flags = 0;
7331 }
7332 out:
7333 return ret;
7334 }
7335
7336 struct nfs4_get_lease_time_data {
7337 struct nfs4_get_lease_time_args *args;
7338 struct nfs4_get_lease_time_res *res;
7339 struct nfs_client *clp;
7340 };
7341
7342 static void nfs4_get_lease_time_prepare(struct rpc_task *task,
7343 void *calldata)
7344 {
7345 struct nfs4_get_lease_time_data *data =
7346 (struct nfs4_get_lease_time_data *)calldata;
7347
7348 dprintk("--> %s\n", __func__);
7349 /* Just set up the sequence; do not trigger session recovery,
7350 since we're invoked from within one. */
7351 nfs41_setup_sequence(data->clp->cl_session,
7352 &data->args->la_seq_args,
7353 &data->res->lr_seq_res,
7354 task);
7355 dprintk("<-- %s\n", __func__);
7356 }
7357
7358 /*
7359 * Called from nfs4_state_manager thread for session setup, so don't recover
7360 * from sequence operation or clientid errors.
7361 */
7362 static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata)
7363 {
7364 struct nfs4_get_lease_time_data *data =
7365 (struct nfs4_get_lease_time_data *)calldata;
7366
7367 dprintk("--> %s\n", __func__);
7368 if (!nfs41_sequence_done(task, &data->res->lr_seq_res))
7369 return;
7370 switch (task->tk_status) {
7371 case -NFS4ERR_DELAY:
7372 case -NFS4ERR_GRACE:
7373 dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status);
7374 rpc_delay(task, NFS4_POLL_RETRY_MIN);
7375 task->tk_status = 0;
7376 /* fall through */
7377 case -NFS4ERR_RETRY_UNCACHED_REP:
7378 rpc_restart_call_prepare(task);
7379 return;
7380 }
7381 dprintk("<-- %s\n", __func__);
7382 }
7383
7384 static const struct rpc_call_ops nfs4_get_lease_time_ops = {
7385 .rpc_call_prepare = nfs4_get_lease_time_prepare,
7386 .rpc_call_done = nfs4_get_lease_time_done,
7387 };
7388
7389 int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo)
7390 {
7391 struct rpc_task *task;
7392 struct nfs4_get_lease_time_args args;
7393 struct nfs4_get_lease_time_res res = {
7394 .lr_fsinfo = fsinfo,
7395 };
7396 struct nfs4_get_lease_time_data data = {
7397 .args = &args,
7398 .res = &res,
7399 .clp = clp,
7400 };
7401 struct rpc_message msg = {
7402 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GET_LEASE_TIME],
7403 .rpc_argp = &args,
7404 .rpc_resp = &res,
7405 };
7406 struct rpc_task_setup task_setup = {
7407 .rpc_client = clp->cl_rpcclient,
7408 .rpc_message = &msg,
7409 .callback_ops = &nfs4_get_lease_time_ops,
7410 .callback_data = &data,
7411 .flags = RPC_TASK_TIMEOUT,
7412 };
7413 int status;
7414
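/* Mark the sequence privileged so it is not blocked while the session is still being established by the state manager */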
7415 nfs4_init_sequence(&args.la_seq_args, &res.lr_seq_res, 0);
7416 nfs4_set_sequence_privileged(&args.la_seq_args);
7417 dprintk("--> %s\n", __func__);
7418 task = rpc_run_task(&task_setup);
7419
7420 if (IS_ERR(task))
7421 status = PTR_ERR(task);
7422 else {
7423 status = task->tk_status;
7424 rpc_put_task(task);
7425 }
7426 dprintk("<-- %s return %d\n", __func__, status);
7427
7428 return status;
7429 }
7430
7431 /*
7432 * Initialize the values to be used by the client in CREATE_SESSION.
7433 * If nfs4_init_session set the fore channel request and response sizes,
7434 * use them.
7435 *
7436 * Set the back channel max_resp_sz_cached to zero to force the client to
7437 * always set csa_cachethis to FALSE because the current implementation
7438 * of the back channel DRC only supports caching the CB_SEQUENCE operation.
7439 */
7440 static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args,
7441 struct rpc_clnt *clnt)
7442 {
7443 unsigned int max_rqst_sz, max_resp_sz;
7444 unsigned int max_bc_payload = rpc_max_bc_payload(clnt);
7445
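/* Size the fore channel for a maximal READ/WRITE payload plus the XDR overhead of the enclosing compound */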
7446 max_rqst_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxwrite_overhead;
7447 max_resp_sz = NFS_MAX_FILE_IO_SIZE + nfs41_maxread_overhead;
7448
7449 /* Fore channel attributes */
7450 args->fc_attrs.max_rqst_sz = max_rqst_sz;
7451 args->fc_attrs.max_resp_sz = max_resp_sz;
7452 args->fc_attrs.max_ops = NFS4_MAX_OPS;
7453 args->fc_attrs.max_reqs = max_session_slots;
7454
7455 dprintk("%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u "
7456 "max_ops=%u max_reqs=%u\n",
7457 __func__,
7458 args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz,
7459 args->fc_attrs.max_ops, args->fc_attrs.max_reqs);
7460
7461 /* Back channel attributes */
7462 args->bc_attrs.max_rqst_sz = max_bc_payload;
7463 args->bc_attrs.max_resp_sz = max_bc_payload;
7464 args->bc_attrs.max_resp_sz_cached = 0;
7465 args->bc_attrs.max_ops = NFS4_MAX_BACK_CHANNEL_OPS;
7466 args->bc_attrs.max_reqs = NFS41_BC_MAX_CALLBACKS;
7467
7468 dprintk("%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u "
7469 "max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n",
7470 __func__,
7471 args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz,
7472 args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops,
7473 args->bc_attrs.max_reqs);
7474 }
7475
7476 static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
7477 struct nfs41_create_session_res *res)
7478 {
7479 struct nfs4_channel_attrs *sent = &args->fc_attrs;
7480 struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
7481
7482 if (rcvd->max_resp_sz > sent->max_resp_sz)
7483 return -EINVAL;
7484 /*
7485 * Our requested max_ops is the minimum we need; we're not
7486 * prepared to break up compounds into smaller pieces than that.
7487 * So, no point even trying to continue if the server won't
7488 * cooperate:
7489 */
7490 if (rcvd->max_ops < sent->max_ops)
7491 return -EINVAL;
7492 if (rcvd->max_reqs == 0)
7493 return -EINVAL;
7494 if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
7495 rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
7496 return 0;
7497 }
7498
7499 static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
7500 struct nfs41_create_session_res *res)
7501 {
7502 struct nfs4_channel_attrs *sent = &args->bc_attrs;
7503 struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
7504
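/* Nothing to verify unless the server actually granted a back channel */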
7505 if (!(res->flags & SESSION4_BACK_CHAN))
7506 goto out;
7507 if (rcvd->max_rqst_sz > sent->max_rqst_sz)
7508 return -EINVAL;
7509 if (rcvd->max_resp_sz < sent->max_resp_sz)
7510 return -EINVAL;
7511 if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
7512 return -EINVAL;
7513 /* These would render the backchannel useless: */
7514 if (rcvd->max_ops != sent->max_ops)
7515 return -EINVAL;
7516 if (rcvd->max_reqs != sent->max_reqs)
7517 return -EINVAL;
7518 out:
7519 return 0;
7520 }
7521
7522 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
7523 struct nfs41_create_session_res *res)
7524 {
7525 int ret;
7526
7527 ret = nfs4_verify_fore_channel_attrs(args, res);
7528 if (ret)
7529 return ret;
7530 return nfs4_verify_back_channel_attrs(args, res);
7531 }
7532
7533 static void nfs4_update_session(struct nfs4_session *session,
7534 struct nfs41_create_session_res *res)
7535 {
7536 nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
7537 /* Mark client id and session as being confirmed */
7538 session->clp->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
7539 set_bit(NFS4_SESSION_ESTABLISHED, &session->session_state);
7540 session->flags = res->flags;
7541 memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
7542 if (res->flags & SESSION4_BACK_CHAN)
7543 memcpy(&session->bc_attrs, &res->bc_attrs,
7544 sizeof(session->bc_attrs));
7545 }
7546
7547 static int _nfs4_proc_create_session(struct nfs_client *clp,
7548 struct rpc_cred *cred)
7549 {
7550 struct nfs4_session *session = clp->cl_session;
7551 struct nfs41_create_session_args args = {
7552 .client = clp,
7553 .clientid = clp->cl_clientid,
7554 .seqid = clp->cl_seqid,
7555 .cb_program = NFS4_CALLBACK,
7556 };
7557 struct nfs41_create_session_res res;
7558
7559 struct rpc_message msg = {
7560 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
7561 .rpc_argp = &args,
7562 .rpc_resp = &res,
7563 .rpc_cred = cred,
7564 };
7565 int status;
7566
7567 nfs4_init_channel_attrs(&args, clp->cl_rpcclient);
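/* Request a persistent session with a back channel */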
7568 args.flags = (SESSION4_PERSIST | SESSION4_BACK_CHAN);
7569
7570 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7571 trace_nfs4_create_session(clp, status);
7572
7573 switch (status) {
7574 case -NFS4ERR_STALE_CLIENTID:
7575 case -NFS4ERR_DELAY:
7576 case -ETIMEDOUT:
7577 case -EACCES:
7578 case -EAGAIN:
7579 goto out;
7580 }
7581
7582 /* Increment the clientid slot sequence id */
7583 clp->cl_seqid++;
7584 if (!status) {
7585 /* Verify the session's negotiated channel_attrs values */
7586 status = nfs4_verify_channel_attrs(&args, &res);
7587 if (status)
7588 goto out;
7589 nfs4_update_session(session, &res);
7590 }
7591 out:
7592 return status;
7593 }
7594
7595 /*
7596 * Issues a CREATE_SESSION operation to the server.
7597 * It is the responsibility of the caller to verify the session is
7598 * expired before calling this routine.
7599 */
7600 int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
7601 {
7602 int status;
7603 unsigned *ptr;
7604 struct nfs4_session *session = clp->cl_session;
7605
7606 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
7607
7608 status = _nfs4_proc_create_session(clp, cred);
7609 if (status)
7610 goto out;
7611
7612 /* Init or reset the session slot tables */
7613 status = nfs4_setup_session_slot_tables(session);
7614 dprintk("slot table setup returned %d\n", status);
7615 if (status)
7616 goto out;
7617
7618 ptr = (unsigned *)&session->sess_id.data[0];
7619 dprintk("%s client>seqid %d sessionid %u:%u:%u:%u\n", __func__,
7620 clp->cl_seqid, ptr[0], ptr[1], ptr[2], ptr[3]);
7621 out:
7622 dprintk("<-- %s\n", __func__);
7623 return status;
7624 }
7625
7626 /*
7627 * Issue the over-the-wire RPC DESTROY_SESSION.
7628 * The caller must serialize access to this routine.
7629 */
7630 int nfs4_proc_destroy_session(struct nfs4_session *session,
7631 struct rpc_cred *cred)
7632 {
7633 struct rpc_message msg = {
7634 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
7635 .rpc_argp = session,
7636 .rpc_cred = cred,
7637 };
7638 int status = 0;
7639
7640 dprintk("--> nfs4_proc_destroy_session\n");
7641
7642 /* session is still being setup */
7643 if (!test_and_clear_bit(NFS4_SESSION_ESTABLISHED, &session->session_state))
7644 return 0;
7645
7646 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
7647 trace_nfs4_destroy_session(session->clp, status);
7648
7649 if (status)
7650 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
7651 "Session has been destroyed regardless...\n", status);
7652
7653 dprintk("<-- nfs4_proc_destroy_session\n");
7654 return status;
7655 }
7656
7657 /*
7658 * Renew the cl_session lease.
7659 */
7660 struct nfs4_sequence_data {
7661 struct nfs_client *clp;
7662 struct nfs4_sequence_args args;
7663 struct nfs4_sequence_res res;
7664 };
7665
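/* Release callback for the lease-renewal SEQUENCE task: schedule the next renewal unless the client is being torn down, then drop our reference */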
7666 static void nfs41_sequence_release(void *data)
7667 {
7668 struct nfs4_sequence_data *calldata = data;
7669 struct nfs_client *clp = calldata->clp;
7670
7671 if (atomic_read(&clp->cl_count) > 1)
7672 nfs4_schedule_state_renewal(clp);
7673 nfs_put_client(clp);
7674 kfree(calldata);
7675 }
7676
7677 static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7678 {
7679 switch(task->tk_status) {
7680 case -NFS4ERR_DELAY:
7681 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7682 return -EAGAIN;
7683 default:
7684 nfs4_schedule_lease_recovery(clp);
7685 }
7686 return 0;
7687 }
7688
7689 static void nfs41_sequence_call_done(struct rpc_task *task, void *data)
7690 {
7691 struct nfs4_sequence_data *calldata = data;
7692 struct nfs_client *clp = calldata->clp;
7693
7694 if (!nfs41_sequence_done(task, task->tk_msg.rpc_resp))
7695 return;
7696
7697 trace_nfs4_sequence(clp, task->tk_status);
7698 if (task->tk_status < 0) {
7699 dprintk("%s ERROR %d\n", __func__, task->tk_status);
7700 if (atomic_read(&clp->cl_count) == 1)
7701 goto out;
7702
7703 if (nfs41_sequence_handle_errors(task, clp) == -EAGAIN) {
7704 rpc_restart_call_prepare(task);
7705 return;
7706 }
7707 }
7708 dprintk("%s rpc_cred %p\n", __func__, task->tk_msg.rpc_cred);
7709 out:
7710 dprintk("<-- %s\n", __func__);
7711 }
7712
7713 static void nfs41_sequence_prepare(struct rpc_task *task, void *data)
7714 {
7715 struct nfs4_sequence_data *calldata = data;
7716 struct nfs_client *clp = calldata->clp;
7717 struct nfs4_sequence_args *args;
7718 struct nfs4_sequence_res *res;
7719
7720 args = task->tk_msg.rpc_argp;
7721 res = task->tk_msg.rpc_resp;
7722
7723 nfs41_setup_sequence(clp->cl_session, args, res, task);
7724 }
7725
7726 static const struct rpc_call_ops nfs41_sequence_ops = {
7727 .rpc_call_done = nfs41_sequence_call_done,
7728 .rpc_call_prepare = nfs41_sequence_prepare,
7729 .rpc_release = nfs41_sequence_release,
7730 };
7731
7732 static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp,
7733 struct rpc_cred *cred,
7734 bool is_privileged)
7735 {
7736 struct nfs4_sequence_data *calldata;
7737 struct rpc_message msg = {
7738 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SEQUENCE],
7739 .rpc_cred = cred,
7740 };
7741 struct rpc_task_setup task_setup_data = {
7742 .rpc_client = clp->cl_rpcclient,
7743 .rpc_message = &msg,
7744 .callback_ops = &nfs41_sequence_ops,
7745 .flags = RPC_TASK_ASYNC | RPC_TASK_TIMEOUT,
7746 };
7747
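/* Take a reference on the client; bail out if it is already being destroyed */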
7748 if (!atomic_inc_not_zero(&clp->cl_count))
7749 return ERR_PTR(-EIO);
7750 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7751 if (calldata == NULL) {
7752 nfs_put_client(clp);
7753 return ERR_PTR(-ENOMEM);
7754 }
7755 nfs4_init_sequence(&calldata->args, &calldata->res, 0);
7756 if (is_privileged)
7757 nfs4_set_sequence_privileged(&calldata->args);
7758 msg.rpc_argp = &calldata->args;
7759 msg.rpc_resp = &calldata->res;
7760 calldata->clp = clp;
7761 task_setup_data.callback_data = calldata;
7762
7763 return rpc_run_task(&task_setup_data);
7764 }
7765
7766 static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cred, unsigned renew_flags)
7767 {
7768 struct rpc_task *task;
7769 int ret = 0;
7770
7771 if ((renew_flags & NFS4_RENEW_TIMEOUT) == 0)
7772 return -EAGAIN;
7773 task = _nfs41_proc_sequence(clp, cred, false);
7774 if (IS_ERR(task))
7775 ret = PTR_ERR(task);
7776 else
7777 rpc_put_task_async(task);
7778 dprintk("<-- %s status=%d\n", __func__, ret);
7779 return ret;
7780 }
7781
7782 static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7783 {
7784 struct rpc_task *task;
7785 int ret;
7786
7787 task = _nfs41_proc_sequence(clp, cred, true);
7788 if (IS_ERR(task)) {
7789 ret = PTR_ERR(task);
7790 goto out;
7791 }
7792 ret = rpc_wait_for_completion_task(task);
7793 if (!ret)
7794 ret = task->tk_status;
7795 rpc_put_task(task);
7796 out:
7797 dprintk("<-- %s status=%d\n", __func__, ret);
7798 return ret;
7799 }
7800
7801 struct nfs4_reclaim_complete_data {
7802 struct nfs_client *clp;
7803 struct nfs41_reclaim_complete_args arg;
7804 struct nfs41_reclaim_complete_res res;
7805 };
7806
7807 static void nfs4_reclaim_complete_prepare(struct rpc_task *task, void *data)
7808 {
7809 struct nfs4_reclaim_complete_data *calldata = data;
7810
7811 nfs41_setup_sequence(calldata->clp->cl_session,
7812 &calldata->arg.seq_args,
7813 &calldata->res.seq_res,
7814 task);
7815 }
7816
7817 static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nfs_client *clp)
7818 {
7819 switch(task->tk_status) {
7820 case 0:
7821 case -NFS4ERR_COMPLETE_ALREADY:
7822 case -NFS4ERR_WRONG_CRED: /* What to do here? */
7823 break;
7824 case -NFS4ERR_DELAY:
7825 rpc_delay(task, NFS4_POLL_RETRY_MAX);
7826 /* fall through */
7827 case -NFS4ERR_RETRY_UNCACHED_REP:
7828 return -EAGAIN;
7829 default:
7830 nfs4_schedule_lease_recovery(clp);
7831 }
7832 return 0;
7833 }
7834
7835 static void nfs4_reclaim_complete_done(struct rpc_task *task, void *data)
7836 {
7837 struct nfs4_reclaim_complete_data *calldata = data;
7838 struct nfs_client *clp = calldata->clp;
7839 struct nfs4_sequence_res *res = &calldata->res.seq_res;
7840
7841 dprintk("--> %s\n", __func__);
7842 if (!nfs41_sequence_done(task, res))
7843 return;
7844
7845 trace_nfs4_reclaim_complete(clp, task->tk_status);
7846 if (nfs41_reclaim_complete_handle_errors(task, clp) == -EAGAIN) {
7847 rpc_restart_call_prepare(task);
7848 return;
7849 }
7850 dprintk("<-- %s\n", __func__);
7851 }
7852
7853 static void nfs4_free_reclaim_complete_data(void *data)
7854 {
7855 struct nfs4_reclaim_complete_data *calldata = data;
7856
7857 kfree(calldata);
7858 }
7859
7860 static const struct rpc_call_ops nfs4_reclaim_complete_call_ops = {
7861 .rpc_call_prepare = nfs4_reclaim_complete_prepare,
7862 .rpc_call_done = nfs4_reclaim_complete_done,
7863 .rpc_release = nfs4_free_reclaim_complete_data,
7864 };
7865
7866 /*
7867 * Issue a global reclaim complete.
7868 */
7869 static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
7870 struct rpc_cred *cred)
7871 {
7872 struct nfs4_reclaim_complete_data *calldata;
7873 struct rpc_task *task;
7874 struct rpc_message msg = {
7875 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RECLAIM_COMPLETE],
7876 .rpc_cred = cred,
7877 };
7878 struct rpc_task_setup task_setup_data = {
7879 .rpc_client = clp->cl_rpcclient,
7880 .rpc_message = &msg,
7881 .callback_ops = &nfs4_reclaim_complete_call_ops,
7882 .flags = RPC_TASK_ASYNC,
7883 };
7884 int status = -ENOMEM;
7885
7886 dprintk("--> %s\n", __func__);
7887 calldata = kzalloc(sizeof(*calldata), GFP_NOFS);
7888 if (calldata == NULL)
7889 goto out;
7890 calldata->clp = clp;
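/* one_fs == 0: reclaim is complete for every filesystem on this client */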
7891 calldata->arg.one_fs = 0;
7892
7893 nfs4_init_sequence(&calldata->arg.seq_args, &calldata->res.seq_res, 0);
7894 nfs4_set_sequence_privileged(&calldata->arg.seq_args);
7895 msg.rpc_argp = &calldata->arg;
7896 msg.rpc_resp = &calldata->res;
7897 task_setup_data.callback_data = calldata;
7898 task = rpc_run_task(&task_setup_data);
7899 if (IS_ERR(task)) {
7900 status = PTR_ERR(task);
7901 goto out;
7902 }
7903 status = nfs4_wait_for_completion_rpc_task(task);
7904 if (status == 0)
7905 status = task->tk_status;
7906 rpc_put_task(task);
7907 return 0;
7908 out:
7909 dprintk("<-- %s status=%d\n", __func__, status);
7910 return status;
7911 }
7912
7913 static void
7914 nfs4_layoutget_prepare(struct rpc_task *task, void *calldata)
7915 {
7916 struct nfs4_layoutget *lgp = calldata;
7917 struct nfs_server *server = NFS_SERVER(lgp->args.inode);
7918 struct nfs4_session *session = nfs4_get_session(server);
7919
7920 dprintk("--> %s\n", __func__);
7921 nfs41_setup_sequence(session, &lgp->args.seq_args,
7922 &lgp->res.seq_res, task);
7923 dprintk("<-- %s\n", __func__);
7924 }
7925
7926 static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
7927 {
7928 struct nfs4_layoutget *lgp = calldata;
7929
7930 dprintk("--> %s\n", __func__);
7931 nfs41_sequence_process(task, &lgp->res.seq_res);
7932 dprintk("<-- %s\n", __func__);
7933 }
7934
7935 static int
7936 nfs4_layoutget_handle_exception(struct rpc_task *task,
7937 struct nfs4_layoutget *lgp, struct nfs4_exception *exception)
7938 {
7939 struct inode *inode = lgp->args.inode;
7940 struct nfs_server *server = NFS_SERVER(inode);
7941 struct pnfs_layout_hdr *lo;
7942 int nfs4err = task->tk_status;
7943 int err, status = 0;
7944 LIST_HEAD(head);
7945
7946 dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
7947
7948 switch (nfs4err) {
7949 case 0:
7950 goto out;
7951
7952 /*
7953 * NFS4ERR_LAYOUTUNAVAILABLE means we are not supposed to use pnfs
7954 * on the file. Set tk_status to -ENODATA to tell the upper layer to
7955 * retry the I/O in-band through the MDS.
7956 */
7957 case -NFS4ERR_LAYOUTUNAVAILABLE:
7958 status = -ENODATA;
7959 goto out;
7960 /*
7961 * NFS4ERR_BADLAYOUT means the MDS cannot return a layout of
7962 * length lgp->args.minlength != 0 (see RFC5661 section 18.43.3).
7963 */
7964 case -NFS4ERR_BADLAYOUT:
7965 status = -EOVERFLOW;
7966 goto out;
7967 /*
7968 * NFS4ERR_LAYOUTTRYLATER is a conflict with another client
7969 * (or clients) writing to the same RAID stripe except when
7970 * the minlength argument is 0 (see RFC5661 section 18.43.3).
7971 *
7972 * Treat it like we would RECALLCONFLICT -- we retry for a little
7973 * while, and then eventually give up.
7974 */
7975 case -NFS4ERR_LAYOUTTRYLATER:
7976 if (lgp->args.minlength == 0) {
7977 status = -EOVERFLOW;
7978 goto out;
7979 }
7980 status = -EBUSY;
7981 break;
7982 case -NFS4ERR_RECALLCONFLICT:
7983 status = -ERECALLCONFLICT;
7984 break;
7985 case -NFS4ERR_EXPIRED:
7986 case -NFS4ERR_BAD_STATEID:
7987 exception->timeout = 0;
7988 spin_lock(&inode->i_lock);
7989 lo = NFS_I(inode)->layout;
7990 /* If the open stateid was bad, then recover it. */
7991 if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
7992 nfs4_stateid_match_other(&lgp->args.stateid,
7993 &lgp->args.ctx->state->stateid)) {
7994 spin_unlock(&inode->i_lock);
7995 exception->state = lgp->args.ctx->state;
7996 break;
7997 }
7998
7999 /*
8000 * Mark the bad layout state as invalid, then retry
8001 */
8002 pnfs_mark_layout_stateid_invalid(lo, &head);
8003 spin_unlock(&inode->i_lock);
8004 pnfs_free_lseg_list(&head);
8005 status = -EAGAIN;
8006 goto out;
8007 }
8008
8009 err = nfs4_handle_exception(server, nfs4err, exception);
8010 if (!status) {
8011 if (exception->retry)
8012 status = -EAGAIN;
8013 else
8014 status = err;
8015 }
8016 out:
8017 dprintk("<-- %s\n", __func__);
8018 return status;
8019 }
8020
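/* Number of pages needed to hold the largest reply the fore channel allows; used to size the LAYOUTGET buffer */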
8021 static size_t max_response_pages(struct nfs_server *server)
8022 {
8023 u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
8024 return nfs_page_array_len(0, max_resp_sz);
8025 }
8026
8027 static void nfs4_free_pages(struct page **pages, size_t size)
8028 {
8029 int i;
8030
8031 if (!pages)
8032 return;
8033
8034 for (i = 0; i < size; i++) {
8035 if (!pages[i])
8036 break;
8037 __free_page(pages[i]);
8038 }
8039 kfree(pages);
8040 }
8041
8042 static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
8043 {
8044 struct page **pages;
8045 int i;
8046
8047 pages = kcalloc(size, sizeof(struct page *), gfp_flags);
8048 if (!pages) {
8049 dprintk("%s: can't alloc array of %zu pages\n", __func__, size);
8050 return NULL;
8051 }
8052
8053 for (i = 0; i < size; i++) {
8054 pages[i] = alloc_page(gfp_flags);
8055 if (!pages[i]) {
8056 dprintk("%s: failed to allocate page\n", __func__);
8057 nfs4_free_pages(pages, size);
8058 return NULL;
8059 }
8060 }
8061
8062 return pages;
8063 }
8064
8065 static void nfs4_layoutget_release(void *calldata)
8066 {
8067 struct nfs4_layoutget *lgp = calldata;
8068 struct inode *inode = lgp->args.inode;
8069 struct nfs_server *server = NFS_SERVER(inode);
8070 size_t max_pages = max_response_pages(server);
8071
8072 dprintk("--> %s\n", __func__);
8073 nfs4_free_pages(lgp->args.layout.pages, max_pages);
8074 pnfs_put_layout_hdr(NFS_I(inode)->layout);
8075 put_nfs_open_context(lgp->args.ctx);
8076 kfree(calldata);
8077 dprintk("<-- %s\n", __func__);
8078 }
8079
8080 static const struct rpc_call_ops nfs4_layoutget_call_ops = {
8081 .rpc_call_prepare = nfs4_layoutget_prepare,
8082 .rpc_call_done = nfs4_layoutget_done,
8083 .rpc_release = nfs4_layoutget_release,
8084 };
8085
8086 struct pnfs_layout_segment *
8087 nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
8088 {
8089 struct inode *inode = lgp->args.inode;
8090 struct nfs_server *server = NFS_SERVER(inode);
8091 size_t max_pages = max_response_pages(server);
8092 struct rpc_task *task;
8093 struct rpc_message msg = {
8094 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET],
8095 .rpc_argp = &lgp->args,
8096 .rpc_resp = &lgp->res,
8097 .rpc_cred = lgp->cred,
8098 };
8099 struct rpc_task_setup task_setup_data = {
8100 .rpc_client = server->client,
8101 .rpc_message = &msg,
8102 .callback_ops = &nfs4_layoutget_call_ops,
8103 .callback_data = lgp,
8104 .flags = RPC_TASK_ASYNC,
8105 };
8106 struct pnfs_layout_segment *lseg = NULL;
8107 struct nfs4_exception exception = {
8108 .inode = inode,
8109 .timeout = *timeout,
8110 };
8111 int status = 0;
8112
8113 dprintk("--> %s\n", __func__);
8114
8115 /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
8116 pnfs_get_layout_hdr(NFS_I(inode)->layout);
8117
8118 lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
8119 if (!lgp->args.layout.pages) {
8120 nfs4_layoutget_release(lgp);
8121 return ERR_PTR(-ENOMEM);
8122 }
8123 lgp->args.layout.pglen = max_pages * PAGE_SIZE;
8124
8125 lgp->res.layoutp = &lgp->args.layout;
8126 lgp->res.seq_res.sr_slot = NULL;
8127 nfs4_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
8128
8129 task = rpc_run_task(&task_setup_data);
8130 if (IS_ERR(task))
8131 return ERR_CAST(task);
8132 status = nfs4_wait_for_completion_rpc_task(task);
8133 if (status == 0) {
8134 status = nfs4_layoutget_handle_exception(task, lgp, &exception);
8135 *timeout = exception.timeout;
8136 }
8137
8138 trace_nfs4_layoutget(lgp->args.ctx,
8139 &lgp->args.range,
8140 &lgp->res.range,
8141 &lgp->res.stateid,
8142 status);
8143
8144 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
8145 if (status == 0 && lgp->res.layoutp->len)
8146 lseg = pnfs_layout_process(lgp);
8147 nfs4_sequence_free_slot(&lgp->res.seq_res);
8148 rpc_put_task(task);
8149 dprintk("<-- %s status=%d\n", __func__, status);
8150 if (status)
8151 return ERR_PTR(status);
8152 return lseg;
8153 }
8154
8155 static void
8156 nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
8157 {
8158 struct nfs4_layoutreturn *lrp = calldata;
8159
8160 dprintk("--> %s\n", __func__);
8161 nfs41_setup_sequence(lrp->clp->cl_session,
8162 &lrp->args.seq_args,
8163 &lrp->res.seq_res,
8164 task);
8165 }
8166
8167 static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
8168 {
8169 struct nfs4_layoutreturn *lrp = calldata;
8170 struct nfs_server *server;
8171
8172 dprintk("--> %s\n", __func__);
8173
8174 if (!nfs41_sequence_process(task, &lrp->res.seq_res))
8175 return;
8176
8177 server = NFS_SERVER(lrp->args.inode);
8178 switch (task->tk_status) {
8179 default:
8180 task->tk_status = 0;
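/* fall through */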
8181 case 0:
8182 break;
8183 case -NFS4ERR_DELAY:
8184 if (nfs4_async_handle_error(task, server, NULL, NULL) != -EAGAIN)
8185 break;
8186 nfs4_sequence_free_slot(&lrp->res.seq_res);
8187 rpc_restart_call_prepare(task);
8188 return;
8189 }
8190 dprintk("<-- %s\n", __func__);
8191 }
8192
8193 static void nfs4_layoutreturn_release(void *calldata)
8194 {
8195 struct nfs4_layoutreturn *lrp = calldata;
8196 struct pnfs_layout_hdr *lo = lrp->args.layout;
8197 LIST_HEAD(freeme);
8198
8199 dprintk("--> %s\n", __func__);
8200 spin_lock(&lo->plh_inode->i_lock);
8201 if (lrp->res.lrs_present) {
8202 pnfs_mark_matching_lsegs_invalid(lo, &freeme,
8203 &lrp->args.range,
8204 be32_to_cpu(lrp->args.stateid.seqid));
8205 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
8206 } else
8207 pnfs_mark_layout_stateid_invalid(lo, &freeme);
8208 pnfs_clear_layoutreturn_waitbit(lo);
8209 spin_unlock(&lo->plh_inode->i_lock);
8210 nfs4_sequence_free_slot(&lrp->res.seq_res);
8211 pnfs_free_lseg_list(&freeme);
8212 pnfs_put_layout_hdr(lrp->args.layout);
8213 nfs_iput_and_deactive(lrp->inode);
8214 kfree(calldata);
8215 dprintk("<-- %s\n", __func__);
8216 }
8217
8218 static const struct rpc_call_ops nfs4_layoutreturn_call_ops = {
8219 .rpc_call_prepare = nfs4_layoutreturn_prepare,
8220 .rpc_call_done = nfs4_layoutreturn_done,
8221 .rpc_release = nfs4_layoutreturn_release,
8222 };
8223
8224 int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync)
8225 {
8226 struct rpc_task *task;
8227 struct rpc_message msg = {
8228 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTRETURN],
8229 .rpc_argp = &lrp->args,
8230 .rpc_resp = &lrp->res,
8231 .rpc_cred = lrp->cred,
8232 };
8233 struct rpc_task_setup task_setup_data = {
8234 .rpc_client = NFS_SERVER(lrp->args.inode)->client,
8235 .rpc_message = &msg,
8236 .callback_ops = &nfs4_layoutreturn_call_ops,
8237 .callback_data = lrp,
8238 };
8239 int status = 0;
8240
8241 nfs4_state_protect(NFS_SERVER(lrp->args.inode)->nfs_client,
8242 NFS_SP4_MACH_CRED_PNFS_CLEANUP,
8243 &task_setup_data.rpc_client, &msg);
8244
8245 dprintk("--> %s\n", __func__);
8246 if (!sync) {
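/* Pin the inode for the asynchronous case so it stays active until nfs4_layoutreturn_release drops it */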
8247 lrp->inode = nfs_igrab_and_active(lrp->args.inode);
8248 if (!lrp->inode) {
8249 nfs4_layoutreturn_release(lrp);
8250 return -EAGAIN;
8251 }
8252 task_setup_data.flags |= RPC_TASK_ASYNC;
8253 }
8254 nfs4_init_sequence(&lrp->args.seq_args, &lrp->res.seq_res, 1);
8255 task = rpc_run_task(&task_setup_data);
8256 if (IS_ERR(task))
8257 return PTR_ERR(task);
8258 if (sync)
8259 status = task->tk_status;
8260 trace_nfs4_layoutreturn(lrp->args.inode, &lrp->args.stateid, status);
8261 dprintk("<-- %s status=%d\n", __func__, status);
8262 rpc_put_task(task);
8263 return status;
8264 }
8265
8266 static int
8267 _nfs4_proc_getdeviceinfo(struct nfs_server *server,
8268 struct pnfs_device *pdev,
8269 struct rpc_cred *cred)
8270 {
8271 struct nfs4_getdeviceinfo_args args = {
8272 .pdev = pdev,
8273 .notify_types = NOTIFY_DEVICEID4_CHANGE |
8274 NOTIFY_DEVICEID4_DELETE,
8275 };
8276 struct nfs4_getdeviceinfo_res res = {
8277 .pdev = pdev,
8278 };
8279 struct rpc_message msg = {
8280 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETDEVICEINFO],
8281 .rpc_argp = &args,
8282 .rpc_resp = &res,
8283 .rpc_cred = cred,
8284 };
8285 int status;
8286
8287 dprintk("--> %s\n", __func__);
8288 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0);
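/* If the server's notification mask does not exactly match what we requested, do not cache this device */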
8289 if (res.notification & ~args.notify_types)
8290 dprintk("%s: unsupported notification\n", __func__);
8291 if (res.notification != args.notify_types)
8292 pdev->nocache = 1;
8293
8294 dprintk("<-- %s status=%d\n", __func__, status);
8295
8296 return status;
8297 }
8298
8299 int nfs4_proc_getdeviceinfo(struct nfs_server *server,
8300 struct pnfs_device *pdev,
8301 struct rpc_cred *cred)
8302 {
8303 struct nfs4_exception exception = { };
8304 int err;
8305
8306 do {
8307 err = nfs4_handle_exception(server,
8308 _nfs4_proc_getdeviceinfo(server, pdev, cred),
8309 &exception);
8310 } while (exception.retry);
8311 return err;
8312 }
8313 EXPORT_SYMBOL_GPL(nfs4_proc_getdeviceinfo);
8314
8315 static void nfs4_layoutcommit_prepare(struct rpc_task *task, void *calldata)
8316 {
8317 struct nfs4_layoutcommit_data *data = calldata;
8318 struct nfs_server *server = NFS_SERVER(data->args.inode);
8319 struct nfs4_session *session = nfs4_get_session(server);
8320
8321 nfs41_setup_sequence(session,
8322 &data->args.seq_args,
8323 &data->res.seq_res,
8324 task);
8325 }
8326
8327 static void
8328 nfs4_layoutcommit_done(struct rpc_task *task, void *calldata)
8329 {
8330 struct nfs4_layoutcommit_data *data = calldata;
8331 struct nfs_server *server = NFS_SERVER(data->args.inode);
8332
8333 if (!nfs41_sequence_done(task, &data->res.seq_res))
8334 return;
8335
8336 switch (task->tk_status) { /* Just ignore these failures */
8337 case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */
8338 case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */
8339 case -NFS4ERR_BADLAYOUT: /* no layout */
8340 case -NFS4ERR_GRACE: /* loca_reclaim is always false */
8341 task->tk_status = 0;
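/* fall through */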
8342 case 0:
8343 break;
8344 default:
8345 if (nfs4_async_handle_error(task, server, NULL, NULL) == -EAGAIN) {
8346 rpc_restart_call_prepare(task);
8347 return;
8348 }
8349 }
8350 }
8351
8352 static void nfs4_layoutcommit_release(void *calldata)
8353 {
8354 struct nfs4_layoutcommit_data *data = calldata;
8355
8356 pnfs_cleanup_layoutcommit(data);
8357 nfs_post_op_update_inode_force_wcc(data->args.inode,
8358 data->res.fattr);
8359 put_rpccred(data->cred);
8360 nfs_iput_and_deactive(data->inode);
8361 kfree(data);
8362 }
8363
8364 static const struct rpc_call_ops nfs4_layoutcommit_ops = {
8365 .rpc_call_prepare = nfs4_layoutcommit_prepare,
8366 .rpc_call_done = nfs4_layoutcommit_done,
8367 .rpc_release = nfs4_layoutcommit_release,
8368 };
8369
8370 int
8371 nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data, bool sync)
8372 {
8373 struct rpc_message msg = {
8374 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTCOMMIT],
8375 .rpc_argp = &data->args,
8376 .rpc_resp = &data->res,
8377 .rpc_cred = data->cred,
8378 };
8379 struct rpc_task_setup task_setup_data = {
8380 .task = &data->task,
8381 .rpc_client = NFS_CLIENT(data->args.inode),
8382 .rpc_message = &msg,
8383 .callback_ops = &nfs4_layoutcommit_ops,
8384 .callback_data = data,
8385 };
8386 struct rpc_task *task;
8387 int status = 0;
8388
8389 dprintk("NFS: initiating layoutcommit call. sync %d "
8390 "lbw: %llu inode %lu\n", sync,
8391 data->args.lastbytewritten,
8392 data->args.inode->i_ino);
8393
8394 if (!sync) {
8395 data->inode = nfs_igrab_and_active(data->args.inode);
8396 if (data->inode == NULL) {
8397 nfs4_layoutcommit_release(data);
8398 return -EAGAIN;
8399 }
8400 task_setup_data.flags = RPC_TASK_ASYNC;
8401 }
8402 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
8403 task = rpc_run_task(&task_setup_data);
8404 if (IS_ERR(task))
8405 return PTR_ERR(task);
8406 if (sync)
8407 status = task->tk_status;
8408 trace_nfs4_layoutcommit(data->args.inode, &data->args.stateid, status);
8409 dprintk("%s: status %d\n", __func__, status);
8410 rpc_put_task(task);
8411 return status;
8412 }
8413
8414 /**
8415 * Use the state management nfs_client cl_rpcclient, which uses krb5i (if
8416 * possible) as per the RFC3530bis and RFC5661 Security Considerations sections.
8417 */
8418 static int
8419 _nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8420 struct nfs_fsinfo *info,
8421 struct nfs4_secinfo_flavors *flavors, bool use_integrity)
8422 {
8423 struct nfs41_secinfo_no_name_args args = {
8424 .style = SECINFO_STYLE_CURRENT_FH,
8425 };
8426 struct nfs4_secinfo_res res = {
8427 .flavors = flavors,
8428 };
8429 struct rpc_message msg = {
8430 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SECINFO_NO_NAME],
8431 .rpc_argp = &args,
8432 .rpc_resp = &res,
8433 };
8434 struct rpc_clnt *clnt = server->client;
8435 struct rpc_cred *cred = NULL;
8436 int status;
8437
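/* With integrity protection, use the lease-management rpc_client and the machine credential */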
8438 if (use_integrity) {
8439 clnt = server->nfs_client->cl_rpcclient;
8440 cred = nfs4_get_clid_cred(server->nfs_client);
8441 msg.rpc_cred = cred;
8442 }
8443
8444 dprintk("--> %s\n", __func__);
8445 status = nfs4_call_sync(clnt, server, &msg, &args.seq_args,
8446 &res.seq_res, 0);
8447 dprintk("<-- %s status=%d\n", __func__, status);
8448
8449 if (cred)
8450 put_rpccred(cred);
8451
8452 return status;
8453 }
8454
8455 static int
8456 nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
8457 struct nfs_fsinfo *info, struct nfs4_secinfo_flavors *flavors)
8458 {
8459 struct nfs4_exception exception = { };
8460 int err;
8461 do {
8462 /* first try using integrity protection */
8463 err = -NFS4ERR_WRONGSEC;
8464
8465 /* try to use integrity protection with machine cred */
8466 if (_nfs4_is_integrity_protected(server->nfs_client))
8467 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8468 flavors, true);
8469
8470 /*
8471 * if unable to use integrity protection, or SECINFO with
8472 * integrity protection returns NFS4ERR_WRONGSEC (which is
8473 * disallowed by spec, but exists in deployed servers) use
8474 * the current filesystem's rpc_client and the user cred.
8475 */
8476 if (err == -NFS4ERR_WRONGSEC)
8477 err = _nfs41_proc_secinfo_no_name(server, fhandle, info,
8478 flavors, false);
8479
8480 switch (err) {
8481 case 0:
8482 case -NFS4ERR_WRONGSEC:
8483 case -ENOTSUPP:
8484 goto out;
8485 default:
8486 err = nfs4_handle_exception(server, err, &exception);
8487 }
8488 } while (exception.retry);
8489 out:
8490 return err;
8491 }
8492
8493 static int
8494 nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
8495 struct nfs_fsinfo *info)
8496 {
8497 int err;
8498 struct page *page;
8499 rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
8500 struct nfs4_secinfo_flavors *flavors;
8501 struct nfs4_secinfo4 *secinfo;
8502 int i;
8503
8504 page = alloc_page(GFP_KERNEL);
8505 if (!page) {
8506 err = -ENOMEM;
8507 goto out;
8508 }
8509
8510 flavors = page_address(page);
8511 err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors);
8512
8513 /*
8514 * Fall back on "guess and check" method if
8515 * the server doesn't support SECINFO_NO_NAME
8516 */
8517 if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
8518 err = nfs4_find_root_sec(server, fhandle, info);
8519 goto out_freepage;
8520 }
8521 if (err)
8522 goto out_freepage;
8523
8524 for (i = 0; i < flavors->num_flavors; i++) {
8525 secinfo = &flavors->flavors[i];
8526
8527 switch (secinfo->flavor) {
8528 case RPC_AUTH_NULL:
8529 case RPC_AUTH_UNIX:
8530 case RPC_AUTH_GSS:
8531 flavor = rpcauth_get_pseudoflavor(secinfo->flavor,
8532 &secinfo->flavor_info);
8533 break;
8534 default:
8535 flavor = RPC_AUTH_MAXFLAVOR;
8536 break;
8537 }
8538
8539 if (!nfs_auth_info_match(&server->auth_info, flavor))
8540 flavor = RPC_AUTH_MAXFLAVOR;
8541
8542 if (flavor != RPC_AUTH_MAXFLAVOR) {
8543 err = nfs4_lookup_root_sec(server, fhandle,
8544 info, flavor);
8545 if (!err)
8546 break;
8547 }
8548 }
8549
8550 if (flavor == RPC_AUTH_MAXFLAVOR)
8551 err = -EPERM;
8552
8553 out_freepage:
8554 put_page(page);
8555 if (err == -EACCES)
8556 return -EPERM;
8557 out:
8558 return err;
8559 }
8560
8561 static int _nfs41_test_stateid(struct nfs_server *server,
8562 nfs4_stateid *stateid,
8563 struct rpc_cred *cred)
8564 {
8565 int status;
8566 struct nfs41_test_stateid_args args = {
8567 .stateid = stateid,
8568 };
8569 struct nfs41_test_stateid_res res;
8570 struct rpc_message msg = {
8571 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_TEST_STATEID],
8572 .rpc_argp = &args,
8573 .rpc_resp = &res,
8574 .rpc_cred = cred,
8575 };
8576 struct rpc_clnt *rpc_client = server->client;
8577
8578 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8579 &rpc_client, &msg);
8580
8581 dprintk("NFS call test_stateid %p\n", stateid);
8582 nfs4_init_sequence(&args.seq_args, &res.seq_res, 0);
8583 nfs4_set_sequence_privileged(&args.seq_args);
8584 status = nfs4_call_sync_sequence(rpc_client, server, &msg,
8585 &args.seq_args, &res.seq_res);
8586 if (status != NFS_OK) {
8587 dprintk("NFS reply test_stateid: failed, %d\n", status);
8588 return status;
8589 }
8590 dprintk("NFS reply test_stateid: succeeded, %d\n", -res.status);
8591 return -res.status;
8592 }
8593
8594 /**
8595 * nfs41_test_stateid - perform a TEST_STATEID operation
8596 *
8597 * @server: server / transport on which to perform the operation
8598 * @stateid: state ID to test
8599 * @cred: credential
8600 *
8601 * Returns NFS_OK if the server recognizes that "stateid" is valid.
8602 * Otherwise a negative NFS4ERR value is returned if the operation
8603 * failed or the state ID is not currently valid.
8604 */
8605 static int nfs41_test_stateid(struct nfs_server *server,
8606 nfs4_stateid *stateid,
8607 struct rpc_cred *cred)
8608 {
8609 struct nfs4_exception exception = { };
8610 int err;
8611 do {
8612 err = _nfs41_test_stateid(server, stateid, cred);
8613 if (err != -NFS4ERR_DELAY)
8614 break;
8615 nfs4_handle_exception(server, err, &exception);
8616 } while (exception.retry);
8617 return err;
8618 }
8619
8620 struct nfs_free_stateid_data {
8621 struct nfs_server *server;
8622 struct nfs41_free_stateid_args args;
8623 struct nfs41_free_stateid_res res;
8624 };
8625
8626 static void nfs41_free_stateid_prepare(struct rpc_task *task, void *calldata)
8627 {
8628 struct nfs_free_stateid_data *data = calldata;
8629 nfs41_setup_sequence(nfs4_get_session(data->server),
8630 &data->args.seq_args,
8631 &data->res.seq_res,
8632 task);
8633 }
8634
8635 static void nfs41_free_stateid_done(struct rpc_task *task, void *calldata)
8636 {
8637 struct nfs_free_stateid_data *data = calldata;
8638
8639 nfs41_sequence_done(task, &data->res.seq_res);
8640
8641 switch (task->tk_status) {
8642 case -NFS4ERR_DELAY:
8643 if (nfs4_async_handle_error(task, data->server, NULL, NULL) == -EAGAIN)
8644 rpc_restart_call_prepare(task);
8645 }
8646 }
8647
8648 static void nfs41_free_stateid_release(void *calldata)
8649 {
8650 kfree(calldata);
8651 }
8652
8653 static const struct rpc_call_ops nfs41_free_stateid_ops = {
8654 .rpc_call_prepare = nfs41_free_stateid_prepare,
8655 .rpc_call_done = nfs41_free_stateid_done,
8656 .rpc_release = nfs41_free_stateid_release,
8657 };
8658
8659 static struct rpc_task *_nfs41_free_stateid(struct nfs_server *server,
8660 nfs4_stateid *stateid,
8661 struct rpc_cred *cred,
8662 bool privileged)
8663 {
8664 struct rpc_message msg = {
8665 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_FREE_STATEID],
8666 .rpc_cred = cred,
8667 };
8668 struct rpc_task_setup task_setup = {
8669 .rpc_client = server->client,
8670 .rpc_message = &msg,
8671 .callback_ops = &nfs41_free_stateid_ops,
8672 .flags = RPC_TASK_ASYNC,
8673 };
8674 struct nfs_free_stateid_data *data;
8675
8676 nfs4_state_protect(server->nfs_client, NFS_SP4_MACH_CRED_STATEID,
8677 &task_setup.rpc_client, &msg);
8678
8679 dprintk("NFS call free_stateid %p\n", stateid);
8680 data = kmalloc(sizeof(*data), GFP_NOFS);
8681 if (!data)
8682 return ERR_PTR(-ENOMEM);
8683 data->server = server;
8684 nfs4_stateid_copy(&data->args.stateid, stateid);
8685
8686 task_setup.callback_data = data;
8687
8688 msg.rpc_argp = &data->args;
8689 msg.rpc_resp = &data->res;
8690 nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
8691 if (privileged)
8692 nfs4_set_sequence_privileged(&data->args.seq_args);
8693
8694 return rpc_run_task(&task_setup);
8695 }
8696
8697 /**
8698 * nfs41_free_stateid - perform a FREE_STATEID operation
8699 *
8700 * @server: server / transport on which to perform the operation
8701 * @stateid: state ID to release
8702 * @cred: credential
8703 *
8704 * Returns NFS_OK if the server freed "stateid". Otherwise a
8705 * negative NFS4ERR value is returned.
8706 */
8707 static int nfs41_free_stateid(struct nfs_server *server,
8708 nfs4_stateid *stateid,
8709 struct rpc_cred *cred)
8710 {
8711 struct rpc_task *task;
8712 int ret;
8713
8714 task = _nfs41_free_stateid(server, stateid, cred, true);
8715 if (IS_ERR(task))
8716 return PTR_ERR(task);
8717 ret = rpc_wait_for_completion_task(task);
8718 if (!ret)
8719 ret = task->tk_status;
8720 rpc_put_task(task);
8721 return ret;
8722 }
8723
8724 static void
8725 nfs41_free_lock_state(struct nfs_server *server, struct nfs4_lock_state *lsp)
8726 {
8727 struct rpc_task *task;
8728 struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
8729
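/* Fire-and-forget FREE_STATEID: the lock state is released locally whether or not the RPC could be started */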
8730 task = _nfs41_free_stateid(server, &lsp->ls_stateid, cred, false);
8731 nfs4_free_lock_state(server, lsp);
8732 if (IS_ERR(task))
8733 return;
8734 rpc_put_task(task);
8735 }
8736
8737 static bool nfs41_match_stateid(const nfs4_stateid *s1,
8738 const nfs4_stateid *s2)
8739 {
8740 if (s1->type != s2->type)
8741 return false;
8742
8743 if (memcmp(s1->other, s2->other, sizeof(s1->other)) != 0)
8744 return false;
8745
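/* A seqid of zero acts as a wildcard and matches any sequence number */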
8746 if (s1->seqid == s2->seqid)
8747 return true;
8748 if (s1->seqid == 0 || s2->seqid == 0)
8749 return true;
8750
8751 return false;
8752 }
8753
8754 #endif /* CONFIG_NFS_V4_1 */
8755
8756 static bool nfs4_match_stateid(const nfs4_stateid *s1,
8757 const nfs4_stateid *s2)
8758 {
8759 return nfs4_stateid_match(s1, s2);
8760 }
8761
8762
8763 static const struct nfs4_state_recovery_ops nfs40_reboot_recovery_ops = {
8764 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8765 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8766 .recover_open = nfs4_open_reclaim,
8767 .recover_lock = nfs4_lock_reclaim,
8768 .establish_clid = nfs4_init_clientid,
8769 .detect_trunking = nfs40_discover_server_trunking,
8770 };
8771
8772 #if defined(CONFIG_NFS_V4_1)
8773 static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8774 .owner_flag_bit = NFS_OWNER_RECLAIM_REBOOT,
8775 .state_flag_bit = NFS_STATE_RECLAIM_REBOOT,
8776 .recover_open = nfs4_open_reclaim,
8777 .recover_lock = nfs4_lock_reclaim,
8778 .establish_clid = nfs41_init_clientid,
8779 .reclaim_complete = nfs41_proc_reclaim_complete,
8780 .detect_trunking = nfs41_discover_server_trunking,
8781 };
8782 #endif /* CONFIG_NFS_V4_1 */
8783
8784 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8785 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8786 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8787 .recover_open = nfs40_open_expired,
8788 .recover_lock = nfs4_lock_expired,
8789 .establish_clid = nfs4_init_clientid,
8790 };
8791
8792 #if defined(CONFIG_NFS_V4_1)
8793 static const struct nfs4_state_recovery_ops nfs41_nograce_recovery_ops = {
8794 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8795 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8796 .recover_open = nfs41_open_expired,
8797 .recover_lock = nfs41_lock_expired,
8798 .establish_clid = nfs41_init_clientid,
8799 };
8800 #endif /* CONFIG_NFS_V4_1 */
8801
8802 static const struct nfs4_state_maintenance_ops nfs40_state_renewal_ops = {
8803 .sched_state_renewal = nfs4_proc_async_renew,
8804 .get_state_renewal_cred_locked = nfs4_get_renew_cred_locked,
8805 .renew_lease = nfs4_proc_renew,
8806 };
8807
8808 #if defined(CONFIG_NFS_V4_1)
8809 static const struct nfs4_state_maintenance_ops nfs41_state_renewal_ops = {
8810 .sched_state_renewal = nfs41_proc_async_sequence,
8811 .get_state_renewal_cred_locked = nfs4_get_machine_cred_locked,
8812 .renew_lease = nfs4_proc_sequence,
8813 };
8814 #endif
8815
8816 static const struct nfs4_mig_recovery_ops nfs40_mig_recovery_ops = {
8817 .get_locations = _nfs40_proc_get_locations,
8818 .fsid_present = _nfs40_proc_fsid_present,
8819 };
8820
8821 #if defined(CONFIG_NFS_V4_1)
8822 static const struct nfs4_mig_recovery_ops nfs41_mig_recovery_ops = {
8823 .get_locations = _nfs41_proc_get_locations,
8824 .fsid_present = _nfs41_proc_fsid_present,
8825 };
8826 #endif /* CONFIG_NFS_V4_1 */
8827
8828 static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8829 .minor_version = 0,
8830 .init_caps = NFS_CAP_READDIRPLUS
8831 | NFS_CAP_ATOMIC_OPEN
8832 | NFS_CAP_POSIX_LOCK,
8833 .init_client = nfs40_init_client,
8834 .shutdown_client = nfs40_shutdown_client,
8835 .match_stateid = nfs4_match_stateid,
8836 .find_root_sec = nfs4_find_root_sec,
8837 .free_lock_state = nfs4_release_lockowner,
8838 .alloc_seqid = nfs_alloc_seqid,
8839 .call_sync_ops = &nfs40_call_sync_ops,
8840 .reboot_recovery_ops = &nfs40_reboot_recovery_ops,
8841 .nograce_recovery_ops = &nfs40_nograce_recovery_ops,
8842 .state_renewal_ops = &nfs40_state_renewal_ops,
8843 .mig_recovery_ops = &nfs40_mig_recovery_ops,
8844 };
8845
8846 #if defined(CONFIG_NFS_V4_1)
8847 static struct nfs_seqid *
8848 nfs_alloc_no_seqid(struct nfs_seqid_counter *arg1, gfp_t arg2)
8849 {
8850 return NULL;
8851 }
8852
8853 static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8854 .minor_version = 1,
8855 .init_caps = NFS_CAP_READDIRPLUS
8856 | NFS_CAP_ATOMIC_OPEN
8857 | NFS_CAP_POSIX_LOCK
8858 | NFS_CAP_STATEID_NFSV41
8859 | NFS_CAP_ATOMIC_OPEN_V1,
8860 .init_client = nfs41_init_client,
8861 .shutdown_client = nfs41_shutdown_client,
8862 .match_stateid = nfs41_match_stateid,
8863 .find_root_sec = nfs41_find_root_sec,
8864 .free_lock_state = nfs41_free_lock_state,
8865 .alloc_seqid = nfs_alloc_no_seqid,
8866 .call_sync_ops = &nfs41_call_sync_ops,
8867 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8868 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8869 .state_renewal_ops = &nfs41_state_renewal_ops,
8870 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8871 };
8872 #endif
8873
8874 #if defined(CONFIG_NFS_V4_2)
8875 static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8876 .minor_version = 2,
8877 .init_caps = NFS_CAP_READDIRPLUS
8878 | NFS_CAP_ATOMIC_OPEN
8879 | NFS_CAP_POSIX_LOCK
8880 | NFS_CAP_STATEID_NFSV41
8881 | NFS_CAP_ATOMIC_OPEN_V1
8882 | NFS_CAP_ALLOCATE
8883 | NFS_CAP_COPY
8884 | NFS_CAP_DEALLOCATE
8885 | NFS_CAP_SEEK
8886 | NFS_CAP_LAYOUTSTATS
8887 | NFS_CAP_CLONE,
8888 .init_client = nfs41_init_client,
8889 .shutdown_client = nfs41_shutdown_client,
8890 .match_stateid = nfs41_match_stateid,
8891 .find_root_sec = nfs41_find_root_sec,
8892 .free_lock_state = nfs41_free_lock_state,
8893 .call_sync_ops = &nfs41_call_sync_ops,
8894 .alloc_seqid = nfs_alloc_no_seqid,
8895 .reboot_recovery_ops = &nfs41_reboot_recovery_ops,
8896 .nograce_recovery_ops = &nfs41_nograce_recovery_ops,
8897 .state_renewal_ops = &nfs41_state_renewal_ops,
8898 .mig_recovery_ops = &nfs41_mig_recovery_ops,
8899 };
8900 #endif
8901
8902 const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
8903 [0] = &nfs_v4_0_minor_ops,
8904 #if defined(CONFIG_NFS_V4_1)
8905 [1] = &nfs_v4_1_minor_ops,
8906 #endif
8907 #if defined(CONFIG_NFS_V4_2)
8908 [2] = &nfs_v4_2_minor_ops,
8909 #endif
8910 };
8911
8912 static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
8913 {
8914 ssize_t error, error2;
8915
8916 error = generic_listxattr(dentry, list, size);
8917 if (error < 0)
8918 return error;
8919 if (list) {
8920 list += error;
8921 size -= error;
8922 }
8923
8924 error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size);
8925 if (error2 < 0)
8926 return error2;
8927 return error + error2;
8928 }
8929
8930 static const struct inode_operations nfs4_dir_inode_operations = {
8931 .create = nfs_create,
8932 .lookup = nfs_lookup,
8933 .atomic_open = nfs_atomic_open,
8934 .link = nfs_link,
8935 .unlink = nfs_unlink,
8936 .symlink = nfs_symlink,
8937 .mkdir = nfs_mkdir,
8938 .rmdir = nfs_rmdir,
8939 .mknod = nfs_mknod,
8940 .rename = nfs_rename,
8941 .permission = nfs_permission,
8942 .getattr = nfs_getattr,
8943 .setattr = nfs_setattr,
8944 .getxattr = generic_getxattr,
8945 .setxattr = generic_setxattr,
8946 .listxattr = nfs4_listxattr,
8947 .removexattr = generic_removexattr,
8948 };
8949
8950 static const struct inode_operations nfs4_file_inode_operations = {
8951 .permission = nfs_permission,
8952 .getattr = nfs_getattr,
8953 .setattr = nfs_setattr,
8954 .getxattr = generic_getxattr,
8955 .setxattr = generic_setxattr,
8956 .listxattr = nfs4_listxattr,
8957 .removexattr = generic_removexattr,
8958 };
8959
8960 const struct nfs_rpc_ops nfs_v4_clientops = {
8961 .version = 4, /* protocol version */
8962 .dentry_ops = &nfs4_dentry_operations,
8963 .dir_inode_ops = &nfs4_dir_inode_operations,
8964 .file_inode_ops = &nfs4_file_inode_operations,
8965 .file_ops = &nfs4_file_operations,
8966 .getroot = nfs4_proc_get_root,
8967 .submount = nfs4_submount,
8968 .try_mount = nfs4_try_mount,
8969 .getattr = nfs4_proc_getattr,
8970 .setattr = nfs4_proc_setattr,
8971 .lookup = nfs4_proc_lookup,
8972 .access = nfs4_proc_access,
8973 .readlink = nfs4_proc_readlink,
8974 .create = nfs4_proc_create,
8975 .remove = nfs4_proc_remove,
8976 .unlink_setup = nfs4_proc_unlink_setup,
8977 .unlink_rpc_prepare = nfs4_proc_unlink_rpc_prepare,
8978 .unlink_done = nfs4_proc_unlink_done,
8979 .rename_setup = nfs4_proc_rename_setup,
8980 .rename_rpc_prepare = nfs4_proc_rename_rpc_prepare,
8981 .rename_done = nfs4_proc_rename_done,
8982 .link = nfs4_proc_link,
8983 .symlink = nfs4_proc_symlink,
8984 .mkdir = nfs4_proc_mkdir,
8985 .rmdir = nfs4_proc_remove,
8986 .readdir = nfs4_proc_readdir,
8987 .mknod = nfs4_proc_mknod,
8988 .statfs = nfs4_proc_statfs,
8989 .fsinfo = nfs4_proc_fsinfo,
8990 .pathconf = nfs4_proc_pathconf,
8991 .set_capabilities = nfs4_server_capabilities,
8992 .decode_dirent = nfs4_decode_dirent,
8993 .pgio_rpc_prepare = nfs4_proc_pgio_rpc_prepare,
8994 .read_setup = nfs4_proc_read_setup,
8995 .read_done = nfs4_read_done,
8996 .write_setup = nfs4_proc_write_setup,
8997 .write_done = nfs4_write_done,
8998 .commit_setup = nfs4_proc_commit_setup,
8999 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
9000 .commit_done = nfs4_commit_done,
9001 .lock = nfs4_proc_lock,
9002 .clear_acl_cache = nfs4_zap_acl_attr,
9003 .close_context = nfs4_close_context,
9004 .open_context = nfs4_atomic_open,
9005 .have_delegation = nfs4_have_delegation,
9006 .return_delegation = nfs4_inode_return_delegation,
9007 .alloc_client = nfs4_alloc_client,
9008 .init_client = nfs4_init_client,
9009 .free_client = nfs4_free_client,
9010 .create_server = nfs4_create_server,
9011 .clone_server = nfs_clone_server,
9012 };
9013
9014 static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
9015 .name = XATTR_NAME_NFSV4_ACL,
9016 .list = nfs4_xattr_list_nfs4_acl,
9017 .get = nfs4_xattr_get_nfs4_acl,
9018 .set = nfs4_xattr_set_nfs4_acl,
9019 };
9020
9021 const struct xattr_handler *nfs4_xattr_handlers[] = {
9022 &nfs4_xattr_nfs4_acl_handler,
9023 #ifdef CONFIG_NFS_V4_SECURITY_LABEL
9024 &nfs4_xattr_nfs4_label_handler,
9025 #endif
9026 NULL
9027 };
9028
9029 /*
9030 * Local variables:
9031 * c-basic-offset: 8
9032 * End:
9033 */