nfsd4: lockt, release_lockowner should renew clients
fs/nfsd/nfs4state.c
1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/clnt.h>
44 #include "xdr4.h"
45 #include "vfs.h"
46 #include "current_stateid.h"
47
48 #include "netns.h"
49
50 #define NFSDDBG_FACILITY NFSDDBG_PROC
51
52 #define all_ones {{~0,~0},~0}
53 static const stateid_t one_stateid = {
54 .si_generation = ~0,
55 .si_opaque = all_ones,
56 };
57 static const stateid_t zero_stateid = {
58 /* all fields zero */
59 };
60 static const stateid_t currentstateid = {
61 .si_generation = 1,
62 };
63
64 static u64 current_sessionid = 1;
65
66 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
67 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
68 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
69
70 /* forward declarations */
71 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
72
73 /* Locking: */
74
75 /* Currently used for almost all code touching nfsv4 state: */
76 static DEFINE_MUTEX(client_mutex);
77
78 /*
79 * Currently used for the del_recall_lru and file hash table. In an
80 * effort to decrease the scope of the client_mutex, this spinlock may
81 * eventually cover more:
82 */
83 static DEFINE_SPINLOCK(recall_lock);
84
85 static struct kmem_cache *openowner_slab = NULL;
86 static struct kmem_cache *lockowner_slab = NULL;
87 static struct kmem_cache *file_slab = NULL;
88 static struct kmem_cache *stateid_slab = NULL;
89 static struct kmem_cache *deleg_slab = NULL;
90
91 void
92 nfs4_lock_state(void)
93 {
94 mutex_lock(&client_mutex);
95 }
96
97 static void free_session(struct kref *);
98
99 /* Must be called under the client_lock */
100 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
101 {
102 kref_put(&ses->se_ref, free_session);
103 }
104
105 static void nfsd4_get_session(struct nfsd4_session *ses)
106 {
107 kref_get(&ses->se_ref);
108 }
109
110 void
111 nfs4_unlock_state(void)
112 {
113 mutex_unlock(&client_mutex);
114 }
115
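/*
 * Simple multiplicative hash (x = x * 37 + byte) over an opaque byte
 * string such as an owner or client name; callers mask the result down
 * to their hash table size.
 */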
116 static inline u32
117 opaque_hashval(const void *ptr, int nbytes)
118 {
119 unsigned char *cptr = (unsigned char *) ptr;
120
121 u32 x = 0;
122 while (nbytes--) {
123 x *= 37;
124 x += *cptr++;
125 }
126 return x;
127 }
128
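/* Delegations that have been recalled, awaiting reaping; protected by recall_lock. */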
129 static struct list_head del_recall_lru;
130
131 static void nfsd4_free_file(struct nfs4_file *f)
132 {
133 kmem_cache_free(file_slab, f);
134 }
135
136 static inline void
137 put_nfs4_file(struct nfs4_file *fi)
138 {
139 if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) {
140 list_del(&fi->fi_hash);
141 spin_unlock(&recall_lock);
142 iput(fi->fi_inode);
143 nfsd4_free_file(fi);
144 }
145 }
146
147 static inline void
148 get_nfs4_file(struct nfs4_file *fi)
149 {
150 atomic_inc(&fi->fi_ref);
151 }
152
153 static int num_delegations;
154 unsigned int max_delegations;
155
156 /*
157 * Open owner state (share locks)
158 */
159
160 /* hash tables for lock and open owners */
161 #define OWNER_HASH_BITS 8
162 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
163 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
164
165 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
166 {
167 unsigned int ret;
168
169 ret = opaque_hashval(ownername->data, ownername->len);
170 ret += clientid;
171 return ret & OWNER_HASH_MASK;
172 }
173
174 /* hash table for nfs4_file */
175 #define FILE_HASH_BITS 8
176 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
177
178 static unsigned int file_hashval(struct inode *ino)
179 {
180 /* XXX: why are we hashing on inode pointer, anyway? */
181 return hash_ptr(ino, FILE_HASH_BITS);
182 }
183
184 static struct list_head file_hashtbl[FILE_HASH_SIZE];
185
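/*
 * Per-file access reference counting: fi_access[O_RDONLY/O_WRONLY] counts
 * how many stateids need read or write access to the file; a request for
 * O_RDWR takes one reference of each kind.
 */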
186 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
187 {
188 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
189 atomic_inc(&fp->fi_access[oflag]);
190 }
191
192 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
193 {
194 if (oflag == O_RDWR) {
195 __nfs4_file_get_access(fp, O_RDONLY);
196 __nfs4_file_get_access(fp, O_WRONLY);
197 } else
198 __nfs4_file_get_access(fp, oflag);
199 }
200
201 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
202 {
203 if (fp->fi_fds[oflag]) {
204 fput(fp->fi_fds[oflag]);
205 fp->fi_fds[oflag] = NULL;
206 }
207 }
208
209 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
210 {
211 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
212 nfs4_file_put_fd(fp, oflag);
213 /*
214 * It's also safe to get rid of the RDWR open *if*
215 * we no longer have need of the other kind of access
216 * or if we already have the other kind of open:
217 */
218 if (fp->fi_fds[1-oflag]
219 || atomic_read(&fp->fi_access[1 - oflag]) == 0)
220 nfs4_file_put_fd(fp, O_RDWR);
221 }
222 }
223
224 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
225 {
226 if (oflag == O_RDWR) {
227 __nfs4_file_put_access(fp, O_RDONLY);
228 __nfs4_file_put_access(fp, O_WRONLY);
229 } else
230 __nfs4_file_put_access(fp, oflag);
231 }
232
233 static inline int get_new_stid(struct nfs4_stid *stid)
234 {
235 static int min_stateid = 0;
236 struct idr *stateids = &stid->sc_client->cl_stateids;
237 int new_stid;
238 int error;
239
240 error = idr_get_new_above(stateids, stid, min_stateid, &new_stid);
241 /*
242 * Note: the necessary preallocation was done in
243 * nfs4_alloc_stateid(). The idr code caps the number of
244 * preallocations that can exist at a time, but the state lock
245 * prevents anyone from using ours before we get here:
246 */
247 WARN_ON_ONCE(error);
248 /*
249 * It shouldn't be a problem to reuse an opaque stateid value.
250 * I don't think it is for 4.1. But with 4.0 I worry that, for
251 * example, a stray write retransmission could be accepted by
252 * the server when it should have been rejected. Therefore,
253 * adopt a trick from the sctp code to attempt to maximize the
254 * amount of time until an id is reused, by ensuring they always
255 * "increase" (mod INT_MAX):
256 */
257
258 min_stateid = new_stid+1;
259 if (min_stateid == INT_MAX)
260 min_stateid = 0;
261 return new_stid;
262 }
263
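/*
 * Initialize the generic stateid fields: the type, the owning client, and
 * the (clientid, new idr id) pair that forms the "other" part of the
 * stateid as seen on the wire.
 */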
264 static void init_stid(struct nfs4_stid *stid, struct nfs4_client *cl, unsigned char type)
265 {
266 stateid_t *s = &stid->sc_stateid;
267 int new_id;
268
269 stid->sc_type = type;
270 stid->sc_client = cl;
271 s->si_opaque.so_clid = cl->cl_clientid;
272 new_id = get_new_stid(stid);
273 s->si_opaque.so_id = (u32)new_id;
274 /* Will be incremented before return to client: */
275 s->si_generation = 0;
276 }
277
278 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
279 {
280 struct idr *stateids = &cl->cl_stateids;
281
282 if (!idr_pre_get(stateids, GFP_KERNEL))
283 return NULL;
284 /*
285 * Note: if we fail here (or any time between now and the time
286 * we actually get the new idr), we won't need to undo the idr
287 * preallocation, since the idr code caps the number of
288 * preallocated entries.
289 */
290 return kmem_cache_alloc(slab, GFP_KERNEL);
291 }
292
293 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
294 {
295 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
296 }
297
298 static struct nfs4_delegation *
299 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh, u32 type)
300 {
301 struct nfs4_delegation *dp;
302 struct nfs4_file *fp = stp->st_file;
303
304 dprintk("NFSD alloc_init_deleg\n");
305 /*
306 * Major work on the lease subsystem (for example, to support
307 * callbacks on stat) will be required before we can support
308 * write delegations properly.
309 */
310 if (type != NFS4_OPEN_DELEGATE_READ)
311 return NULL;
312 if (fp->fi_had_conflict)
313 return NULL;
314 if (num_delegations > max_delegations)
315 return NULL;
316 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
317 if (dp == NULL)
318 return dp;
319 init_stid(&dp->dl_stid, clp, NFS4_DELEG_STID);
320 /*
321 * delegation seqid's are never incremented. The 4.1 special
322 * meaning of seqid 0 isn't meaningful, really, but let's avoid
323 * 0 anyway just for consistency and use 1:
324 */
325 dp->dl_stid.sc_stateid.si_generation = 1;
326 num_delegations++;
327 INIT_LIST_HEAD(&dp->dl_perfile);
328 INIT_LIST_HEAD(&dp->dl_perclnt);
329 INIT_LIST_HEAD(&dp->dl_recall_lru);
330 get_nfs4_file(fp);
331 dp->dl_file = fp;
332 dp->dl_type = type;
333 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
334 dp->dl_time = 0;
335 atomic_set(&dp->dl_count, 1);
336 nfsd4_init_callback(&dp->dl_recall);
337 return dp;
338 }
339
340 void
341 nfs4_put_delegation(struct nfs4_delegation *dp)
342 {
343 if (atomic_dec_and_test(&dp->dl_count)) {
344 dprintk("NFSD: freeing dp %p\n",dp);
345 put_nfs4_file(dp->dl_file);
346 kmem_cache_free(deleg_slab, dp);
347 num_delegations--;
348 }
349 }
350
351 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
352 {
353 if (atomic_dec_and_test(&fp->fi_delegees)) {
354 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
355 fp->fi_lease = NULL;
356 fput(fp->fi_deleg_file);
357 fp->fi_deleg_file = NULL;
358 }
359 }
360
361 static void unhash_stid(struct nfs4_stid *s)
362 {
363 struct idr *stateids = &s->sc_client->cl_stateids;
364
365 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
366 }
367
368 /* Called under the state lock. */
369 static void
370 unhash_delegation(struct nfs4_delegation *dp)
371 {
372 unhash_stid(&dp->dl_stid);
373 list_del_init(&dp->dl_perclnt);
374 spin_lock(&recall_lock);
375 list_del_init(&dp->dl_perfile);
376 list_del_init(&dp->dl_recall_lru);
377 spin_unlock(&recall_lock);
378 nfs4_put_deleg_lease(dp->dl_file);
379 nfs4_put_delegation(dp);
380 }
381
382 /*
383 * SETCLIENTID state
384 */
385
386 static unsigned int clientid_hashval(u32 id)
387 {
388 return id & CLIENT_HASH_MASK;
389 }
390
391 static unsigned int clientstr_hashval(const char *name)
392 {
393 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
394 }
395
396 /*
397 * We store the NONE, READ, WRITE, and BOTH bits separately in the
398 * st_{access,deny}_bmap field of the stateid, in order to track not
399 * only what share bits are currently in force, but also what
400 * combinations of share bits previous opens have used. This allows us
401 * to enforce the recommendation of rfc 3530 14.2.19 that the server
402 * return an error if the client attempts to downgrade to a combination
403 * of share bits not explicable by closing some of its previous opens.
404 *
405 * XXX: This enforcement is actually incomplete, since we don't keep
406 * track of access/deny bit combinations; so, e.g., we allow:
407 *
408 * OPEN allow read, deny write
409 * OPEN allow both, deny none
410 * DOWNGRADE allow read, deny none
411 *
412 * which we should reject.
413 */
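/*
 * Example: a stateid whose st_access_bmap has the READ (1) and BOTH (3)
 * bits set, from one open for read and one for read/write, collapses to
 * 1 | 3 == NFS4_SHARE_ACCESS_BOTH.
 */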
414 static unsigned int
415 bmap_to_share_mode(unsigned long bmap) {
416 int i;
417 unsigned int access = 0;
418
419 for (i = 1; i < 4; i++) {
420 if (test_bit(i, &bmap))
421 access |= i;
422 }
423 return access;
424 }
425
426 static bool
427 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
428 unsigned int access, deny;
429
430 access = bmap_to_share_mode(stp->st_access_bmap);
431 deny = bmap_to_share_mode(stp->st_deny_bmap);
432 if ((access & open->op_share_deny) || (deny & open->op_share_access))
433 return false;
434 return true;
435 }
436
437 /* set share access for a given stateid */
438 static inline void
439 set_access(u32 access, struct nfs4_ol_stateid *stp)
440 {
441 __set_bit(access, &stp->st_access_bmap);
442 }
443
444 /* clear share access for a given stateid */
445 static inline void
446 clear_access(u32 access, struct nfs4_ol_stateid *stp)
447 {
448 __clear_bit(access, &stp->st_access_bmap);
449 }
450
451 /* test whether a given stateid has access */
452 static inline bool
453 test_access(u32 access, struct nfs4_ol_stateid *stp)
454 {
455 return test_bit(access, &stp->st_access_bmap);
456 }
457
458 /* set share deny for a given stateid */
459 static inline void
460 set_deny(u32 access, struct nfs4_ol_stateid *stp)
461 {
462 __set_bit(access, &stp->st_deny_bmap);
463 }
464
465 /* clear share deny for a given stateid */
466 static inline void
467 clear_deny(u32 access, struct nfs4_ol_stateid *stp)
468 {
469 __clear_bit(access, &stp->st_deny_bmap);
470 }
471
472 /* test whether a given stateid is denying specific access */
473 static inline bool
474 test_deny(u32 access, struct nfs4_ol_stateid *stp)
475 {
476 return test_bit(access, &stp->st_deny_bmap);
477 }
478
479 static int nfs4_access_to_omode(u32 access)
480 {
481 switch (access & NFS4_SHARE_ACCESS_BOTH) {
482 case NFS4_SHARE_ACCESS_READ:
483 return O_RDONLY;
484 case NFS4_SHARE_ACCESS_WRITE:
485 return O_WRONLY;
486 case NFS4_SHARE_ACCESS_BOTH:
487 return O_RDWR;
488 }
489 WARN_ON_ONCE(1);
490 return O_RDONLY;
491 }
492
493 /* release all access and file references for a given stateid */
494 static void
495 release_all_access(struct nfs4_ol_stateid *stp)
496 {
497 int i;
498
499 for (i = 1; i < 4; i++) {
500 if (test_access(i, stp))
501 nfs4_file_put_access(stp->st_file,
502 nfs4_access_to_omode(i));
503 clear_access(i, stp);
504 }
505 }
506
507 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
508 {
509 list_del(&stp->st_perfile);
510 list_del(&stp->st_perstateowner);
511 }
512
513 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
514 {
515 release_all_access(stp);
516 put_nfs4_file(stp->st_file);
517 stp->st_file = NULL;
518 }
519
520 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
521 {
522 kmem_cache_free(stateid_slab, stp);
523 }
524
525 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
526 {
527 struct file *file;
528
529 unhash_generic_stateid(stp);
530 unhash_stid(&stp->st_stid);
531 file = find_any_file(stp->st_file);
532 if (file)
533 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
534 close_generic_stateid(stp);
535 free_generic_stateid(stp);
536 }
537
538 static void unhash_lockowner(struct nfs4_lockowner *lo)
539 {
540 struct nfs4_ol_stateid *stp;
541
542 list_del(&lo->lo_owner.so_strhash);
543 list_del(&lo->lo_perstateid);
544 list_del(&lo->lo_owner_ino_hash);
545 while (!list_empty(&lo->lo_owner.so_stateids)) {
546 stp = list_first_entry(&lo->lo_owner.so_stateids,
547 struct nfs4_ol_stateid, st_perstateowner);
548 release_lock_stateid(stp);
549 }
550 }
551
552 static void release_lockowner(struct nfs4_lockowner *lo)
553 {
554 unhash_lockowner(lo);
555 nfs4_free_lockowner(lo);
556 }
557
558 static void
559 release_stateid_lockowners(struct nfs4_ol_stateid *open_stp)
560 {
561 struct nfs4_lockowner *lo;
562
563 while (!list_empty(&open_stp->st_lockowners)) {
564 lo = list_entry(open_stp->st_lockowners.next,
565 struct nfs4_lockowner, lo_perstateid);
566 release_lockowner(lo);
567 }
568 }
569
570 static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
571 {
572 unhash_generic_stateid(stp);
573 release_stateid_lockowners(stp);
574 close_generic_stateid(stp);
575 }
576
577 static void release_open_stateid(struct nfs4_ol_stateid *stp)
578 {
579 unhash_open_stateid(stp);
580 unhash_stid(&stp->st_stid);
581 free_generic_stateid(stp);
582 }
583
584 static void unhash_openowner(struct nfs4_openowner *oo)
585 {
586 struct nfs4_ol_stateid *stp;
587
588 list_del(&oo->oo_owner.so_strhash);
589 list_del(&oo->oo_perclient);
590 while (!list_empty(&oo->oo_owner.so_stateids)) {
591 stp = list_first_entry(&oo->oo_owner.so_stateids,
592 struct nfs4_ol_stateid, st_perstateowner);
593 release_open_stateid(stp);
594 }
595 }
596
597 static void release_last_closed_stateid(struct nfs4_openowner *oo)
598 {
599 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
600
601 if (s) {
602 unhash_stid(&s->st_stid);
603 free_generic_stateid(s);
604 oo->oo_last_closed_stid = NULL;
605 }
606 }
607
608 static void release_openowner(struct nfs4_openowner *oo)
609 {
610 unhash_openowner(oo);
611 list_del(&oo->oo_close_lru);
612 release_last_closed_stateid(oo);
613 nfs4_free_openowner(oo);
614 }
615
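/*
 * The sessionid's sequence field is taken from a monotonically increasing
 * counter (see gen_sessionid()), so reducing it mod the table size spreads
 * sessions evenly across the hash buckets.
 */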
616 static inline int
617 hash_sessionid(struct nfs4_sessionid *sessionid)
618 {
619 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
620
621 return sid->sequence % SESSION_HASH_SIZE;
622 }
623
624 #ifdef NFSD_DEBUG
625 static inline void
626 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
627 {
628 u32 *ptr = (u32 *)(&sessionid->data[0]);
629 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
630 }
631 #else
632 static inline void
633 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
634 {
635 }
636 #endif
637
638
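/*
 * Build a session id from the owning clientid and a global sequence
 * counter, so every session created since boot gets a distinct id.
 */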
639 static void
640 gen_sessionid(struct nfsd4_session *ses)
641 {
642 struct nfs4_client *clp = ses->se_client;
643 struct nfsd4_sessionid *sid;
644
645 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
646 sid->clientid = clp->cl_clientid;
647 sid->sequence = current_sessionid++;
648 sid->reserved = 0;
649 }
650
651 /*
652 * The protocol defines ca_maxresponsesize_cached to include the size of
653 * the rpc header, but all we need to cache is the data starting after
654 * the end of the initial SEQUENCE operation--the rest we regenerate
655 * each time. Therefore we can advertise a ca_maxresponsesize_cached
656 * value that is the number of bytes in our cache plus a few additional
657 * bytes. In order to stay on the safe side, and not promise more than
658 * we can cache, those additional bytes must be the minimum possible: 24
659 * bytes of rpc header (xid through accept state, with AUTH_NULL
660 * verifier), 12 for the compound header (with zero-length tag), and 44
661 * for the SEQUENCE op response:
662 */
663 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
664
665 static void
666 free_session_slots(struct nfsd4_session *ses)
667 {
668 int i;
669
670 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
671 kfree(ses->se_slots[i]);
672 }
673
674 /*
675 * We don't actually need to cache the rpc and session headers, so we
676 * can allocate a little less for each slot:
677 */
678 static inline int slot_bytes(struct nfsd4_channel_attrs *ca)
679 {
680 return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
681 }
682
683 static int nfsd4_sanitize_slot_size(u32 size)
684 {
685 size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */
686 size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE);
687
688 return size;
689 }
690
691 /*
692 * XXX: If we run out of reserved DRC memory we could (up to a point)
693 * re-negotiate active sessions and reduce their slot usage to make
694 * room for new connections. For now we just fail the create session.
695 */
696 static int nfsd4_get_drc_mem(int slotsize, u32 num)
697 {
698 int avail;
699
700 num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION);
701
702 spin_lock(&nfsd_drc_lock);
703 avail = min_t(int, NFSD_MAX_MEM_PER_SESSION,
704 nfsd_drc_max_mem - nfsd_drc_mem_used);
705 num = min_t(int, num, avail / slotsize);
706 nfsd_drc_mem_used += num * slotsize;
707 spin_unlock(&nfsd_drc_lock);
708
709 return num;
710 }
711
712 static void nfsd4_put_drc_mem(int slotsize, int num)
713 {
714 spin_lock(&nfsd_drc_lock);
715 nfsd_drc_mem_used -= slotsize * num;
716 spin_unlock(&nfsd_drc_lock);
717 }
718
719 static struct nfsd4_session *__alloc_session(int slotsize, int numslots)
720 {
721 struct nfsd4_session *new;
722 int mem, i;
723
724 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
725 + sizeof(struct nfsd4_session) > PAGE_SIZE);
726 mem = numslots * sizeof(struct nfsd4_slot *);
727
728 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
729 if (!new)
730 return NULL;
731 /* allocate each struct nfsd4_slot and data cache in one piece */
732 for (i = 0; i < numslots; i++) {
733 mem = sizeof(struct nfsd4_slot) + slotsize;
734 new->se_slots[i] = kzalloc(mem, GFP_KERNEL);
735 if (!new->se_slots[i])
736 goto out_free;
737 }
738 return new;
739 out_free:
740 while (i--)
741 kfree(new->se_slots[i]);
742 kfree(new);
743 return NULL;
744 }
745
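/*
 * Record the fore channel attributes we actually grant: the slot count and
 * cached-response size we allocated, with the request/response sizes and
 * ops-per-compound clamped to server-wide limits.
 */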
746 static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, struct nfsd4_channel_attrs *req, int numslots, int slotsize)
747 {
748 u32 maxrpc = nfsd_serv->sv_max_mesg;
749
750 new->maxreqs = numslots;
751 new->maxresp_cached = min_t(u32, req->maxresp_cached,
752 slotsize + NFSD_MIN_HDR_SEQ_SZ);
753 new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc);
754 new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc);
755 new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND);
756 }
757
758 static void free_conn(struct nfsd4_conn *c)
759 {
760 svc_xprt_put(c->cn_xprt);
761 kfree(c);
762 }
763
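/*
 * The transport underlying this connection went down: unhash it from its
 * session and re-probe the callback channel, which may now need a
 * different connection.
 */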
764 static void nfsd4_conn_lost(struct svc_xpt_user *u)
765 {
766 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
767 struct nfs4_client *clp = c->cn_session->se_client;
768
769 spin_lock(&clp->cl_lock);
770 if (!list_empty(&c->cn_persession)) {
771 list_del(&c->cn_persession);
772 free_conn(c);
773 }
774 spin_unlock(&clp->cl_lock);
775 nfsd4_probe_callback(clp);
776 }
777
778 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
779 {
780 struct nfsd4_conn *conn;
781
782 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
783 if (!conn)
784 return NULL;
785 svc_xprt_get(rqstp->rq_xprt);
786 conn->cn_xprt = rqstp->rq_xprt;
787 conn->cn_flags = flags;
788 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
789 return conn;
790 }
791
792 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
793 {
794 conn->cn_session = ses;
795 list_add(&conn->cn_persession, &ses->se_conns);
796 }
797
798 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
799 {
800 struct nfs4_client *clp = ses->se_client;
801
802 spin_lock(&clp->cl_lock);
803 __nfsd4_hash_conn(conn, ses);
804 spin_unlock(&clp->cl_lock);
805 }
806
807 static int nfsd4_register_conn(struct nfsd4_conn *conn)
808 {
809 conn->cn_xpt_user.callback = nfsd4_conn_lost;
810 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
811 }
812
813 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
814 {
815 int ret;
816
817 nfsd4_hash_conn(conn, ses);
818 ret = nfsd4_register_conn(conn);
819 if (ret)
820 /* oops; xprt is already down: */
821 nfsd4_conn_lost(&conn->cn_xpt_user);
822 if (conn->cn_flags & NFS4_CDFC4_BACK) {
823 /* callback channel may be back up */
824 nfsd4_probe_callback(ses->se_client);
825 }
826 }
827
828 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
829 {
830 u32 dir = NFS4_CDFC4_FORE;
831
832 if (cses->flags & SESSION4_BACK_CHAN)
833 dir |= NFS4_CDFC4_BACK;
834 return alloc_conn(rqstp, dir);
835 }
836
837 /* must be called under client_lock */
838 static void nfsd4_del_conns(struct nfsd4_session *s)
839 {
840 struct nfs4_client *clp = s->se_client;
841 struct nfsd4_conn *c;
842
843 spin_lock(&clp->cl_lock);
844 while (!list_empty(&s->se_conns)) {
845 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
846 list_del_init(&c->cn_persession);
847 spin_unlock(&clp->cl_lock);
848
849 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
850 free_conn(c);
851
852 spin_lock(&clp->cl_lock);
853 }
854 spin_unlock(&clp->cl_lock);
855 }
856
857 static void __free_session(struct nfsd4_session *ses)
858 {
859 nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs);
860 free_session_slots(ses);
861 kfree(ses);
862 }
863
864 static void free_session(struct kref *kref)
865 {
866 struct nfsd4_session *ses;
867 struct nfsd_net *nn;
868
869 ses = container_of(kref, struct nfsd4_session, se_ref);
870 nn = net_generic(ses->se_client->net, nfsd_net_id);
871
872 lockdep_assert_held(&nn->client_lock);
873 nfsd4_del_conns(ses);
874 __free_session(ses);
875 }
876
877 void nfsd4_put_session(struct nfsd4_session *ses)
878 {
879 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
880
881 spin_lock(&nn->client_lock);
882 nfsd4_put_session_locked(ses);
883 spin_unlock(&nn->client_lock);
884 }
885
886 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan)
887 {
888 struct nfsd4_session *new;
889 int numslots, slotsize;
890 /*
891 * Note that decreasing the slot size below the client's request may
892 * make it difficult for the client to function correctly, whereas
893 * decreasing the number of slots will (just?) affect
894 * performance. When short on memory we therefore prefer to
895 * decrease the number of slots instead of their size.
896 */
897 slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached);
898 numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs);
899 if (numslots < 1)
900 return NULL;
901
902 new = __alloc_session(slotsize, numslots);
903 if (!new) {
904 nfsd4_put_drc_mem(slotsize, fchan->maxreqs);
905 return NULL;
906 }
907 init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize);
908 return new;
909 }
910
911 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
912 {
913 int idx;
914 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
915
916 new->se_client = clp;
917 gen_sessionid(new);
918
919 INIT_LIST_HEAD(&new->se_conns);
920
921 new->se_cb_seq_nr = 1;
922 new->se_flags = cses->flags;
923 new->se_cb_prog = cses->callback_prog;
924 new->se_cb_sec = cses->cb_sec;
925 kref_init(&new->se_ref);
926 idx = hash_sessionid(&new->se_sessionid);
927 spin_lock(&nn->client_lock);
928 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
929 spin_lock(&clp->cl_lock);
930 list_add(&new->se_perclnt, &clp->cl_sessions);
931 spin_unlock(&clp->cl_lock);
932 spin_unlock(&nn->client_lock);
933
934 if (cses->flags & SESSION4_BACK_CHAN) {
935 struct sockaddr *sa = svc_addr(rqstp);
936 /*
937 * This is a little silly; with sessions there's no real
938 * use for the callback address. Use the peer address
939 * as a reasonable default for now, but consider fixing
940 * the rpc client not to require an address in the
941 * future:
942 */
943 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
944 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
945 }
946 }
947
948 /* caller must hold client_lock */
949 static struct nfsd4_session *
950 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
951 {
952 struct nfsd4_session *elem;
953 int idx;
954 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
955
956 dump_sessionid(__func__, sessionid);
957 idx = hash_sessionid(sessionid);
958 /* Search in the appropriate list */
959 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
960 if (!memcmp(elem->se_sessionid.data, sessionid->data,
961 NFS4_MAX_SESSIONID_LEN)) {
962 return elem;
963 }
964 }
965
966 dprintk("%s: session not found\n", __func__);
967 return NULL;
968 }
969
970 /* caller must hold client_lock */
971 static void
972 unhash_session(struct nfsd4_session *ses)
973 {
974 list_del(&ses->se_hash);
975 spin_lock(&ses->se_client->cl_lock);
976 list_del(&ses->se_perclnt);
977 spin_unlock(&ses->se_client->cl_lock);
978 }
979
980 /* must be called under the client_lock */
981 static inline void
982 renew_client_locked(struct nfs4_client *clp)
983 {
984 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
985
986 if (is_client_expired(clp)) {
987 WARN_ON(1);
988 printk("%s: client (clientid %08x/%08x) already expired\n",
989 __func__,
990 clp->cl_clientid.cl_boot,
991 clp->cl_clientid.cl_id);
992 return;
993 }
994
995 dprintk("renewing client (clientid %08x/%08x)\n",
996 clp->cl_clientid.cl_boot,
997 clp->cl_clientid.cl_id);
998 list_move_tail(&clp->cl_lru, &nn->client_lru);
999 clp->cl_time = get_seconds();
1000 }
1001
1002 static inline void
1003 renew_client(struct nfs4_client *clp)
1004 {
1005 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1006
1007 spin_lock(&nn->client_lock);
1008 renew_client_locked(clp);
1009 spin_unlock(&nn->client_lock);
1010 }
1011
1012 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1013 static int
1014 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1015 {
1016 if (clid->cl_boot == nn->boot_time)
1017 return 0;
1018 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1019 clid->cl_boot, clid->cl_id, nn->boot_time);
1020 return 1;
1021 }
1022
1023 /*
1024 * XXX Should we use a slab cache ?
1025 * This type of memory management is somewhat inefficient, but we use it
1026 * anyway since SETCLIENTID is not a common operation.
1027 */
1028 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1029 {
1030 struct nfs4_client *clp;
1031
1032 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1033 if (clp == NULL)
1034 return NULL;
1035 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1036 if (clp->cl_name.data == NULL) {
1037 kfree(clp);
1038 return NULL;
1039 }
1040 clp->cl_name.len = name.len;
1041 return clp;
1042 }
1043
1044 static inline void
1045 free_client(struct nfs4_client *clp)
1046 {
1047 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1048
1049 lockdep_assert_held(&nn->client_lock);
1050 while (!list_empty(&clp->cl_sessions)) {
1051 struct nfsd4_session *ses;
1052 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1053 se_perclnt);
1054 list_del(&ses->se_perclnt);
1055 nfsd4_put_session_locked(ses);
1056 }
1057 free_svc_cred(&clp->cl_cred);
1058 kfree(clp->cl_name.data);
1059 kfree(clp);
1060 }
1061
1062 void
1063 release_session_client(struct nfsd4_session *session)
1064 {
1065 struct nfs4_client *clp = session->se_client;
1066 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1067
1068 if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
1069 return;
1070 if (is_client_expired(clp)) {
1071 free_client(clp);
1072 session->se_client = NULL;
1073 } else
1074 renew_client_locked(clp);
1075 spin_unlock(&nn->client_lock);
1076 }
1077
1078 /* must be called under the client_lock */
1079 static inline void
1080 unhash_client_locked(struct nfs4_client *clp)
1081 {
1082 struct nfsd4_session *ses;
1083
1084 mark_client_expired(clp);
1085 list_del(&clp->cl_lru);
1086 spin_lock(&clp->cl_lock);
1087 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1088 list_del_init(&ses->se_hash);
1089 spin_unlock(&clp->cl_lock);
1090 }
1091
1092 static void
1093 destroy_client(struct nfs4_client *clp)
1094 {
1095 struct nfs4_openowner *oo;
1096 struct nfs4_delegation *dp;
1097 struct list_head reaplist;
1098 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1099
1100 INIT_LIST_HEAD(&reaplist);
1101 spin_lock(&recall_lock);
1102 while (!list_empty(&clp->cl_delegations)) {
1103 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1104 list_del_init(&dp->dl_perclnt);
1105 list_move(&dp->dl_recall_lru, &reaplist);
1106 }
1107 spin_unlock(&recall_lock);
1108 while (!list_empty(&reaplist)) {
1109 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1110 unhash_delegation(dp);
1111 }
1112 while (!list_empty(&clp->cl_openowners)) {
1113 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1114 release_openowner(oo);
1115 }
1116 nfsd4_shutdown_callback(clp);
1117 if (clp->cl_cb_conn.cb_xprt)
1118 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1119 list_del(&clp->cl_idhash);
1120 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1121 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1122 else
1123 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1124 spin_lock(&nn->client_lock);
1125 unhash_client_locked(clp);
1126 if (atomic_read(&clp->cl_refcount) == 0)
1127 free_client(clp);
1128 spin_unlock(&nn->client_lock);
1129 }
1130
1131 static void expire_client(struct nfs4_client *clp)
1132 {
1133 nfsd4_client_record_remove(clp);
1134 destroy_client(clp);
1135 }
1136
1137 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1138 {
1139 memcpy(target->cl_verifier.data, source->data,
1140 sizeof(target->cl_verifier.data));
1141 }
1142
1143 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1144 {
1145 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1146 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1147 }
1148
1149 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1150 {
1151 if (source->cr_principal) {
1152 target->cr_principal =
1153 kstrdup(source->cr_principal, GFP_KERNEL);
1154 if (target->cr_principal == NULL)
1155 return -ENOMEM;
1156 } else
1157 target->cr_principal = NULL;
1158 target->cr_flavor = source->cr_flavor;
1159 target->cr_uid = source->cr_uid;
1160 target->cr_gid = source->cr_gid;
1161 target->cr_group_info = source->cr_group_info;
1162 get_group_info(target->cr_group_info);
1163 return 0;
1164 }
1165
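/*
 * Total order on opaque blobs: compare lengths first, then contents.
 * Used as the key comparison for the per-net client-name rbtrees.
 */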
1166 static long long
1167 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1168 {
1169 long long res;
1170
1171 res = o1->len - o2->len;
1172 if (res)
1173 return res;
1174 return (long long)memcmp(o1->data, o2->data, o1->len);
1175 }
1176
1177 static int same_name(const char *n1, const char *n2)
1178 {
1179 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1180 }
1181
1182 static int
1183 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1184 {
1185 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1186 }
1187
1188 static int
1189 same_clid(clientid_t *cl1, clientid_t *cl2)
1190 {
1191 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1192 }
1193
1194 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1195 {
1196 int i;
1197
1198 if (g1->ngroups != g2->ngroups)
1199 return false;
1200 for (i=0; i<g1->ngroups; i++)
1201 if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
1202 return false;
1203 return true;
1204 }
1205
1206 /*
1207 * RFC 3530 language requires clid_inuse be returned when the
1208 * "principal" associated with a requests differs from that previously
1209 * used. We use uid, gid's, and gss principal string as our best
1210 * approximation. We also don't want to allow non-gss use of a client
1211 * established using gss: in theory cr_principal should catch that
1212 * change, but in practice cr_principal can be null even in the gss case
1213 * since gssd doesn't always pass down a principal string.
1214 */
1215 static bool is_gss_cred(struct svc_cred *cr)
1216 {
1217 /* Is cr_flavor one of the gss "pseudoflavors"?: */
1218 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1219 }
1220
1221
1222 static bool
1223 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1224 {
1225 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1226 || (cr1->cr_uid != cr2->cr_uid)
1227 || (cr1->cr_gid != cr2->cr_gid)
1228 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1229 return false;
1230 if (cr1->cr_principal == cr2->cr_principal)
1231 return true;
1232 if (!cr1->cr_principal || !cr2->cr_principal)
1233 return false;
1234 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1235 }
1236
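/* A clientid pairs the server instance's boot time with a simple counter. */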
1237 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1238 {
1239 static u32 current_clientid = 1;
1240
1241 clp->cl_clientid.cl_boot = nn->boot_time;
1242 clp->cl_clientid.cl_id = current_clientid++;
1243 }
1244
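/* Best-effort unique confirm verifier: the current time plus a static counter. */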
1245 static void gen_confirm(struct nfs4_client *clp)
1246 {
1247 __be32 verf[2];
1248 static u32 i;
1249
1250 verf[0] = (__be32)get_seconds();
1251 verf[1] = (__be32)i++;
1252 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1253 }
1254
1255 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1256 {
1257 return idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1258 }
1259
1260 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1261 {
1262 struct nfs4_stid *s;
1263
1264 s = find_stateid(cl, t);
1265 if (!s)
1266 return NULL;
1267 if (typemask & s->sc_type)
1268 return s;
1269 return NULL;
1270 }
1271
1272 static struct nfs4_client *create_client(struct xdr_netobj name,
1273 struct svc_rqst *rqstp, nfs4_verifier *verf)
1274 {
1275 struct nfs4_client *clp;
1276 struct sockaddr *sa = svc_addr(rqstp);
1277 int ret;
1278 struct net *net = SVC_NET(rqstp);
1279 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1280
1281 clp = alloc_client(name);
1282 if (clp == NULL)
1283 return NULL;
1284
1285 INIT_LIST_HEAD(&clp->cl_sessions);
1286 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1287 if (ret) {
1288 spin_lock(&nn->client_lock);
1289 free_client(clp);
1290 spin_unlock(&nn->client_lock);
1291 return NULL;
1292 }
1293 idr_init(&clp->cl_stateids);
1294 atomic_set(&clp->cl_refcount, 0);
1295 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1296 INIT_LIST_HEAD(&clp->cl_idhash);
1297 INIT_LIST_HEAD(&clp->cl_openowners);
1298 INIT_LIST_HEAD(&clp->cl_delegations);
1299 INIT_LIST_HEAD(&clp->cl_lru);
1300 INIT_LIST_HEAD(&clp->cl_callbacks);
1301 spin_lock_init(&clp->cl_lock);
1302 nfsd4_init_callback(&clp->cl_cb_null);
1303 clp->cl_time = get_seconds();
1304 clear_bit(0, &clp->cl_cb_slot_busy);
1305 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1306 copy_verf(clp, verf);
1307 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1308 gen_confirm(clp);
1309 clp->cl_cb_session = NULL;
1310 clp->net = net;
1311 return clp;
1312 }
1313
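/*
 * Insert a client into one of the per-net name trees (conf_name_tree or
 * unconf_name_tree), ordered by compare_blob() on the client name.
 */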
1314 static void
1315 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1316 {
1317 struct rb_node **new = &(root->rb_node), *parent = NULL;
1318 struct nfs4_client *clp;
1319
1320 while (*new) {
1321 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1322 parent = *new;
1323
1324 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1325 new = &((*new)->rb_left);
1326 else
1327 new = &((*new)->rb_right);
1328 }
1329
1330 rb_link_node(&new_clp->cl_namenode, parent, new);
1331 rb_insert_color(&new_clp->cl_namenode, root);
1332 }
1333
1334 static struct nfs4_client *
1335 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1336 {
1337 long long cmp;
1338 struct rb_node *node = root->rb_node;
1339 struct nfs4_client *clp;
1340
1341 while (node) {
1342 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1343 cmp = compare_blob(&clp->cl_name, name);
1344 if (cmp > 0)
1345 node = node->rb_left;
1346 else if (cmp < 0)
1347 node = node->rb_right;
1348 else
1349 return clp;
1350 }
1351 return NULL;
1352 }
1353
1354 static void
1355 add_to_unconfirmed(struct nfs4_client *clp)
1356 {
1357 unsigned int idhashval;
1358 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1359
1360 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1361 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1362 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1363 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1364 renew_client(clp);
1365 }
1366
1367 static void
1368 move_to_confirmed(struct nfs4_client *clp)
1369 {
1370 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1371 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1372
1373 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1374 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1375 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1376 add_clp_to_name_tree(clp, &nn->conf_name_tree);
1377 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1378 renew_client(clp);
1379 }
1380
1381 static struct nfs4_client *
1382 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1383 {
1384 struct nfs4_client *clp;
1385 unsigned int idhashval = clientid_hashval(clid->cl_id);
1386
1387 list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) {
1388 if (same_clid(&clp->cl_clientid, clid)) {
1389 if ((bool)clp->cl_minorversion != sessions)
1390 return NULL;
1391 renew_client(clp);
1392 return clp;
1393 }
1394 }
1395 return NULL;
1396 }
1397
1398 static struct nfs4_client *
1399 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1400 {
1401 struct nfs4_client *clp;
1402 unsigned int idhashval = clientid_hashval(clid->cl_id);
1403
1404 list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) {
1405 if (same_clid(&clp->cl_clientid, clid)) {
1406 if ((bool)clp->cl_minorversion != sessions)
1407 return NULL;
1408 return clp;
1409 }
1410 }
1411 return NULL;
1412 }
1413
1414 static bool clp_used_exchangeid(struct nfs4_client *clp)
1415 {
1416 return clp->cl_exchange_flags != 0;
1417 }
1418
1419 static struct nfs4_client *
1420 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1421 {
1422 return find_clp_in_name_tree(name, &nn->conf_name_tree);
1423 }
1424
1425 static struct nfs4_client *
1426 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1427 {
1428 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1429 }
1430
1431 static void
1432 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1433 {
1434 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1435 struct sockaddr *sa = svc_addr(rqstp);
1436 u32 scopeid = rpc_get_scope_id(sa);
1437 unsigned short expected_family;
1438
1439 /* Currently, we only support tcp and tcp6 for the callback channel */
1440 if (se->se_callback_netid_len == 3 &&
1441 !memcmp(se->se_callback_netid_val, "tcp", 3))
1442 expected_family = AF_INET;
1443 else if (se->se_callback_netid_len == 4 &&
1444 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1445 expected_family = AF_INET6;
1446 else
1447 goto out_err;
1448
1449 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1450 se->se_callback_addr_len,
1451 (struct sockaddr *)&conn->cb_addr,
1452 sizeof(conn->cb_addr));
1453
1454 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1455 goto out_err;
1456
1457 if (conn->cb_addr.ss_family == AF_INET6)
1458 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1459
1460 conn->cb_prog = se->se_callback_prog;
1461 conn->cb_ident = se->se_callback_ident;
1462 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1463 return;
1464 out_err:
1465 conn->cb_addr.ss_family = AF_UNSPEC;
1466 conn->cb_addrlen = 0;
1467 dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1468 "will not receive delegations\n",
1469 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1470
1471 return;
1472 }
1473
1474 /*
1475 * Cache a reply. nfsd4_check_drc_limit() has bounded the cache size.
1476 */
1477 void
1478 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1479 {
1480 struct nfsd4_slot *slot = resp->cstate.slot;
1481 unsigned int base;
1482
1483 dprintk("--> %s slot %p\n", __func__, slot);
1484
1485 slot->sl_opcnt = resp->opcnt;
1486 slot->sl_status = resp->cstate.status;
1487
1488 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1489 if (nfsd4_not_cached(resp)) {
1490 slot->sl_datalen = 0;
1491 return;
1492 }
1493 slot->sl_datalen = (char *)resp->p - (char *)resp->cstate.datap;
1494 base = (char *)resp->cstate.datap -
1495 (char *)resp->xbuf->head[0].iov_base;
1496 if (read_bytes_from_xdr_buf(resp->xbuf, base, slot->sl_data,
1497 slot->sl_datalen))
1498 WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
1499 return;
1500 }
1501
1502 /*
1503 * Encode the replay sequence operation from the slot values.
1504 * If cachethis is FALSE encode the uncached rep error on the next
1505 * operation which sets resp->p and increments resp->opcnt for
1506 * nfs4svc_encode_compoundres.
1507 *
1508 */
1509 static __be32
1510 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1511 struct nfsd4_compoundres *resp)
1512 {
1513 struct nfsd4_op *op;
1514 struct nfsd4_slot *slot = resp->cstate.slot;
1515
1516 /* Encode the replayed sequence operation */
1517 op = &args->ops[resp->opcnt - 1];
1518 nfsd4_encode_operation(resp, op);
1519
1520 /* Return nfserr_retry_uncached_rep in next operation. */
1521 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1522 op = &args->ops[resp->opcnt++];
1523 op->status = nfserr_retry_uncached_rep;
1524 nfsd4_encode_operation(resp, op);
1525 }
1526 return op->status;
1527 }
1528
1529 /*
1530 * The sequence operation is not cached because we can use the slot and
1531 * session values.
1532 */
1533 __be32
1534 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
1535 struct nfsd4_sequence *seq)
1536 {
1537 struct nfsd4_slot *slot = resp->cstate.slot;
1538 __be32 status;
1539
1540 dprintk("--> %s slot %p\n", __func__, slot);
1541
1542 /* Either returns 0 or nfserr_retry_uncached */
1543 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
1544 if (status == nfserr_retry_uncached_rep)
1545 return status;
1546
1547 /* The sequence operation has been encoded, cstate->datap set. */
1548 memcpy(resp->cstate.datap, slot->sl_data, slot->sl_datalen);
1549
1550 resp->opcnt = slot->sl_opcnt;
1551 resp->p = resp->cstate.datap + XDR_QUADLEN(slot->sl_datalen);
1552 status = slot->sl_status;
1553
1554 return status;
1555 }
1556
1557 /*
1558 * Set the exchange_id flags returned by the server.
1559 */
1560 static void
1561 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1562 {
1563 /* pNFS is not supported */
1564 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1565
1566 /* Referrals are supported, Migration is not. */
1567 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1568
1569 /* set the wire flags to return to client. */
1570 clid->flags = new->cl_exchange_flags;
1571 }
1572
1573 static bool client_has_state(struct nfs4_client *clp)
1574 {
1575 /*
1576 * Note clp->cl_openowners check isn't quite right: there's no
1577 * need to count owners without stateid's.
1578 *
1579 * Also note we should probably be using this in 4.0 case too.
1580 */
1581 return !list_empty(&clp->cl_openowners)
1582 || !list_empty(&clp->cl_delegations)
1583 || !list_empty(&clp->cl_sessions);
1584 }
1585
1586 __be32
1587 nfsd4_exchange_id(struct svc_rqst *rqstp,
1588 struct nfsd4_compound_state *cstate,
1589 struct nfsd4_exchange_id *exid)
1590 {
1591 struct nfs4_client *unconf, *conf, *new;
1592 __be32 status;
1593 char addr_str[INET6_ADDRSTRLEN];
1594 nfs4_verifier verf = exid->verifier;
1595 struct sockaddr *sa = svc_addr(rqstp);
1596 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
1597 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1598
1599 rpc_ntop(sa, addr_str, sizeof(addr_str));
1600 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
1601 "ip_addr=%s flags %x, spa_how %d\n",
1602 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
1603 addr_str, exid->flags, exid->spa_how);
1604
1605 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
1606 return nfserr_inval;
1607
1608 /* Currently only support SP4_NONE */
1609 switch (exid->spa_how) {
1610 case SP4_NONE:
1611 break;
1612 default: /* checked by xdr code */
1613 WARN_ON_ONCE(1);
1614 case SP4_SSV:
1615 case SP4_MACH_CRED:
1616 return nfserr_serverfault; /* no excuse :-/ */
1617 }
1618
1619 /* Cases below refer to rfc 5661 section 18.35.4: */
1620 nfs4_lock_state();
1621 conf = find_confirmed_client_by_name(&exid->clname, nn);
1622 if (conf) {
1623 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
1624 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
1625
1626 if (update) {
1627 if (!clp_used_exchangeid(conf)) { /* buggy client */
1628 status = nfserr_inval;
1629 goto out;
1630 }
1631 if (!creds_match) { /* case 9 */
1632 status = nfserr_perm;
1633 goto out;
1634 }
1635 if (!verfs_match) { /* case 8 */
1636 status = nfserr_not_same;
1637 goto out;
1638 }
1639 /* case 6 */
1640 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1641 new = conf;
1642 goto out_copy;
1643 }
1644 if (!creds_match) { /* case 3 */
1645 if (client_has_state(conf)) {
1646 status = nfserr_clid_inuse;
1647 goto out;
1648 }
1649 expire_client(conf);
1650 goto out_new;
1651 }
1652 if (verfs_match) { /* case 2 */
1653 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1654 new = conf;
1655 goto out_copy;
1656 }
1657 /* case 5, client reboot */
1658 goto out_new;
1659 }
1660
1661 if (update) { /* case 7 */
1662 status = nfserr_noent;
1663 goto out;
1664 }
1665
1666 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
1667 if (unconf) /* case 4, possible retry or client restart */
1668 expire_client(unconf);
1669
1670 /* case 1 (normal case) */
1671 out_new:
1672 new = create_client(exid->clname, rqstp, &verf);
1673 if (new == NULL) {
1674 status = nfserr_jukebox;
1675 goto out;
1676 }
1677 new->cl_minorversion = 1;
1678
1679 gen_clid(new, nn);
1680 add_to_unconfirmed(new);
1681 out_copy:
1682 exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1683 exid->clientid.cl_id = new->cl_clientid.cl_id;
1684
1685 exid->seqid = new->cl_cs_slot.sl_seqid + 1;
1686 nfsd4_set_ex_flags(new, exid);
1687
1688 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1689 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
1690 status = nfs_ok;
1691
1692 out:
1693 nfs4_unlock_state();
1694 return status;
1695 }
1696
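/*
 * Slot seqid rules: for an idle slot with slot_seqid == 7, a request with
 * seqid 8 is the next new request, seqid 7 is answered from the replay
 * cache, and anything else is misordered; while the slot is busy, a
 * retransmission of seqid 7 gets NFS4ERR_DELAY instead.
 */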
1697 static __be32
1698 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1699 {
1700 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1701 slot_seqid);
1702
1703 /* The slot is in use, and no response has been sent. */
1704 if (slot_inuse) {
1705 if (seqid == slot_seqid)
1706 return nfserr_jukebox;
1707 else
1708 return nfserr_seq_misordered;
1709 }
1710 /* Note unsigned 32-bit arithmetic handles wraparound: */
1711 if (likely(seqid == slot_seqid + 1))
1712 return nfs_ok;
1713 if (seqid == slot_seqid)
1714 return nfserr_replay_cache;
1715 return nfserr_seq_misordered;
1716 }
1717
1718 /*
1719 * Cache the create session result into the create session single DRC
1720 * slot cache by saving the xdr structure. sl_seqid has been set.
1721 * Do this for solo or embedded create session operations.
1722 */
1723 static void
1724 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1725 struct nfsd4_clid_slot *slot, __be32 nfserr)
1726 {
1727 slot->sl_status = nfserr;
1728 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1729 }
1730
1731 static __be32
1732 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1733 struct nfsd4_clid_slot *slot)
1734 {
1735 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1736 return slot->sl_status;
1737 }
1738
1739 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1740 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1741 1 + /* MIN tag is length with zero, only length */ \
1742 3 + /* version, opcount, opcode */ \
1743 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1744 /* seqid, slotID, slotID, cache */ \
1745 4 ) * sizeof(__be32))
1746
1747 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1748 2 + /* verifier: AUTH_NULL, length 0 */\
1749 1 + /* status */ \
1750 1 + /* MIN tag is length with zero, only length */ \
1751 3 + /* opcount, opcode, opstatus*/ \
1752 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1753 /* seqid, slotID, slotID, slotID, status */ \
1754 5 ) * sizeof(__be32))
1755
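/*
 * Reject fore channel attributes whose maximum request or response size is
 * too small to carry even a bare SEQUENCE compound (see the
 * NFSD_MIN_{REQ,RESP}_HDR_SEQ_SZ definitions above).
 */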
1756 static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1757 {
1758 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1759 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
1760 }
1761
1762 __be32
1763 nfsd4_create_session(struct svc_rqst *rqstp,
1764 struct nfsd4_compound_state *cstate,
1765 struct nfsd4_create_session *cr_ses)
1766 {
1767 struct sockaddr *sa = svc_addr(rqstp);
1768 struct nfs4_client *conf, *unconf;
1769 struct nfsd4_session *new;
1770 struct nfsd4_conn *conn;
1771 struct nfsd4_clid_slot *cs_slot = NULL;
1772 __be32 status = 0;
1773 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1774
1775 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1776 return nfserr_inval;
1777 if (check_forechannel_attrs(cr_ses->fore_channel))
1778 return nfserr_toosmall;
1779 new = alloc_session(&cr_ses->fore_channel);
1780 if (!new)
1781 return nfserr_jukebox;
1782 status = nfserr_jukebox;
1783 conn = alloc_conn_from_crses(rqstp, cr_ses);
1784 if (!conn)
1785 goto out_free_session;
1786
1787 nfs4_lock_state();
1788 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
1789 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
1790
1791 if (conf) {
1792 cs_slot = &conf->cl_cs_slot;
1793 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1794 if (status == nfserr_replay_cache) {
1795 status = nfsd4_replay_create_session(cr_ses, cs_slot);
1796 goto out_free_conn;
1797 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
1798 status = nfserr_seq_misordered;
1799 goto out_free_conn;
1800 }
1801 } else if (unconf) {
1802 struct nfs4_client *old;
1803 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
1804 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
1805 status = nfserr_clid_inuse;
1806 goto out_free_conn;
1807 }
1808 cs_slot = &unconf->cl_cs_slot;
1809 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1810 if (status) {
1811 /* an unconfirmed replay returns misordered */
1812 status = nfserr_seq_misordered;
1813 goto out_free_conn;
1814 }
1815 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
1816 if (old)
1817 expire_client(old);
1818 move_to_confirmed(unconf);
1819 conf = unconf;
1820 } else {
1821 status = nfserr_stale_clientid;
1822 goto out_free_conn;
1823 }
1824 status = nfs_ok;
1825 /*
1826 * We do not support RDMA or persistent sessions
1827 */
1828 cr_ses->flags &= ~SESSION4_PERSIST;
1829 cr_ses->flags &= ~SESSION4_RDMA;
1830
1831 init_session(rqstp, new, conf, cr_ses);
1832 nfsd4_init_conn(rqstp, conn, new);
1833
1834 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
1835 NFS4_MAX_SESSIONID_LEN);
1836 memcpy(&cr_ses->fore_channel, &new->se_fchannel,
1837 sizeof(struct nfsd4_channel_attrs));
1838 cs_slot->sl_seqid++;
1839 cr_ses->seqid = cs_slot->sl_seqid;
1840
1841 /* cache solo and embedded create sessions under the state lock */
1842 nfsd4_cache_create_session(cr_ses, cs_slot, status);
1843 out:
1844 nfs4_unlock_state();
1845 dprintk("%s returns %d\n", __func__, ntohl(status));
1846 return status;
1847 out_free_conn:
1848 free_conn(conn);
1849 out_free_session:
1850 __free_session(new);
1851 goto out;
1852 }
1853
1854 static bool nfsd4_last_compound_op(struct svc_rqst *rqstp)
1855 {
1856 struct nfsd4_compoundres *resp = rqstp->rq_resp;
1857 struct nfsd4_compoundargs *argp = rqstp->rq_argp;
1858
1859 return argp->opcnt == resp->opcnt;
1860 }
1861
1862 static __be32 nfsd4_map_bcts_dir(u32 *dir)
1863 {
1864 switch (*dir) {
1865 case NFS4_CDFC4_FORE:
1866 case NFS4_CDFC4_BACK:
1867 return nfs_ok;
1868 case NFS4_CDFC4_FORE_OR_BOTH:
1869 case NFS4_CDFC4_BACK_OR_BOTH:
1870 *dir = NFS4_CDFC4_BOTH;
1871 return nfs_ok;
1872 }
1873 return nfserr_inval;
1874 }
1875
1876 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
1877 {
1878 struct nfsd4_session *session = cstate->session;
1879 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1880
1881 spin_lock(&nn->client_lock);
1882 session->se_cb_prog = bc->bc_cb_program;
1883 session->se_cb_sec = bc->bc_cb_sec;
1884 spin_unlock(&nn->client_lock);
1885
1886 nfsd4_probe_callback(session->se_client);
1887
1888 return nfs_ok;
1889 }
1890
1891 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
1892 struct nfsd4_compound_state *cstate,
1893 struct nfsd4_bind_conn_to_session *bcts)
1894 {
1895 __be32 status;
1896 struct nfsd4_conn *conn;
1897 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1898
1899 if (!nfsd4_last_compound_op(rqstp))
1900 return nfserr_not_only_op;
1901 spin_lock(&nn->client_lock);
1902 cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp));
1903 /* Sorta weird: we only need the refcnt'ing because new_conn acquires
1904 * client_lock itself: */
1905 if (cstate->session) {
1906 nfsd4_get_session(cstate->session);
1907 atomic_inc(&cstate->session->se_client->cl_refcount);
1908 }
1909 spin_unlock(&nn->client_lock);
1910 if (!cstate->session)
1911 return nfserr_badsession;
1912
1913 status = nfsd4_map_bcts_dir(&bcts->dir);
1914 if (status)
1915 return status;
1916 conn = alloc_conn(rqstp, bcts->dir);
1917 if (!conn)
1918 return nfserr_jukebox;
1919 nfsd4_init_conn(rqstp, conn, cstate->session);
1920 return nfs_ok;
1921 }
1922
1923 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
1924 {
1925 if (!session)
1926 return 0;
1927 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
1928 }
1929
1930 __be32
1931 nfsd4_destroy_session(struct svc_rqst *r,
1932 struct nfsd4_compound_state *cstate,
1933 struct nfsd4_destroy_session *sessionid)
1934 {
1935 struct nfsd4_session *ses;
1936 __be32 status = nfserr_badsession;
1937 struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id);
1938
1939 /* Notes:
1940 * - The confirmed nfs4_client->cl_sessionid holds the destroyed sessionid
1941 * - Should we return nfserr_back_chan_busy if waiting for
1942 * callbacks on to-be-destroyed session?
1943 * - Do we need to clear any callback info from previous session?
1944 */
1945
1946 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
1947 if (!nfsd4_last_compound_op(r))
1948 return nfserr_not_only_op;
1949 }
1950 dump_sessionid(__func__, &sessionid->sessionid);
1951 spin_lock(&nn->client_lock);
1952 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r));
1953 if (!ses) {
1954 spin_unlock(&nn->client_lock);
1955 goto out;
1956 }
1957
1958 unhash_session(ses);
1959 spin_unlock(&nn->client_lock);
1960
1961 nfs4_lock_state();
1962 nfsd4_probe_callback_sync(ses->se_client);
1963 nfs4_unlock_state();
1964
1965 spin_lock(&nn->client_lock);
1966 nfsd4_del_conns(ses);
1967 nfsd4_put_session_locked(ses);
1968 spin_unlock(&nn->client_lock);
1969 status = nfs_ok;
1970 out:
1971 dprintk("%s returns %d\n", __func__, ntohl(status));
1972 return status;
1973 }
1974
1975 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
1976 {
1977 struct nfsd4_conn *c;
1978
1979 list_for_each_entry(c, &s->se_conns, cn_persession) {
1980 if (c->cn_xprt == xpt) {
1981 return c;
1982 }
1983 }
1984 return NULL;
1985 }
1986
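/*
 * If the connection this SEQUENCE arrived on is not yet associated with
 * the session, hash it and register for transport-down notification;
 * otherwise the preallocated conn is simply freed.
 */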
1987 static void nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
1988 {
1989 struct nfs4_client *clp = ses->se_client;
1990 struct nfsd4_conn *c;
1991 int ret;
1992
1993 spin_lock(&clp->cl_lock);
1994 c = __nfsd4_find_conn(new->cn_xprt, ses);
1995 if (c) {
1996 spin_unlock(&clp->cl_lock);
1997 free_conn(new);
1998 return;
1999 }
2000 __nfsd4_hash_conn(new, ses);
2001 spin_unlock(&clp->cl_lock);
2002 ret = nfsd4_register_conn(new);
2003 if (ret)
2004 /* oops; xprt is already down: */
2005 nfsd4_conn_lost(&new->cn_xpt_user);
2006 return;
2007 }
2008
2009 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2010 {
2011 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2012
2013 return args->opcnt > session->se_fchannel.maxops;
2014 }
2015
2016 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2017 struct nfsd4_session *session)
2018 {
2019 struct xdr_buf *xb = &rqstp->rq_arg;
2020
2021 return xb->len > session->se_fchannel.maxreq_sz;
2022 }
2023
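/*
 * SEQUENCE: validate the session and slot, then either replay a cached
 * reply (when check_slot_seqid() reports a replay and the slot has a
 * cached entry) or claim the slot for this request by bumping sl_seqid
 * and recording whether the reply should be cached.  For example, a
 * retransmission carrying the slot's current seqid is answered from the
 * slot's cache rather than re-executed.
 */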
2024 __be32
2025 nfsd4_sequence(struct svc_rqst *rqstp,
2026 struct nfsd4_compound_state *cstate,
2027 struct nfsd4_sequence *seq)
2028 {
2029 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2030 struct nfsd4_session *session;
2031 struct nfsd4_slot *slot;
2032 struct nfsd4_conn *conn;
2033 __be32 status;
2034 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2035
2036 if (resp->opcnt != 1)
2037 return nfserr_sequence_pos;
2038
2039 /*
2040 * Will be either used or freed by nfsd4_sequence_check_conn
2041 * below.
2042 */
2043 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2044 if (!conn)
2045 return nfserr_jukebox;
2046
2047 spin_lock(&nn->client_lock);
2048 status = nfserr_badsession;
2049 session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp));
2050 if (!session)
2051 goto out;
2052
2053 status = nfserr_too_many_ops;
2054 if (nfsd4_session_too_many_ops(rqstp, session))
2055 goto out;
2056
2057 status = nfserr_req_too_big;
2058 if (nfsd4_request_too_big(rqstp, session))
2059 goto out;
2060
2061 status = nfserr_badslot;
2062 if (seq->slotid >= session->se_fchannel.maxreqs)
2063 goto out;
2064
2065 slot = session->se_slots[seq->slotid];
2066 dprintk("%s: slotid %d\n", __func__, seq->slotid);
2067
2068 /* We do not negotiate the number of slots yet, so set maxslots
2069 * to the session maxreqs; maxslots is used to encode both the
2070 * sr_highest_slotid and the target slotid in the reply */
2071 seq->maxslots = session->se_fchannel.maxreqs;
2072
2073 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2074 slot->sl_flags & NFSD4_SLOT_INUSE);
2075 if (status == nfserr_replay_cache) {
2076 status = nfserr_seq_misordered;
2077 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2078 goto out;
2079 cstate->slot = slot;
2080 cstate->session = session;
2081 /* Return the cached reply status and set cstate->status
2082 * for nfsd4_proc_compound processing */
2083 status = nfsd4_replay_cache_entry(resp, seq);
2084 cstate->status = nfserr_replay_cache;
2085 goto out;
2086 }
2087 if (status)
2088 goto out;
2089
2090 nfsd4_sequence_check_conn(conn, session);
2091 conn = NULL;
2092
2093 /* Success! bump slot seqid */
2094 slot->sl_seqid = seq->seqid;
2095 slot->sl_flags |= NFSD4_SLOT_INUSE;
2096 if (seq->cachethis)
2097 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2098 else
2099 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2100
2101 cstate->slot = slot;
2102 cstate->session = session;
2103
2104 out:
2105 /* Hold a session reference until done processing the compound. */
2106 if (cstate->session) {
2107 struct nfs4_client *clp = session->se_client;
2108
2109 nfsd4_get_session(cstate->session);
2110 atomic_inc(&clp->cl_refcount);
2111 switch (clp->cl_cb_state) {
2112 case NFSD4_CB_DOWN:
2113 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2114 break;
2115 case NFSD4_CB_FAULT:
2116 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2117 break;
2118 default:
2119 seq->status_flags = 0;
2120 }
2121 }
2122 kfree(conn);
2123 spin_unlock(&nn->client_lock);
2124 dprintk("%s: return %d\n", __func__, ntohl(status));
2125 return status;
2126 }
2127
2128 __be32
2129 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2130 {
2131 struct nfs4_client *conf, *unconf, *clp;
2132 __be32 status = 0;
2133 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2134
2135 nfs4_lock_state();
2136 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2137 conf = find_confirmed_client(&dc->clientid, true, nn);
2138
2139 if (conf) {
2140 clp = conf;
2141
2142 if (!is_client_expired(conf) && client_has_state(conf)) {
2143 status = nfserr_clientid_busy;
2144 goto out;
2145 }
2146
2147 /* rfc5661 18.50.3 */
2148 if (cstate->session && conf == cstate->session->se_client) {
2149 status = nfserr_clientid_busy;
2150 goto out;
2151 }
2152 } else if (unconf)
2153 clp = unconf;
2154 else {
2155 status = nfserr_stale_clientid;
2156 goto out;
2157 }
2158
2159 expire_client(clp);
2160 out:
2161 nfs4_unlock_state();
2162 dprintk("%s return %d\n", __func__, ntohl(status));
2163 return status;
2164 }
2165
2166 __be32
2167 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2168 {
2169 __be32 status = 0;
2170
2171 if (rc->rca_one_fs) {
2172 if (!cstate->current_fh.fh_dentry)
2173 return nfserr_nofilehandle;
2174 /*
2175 * We don't take advantage of the rca_one_fs case.
2176 * That's OK, it's optional, we can safely ignore it.
2177 */
2178 return nfs_ok;
2179 }
2180
2181 nfs4_lock_state();
2182 status = nfserr_complete_already;
2183 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2184 &cstate->session->se_client->cl_flags))
2185 goto out;
2186
2187 status = nfserr_stale_clientid;
2188 if (is_client_expired(cstate->session->se_client))
2189 /*
2190 * The following error isn't really legal.
2191 * But we only get here if the client has just explicitly
2192 * destroyed its own clientid. Surely it no longer cares what
2193 * error it gets back on an operation for the dead
2194 * client.
2195 */
2196 goto out;
2197
2198 status = nfs_ok;
2199 nfsd4_client_record_create(cstate->session->se_client);
2200 out:
2201 nfs4_unlock_state();
2202 return status;
2203 }
2204
2205 __be32
2206 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2207 struct nfsd4_setclientid *setclid)
2208 {
2209 struct xdr_netobj clname = setclid->se_name;
2210 nfs4_verifier clverifier = setclid->se_verf;
2211 struct nfs4_client *conf, *unconf, *new;
2212 __be32 status;
2213 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2214
2215 /* Cases below refer to rfc 3530 section 14.2.33: */
2216 nfs4_lock_state();
2217 conf = find_confirmed_client_by_name(&clname, nn);
2218 if (conf) {
2219 /* case 0: */
2220 status = nfserr_clid_inuse;
2221 if (clp_used_exchangeid(conf))
2222 goto out;
2223 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2224 char addr_str[INET6_ADDRSTRLEN];
2225 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2226 sizeof(addr_str));
2227 dprintk("NFSD: setclientid: string in use by client "
2228 "at %s\n", addr_str);
2229 goto out;
2230 }
2231 }
2232 unconf = find_unconfirmed_client_by_name(&clname, nn);
2233 if (unconf)
2234 expire_client(unconf);
2235 status = nfserr_jukebox;
2236 new = create_client(clname, rqstp, &clverifier);
2237 if (new == NULL)
2238 goto out;
2239 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2240 /* case 1: probable callback update */
2241 copy_clid(new, conf);
2242 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2243 gen_clid(new, nn);
2244 new->cl_minorversion = 0;
2245 gen_callback(new, setclid, rqstp);
2246 add_to_unconfirmed(new);
2247 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2248 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2249 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2250 status = nfs_ok;
2251 out:
2252 nfs4_unlock_state();
2253 return status;
2254 }
2255
2256
2257 __be32
2258 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2259 struct nfsd4_compound_state *cstate,
2260 struct nfsd4_setclientid_confirm *setclientid_confirm)
2261 {
2262 struct nfs4_client *conf, *unconf;
2263 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2264 clientid_t * clid = &setclientid_confirm->sc_clientid;
2265 __be32 status;
2266 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2267
2268 if (STALE_CLIENTID(clid, nn))
2269 return nfserr_stale_clientid;
2270 nfs4_lock_state();
2271
2272 conf = find_confirmed_client(clid, false, nn);
2273 unconf = find_unconfirmed_client(clid, false, nn);
2274 /*
2275 * We try hard to give out unique clientid's, so if we get an
2276 * attempt to confirm the same clientid with a different cred,
2277 * there's a bug somewhere. Let's charitably assume it's our
2278 * bug.
2279 */
2280 status = nfserr_serverfault;
2281 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2282 goto out;
2283 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2284 goto out;
2285 /* cases below refer to rfc 3530 section 14.2.34: */
2286 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2287 if (conf && !unconf) /* case 2: probable retransmit */
2288 status = nfs_ok;
2289 else /* case 4: client hasn't noticed we rebooted yet? */
2290 status = nfserr_stale_clientid;
2291 goto out;
2292 }
2293 status = nfs_ok;
2294 if (conf) { /* case 1: callback update */
2295 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2296 nfsd4_probe_callback(conf);
2297 expire_client(unconf);
2298 } else { /* case 3: normal case; new or rebooted client */
2299 conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2300 if (conf)
2301 expire_client(conf);
2302 move_to_confirmed(unconf);
2303 nfsd4_probe_callback(unconf);
2304 }
2305 out:
2306 nfs4_unlock_state();
2307 return status;
2308 }
2309
2310 static struct nfs4_file *nfsd4_alloc_file(void)
2311 {
2312 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2313 }
2314
2315 /* OPEN Share state helper functions */
2316 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2317 {
2318 unsigned int hashval = file_hashval(ino);
2319
2320 atomic_set(&fp->fi_ref, 1);
2321 INIT_LIST_HEAD(&fp->fi_hash);
2322 INIT_LIST_HEAD(&fp->fi_stateids);
2323 INIT_LIST_HEAD(&fp->fi_delegations);
2324 fp->fi_inode = igrab(ino);
2325 fp->fi_had_conflict = false;
2326 fp->fi_lease = NULL;
2327 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2328 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2329 spin_lock(&recall_lock);
2330 list_add(&fp->fi_hash, &file_hashtbl[hashval]);
2331 spin_unlock(&recall_lock);
2332 }
2333
2334 static void
2335 nfsd4_free_slab(struct kmem_cache **slab)
2336 {
2337 if (*slab == NULL)
2338 return;
2339 kmem_cache_destroy(*slab);
2340 *slab = NULL;
2341 }
2342
2343 void
2344 nfsd4_free_slabs(void)
2345 {
2346 nfsd4_free_slab(&openowner_slab);
2347 nfsd4_free_slab(&lockowner_slab);
2348 nfsd4_free_slab(&file_slab);
2349 nfsd4_free_slab(&stateid_slab);
2350 nfsd4_free_slab(&deleg_slab);
2351 }
2352
2353 int
2354 nfsd4_init_slabs(void)
2355 {
2356 openowner_slab = kmem_cache_create("nfsd4_openowners",
2357 sizeof(struct nfs4_openowner), 0, 0, NULL);
2358 if (openowner_slab == NULL)
2359 goto out_nomem;
2360 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2361 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2362 if (lockowner_slab == NULL)
2363 goto out_nomem;
2364 file_slab = kmem_cache_create("nfsd4_files",
2365 sizeof(struct nfs4_file), 0, 0, NULL);
2366 if (file_slab == NULL)
2367 goto out_nomem;
2368 stateid_slab = kmem_cache_create("nfsd4_stateids",
2369 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2370 if (stateid_slab == NULL)
2371 goto out_nomem;
2372 deleg_slab = kmem_cache_create("nfsd4_delegations",
2373 sizeof(struct nfs4_delegation), 0, 0, NULL);
2374 if (deleg_slab == NULL)
2375 goto out_nomem;
2376 return 0;
2377 out_nomem:
2378 nfsd4_free_slabs();
2379 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2380 return -ENOMEM;
2381 }
2382
2383 void nfs4_free_openowner(struct nfs4_openowner *oo)
2384 {
2385 kfree(oo->oo_owner.so_owner.data);
2386 kmem_cache_free(openowner_slab, oo);
2387 }
2388
2389 void nfs4_free_lockowner(struct nfs4_lockowner *lo)
2390 {
2391 kfree(lo->lo_owner.so_owner.data);
2392 kmem_cache_free(lockowner_slab, lo);
2393 }
2394
2395 static void init_nfs4_replay(struct nfs4_replay *rp)
2396 {
2397 rp->rp_status = nfserr_serverfault;
2398 rp->rp_buflen = 0;
2399 rp->rp_buf = rp->rp_ibuf;
2400 }
2401
2402 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2403 {
2404 struct nfs4_stateowner *sop;
2405
2406 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2407 if (!sop)
2408 return NULL;
2409
2410 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2411 if (!sop->so_owner.data) {
2412 kmem_cache_free(slab, sop);
2413 return NULL;
2414 }
2415 sop->so_owner.len = owner->len;
2416
2417 INIT_LIST_HEAD(&sop->so_stateids);
2418 sop->so_client = clp;
2419 init_nfs4_replay(&sop->so_replay);
2420 return sop;
2421 }
2422
2423 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2424 {
2425 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2426
2427 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
2428 list_add(&oo->oo_perclient, &clp->cl_openowners);
2429 }
2430
2431 static struct nfs4_openowner *
2432 alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfsd4_open *open) {
2433 struct nfs4_openowner *oo;
2434
2435 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2436 if (!oo)
2437 return NULL;
2438 oo->oo_owner.so_is_open_owner = 1;
2439 oo->oo_owner.so_seqid = open->op_seqid;
2440 oo->oo_flags = NFS4_OO_NEW;
2441 oo->oo_time = 0;
2442 oo->oo_last_closed_stid = NULL;
2443 INIT_LIST_HEAD(&oo->oo_close_lru);
2444 hash_openowner(oo, clp, strhashval);
2445 return oo;
2446 }
2447
2448 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
2449 struct nfs4_openowner *oo = open->op_openowner;
2450 struct nfs4_client *clp = oo->oo_owner.so_client;
2451
2452 init_stid(&stp->st_stid, clp, NFS4_OPEN_STID);
2453 INIT_LIST_HEAD(&stp->st_lockowners);
2454 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2455 list_add(&stp->st_perfile, &fp->fi_stateids);
2456 stp->st_stateowner = &oo->oo_owner;
2457 get_nfs4_file(fp);
2458 stp->st_file = fp;
2459 stp->st_access_bmap = 0;
2460 stp->st_deny_bmap = 0;
2461 set_access(open->op_share_access, stp);
2462 set_deny(open->op_share_deny, stp);
2463 stp->st_openstp = NULL;
2464 }
2465
2466 static void
2467 move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
2468 {
2469 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2470
2471 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2472
2473 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
2474 oo->oo_time = get_seconds();
2475 }
2476
2477 static int
2478 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2479 clientid_t *clid)
2480 {
2481 return (sop->so_owner.len == owner->len) &&
2482 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2483 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2484 }
2485
2486 static struct nfs4_openowner *
2487 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
2488 bool sessions, struct nfsd_net *nn)
2489 {
2490 struct nfs4_stateowner *so;
2491 struct nfs4_openowner *oo;
2492 struct nfs4_client *clp;
2493
2494 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
2495 if (!so->so_is_open_owner)
2496 continue;
2497 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2498 oo = openowner(so);
2499 clp = oo->oo_owner.so_client;
2500 if ((bool)clp->cl_minorversion != sessions)
2501 return NULL;
2502 renew_client(oo->oo_owner.so_client);
2503 return oo;
2504 }
2505 }
2506 return NULL;
2507 }
2508
2509 /* search file_hashtbl[] for file */
2510 static struct nfs4_file *
2511 find_file(struct inode *ino)
2512 {
2513 unsigned int hashval = file_hashval(ino);
2514 struct nfs4_file *fp;
2515
2516 spin_lock(&recall_lock);
2517 list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2518 if (fp->fi_inode == ino) {
2519 get_nfs4_file(fp);
2520 spin_unlock(&recall_lock);
2521 return fp;
2522 }
2523 }
2524 spin_unlock(&recall_lock);
2525 return NULL;
2526 }
2527
2528 /*
2529 * Called to check deny when READ with all zero stateid or
2530 * WRITE with all zero or all one stateid
2531 */
2532 static __be32
2533 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2534 {
2535 struct inode *ino = current_fh->fh_dentry->d_inode;
2536 struct nfs4_file *fp;
2537 struct nfs4_ol_stateid *stp;
2538 __be32 ret;
2539
2540 dprintk("NFSD: nfs4_share_conflict\n");
2541
2542 fp = find_file(ino);
2543 if (!fp)
2544 return nfs_ok;
2545 ret = nfserr_locked;
2546 /* Search for conflicting share reservations */
2547 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2548 if (test_deny(deny_type, stp) ||
2549 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2550 goto out;
2551 }
2552 ret = nfs_ok;
2553 out:
2554 put_nfs4_file(fp);
2555 return ret;
2556 }
2557
2558 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2559 {
2560 /* We're assuming the state code never drops its reference
2561 * without first removing the lease. Since we're in this lease
2562 * callback (and since the lease code is serialized by the kernel
2563 * lock) we know the server hasn't removed the lease yet, so
2564 * it's safe to take a reference: */
2565 atomic_inc(&dp->dl_count);
2566
2567 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2568
2569 /* Only place dl_time is set; protected by lock_flocks */
2570 dp->dl_time = get_seconds();
2571
2572 nfsd4_cb_recall(dp);
2573 }
2574
2575 /* Called from break_lease() with lock_flocks() held. */
2576 static void nfsd_break_deleg_cb(struct file_lock *fl)
2577 {
2578 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2579 struct nfs4_delegation *dp;
2580
2581 if (!fp) {
2582 WARN(1, "(%p)->fl_owner NULL\n", fl);
2583 return;
2584 }
2585 if (fp->fi_had_conflict) {
2586 WARN(1, "duplicate break on %p\n", fp);
2587 return;
2588 }
2589 /*
2590 * We don't want the locks code to timeout the lease for us;
2591 * we'll remove it ourselves if a delegation isn't returned
2592 * in time:
2593 */
2594 fl->fl_break_time = 0;
2595
2596 spin_lock(&recall_lock);
2597 fp->fi_had_conflict = true;
2598 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2599 nfsd_break_one_deleg(dp);
2600 spin_unlock(&recall_lock);
2601 }
2602
2603 static
2604 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2605 {
2606 if (arg & F_UNLCK)
2607 return lease_modify(onlist, arg);
2608 else
2609 return -EAGAIN;
2610 }
2611
2612 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2613 .lm_break = nfsd_break_deleg_cb,
2614 .lm_change = nfsd_change_deleg_cb,
2615 };
2616
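/*
 * NFSv4.0 seqid check: sessions (v4.1+) make owner seqids irrelevant.
 * Otherwise a seqid one behind the owner's current value indicates a
 * retransmission and is answered from the replay cache via
 * nfserr_replay_me; the current value is accepted; anything else is a
 * bad seqid.
 */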
2617 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2618 {
2619 if (nfsd4_has_session(cstate))
2620 return nfs_ok;
2621 if (seqid == so->so_seqid - 1)
2622 return nfserr_replay_me;
2623 if (seqid == so->so_seqid)
2624 return nfs_ok;
2625 return nfserr_bad_seqid;
2626 }
2627
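/*
 * First phase of OPEN processing: look up (or create) the openowner and
 * preallocate the nfs4_file and stateid now, so that the second phase
 * cannot fail for lack of memory after the file has been created.
 */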
2628 __be32
2629 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
2630 struct nfsd4_open *open, struct nfsd_net *nn)
2631 {
2632 clientid_t *clientid = &open->op_clientid;
2633 struct nfs4_client *clp = NULL;
2634 unsigned int strhashval;
2635 struct nfs4_openowner *oo = NULL;
2636 __be32 status;
2637
2638 if (STALE_CLIENTID(&open->op_clientid, nn))
2639 return nfserr_stale_clientid;
2640 /*
2641 * In case we need it later, after we've already created the
2642 * file and don't want to risk a further failure:
2643 */
2644 open->op_file = nfsd4_alloc_file();
2645 if (open->op_file == NULL)
2646 return nfserr_jukebox;
2647
2648 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
2649 oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
2650 open->op_openowner = oo;
2651 if (!oo) {
2652 clp = find_confirmed_client(clientid, cstate->minorversion,
2653 nn);
2654 if (clp == NULL)
2655 return nfserr_expired;
2656 goto new_owner;
2657 }
2658 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
2659 /* Replace unconfirmed owners without checking for replay. */
2660 clp = oo->oo_owner.so_client;
2661 release_openowner(oo);
2662 open->op_openowner = NULL;
2663 goto new_owner;
2664 }
2665 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
2666 if (status)
2667 return status;
2668 clp = oo->oo_owner.so_client;
2669 goto alloc_stateid;
2670 new_owner:
2671 oo = alloc_init_open_stateowner(strhashval, clp, open);
2672 if (oo == NULL)
2673 return nfserr_jukebox;
2674 open->op_openowner = oo;
2675 alloc_stateid:
2676 open->op_stp = nfs4_alloc_stateid(clp);
2677 if (!open->op_stp)
2678 return nfserr_jukebox;
2679 return nfs_ok;
2680 }
2681
2682 static inline __be32
2683 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
2684 {
2685 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
2686 return nfserr_openmode;
2687 else
2688 return nfs_ok;
2689 }
2690
2691 static int share_access_to_flags(u32 share_access)
2692 {
2693 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
2694 }
2695
2696 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
2697 {
2698 struct nfs4_stid *ret;
2699
2700 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
2701 if (!ret)
2702 return NULL;
2703 return delegstateid(ret);
2704 }
2705
2706 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
2707 {
2708 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
2709 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
2710 }
2711
2712 static __be32
2713 nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open,
2714 struct nfs4_delegation **dp)
2715 {
2716 int flags;
2717 __be32 status = nfserr_bad_stateid;
2718
2719 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
2720 if (*dp == NULL)
2721 goto out;
2722 flags = share_access_to_flags(open->op_share_access);
2723 status = nfs4_check_delegmode(*dp, flags);
2724 if (status)
2725 *dp = NULL;
2726 out:
2727 if (!nfsd4_is_deleg_cur(open))
2728 return nfs_ok;
2729 if (status)
2730 return status;
2731 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2732 return nfs_ok;
2733 }
2734
2735 static __be32
2736 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
2737 {
2738 struct nfs4_ol_stateid *local;
2739 struct nfs4_openowner *oo = open->op_openowner;
2740
2741 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
2742 /* ignore lock owners */
2743 if (local->st_stateowner->so_is_open_owner == 0)
2744 continue;
2745 /* remember if we have seen this open owner */
2746 if (local->st_stateowner == &oo->oo_owner)
2747 *stpp = local;
2748 /* check for conflicting share reservations */
2749 if (!test_share(local, open))
2750 return nfserr_share_denied;
2751 }
2752 return nfs_ok;
2753 }
2754
2755 static inline int nfs4_access_to_access(u32 nfs4_access)
2756 {
2757 int flags = 0;
2758
2759 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
2760 flags |= NFSD_MAY_READ;
2761 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
2762 flags |= NFSD_MAY_WRITE;
2763 return flags;
2764 }
2765
2766 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
2767 struct svc_fh *cur_fh, struct nfsd4_open *open)
2768 {
2769 __be32 status;
2770 int oflag = nfs4_access_to_omode(open->op_share_access);
2771 int access = nfs4_access_to_access(open->op_share_access);
2772
2773 if (!fp->fi_fds[oflag]) {
2774 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
2775 &fp->fi_fds[oflag]);
2776 if (status)
2777 return status;
2778 }
2779 nfs4_file_get_access(fp, oflag);
2780
2781 return nfs_ok;
2782 }
2783
2784 static inline __be32
2785 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
2786 struct nfsd4_open *open)
2787 {
2788 struct iattr iattr = {
2789 .ia_valid = ATTR_SIZE,
2790 .ia_size = 0,
2791 };
2792 if (!open->op_truncate)
2793 return 0;
2794 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
2795 return nfserr_inval;
2796 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
2797 }
2798
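/*
 * Upgrade an existing open: acquire any newly requested access mode
 * before truncating, and drop that access again if the truncate fails,
 * so the stateid's access/deny bitmaps only change on success.
 */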
2799 static __be32
2800 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
2801 {
2802 u32 op_share_access = open->op_share_access;
2803 bool new_access;
2804 __be32 status;
2805
2806 new_access = !test_access(op_share_access, stp);
2807 if (new_access) {
2808 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2809 if (status)
2810 return status;
2811 }
2812 status = nfsd4_truncate(rqstp, cur_fh, open);
2813 if (status) {
2814 if (new_access) {
2815 int oflag = nfs4_access_to_omode(op_share_access);
2816 nfs4_file_put_access(fp, oflag);
2817 }
2818 return status;
2819 }
2820 /* remember the open */
2821 set_access(op_share_access, stp);
2822 set_deny(open->op_share_deny, stp);
2823
2824 return nfs_ok;
2825 }
2826
2827
2828 static void
2829 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
2830 {
2831 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
2832 }
2833
2834 /* Should we give out recallable state? */
2835 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2836 {
2837 if (clp->cl_cb_state == NFSD4_CB_UP)
2838 return true;
2839 /*
2840 * In the sessions case, since we don't have to establish a
2841 * separate connection for callbacks, we assume it's OK
2842 * until we hear otherwise:
2843 */
2844 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2845 }
2846
2847 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2848 {
2849 struct file_lock *fl;
2850
2851 fl = locks_alloc_lock();
2852 if (!fl)
2853 return NULL;
2854 locks_init_lock(fl);
2855 fl->fl_lmops = &nfsd_lease_mng_ops;
2856 fl->fl_flags = FL_LEASE;
2857 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ ? F_RDLCK : F_WRLCK;
2858 fl->fl_end = OFFSET_MAX;
2859 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2860 fl->fl_pid = current->tgid;
2861 return fl;
2862 }
2863
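/*
 * Set a lease on the file backing the delegation.  The lease's fl_owner
 * points at the nfs4_file, which is how nfsd_break_deleg_cb() finds the
 * delegations to recall when another opener conflicts with the lease.
 */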
2864 static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2865 {
2866 struct nfs4_file *fp = dp->dl_file;
2867 struct file_lock *fl;
2868 int status;
2869
2870 fl = nfs4_alloc_init_lease(dp, flag);
2871 if (!fl)
2872 return -ENOMEM;
2873 fl->fl_file = find_readable_file(fp);
2874 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2875 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2876 if (status) {
2877 list_del_init(&dp->dl_perclnt);
2878 locks_free_lock(fl);
2879 return -ENOMEM;
2880 }
2881 fp->fi_lease = fl;
2882 fp->fi_deleg_file = get_file(fl->fl_file);
2883 atomic_set(&fp->fi_delegees, 1);
2884 list_add(&dp->dl_perfile, &fp->fi_delegations);
2885 return 0;
2886 }
2887
2888 static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2889 {
2890 struct nfs4_file *fp = dp->dl_file;
2891
2892 if (!fp->fi_lease)
2893 return nfs4_setlease(dp, flag);
2894 spin_lock(&recall_lock);
2895 if (fp->fi_had_conflict) {
2896 spin_unlock(&recall_lock);
2897 return -EAGAIN;
2898 }
2899 atomic_inc(&fp->fi_delegees);
2900 list_add(&dp->dl_perfile, &fp->fi_delegations);
2901 spin_unlock(&recall_lock);
2902 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
2903 return 0;
2904 }
2905
2906 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
2907 {
2908 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
2909 if (status == -EAGAIN)
2910 open->op_why_no_deleg = WND4_CONTENTION;
2911 else {
2912 open->op_why_no_deleg = WND4_RESOURCE;
2913 switch (open->op_deleg_want) {
2914 case NFS4_SHARE_WANT_READ_DELEG:
2915 case NFS4_SHARE_WANT_WRITE_DELEG:
2916 case NFS4_SHARE_WANT_ANY_DELEG:
2917 break;
2918 case NFS4_SHARE_WANT_CANCEL:
2919 open->op_why_no_deleg = WND4_CANCELLED;
2920 break;
2921 case NFS4_SHARE_WANT_NO_DELEG:
2922 WARN_ON_ONCE(1);
2923 }
2924 }
2925 }
2926
2927 /*
2928 * Attempt to hand out a delegation.
2929 */
2930 static void
2931 nfs4_open_delegation(struct net *net, struct svc_fh *fh,
2932 struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
2933 {
2934 struct nfs4_delegation *dp;
2935 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
2936 int cb_up;
2937 int status = 0, flag = 0;
2938
2939 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
2940 flag = NFS4_OPEN_DELEGATE_NONE;
2941 open->op_recall = 0;
2942 switch (open->op_claim_type) {
2943 case NFS4_OPEN_CLAIM_PREVIOUS:
2944 if (!cb_up)
2945 open->op_recall = 1;
2946 flag = open->op_delegate_type;
2947 if (flag == NFS4_OPEN_DELEGATE_NONE)
2948 goto out;
2949 break;
2950 case NFS4_OPEN_CLAIM_NULL:
2951 /* Let's not give out any delegations till everyone's
2952 * had the chance to reclaim theirs.... */
2953 if (locks_in_grace(net))
2954 goto out;
2955 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
2956 goto out;
2957 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
2958 flag = NFS4_OPEN_DELEGATE_WRITE;
2959 else
2960 flag = NFS4_OPEN_DELEGATE_READ;
2961 break;
2962 default:
2963 goto out;
2964 }
2965
2966 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh, flag);
2967 if (dp == NULL)
2968 goto out_no_deleg;
2969 status = nfs4_set_delegation(dp, flag);
2970 if (status)
2971 goto out_free;
2972
2973 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
2974
2975 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
2976 STATEID_VAL(&dp->dl_stid.sc_stateid));
2977 out:
2978 open->op_delegate_type = flag;
2979 if (flag == NFS4_OPEN_DELEGATE_NONE) {
2980 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
2981 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2982 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2983
2984 /* 4.1 client asking for a delegation? */
2985 if (open->op_deleg_want)
2986 nfsd4_open_deleg_none_ext(open, status);
2987 }
2988 return;
2989 out_free:
2990 nfs4_put_delegation(dp);
2991 out_no_deleg:
2992 flag = NFS4_OPEN_DELEGATE_NONE;
2993 goto out;
2994 }
2995
2996 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
2997 struct nfs4_delegation *dp)
2998 {
2999 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3000 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3001 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3002 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3003 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3004 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3005 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3006 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3007 }
3008 /* Otherwise the client must be confused wanting a delegation
3009 * it already has, therefore we don't return
3010 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
3011 */
3012 }
3013
3014 /*
3015 * called with nfs4_lock_state() held.
3016 */
3017 __be32
3018 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3019 {
3020 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3021 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3022 struct nfs4_file *fp = NULL;
3023 struct inode *ino = current_fh->fh_dentry->d_inode;
3024 struct nfs4_ol_stateid *stp = NULL;
3025 struct nfs4_delegation *dp = NULL;
3026 __be32 status;
3027
3028 /*
3029 * Lookup file; if found, lookup stateid and check open request,
3030 * and check for delegations in the process of being recalled.
3031 * If not found, create the nfs4_file struct.
3032 */
3033 fp = find_file(ino);
3034 if (fp) {
3035 if ((status = nfs4_check_open(fp, open, &stp)))
3036 goto out;
3037 status = nfs4_check_deleg(cl, fp, open, &dp);
3038 if (status)
3039 goto out;
3040 } else {
3041 status = nfserr_bad_stateid;
3042 if (nfsd4_is_deleg_cur(open))
3043 goto out;
3044 status = nfserr_jukebox;
3045 fp = open->op_file;
3046 open->op_file = NULL;
3047 nfsd4_init_file(fp, ino);
3048 }
3049
3050 /*
3051 * OPEN the file, or upgrade an existing OPEN.
3052 * If truncate fails, the OPEN fails.
3053 */
3054 if (stp) {
3055 /* Stateid was found, this is an OPEN upgrade */
3056 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3057 if (status)
3058 goto out;
3059 } else {
3060 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3061 if (status)
3062 goto out;
3063 status = nfsd4_truncate(rqstp, current_fh, open);
3064 if (status)
3065 goto out;
3066 stp = open->op_stp;
3067 open->op_stp = NULL;
3068 init_open_stateid(stp, fp, open);
3069 }
3070 update_stateid(&stp->st_stid.sc_stateid);
3071 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3072
3073 if (nfsd4_has_session(&resp->cstate)) {
3074 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3075
3076 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3077 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3078 open->op_why_no_deleg = WND4_NOT_WANTED;
3079 goto nodeleg;
3080 }
3081 }
3082
3083 /*
3084 * Attempt to hand out a delegation. No error return, because the
3085 * OPEN succeeds even if we fail.
3086 */
3087 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
3088 nodeleg:
3089 status = nfs_ok;
3090
3091 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3092 STATEID_VAL(&stp->st_stid.sc_stateid));
3093 out:
3094 /* 4.1 client trying to upgrade/downgrade delegation? */
3095 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3096 open->op_deleg_want)
3097 nfsd4_deleg_xgrade_none_ext(open, dp);
3098
3099 if (fp)
3100 put_nfs4_file(fp);
3101 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3102 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3103 /*
3104 * To finish the open response, we just need to set the rflags.
3105 */
3106 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3107 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3108 !nfsd4_has_session(&resp->cstate))
3109 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3110
3111 return status;
3112 }
3113
3114 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3115 {
3116 if (open->op_openowner) {
3117 struct nfs4_openowner *oo = open->op_openowner;
3118
3119 if (!list_empty(&oo->oo_owner.so_stateids))
3120 list_del_init(&oo->oo_close_lru);
3121 if (oo->oo_flags & NFS4_OO_NEW) {
3122 if (status) {
3123 release_openowner(oo);
3124 open->op_openowner = NULL;
3125 } else
3126 oo->oo_flags &= ~NFS4_OO_NEW;
3127 }
3128 }
3129 if (open->op_file)
3130 nfsd4_free_file(open->op_file);
3131 if (open->op_stp)
3132 free_generic_stateid(open->op_stp);
3133 }
3134
3135 static __be32 lookup_clientid(clientid_t *clid, bool session, struct nfsd_net *nn, struct nfs4_client **clp)
3136 {
3137 struct nfs4_client *found;
3138
3139 if (STALE_CLIENTID(clid, nn))
3140 return nfserr_stale_clientid;
3141 found = find_confirmed_client(clid, session, nn);
3142 if (clp)
3143 *clp = found;
3144 return found ? nfs_ok : nfserr_expired;
3145 }
3146
3147 __be32
3148 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3149 clientid_t *clid)
3150 {
3151 struct nfs4_client *clp;
3152 __be32 status;
3153 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3154
3155 nfs4_lock_state();
3156 dprintk("process_renew(%08x/%08x): starting\n",
3157 clid->cl_boot, clid->cl_id);
3158 status = lookup_clientid(clid, cstate->minorversion, nn, &clp);
3159 if (status)
3160 goto out;
3161 status = nfserr_cb_path_down;
3162 if (!list_empty(&clp->cl_delegations)
3163 && clp->cl_cb_state != NFSD4_CB_UP)
3164 goto out;
3165 status = nfs_ok;
3166 out:
3167 nfs4_unlock_state();
3168 return status;
3169 }
3170
3171 static void
3172 nfsd4_end_grace(struct nfsd_net *nn)
3173 {
3174 /* do nothing if grace period already ended */
3175 if (nn->grace_ended)
3176 return;
3177
3178 dprintk("NFSD: end of grace period\n");
3179 nn->grace_ended = true;
3180 nfsd4_record_grace_done(nn, nn->boot_time);
3181 locks_end_grace(&nn->nfsd4_manager);
3182 /*
3183 * Now that every NFSv4 client has had the chance to recover and
3184 * to see the (possibly new, possibly shorter) lease time, we
3185 * can safely set the next grace time to the current lease time:
3186 */
3187 nn->nfsd4_grace = nn->nfsd4_lease;
3188 }
3189
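/*
 * The laundromat walks the per-net client LRU, the delegation recall
 * list, and the close LRU, expiring anything that has been idle for a
 * full lease period.  It returns the number of seconds until the next
 * pass is due (never less than NFSD_LAUNDROMAT_MINTIMEOUT).
 */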
3190 static time_t
3191 nfs4_laundromat(struct nfsd_net *nn)
3192 {
3193 struct nfs4_client *clp;
3194 struct nfs4_openowner *oo;
3195 struct nfs4_delegation *dp;
3196 struct list_head *pos, *next, reaplist;
3197 time_t cutoff = get_seconds() - nn->nfsd4_lease;
3198 time_t t, clientid_val = nn->nfsd4_lease;
3199 time_t u, test_val = nn->nfsd4_lease;
3200
3201 nfs4_lock_state();
3202
3203 dprintk("NFSD: laundromat service - starting\n");
3204 nfsd4_end_grace(nn);
3205 INIT_LIST_HEAD(&reaplist);
3206 spin_lock(&nn->client_lock);
3207 list_for_each_safe(pos, next, &nn->client_lru) {
3208 clp = list_entry(pos, struct nfs4_client, cl_lru);
3209 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3210 t = clp->cl_time - cutoff;
3211 if (clientid_val > t)
3212 clientid_val = t;
3213 break;
3214 }
3215 if (atomic_read(&clp->cl_refcount)) {
3216 dprintk("NFSD: client in use (clientid %08x)\n",
3217 clp->cl_clientid.cl_id);
3218 continue;
3219 }
3220 unhash_client_locked(clp);
3221 list_add(&clp->cl_lru, &reaplist);
3222 }
3223 spin_unlock(&nn->client_lock);
3224 list_for_each_safe(pos, next, &reaplist) {
3225 clp = list_entry(pos, struct nfs4_client, cl_lru);
3226 dprintk("NFSD: purging unused client (clientid %08x)\n",
3227 clp->cl_clientid.cl_id);
3228 expire_client(clp);
3229 }
3230 spin_lock(&recall_lock);
3231 list_for_each_safe(pos, next, &del_recall_lru) {
3232 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3233 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
3234 continue;
3235 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3236 u = dp->dl_time - cutoff;
3237 if (test_val > u)
3238 test_val = u;
3239 break;
3240 }
3241 list_move(&dp->dl_recall_lru, &reaplist);
3242 }
3243 spin_unlock(&recall_lock);
3244 list_for_each_safe(pos, next, &reaplist) {
3245 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3246 unhash_delegation(dp);
3247 }
3248 test_val = nn->nfsd4_lease;
3249 list_for_each_safe(pos, next, &nn->close_lru) {
3250 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3251 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3252 u = oo->oo_time - cutoff;
3253 if (test_val > u)
3254 test_val = u;
3255 break;
3256 }
3257 release_openowner(oo);
3258 }
3259 if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
3260 clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
3261 nfs4_unlock_state();
3262 return clientid_val;
3263 }
3264
3265 static struct workqueue_struct *laundry_wq;
3266 static void laundromat_main(struct work_struct *);
3267
3268 static void
3269 laundromat_main(struct work_struct *laundry)
3270 {
3271 time_t t;
3272 struct delayed_work *dwork = container_of(laundry, struct delayed_work,
3273 work);
3274 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
3275 laundromat_work);
3276
3277 t = nfs4_laundromat(nn);
3278 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3279 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
3280 }
3281
3282 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3283 {
3284 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3285 return nfserr_bad_stateid;
3286 return nfs_ok;
3287 }
3288
3289 static int
3290 STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn)
3291 {
3292 if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time)
3293 return 0;
3294 dprintk("NFSD: stale stateid " STATEID_FMT "!\n",
3295 STATEID_VAL(stateid));
3296 return 1;
3297 }
3298
3299 static inline int
3300 access_permit_read(struct nfs4_ol_stateid *stp)
3301 {
3302 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3303 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3304 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3305 }
3306
3307 static inline int
3308 access_permit_write(struct nfs4_ol_stateid *stp)
3309 {
3310 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3311 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3312 }
3313
3314 static
3315 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3316 {
3317 __be32 status = nfserr_openmode;
3318
3319 /* For lock stateids, we test the parent open, not the lock: */
3320 if (stp->st_openstp)
3321 stp = stp->st_openstp;
3322 if ((flags & WR_STATE) && !access_permit_write(stp))
3323 goto out;
3324 if ((flags & RD_STATE) && !access_permit_read(stp))
3325 goto out;
3326 status = nfs_ok;
3327 out:
3328 return status;
3329 }
3330
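/*
 * Handle the special all-zeros and all-ones stateids: a READ with the
 * all-ones stateid is always allowed; during the grace period the
 * remaining cases must wait; otherwise the access is checked against
 * any conflicting share reservations on the file.
 */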
3331 static inline __be32
3332 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3333 {
3334 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3335 return nfs_ok;
3336 else if (locks_in_grace(net)) {
3337 /* The answer in the remaining cases depends on the existence of
3338 * conflicting state, so we must wait out the grace period. */
3339 return nfserr_grace;
3340 } else if (flags & WR_STATE)
3341 return nfs4_share_conflict(current_fh,
3342 NFS4_SHARE_DENY_WRITE);
3343 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3344 return nfs4_share_conflict(current_fh,
3345 NFS4_SHARE_DENY_READ);
3346 }
3347
3348 /*
3349 * Allow READ/WRITE during grace period on recovered state only for files
3350 * that are not able to provide mandatory locking.
3351 */
3352 static inline int
3353 grace_disallows_io(struct net *net, struct inode *inode)
3354 {
3355 return locks_in_grace(net) && mandatory_lock(inode);
3356 }
3357
3358 /* Returns true iff a is later than b: */
3359 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3360 {
3361 return (s32)a->si_generation - (s32)b->si_generation > 0;
3362 }
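/*
 * The signed subtraction above also handles si_generation wrapping
 * around zero: for example (illustrative values), a->si_generation == 1
 * compares as "after" b->si_generation == 0xffffffff, since
 * (s32)1 - (s32)0xffffffff == 2 > 0.
 */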
3363
3364 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3365 {
3366 /*
3367 * When sessions are used the stateid generation number is ignored
3368 * when it is zero.
3369 */
3370 if (has_session && in->si_generation == 0)
3371 return nfs_ok;
3372
3373 if (in->si_generation == ref->si_generation)
3374 return nfs_ok;
3375
3376 /* If the client sends us a stateid from the future, it's buggy: */
3377 if (stateid_generation_after(in, ref))
3378 return nfserr_bad_stateid;
3379 /*
3380 * However, we could see a stateid from the past, even from a
3381 * non-buggy client. For example, if the client sends a lock
3382 * while some IO is outstanding, the lock may bump si_generation
3383 * while the IO is still in flight. The client could avoid that
3384 * situation by waiting for responses on all the IO requests,
3385 * but it may get better performance by retrying IO that
3386 * receives an old_stateid error, as long as requests are rarely
3387 * reordered in flight:
3388 */
3389 return nfserr_old_stateid;
3390 }
3391
3392 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3393 {
3394 struct nfs4_stid *s;
3395 struct nfs4_ol_stateid *ols;
3396 __be32 status;
3397
3398 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3399 return nfserr_bad_stateid;
3400 /* Client debugging aid. */
3401 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
3402 char addr_str[INET6_ADDRSTRLEN];
3403 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
3404 sizeof(addr_str));
3405 pr_warn_ratelimited("NFSD: client %s testing state ID "
3406 "with incorrect client ID\n", addr_str);
3407 return nfserr_bad_stateid;
3408 }
3409 s = find_stateid(cl, stateid);
3410 if (!s)
3411 return nfserr_bad_stateid;
3412 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3413 if (status)
3414 return status;
3415 if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID)))
3416 return nfs_ok;
3417 ols = openlockstateid(s);
3418 if (ols->st_stateowner->so_is_open_owner
3419 && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3420 return nfserr_bad_stateid;
3421 return nfs_ok;
3422 }
3423
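/*
 * Common stateid lookup: reject the special stateids and stateids from
 * a previous server boot, then resolve the owning client and finally
 * the stateid itself, restricted to the caller's typemask.
 */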
3424 static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask,
3425 struct nfs4_stid **s, bool sessions,
3426 struct nfsd_net *nn)
3427 {
3428 struct nfs4_client *cl;
3429
3430 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3431 return nfserr_bad_stateid;
3432 if (STALE_STATEID(stateid, nn))
3433 return nfserr_stale_stateid;
3434 cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn);
3435 if (!cl)
3436 return nfserr_expired;
3437 *s = find_stateid_by_type(cl, stateid, typemask);
3438 if (!*s)
3439 return nfserr_bad_stateid;
3440 return nfs_ok;
3441
3442 }
3443
3444 /*
3445 * Checks for stateid operations
3446 */
3447 __be32
3448 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3449 stateid_t *stateid, int flags, struct file **filpp)
3450 {
3451 struct nfs4_stid *s;
3452 struct nfs4_ol_stateid *stp = NULL;
3453 struct nfs4_delegation *dp = NULL;
3454 struct svc_fh *current_fh = &cstate->current_fh;
3455 struct inode *ino = current_fh->fh_dentry->d_inode;
3456 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3457 __be32 status;
3458
3459 if (filpp)
3460 *filpp = NULL;
3461
3462 if (grace_disallows_io(net, ino))
3463 return nfserr_grace;
3464
3465 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3466 return check_special_stateids(net, current_fh, stateid, flags);
3467
3468 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3469 &s, cstate->minorversion, nn);
3470 if (status)
3471 return status;
3472 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3473 if (status)
3474 goto out;
3475 switch (s->sc_type) {
3476 case NFS4_DELEG_STID:
3477 dp = delegstateid(s);
3478 status = nfs4_check_delegmode(dp, flags);
3479 if (status)
3480 goto out;
3481 if (filpp) {
3482 *filpp = dp->dl_file->fi_deleg_file;
3483 if (!*filpp) {
3484 WARN_ON_ONCE(1);
3485 status = nfserr_serverfault;
3486 goto out;
3487 }
3488 }
3489 break;
3490 case NFS4_OPEN_STID:
3491 case NFS4_LOCK_STID:
3492 stp = openlockstateid(s);
3493 status = nfs4_check_fh(current_fh, stp);
3494 if (status)
3495 goto out;
3496 if (stp->st_stateowner->so_is_open_owner
3497 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
3498 goto out;
3499 status = nfs4_check_openmode(stp, flags);
3500 if (status)
3501 goto out;
3502 if (filpp) {
3503 if (flags & RD_STATE)
3504 *filpp = find_readable_file(stp->st_file);
3505 else
3506 *filpp = find_writeable_file(stp->st_file);
3507 }
3508 break;
3509 default:
3510 return nfserr_bad_stateid;
3511 }
3512 status = nfs_ok;
3513 out:
3514 return status;
3515 }
3516
3517 static __be32
3518 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3519 {
3520 if (check_for_locks(stp->st_file, lockowner(stp->st_stateowner)))
3521 return nfserr_locks_held;
3522 release_lock_stateid(stp);
3523 return nfs_ok;
3524 }
3525
3526 /*
3527 * Test if the stateid is valid
3528 */
3529 __be32
3530 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3531 struct nfsd4_test_stateid *test_stateid)
3532 {
3533 struct nfsd4_test_stateid_id *stateid;
3534 struct nfs4_client *cl = cstate->session->se_client;
3535
3536 nfs4_lock_state();
3537 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3538 stateid->ts_id_status =
3539 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3540 nfs4_unlock_state();
3541
3542 return nfs_ok;
3543 }
3544
3545 __be32
3546 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3547 struct nfsd4_free_stateid *free_stateid)
3548 {
3549 stateid_t *stateid = &free_stateid->fr_stateid;
3550 struct nfs4_stid *s;
3551 struct nfs4_client *cl = cstate->session->se_client;
3552 __be32 ret = nfserr_bad_stateid;
3553
3554 nfs4_lock_state();
3555 s = find_stateid(cl, stateid);
3556 if (!s)
3557 goto out;
3558 switch (s->sc_type) {
3559 case NFS4_DELEG_STID:
3560 ret = nfserr_locks_held;
3561 goto out;
3562 case NFS4_OPEN_STID:
3563 case NFS4_LOCK_STID:
3564 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3565 if (ret)
3566 goto out;
3567 if (s->sc_type == NFS4_LOCK_STID)
3568 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3569 else
3570 ret = nfserr_locks_held;
3571 break;
3572 default:
3573 ret = nfserr_bad_stateid;
3574 }
3575 out:
3576 nfs4_unlock_state();
3577 return ret;
3578 }
3579
3580 static inline int
3581 setlkflg (int type)
3582 {
3583 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3584 RD_STATE : WR_STATE;
3585 }
3586
3587 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3588 {
3589 struct svc_fh *current_fh = &cstate->current_fh;
3590 struct nfs4_stateowner *sop = stp->st_stateowner;
3591 __be32 status;
3592
3593 status = nfsd4_check_seqid(cstate, sop, seqid);
3594 if (status)
3595 return status;
3596 if (stp->st_stid.sc_type == NFS4_CLOSED_STID)
3597 /*
3598 * "Closed" stateid's exist *only* to return
3599 * nfserr_replay_me from the previous step.
3600 */
3601 return nfserr_bad_stateid;
3602 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
3603 if (status)
3604 return status;
3605 return nfs4_check_fh(current_fh, stp);
3606 }
3607
3608 /*
3609 * Checks for sequence id mutating operations.
3610 */
3611 static __be32
3612 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3613 stateid_t *stateid, char typemask,
3614 struct nfs4_ol_stateid **stpp,
3615 struct nfsd_net *nn)
3616 {
3617 __be32 status;
3618 struct nfs4_stid *s;
3619
3620 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
3621 seqid, STATEID_VAL(stateid));
3622
3623 *stpp = NULL;
3624 status = nfsd4_lookup_stateid(stateid, typemask, &s,
3625 cstate->minorversion, nn);
3626 if (status)
3627 return status;
3628 *stpp = openlockstateid(s);
3629 cstate->replay_owner = (*stpp)->st_stateowner;
3630
3631 return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp);
3632 }
3633
3634 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
3635 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
3636 {
3637 __be32 status;
3638 struct nfs4_openowner *oo;
3639
3640 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
3641 NFS4_OPEN_STID, stpp, nn);
3642 if (status)
3643 return status;
3644 oo = openowner((*stpp)->st_stateowner);
3645 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
3646 return nfserr_bad_stateid;
3647 return nfs_ok;
3648 }
3649
3650 __be32
3651 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3652 struct nfsd4_open_confirm *oc)
3653 {
3654 __be32 status;
3655 struct nfs4_openowner *oo;
3656 struct nfs4_ol_stateid *stp;
3657 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3658
3659 dprintk("NFSD: nfsd4_open_confirm on file %.*s\n",
3660 (int)cstate->current_fh.fh_dentry->d_name.len,
3661 cstate->current_fh.fh_dentry->d_name.name);
3662
3663 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
3664 if (status)
3665 return status;
3666
3667 nfs4_lock_state();
3668
3669 status = nfs4_preprocess_seqid_op(cstate,
3670 oc->oc_seqid, &oc->oc_req_stateid,
3671 NFS4_OPEN_STID, &stp, nn);
3672 if (status)
3673 goto out;
3674 oo = openowner(stp->st_stateowner);
3675 status = nfserr_bad_stateid;
3676 if (oo->oo_flags & NFS4_OO_CONFIRMED)
3677 goto out;
3678 oo->oo_flags |= NFS4_OO_CONFIRMED;
3679 update_stateid(&stp->st_stid.sc_stateid);
3680 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3681 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
3682 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
3683
3684 nfsd4_client_record_create(oo->oo_owner.so_client);
3685 status = nfs_ok;
3686 out:
3687 if (!cstate->replay_owner)
3688 nfs4_unlock_state();
3689 return status;
3690 }
3691
3692 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3693 {
3694 if (!test_access(access, stp))
3695 return;
3696 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3697 clear_access(access, stp);
3698 }
3699
3700 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
3701 {
3702 switch (to_access) {
3703 case NFS4_SHARE_ACCESS_READ:
3704 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
3705 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3706 break;
3707 case NFS4_SHARE_ACCESS_WRITE:
3708 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
3709 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
3710 break;
3711 case NFS4_SHARE_ACCESS_BOTH:
3712 break;
3713 default:
3714 WARN_ON_ONCE(1);
3715 }
3716 }
3717
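/*
 * Clear every deny bit that is not contained in the requested deny set.
 * For example, downgrading from DENY_BOTH (3) to DENY_READ (1) clears
 * the DENY_WRITE (2) and DENY_BOTH (3) bits, since neither is a subset
 * of the new deny mask.
 */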
3718 static void
3719 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
3720 {
3721 int i;
3722 for (i = 0; i < 4; i++) {
3723 if ((i & deny) != i)
3724 clear_deny(i, stp);
3725 }
3726 }
3727
3728 __be32
3729 nfsd4_open_downgrade(struct svc_rqst *rqstp,
3730 struct nfsd4_compound_state *cstate,
3731 struct nfsd4_open_downgrade *od)
3732 {
3733 __be32 status;
3734 struct nfs4_ol_stateid *stp;
3735 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3736
3737 dprintk("NFSD: nfsd4_open_downgrade on file %.*s\n",
3738 (int)cstate->current_fh.fh_dentry->d_name.len,
3739 cstate->current_fh.fh_dentry->d_name.name);
3740
3741 /* We don't yet support WANT bits: */
3742 if (od->od_deleg_want)
3743 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
3744 od->od_deleg_want);
3745
3746 nfs4_lock_state();
3747 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
3748 &od->od_stateid, &stp, nn);
3749 if (status)
3750 goto out;
3751 status = nfserr_inval;
3752 if (!test_access(od->od_share_access, stp)) {
3753 dprintk("NFSD: access not a subset of current bitmap: 0x%lx, input access=%08x\n",
3754 stp->st_access_bmap, od->od_share_access);
3755 goto out;
3756 }
3757 if (!test_deny(od->od_share_deny, stp)) {
3758 dprintk("NFSD: deny not a subset of current bitmap: 0x%lx, input deny=%08x\n",
3759 stp->st_deny_bmap, od->od_share_deny);
3760 goto out;
3761 }
3762 nfs4_stateid_downgrade(stp, od->od_share_access);
3763
3764 reset_union_bmap_deny(od->od_share_deny, stp);
3765
3766 update_stateid(&stp->st_stid.sc_stateid);
3767 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3768 status = nfs_ok;
3769 out:
3770 if (!cstate->replay_owner)
3771 nfs4_unlock_state();
3772 return status;
3773 }
3774
3775 void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so)
3776 {
3777 struct nfs4_openowner *oo;
3778 struct nfs4_ol_stateid *s;
3779
3780 if (!so->so_is_open_owner)
3781 return;
3782 oo = openowner(so);
3783 s = oo->oo_last_closed_stid;
3784 if (!s)
3785 return;
3786 if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) {
3787 /* Release the last_closed_stid on the next seqid bump: */
3788 oo->oo_flags |= NFS4_OO_PURGE_CLOSE;
3789 return;
3790 }
3791 oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE;
3792 release_last_closed_stateid(oo);
3793 }
3794
3795 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
3796 {
3797 unhash_open_stateid(s);
3798 s->st_stid.sc_type = NFS4_CLOSED_STID;
3799 }
3800
3801 /*
3802 * nfs4_unlock_state() called after encode
3803 */
3804 __be32
3805 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3806 struct nfsd4_close *close)
3807 {
3808 __be32 status;
3809 struct nfs4_openowner *oo;
3810 struct nfs4_ol_stateid *stp;
3811 struct net *net = SVC_NET(rqstp);
3812 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3813
3814 dprintk("NFSD: nfsd4_close on file %.*s\n",
3815 (int)cstate->current_fh.fh_dentry->d_name.len,
3816 cstate->current_fh.fh_dentry->d_name.name);
3817
3818 nfs4_lock_state();
3819 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
3820 &close->cl_stateid,
3821 NFS4_OPEN_STID|NFS4_CLOSED_STID,
3822 &stp, nn);
3823 if (status)
3824 goto out;
3825 oo = openowner(stp->st_stateowner);
3826 status = nfs_ok;
3827 update_stateid(&stp->st_stid.sc_stateid);
3828 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3829
3830 nfsd4_close_open_stateid(stp);
3831 release_last_closed_stateid(oo);
3832 oo->oo_last_closed_stid = stp;
3833
3834 if (list_empty(&oo->oo_owner.so_stateids)) {
3835 if (cstate->minorversion) {
3836 release_openowner(oo);
3837 cstate->replay_owner = NULL;
3838 } else {
3839 /*
3840 * In the 4.0 case we need to keep the owners around a
3841 * little while to handle CLOSE replay.
3842 */
3843 if (list_empty(&oo->oo_owner.so_stateids))
3844 move_to_close_lru(oo, SVC_NET(rqstp));
3845 }
3846 }
3847 out:
3848 if (!cstate->replay_owner)
3849 nfs4_unlock_state();
3850 return status;
3851 }
3852
3853 __be32
3854 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3855 struct nfsd4_delegreturn *dr)
3856 {
3857 struct nfs4_delegation *dp;
3858 stateid_t *stateid = &dr->dr_stateid;
3859 struct nfs4_stid *s;
3860 __be32 status;
3861 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3862
3863 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
3864 return status;
3865
3866 nfs4_lock_state();
3867 status = nfsd4_lookup_stateid(stateid, NFS4_DELEG_STID, &s,
3868 cstate->minorversion, nn);
3869 if (status)
3870 goto out;
3871 dp = delegstateid(s);
3872 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
3873 if (status)
3874 goto out;
3875
3876 unhash_delegation(dp);
3877 out:
3878 nfs4_unlock_state();
3879
3880 return status;
3881 }
3882
3883
3884 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
3885
3886 #define LOCKOWNER_INO_HASH_MASK (LOCKOWNER_INO_HASH_SIZE - 1)
3887
3888 static inline u64
3889 end_offset(u64 start, u64 len)
3890 {
3891 u64 end;
3892
3893 end = start + len;
3894 return end >= start ? end : NFS4_MAX_UINT64;
3895 }
3896
3897 /* last octet in a range */
3898 static inline u64
3899 last_byte_offset(u64 start, u64 len)
3900 {
3901 u64 end;
3902
3903 WARN_ON_ONCE(!len);
3904 end = start + len;
3905 return end > start ? end - 1 : NFS4_MAX_UINT64;
3906 }
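/*
 * Worked illustration of the two range helpers above: end_offset(5, 10)
 * returns 15, the first byte past the range, while last_byte_offset(5, 10)
 * returns 14, the final byte inside it.  If start + len wraps past 2^64,
 * both clamp to NFS4_MAX_UINT64.
 */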
3907
3908 static unsigned int lockowner_ino_hashval(struct inode *inode, u32 cl_id, struct xdr_netobj *ownername)
3909 {
3910 return (file_hashval(inode) + cl_id
3911 + opaque_hashval(ownername->data, ownername->len))
3912 & LOCKOWNER_INO_HASH_MASK;
3913 }
3914
3915 /*
3916 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
3917 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
3918 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
3919 * locking, this prevents us from being completely protocol-compliant. The
3920 * real solution to this problem is to start using unsigned file offsets in
3921 * the VFS, but this is a very deep change!
3922 */
3923 static inline void
3924 nfs4_transform_lock_offset(struct file_lock *lock)
3925 {
3926 if (lock->fl_start < 0)
3927 lock->fl_start = OFFSET_MAX;
3928 if (lock->fl_end < 0)
3929 lock->fl_end = OFFSET_MAX;
3930 }
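/*
 * Example of the clamping above: a whole-file NFSv4 lock (offset 0, length
 * NFS4_MAX_UINT64) yields fl_end = 2^64 - 2, which is negative when viewed
 * as a signed loff_t, so it is pinned to OFFSET_MAX.  That is the best we
 * can do given the signed-offset limitation described in the comment above.
 */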
3931
3932 /* Hack!: For now, we're defining this just so we can use a pointer to it
3933 * as a unique cookie to identify our (NFSv4's) posix locks. */
3934 static const struct lock_manager_operations nfsd_posix_mng_ops = {
3935 };
3936
3937 static inline void
3938 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
3939 {
3940 struct nfs4_lockowner *lo;
3941
3942 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
3943 lo = (struct nfs4_lockowner *) fl->fl_owner;
3944 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
3945 lo->lo_owner.so_owner.len, GFP_KERNEL);
3946 if (!deny->ld_owner.data)
3947 /* We just don't care that much */
3948 goto nevermind;
3949 deny->ld_owner.len = lo->lo_owner.so_owner.len;
3950 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
3951 } else {
3952 nevermind:
3953 deny->ld_owner.len = 0;
3954 deny->ld_owner.data = NULL;
3955 deny->ld_clientid.cl_boot = 0;
3956 deny->ld_clientid.cl_id = 0;
3957 }
3958 deny->ld_start = fl->fl_start;
3959 deny->ld_length = NFS4_MAX_UINT64;
3960 if (fl->fl_end != NFS4_MAX_UINT64)
3961 deny->ld_length = fl->fl_end - fl->fl_start + 1;
3962 deny->ld_type = NFS4_READ_LT;
3963 if (fl->fl_type != F_RDLCK)
3964 deny->ld_type = NFS4_WRITE_LT;
3965 }
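/*
 * Illustration of the LOCK denied reply built above: a conflicting POSIX
 * lock covering bytes 100..199 (fl_start 100, fl_end 199) is reported as
 * ld_start = 100, ld_length = 100; F_RDLCK maps to NFS4_READ_LT and any
 * other lock type to NFS4_WRITE_LT.
 */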
3966
3967 static bool same_lockowner_ino(struct nfs4_lockowner *lo, struct inode *inode, clientid_t *clid, struct xdr_netobj *owner)
3968 {
3969 struct nfs4_ol_stateid *lst;
3970
3971 if (!same_owner_str(&lo->lo_owner, owner, clid))
3972 return false;
3973 lst = list_first_entry(&lo->lo_owner.so_stateids,
3974 struct nfs4_ol_stateid, st_perstateowner);
3975 return lst->st_file->fi_inode == inode;
3976 }
3977
3978 static struct nfs4_lockowner *
3979 find_lockowner_str(struct inode *inode, clientid_t *clid,
3980 struct xdr_netobj *owner, struct nfsd_net *nn)
3981 {
3982 unsigned int hashval = lockowner_ino_hashval(inode, clid->cl_id, owner);
3983 struct nfs4_lockowner *lo;
3984
3985 list_for_each_entry(lo, &nn->lockowner_ino_hashtbl[hashval], lo_owner_ino_hash) {
3986 if (same_lockowner_ino(lo, inode, clid, owner))
3987 return lo;
3988 }
3989 return NULL;
3990 }
3991
3992 static void hash_lockowner(struct nfs4_lockowner *lo, unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp)
3993 {
3994 struct inode *inode = open_stp->st_file->fi_inode;
3995 unsigned int inohash = lockowner_ino_hashval(inode,
3996 clp->cl_clientid.cl_id, &lo->lo_owner.so_owner);
3997 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
3998
3999 list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
4000 list_add(&lo->lo_owner_ino_hash, &nn->lockowner_ino_hashtbl[inohash]);
4001 list_add(&lo->lo_perstateid, &open_stp->st_lockowners);
4002 }
4003
4004 /*
4005 * Alloc a lock owner structure.
4006 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has
4007 * occurred.
4008 *
4009 * strhashval = ownerstr_hashval
4010 */
4011
4012 static struct nfs4_lockowner *
4013 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
4014 struct nfs4_lockowner *lo;
4015
4016 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4017 if (!lo)
4018 return NULL;
4019 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4020 lo->lo_owner.so_is_open_owner = 0;
4021 /* It is the openowner seqid that will be incremented in encode in the
4022 * case of new lockowners; so increment the lock seqid manually: */
4023 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4024 hash_lockowner(lo, strhashval, clp, open_stp);
4025 return lo;
4026 }
4027
4028 static struct nfs4_ol_stateid *
4029 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
4030 {
4031 struct nfs4_ol_stateid *stp;
4032 struct nfs4_client *clp = lo->lo_owner.so_client;
4033
4034 stp = nfs4_alloc_stateid(clp);
4035 if (stp == NULL)
4036 return NULL;
4037 init_stid(&stp->st_stid, clp, NFS4_LOCK_STID);
4038 list_add(&stp->st_perfile, &fp->fi_stateids);
4039 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4040 stp->st_stateowner = &lo->lo_owner;
4041 get_nfs4_file(fp);
4042 stp->st_file = fp;
4043 stp->st_access_bmap = 0;
4044 stp->st_deny_bmap = open_stp->st_deny_bmap;
4045 stp->st_openstp = open_stp;
4046 return stp;
4047 }
4048
4049 static int
4050 check_lock_length(u64 offset, u64 length)
4051 {
4052 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
4053 LOFF_OVERFLOW(offset, length)));
4054 }
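/*
 * check_lock_length() rejects a length of zero and any range whose offset
 * plus length would wrap 64 bits, with the single exception of length
 * NFS4_MAX_UINT64, which the protocol uses to mean "to end of file" and
 * which is always accepted.
 */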
4055
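/*
 * Take an access reference on the open file for this lock stateid the
 * first time a given share-access mode is used; later LOCKs with the same
 * mode find the bit already set and take no extra reference.
 */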
4056 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4057 {
4058 struct nfs4_file *fp = lock_stp->st_file;
4059 int oflag = nfs4_access_to_omode(access);
4060
4061 if (test_access(access, lock_stp))
4062 return;
4063 nfs4_file_get_access(fp, oflag);
4064 set_access(access, lock_stp);
4065 }
4066
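/*
 * For a LOCK with a new lockowner: if a matching (inode, client, owner)
 * lockowner already exists, NFSv4.0 treats the request as a bad seqid
 * while 4.1 simply reuses the owner's single lock stateid; otherwise a new
 * lockowner and lock stateid are allocated and *new is set so the caller
 * can release them if the lock ultimately fails.
 */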
4067 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4068 {
4069 struct nfs4_file *fi = ost->st_file;
4070 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
4071 struct nfs4_client *cl = oo->oo_owner.so_client;
4072 struct nfs4_lockowner *lo;
4073 unsigned int strhashval;
4074 struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
4075
4076 lo = find_lockowner_str(fi->fi_inode, &cl->cl_clientid,
4077 &lock->v.new.owner, nn);
4078 if (lo) {
4079 if (!cstate->minorversion)
4080 return nfserr_bad_seqid;
4081 /* XXX: a lockowner always has exactly one stateid: */
4082 *lst = list_first_entry(&lo->lo_owner.so_stateids,
4083 struct nfs4_ol_stateid, st_perstateowner);
4084 return nfs_ok;
4085 }
4086 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
4087 &lock->v.new.owner);
4088 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4089 if (lo == NULL)
4090 return nfserr_jukebox;
4091 *lst = alloc_init_lock_stateid(lo, fi, ost);
4092 if (*lst == NULL) {
4093 release_lockowner(lo);
4094 return nfserr_jukebox;
4095 }
4096 *new = true;
4097 return nfs_ok;
4098 }
4099
4100 /*
4101 * LOCK operation
4102 */
4103 __be32
4104 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4105 struct nfsd4_lock *lock)
4106 {
4107 struct nfs4_openowner *open_sop = NULL;
4108 struct nfs4_lockowner *lock_sop = NULL;
4109 struct nfs4_ol_stateid *lock_stp;
4110 struct file *filp = NULL;
4111 struct file_lock *file_lock = NULL;
4112 struct file_lock *conflock = NULL;
4113 __be32 status = 0;
4114 bool new_state = false;
4115 int lkflg;
4116 int err;
4117 struct net *net = SVC_NET(rqstp);
4118 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4119
4120 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4121 (long long) lock->lk_offset,
4122 (long long) lock->lk_length);
4123
4124 if (check_lock_length(lock->lk_offset, lock->lk_length))
4125 return nfserr_inval;
4126
4127 if ((status = fh_verify(rqstp, &cstate->current_fh,
4128 S_IFREG, NFSD_MAY_LOCK))) {
4129 dprintk("NFSD: nfsd4_lock: permission denied!\n");
4130 return status;
4131 }
4132
4133 nfs4_lock_state();
4134
4135 if (lock->lk_is_new) {
4136 struct nfs4_ol_stateid *open_stp = NULL;
4137
4138 if (nfsd4_has_session(cstate))
4139 /* See rfc 5661 18.10.3: given clientid is ignored: */
4140 memcpy(&lock->v.new.clientid,
4141 &cstate->session->se_client->cl_clientid,
4142 sizeof(clientid_t));
4143
4144 status = nfserr_stale_clientid;
4145 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
4146 goto out;
4147
4148 /* validate and update open stateid and open seqid */
4149 status = nfs4_preprocess_confirmed_seqid_op(cstate,
4150 lock->lk_new_open_seqid,
4151 &lock->lk_new_open_stateid,
4152 &open_stp, nn);
4153 if (status)
4154 goto out;
4155 open_sop = openowner(open_stp->st_stateowner);
4156 status = nfserr_bad_stateid;
4157 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4158 &lock->v.new.clientid))
4159 goto out;
4160 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4161 &lock_stp, &new_state);
4162 } else
4163 status = nfs4_preprocess_seqid_op(cstate,
4164 lock->lk_old_lock_seqid,
4165 &lock->lk_old_lock_stateid,
4166 NFS4_LOCK_STID, &lock_stp, nn);
4167 if (status)
4168 goto out;
4169 lock_sop = lockowner(lock_stp->st_stateowner);
4170
4171 lkflg = setlkflg(lock->lk_type);
4172 status = nfs4_check_openmode(lock_stp, lkflg);
4173 if (status)
4174 goto out;
4175
4176 status = nfserr_grace;
4177 if (locks_in_grace(net) && !lock->lk_reclaim)
4178 goto out;
4179 status = nfserr_no_grace;
4180 if (!locks_in_grace(net) && lock->lk_reclaim)
4181 goto out;
4182
4183 file_lock = locks_alloc_lock();
4184 if (!file_lock) {
4185 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4186 status = nfserr_jukebox;
4187 goto out;
4188 }
4189
4190 locks_init_lock(file_lock);
4191 switch (lock->lk_type) {
4192 case NFS4_READ_LT:
4193 case NFS4_READW_LT:
4194 filp = find_readable_file(lock_stp->st_file);
4195 if (filp)
4196 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4197 file_lock->fl_type = F_RDLCK;
4198 break;
4199 case NFS4_WRITE_LT:
4200 case NFS4_WRITEW_LT:
4201 filp = find_writeable_file(lock_stp->st_file);
4202 if (filp)
4203 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4204 file_lock->fl_type = F_WRLCK;
4205 break;
4206 default:
4207 status = nfserr_inval;
4208 goto out;
4209 }
4210 if (!filp) {
4211 status = nfserr_openmode;
4212 goto out;
4213 }
4214 file_lock->fl_owner = (fl_owner_t)lock_sop;
4215 file_lock->fl_pid = current->tgid;
4216 file_lock->fl_file = filp;
4217 file_lock->fl_flags = FL_POSIX;
4218 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4219 file_lock->fl_start = lock->lk_offset;
4220 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4221 nfs4_transform_lock_offset(file_lock);
4222
4223 conflock = locks_alloc_lock();
4224 if (!conflock) {
4225 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4226 status = nfserr_jukebox;
4227 goto out;
4228 }
4229
4230 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
4231 switch (-err) {
4232 case 0: /* success! */
4233 update_stateid(&lock_stp->st_stid.sc_stateid);
4234 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4235 sizeof(stateid_t));
4236 status = 0;
4237 break;
4238 case (EAGAIN): /* conflock holds conflicting lock */
4239 status = nfserr_denied;
4240 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4241 nfs4_set_lock_denied(conflock, &lock->lk_denied);
4242 break;
4243 case (EDEADLK):
4244 status = nfserr_deadlock;
4245 break;
4246 default:
4247 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n", err);
4248 status = nfserrno(err);
4249 break;
4250 }
4251 out:
4252 if (status && new_state)
4253 release_lockowner(lock_sop);
4254 if (!cstate->replay_owner)
4255 nfs4_unlock_state();
4256 if (file_lock)
4257 locks_free_lock(file_lock);
4258 if (conflock)
4259 locks_free_lock(conflock);
4260 return status;
4261 }
4262
4263 /*
4264 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4265 * so we do a temporary open here just to get an open file to pass to
4266 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4267 * inode operation.)
4268 */
4269 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4270 {
4271 struct file *file;
4272 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4273 if (!err) {
4274 err = nfserrno(vfs_test_lock(file, lock));
4275 nfsd_close(file);
4276 }
4277 return err;
4278 }
4279
4280 /*
4281 * LOCKT operation
4282 */
4283 __be32
4284 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4285 struct nfsd4_lockt *lockt)
4286 {
4287 struct inode *inode;
4288 struct file_lock *file_lock = NULL;
4289 struct nfs4_lockowner *lo;
4290 __be32 status;
4291 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4292
4293 if (locks_in_grace(SVC_NET(rqstp)))
4294 return nfserr_grace;
4295
4296 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4297 return nfserr_inval;
4298
4299 nfs4_lock_state();
4300
4301 if (!nfsd4_has_session(cstate)) {
4302 status = lookup_clientid(&lockt->lt_clientid, false, nn, NULL);
4303 if (status)
4304 goto out;
4305 }
4306
4307 if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4308 goto out;
4309
4310 inode = cstate->current_fh.fh_dentry->d_inode;
4311 file_lock = locks_alloc_lock();
4312 if (!file_lock) {
4313 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4314 status = nfserr_jukebox;
4315 goto out;
4316 }
4317 locks_init_lock(file_lock);
4318 switch (lockt->lt_type) {
4319 case NFS4_READ_LT:
4320 case NFS4_READW_LT:
4321 file_lock->fl_type = F_RDLCK;
4322 break;
4323 case NFS4_WRITE_LT:
4324 case NFS4_WRITEW_LT:
4325 file_lock->fl_type = F_WRLCK;
4326 break;
4327 default:
4328 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4329 status = nfserr_inval;
4330 goto out;
4331 }
4332
4333 lo = find_lockowner_str(inode, &lockt->lt_clientid, &lockt->lt_owner, nn);
4334 if (lo)
4335 file_lock->fl_owner = (fl_owner_t)lo;
4336 file_lock->fl_pid = current->tgid;
4337 file_lock->fl_flags = FL_POSIX;
4338
4339 file_lock->fl_start = lockt->lt_offset;
4340 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4341
4342 nfs4_transform_lock_offset(file_lock);
4343
4344 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
4345 if (status)
4346 goto out;
4347
4348 if (file_lock->fl_type != F_UNLCK) {
4349 status = nfserr_denied;
4350 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
4351 }
4352 out:
4353 nfs4_unlock_state();
4354 if (file_lock)
4355 locks_free_lock(file_lock);
4356 return status;
4357 }
4358
4359 __be32
4360 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4361 struct nfsd4_locku *locku)
4362 {
4363 struct nfs4_ol_stateid *stp;
4364 struct file *filp = NULL;
4365 struct file_lock *file_lock = NULL;
4366 __be32 status;
4367 int err;
4368 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4369
4370 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4371 (long long) locku->lu_offset,
4372 (long long) locku->lu_length);
4373
4374 if (check_lock_length(locku->lu_offset, locku->lu_length))
4375 return nfserr_inval;
4376
4377 nfs4_lock_state();
4378
4379 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4380 &locku->lu_stateid, NFS4_LOCK_STID,
4381 &stp, nn);
4382 if (status)
4383 goto out;
4384 filp = find_any_file(stp->st_file);
4385 if (!filp) {
4386 status = nfserr_lock_range;
4387 goto out;
4388 }
4389 file_lock = locks_alloc_lock();
4390 if (!file_lock) {
4391 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4392 status = nfserr_jukebox;
4393 goto out;
4394 }
4395 locks_init_lock(file_lock);
4396 file_lock->fl_type = F_UNLCK;
4397 file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4398 file_lock->fl_pid = current->tgid;
4399 file_lock->fl_file = filp;
4400 file_lock->fl_flags = FL_POSIX;
4401 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4402 file_lock->fl_start = locku->lu_offset;
4403
4404 file_lock->fl_end = last_byte_offset(locku->lu_offset,
4405 locku->lu_length);
4406 nfs4_transform_lock_offset(file_lock);
4407
4408 /*
4409 * Try to unlock the file in the VFS.
4410 */
4411 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
4412 if (err) {
4413 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4414 goto out_nfserr;
4415 }
4416 /*
4417 * OK, unlock succeeded; the only thing left to do is update the stateid.
4418 */
4419 update_stateid(&stp->st_stid.sc_stateid);
4420 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4421
4422 out:
4423 if (!cstate->replay_owner)
4424 nfs4_unlock_state();
4425 if (file_lock)
4426 locks_free_lock(file_lock);
4427 return status;
4428
4429 out_nfserr:
4430 status = nfserrno(err);
4431 goto out;
4432 }
4433
4434 /*
4435 * returns
4436 * 1: locks held by lockowner
4437 * 0: no locks held by lockowner
4438 */
4439 static int
4440 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4441 {
4442 struct file_lock **flpp;
4443 struct inode *inode = filp->fi_inode;
4444 int status = 0;
4445
4446 lock_flocks();
4447 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4448 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4449 status = 1;
4450 goto out;
4451 }
4452 }
4453 out:
4454 unlock_flocks();
4455 return status;
4456 }
4457
4458 __be32
4459 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4460 struct nfsd4_compound_state *cstate,
4461 struct nfsd4_release_lockowner *rlockowner)
4462 {
4463 clientid_t *clid = &rlockowner->rl_clientid;
4464 struct nfs4_stateowner *sop;
4465 struct nfs4_lockowner *lo;
4466 struct nfs4_ol_stateid *stp;
4467 struct xdr_netobj *owner = &rlockowner->rl_owner;
4468 struct list_head matches;
4469 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4470 __be32 status;
4471 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4472
4473 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4474 clid->cl_boot, clid->cl_id);
4475
4476 nfs4_lock_state();
4477
4478 status = lookup_clientid(clid, cstate->minorversion, nn, NULL);
4479 if (status)
4480 goto out;
4481
4482 status = nfserr_locks_held;
4483 INIT_LIST_HEAD(&matches);
4484
4485 list_for_each_entry(sop, &nn->ownerstr_hashtbl[hashval], so_strhash) {
4486 if (sop->so_is_open_owner)
4487 continue;
4488 if (!same_owner_str(sop, owner, clid))
4489 continue;
4490 list_for_each_entry(stp, &sop->so_stateids,
4491 st_perstateowner) {
4492 lo = lockowner(sop);
4493 if (check_for_locks(stp->st_file, lo))
4494 goto out;
4495 list_add(&lo->lo_list, &matches);
4496 }
4497 }
4498 /* Clients probably won't expect us to return with some (but not all)
4499 * of the lockowner state released; so don't release any until all
4500 * have been checked. */
4501 status = nfs_ok;
4502 while (!list_empty(&matches)) {
4503 lo = list_entry(matches.next, struct nfs4_lockowner,
4504 lo_list);
4505 /* unhash_stateowner deletes so_perclient only
4506 * for openowners. */
4507 list_del(&lo->lo_list);
4508 release_lockowner(lo);
4509 }
4510 out:
4511 nfs4_unlock_state();
4512 return status;
4513 }
4514
4515 static inline struct nfs4_client_reclaim *
4516 alloc_reclaim(void)
4517 {
4518 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4519 }
4520
4521 bool
4522 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
4523 {
4524 struct nfs4_client_reclaim *crp;
4525
4526 crp = nfsd4_find_reclaim_client(name, nn);
4527 return (crp && crp->cr_clp);
4528 }
4529
4530 /*
4531 * failure => all bets are off for reclaim after the next reset; the client will get nfserr_no_grace...
4532 */
4533 struct nfs4_client_reclaim *
4534 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
4535 {
4536 unsigned int strhashval;
4537 struct nfs4_client_reclaim *crp;
4538
4539 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4540 crp = alloc_reclaim();
4541 if (crp) {
4542 strhashval = clientstr_hashval(name);
4543 INIT_LIST_HEAD(&crp->cr_strhash);
4544 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
4545 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4546 crp->cr_clp = NULL;
4547 nn->reclaim_str_hashtbl_size++;
4548 }
4549 return crp;
4550 }
4551
4552 void
4553 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
4554 {
4555 list_del(&crp->cr_strhash);
4556 kfree(crp);
4557 nn->reclaim_str_hashtbl_size--;
4558 }
4559
4560 void
4561 nfs4_release_reclaim(struct nfsd_net *nn)
4562 {
4563 struct nfs4_client_reclaim *crp = NULL;
4564 int i;
4565
4566 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4567 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
4568 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
4569 struct nfs4_client_reclaim, cr_strhash);
4570 nfs4_remove_reclaim_record(crp, nn);
4571 }
4572 }
4573 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
4574 }
4575
4576 /*
4577 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
4578 struct nfs4_client_reclaim *
4579 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
4580 {
4581 unsigned int strhashval;
4582 struct nfs4_client_reclaim *crp = NULL;
4583
4584 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
4585
4586 strhashval = clientstr_hashval(recdir);
4587 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
4588 if (same_name(crp->cr_recdir, recdir)) {
4589 return crp;
4590 }
4591 }
4592 return NULL;
4593 }
4594
4595 /*
4596 * Called from OPEN. Look for clientid in reclaim list.
4597 */
4598 __be32
4599 nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn)
4600 {
4601 struct nfs4_client *clp;
4602
4603 /* find clientid in conf_id_hashtbl */
4604 clp = find_confirmed_client(clid, sessions, nn);
4605 if (clp == NULL)
4606 return nfserr_reclaim_bad;
4607
4608 return nfsd4_client_record_check(clp) ? nfserr_reclaim_bad : nfs_ok;
4609 }
4610
4611 #ifdef CONFIG_NFSD_FAULT_INJECTION
4612
4613 u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
4614 {
4615 expire_client(clp);
4616 return 1;
4617 }
4618
4619 u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
4620 {
4621 char buf[INET6_ADDRSTRLEN];
4622 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
4623 printk(KERN_INFO "NFS Client: %s\n", buf);
4624 return 1;
4625 }
4626
4627 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
4628 const char *type)
4629 {
4630 char buf[INET6_ADDRSTRLEN];
4631 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
4632 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
4633 }
4634
4635 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_lockowner *))
4636 {
4637 struct nfs4_openowner *oop;
4638 struct nfs4_lockowner *lop, *lo_next;
4639 struct nfs4_ol_stateid *stp, *st_next;
4640 u64 count = 0;
4641
4642 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
4643 list_for_each_entry_safe(stp, st_next, &oop->oo_owner.so_stateids, st_perstateowner) {
4644 list_for_each_entry_safe(lop, lo_next, &stp->st_lockowners, lo_perstateid) {
4645 if (func)
4646 func(lop);
4647 if (++count == max)
4648 return count;
4649 }
4650 }
4651 }
4652
4653 return count;
4654 }
4655
4656 u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
4657 {
4658 return nfsd_foreach_client_lock(clp, max, release_lockowner);
4659 }
4660
4661 u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
4662 {
4663 u64 count = nfsd_foreach_client_lock(clp, max, NULL);
4664 nfsd_print_count(clp, count, "locked files");
4665 return count;
4666 }
4667
4668 static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
4669 {
4670 struct nfs4_openowner *oop, *next;
4671 u64 count = 0;
4672
4673 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
4674 if (func)
4675 func(oop);
4676 if (++count == max)
4677 break;
4678 }
4679
4680 return count;
4681 }
4682
4683 u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
4684 {
4685 return nfsd_foreach_client_open(clp, max, release_openowner);
4686 }
4687
4688 u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
4689 {
4690 u64 count = nfsd_foreach_client_open(clp, max, NULL);
4691 nfsd_print_count(clp, count, "open files");
4692 return count;
4693 }
4694
4695 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
4696 struct list_head *victims)
4697 {
4698 struct nfs4_delegation *dp, *next;
4699 u64 count = 0;
4700
4701 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
4702 if (victims)
4703 list_move(&dp->dl_recall_lru, victims);
4704 if (++count == max)
4705 break;
4706 }
4707 return count;
4708 }
4709
4710 u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
4711 {
4712 struct nfs4_delegation *dp, *next;
4713 LIST_HEAD(victims);
4714 u64 count;
4715
4716 spin_lock(&recall_lock);
4717 count = nfsd_find_all_delegations(clp, max, &victims);
4718 spin_unlock(&recall_lock);
4719
4720 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
4721 unhash_delegation(dp);
4722
4723 return count;
4724 }
4725
4726 u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
4727 {
4728 struct nfs4_delegation *dp, *next;
4729 LIST_HEAD(victims);
4730 u64 count;
4731
4732 spin_lock(&recall_lock);
4733 count = nfsd_find_all_delegations(clp, max, &victims);
4734 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
4735 nfsd_break_one_deleg(dp);
4736 spin_unlock(&recall_lock);
4737
4738 return count;
4739 }
4740
4741 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
4742 {
4743 u64 count = 0;
4744
4745 spin_lock(&recall_lock);
4746 count = nfsd_find_all_delegations(clp, max, NULL);
4747 spin_unlock(&recall_lock);
4748
4749 nfsd_print_count(clp, count, "delegations");
4750 return count;
4751 }
4752
4753 u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
4754 {
4755 struct nfs4_client *clp, *next;
4756 u64 count = 0;
4757 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
4758
4759 if (!nfsd_netns_ready(nn))
4760 return 0;
4761
4762 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
4763 count += func(clp, max - count);
4764 if ((max != 0) && (count >= max))
4765 break;
4766 }
4767
4768 return count;
4769 }
4770
4771 struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
4772 {
4773 struct nfs4_client *clp;
4774 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
4775
4776 if (!nfsd_netns_ready(nn))
4777 return NULL;
4778
4779 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
4780 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
4781 return clp;
4782 }
4783 return NULL;
4784 }
4785
4786 #endif /* CONFIG_NFSD_FAULT_INJECTION */
4787
4788 /* initialization to perform at module load time: */
4789
4790 void
4791 nfs4_state_init(void)
4792 {
4793 int i;
4794
4795 for (i = 0; i < FILE_HASH_SIZE; i++) {
4796 INIT_LIST_HEAD(&file_hashtbl[i]);
4797 }
4798 INIT_LIST_HEAD(&del_recall_lru);
4799 }
4800
4801 /*
4802 * Since the lifetime of a delegation isn't limited to that of an open, a
4803 * client may quite reasonably hang on to a delegation as long as it has
4804 * the inode cached. This becomes an obvious problem the first time a
4805 * client's inode cache approaches the size of the server's total memory.
4806 *
4807 * For now we avoid this problem by imposing a hard limit on the number
4808 * of delegations, which varies according to the server's memory size.
4809 */
4810 static void
4811 set_max_delegations(void)
4812 {
4813 /*
4814 * Allow at most 4 delegations per megabyte of RAM. Quick
4815 * estimates suggest that in the worst case (where every delegation
4816 * is for a different inode), a delegation could take about 1.5K,
4817 * giving a worst case usage of about 6% of memory.
4818 */
4819 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
4820 }
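/*
 * Sanity check of the arithmetic above, assuming the common 4 KiB page
 * size (PAGE_SHIFT == 12): the shift is 20 - 2 - 12 = 6, i.e. one
 * delegation per 64 free pages.  A megabyte is 256 such pages, so this
 * allows 256 / 64 = 4 delegations per MiB, matching the comment.
 */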
4821
4822 static int nfs4_state_create_net(struct net *net)
4823 {
4824 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4825 int i;
4826
4827 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
4828 CLIENT_HASH_SIZE, GFP_KERNEL);
4829 if (!nn->conf_id_hashtbl)
4830 goto err;
4831 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
4832 CLIENT_HASH_SIZE, GFP_KERNEL);
4833 if (!nn->unconf_id_hashtbl)
4834 goto err_unconf_id;
4835 nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
4836 OWNER_HASH_SIZE, GFP_KERNEL);
4837 if (!nn->ownerstr_hashtbl)
4838 goto err_ownerstr;
4839 nn->lockowner_ino_hashtbl = kmalloc(sizeof(struct list_head) *
4840 LOCKOWNER_INO_HASH_SIZE, GFP_KERNEL);
4841 if (!nn->lockowner_ino_hashtbl)
4842 goto err_lockowner_ino;
4843 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
4844 SESSION_HASH_SIZE, GFP_KERNEL);
4845 if (!nn->sessionid_hashtbl)
4846 goto err_sessionid;
4847
4848 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4849 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
4850 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
4851 }
4852 for (i = 0; i < OWNER_HASH_SIZE; i++)
4853 INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
4854 for (i = 0; i < LOCKOWNER_INO_HASH_SIZE; i++)
4855 INIT_LIST_HEAD(&nn->lockowner_ino_hashtbl[i]);
4856 for (i = 0; i < SESSION_HASH_SIZE; i++)
4857 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
4858 nn->conf_name_tree = RB_ROOT;
4859 nn->unconf_name_tree = RB_ROOT;
4860 INIT_LIST_HEAD(&nn->client_lru);
4861 INIT_LIST_HEAD(&nn->close_lru);
4862 spin_lock_init(&nn->client_lock);
4863
4864 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
4865 get_net(net);
4866
4867 return 0;
4868
4869 err_sessionid:
4870 kfree(nn->lockowner_ino_hashtbl);
4871 err_lockowner_ino:
4872 kfree(nn->ownerstr_hashtbl);
4873 err_ownerstr:
4874 kfree(nn->unconf_id_hashtbl);
4875 err_unconf_id:
4876 kfree(nn->conf_id_hashtbl);
4877 err:
4878 return -ENOMEM;
4879 }
4880
4881 static void
4882 nfs4_state_destroy_net(struct net *net)
4883 {
4884 int i;
4885 struct nfs4_client *clp = NULL;
4886 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4887 struct rb_node *node, *tmp;
4888
4889 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4890 while (!list_empty(&nn->conf_id_hashtbl[i])) {
4891 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
4892 destroy_client(clp);
4893 }
4894 }
4895
4896 node = rb_first(&nn->unconf_name_tree);
4897 while (node != NULL) {
4898 tmp = node;
4899 node = rb_next(tmp);
4900 clp = rb_entry(tmp, struct nfs4_client, cl_namenode);
4901 rb_erase(tmp, &nn->unconf_name_tree);
4902 destroy_client(clp);
4903 }
4904
4905 kfree(nn->sessionid_hashtbl);
4906 kfree(nn->lockowner_ino_hashtbl);
4907 kfree(nn->ownerstr_hashtbl);
4908 kfree(nn->unconf_id_hashtbl);
4909 kfree(nn->conf_id_hashtbl);
4910 put_net(net);
4911 }
4912
4913 int
4914 nfs4_state_start_net(struct net *net)
4915 {
4916 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4917 int ret;
4918
4919 /*
4920 * FIXME: For now, we hang most of the pernet global stuff off of
4921 * init_net until nfsd is fully containerized. Eventually, we'll
4922 * need to pass a net pointer into this function, take a reference
4923 * to that instead and then do most of the rest of this on a per-net
4924 * basis.
4925 */
4926 if (net != &init_net)
4927 return -EINVAL;
4928
4929 ret = nfs4_state_create_net(net);
4930 if (ret)
4931 return ret;
4932 nfsd4_client_tracking_init(net);
4933 nn->boot_time = get_seconds();
4934 locks_start_grace(net, &nn->nfsd4_manager);
4935 nn->grace_ended = false;
4936 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
4937 nn->nfsd4_grace, net);
4938 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
4939 return 0;
4940 }
4941
4942 /* initialization to perform when the nfsd service is started: */
4943
4944 int
4945 nfs4_state_start(void)
4946 {
4947 int ret;
4948
4949 ret = set_callback_cred();
4950 if (ret)
4951 return -ENOMEM;
4952 laundry_wq = create_singlethread_workqueue("nfsd4");
4953 if (laundry_wq == NULL) {
4954 ret = -ENOMEM;
4955 goto out_recovery;
4956 }
4957 ret = nfsd4_create_callback_queue();
4958 if (ret)
4959 goto out_free_laundry;
4960
4961 set_max_delegations();
4962
4963 return 0;
4964
4965 out_free_laundry:
4966 destroy_workqueue(laundry_wq);
4967 out_recovery:
4968 return ret;
4969 }
4970
4971 /* should be called with the state lock held */
4972 void
4973 nfs4_state_shutdown_net(struct net *net)
4974 {
4975 struct nfs4_delegation *dp = NULL;
4976 struct list_head *pos, *next, reaplist;
4977 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4978
4979 cancel_delayed_work_sync(&nn->laundromat_work);
4980 locks_end_grace(&nn->nfsd4_manager);
4981
4982 INIT_LIST_HEAD(&reaplist);
4983 spin_lock(&recall_lock);
4984 list_for_each_safe(pos, next, &del_recall_lru) {
4985 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4986 if (dp->dl_stid.sc_client->net != net)
4987 continue;
4988 list_move(&dp->dl_recall_lru, &reaplist);
4989 }
4990 spin_unlock(&recall_lock);
4991 list_for_each_safe(pos, next, &reaplist) {
4992 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
4993 unhash_delegation(dp);
4994 }
4995
4996 nfsd4_client_tracking_exit(net);
4997 nfs4_state_destroy_net(net);
4998 }
4999
5000 void
5001 nfs4_state_shutdown(void)
5002 {
5003 destroy_workqueue(laundry_wq);
5004 nfsd4_destroy_callback_queue();
5005 }
5006
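/*
 * Helpers for the NFSv4.1+ "current stateid": put_stateid() records an
 * operation's result stateid in the compound state, and get_stateid()
 * substitutes that saved value when a later op in the same compound passes
 * the special current-stateid sentinel.
 */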
5007 static void
5008 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5009 {
5010 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
5011 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
5012 }
5013
5014 static void
5015 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5016 {
5017 if (cstate->minorversion) {
5018 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
5019 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
5020 }
5021 }
5022
5023 void
5024 clear_current_stateid(struct nfsd4_compound_state *cstate)
5025 {
5026 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
5027 }
5028
5029 /*
5030 * functions to set current state id
5031 */
5032 void
5033 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5034 {
5035 put_stateid(cstate, &odp->od_stateid);
5036 }
5037
5038 void
5039 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
5040 {
5041 put_stateid(cstate, &open->op_stateid);
5042 }
5043
5044 void
5045 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
5046 {
5047 put_stateid(cstate, &close->cl_stateid);
5048 }
5049
5050 void
5051 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
5052 {
5053 put_stateid(cstate, &lock->lk_resp_stateid);
5054 }
5055
5056 /*
5057 * functions to consume current state id
5058 */
5059
5060 void
5061 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5062 {
5063 get_stateid(cstate, &odp->od_stateid);
5064 }
5065
5066 void
5067 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
5068 {
5069 get_stateid(cstate, &drp->dr_stateid);
5070 }
5071
5072 void
5073 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
5074 {
5075 get_stateid(cstate, &fsp->fr_stateid);
5076 }
5077
5078 void
5079 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
5080 {
5081 get_stateid(cstate, &setattr->sa_stateid);
5082 }
5083
5084 void
5085 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
5086 {
5087 get_stateid(cstate, &close->cl_stateid);
5088 }
5089
5090 void
5091 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
5092 {
5093 get_stateid(cstate, &locku->lu_stateid);
5094 }
5095
5096 void
5097 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
5098 {
5099 get_stateid(cstate, &read->rd_stateid);
5100 }
5101
5102 void
5103 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
5104 {
5105 get_stateid(cstate, &write->wr_stateid);
5106 }