nfsd: Convert nfs4_check_open_reclaim() to work with lookup_clientid()
fs/nfsd/nfs4state.c
1 /*
2 * Copyright (c) 2001 The Regents of the University of Michigan.
3 * All rights reserved.
4 *
5 * Kendrick Smith <kmsmith@umich.edu>
6 * Andy Adamson <kandros@umich.edu>
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the name of the University nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
28 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
29 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
30 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
31 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34
35 #include <linux/file.h>
36 #include <linux/fs.h>
37 #include <linux/slab.h>
38 #include <linux/namei.h>
39 #include <linux/swap.h>
40 #include <linux/pagemap.h>
41 #include <linux/ratelimit.h>
42 #include <linux/sunrpc/svcauth_gss.h>
43 #include <linux/sunrpc/addr.h>
44 #include <linux/hash.h>
45 #include "xdr4.h"
46 #include "xdr4cb.h"
47 #include "vfs.h"
48 #include "current_stateid.h"
49
50 #include "netns.h"
51
52 #define NFSDDBG_FACILITY NFSDDBG_PROC
53
54 #define all_ones {{~0,~0},~0}
55 static const stateid_t one_stateid = {
56 .si_generation = ~0,
57 .si_opaque = all_ones,
58 };
59 static const stateid_t zero_stateid = {
60 /* all fields zero */
61 };
62 static const stateid_t currentstateid = {
63 .si_generation = 1,
64 };
65
66 static u64 current_sessionid = 1;
67
68 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
69 #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
70 #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
71
72 /* forward declarations */
73 static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);
74
75 /* Locking: */
76
77 /* Currently used for almost all code touching nfsv4 state: */
78 static DEFINE_MUTEX(client_mutex);
79
80 /*
81 * Currently used for the del_recall_lru and file hash table. In an
82 * effort to decrease the scope of the client_mutex, this spinlock may
83 * eventually cover more:
84 */
85 static DEFINE_SPINLOCK(state_lock);
86
87 static struct kmem_cache *openowner_slab;
88 static struct kmem_cache *lockowner_slab;
89 static struct kmem_cache *file_slab;
90 static struct kmem_cache *stateid_slab;
91 static struct kmem_cache *deleg_slab;
92
93 void
94 nfs4_lock_state(void)
95 {
96 mutex_lock(&client_mutex);
97 }
98
99 static void free_session(struct nfsd4_session *);
100
101 static bool is_session_dead(struct nfsd4_session *ses)
102 {
103 return ses->se_flags & NFS4_SESSION_DEAD;
104 }
105
106 static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
107 {
108 if (atomic_read(&ses->se_ref) > ref_held_by_me)
109 return nfserr_jukebox;
110 ses->se_flags |= NFS4_SESSION_DEAD;
111 return nfs_ok;
112 }
113
114 void
115 nfs4_unlock_state(void)
116 {
117 mutex_unlock(&client_mutex);
118 }
119
120 static bool is_client_expired(struct nfs4_client *clp)
121 {
122 return clp->cl_time == 0;
123 }
124
125 static __be32 mark_client_expired_locked(struct nfs4_client *clp)
126 {
127 if (atomic_read(&clp->cl_refcount))
128 return nfserr_jukebox;
129 clp->cl_time = 0;
130 return nfs_ok;
131 }
132
133 static __be32 mark_client_expired(struct nfs4_client *clp)
134 {
135 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
136 __be32 ret;
137
138 spin_lock(&nn->client_lock);
139 ret = mark_client_expired_locked(clp);
140 spin_unlock(&nn->client_lock);
141 return ret;
142 }
143
144 static __be32 get_client_locked(struct nfs4_client *clp)
145 {
146 if (is_client_expired(clp))
147 return nfserr_expired;
148 atomic_inc(&clp->cl_refcount);
149 return nfs_ok;
150 }
151
152 /* must be called under the client_lock */
153 static inline void
154 renew_client_locked(struct nfs4_client *clp)
155 {
156 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
157
158 if (is_client_expired(clp)) {
159 WARN_ON(1);
160 printk("%s: client (clientid %08x/%08x) already expired\n",
161 __func__,
162 clp->cl_clientid.cl_boot,
163 clp->cl_clientid.cl_id);
164 return;
165 }
166
167 dprintk("renewing client (clientid %08x/%08x)\n",
168 clp->cl_clientid.cl_boot,
169 clp->cl_clientid.cl_id);
170 list_move_tail(&clp->cl_lru, &nn->client_lru);
171 clp->cl_time = get_seconds();
172 }
173
174 static inline void
175 renew_client(struct nfs4_client *clp)
176 {
177 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
178
179 spin_lock(&nn->client_lock);
180 renew_client_locked(clp);
181 spin_unlock(&nn->client_lock);
182 }
183
184 static void put_client_renew_locked(struct nfs4_client *clp)
185 {
186 if (!atomic_dec_and_test(&clp->cl_refcount))
187 return;
188 if (!is_client_expired(clp))
189 renew_client_locked(clp);
190 }
191
192 static void put_client_renew(struct nfs4_client *clp)
193 {
194 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
195
196 spin_lock(&nn->client_lock);
197 put_client_renew_locked(clp);
198 spin_unlock(&nn->client_lock);
199 }
200
201 static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
202 {
203 __be32 status;
204
205 if (is_session_dead(ses))
206 return nfserr_badsession;
207 status = get_client_locked(ses->se_client);
208 if (status)
209 return status;
210 atomic_inc(&ses->se_ref);
211 return nfs_ok;
212 }
213
214 static void nfsd4_put_session_locked(struct nfsd4_session *ses)
215 {
216 struct nfs4_client *clp = ses->se_client;
217
218 if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
219 free_session(ses);
220 put_client_renew_locked(clp);
221 }
222
223 static void nfsd4_put_session(struct nfsd4_session *ses)
224 {
225 struct nfs4_client *clp = ses->se_client;
226 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
227
228 spin_lock(&nn->client_lock);
229 nfsd4_put_session_locked(ses);
230 spin_unlock(&nn->client_lock);
231 }
232
233
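/*
 * Simple multiplicative hash over an opaque byte string: the bytes
 * b0, b1, ... hash to ((b0 * 37) + b1) * 37 + ... For example, the
 * two bytes { 1, 2 } hash to 1 * 37 + 2 = 39.
 */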
234 static inline u32
235 opaque_hashval(const void *ptr, int nbytes)
236 {
237 unsigned char *cptr = (unsigned char *) ptr;
238
239 u32 x = 0;
240 while (nbytes--) {
241 x *= 37;
242 x += *cptr++;
243 }
244 return x;
245 }
246
247 static void nfsd4_free_file(struct nfs4_file *f)
248 {
249 kmem_cache_free(file_slab, f);
250 }
251
252 static inline void
253 put_nfs4_file(struct nfs4_file *fi)
254 {
255 if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
256 hlist_del(&fi->fi_hash);
257 spin_unlock(&state_lock);
258 iput(fi->fi_inode);
259 nfsd4_free_file(fi);
260 }
261 }
262
263 static inline void
264 get_nfs4_file(struct nfs4_file *fi)
265 {
266 atomic_inc(&fi->fi_ref);
267 }
268
269 static int num_delegations;
270 unsigned long max_delegations;
271
272 /*
273 * Open owner state (share locks)
274 */
275
276 /* hash tables for lock and open owners */
277 #define OWNER_HASH_BITS 8
278 #define OWNER_HASH_SIZE (1 << OWNER_HASH_BITS)
279 #define OWNER_HASH_MASK (OWNER_HASH_SIZE - 1)
280
281 static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
282 {
283 unsigned int ret;
284
285 ret = opaque_hashval(ownername->data, ownername->len);
286 ret += clientid;
287 return ret & OWNER_HASH_MASK;
288 }
289
290 /* hash table for nfs4_file */
291 #define FILE_HASH_BITS 8
292 #define FILE_HASH_SIZE (1 << FILE_HASH_BITS)
293
294 static unsigned int file_hashval(struct inode *ino)
295 {
296 /* XXX: why are we hashing on inode pointer, anyway? */
297 return hash_ptr(ino, FILE_HASH_BITS);
298 }
299
300 static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
301
302 static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag)
303 {
304 WARN_ON_ONCE(!(fp->fi_fds[oflag] || fp->fi_fds[O_RDWR]));
305 atomic_inc(&fp->fi_access[oflag]);
306 }
307
308 static void nfs4_file_get_access(struct nfs4_file *fp, int oflag)
309 {
310 if (oflag == O_RDWR) {
311 __nfs4_file_get_access(fp, O_RDONLY);
312 __nfs4_file_get_access(fp, O_WRONLY);
313 } else
314 __nfs4_file_get_access(fp, oflag);
315 }
316
317 static void nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
318 {
319 if (fp->fi_fds[oflag]) {
320 fput(fp->fi_fds[oflag]);
321 fp->fi_fds[oflag] = NULL;
322 }
323 }
324
325 static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
326 {
327 if (atomic_dec_and_test(&fp->fi_access[oflag])) {
328 nfs4_file_put_fd(fp, oflag);
329 if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
330 nfs4_file_put_fd(fp, O_RDWR);
331 }
332 }
333
334 static void nfs4_file_put_access(struct nfs4_file *fp, int oflag)
335 {
336 if (oflag == O_RDWR) {
337 __nfs4_file_put_access(fp, O_RDONLY);
338 __nfs4_file_put_access(fp, O_WRONLY);
339 } else
340 __nfs4_file_put_access(fp, oflag);
341 }
342
343 static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct
344 kmem_cache *slab)
345 {
346 struct idr *stateids = &cl->cl_stateids;
347 struct nfs4_stid *stid;
348 int new_id;
349
350 stid = kmem_cache_alloc(slab, GFP_KERNEL);
351 if (!stid)
352 return NULL;
353
354 new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
355 if (new_id < 0)
356 goto out_free;
357 stid->sc_client = cl;
358 stid->sc_type = 0;
359 stid->sc_stateid.si_opaque.so_id = new_id;
360 stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
361 /* Will be incremented before return to client: */
362 stid->sc_stateid.si_generation = 0;
363
364 /*
365 * It shouldn't be a problem to reuse an opaque stateid value.
366 * I don't think it is for 4.1. But with 4.0 I worry that, for
367 * example, a stray write retransmission could be accepted by
368 * the server when it should have been rejected. Therefore,
369 * adopt a trick from the sctp code to attempt to maximize the
370 * amount of time until an id is reused, by ensuring they always
371 * "increase" (mod INT_MAX):
372 */
373 return stid;
374 out_free:
375 kmem_cache_free(slab, stid);
376 return NULL;
377 }
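/*
 * Note, for illustration: idr_alloc_cyclic() continues upward from the
 * most recently allocated id, so successive stateids get ids 0, 1, 2, ...
 * and only wrap back to the lowest free id after INT_MAX.
 */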
378
379 static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
380 {
381 return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
382 }
383
384 /*
385 * When we recall a delegation, we should be careful not to hand it
386 * out again straight away.
387 * To ensure this we keep a pair of bloom filters ('new' and 'old')
388 * in which the filehandles of recalled delegations are "stored".
389 * If a filehandle appears in either filter, a delegation is blocked.
390 * When a delegation is recalled, the filehandle is stored in the "new"
391 * filter.
392 * Every 30 seconds we swap the filters and clear the "new" one,
393 * unless both are empty of course.
394 *
395 * Each filter is 256 bits. We hash the filehandle to a 32-bit value
396 * and use its low 3 bytes as bit indices into the filter.
397 *
398 * 'state_lock', which is always held when block_delegations() is called,
399 * is used to manage concurrent access. Testing does not need the lock
400 * except when swapping the two filters.
401 */
402 static struct bloom_pair {
403 int entries, old_entries;
404 time_t swap_time;
405 int new; /* index into 'set' */
406 DECLARE_BITMAP(set[2], 256);
407 } blocked_delegations;
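/*
 * For illustration: a filehandle hashing to 0x00c0ffee sets bits 0xee,
 * 0xff and 0xc0 in the "new" filter; delegation_blocked() then reports
 * a match only if all three bits are found set in the same filter.
 */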
408
409 static int delegation_blocked(struct knfsd_fh *fh)
410 {
411 u32 hash;
412 struct bloom_pair *bd = &blocked_delegations;
413
414 if (bd->entries == 0)
415 return 0;
416 if (seconds_since_boot() - bd->swap_time > 30) {
417 spin_lock(&state_lock);
418 if (seconds_since_boot() - bd->swap_time > 30) {
419 bd->entries -= bd->old_entries;
420 bd->old_entries = bd->entries;
421 memset(bd->set[bd->new], 0,
422 sizeof(bd->set[0]));
423 bd->new = 1-bd->new;
424 bd->swap_time = seconds_since_boot();
425 }
426 spin_unlock(&state_lock);
427 }
428 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
429 if (test_bit(hash&255, bd->set[0]) &&
430 test_bit((hash>>8)&255, bd->set[0]) &&
431 test_bit((hash>>16)&255, bd->set[0]))
432 return 1;
433
434 if (test_bit(hash&255, bd->set[1]) &&
435 test_bit((hash>>8)&255, bd->set[1]) &&
436 test_bit((hash>>16)&255, bd->set[1]))
437 return 1;
438
439 return 0;
440 }
441
442 static void block_delegations(struct knfsd_fh *fh)
443 {
444 u32 hash;
445 struct bloom_pair *bd = &blocked_delegations;
446
447 hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
448
449 __set_bit(hash&255, bd->set[bd->new]);
450 __set_bit((hash>>8)&255, bd->set[bd->new]);
451 __set_bit((hash>>16)&255, bd->set[bd->new]);
452 if (bd->entries == 0)
453 bd->swap_time = seconds_since_boot();
454 bd->entries += 1;
455 }
456
457 static struct nfs4_delegation *
458 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
459 {
460 struct nfs4_delegation *dp;
461
462 dprintk("NFSD alloc_init_deleg\n");
463 if (num_delegations > max_delegations)
464 return NULL;
465 if (delegation_blocked(&current_fh->fh_handle))
466 return NULL;
467 dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
468 if (dp == NULL)
469 return dp;
470 /*
471 * delegation seqids are never incremented. The 4.1 special
472 * meaning of seqid 0 isn't meaningful, really, but let's avoid
473 * 0 anyway just for consistency and use 1:
474 */
475 dp->dl_stid.sc_stateid.si_generation = 1;
476 num_delegations++;
477 INIT_LIST_HEAD(&dp->dl_perfile);
478 INIT_LIST_HEAD(&dp->dl_perclnt);
479 INIT_LIST_HEAD(&dp->dl_recall_lru);
480 dp->dl_file = NULL;
481 dp->dl_type = NFS4_OPEN_DELEGATE_READ;
482 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
483 dp->dl_time = 0;
484 atomic_set(&dp->dl_count, 1);
485 nfsd4_init_callback(&dp->dl_recall);
486 return dp;
487 }
488
489 static void remove_stid(struct nfs4_stid *s)
490 {
491 struct idr *stateids = &s->sc_client->cl_stateids;
492
493 idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
494 }
495
496 static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
497 {
498 kmem_cache_free(slab, s);
499 }
500
501 void
502 nfs4_put_delegation(struct nfs4_delegation *dp)
503 {
504 if (atomic_dec_and_test(&dp->dl_count)) {
505 nfs4_free_stid(deleg_slab, &dp->dl_stid);
506 num_delegations--;
507 }
508 }
509
510 static void nfs4_put_deleg_lease(struct nfs4_file *fp)
511 {
512 if (!fp->fi_lease)
513 return;
514 if (atomic_dec_and_test(&fp->fi_delegees)) {
515 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
516 fp->fi_lease = NULL;
517 fput(fp->fi_deleg_file);
518 fp->fi_deleg_file = NULL;
519 }
520 }
521
522 static void unhash_stid(struct nfs4_stid *s)
523 {
524 s->sc_type = 0;
525 }
526
527 static void
528 hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
529 {
530 lockdep_assert_held(&state_lock);
531
532 dp->dl_stid.sc_type = NFS4_DELEG_STID;
533 list_add(&dp->dl_perfile, &fp->fi_delegations);
534 list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
535 }
536
537 /* Called under the state lock. */
538 static void
539 unhash_delegation(struct nfs4_delegation *dp)
540 {
541 spin_lock(&state_lock);
542 list_del_init(&dp->dl_perclnt);
543 list_del_init(&dp->dl_perfile);
544 list_del_init(&dp->dl_recall_lru);
545 spin_unlock(&state_lock);
546 if (dp->dl_file) {
547 nfs4_put_deleg_lease(dp->dl_file);
548 put_nfs4_file(dp->dl_file);
549 dp->dl_file = NULL;
550 }
551 }
552
553
554
555 static void destroy_revoked_delegation(struct nfs4_delegation *dp)
556 {
557 list_del_init(&dp->dl_recall_lru);
558 remove_stid(&dp->dl_stid);
559 nfs4_put_delegation(dp);
560 }
561
562 static void destroy_delegation(struct nfs4_delegation *dp)
563 {
564 unhash_delegation(dp);
565 remove_stid(&dp->dl_stid);
566 nfs4_put_delegation(dp);
567 }
568
569 static void revoke_delegation(struct nfs4_delegation *dp)
570 {
571 struct nfs4_client *clp = dp->dl_stid.sc_client;
572
573 if (clp->cl_minorversion == 0)
574 destroy_delegation(dp);
575 else {
576 unhash_delegation(dp);
577 dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
578 list_add(&dp->dl_recall_lru, &clp->cl_revoked);
579 }
580 }
581
582 /*
583 * SETCLIENTID state
584 */
585
586 static unsigned int clientid_hashval(u32 id)
587 {
588 return id & CLIENT_HASH_MASK;
589 }
590
591 static unsigned int clientstr_hashval(const char *name)
592 {
593 return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
594 }
595
596 /*
597 * We store the NONE, READ, WRITE, and BOTH bits separately in the
598 * st_{access,deny}_bmap field of the stateid, in order to track not
599 * only what share bits are currently in force, but also what
600 * combinations of share bits previous opens have used. This allows us
601 * to enforce the recommendation of rfc 3530 14.2.19 that the server
602 * return an error if the client attempts to downgrade to a combination
603 * of share bits not explicable by closing some of its previous opens.
604 *
605 * XXX: This enforcement is actually incomplete, since we don't keep
606 * track of access/deny bit combinations; so, e.g., we allow:
607 *
608 * OPEN allow read, deny write
609 * OPEN allow both, deny none
610 * DOWNGRADE allow read, deny none
611 *
612 * which we should reject.
613 */
614 static unsigned int
615 bmap_to_share_mode(unsigned long bmap) {
616 int i;
617 unsigned int access = 0;
618
619 for (i = 1; i < 4; i++) {
620 if (test_bit(i, &bmap))
621 access |= i;
622 }
623 return access;
624 }
625
626 static bool
627 test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
628 unsigned int access, deny;
629
630 access = bmap_to_share_mode(stp->st_access_bmap);
631 deny = bmap_to_share_mode(stp->st_deny_bmap);
632 if ((access & open->op_share_deny) || (deny & open->op_share_access))
633 return false;
634 return true;
635 }
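/*
 * Example: with NFS4_SHARE_ACCESS_READ = 1, _WRITE = 2 and _BOTH = 3,
 * a bmap with bits 1 and 2 set maps to share mode 3 (BOTH), and
 * test_share() refuses a new open for read if any earlier open on the
 * stateid denied read (deny bmap bit 1 set).
 */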
636
637 /* set share access for a given stateid */
638 static inline void
639 set_access(u32 access, struct nfs4_ol_stateid *stp)
640 {
641 __set_bit(access, &stp->st_access_bmap);
642 }
643
644 /* clear share access for a given stateid */
645 static inline void
646 clear_access(u32 access, struct nfs4_ol_stateid *stp)
647 {
648 __clear_bit(access, &stp->st_access_bmap);
649 }
650
651 /* test whether a given stateid has access */
652 static inline bool
653 test_access(u32 access, struct nfs4_ol_stateid *stp)
654 {
655 return test_bit(access, &stp->st_access_bmap);
656 }
657
658 /* set share deny for a given stateid */
659 static inline void
660 set_deny(u32 access, struct nfs4_ol_stateid *stp)
661 {
662 __set_bit(access, &stp->st_deny_bmap);
663 }
664
665 /* clear share deny for a given stateid */
666 static inline void
667 clear_deny(u32 access, struct nfs4_ol_stateid *stp)
668 {
669 __clear_bit(access, &stp->st_deny_bmap);
670 }
671
672 /* test whether a given stateid is denying specific access */
673 static inline bool
674 test_deny(u32 access, struct nfs4_ol_stateid *stp)
675 {
676 return test_bit(access, &stp->st_deny_bmap);
677 }
678
679 static int nfs4_access_to_omode(u32 access)
680 {
681 switch (access & NFS4_SHARE_ACCESS_BOTH) {
682 case NFS4_SHARE_ACCESS_READ:
683 return O_RDONLY;
684 case NFS4_SHARE_ACCESS_WRITE:
685 return O_WRONLY;
686 case NFS4_SHARE_ACCESS_BOTH:
687 return O_RDWR;
688 }
689 WARN_ON_ONCE(1);
690 return O_RDONLY;
691 }
692
693 /* release all access and file references for a given stateid */
694 static void
695 release_all_access(struct nfs4_ol_stateid *stp)
696 {
697 int i;
698
699 for (i = 1; i < 4; i++) {
700 if (test_access(i, stp))
701 nfs4_file_put_access(stp->st_file,
702 nfs4_access_to_omode(i));
703 clear_access(i, stp);
704 }
705 }
706
707 static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
708 {
709 list_del(&stp->st_perfile);
710 list_del(&stp->st_perstateowner);
711 }
712
713 static void close_generic_stateid(struct nfs4_ol_stateid *stp)
714 {
715 release_all_access(stp);
716 put_nfs4_file(stp->st_file);
717 stp->st_file = NULL;
718 }
719
720 static void free_generic_stateid(struct nfs4_ol_stateid *stp)
721 {
722 remove_stid(&stp->st_stid);
723 nfs4_free_stid(stateid_slab, &stp->st_stid);
724 }
725
726 static void __release_lock_stateid(struct nfs4_ol_stateid *stp)
727 {
728 struct file *file;
729
730 list_del(&stp->st_locks);
731 unhash_generic_stateid(stp);
732 unhash_stid(&stp->st_stid);
733 file = find_any_file(stp->st_file);
734 if (file)
735 locks_remove_posix(file, (fl_owner_t)lockowner(stp->st_stateowner));
736 close_generic_stateid(stp);
737 free_generic_stateid(stp);
738 }
739
740 static void unhash_lockowner(struct nfs4_lockowner *lo)
741 {
742 struct nfs4_ol_stateid *stp;
743
744 list_del(&lo->lo_owner.so_strhash);
745 while (!list_empty(&lo->lo_owner.so_stateids)) {
746 stp = list_first_entry(&lo->lo_owner.so_stateids,
747 struct nfs4_ol_stateid, st_perstateowner);
748 __release_lock_stateid(stp);
749 }
750 }
751
752 static void nfs4_free_lockowner(struct nfs4_lockowner *lo)
753 {
754 kfree(lo->lo_owner.so_owner.data);
755 kmem_cache_free(lockowner_slab, lo);
756 }
757
758 static void release_lockowner(struct nfs4_lockowner *lo)
759 {
760 unhash_lockowner(lo);
761 nfs4_free_lockowner(lo);
762 }
763
764 static void release_lockowner_if_empty(struct nfs4_lockowner *lo)
765 {
766 if (list_empty(&lo->lo_owner.so_stateids))
767 release_lockowner(lo);
768 }
769
770 static void release_lock_stateid(struct nfs4_ol_stateid *stp)
771 {
772 struct nfs4_lockowner *lo;
773
774 lo = lockowner(stp->st_stateowner);
775 __release_lock_stateid(stp);
776 release_lockowner_if_empty(lo);
777 }
778
779 static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp)
780 {
781 struct nfs4_ol_stateid *stp;
782
783 while (!list_empty(&open_stp->st_locks)) {
784 stp = list_entry(open_stp->st_locks.next,
785 struct nfs4_ol_stateid, st_locks);
786 release_lock_stateid(stp);
787 }
788 }
789
790 static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
791 {
792 unhash_generic_stateid(stp);
793 release_open_stateid_locks(stp);
794 close_generic_stateid(stp);
795 }
796
797 static void release_open_stateid(struct nfs4_ol_stateid *stp)
798 {
799 unhash_open_stateid(stp);
800 free_generic_stateid(stp);
801 }
802
803 static void unhash_openowner(struct nfs4_openowner *oo)
804 {
805 struct nfs4_ol_stateid *stp;
806
807 list_del(&oo->oo_owner.so_strhash);
808 list_del(&oo->oo_perclient);
809 while (!list_empty(&oo->oo_owner.so_stateids)) {
810 stp = list_first_entry(&oo->oo_owner.so_stateids,
811 struct nfs4_ol_stateid, st_perstateowner);
812 release_open_stateid(stp);
813 }
814 }
815
816 static void release_last_closed_stateid(struct nfs4_openowner *oo)
817 {
818 struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;
819
820 if (s) {
821 free_generic_stateid(s);
822 oo->oo_last_closed_stid = NULL;
823 }
824 }
825
826 static void nfs4_free_openowner(struct nfs4_openowner *oo)
827 {
828 kfree(oo->oo_owner.so_owner.data);
829 kmem_cache_free(openowner_slab, oo);
830 }
831
832 static void release_openowner(struct nfs4_openowner *oo)
833 {
834 unhash_openowner(oo);
835 list_del(&oo->oo_close_lru);
836 release_last_closed_stateid(oo);
837 nfs4_free_openowner(oo);
838 }
839
840 static inline int
841 hash_sessionid(struct nfs4_sessionid *sessionid)
842 {
843 struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;
844
845 return sid->sequence % SESSION_HASH_SIZE;
846 }
847
848 #ifdef NFSD_DEBUG
849 static inline void
850 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
851 {
852 u32 *ptr = (u32 *)(&sessionid->data[0]);
853 dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
854 }
855 #else
856 static inline void
857 dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
858 {
859 }
860 #endif
861
862 /*
863 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
864 * won't be used for replay.
865 */
866 void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
867 {
868 struct nfs4_stateowner *so = cstate->replay_owner;
869
870 if (nfserr == nfserr_replay_me)
871 return;
872
873 if (!seqid_mutating_err(ntohl(nfserr))) {
874 cstate->replay_owner = NULL;
875 return;
876 }
877 if (!so)
878 return;
879 if (so->so_is_open_owner)
880 release_last_closed_stateid(openowner(so));
881 so->so_seqid++;
882 return;
883 }
884
885 static void
886 gen_sessionid(struct nfsd4_session *ses)
887 {
888 struct nfs4_client *clp = ses->se_client;
889 struct nfsd4_sessionid *sid;
890
891 sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
892 sid->clientid = clp->cl_clientid;
893 sid->sequence = current_sessionid++;
894 sid->reserved = 0;
895 }
896
897 /*
898 * The protocol defines ca_maxresponsesize_cached to include the size of
899 * the rpc header, but all we need to cache is the data starting after
900 * the end of the initial SEQUENCE operation--the rest we regenerate
901 * each time. Therefore we can advertise a ca_maxresponsesize_cached
902 * value that is the number of bytes in our cache plus a few additional
903 * bytes. In order to stay on the safe side, and not promise more than
904 * we can cache, those additional bytes must be the minimum possible: 24
905 * bytes of rpc header (xid through accept state, with AUTH_NULL
906 * verifier), 12 for the compound header (with zero-length tag), and 44
907 * for the SEQUENCE op response:
908 */
909 #define NFSD_MIN_HDR_SEQ_SZ (24 + 12 + 44)
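/* i.e. 24 + 12 + 44 = 80 bytes */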
910
911 static void
912 free_session_slots(struct nfsd4_session *ses)
913 {
914 int i;
915
916 for (i = 0; i < ses->se_fchannel.maxreqs; i++)
917 kfree(ses->se_slots[i]);
918 }
919
920 /*
921 * We don't actually need to cache the rpc and session headers, so we
922 * can allocate a little less for each slot:
923 */
924 static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
925 {
926 u32 size;
927
928 if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
929 size = 0;
930 else
931 size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
932 return size + sizeof(struct nfsd4_slot);
933 }
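/*
 * E.g. a negotiated maxresp_cached of 2000 bytes yields per-slot
 * allocations of 2000 - NFSD_MIN_HDR_SEQ_SZ (80) = 1920 cache bytes
 * plus the struct nfsd4_slot header itself.
 */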
934
935 /*
936 * XXX: If we run out of reserved DRC memory we could (up to a point)
937 * re-negotiate active sessions and reduce their slot usage to make
938 * room for new connections. For now we just fail the create session.
939 */
940 static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
941 {
942 u32 slotsize = slot_bytes(ca);
943 u32 num = ca->maxreqs;
944 int avail;
945
946 spin_lock(&nfsd_drc_lock);
947 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
948 nfsd_drc_max_mem - nfsd_drc_mem_used);
949 num = min_t(int, num, avail / slotsize);
950 nfsd_drc_mem_used += num * slotsize;
951 spin_unlock(&nfsd_drc_lock);
952
953 return num;
954 }
955
956 static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
957 {
958 int slotsize = slot_bytes(ca);
959
960 spin_lock(&nfsd_drc_lock);
961 nfsd_drc_mem_used -= slotsize * ca->maxreqs;
962 spin_unlock(&nfsd_drc_lock);
963 }
964
965 static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
966 struct nfsd4_channel_attrs *battrs)
967 {
968 int numslots = fattrs->maxreqs;
969 int slotsize = slot_bytes(fattrs);
970 struct nfsd4_session *new;
971 int mem, i;
972
973 BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
974 + sizeof(struct nfsd4_session) > PAGE_SIZE);
975 mem = numslots * sizeof(struct nfsd4_slot *);
976
977 new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
978 if (!new)
979 return NULL;
980 /* allocate each struct nfsd4_slot and data cache in one piece */
981 for (i = 0; i < numslots; i++) {
982 new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
983 if (!new->se_slots[i])
984 goto out_free;
985 }
986
987 memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
988 memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));
989
990 return new;
991 out_free:
992 while (i--)
993 kfree(new->se_slots[i]);
994 kfree(new);
995 return NULL;
996 }
997
998 static void free_conn(struct nfsd4_conn *c)
999 {
1000 svc_xprt_put(c->cn_xprt);
1001 kfree(c);
1002 }
1003
1004 static void nfsd4_conn_lost(struct svc_xpt_user *u)
1005 {
1006 struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
1007 struct nfs4_client *clp = c->cn_session->se_client;
1008
1009 spin_lock(&clp->cl_lock);
1010 if (!list_empty(&c->cn_persession)) {
1011 list_del(&c->cn_persession);
1012 free_conn(c);
1013 }
1014 nfsd4_probe_callback(clp);
1015 spin_unlock(&clp->cl_lock);
1016 }
1017
1018 static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1019 {
1020 struct nfsd4_conn *conn;
1021
1022 conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
1023 if (!conn)
1024 return NULL;
1025 svc_xprt_get(rqstp->rq_xprt);
1026 conn->cn_xprt = rqstp->rq_xprt;
1027 conn->cn_flags = flags;
1028 INIT_LIST_HEAD(&conn->cn_xpt_user.list);
1029 return conn;
1030 }
1031
1032 static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1033 {
1034 conn->cn_session = ses;
1035 list_add(&conn->cn_persession, &ses->se_conns);
1036 }
1037
1038 static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1039 {
1040 struct nfs4_client *clp = ses->se_client;
1041
1042 spin_lock(&clp->cl_lock);
1043 __nfsd4_hash_conn(conn, ses);
1044 spin_unlock(&clp->cl_lock);
1045 }
1046
1047 static int nfsd4_register_conn(struct nfsd4_conn *conn)
1048 {
1049 conn->cn_xpt_user.callback = nfsd4_conn_lost;
1050 return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1051 }
1052
1053 static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
1054 {
1055 int ret;
1056
1057 nfsd4_hash_conn(conn, ses);
1058 ret = nfsd4_register_conn(conn);
1059 if (ret)
1060 /* oops; xprt is already down: */
1061 nfsd4_conn_lost(&conn->cn_xpt_user);
1062 if (conn->cn_flags & NFS4_CDFC4_BACK) {
1063 /* callback channel may be back up */
1064 nfsd4_probe_callback(ses->se_client);
1065 }
1066 }
1067
1068 static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1069 {
1070 u32 dir = NFS4_CDFC4_FORE;
1071
1072 if (cses->flags & SESSION4_BACK_CHAN)
1073 dir |= NFS4_CDFC4_BACK;
1074 return alloc_conn(rqstp, dir);
1075 }
1076
1077 /* must be called under client_lock */
1078 static void nfsd4_del_conns(struct nfsd4_session *s)
1079 {
1080 struct nfs4_client *clp = s->se_client;
1081 struct nfsd4_conn *c;
1082
1083 spin_lock(&clp->cl_lock);
1084 while (!list_empty(&s->se_conns)) {
1085 c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
1086 list_del_init(&c->cn_persession);
1087 spin_unlock(&clp->cl_lock);
1088
1089 unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
1090 free_conn(c);
1091
1092 spin_lock(&clp->cl_lock);
1093 }
1094 spin_unlock(&clp->cl_lock);
1095 }
1096
1097 static void __free_session(struct nfsd4_session *ses)
1098 {
1099 free_session_slots(ses);
1100 kfree(ses);
1101 }
1102
1103 static void free_session(struct nfsd4_session *ses)
1104 {
1105 struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
1106
1107 lockdep_assert_held(&nn->client_lock);
1108 nfsd4_del_conns(ses);
1109 nfsd4_put_drc_mem(&ses->se_fchannel);
1110 __free_session(ses);
1111 }
1112
1113 static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1114 {
1115 int idx;
1116 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1117
1118 new->se_client = clp;
1119 gen_sessionid(new);
1120
1121 INIT_LIST_HEAD(&new->se_conns);
1122
1123 new->se_cb_seq_nr = 1;
1124 new->se_flags = cses->flags;
1125 new->se_cb_prog = cses->callback_prog;
1126 new->se_cb_sec = cses->cb_sec;
1127 atomic_set(&new->se_ref, 0);
1128 idx = hash_sessionid(&new->se_sessionid);
1129 spin_lock(&nn->client_lock);
1130 list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1131 spin_lock(&clp->cl_lock);
1132 list_add(&new->se_perclnt, &clp->cl_sessions);
1133 spin_unlock(&clp->cl_lock);
1134 spin_unlock(&nn->client_lock);
1135
1136 if (cses->flags & SESSION4_BACK_CHAN) {
1137 struct sockaddr *sa = svc_addr(rqstp);
1138 /*
1139 * This is a little silly; with sessions there's no real
1140 * use for the callback address. Use the peer address
1141 * as a reasonable default for now, but consider fixing
1142 * the rpc client not to require an address in the
1143 * future:
1144 */
1145 rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
1146 clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
1147 }
1148 }
1149
1150 /* caller must hold client_lock */
1151 static struct nfsd4_session *
1152 __find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
1153 {
1154 struct nfsd4_session *elem;
1155 int idx;
1156 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1157
1158 dump_sessionid(__func__, sessionid);
1159 idx = hash_sessionid(sessionid);
1160 /* Search in the appropriate list */
1161 list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
1162 if (!memcmp(elem->se_sessionid.data, sessionid->data,
1163 NFS4_MAX_SESSIONID_LEN)) {
1164 return elem;
1165 }
1166 }
1167
1168 dprintk("%s: session not found\n", __func__);
1169 return NULL;
1170 }
1171
1172 static struct nfsd4_session *
1173 find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
1174 __be32 *ret)
1175 {
1176 struct nfsd4_session *session;
1177 __be32 status = nfserr_badsession;
1178
1179 session = __find_in_sessionid_hashtbl(sessionid, net);
1180 if (!session)
1181 goto out;
1182 status = nfsd4_get_session_locked(session);
1183 if (status)
1184 session = NULL;
1185 out:
1186 *ret = status;
1187 return session;
1188 }
1189
1190 /* caller must hold client_lock */
1191 static void
1192 unhash_session(struct nfsd4_session *ses)
1193 {
1194 list_del(&ses->se_hash);
1195 spin_lock(&ses->se_client->cl_lock);
1196 list_del(&ses->se_perclnt);
1197 spin_unlock(&ses->se_client->cl_lock);
1198 }
1199
1200 /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
1201 static int
1202 STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
1203 {
1204 if (clid->cl_boot == nn->boot_time)
1205 return 0;
1206 dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1207 clid->cl_boot, clid->cl_id, nn->boot_time);
1208 return 1;
1209 }
1210
1211 /*
1212 * XXX Should we use a slab cache?
1213 * This type of memory management is somewhat inefficient, but we use it
1214 * anyway since SETCLIENTID is not a common operation.
1215 */
1216 static struct nfs4_client *alloc_client(struct xdr_netobj name)
1217 {
1218 struct nfs4_client *clp;
1219
1220 clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
1221 if (clp == NULL)
1222 return NULL;
1223 clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1224 if (clp->cl_name.data == NULL) {
1225 kfree(clp);
1226 return NULL;
1227 }
1228 clp->cl_name.len = name.len;
1229 INIT_LIST_HEAD(&clp->cl_sessions);
1230 idr_init(&clp->cl_stateids);
1231 atomic_set(&clp->cl_refcount, 0);
1232 clp->cl_cb_state = NFSD4_CB_UNKNOWN;
1233 INIT_LIST_HEAD(&clp->cl_idhash);
1234 INIT_LIST_HEAD(&clp->cl_openowners);
1235 INIT_LIST_HEAD(&clp->cl_delegations);
1236 INIT_LIST_HEAD(&clp->cl_lru);
1237 INIT_LIST_HEAD(&clp->cl_callbacks);
1238 INIT_LIST_HEAD(&clp->cl_revoked);
1239 spin_lock_init(&clp->cl_lock);
1240 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1241 return clp;
1242 }
1243
1244 static void
1245 free_client(struct nfs4_client *clp)
1246 {
1247 struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
1248
1249 lockdep_assert_held(&nn->client_lock);
1250 while (!list_empty(&clp->cl_sessions)) {
1251 struct nfsd4_session *ses;
1252 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
1253 se_perclnt);
1254 list_del(&ses->se_perclnt);
1255 WARN_ON_ONCE(atomic_read(&ses->se_ref));
1256 free_session(ses);
1257 }
1258 rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1259 free_svc_cred(&clp->cl_cred);
1260 kfree(clp->cl_name.data);
1261 idr_destroy(&clp->cl_stateids);
1262 kfree(clp);
1263 }
1264
1265 /* must be called under the client_lock */
1266 static inline void
1267 unhash_client_locked(struct nfs4_client *clp)
1268 {
1269 struct nfsd4_session *ses;
1270
1271 list_del(&clp->cl_lru);
1272 spin_lock(&clp->cl_lock);
1273 list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
1274 list_del_init(&ses->se_hash);
1275 spin_unlock(&clp->cl_lock);
1276 }
1277
1278 static void
1279 destroy_client(struct nfs4_client *clp)
1280 {
1281 struct nfs4_openowner *oo;
1282 struct nfs4_delegation *dp;
1283 struct list_head reaplist;
1284 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1285
1286 INIT_LIST_HEAD(&reaplist);
1287 spin_lock(&state_lock);
1288 while (!list_empty(&clp->cl_delegations)) {
1289 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1290 list_del_init(&dp->dl_perclnt);
1291 list_move(&dp->dl_recall_lru, &reaplist);
1292 }
1293 spin_unlock(&state_lock);
1294 while (!list_empty(&reaplist)) {
1295 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1296 destroy_delegation(dp);
1297 }
1298 list_splice_init(&clp->cl_revoked, &reaplist);
1299 while (!list_empty(&reaplist)) {
1300 dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1301 destroy_revoked_delegation(dp);
1302 }
1303 while (!list_empty(&clp->cl_openowners)) {
1304 oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1305 release_openowner(oo);
1306 }
1307 nfsd4_shutdown_callback(clp);
1308 if (clp->cl_cb_conn.cb_xprt)
1309 svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1310 list_del(&clp->cl_idhash);
1311 if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1312 rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1313 else
1314 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1315 spin_lock(&nn->client_lock);
1316 unhash_client_locked(clp);
1317 WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
1318 free_client(clp);
1319 spin_unlock(&nn->client_lock);
1320 }
1321
1322 static void expire_client(struct nfs4_client *clp)
1323 {
1324 nfsd4_client_record_remove(clp);
1325 destroy_client(clp);
1326 }
1327
1328 static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
1329 {
1330 memcpy(target->cl_verifier.data, source->data,
1331 sizeof(target->cl_verifier.data));
1332 }
1333
1334 static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1335 {
1336 target->cl_clientid.cl_boot = source->cl_clientid.cl_boot;
1337 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1338 }
1339
1340 static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1341 {
1342 if (source->cr_principal) {
1343 target->cr_principal =
1344 kstrdup(source->cr_principal, GFP_KERNEL);
1345 if (target->cr_principal == NULL)
1346 return -ENOMEM;
1347 } else
1348 target->cr_principal = NULL;
1349 target->cr_flavor = source->cr_flavor;
1350 target->cr_uid = source->cr_uid;
1351 target->cr_gid = source->cr_gid;
1352 target->cr_group_info = source->cr_group_info;
1353 get_group_info(target->cr_group_info);
1354 target->cr_gss_mech = source->cr_gss_mech;
1355 if (source->cr_gss_mech)
1356 gss_mech_get(source->cr_gss_mech);
1357 return 0;
1358 }
1359
1360 static long long
1361 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
1362 {
1363 long long res;
1364
1365 res = o1->len - o2->len;
1366 if (res)
1367 return res;
1368 return (long long)memcmp(o1->data, o2->data, o1->len);
1369 }
1370
1371 static int same_name(const char *n1, const char *n2)
1372 {
1373 return 0 == memcmp(n1, n2, HEXDIR_LEN);
1374 }
1375
1376 static int
1377 same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
1378 {
1379 return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
1380 }
1381
1382 static int
1383 same_clid(clientid_t *cl1, clientid_t *cl2)
1384 {
1385 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1386 }
1387
1388 static bool groups_equal(struct group_info *g1, struct group_info *g2)
1389 {
1390 int i;
1391
1392 if (g1->ngroups != g2->ngroups)
1393 return false;
1394 for (i=0; i<g1->ngroups; i++)
1395 if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1396 return false;
1397 return true;
1398 }
1399
1400 /*
1401 * RFC 3530 language requires clid_inuse be returned when the
1402 * "principal" associated with a requests differs from that previously
1403 * used. We use uid, gid's, and gss principal string as our best
1404 * approximation. We also don't want to allow non-gss use of a client
1405 * established using gss: in theory cr_principal should catch that
1406 * change, but in practice cr_principal can be null even in the gss case
1407 * since gssd doesn't always pass down a principal string.
1408 */
1409 static bool is_gss_cred(struct svc_cred *cr)
1410 {
1411 /* Is cr_flavor one of the gss "pseudoflavors"?: */
1412 return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
1413 }
1414
1415
1416 static bool
1417 same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1418 {
1419 if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1420 || (!uid_eq(cr1->cr_uid, cr2->cr_uid))
1421 || (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1422 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1423 return false;
1424 if (cr1->cr_principal == cr2->cr_principal)
1425 return true;
1426 if (!cr1->cr_principal || !cr2->cr_principal)
1427 return false;
1428 return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1429 }
1430
1431 static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
1432 {
1433 struct svc_cred *cr = &rqstp->rq_cred;
1434 u32 service;
1435
1436 if (!cr->cr_gss_mech)
1437 return false;
1438 service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
1439 return service == RPC_GSS_SVC_INTEGRITY ||
1440 service == RPC_GSS_SVC_PRIVACY;
1441 }
1442
1443 static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
1444 {
1445 struct svc_cred *cr = &rqstp->rq_cred;
1446
1447 if (!cl->cl_mach_cred)
1448 return true;
1449 if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
1450 return false;
1451 if (!svc_rqst_integrity_protected(rqstp))
1452 return false;
1453 if (!cr->cr_principal)
1454 return false;
1455 return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
1456 }
1457
1458 static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1459 {
1460 static u32 current_clientid = 1;
1461
1462 clp->cl_clientid.cl_boot = nn->boot_time;
1463 clp->cl_clientid.cl_id = current_clientid++;
1464 }
1465
1466 static void gen_confirm(struct nfs4_client *clp)
1467 {
1468 __be32 verf[2];
1469 static u32 i;
1470
1471 /*
1472 * This is opaque to the client, so no need to byte-swap. Use
1473 * __force to keep sparse happy
1474 */
1475 verf[0] = (__force __be32)get_seconds();
1476 verf[1] = (__force __be32)i++;
1477 memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
1478 }
1479
1480 static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
1481 {
1482 struct nfs4_stid *ret;
1483
1484 ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
1485 if (!ret || !ret->sc_type)
1486 return NULL;
1487 return ret;
1488 }
1489
1490 static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1491 {
1492 struct nfs4_stid *s;
1493
1494 s = find_stateid(cl, t);
1495 if (!s)
1496 return NULL;
1497 if (typemask & s->sc_type)
1498 return s;
1499 return NULL;
1500 }
1501
1502 static struct nfs4_client *create_client(struct xdr_netobj name,
1503 struct svc_rqst *rqstp, nfs4_verifier *verf)
1504 {
1505 struct nfs4_client *clp;
1506 struct sockaddr *sa = svc_addr(rqstp);
1507 int ret;
1508 struct net *net = SVC_NET(rqstp);
1509 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1510
1511 clp = alloc_client(name);
1512 if (clp == NULL)
1513 return NULL;
1514
1515 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1516 if (ret) {
1517 spin_lock(&nn->client_lock);
1518 free_client(clp);
1519 spin_unlock(&nn->client_lock);
1520 return NULL;
1521 }
1522 nfsd4_init_callback(&clp->cl_cb_null);
1523 clp->cl_time = get_seconds();
1524 clear_bit(0, &clp->cl_cb_slot_busy);
1525 copy_verf(clp, verf);
1526 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1527 gen_confirm(clp);
1528 clp->cl_cb_session = NULL;
1529 clp->net = net;
1530 return clp;
1531 }
1532
1533 static void
1534 add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
1535 {
1536 struct rb_node **new = &(root->rb_node), *parent = NULL;
1537 struct nfs4_client *clp;
1538
1539 while (*new) {
1540 clp = rb_entry(*new, struct nfs4_client, cl_namenode);
1541 parent = *new;
1542
1543 if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
1544 new = &((*new)->rb_left);
1545 else
1546 new = &((*new)->rb_right);
1547 }
1548
1549 rb_link_node(&new_clp->cl_namenode, parent, new);
1550 rb_insert_color(&new_clp->cl_namenode, root);
1551 }
1552
1553 static struct nfs4_client *
1554 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
1555 {
1556 long long cmp;
1557 struct rb_node *node = root->rb_node;
1558 struct nfs4_client *clp;
1559
1560 while (node) {
1561 clp = rb_entry(node, struct nfs4_client, cl_namenode);
1562 cmp = compare_blob(&clp->cl_name, name);
1563 if (cmp > 0)
1564 node = node->rb_left;
1565 else if (cmp < 0)
1566 node = node->rb_right;
1567 else
1568 return clp;
1569 }
1570 return NULL;
1571 }
1572
1573 static void
1574 add_to_unconfirmed(struct nfs4_client *clp)
1575 {
1576 unsigned int idhashval;
1577 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1578
1579 clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1580 add_clp_to_name_tree(clp, &nn->unconf_name_tree);
1581 idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1582 list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1583 renew_client(clp);
1584 }
1585
1586 static void
1587 move_to_confirmed(struct nfs4_client *clp)
1588 {
1589 unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1590 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
1591
1592 dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1593 list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1594 rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1595 add_clp_to_name_tree(clp, &nn->conf_name_tree);
1596 set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1597 renew_client(clp);
1598 }
1599
1600 static struct nfs4_client *
1601 find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
1602 {
1603 struct nfs4_client *clp;
1604 unsigned int idhashval = clientid_hashval(clid->cl_id);
1605
1606 list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1607 if (same_clid(&clp->cl_clientid, clid)) {
1608 if ((bool)clp->cl_minorversion != sessions)
1609 return NULL;
1610 renew_client(clp);
1611 return clp;
1612 }
1613 }
1614 return NULL;
1615 }
1616
1617 static struct nfs4_client *
1618 find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1619 {
1620 struct list_head *tbl = nn->conf_id_hashtbl;
1621
1622 return find_client_in_id_table(tbl, clid, sessions);
1623 }
1624
1625 static struct nfs4_client *
1626 find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
1627 {
1628 struct list_head *tbl = nn->unconf_id_hashtbl;
1629
1630 return find_client_in_id_table(tbl, clid, sessions);
1631 }
1632
1633 static bool clp_used_exchangeid(struct nfs4_client *clp)
1634 {
1635 return clp->cl_exchange_flags != 0;
1636 }
1637
1638 static struct nfs4_client *
1639 find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1640 {
1641 return find_clp_in_name_tree(name, &nn->conf_name_tree);
1642 }
1643
1644 static struct nfs4_client *
1645 find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1646 {
1647 return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1648 }
1649
1650 static void
1651 gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
1652 {
1653 struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1654 struct sockaddr *sa = svc_addr(rqstp);
1655 u32 scopeid = rpc_get_scope_id(sa);
1656 unsigned short expected_family;
1657
1658 /* Currently, we only support tcp and tcp6 for the callback channel */
1659 if (se->se_callback_netid_len == 3 &&
1660 !memcmp(se->se_callback_netid_val, "tcp", 3))
1661 expected_family = AF_INET;
1662 else if (se->se_callback_netid_len == 4 &&
1663 !memcmp(se->se_callback_netid_val, "tcp6", 4))
1664 expected_family = AF_INET6;
1665 else
1666 goto out_err;
1667
1668 conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1669 se->se_callback_addr_len,
1670 (struct sockaddr *)&conn->cb_addr,
1671 sizeof(conn->cb_addr));
1672
1673 if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
1674 goto out_err;
1675
1676 if (conn->cb_addr.ss_family == AF_INET6)
1677 ((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1678
1679 conn->cb_prog = se->se_callback_prog;
1680 conn->cb_ident = se->se_callback_ident;
1681 memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
1682 return;
1683 out_err:
1684 conn->cb_addr.ss_family = AF_UNSPEC;
1685 conn->cb_addrlen = 0;
1686 dprintk("NFSD: this client (clientid %08x/%08x) "
1687 "will not receive delegations\n",
1688 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1689
1690 return;
1691 }
1692
1693 /*
1694 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
1695 */
1696 static void
1697 nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
1698 {
1699 struct xdr_buf *buf = resp->xdr.buf;
1700 struct nfsd4_slot *slot = resp->cstate.slot;
1701 unsigned int base;
1702
1703 dprintk("--> %s slot %p\n", __func__, slot);
1704
1705 slot->sl_opcnt = resp->opcnt;
1706 slot->sl_status = resp->cstate.status;
1707
1708 slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1709 if (nfsd4_not_cached(resp)) {
1710 slot->sl_datalen = 0;
1711 return;
1712 }
1713 base = resp->cstate.data_offset;
1714 slot->sl_datalen = buf->len - base;
1715 if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
1716 WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
1717 return;
1718 }
1719
1720 /*
1721 * Encode the replay sequence operation from the slot values.
1722 * If cachethis is FALSE, encode the uncached rep error on the next
1723 * operation which sets resp->p and increments resp->opcnt for
1724 * nfs4svc_encode_compoundres.
1725 *
1726 */
1727 static __be32
1728 nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
1729 struct nfsd4_compoundres *resp)
1730 {
1731 struct nfsd4_op *op;
1732 struct nfsd4_slot *slot = resp->cstate.slot;
1733
1734 /* Encode the replayed sequence operation */
1735 op = &args->ops[resp->opcnt - 1];
1736 nfsd4_encode_operation(resp, op);
1737
1738 /* Return nfserr_retry_uncached_rep in next operation. */
1739 if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
1740 op = &args->ops[resp->opcnt++];
1741 op->status = nfserr_retry_uncached_rep;
1742 nfsd4_encode_operation(resp, op);
1743 }
1744 return op->status;
1745 }
1746
1747 /*
1748 * The sequence operation is not cached because we can use the slot and
1749 * session values.
1750 */
1751 static __be32
1752 nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
1753 struct nfsd4_sequence *seq)
1754 {
1755 struct nfsd4_slot *slot = resp->cstate.slot;
1756 struct xdr_stream *xdr = &resp->xdr;
1757 __be32 *p;
1758 __be32 status;
1759
1760 dprintk("--> %s slot %p\n", __func__, slot);
1761
1762 status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
1763 if (status)
1764 return status;
1765
1766 p = xdr_reserve_space(xdr, slot->sl_datalen);
1767 if (!p) {
1768 WARN_ON_ONCE(1);
1769 return nfserr_serverfault;
1770 }
1771 xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
1772 xdr_commit_encode(xdr);
1773
1774 resp->opcnt = slot->sl_opcnt;
1775 return slot->sl_status;
1776 }
1777
1778 /*
1779 * Set the exchange_id flags returned by the server.
1780 */
1781 static void
1782 nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1783 {
1784 /* pNFS is not supported */
1785 new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;
1786
1787 /* Referrals are supported, Migration is not. */
1788 new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;
1789
1790 /* set the wire flags to return to client. */
1791 clid->flags = new->cl_exchange_flags;
1792 }
1793
1794 static bool client_has_state(struct nfs4_client *clp)
1795 {
1796 /*
1797 * Note clp->cl_openowners check isn't quite right: there's no
1798 * need to count owners without stateid's.
1799 *
1800 * Also note we should probably be using this in 4.0 case too.
1801 */
1802 return !list_empty(&clp->cl_openowners)
1803 || !list_empty(&clp->cl_delegations)
1804 || !list_empty(&clp->cl_sessions);
1805 }
1806
1807 __be32
1808 nfsd4_exchange_id(struct svc_rqst *rqstp,
1809 struct nfsd4_compound_state *cstate,
1810 struct nfsd4_exchange_id *exid)
1811 {
1812 struct nfs4_client *unconf, *conf, *new;
1813 __be32 status;
1814 char addr_str[INET6_ADDRSTRLEN];
1815 nfs4_verifier verf = exid->verifier;
1816 struct sockaddr *sa = svc_addr(rqstp);
1817 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
1818 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1819
1820 rpc_ntop(sa, addr_str, sizeof(addr_str));
1821 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
1822 "ip_addr=%s flags %x, spa_how %d\n",
1823 __func__, rqstp, exid, exid->clname.len, exid->clname.data,
1824 addr_str, exid->flags, exid->spa_how);
1825
1826 if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
1827 return nfserr_inval;
1828
1829 switch (exid->spa_how) {
1830 case SP4_MACH_CRED:
1831 if (!svc_rqst_integrity_protected(rqstp))
1832 return nfserr_inval;
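/* fall through */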
1833 case SP4_NONE:
1834 break;
1835 default: /* checked by xdr code */
1836 WARN_ON_ONCE(1);
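/* fall through */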
1837 case SP4_SSV:
1838 return nfserr_encr_alg_unsupp;
1839 }
1840
1841 /* Cases below refer to rfc 5661 section 18.35.4: */
1842 nfs4_lock_state();
1843 conf = find_confirmed_client_by_name(&exid->clname, nn);
1844 if (conf) {
1845 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
1846 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
1847
1848 if (update) {
1849 if (!clp_used_exchangeid(conf)) { /* buggy client */
1850 status = nfserr_inval;
1851 goto out;
1852 }
1853 if (!mach_creds_match(conf, rqstp)) {
1854 status = nfserr_wrong_cred;
1855 goto out;
1856 }
1857 if (!creds_match) { /* case 9 */
1858 status = nfserr_perm;
1859 goto out;
1860 }
1861 if (!verfs_match) { /* case 8 */
1862 status = nfserr_not_same;
1863 goto out;
1864 }
1865 /* case 6 */
1866 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1867 new = conf;
1868 goto out_copy;
1869 }
1870 if (!creds_match) { /* case 3 */
1871 if (client_has_state(conf)) {
1872 status = nfserr_clid_inuse;
1873 goto out;
1874 }
1875 expire_client(conf);
1876 goto out_new;
1877 }
1878 if (verfs_match) { /* case 2 */
1879 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1880 new = conf;
1881 goto out_copy;
1882 }
1883 /* case 5, client reboot */
1884 goto out_new;
1885 }
1886
1887 if (update) { /* case 7 */
1888 status = nfserr_noent;
1889 goto out;
1890 }
1891
1892 unconf = find_unconfirmed_client_by_name(&exid->clname, nn);
1893 if (unconf) /* case 4, possible retry or client restart */
1894 expire_client(unconf);
1895
1896 /* case 1 (normal case) */
1897 out_new:
1898 new = create_client(exid->clname, rqstp, &verf);
1899 if (new == NULL) {
1900 status = nfserr_jukebox;
1901 goto out;
1902 }
1903 new->cl_minorversion = cstate->minorversion;
1904 new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
1905
1906 gen_clid(new, nn);
1907 add_to_unconfirmed(new);
1908 out_copy:
1909 exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1910 exid->clientid.cl_id = new->cl_clientid.cl_id;
1911
1912 exid->seqid = new->cl_cs_slot.sl_seqid + 1;
1913 nfsd4_set_ex_flags(new, exid);
1914
1915 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
1916 new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
1917 status = nfs_ok;
1918
1919 out:
1920 nfs4_unlock_state();
1921 return status;
1922 }
1923
1924 static __be32
1925 check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1926 {
1927 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
1928 slot_seqid);
1929
1930 /* The slot is in use, and no response has been sent. */
1931 if (slot_inuse) {
1932 if (seqid == slot_seqid)
1933 return nfserr_jukebox;
1934 else
1935 return nfserr_seq_misordered;
1936 }
1937 /* Note unsigned 32-bit arithmetic handles wraparound: */
1938 if (likely(seqid == slot_seqid + 1))
1939 return nfs_ok;
1940 if (seqid == slot_seqid)
1941 return nfserr_replay_cache;
1942 return nfserr_seq_misordered;
1943 }
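/*
 * For illustration, with a slot whose sl_seqid is 41:
 *
 *	check_slot_seqid(42, 41, 0) -> nfs_ok                (next request)
 *	check_slot_seqid(41, 41, 0) -> nfserr_replay_cache   (retransmit)
 *	check_slot_seqid(41, 41, 1) -> nfserr_jukebox        (still executing)
 *	check_slot_seqid(43, 41, 0) -> nfserr_seq_misordered (slot skipped)
 */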
1944
1945 /*
1946 * Cache the CREATE_SESSION result in the client's single DRC slot
1947 * by saving the xdr structure. sl_seqid has already been set.
1948 * Do this for both solo and embedded CREATE_SESSION operations.
1949 */
1950 static void
1951 nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1952 struct nfsd4_clid_slot *slot, __be32 nfserr)
1953 {
1954 slot->sl_status = nfserr;
1955 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
1956 }
1957
1958 static __be32
1959 nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1960 struct nfsd4_clid_slot *slot)
1961 {
1962 memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
1963 return slot->sl_status;
1964 }
1965
1966 #define NFSD_MIN_REQ_HDR_SEQ_SZ ((\
1967 2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
1968 1 + /* minimal tag is zero-length: just the length word */ \
1969 3 + /* version, opcount, opcode */ \
1970 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1971 /* seqid, slotID, slotID, cache */ \
1972 4 ) * sizeof(__be32))
1973
1974 #define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
1975 2 + /* verifier: AUTH_NULL, length 0 */\
1976 1 + /* status */ \
1977 1 + /* minimal tag is zero-length: just the length word */ \
1978 3 + /* opcount, opcode, opstatus*/ \
1979 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
1980 /* seqid, slotID, slotID, slotID, status */ \
1981 5 ) * sizeof(__be32))
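/*
 * With NFS4_MAX_SESSIONID_LEN == 16 (so XDR_QUADLEN gives 4 words), each
 * of the two minima above works out to 16 XDR words, i.e. 64 bytes.
 */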
1982
1983 static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
1984 {
1985 u32 maxrpc = nn->nfsd_serv->sv_max_mesg;
1986
1987 if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
1988 return nfserr_toosmall;
1989 if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
1990 return nfserr_toosmall;
1991 ca->headerpadsz = 0;
1992 ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
1993 ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
1994 ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
1995 ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
1996 NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
1997 ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
1998 /*
1999 * Note that decreasing the slot size below the client's request
2000 * may make it difficult for the client to function correctly,
2001 * whereas decreasing the number of slots should only affect
2002 * performance. When short on memory we therefore prefer to
2003 * decrease the number of slots rather than their size. Clients
2004 * that request larger slots than they need will get poor results:
2005 */
2006 ca->maxreqs = nfsd4_get_drc_mem(ca);
2007 if (!ca->maxreqs)
2008 return nfserr_jukebox;
2009
2010 return nfs_ok;
2011 }
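/*
 * A rough example of the clamping above, with made-up numbers: a client
 * requesting maxreq_sz = 4MB and 256 slots from a server whose
 * sv_max_mesg is 1MB ends up with maxreq_sz = 1MB and maxreqs capped at
 * NFSD_MAX_SLOTS_PER_SESSION; nfsd4_get_drc_mem() may then reduce
 * maxreqs further, depending on the DRC memory still available.
 */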
2012
2013 #define NFSD_CB_MAX_REQ_SZ ((NFS4_enc_cb_recall_sz + \
2014 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
2015 #define NFSD_CB_MAX_RESP_SZ ((NFS4_dec_cb_recall_sz + \
2016 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))
2017
2018 static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2019 {
2020 ca->headerpadsz = 0;
2021
2022 /*
2023 * These RPC_MAX_HEADER macros are overkill, especially since we
2024 * don't even do gss on the backchannel yet. But this is still
2025 * less than 1k. Tighten up this estimate in the unlikely event
2026 * it turns out to be a problem for some client:
2027 */
2028 if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2029 return nfserr_toosmall;
2030 if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2031 return nfserr_toosmall;
2032 ca->maxresp_cached = 0;
2033 if (ca->maxops < 2)
2034 return nfserr_toosmall;
2035
2036 return nfs_ok;
2037 }
2038
2039 static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
2040 {
2041 switch (cbs->flavor) {
2042 case RPC_AUTH_NULL:
2043 case RPC_AUTH_UNIX:
2044 return nfs_ok;
2045 default:
2046 /*
2047 * GSS case: the spec doesn't allow us to return this
2048 * error. But it also doesn't allow us not to support
2049 * GSS.
2050 * I'd rather this fail hard than return some error the
2051 * client might think it can handle:
2052 */
2053 return nfserr_encr_alg_unsupp;
2054 }
2055 }
2056
2057 __be32
2058 nfsd4_create_session(struct svc_rqst *rqstp,
2059 struct nfsd4_compound_state *cstate,
2060 struct nfsd4_create_session *cr_ses)
2061 {
2062 struct sockaddr *sa = svc_addr(rqstp);
2063 struct nfs4_client *conf, *unconf;
2064 struct nfsd4_session *new;
2065 struct nfsd4_conn *conn;
2066 struct nfsd4_clid_slot *cs_slot = NULL;
2067 __be32 status = 0;
2068 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2069
2070 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
2071 return nfserr_inval;
2072 status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
2073 if (status)
2074 return status;
2075 status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2076 if (status)
2077 return status;
2078 status = check_backchannel_attrs(&cr_ses->back_channel);
2079 if (status)
2080 goto out_release_drc_mem;
2081 status = nfserr_jukebox;
2082 new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2083 if (!new)
2084 goto out_release_drc_mem;
2085 conn = alloc_conn_from_crses(rqstp, cr_ses);
2086 if (!conn)
2087 goto out_free_session;
2088
2089 nfs4_lock_state();
2090 unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2091 conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2092 WARN_ON_ONCE(conf && unconf);
2093
2094 if (conf) {
2095 status = nfserr_wrong_cred;
2096 if (!mach_creds_match(conf, rqstp))
2097 goto out_free_conn;
2098 cs_slot = &conf->cl_cs_slot;
2099 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2100 if (status == nfserr_replay_cache) {
2101 status = nfsd4_replay_create_session(cr_ses, cs_slot);
2102 goto out_free_conn;
2103 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
2104 status = nfserr_seq_misordered;
2105 goto out_free_conn;
2106 }
2107 } else if (unconf) {
2108 struct nfs4_client *old;
2109 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2110 !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
2111 status = nfserr_clid_inuse;
2112 goto out_free_conn;
2113 }
2114 status = nfserr_wrong_cred;
2115 if (!mach_creds_match(unconf, rqstp))
2116 goto out_free_conn;
2117 cs_slot = &unconf->cl_cs_slot;
2118 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2119 if (status) {
2120 /* an unconfirmed replay returns misordered */
2121 status = nfserr_seq_misordered;
2122 goto out_free_conn;
2123 }
2124 old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2125 if (old) {
2126 status = mark_client_expired(old);
2127 if (status)
2128 goto out_free_conn;
2129 expire_client(old);
2130 }
2131 move_to_confirmed(unconf);
2132 conf = unconf;
2133 } else {
2134 status = nfserr_stale_clientid;
2135 goto out_free_conn;
2136 }
2137 status = nfs_ok;
2138 /*
2139 * We do not support RDMA or persistent sessions
2140 */
2141 cr_ses->flags &= ~SESSION4_PERSIST;
2142 cr_ses->flags &= ~SESSION4_RDMA;
2143
2144 init_session(rqstp, new, conf, cr_ses);
2145 nfsd4_init_conn(rqstp, conn, new);
2146
2147 memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
2148 NFS4_MAX_SESSIONID_LEN);
2149 cs_slot->sl_seqid++;
2150 cr_ses->seqid = cs_slot->sl_seqid;
2151
2152 /* cache solo and embedded create sessions under the state lock */
2153 nfsd4_cache_create_session(cr_ses, cs_slot, status);
2154 nfs4_unlock_state();
2155 return status;
2156 out_free_conn:
2157 nfs4_unlock_state();
2158 free_conn(conn);
2159 out_free_session:
2160 __free_session(new);
2161 out_release_drc_mem:
2162 nfsd4_put_drc_mem(&cr_ses->fore_channel);
2163 return status;
2164 }
2165
2166 static __be32 nfsd4_map_bcts_dir(u32 *dir)
2167 {
2168 switch (*dir) {
2169 case NFS4_CDFC4_FORE:
2170 case NFS4_CDFC4_BACK:
2171 return nfs_ok;
2172 case NFS4_CDFC4_FORE_OR_BOTH:
2173 case NFS4_CDFC4_BACK_OR_BOTH:
2174 *dir = NFS4_CDFC4_BOTH;
2175 return nfs_ok;
2176 }
2177 return nfserr_inval;
2178 }
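/*
 * For example, a BIND_CONN_TO_SESSION asking for NFS4_CDFC4_FORE_OR_BOTH
 * is mapped to NFS4_CDFC4_BOTH: when given the choice, we bind the
 * connection in both directions.
 */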
2179
2180 __be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
2181 {
2182 struct nfsd4_session *session = cstate->session;
2183 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2184 __be32 status;
2185
2186 status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
2187 if (status)
2188 return status;
2189 spin_lock(&nn->client_lock);
2190 session->se_cb_prog = bc->bc_cb_program;
2191 session->se_cb_sec = bc->bc_cb_sec;
2192 spin_unlock(&nn->client_lock);
2193
2194 nfsd4_probe_callback(session->se_client);
2195
2196 return nfs_ok;
2197 }
2198
2199 __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
2200 struct nfsd4_compound_state *cstate,
2201 struct nfsd4_bind_conn_to_session *bcts)
2202 {
2203 __be32 status;
2204 struct nfsd4_conn *conn;
2205 struct nfsd4_session *session;
2206 struct net *net = SVC_NET(rqstp);
2207 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2208
2209 if (!nfsd4_last_compound_op(rqstp))
2210 return nfserr_not_only_op;
2211 nfs4_lock_state();
2212 spin_lock(&nn->client_lock);
2213 session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2214 spin_unlock(&nn->client_lock);
2215 if (!session)
2216 goto out_no_session;
2217 status = nfserr_wrong_cred;
2218 if (!mach_creds_match(session->se_client, rqstp))
2219 goto out;
2220 status = nfsd4_map_bcts_dir(&bcts->dir);
2221 if (status)
2222 goto out;
2223 conn = alloc_conn(rqstp, bcts->dir);
2224 status = nfserr_jukebox;
2225 if (!conn)
2226 goto out;
2227 nfsd4_init_conn(rqstp, conn, session);
2228 status = nfs_ok;
2229 out:
2230 nfsd4_put_session(session);
2231 out_no_session:
2232 nfs4_unlock_state();
2233 return status;
2234 }
2235
2236 static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
2237 {
2238 if (!session)
2239 return false;
2240 return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
2241 }
2242
2243 __be32
2244 nfsd4_destroy_session(struct svc_rqst *r,
2245 struct nfsd4_compound_state *cstate,
2246 struct nfsd4_destroy_session *sessionid)
2247 {
2248 struct nfsd4_session *ses;
2249 __be32 status;
2250 int ref_held_by_me = 0;
2251 struct net *net = SVC_NET(r);
2252 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2253
2254 nfs4_lock_state();
2255 status = nfserr_not_only_op;
2256 if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2257 if (!nfsd4_last_compound_op(r))
2258 goto out;
2259 ref_held_by_me++;
2260 }
2261 dump_sessionid(__func__, &sessionid->sessionid);
2262 spin_lock(&nn->client_lock);
2263 ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2264 if (!ses)
2265 goto out_client_lock;
2266 status = nfserr_wrong_cred;
2267 if (!mach_creds_match(ses->se_client, r))
2268 goto out_put_session;
2269 status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2270 if (status)
2271 goto out_put_session;
2272 unhash_session(ses);
2273 spin_unlock(&nn->client_lock);
2274
2275 nfsd4_probe_callback_sync(ses->se_client);
2276
2277 spin_lock(&nn->client_lock);
2278 status = nfs_ok;
2279 out_put_session:
2280 nfsd4_put_session_locked(ses);
2281 out_client_lock:
2282 spin_unlock(&nn->client_lock);
2283 out:
2284 nfs4_unlock_state();
2285 return status;
2286 }
2287
2288 static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2289 {
2290 struct nfsd4_conn *c;
2291
2292 list_for_each_entry(c, &s->se_conns, cn_persession) {
2293 if (c->cn_xprt == xpt) {
2294 return c;
2295 }
2296 }
2297 return NULL;
2298 }
2299
2300 static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2301 {
2302 struct nfs4_client *clp = ses->se_client;
2303 struct nfsd4_conn *c;
2304 __be32 status = nfs_ok;
2305 int ret;
2306
2307 spin_lock(&clp->cl_lock);
2308 c = __nfsd4_find_conn(new->cn_xprt, ses);
2309 if (c)
2310 goto out_free;
2311 status = nfserr_conn_not_bound_to_session;
2312 if (clp->cl_mach_cred)
2313 goto out_free;
2314 __nfsd4_hash_conn(new, ses);
2315 spin_unlock(&clp->cl_lock);
2316 ret = nfsd4_register_conn(new);
2317 if (ret)
2318 /* oops; xprt is already down: */
2319 nfsd4_conn_lost(&new->cn_xpt_user);
2320 return nfs_ok;
2321 out_free:
2322 spin_unlock(&clp->cl_lock);
2323 free_conn(new);
2324 return status;
2325 }
2326
2327 static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
2328 {
2329 struct nfsd4_compoundargs *args = rqstp->rq_argp;
2330
2331 return args->opcnt > session->se_fchannel.maxops;
2332 }
2333
2334 static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
2335 struct nfsd4_session *session)
2336 {
2337 struct xdr_buf *xb = &rqstp->rq_arg;
2338
2339 return xb->len > session->se_fchannel.maxreq_sz;
2340 }
2341
2342 __be32
2343 nfsd4_sequence(struct svc_rqst *rqstp,
2344 struct nfsd4_compound_state *cstate,
2345 struct nfsd4_sequence *seq)
2346 {
2347 struct nfsd4_compoundres *resp = rqstp->rq_resp;
2348 struct xdr_stream *xdr = &resp->xdr;
2349 struct nfsd4_session *session;
2350 struct nfs4_client *clp;
2351 struct nfsd4_slot *slot;
2352 struct nfsd4_conn *conn;
2353 __be32 status;
2354 int buflen;
2355 struct net *net = SVC_NET(rqstp);
2356 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2357
2358 if (resp->opcnt != 1)
2359 return nfserr_sequence_pos;
2360
2361 /*
2362 * Will be either used or freed by nfsd4_sequence_check_conn
2363 * below.
2364 */
2365 conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
2366 if (!conn)
2367 return nfserr_jukebox;
2368
2369 spin_lock(&nn->client_lock);
2370 session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
2371 if (!session)
2372 goto out_no_session;
2373 clp = session->se_client;
2374
2375 status = nfserr_too_many_ops;
2376 if (nfsd4_session_too_many_ops(rqstp, session))
2377 goto out_put_session;
2378
2379 status = nfserr_req_too_big;
2380 if (nfsd4_request_too_big(rqstp, session))
2381 goto out_put_session;
2382
2383 status = nfserr_badslot;
2384 if (seq->slotid >= session->se_fchannel.maxreqs)
2385 goto out_put_session;
2386
2387 slot = session->se_slots[seq->slotid];
2388 dprintk("%s: slotid %d\n", __func__, seq->slotid);
2389
2390 /* We do not negotiate the number of slots yet, so set the
2391 * maxslots to the session maxreqs, which is then used to encode
2392 * both sr_highest_slotid and sr_target_slotid */
2393 seq->maxslots = session->se_fchannel.maxreqs;
2394
2395 status = check_slot_seqid(seq->seqid, slot->sl_seqid,
2396 slot->sl_flags & NFSD4_SLOT_INUSE);
2397 if (status == nfserr_replay_cache) {
2398 status = nfserr_seq_misordered;
2399 if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2400 goto out_put_session;
2401 cstate->slot = slot;
2402 cstate->session = session;
2403 cstate->clp = clp;
2404 /* Return the cached reply status and set cstate->status
2405 * for nfsd4_proc_compound processing */
2406 status = nfsd4_replay_cache_entry(resp, seq);
2407 cstate->status = nfserr_replay_cache;
2408 goto out;
2409 }
2410 if (status)
2411 goto out_put_session;
2412
2413 status = nfsd4_sequence_check_conn(conn, session);
2414 conn = NULL;
2415 if (status)
2416 goto out_put_session;
2417
2418 buflen = (seq->cachethis) ?
2419 session->se_fchannel.maxresp_cached :
2420 session->se_fchannel.maxresp_sz;
2421 status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
2422 nfserr_rep_too_big;
2423 if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
2424 goto out_put_session;
2425 svc_reserve(rqstp, buflen);
2426
2427 status = nfs_ok;
2428 /* Success! bump slot seqid */
2429 slot->sl_seqid = seq->seqid;
2430 slot->sl_flags |= NFSD4_SLOT_INUSE;
2431 if (seq->cachethis)
2432 slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2433 else
2434 slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
2435
2436 cstate->slot = slot;
2437 cstate->session = session;
2438 cstate->clp = clp;
2439
2440 out:
2441 switch (clp->cl_cb_state) {
2442 case NFSD4_CB_DOWN:
2443 seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
2444 break;
2445 case NFSD4_CB_FAULT:
2446 seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
2447 break;
2448 default:
2449 seq->status_flags = 0;
2450 }
2451 if (!list_empty(&clp->cl_revoked))
2452 seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2453 out_no_session:
2454 if (conn)
2455 free_conn(conn);
2456 spin_unlock(&nn->client_lock);
2457 return status;
2458 out_put_session:
2459 nfsd4_put_session_locked(session);
2460 goto out_no_session;
2461 }
2462
2463 void
2464 nfsd4_sequence_done(struct nfsd4_compoundres *resp)
2465 {
2466 struct nfsd4_compound_state *cs = &resp->cstate;
2467
2468 if (nfsd4_has_session(cs)) {
2469 if (cs->status != nfserr_replay_cache) {
2470 nfsd4_store_cache_entry(resp);
2471 cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
2472 }
2473 /* Drop session reference that was taken in nfsd4_sequence() */
2474 nfsd4_put_session(cs->session);
2475 } else if (cs->clp)
2476 put_client_renew(cs->clp);
2477 }
2478
2479 __be32
2480 nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2481 {
2482 struct nfs4_client *conf, *unconf, *clp;
2483 __be32 status = 0;
2484 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2485
2486 nfs4_lock_state();
2487 unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2488 conf = find_confirmed_client(&dc->clientid, true, nn);
2489 WARN_ON_ONCE(conf && unconf);
2490
2491 if (conf) {
2492 clp = conf;
2493
2494 if (client_has_state(conf)) {
2495 status = nfserr_clientid_busy;
2496 goto out;
2497 }
2498 } else if (unconf) {
2499 clp = unconf;
2500 } else {
2501 status = nfserr_stale_clientid;
2502 goto out;
2503 }
2504 if (!mach_creds_match(clp, rqstp)) {
2505 status = nfserr_wrong_cred;
2506 goto out;
2507 }
2508 expire_client(clp);
2509 out:
2510 nfs4_unlock_state();
2511 return status;
2512 }
2513
2514 __be32
2515 nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2516 {
2517 __be32 status = 0;
2518
2519 if (rc->rca_one_fs) {
2520 if (!cstate->current_fh.fh_dentry)
2521 return nfserr_nofilehandle;
2522 /*
2523 * We don't take advantage of the rca_one_fs case.
2524 * That's OK, it's optional, we can safely ignore it.
2525 */
2526 return nfs_ok;
2527 }
2528
2529 nfs4_lock_state();
2530 status = nfserr_complete_already;
2531 if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
2532 &cstate->session->se_client->cl_flags))
2533 goto out;
2534
2535 status = nfserr_stale_clientid;
2536 if (is_client_expired(cstate->session->se_client))
2537 /*
2538 * The following error isn't really legal.
2539 * But we only get here if the client just explicitly
2540 * destroyed itself. Surely it no longer cares what
2541 * error it gets back on an operation for the dead
2542 * client.
2543 */
2544 goto out;
2545
2546 status = nfs_ok;
2547 nfsd4_client_record_create(cstate->session->se_client);
2548 out:
2549 nfs4_unlock_state();
2550 return status;
2551 }
2552
2553 __be32
2554 nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2555 struct nfsd4_setclientid *setclid)
2556 {
2557 struct xdr_netobj clname = setclid->se_name;
2558 nfs4_verifier clverifier = setclid->se_verf;
2559 struct nfs4_client *conf, *unconf, *new;
2560 __be32 status;
2561 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2562
2563 /* Cases below refer to rfc 3530 section 14.2.33: */
2564 nfs4_lock_state();
2565 conf = find_confirmed_client_by_name(&clname, nn);
2566 if (conf) {
2567 /* case 0: */
2568 status = nfserr_clid_inuse;
2569 if (clp_used_exchangeid(conf))
2570 goto out;
2571 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2572 char addr_str[INET6_ADDRSTRLEN];
2573 rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
2574 sizeof(addr_str));
2575 dprintk("NFSD: setclientid: string in use by client "
2576 "at %s\n", addr_str);
2577 goto out;
2578 }
2579 }
2580 unconf = find_unconfirmed_client_by_name(&clname, nn);
2581 if (unconf)
2582 expire_client(unconf);
2583 status = nfserr_jukebox;
2584 new = create_client(clname, rqstp, &clverifier);
2585 if (new == NULL)
2586 goto out;
2587 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2588 /* case 1: probable callback update */
2589 copy_clid(new, conf);
2590 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2591 gen_clid(new, nn);
2592 new->cl_minorversion = 0;
2593 gen_callback(new, setclid, rqstp);
2594 add_to_unconfirmed(new);
2595 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
2596 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
2597 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
2598 status = nfs_ok;
2599 out:
2600 nfs4_unlock_state();
2601 return status;
2602 }
2603
2604
2605 __be32
2606 nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2607 struct nfsd4_compound_state *cstate,
2608 struct nfsd4_setclientid_confirm *setclientid_confirm)
2609 {
2610 struct nfs4_client *conf, *unconf;
2611 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2612 clientid_t * clid = &setclientid_confirm->sc_clientid;
2613 __be32 status;
2614 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2615
2616 if (STALE_CLIENTID(clid, nn))
2617 return nfserr_stale_clientid;
2618 nfs4_lock_state();
2619
2620 conf = find_confirmed_client(clid, false, nn);
2621 unconf = find_unconfirmed_client(clid, false, nn);
2622 /*
2623 * We try hard to give out unique clientids, so if we get an
2624 * attempt to confirm the same clientid with a different cred,
2625 * there's a bug somewhere. Let's charitably assume it's our
2626 * bug.
2627 */
2628 status = nfserr_serverfault;
2629 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2630 goto out;
2631 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2632 goto out;
2633 /* cases below refer to rfc 3530 section 14.2.34: */
2634 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2635 if (conf && !unconf) /* case 2: probable retransmit */
2636 status = nfs_ok;
2637 else /* case 4: client hasn't noticed we rebooted yet? */
2638 status = nfserr_stale_clientid;
2639 goto out;
2640 }
2641 status = nfs_ok;
2642 if (conf) { /* case 1: callback update */
2643 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2644 nfsd4_probe_callback(conf);
2645 expire_client(unconf);
2646 } else { /* case 3: normal case; new or rebooted client */
2647 conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2648 if (conf) {
2649 status = mark_client_expired(conf);
2650 if (status)
2651 goto out;
2652 expire_client(conf);
2653 }
2654 move_to_confirmed(unconf);
2655 nfsd4_probe_callback(unconf);
2656 }
2657 out:
2658 nfs4_unlock_state();
2659 return status;
2660 }
2661
2662 static struct nfs4_file *nfsd4_alloc_file(void)
2663 {
2664 return kmem_cache_alloc(file_slab, GFP_KERNEL);
2665 }
2666
2667 /* OPEN Share state helper functions */
2668 static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
2669 {
2670 unsigned int hashval = file_hashval(ino);
2671
2672 lockdep_assert_held(&state_lock);
2673
2674 atomic_set(&fp->fi_ref, 1);
2675 INIT_LIST_HEAD(&fp->fi_stateids);
2676 INIT_LIST_HEAD(&fp->fi_delegations);
2677 ihold(ino);
2678 fp->fi_inode = ino;
2679 fp->fi_had_conflict = false;
2680 fp->fi_lease = NULL;
2681 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2682 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2683 hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
2684 }
2685
2686 void
2687 nfsd4_free_slabs(void)
2688 {
2689 kmem_cache_destroy(openowner_slab);
2690 kmem_cache_destroy(lockowner_slab);
2691 kmem_cache_destroy(file_slab);
2692 kmem_cache_destroy(stateid_slab);
2693 kmem_cache_destroy(deleg_slab);
2694 }
2695
2696 int
2697 nfsd4_init_slabs(void)
2698 {
2699 openowner_slab = kmem_cache_create("nfsd4_openowners",
2700 sizeof(struct nfs4_openowner), 0, 0, NULL);
2701 if (openowner_slab == NULL)
2702 goto out;
2703 lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2704 sizeof(struct nfs4_lockowner), 0, 0, NULL);
2705 if (lockowner_slab == NULL)
2706 goto out_free_openowner_slab;
2707 file_slab = kmem_cache_create("nfsd4_files",
2708 sizeof(struct nfs4_file), 0, 0, NULL);
2709 if (file_slab == NULL)
2710 goto out_free_lockowner_slab;
2711 stateid_slab = kmem_cache_create("nfsd4_stateids",
2712 sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
2713 if (stateid_slab == NULL)
2714 goto out_free_file_slab;
2715 deleg_slab = kmem_cache_create("nfsd4_delegations",
2716 sizeof(struct nfs4_delegation), 0, 0, NULL);
2717 if (deleg_slab == NULL)
2718 goto out_free_stateid_slab;
2719 return 0;
2720
2721 out_free_stateid_slab:
2722 kmem_cache_destroy(stateid_slab);
2723 out_free_file_slab:
2724 kmem_cache_destroy(file_slab);
2725 out_free_lockowner_slab:
2726 kmem_cache_destroy(lockowner_slab);
2727 out_free_openowner_slab:
2728 kmem_cache_destroy(openowner_slab);
2729 out:
2730 dprintk("nfsd4: out of memory while initializing nfsv4\n");
2731 return -ENOMEM;
2732 }
2733
2734 static void init_nfs4_replay(struct nfs4_replay *rp)
2735 {
2736 rp->rp_status = nfserr_serverfault;
2737 rp->rp_buflen = 0;
2738 rp->rp_buf = rp->rp_ibuf;
2739 }
2740
2741 static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
2742 {
2743 struct nfs4_stateowner *sop;
2744
2745 sop = kmem_cache_alloc(slab, GFP_KERNEL);
2746 if (!sop)
2747 return NULL;
2748
2749 sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
2750 if (!sop->so_owner.data) {
2751 kmem_cache_free(slab, sop);
2752 return NULL;
2753 }
2754 sop->so_owner.len = owner->len;
2755
2756 INIT_LIST_HEAD(&sop->so_stateids);
2757 sop->so_client = clp;
2758 init_nfs4_replay(&sop->so_replay);
2759 return sop;
2760 }
2761
2762 static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
2763 {
2764 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2765
2766 list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
2767 list_add(&oo->oo_perclient, &clp->cl_openowners);
2768 }
2769
2770 static struct nfs4_openowner *
2771 alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
2772 struct nfsd4_compound_state *cstate)
2773 {
2774 struct nfs4_client *clp = cstate->clp;
2775 struct nfs4_openowner *oo;
2776
2777 oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
2778 if (!oo)
2779 return NULL;
2780 oo->oo_owner.so_is_open_owner = 1;
2781 oo->oo_owner.so_seqid = open->op_seqid;
2782 oo->oo_flags = NFS4_OO_NEW;
2783 if (nfsd4_has_session(cstate))
2784 oo->oo_flags |= NFS4_OO_CONFIRMED;
2785 oo->oo_time = 0;
2786 oo->oo_last_closed_stid = NULL;
2787 INIT_LIST_HEAD(&oo->oo_close_lru);
2788 hash_openowner(oo, clp, strhashval);
2789 return oo;
2790 }
2791
2792 static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open)
{
2793 struct nfs4_openowner *oo = open->op_openowner;
2794
2795 stp->st_stid.sc_type = NFS4_OPEN_STID;
2796 INIT_LIST_HEAD(&stp->st_locks);
2797 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
2798 list_add(&stp->st_perfile, &fp->fi_stateids);
2799 stp->st_stateowner = &oo->oo_owner;
2800 get_nfs4_file(fp);
2801 stp->st_file = fp;
2802 stp->st_access_bmap = 0;
2803 stp->st_deny_bmap = 0;
2804 set_access(open->op_share_access, stp);
2805 set_deny(open->op_share_deny, stp);
2806 stp->st_openstp = NULL;
2807 }
2808
2809 static void
2810 move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
2811 {
2812 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2813
2814 dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
2815
2816 list_move_tail(&oo->oo_close_lru, &nn->close_lru);
2817 oo->oo_time = get_seconds();
2818 }
2819
2820 static int
2821 same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
2822 clientid_t *clid)
2823 {
2824 return (sop->so_owner.len == owner->len) &&
2825 0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
2826 (sop->so_client->cl_clientid.cl_id == clid->cl_id);
2827 }
2828
2829 static struct nfs4_openowner *
2830 find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
2831 bool sessions, struct nfsd_net *nn)
2832 {
2833 struct nfs4_stateowner *so;
2834 struct nfs4_openowner *oo;
2835 struct nfs4_client *clp;
2836
2837 list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
2838 if (!so->so_is_open_owner)
2839 continue;
2840 if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
2841 oo = openowner(so);
2842 clp = oo->oo_owner.so_client;
2843 if ((bool)clp->cl_minorversion != sessions)
2844 return NULL;
2845 renew_client(oo->oo_owner.so_client);
2846 return oo;
2847 }
2848 }
2849 return NULL;
2850 }
2851
2852 /* search file_hashtbl[] for file */
2853 static struct nfs4_file *
2854 find_file_locked(struct inode *ino)
2855 {
2856 unsigned int hashval = file_hashval(ino);
2857 struct nfs4_file *fp;
2858
2859 lockdep_assert_held(&state_lock);
2860
2861 hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
2862 if (fp->fi_inode == ino) {
2863 get_nfs4_file(fp);
2864 return fp;
2865 }
2866 }
2867 return NULL;
2868 }
2869
2870 static struct nfs4_file *
2871 find_file(struct inode *ino)
2872 {
2873 struct nfs4_file *fp;
2874
2875 spin_lock(&state_lock);
2876 fp = find_file_locked(ino);
2877 spin_unlock(&state_lock);
2878 return fp;
2879 }
2880
2881 static struct nfs4_file *
2882 find_or_add_file(struct inode *ino, struct nfs4_file *new)
2883 {
2884 struct nfs4_file *fp;
2885
2886 spin_lock(&state_lock);
2887 fp = find_file_locked(ino);
2888 if (fp == NULL) {
2889 nfsd4_init_file(new, ino);
2890 fp = new;
2891 }
2892 spin_unlock(&state_lock);
2893
2894 return fp;
2895 }
2896
2897 /*
2898 * Called to check for share-deny conflicts when a READ arrives with
2899 * an all-zero stateid, or a WRITE with an all-zero or all-ones stateid.
2900 */
2901 static __be32
2902 nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2903 {
2904 struct inode *ino = current_fh->fh_dentry->d_inode;
2905 struct nfs4_file *fp;
2906 struct nfs4_ol_stateid *stp;
2907 __be32 ret;
2908
2909 fp = find_file(ino);
2910 if (!fp)
2911 return nfs_ok;
2912 ret = nfserr_locked;
2913 /* Search for conflicting share reservations */
2914 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2915 if (test_deny(deny_type, stp) ||
2916 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2917 goto out;
2918 }
2919 ret = nfs_ok;
2920 out:
2921 put_nfs4_file(fp);
2922 return ret;
2923 }
2924
2925 static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2926 {
2927 struct nfs4_client *clp = dp->dl_stid.sc_client;
2928 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
2929
2930 lockdep_assert_held(&state_lock);
2931 /* We're assuming the state code never drops its reference
2932 * without first removing the lease. Since we're in this lease
2933 * callback (and since the lease code is serialized by the kernel
2934 * lock) we know the server hasn't removed the lease yet, so
2935 * it's safe to take a reference: */
2936 atomic_inc(&dp->dl_count);
2937
2938 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
2939
2940 /* Only place dl_time is set; protected by i_lock: */
2941 dp->dl_time = get_seconds();
2942
2943 block_delegations(&dp->dl_fh);
2944
2945 nfsd4_cb_recall(dp);
2946 }
2947
2948 /* Called from break_lease() with i_lock held. */
2949 static void nfsd_break_deleg_cb(struct file_lock *fl)
2950 {
2951 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2952 struct nfs4_delegation *dp;
2953
2954 if (!fp) {
2955 WARN(1, "(%p)->fl_owner NULL\n", fl);
2956 return;
2957 }
2958 if (fp->fi_had_conflict) {
2959 WARN(1, "duplicate break on %p\n", fp);
2960 return;
2961 }
2962 /*
2963 * We don't want the locks code to timeout the lease for us;
2964 * we'll remove it ourselves if a delegation isn't returned
2965 * in time:
2966 */
2967 fl->fl_break_time = 0;
2968
2969 spin_lock(&state_lock);
2970 fp->fi_had_conflict = true;
2971 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2972 nfsd_break_one_deleg(dp);
2973 spin_unlock(&state_lock);
2974 }
2975
2976 static
2977 int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
2978 {
2979 if (arg & F_UNLCK)
2980 return lease_modify(onlist, arg);
2981 else
2982 return -EAGAIN;
2983 }
2984
2985 static const struct lock_manager_operations nfsd_lease_mng_ops = {
2986 .lm_break = nfsd_break_deleg_cb,
2987 .lm_change = nfsd_change_deleg_cb,
2988 };
2989
2990 static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
2991 {
2992 if (nfsd4_has_session(cstate))
2993 return nfs_ok;
2994 if (seqid == so->so_seqid - 1)
2995 return nfserr_replay_me;
2996 if (seqid == so->so_seqid)
2997 return nfs_ok;
2998 return nfserr_bad_seqid;
2999 }
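/*
 * Example: with so_seqid == 7, a v4.0 request carrying seqid 6 is a
 * retransmission and is answered from so_replay (nfserr_replay_me),
 * seqid 7 is the expected next request, and anything else gets
 * nfserr_bad_seqid. Sessions (v4.1+) skip this check entirely.
 */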
3000
3001 static __be32 lookup_clientid(clientid_t *clid,
3002 struct nfsd4_compound_state *cstate,
3003 struct nfsd_net *nn)
3004 {
3005 struct nfs4_client *found;
3006
3007 if (cstate->clp) {
3008 found = cstate->clp;
3009 if (!same_clid(&found->cl_clientid, clid))
3010 return nfserr_stale_clientid;
3011 return nfs_ok;
3012 }
3013
3014 if (STALE_CLIENTID(clid, nn))
3015 return nfserr_stale_clientid;
3016
3017 /*
3018 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
3019 * cached already then we know this is for v4.0 and "sessions"
3020 * will be false.
3021 */
3022 WARN_ON_ONCE(cstate->session);
3023 found = find_confirmed_client(clid, false, nn);
3024 if (!found)
3025 return nfserr_expired;
3026
3027 /* Cache the nfs4_client in cstate! */
3028 cstate->clp = found;
3029 atomic_inc(&found->cl_refcount);
3030 return nfs_ok;
3031 }
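/*
 * A sketch of the typical caller pattern (cf. nfsd4_renew() and
 * nfsd4_process_open1() below):
 *
 *	status = lookup_clientid(clid, cstate, nn);
 *	if (status)
 *		return status;
 *	clp = cstate->clp;	(now pinned via cl_refcount)
 */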
3032
3033 __be32
3034 nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3035 struct nfsd4_open *open, struct nfsd_net *nn)
3036 {
3037 clientid_t *clientid = &open->op_clientid;
3038 struct nfs4_client *clp = NULL;
3039 unsigned int strhashval;
3040 struct nfs4_openowner *oo = NULL;
3041 __be32 status;
3042
3043 if (STALE_CLIENTID(&open->op_clientid, nn))
3044 return nfserr_stale_clientid;
3045 /*
3046 * In case we need it later, after we've already created the
3047 * file and don't want to risk a further failure:
3048 */
3049 open->op_file = nfsd4_alloc_file();
3050 if (open->op_file == NULL)
3051 return nfserr_jukebox;
3052
3053 status = lookup_clientid(clientid, cstate, nn);
3054 if (status)
3055 return status;
3056 clp = cstate->clp;
3057
3058 strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
3059 oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
3060 open->op_openowner = oo;
3061 if (!oo)
3062 goto new_owner;
3064 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3065 /* Replace unconfirmed owners without checking for replay. */
3066 release_openowner(oo);
3067 open->op_openowner = NULL;
3068 goto new_owner;
3069 }
3070 status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
3071 if (status)
3072 return status;
3073 goto alloc_stateid;
3074 new_owner:
3075 oo = alloc_init_open_stateowner(strhashval, open, cstate);
3076 if (oo == NULL)
3077 return nfserr_jukebox;
3078 open->op_openowner = oo;
3079 alloc_stateid:
3080 open->op_stp = nfs4_alloc_stateid(clp);
3081 if (!open->op_stp)
3082 return nfserr_jukebox;
3083 return nfs_ok;
3084 }
3085
3086 static inline __be32
3087 nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
3088 {
3089 if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
3090 return nfserr_openmode;
3091 else
3092 return nfs_ok;
3093 }
3094
3095 static int share_access_to_flags(u32 share_access)
3096 {
3097 return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
3098 }
3099
3100 static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
3101 {
3102 struct nfs4_stid *ret;
3103
3104 ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3105 if (!ret)
3106 return NULL;
3107 return delegstateid(ret);
3108 }
3109
3110 static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
3111 {
3112 return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
3113 open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
3114 }
3115
3116 static __be32
3117 nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3118 struct nfs4_delegation **dp)
3119 {
3120 int flags;
3121 __be32 status = nfserr_bad_stateid;
3122
3123 *dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
3124 if (*dp == NULL)
3125 goto out;
3126 flags = share_access_to_flags(open->op_share_access);
3127 status = nfs4_check_delegmode(*dp, flags);
3128 if (status)
3129 *dp = NULL;
3130 out:
3131 if (!nfsd4_is_deleg_cur(open))
3132 return nfs_ok;
3133 if (status)
3134 return status;
3135 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3136 return nfs_ok;
3137 }
3138
3139 static __be32
3140 nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
3141 {
3142 struct nfs4_ol_stateid *local;
3143 struct nfs4_openowner *oo = open->op_openowner;
3144
3145 list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
3146 /* ignore lock owners */
3147 if (local->st_stateowner->so_is_open_owner == 0)
3148 continue;
3149 /* remember if we have seen this open owner */
3150 if (local->st_stateowner == &oo->oo_owner)
3151 *stpp = local;
3152 /* check for conflicting share reservations */
3153 if (!test_share(local, open))
3154 return nfserr_share_denied;
3155 }
3156 return nfs_ok;
3157 }
3158
3159 static inline int nfs4_access_to_access(u32 nfs4_access)
3160 {
3161 int flags = 0;
3162
3163 if (nfs4_access & NFS4_SHARE_ACCESS_READ)
3164 flags |= NFSD_MAY_READ;
3165 if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
3166 flags |= NFSD_MAY_WRITE;
3167 return flags;
3168 }
3169
3170 static inline __be32
3171 nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
3172 struct nfsd4_open *open)
3173 {
3174 struct iattr iattr = {
3175 .ia_valid = ATTR_SIZE,
3176 .ia_size = 0,
3177 };
3178 if (!open->op_truncate)
3179 return 0;
3180 if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
3181 return nfserr_inval;
3182 return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
3183 }
3184
3185 static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3186 struct svc_fh *cur_fh, struct nfsd4_open *open)
3187 {
3188 __be32 status;
3189 int oflag = nfs4_access_to_omode(open->op_share_access);
3190 int access = nfs4_access_to_access(open->op_share_access);
3191
3192 if (!fp->fi_fds[oflag]) {
3193 status = nfsd_open(rqstp, cur_fh, S_IFREG, access,
3194 &fp->fi_fds[oflag]);
3195 if (status)
3196 goto out;
3197 }
3198 nfs4_file_get_access(fp, oflag);
3199
3200 status = nfsd4_truncate(rqstp, cur_fh, open);
3201 if (status)
3202 goto out_put_access;
3203
3204 return nfs_ok;
3205
3206 out_put_access:
3207 nfs4_file_put_access(fp, oflag);
3208 out:
3209 return status;
3210 }
3211
3212 static __be32
3213 nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
3214 {
3215 u32 op_share_access = open->op_share_access;
3216 __be32 status;
3217
3218 if (!test_access(op_share_access, stp))
3219 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
3220 else
3221 status = nfsd4_truncate(rqstp, cur_fh, open);
3222
3223 if (status)
3224 return status;
3225
3226 /* remember the open */
3227 set_access(op_share_access, stp);
3228 set_deny(open->op_share_deny, stp);
3229 return nfs_ok;
3230 }
3231
3232
3233 static void
3234 nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
3235 {
3236 open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3237 }
3238
3239 /* Should we give out recallable state? */
3240 static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
3241 {
3242 if (clp->cl_cb_state == NFSD4_CB_UP)
3243 return true;
3244 /*
3245 * In the sessions case, since we don't have to establish a
3246 * separate connection for callbacks, we assume it's OK
3247 * until we hear otherwise:
3248 */
3249 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
3250 }
3251
3252 static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
3253 {
3254 struct file_lock *fl;
3255
3256 fl = locks_alloc_lock();
3257 if (!fl)
3258 return NULL;
3259 locks_init_lock(fl);
3260 fl->fl_lmops = &nfsd_lease_mng_ops;
3261 fl->fl_flags = FL_DELEG;
3262 fl->fl_type = (flag == NFS4_OPEN_DELEGATE_READ) ? F_RDLCK : F_WRLCK;
3263 fl->fl_end = OFFSET_MAX;
3264 fl->fl_owner = (fl_owner_t)(dp->dl_file);
3265 fl->fl_pid = current->tgid;
3266 return fl;
3267 }
3268
3269 static int nfs4_setlease(struct nfs4_delegation *dp)
3270 {
3271 struct nfs4_file *fp = dp->dl_file;
3272 struct file_lock *fl;
3273 int status;
3274
3275 fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
3276 if (!fl)
3277 return -ENOMEM;
3278 fl->fl_file = find_readable_file(fp);
3279 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
3280 if (status)
3281 goto out_free;
3282 fp->fi_lease = fl;
3283 fp->fi_deleg_file = get_file(fl->fl_file);
3284 atomic_set(&fp->fi_delegees, 1);
3285 spin_lock(&state_lock);
3286 hash_delegation_locked(dp, fp);
3287 spin_unlock(&state_lock);
3288 return 0;
3289 out_free:
3290 locks_free_lock(fl);
3291 return status;
3292 }
3293
3294 static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
3295 {
3296 if (fp->fi_had_conflict)
3297 return -EAGAIN;
3298 get_nfs4_file(fp);
3299 dp->dl_file = fp;
3300 if (!fp->fi_lease)
3301 return nfs4_setlease(dp);
3302 spin_lock(&state_lock);
3303 atomic_inc(&fp->fi_delegees);
3304 if (fp->fi_had_conflict) {
3305 spin_unlock(&state_lock);
3306 return -EAGAIN;
3307 }
3308 hash_delegation_locked(dp, fp);
3309 spin_unlock(&state_lock);
3310 return 0;
3311 }
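/*
 * Note on the locking above: fi_had_conflict is re-checked under
 * state_lock because a lease break can race with the fi_delegees
 * bump; nfsd_break_deleg_cb() sets the flag under the same lock.
 */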
3312
3313 static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
3314 {
3315 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3316 if (status == -EAGAIN)
3317 open->op_why_no_deleg = WND4_CONTENTION;
3318 else {
3319 open->op_why_no_deleg = WND4_RESOURCE;
3320 switch (open->op_deleg_want) {
3321 case NFS4_SHARE_WANT_READ_DELEG:
3322 case NFS4_SHARE_WANT_WRITE_DELEG:
3323 case NFS4_SHARE_WANT_ANY_DELEG:
3324 break;
3325 case NFS4_SHARE_WANT_CANCEL:
3326 open->op_why_no_deleg = WND4_CANCELLED;
3327 break;
3328 case NFS4_SHARE_WANT_NO_DELEG:
3329 WARN_ON_ONCE(1);
3330 }
3331 }
3332 }
3333
3334 /*
3335 * Attempt to hand out a delegation.
3336 *
3337 * Note we don't support write delegations, and won't until the vfs has
3338 * proper support for them.
3339 */
3340 static void
3341 nfs4_open_delegation(struct net *net, struct svc_fh *fh,
3342 struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
3343 {
3344 struct nfs4_delegation *dp;
3345 struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
3346 int cb_up;
3347 int status = 0;
3348
3349 cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3350 open->op_recall = 0;
3351 switch (open->op_claim_type) {
3352 case NFS4_OPEN_CLAIM_PREVIOUS:
3353 if (!cb_up)
3354 open->op_recall = 1;
3355 if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
3356 goto out_no_deleg;
3357 break;
3358 case NFS4_OPEN_CLAIM_NULL:
3359 case NFS4_OPEN_CLAIM_FH:
3360 /*
3361 * Let's not give out any delegations till everyone's
3362 * had the chance to reclaim theirs....
3363 */
3364 if (locks_in_grace(net))
3365 goto out_no_deleg;
3366 if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3367 goto out_no_deleg;
3368 /*
3369 * Also, if the file was opened for write or
3370 * create, there's a good chance the client's
3371 * about to write to it, resulting in an
3372 * immediate recall (since we don't support
3373 * write delegations):
3374 */
3375 if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3376 goto out_no_deleg;
3377 if (open->op_create == NFS4_OPEN_CREATE)
3378 goto out_no_deleg;
3379 break;
3380 default:
3381 goto out_no_deleg;
3382 }
3383 dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
3384 if (dp == NULL)
3385 goto out_no_deleg;
3386 status = nfs4_set_delegation(dp, stp->st_file);
3387 if (status)
3388 goto out_free;
3389
3390 memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
3391
3392 dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3393 STATEID_VAL(&dp->dl_stid.sc_stateid));
3394 open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3395 return;
3396 out_free:
3397 destroy_delegation(dp);
3398 out_no_deleg:
3399 if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3400 open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3401 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3402 open->op_recall = 1;
3403 }
3404 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
3405
3406 /* 4.1 client asking for a delegation? */
3407 if (open->op_deleg_want)
3408 nfsd4_open_deleg_none_ext(open, status);
3409 return;
3410 }
3411
3412 static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
3413 struct nfs4_delegation *dp)
3414 {
3415 if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
3416 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3417 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3418 open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
3419 } else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
3420 dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
3421 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3422 open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
3423 }
3424 /* Otherwise the client must be confused wanting a delegation
3425 * it already has; therefore we don't return
3426 * NFS4_OPEN_DELEGATE_NONE_EXT and a reason.
3427 */
3428 }
3429
3430 /*
3431 * called with nfs4_lock_state() held.
3432 */
3433 __be32
3434 nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
3435 {
3436 struct nfsd4_compoundres *resp = rqstp->rq_resp;
3437 struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
3438 struct nfs4_file *fp = NULL;
3439 struct inode *ino = current_fh->fh_dentry->d_inode;
3440 struct nfs4_ol_stateid *stp = NULL;
3441 struct nfs4_delegation *dp = NULL;
3442 __be32 status;
3443
3444 /*
3445 * Lookup file; if found, lookup stateid and check open request,
3446 * and check for delegations in the process of being recalled.
3447 * If not found, create the nfs4_file struct
3448 */
3449 fp = find_or_add_file(ino, open->op_file);
3450 if (fp != open->op_file) {
3451 if ((status = nfs4_check_open(fp, open, &stp)))
3452 goto out;
3453 status = nfs4_check_deleg(cl, open, &dp);
3454 if (status)
3455 goto out;
3456 } else {
3457 open->op_file = NULL;
3458 status = nfserr_bad_stateid;
3459 if (nfsd4_is_deleg_cur(open))
3460 goto out;
3461 status = nfserr_jukebox;
3462 }
3463
3464 /*
3465 * OPEN the file, or upgrade an existing OPEN.
3466 * If truncate fails, the OPEN fails.
3467 */
3468 if (stp) {
3469 /* Stateid was found, this is an OPEN upgrade */
3470 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3471 if (status)
3472 goto out;
3473 } else {
3474 status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
3475 if (status)
3476 goto out;
3477 stp = open->op_stp;
3478 open->op_stp = NULL;
3479 init_open_stateid(stp, fp, open);
3480 }
3481 update_stateid(&stp->st_stid.sc_stateid);
3482 memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
3483
3484 if (nfsd4_has_session(&resp->cstate)) {
3485 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
3486 open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
3487 open->op_why_no_deleg = WND4_NOT_WANTED;
3488 goto nodeleg;
3489 }
3490 }
3491
3492 /*
3493 * Attempt to hand out a delegation. No error return, because the
3494 * OPEN succeeds even if we fail.
3495 */
3496 nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
3497 nodeleg:
3498 status = nfs_ok;
3499
3500 dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
3501 STATEID_VAL(&stp->st_stid.sc_stateid));
3502 out:
3503 /* 4.1 client trying to upgrade/downgrade delegation? */
3504 if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
3505 open->op_deleg_want)
3506 nfsd4_deleg_xgrade_none_ext(open, dp);
3507
3508 if (fp)
3509 put_nfs4_file(fp);
3510 if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
3511 nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
3512 /*
3513 * To finish the open response, we just need to set the rflags.
3514 */
3515 open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
3516 if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
3517 !nfsd4_has_session(&resp->cstate))
3518 open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
3519
3520 return status;
3521 }
3522
3523 void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
3524 {
3525 if (open->op_openowner) {
3526 struct nfs4_openowner *oo = open->op_openowner;
3527
3528 if (!list_empty(&oo->oo_owner.so_stateids))
3529 list_del_init(&oo->oo_close_lru);
3530 if (oo->oo_flags & NFS4_OO_NEW) {
3531 if (status) {
3532 release_openowner(oo);
3533 open->op_openowner = NULL;
3534 } else
3535 oo->oo_flags &= ~NFS4_OO_NEW;
3536 }
3537 }
3538 if (open->op_file)
3539 nfsd4_free_file(open->op_file);
3540 if (open->op_stp)
3541 free_generic_stateid(open->op_stp);
3542 }
3543
3544 __be32
3545 nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3546 clientid_t *clid)
3547 {
3548 struct nfs4_client *clp;
3549 __be32 status;
3550 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
3551
3552 nfs4_lock_state();
3553 dprintk("process_renew(%08x/%08x): starting\n",
3554 clid->cl_boot, clid->cl_id);
3555 status = lookup_clientid(clid, cstate, nn);
3556 if (status)
3557 goto out;
3558 clp = cstate->clp;
3559 status = nfserr_cb_path_down;
3560 if (!list_empty(&clp->cl_delegations)
3561 && clp->cl_cb_state != NFSD4_CB_UP)
3562 goto out;
3563 status = nfs_ok;
3564 out:
3565 nfs4_unlock_state();
3566 return status;
3567 }
3568
3569 static void
3570 nfsd4_end_grace(struct nfsd_net *nn)
3571 {
3572 /* do nothing if grace period already ended */
3573 if (nn->grace_ended)
3574 return;
3575
3576 dprintk("NFSD: end of grace period\n");
3577 nn->grace_ended = true;
3578 nfsd4_record_grace_done(nn, nn->boot_time);
3579 locks_end_grace(&nn->nfsd4_manager);
3580 /*
3581 * Now that every NFSv4 client has had the chance to recover and
3582 * to see the (possibly new, possibly shorter) lease time, we
3583 * can safely set the next grace time to the current lease time:
3584 */
3585 nn->nfsd4_grace = nn->nfsd4_lease;
3586 }
3587
3588 static time_t
3589 nfs4_laundromat(struct nfsd_net *nn)
3590 {
3591 struct nfs4_client *clp;
3592 struct nfs4_openowner *oo;
3593 struct nfs4_delegation *dp;
3594 struct list_head *pos, *next, reaplist;
3595 time_t cutoff = get_seconds() - nn->nfsd4_lease;
3596 time_t t, new_timeo = nn->nfsd4_lease;
3597
3598 nfs4_lock_state();
3599
3600 dprintk("NFSD: laundromat service - starting\n");
3601 nfsd4_end_grace(nn);
3602 INIT_LIST_HEAD(&reaplist);
3603 spin_lock(&nn->client_lock);
3604 list_for_each_safe(pos, next, &nn->client_lru) {
3605 clp = list_entry(pos, struct nfs4_client, cl_lru);
3606 if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
3607 t = clp->cl_time - cutoff;
3608 new_timeo = min(new_timeo, t);
3609 break;
3610 }
3611 if (mark_client_expired_locked(clp)) {
3612 dprintk("NFSD: client in use (clientid %08x)\n",
3613 clp->cl_clientid.cl_id);
3614 continue;
3615 }
3616 list_move(&clp->cl_lru, &reaplist);
3617 }
3618 spin_unlock(&nn->client_lock);
3619 list_for_each_safe(pos, next, &reaplist) {
3620 clp = list_entry(pos, struct nfs4_client, cl_lru);
3621 dprintk("NFSD: purging unused client (clientid %08x)\n",
3622 clp->cl_clientid.cl_id);
3623 expire_client(clp);
3624 }
3625 spin_lock(&state_lock);
3626 list_for_each_safe(pos, next, &nn->del_recall_lru) {
3627 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3628 if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
3629 continue;
3630 if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3631 t = dp->dl_time - cutoff;
3632 new_timeo = min(new_timeo, t);
3633 break;
3634 }
3635 list_move(&dp->dl_recall_lru, &reaplist);
3636 }
3637 spin_unlock(&state_lock);
3638 list_for_each_safe(pos, next, &reaplist) {
3639 dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
3640 revoke_delegation(dp);
3641 }
3642 list_for_each_safe(pos, next, &nn->close_lru) {
3643 oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
3644 if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3645 t = oo->oo_time - cutoff;
3646 new_timeo = min(new_timeo, t);
3647 break;
3648 }
3649 release_openowner(oo);
3650 }
3651 new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
3652 nfs4_unlock_state();
3653 return new_timeo;
3654 }
3655
3656 static struct workqueue_struct *laundry_wq;
3657 static void laundromat_main(struct work_struct *);
3658
3659 static void
3660 laundromat_main(struct work_struct *laundry)
3661 {
3662 time_t t;
3663 struct delayed_work *dwork = container_of(laundry, struct delayed_work,
3664 work);
3665 struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
3666 laundromat_work);
3667
3668 t = nfs4_laundromat(nn);
3669 dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3670 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
3671 }
3672
3673 static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
3674 {
3675 if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
3676 return nfserr_bad_stateid;
3677 return nfs_ok;
3678 }
3679
3680 static inline int
3681 access_permit_read(struct nfs4_ol_stateid *stp)
3682 {
3683 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3684 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3685 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3686 }
3687
3688 static inline int
3689 access_permit_write(struct nfs4_ol_stateid *stp)
3690 {
3691 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3692 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3693 }
3694
3695 static
3696 __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3697 {
3698 __be32 status = nfserr_openmode;
3699
3700 /* For lock stateids, we test the parent open, not the lock: */
3701 if (stp->st_openstp)
3702 stp = stp->st_openstp;
3703 if ((flags & WR_STATE) && !access_permit_write(stp))
3704 goto out;
3705 if ((flags & RD_STATE) && !access_permit_read(stp))
3706 goto out;
3707 status = nfs_ok;
3708 out:
3709 return status;
3710 }
3711
3712 static inline __be32
3713 check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
3714 {
3715 if (ONE_STATEID(stateid) && (flags & RD_STATE))
3716 return nfs_ok;
3717 else if (locks_in_grace(net)) {
3718 /* The answer in the remaining cases depends on the existence of
3719 * conflicting state, so we must wait out the grace period. */
3720 return nfserr_grace;
3721 } else if (flags & WR_STATE)
3722 return nfs4_share_conflict(current_fh,
3723 NFS4_SHARE_DENY_WRITE);
3724 else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
3725 return nfs4_share_conflict(current_fh,
3726 NFS4_SHARE_DENY_READ);
3727 }
3728
3729 /*
3730 * Allow READ/WRITE during grace period on recovered state only for files
3731 * that are not able to provide mandatory locking.
3732 */
3733 static inline int
3734 grace_disallows_io(struct net *net, struct inode *inode)
3735 {
3736 return locks_in_grace(net) && mandatory_lock(inode);
3737 }
3738
3739 /* Returns true iff a is later than b: */
3740 static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3741 {
3742 return (s32)(a->si_generation - b->si_generation) > 0;
3743 }
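/*
 * The (s32) cast keeps the comparison wrap-safe: for example, with
 * a->si_generation == 1 and b->si_generation == 0xffffffff the
 * subtraction yields (s32)2 > 0, so a is correctly seen as later.
 */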
3744
3745 static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3746 {
3747 /*
3748 * When sessions are used the stateid generation number is ignored
3749 * when it is zero.
3750 */
3751 if (has_session && in->si_generation == 0)
3752 return nfs_ok;
3753
3754 if (in->si_generation == ref->si_generation)
3755 return nfs_ok;
3756
3757 /* If the client sends us a stateid from the future, it's buggy: */
3758 if (stateid_generation_after(in, ref))
3759 return nfserr_bad_stateid;
3760 /*
3761 * However, we could see a stateid from the past, even from a
3762 * non-buggy client. For example, if the client sends a lock
3763 * while some IO is outstanding, the lock may bump si_generation
3764 * while the IO is still in flight. The client could avoid that
3765 * situation by waiting for responses on all the IO requests,
3766 * but it may get better performance by simply retrying IO that
3767 * receives an old_stateid error, as long as requests are rarely
3768 * reordered in flight:
3769 */
3770 return nfserr_old_stateid;
3771 }
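/*
 * Example: in->si_generation == 3 against ref->si_generation == 5
 * yields nfserr_old_stateid (stale but once valid), while 7 against
 * 5 yields nfserr_bad_stateid (a stateid from the future).
 */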
3772
3773 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3774 {
3775 struct nfs4_stid *s;
3776 struct nfs4_ol_stateid *ols;
3777 __be32 status;
3778
3779 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3780 return nfserr_bad_stateid;
3781 /* Client debugging aid. */
3782 if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
3783 char addr_str[INET6_ADDRSTRLEN];
3784 rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
3785 sizeof(addr_str));
3786 pr_warn_ratelimited("NFSD: client %s testing state ID "
3787 "with incorrect client ID\n", addr_str);
3788 return nfserr_bad_stateid;
3789 }
3790 s = find_stateid(cl, stateid);
3791 if (!s)
3792 return nfserr_bad_stateid;
3793 status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3794 if (status)
3795 return status;
3796 switch (s->sc_type) {
3797 case NFS4_DELEG_STID:
3798 return nfs_ok;
3799 case NFS4_REVOKED_DELEG_STID:
3800 return nfserr_deleg_revoked;
3801 case NFS4_OPEN_STID:
3802 case NFS4_LOCK_STID:
3803 ols = openlockstateid(s);
3804 if (ols->st_stateowner->so_is_open_owner
3805 && !(openowner(ols->st_stateowner)->oo_flags
3806 & NFS4_OO_CONFIRMED))
3807 return nfserr_bad_stateid;
3808 return nfs_ok;
3809 default:
3810 printk("unknown stateid type %x\n", s->sc_type);
3811 case NFS4_CLOSED_STID:
3812 return nfserr_bad_stateid;
3813 }
3814 }
3815
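/*
 * Common helper: map the clientid embedded in the stateid to a client,
 * then look the stateid up by type. Note the error mapping: a stale
 * clientid means a stale stateid for NFSv4.0, but session-based (4.1+)
 * stateids carry no usable clientid, so those get nfserr_bad_stateid.
 */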
3816 static __be32
3817 nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3818 stateid_t *stateid, unsigned char typemask,
3819 struct nfs4_stid **s, struct nfsd_net *nn)
3820 {
3821 __be32 status;
3822
3823 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3824 return nfserr_bad_stateid;
3825 status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
3826 if (status == nfserr_stale_clientid) {
3827 if (cstate->session)
3828 return nfserr_bad_stateid;
3829 return nfserr_stale_stateid;
3830 }
3831 if (status)
3832 return status;
3833 *s = find_stateid_by_type(cstate->clp, stateid, typemask);
3834 if (!*s)
3835 return nfserr_bad_stateid;
3836 return nfs_ok;
3837 }
3838
3839 /*
3840 * Checks for stateid operations
3841 */
3842 __be32
3843 nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3844 stateid_t *stateid, int flags, struct file **filpp)
3845 {
3846 struct nfs4_stid *s;
3847 struct nfs4_ol_stateid *stp = NULL;
3848 struct nfs4_delegation *dp = NULL;
3849 struct svc_fh *current_fh = &cstate->current_fh;
3850 struct inode *ino = current_fh->fh_dentry->d_inode;
3851 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3852 struct file *file = NULL;
3853 __be32 status;
3854
3855 if (filpp)
3856 *filpp = NULL;
3857
3858 if (grace_disallows_io(net, ino))
3859 return nfserr_grace;
3860
3861 if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3862 return check_special_stateids(net, current_fh, stateid, flags);
3863
3864 nfs4_lock_state();
3865
3866 status = nfsd4_lookup_stateid(cstate, stateid,
3867 NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3868 &s, nn);
3869 if (status)
3870 goto out;
3871 status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
3872 if (status)
3873 goto out;
3874 switch (s->sc_type) {
3875 case NFS4_DELEG_STID:
3876 dp = delegstateid(s);
3877 status = nfs4_check_delegmode(dp, flags);
3878 if (status)
3879 goto out;
3880 if (filpp) {
3881 file = dp->dl_file->fi_deleg_file;
3882 if (!file) {
3883 WARN_ON_ONCE(1);
3884 status = nfserr_serverfault;
3885 goto out;
3886 }
3887 }
3888 break;
3889 case NFS4_OPEN_STID:
3890 case NFS4_LOCK_STID:
3891 stp = openlockstateid(s);
3892 status = nfs4_check_fh(current_fh, stp);
3893 if (status)
3894 goto out;
3895 if (stp->st_stateowner->so_is_open_owner
3896 && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) {
/* An unconfirmed open owner must not be usable for I/O; don't fall out with nfs_ok: */
status = nfserr_bad_stateid;
3897 goto out;
}
3898 status = nfs4_check_openmode(stp, flags);
3899 if (status)
3900 goto out;
3901 if (filpp) {
3902 if (flags & RD_STATE)
3903 file = find_readable_file(stp->st_file);
3904 else
3905 file = find_writeable_file(stp->st_file);
3906 }
3907 break;
3908 default:
3909 status = nfserr_bad_stateid;
3910 goto out;
3911 }
3912 status = nfs_ok;
3913 if (file)
3914 *filpp = get_file(file);
3915 out:
3916 nfs4_unlock_state();
3917 return status;
3918 }
3919
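/*
 * A lock stateid can only be freed while no locks from its owner remain
 * on the file; otherwise the client must see nfserr_locks_held.
 */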
3920 static __be32
3921 nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
3922 {
3923 struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
3924
3925 if (check_for_locks(stp->st_file, lo))
3926 return nfserr_locks_held;
3927 release_lockowner_if_empty(lo);
3928 return nfs_ok;
3929 }
3930
3931 /*
3932 * Test if the stateid is valid
3933 */
3934 __be32
3935 nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3936 struct nfsd4_test_stateid *test_stateid)
3937 {
3938 struct nfsd4_test_stateid_id *stateid;
3939 struct nfs4_client *cl = cstate->session->se_client;
3940
3941 nfs4_lock_state();
3942 list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
3943 stateid->ts_id_status =
3944 nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
3945 nfs4_unlock_state();
3946
3947 return nfs_ok;
3948 }
3949
3950 __be32
3951 nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
3952 struct nfsd4_free_stateid *free_stateid)
3953 {
3954 stateid_t *stateid = &free_stateid->fr_stateid;
3955 struct nfs4_stid *s;
3956 struct nfs4_delegation *dp;
3957 struct nfs4_client *cl = cstate->session->se_client;
3958 __be32 ret = nfserr_bad_stateid;
3959
3960 nfs4_lock_state();
3961 s = find_stateid(cl, stateid);
3962 if (!s)
3963 goto out;
3964 switch (s->sc_type) {
3965 case NFS4_DELEG_STID:
3966 ret = nfserr_locks_held;
3967 goto out;
3968 case NFS4_OPEN_STID:
3969 case NFS4_LOCK_STID:
3970 ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
3971 if (ret)
3972 goto out;
3973 if (s->sc_type == NFS4_LOCK_STID)
3974 ret = nfsd4_free_lock_stateid(openlockstateid(s));
3975 else
3976 ret = nfserr_locks_held;
3977 break;
3978 case NFS4_REVOKED_DELEG_STID:
3979 dp = delegstateid(s);
3980 destroy_revoked_delegation(dp);
3981 ret = nfs_ok;
3982 break;
3983 default:
3984 ret = nfserr_bad_stateid;
3985 }
3986 out:
3987 nfs4_unlock_state();
3988 return ret;
3989 }
3990
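/*
 * Map an NFSv4 lock type onto the RD_STATE/WR_STATE flags so that
 * nfs4_check_openmode() can verify the open has the access the lock needs.
 */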
3991 static inline int
3992 setlkflg(int type)
3993 {
3994 return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
3995 RD_STATE : WR_STATE;
3996 }
3997
3998 static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
3999 {
4000 struct svc_fh *current_fh = &cstate->current_fh;
4001 struct nfs4_stateowner *sop = stp->st_stateowner;
4002 __be32 status;
4003
4004 status = nfsd4_check_seqid(cstate, sop, seqid);
4005 if (status)
4006 return status;
4007 if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4008 || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4009 /*
4010 * "Closed" stateid's exist *only* to return
4011 * nfserr_replay_me from the previous step, and
4012 * revoked delegations are kept only for free_stateid.
4013 */
4014 return nfserr_bad_stateid;
4015 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4016 if (status)
4017 return status;
4018 return nfs4_check_fh(current_fh, stp);
4019 }
4020
4021 /*
4022 * Checks for sequence id mutating operations.
4023 */
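/*
 * On success *stpp points at the open/lock stateid and, for NFSv4.0,
 * cstate->replay_owner is set so the owner's reply cache can answer a
 * replay; in that case the state lock stays held until after encoding.
 */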
4024 static __be32
4025 nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4026 stateid_t *stateid, char typemask,
4027 struct nfs4_ol_stateid **stpp,
4028 struct nfsd_net *nn)
4029 {
4030 __be32 status;
4031 struct nfs4_stid *s;
4032 struct nfs4_ol_stateid *stp = NULL;
4033
4034 dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
4035 seqid, STATEID_VAL(stateid));
4036
4037 *stpp = NULL;
4038 status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4039 if (status)
4040 return status;
4041 stp = openlockstateid(s);
4042 if (!nfsd4_has_session(cstate))
4043 cstate->replay_owner = stp->st_stateowner;
4044
4045 status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
4046 if (!status)
4047 *stpp = stp;
4048 return status;
4049 }
4050
4051 static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4052 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
4053 {
4054 __be32 status;
4055 struct nfs4_openowner *oo;
4056
4057 status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4058 NFS4_OPEN_STID, stpp, nn);
4059 if (status)
4060 return status;
4061 oo = openowner((*stpp)->st_stateowner);
4062 if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
4063 return nfserr_bad_stateid;
4064 return nfs_ok;
4065 }
4066
4067 __be32
4068 nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4069 struct nfsd4_open_confirm *oc)
4070 {
4071 __be32 status;
4072 struct nfs4_openowner *oo;
4073 struct nfs4_ol_stateid *stp;
4074 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4075
4076 dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
4077 cstate->current_fh.fh_dentry);
4078
4079 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
4080 if (status)
4081 return status;
4082
4083 nfs4_lock_state();
4084
4085 status = nfs4_preprocess_seqid_op(cstate,
4086 oc->oc_seqid, &oc->oc_req_stateid,
4087 NFS4_OPEN_STID, &stp, nn);
4088 if (status)
4089 goto out;
4090 oo = openowner(stp->st_stateowner);
4091 status = nfserr_bad_stateid;
4092 if (oo->oo_flags & NFS4_OO_CONFIRMED)
4093 goto out;
4094 oo->oo_flags |= NFS4_OO_CONFIRMED;
4095 update_stateid(&stp->st_stid.sc_stateid);
4096 memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4097 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4098 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4099
4100 nfsd4_client_record_create(oo->oo_owner.so_client);
4101 status = nfs_ok;
4102 out:
4103 nfsd4_bump_seqid(cstate, status);
4104 if (!cstate->replay_owner)
4105 nfs4_unlock_state();
4106 return status;
4107 }
4108
4109 static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
4110 {
4111 if (!test_access(access, stp))
4112 return;
4113 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
4114 clear_access(access, stp);
4115 }
4116
4117 static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
4118 {
4119 switch (to_access) {
4120 case NFS4_SHARE_ACCESS_READ:
4121 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
4122 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4123 break;
4124 case NFS4_SHARE_ACCESS_WRITE:
4125 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
4126 nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
4127 break;
4128 case NFS4_SHARE_ACCESS_BOTH:
4129 break;
4130 default:
4131 WARN_ON_ONCE(1);
4132 }
4133 }
4134
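/*
 * Drop every deny mode that is not a subset of the downgraded deny mask;
 * e.g. downgrading to DENY_READ clears DENY_WRITE and DENY_BOTH.
 */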
4135 static void
4136 reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
4137 {
4138 int i;
4139 for (i = 0; i < 4; i++) {
4140 if ((i & deny) != i)
4141 clear_deny(i, stp);
4142 }
4143 }
4144
4145 __be32
4146 nfsd4_open_downgrade(struct svc_rqst *rqstp,
4147 struct nfsd4_compound_state *cstate,
4148 struct nfsd4_open_downgrade *od)
4149 {
4150 __be32 status;
4151 struct nfs4_ol_stateid *stp;
4152 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4153
4154 dprintk("NFSD: nfsd4_open_downgrade on file %pd\n",
4155 cstate->current_fh.fh_dentry);
4156
4157 /* We don't yet support WANT bits: */
4158 if (od->od_deleg_want)
4159 dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
4160 od->od_deleg_want);
4161
4162 nfs4_lock_state();
4163 status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4164 &od->od_stateid, &stp, nn);
4165 if (status)
4166 goto out;
4167 status = nfserr_inval;
4168 if (!test_access(od->od_share_access, stp)) {
4169 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
4170 stp->st_access_bmap, od->od_share_access);
4171 goto out;
4172 }
4173 if (!test_deny(od->od_share_deny, stp)) {
4174 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
4175 stp->st_deny_bmap, od->od_share_deny);
4176 goto out;
4177 }
4178 nfs4_stateid_downgrade(stp, od->od_share_access);
4179
4180 reset_union_bmap_deny(od->od_share_deny, stp);
4181
4182 update_stateid(&stp->st_stid.sc_stateid);
4183 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4184 status = nfs_ok;
4185 out:
4186 nfsd4_bump_seqid(cstate, status);
4187 if (!cstate->replay_owner)
4188 nfs4_unlock_state();
4189 return status;
4190 }
4191
4192 static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
4193 {
4194 struct nfs4_client *clp = s->st_stid.sc_client;
4195 struct nfs4_openowner *oo = openowner(s->st_stateowner);
4196
4197 s->st_stid.sc_type = NFS4_CLOSED_STID;
4198 unhash_open_stateid(s);
4199
4200 if (clp->cl_minorversion) {
4201 free_generic_stateid(s);
4202 if (list_empty(&oo->oo_owner.so_stateids))
4203 release_openowner(oo);
4204 } else {
4205 oo->oo_last_closed_stid = s;
4206 /*
4207 * In the 4.0 case we need to keep the owners around a
4208 * little while to handle CLOSE replay.
4209 */
4210 if (list_empty(&oo->oo_owner.so_stateids))
4211 move_to_close_lru(oo, clp->net);
4212 }
4213 }
4214
4215 /*
4216 * nfs4_unlock_state() called after encode
4217 */
4218 __be32
4219 nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4220 struct nfsd4_close *close)
4221 {
4222 __be32 status;
4223 struct nfs4_ol_stateid *stp;
4224 struct net *net = SVC_NET(rqstp);
4225 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4226
4227 dprintk("NFSD: nfsd4_close on file %pd\n",
4228 cstate->current_fh.fh_dentry);
4229
4230 nfs4_lock_state();
4231 status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
4232 &close->cl_stateid,
4233 NFS4_OPEN_STID|NFS4_CLOSED_STID,
4234 &stp, nn);
4235 nfsd4_bump_seqid(cstate, status);
4236 if (status)
4237 goto out;
4238 update_stateid(&stp->st_stid.sc_stateid);
4239 memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4240
4241 nfsd4_close_open_stateid(stp);
4242 out:
4243 if (!cstate->replay_owner)
4244 nfs4_unlock_state();
4245 return status;
4246 }
4247
4248 __be32
4249 nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4250 struct nfsd4_delegreturn *dr)
4251 {
4252 struct nfs4_delegation *dp;
4253 stateid_t *stateid = &dr->dr_stateid;
4254 struct nfs4_stid *s;
4255 __be32 status;
4256 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4257
4258 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status)
4259 return status;
4260
4261 nfs4_lock_state();
4262 status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4263 if (status)
4264 goto out;
4265 dp = delegstateid(s);
4266 status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4267 if (status)
4268 goto out;
4269
4270 destroy_delegation(dp);
4271 out:
4272 nfs4_unlock_state();
4273
4274 return status;
4275 }
4276
4277
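/* True iff start + len would wrap past the top of the u64 offset space: */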
4278 #define LOFF_OVERFLOW(start, len) ((u64)(len) > ~(u64)(start))
4279
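/*
 * First offset past the range, saturating to NFS4_MAX_UINT64 ("to EOF")
 * if start + len overflows.
 */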
4280 static inline u64
4281 end_offset(u64 start, u64 len)
4282 {
4283 u64 end;
4284
4285 end = start + len;
4286 return end >= start ? end : NFS4_MAX_UINT64;
4287 }
4288
4289 /* last octet in a range */
4290 static inline u64
4291 last_byte_offset(u64 start, u64 len)
4292 {
4293 u64 end;
4294
4295 WARN_ON_ONCE(!len);
4296 end = start + len;
4297 return end > start ? end - 1 : NFS4_MAX_UINT64;
4298 }
4299
4300 /*
4301 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
4302 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
4303 * byte, because of sign extension problems. Since NFSv4 calls for 64-bit
4304 * locking, this prevents us from being completely protocol-compliant. The
4305 * real solution to this problem is to start using unsigned file offsets in
4306 * the VFS, but this is a very deep change!
4307 */
4308 static inline void
4309 nfs4_transform_lock_offset(struct file_lock *lock)
4310 {
4311 if (lock->fl_start < 0)
4312 lock->fl_start = OFFSET_MAX;
4313 if (lock->fl_end < 0)
4314 lock->fl_end = OFFSET_MAX;
4315 }
4316
4317 /* Hack!: For now, we're defining this just so we can use a pointer to it
4318 * as a unique cookie to identify our (NFSv4's) posix locks. */
4319 static const struct lock_manager_operations nfsd_posix_mng_ops = {
4320 };
4321
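/*
 * Fill the LOCK4denied result from a conflicting file_lock. When the
 * conflict is held by one of our own lockowners we can report its owner
 * and clientid; for a conflicting local POSIX lock we report an empty
 * owner instead.
 */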
4322 static inline void
4323 nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
4324 {
4325 struct nfs4_lockowner *lo;
4326
4327 if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4328 lo = (struct nfs4_lockowner *) fl->fl_owner;
4329 deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
4330 lo->lo_owner.so_owner.len, GFP_KERNEL);
4331 if (!deny->ld_owner.data)
4332 /* We just don't care that much */
4333 goto nevermind;
4334 deny->ld_owner.len = lo->lo_owner.so_owner.len;
4335 deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4336 } else {
4337 nevermind:
4338 deny->ld_owner.len = 0;
4339 deny->ld_owner.data = NULL;
4340 deny->ld_clientid.cl_boot = 0;
4341 deny->ld_clientid.cl_id = 0;
4342 }
4343 deny->ld_start = fl->fl_start;
4344 deny->ld_length = NFS4_MAX_UINT64;
4345 if (fl->fl_end != NFS4_MAX_UINT64)
4346 deny->ld_length = fl->fl_end - fl->fl_start + 1;
4347 deny->ld_type = NFS4_READ_LT;
4348 if (fl->fl_type != F_RDLCK)
4349 deny->ld_type = NFS4_WRITE_LT;
4350 }
4351
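/*
 * Find a lockowner matching this (clientid, owner-string) pair; open
 * owners share the same hash table and are skipped.
 */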
4352 static struct nfs4_lockowner *
4353 find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
4354 struct nfsd_net *nn)
4355 {
4356 unsigned int strhashval = ownerstr_hashval(clid->cl_id, owner);
4357 struct nfs4_stateowner *so;
4358
4359 list_for_each_entry(so, &nn->ownerstr_hashtbl[strhashval], so_strhash) {
4360 if (so->so_is_open_owner)
4361 continue;
4362 if (!same_owner_str(so, owner, clid))
4363 continue;
4364 return lockowner(so);
4365 }
4366 return NULL;
4367 }
4368
4369 /*
4370 * Alloc a lock owner structure.
4371 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) have
4372 * occurred.
4373 *
4374 * strhashval = ownerstr_hashval
4375 */
4376 static struct nfs4_lockowner *
4377 alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock)
{
4378 struct nfs4_lockowner *lo;
4379 struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
4380
4381 lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
4382 if (!lo)
4383 return NULL;
4384 INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
4385 lo->lo_owner.so_is_open_owner = 0;
4386 /* It is the openowner seqid that will be incremented in encode in the
4387 * case of new lockowners; so increment the lock seqid manually: */
4388 lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4389 list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
4390 return lo;
4391 }
4392
4393 static struct nfs4_ol_stateid *
4394 alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
4395 {
4396 struct nfs4_ol_stateid *stp;
4397 struct nfs4_client *clp = lo->lo_owner.so_client;
4398
4399 stp = nfs4_alloc_stateid(clp);
4400 if (stp == NULL)
4401 return NULL;
4402 stp->st_stid.sc_type = NFS4_LOCK_STID;
4403 list_add(&stp->st_perfile, &fp->fi_stateids);
4404 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4405 stp->st_stateowner = &lo->lo_owner;
4406 get_nfs4_file(fp);
4407 stp->st_file = fp;
4408 stp->st_access_bmap = 0;
4409 stp->st_deny_bmap = open_stp->st_deny_bmap;
4410 stp->st_openstp = open_stp;
4411 list_add(&stp->st_locks, &open_stp->st_locks);
4412 return stp;
4413 }
4414
4415 static struct nfs4_ol_stateid *
4416 find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
4417 {
4418 struct nfs4_ol_stateid *lst;
4419
4420 list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
4421 if (lst->st_file == fp)
4422 return lst;
4423 }
4424 return NULL;
4425 }
4426
4427
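/*
 * A lock length is invalid if it is zero or if the range overflows,
 * except that NFS4_MAX_UINT64 means "lock to EOF" and is always valid.
 */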
4428 static int
4429 check_lock_length(u64 offset, u64 length)
4430 {
4431 return ((length == 0) || ((length != NFS4_MAX_UINT64) &&
4432 LOFF_OVERFLOW(offset, length)));
4433 }
4434
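/*
 * Take a file access reference for this lock's mode, at most once per
 * mode per lock stateid (test_access() makes repeat calls no-ops).
 */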
4435 static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4436 {
4437 struct nfs4_file *fp = lock_stp->st_file;
4438 int oflag = nfs4_access_to_omode(access);
4439
4440 if (test_access(access, lock_stp))
4441 return;
4442 nfs4_file_get_access(fp, oflag);
4443 set_access(access, lock_stp);
4444 }
4445
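/*
 * Find or create the lockowner and lock stateid backing a new LOCK
 * request. *new tells nfsd4_lock() to tear the stateid back down if the
 * lock itself later fails; for NFSv4.0, an existing lockowner must also
 * present the expected seqid.
 */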
4446 static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4447 {
4448 struct nfs4_file *fi = ost->st_file;
4449 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
4450 struct nfs4_client *cl = oo->oo_owner.so_client;
4451 struct nfs4_lockowner *lo;
4452 unsigned int strhashval;
4453 struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
4454
4455 lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, nn);
4456 if (!lo) {
4457 strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
4458 &lock->v.new.owner);
4459 lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
4460 if (lo == NULL)
4461 return nfserr_jukebox;
4462 } else {
4463 /* with an existing lockowner, seqids must be the same */
4464 if (!cstate->minorversion &&
4465 lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
4466 return nfserr_bad_seqid;
4467 }
4468
4469 *lst = find_lock_stateid(lo, fi);
4470 if (*lst == NULL) {
4471 *lst = alloc_init_lock_stateid(lo, fi, ost);
4472 if (*lst == NULL) {
4473 release_lockowner_if_empty(lo);
4474 return nfserr_jukebox;
4475 }
4476 *new = true;
4477 }
4478 return nfs_ok;
4479 }
4480
4481 /*
4482 * LOCK operation
4483 */
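/*
 * Two entry paths: lk_is_new means the first lock from this lockowner,
 * authorized via the parent open stateid; otherwise the client presents
 * an existing lock stateid whose seqid is checked in the usual way.
 */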
4484 __be32
4485 nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4486 struct nfsd4_lock *lock)
4487 {
4488 struct nfs4_openowner *open_sop = NULL;
4489 struct nfs4_lockowner *lock_sop = NULL;
4490 struct nfs4_ol_stateid *lock_stp;
4491 struct file *filp = NULL;
4492 struct file_lock *file_lock = NULL;
4493 struct file_lock *conflock = NULL;
4494 __be32 status = 0;
4495 bool new_state = false;
4496 int lkflg;
4497 int err;
4498 struct net *net = SVC_NET(rqstp);
4499 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
4500
4501 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
4502 (long long) lock->lk_offset,
4503 (long long) lock->lk_length);
4504
4505 if (check_lock_length(lock->lk_offset, lock->lk_length))
4506 return nfserr_inval;
4507
4508 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, NFSD_MAY_LOCK);
4509 if (status) {
4510 dprintk("NFSD: nfsd4_lock: permission denied!\n");
4511 return status;
4512 }
4513
4514 nfs4_lock_state();
4515
4516 if (lock->lk_is_new) {
4517 struct nfs4_ol_stateid *open_stp = NULL;
4518
4519 if (nfsd4_has_session(cstate))
4520 /* See rfc 5661 18.10.3: given clientid is ignored: */
4521 memcpy(&lock->v.new.clientid,
4522 &cstate->session->se_client->cl_clientid,
4523 sizeof(clientid_t));
4524
4525 status = nfserr_stale_clientid;
4526 if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
4527 goto out;
4528
4529 /* validate and update open stateid and open seqid */
4530 status = nfs4_preprocess_confirmed_seqid_op(cstate,
4531 lock->lk_new_open_seqid,
4532 &lock->lk_new_open_stateid,
4533 &open_stp, nn);
4534 if (status)
4535 goto out;
4536 open_sop = openowner(open_stp->st_stateowner);
4537 status = nfserr_bad_stateid;
4538 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4539 &lock->v.new.clientid))
4540 goto out;
4541 status = lookup_or_create_lock_state(cstate, open_stp, lock,
4542 &lock_stp, &new_state);
4543 } else {
4544 status = nfs4_preprocess_seqid_op(cstate,
4545 lock->lk_old_lock_seqid,
4546 &lock->lk_old_lock_stateid,
4547 NFS4_LOCK_STID, &lock_stp, nn);
}
4548 if (status)
4549 goto out;
4550 lock_sop = lockowner(lock_stp->st_stateowner);
4551
4552 lkflg = setlkflg(lock->lk_type);
4553 status = nfs4_check_openmode(lock_stp, lkflg);
4554 if (status)
4555 goto out;
4556
4557 status = nfserr_grace;
4558 if (locks_in_grace(net) && !lock->lk_reclaim)
4559 goto out;
4560 status = nfserr_no_grace;
4561 if (!locks_in_grace(net) && lock->lk_reclaim)
4562 goto out;
4563
4564 file_lock = locks_alloc_lock();
4565 if (!file_lock) {
4566 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4567 status = nfserr_jukebox;
4568 goto out;
4569 }
4570
4571 locks_init_lock(file_lock);
4572 switch (lock->lk_type) {
4573 case NFS4_READ_LT:
4574 case NFS4_READW_LT:
4575 filp = find_readable_file(lock_stp->st_file);
4576 if (filp)
4577 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4578 file_lock->fl_type = F_RDLCK;
4579 break;
4580 case NFS4_WRITE_LT:
4581 case NFS4_WRITEW_LT:
4582 filp = find_writeable_file(lock_stp->st_file);
4583 if (filp)
4584 get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4585 file_lock->fl_type = F_WRLCK;
4586 break;
4587 default:
4588 status = nfserr_inval;
4589 goto out;
4590 }
4591 if (!filp) {
4592 status = nfserr_openmode;
4593 goto out;
4594 }
4595 file_lock->fl_owner = (fl_owner_t)lock_sop;
4596 file_lock->fl_pid = current->tgid;
4597 file_lock->fl_file = filp;
4598 file_lock->fl_flags = FL_POSIX;
4599 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4600 file_lock->fl_start = lock->lk_offset;
4601 file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
4602 nfs4_transform_lock_offset(file_lock);
4603
4604 conflock = locks_alloc_lock();
4605 if (!conflock) {
4606 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4607 status = nfserr_jukebox;
4608 goto out;
4609 }
4610
4611 err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
4612 switch (-err) {
4613 case 0: /* success! */
4614 update_stateid(&lock_stp->st_stid.sc_stateid);
4615 memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid,
4616 sizeof(stateid_t));
4617 status = 0;
4618 break;
4619 case EAGAIN: /* conflock holds conflicting lock */
4620 status = nfserr_denied;
4621 dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4622 nfs4_set_lock_denied(conflock, &lock->lk_denied);
4623 break;
4624 case EDEADLK:
4625 status = nfserr_deadlock;
4626 break;
4627 default:
4628 dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4629 status = nfserrno(err);
4630 break;
4631 }
4632 out:
4633 if (status && new_state)
4634 release_lock_stateid(lock_stp);
4635 nfsd4_bump_seqid(cstate, status);
4636 if (!cstate->replay_owner)
4637 nfs4_unlock_state();
4638 if (file_lock)
4639 locks_free_lock(file_lock);
4640 if (conflock)
4641 locks_free_lock(conflock);
4642 return status;
4643 }
4644
4645 /*
4646 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
4647 * so we do a temporary open here just to get an open file to pass to
4648 * vfs_test_lock. (Arguably perhaps test_lock should be done with an
4649 * inode operation.)
4650 */
4651 static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4652 {
4653 struct file *file;
4654 __be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
4655 if (!err) {
4656 err = nfserrno(vfs_test_lock(file, lock));
4657 nfsd_close(file);
4658 }
4659 return err;
4660 }
4661
4662 /*
4663 * LOCKT operation
4664 */
4665 __be32
4666 nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4667 struct nfsd4_lockt *lockt)
4668 {
4669 struct file_lock *file_lock = NULL;
4670 struct nfs4_lockowner *lo;
4671 __be32 status;
4672 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4673
4674 if (locks_in_grace(SVC_NET(rqstp)))
4675 return nfserr_grace;
4676
4677 if (check_lock_length(lockt->lt_offset, lockt->lt_length))
4678 return nfserr_inval;
4679
4680 nfs4_lock_state();
4681
4682 if (!nfsd4_has_session(cstate)) {
4683 status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
4684 if (status)
4685 goto out;
4686 }
4687
4688 status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
if (status)
4689 goto out;
4690
4691 file_lock = locks_alloc_lock();
4692 if (!file_lock) {
4693 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4694 status = nfserr_jukebox;
4695 goto out;
4696 }
4697 locks_init_lock(file_lock);
4698 switch (lockt->lt_type) {
4699 case NFS4_READ_LT:
4700 case NFS4_READW_LT:
4701 file_lock->fl_type = F_RDLCK;
4702 break;
4703 case NFS4_WRITE_LT:
4704 case NFS4_WRITEW_LT:
4705 file_lock->fl_type = F_WRLCK;
4706 break;
4707 default:
4708 dprintk("NFSD: nfs4_lockt: bad lock type!\n");
4709 status = nfserr_inval;
4710 goto out;
4711 }
4712
4713 lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, nn);
4714 if (lo)
4715 file_lock->fl_owner = (fl_owner_t)lo;
4716 file_lock->fl_pid = current->tgid;
4717 file_lock->fl_flags = FL_POSIX;
4718
4719 file_lock->fl_start = lockt->lt_offset;
4720 file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
4721
4722 nfs4_transform_lock_offset(file_lock);
4723
4724 status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
4725 if (status)
4726 goto out;
4727
4728 if (file_lock->fl_type != F_UNLCK) {
4729 status = nfserr_denied;
4730 nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
4731 }
4732 out:
4733 nfs4_unlock_state();
4734 if (file_lock)
4735 locks_free_lock(file_lock);
4736 return status;
4737 }
4738
4739 __be32
4740 nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4741 struct nfsd4_locku *locku)
4742 {
4743 struct nfs4_ol_stateid *stp;
4744 struct file *filp = NULL;
4745 struct file_lock *file_lock = NULL;
4746 __be32 status;
4747 int err;
4748 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4749
4750 dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
4751 (long long) locku->lu_offset,
4752 (long long) locku->lu_length);
4753
4754 if (check_lock_length(locku->lu_offset, locku->lu_length))
4755 return nfserr_inval;
4756
4757 nfs4_lock_state();
4758
4759 status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4760 &locku->lu_stateid, NFS4_LOCK_STID,
4761 &stp, nn);
4762 if (status)
4763 goto out;
4764 filp = find_any_file(stp->st_file);
4765 if (!filp) {
4766 status = nfserr_lock_range;
4767 goto out;
4768 }
4769 file_lock = locks_alloc_lock();
4770 if (!file_lock) {
4771 dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
4772 status = nfserr_jukebox;
4773 goto out;
4774 }
4775 locks_init_lock(file_lock);
4776 file_lock->fl_type = F_UNLCK;
4777 file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4778 file_lock->fl_pid = current->tgid;
4779 file_lock->fl_file = filp;
4780 file_lock->fl_flags = FL_POSIX;
4781 file_lock->fl_lmops = &nfsd_posix_mng_ops;
4782 file_lock->fl_start = locku->lu_offset;
4783
4784 file_lock->fl_end = last_byte_offset(locku->lu_offset,
4785 locku->lu_length);
4786 nfs4_transform_lock_offset(file_lock);
4787
4788 err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
4789 if (err) {
4790 dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
4791 goto out_nfserr;
4792 }
4793 update_stateid(&stp->st_stid.sc_stateid);
4794 memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4795
4796 out:
4797 nfsd4_bump_seqid(cstate, status);
4798 if (!cstate->replay_owner)
4799 nfs4_unlock_state();
4800 if (file_lock)
4801 locks_free_lock(file_lock);
4802 return status;
4803
4804 out_nfserr:
4805 status = nfserrno(err);
4806 goto out;
4807 }
4808
4809 /*
4810 * returns
4811 * 1: locks held by lockowner
4812 * 0: no locks held by lockowner
4813 */
4814 static int
4815 check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
4816 {
4817 struct file_lock **flpp;
4818 struct inode *inode = filp->fi_inode;
4819 int status = 0;
4820
4821 spin_lock(&inode->i_lock);
4822 for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4823 if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
4824 status = 1;
4825 goto out;
4826 }
4827 }
4828 out:
4829 spin_unlock(&inode->i_lock);
4830 return status;
4831 }
4832
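/*
 * RELEASE_LOCKOWNER: the client asserts it holds no locks under this
 * owner. Verify that (nfserr_locks_held otherwise) before releasing the
 * owner; an unknown owner is most likely a replay and counts as success.
 */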
4833 __be32
4834 nfsd4_release_lockowner(struct svc_rqst *rqstp,
4835 struct nfsd4_compound_state *cstate,
4836 struct nfsd4_release_lockowner *rlockowner)
4837 {
4838 clientid_t *clid = &rlockowner->rl_clientid;
4839 struct nfs4_stateowner *sop = NULL, *tmp;
4840 struct nfs4_lockowner *lo;
4841 struct nfs4_ol_stateid *stp;
4842 struct xdr_netobj *owner = &rlockowner->rl_owner;
4843 unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4844 __be32 status;
4845 struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
4846
4847 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
4848 clid->cl_boot, clid->cl_id);
4849
4850 nfs4_lock_state();
4851
4852 status = lookup_clientid(clid, cstate, nn);
4853 if (status)
4854 goto out;
4855
4856 status = nfserr_locks_held;
4857
4858 /* Find the matching lock stateowner */
4859 list_for_each_entry(tmp, &nn->ownerstr_hashtbl[hashval], so_strhash) {
4860 if (tmp->so_is_open_owner)
4861 continue;
4862 if (same_owner_str(tmp, owner, clid)) {
4863 sop = tmp;
4864 break;
4865 }
4866 }
4867
4868 /* No matching owner found, maybe a replay? Just declare victory... */
4869 if (!sop) {
4870 status = nfs_ok;
4871 goto out;
4872 }
4873
4874 lo = lockowner(sop);
4875 /* see if there are still any locks associated with it */
4876 list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
4877 if (check_for_locks(stp->st_file, lo))
4878 goto out;
4879 }
4880
4881 status = nfs_ok;
4882 release_lockowner(lo);
4883 out:
4884 nfs4_unlock_state();
4885 return status;
4886 }
4887
4888 static inline struct nfs4_client_reclaim *
4889 alloc_reclaim(void)
4890 {
4891 return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
4892 }
4893
4894 bool
4895 nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
4896 {
4897 struct nfs4_client_reclaim *crp;
4898
4899 crp = nfsd4_find_reclaim_client(name, nn);
4900 return (crp && crp->cr_clp);
4901 }
4902
4903 /*
4904 * On failure all bets are off for reclaim: the client ends up with nfserr_no_grace.
4905 */
4906 struct nfs4_client_reclaim *
4907 nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
4908 {
4909 unsigned int strhashval;
4910 struct nfs4_client_reclaim *crp;
4911
4912 dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
4913 crp = alloc_reclaim();
4914 if (crp) {
4915 strhashval = clientstr_hashval(name);
4916 INIT_LIST_HEAD(&crp->cr_strhash);
4917 list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
4918 memcpy(crp->cr_recdir, name, HEXDIR_LEN);
4919 crp->cr_clp = NULL;
4920 nn->reclaim_str_hashtbl_size++;
4921 }
4922 return crp;
4923 }
4924
4925 void
4926 nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
4927 {
4928 list_del(&crp->cr_strhash);
4929 kfree(crp);
4930 nn->reclaim_str_hashtbl_size--;
4931 }
4932
4933 void
4934 nfs4_release_reclaim(struct nfsd_net *nn)
4935 {
4936 struct nfs4_client_reclaim *crp = NULL;
4937 int i;
4938
4939 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
4940 while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
4941 crp = list_entry(nn->reclaim_str_hashtbl[i].next,
4942 struct nfs4_client_reclaim, cr_strhash);
4943 nfs4_remove_reclaim_record(crp, nn);
4944 }
4945 }
4946 WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
4947 }
4948
4949 /*
4950 * Called from OPEN, CLAIM_PREVIOUS with a new clientid.
*/
4951 struct nfs4_client_reclaim *
4952 nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
4953 {
4954 unsigned int strhashval;
4955 struct nfs4_client_reclaim *crp = NULL;
4956
4957 dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
4958
4959 strhashval = clientstr_hashval(recdir);
4960 list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
4961 if (same_name(crp->cr_recdir, recdir)) {
4962 return crp;
4963 }
4964 }
4965 return NULL;
4966 }
4967
4968 /*
4969 * Called from OPEN. Look for clientid in reclaim list.
4970 */
4971 __be32
4972 nfs4_check_open_reclaim(clientid_t *clid,
4973 struct nfsd4_compound_state *cstate,
4974 struct nfsd_net *nn)
4975 {
4976 __be32 status;
4977
4978 /* find clientid in conf_id_hashtbl */
4979 status = lookup_clientid(clid, cstate, nn);
4980 if (status)
4981 return nfserr_reclaim_bad;
4982
4983 if (nfsd4_client_record_check(cstate->clp))
4984 return nfserr_reclaim_bad;
4985
4986 return nfs_ok;
4987 }
4988
4989 #ifdef CONFIG_NFSD_FAULT_INJECTION
4990
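/*
 * Fault-injection hooks: forcibly expire clients or release their opens,
 * locks and delegations, or just count/print them. These are debugging
 * aids (presumably driven from the nfsd fault-injection debugfs files)
 * and are compiled out unless CONFIG_NFSD_FAULT_INJECTION is set.
 */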
4991 u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
4992 {
4993 if (mark_client_expired(clp))
4994 return 0;
4995 expire_client(clp);
4996 return 1;
4997 }
4998
4999 u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
5000 {
5001 char buf[INET6_ADDRSTRLEN];
5002 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5003 printk(KERN_INFO "NFS Client: %s\n", buf);
5004 return 1;
5005 }
5006
5007 static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
5008 const char *type)
5009 {
5010 char buf[INET6_ADDRSTRLEN];
5011 rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5012 printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
5013 }
5014
5015 static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
5016 void (*func)(struct nfs4_ol_stateid *))
5017 {
5018 struct nfs4_openowner *oop;
5019 struct nfs4_ol_stateid *stp, *st_next;
5020 struct nfs4_ol_stateid *lst, *lst_next;
5021 u64 count = 0;
5022
5023 list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
5024 list_for_each_entry_safe(stp, st_next,
5025 &oop->oo_owner.so_stateids, st_perstateowner) {
5026 list_for_each_entry_safe(lst, lst_next,
5027 &stp->st_locks, st_locks) {
5028 if (func)
5029 func(lst);
5030 if (++count == max)
5031 return count;
5032 }
5033 }
5034 }
5035
5036 return count;
5037 }
5038
5039 u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
5040 {
5041 return nfsd_foreach_client_lock(clp, max, release_lock_stateid);
5042 }
5043
5044 u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
5045 {
5046 u64 count = nfsd_foreach_client_lock(clp, max, NULL);
5047 nfsd_print_count(clp, count, "locked files");
5048 return count;
5049 }
5050
5051 static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
5052 {
5053 struct nfs4_openowner *oop, *next;
5054 u64 count = 0;
5055
5056 list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
5057 if (func)
5058 func(oop);
5059 if (++count == max)
5060 break;
5061 }
5062
5063 return count;
5064 }
5065
5066 u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
5067 {
5068 return nfsd_foreach_client_open(clp, max, release_openowner);
5069 }
5070
5071 u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
5072 {
5073 u64 count = nfsd_foreach_client_open(clp, max, NULL);
5074 nfsd_print_count(clp, count, "open files");
5075 return count;
5076 }
5077
5078 static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
5079 struct list_head *victims)
5080 {
5081 struct nfs4_delegation *dp, *next;
5082 u64 count = 0;
5083
5084 lockdep_assert_held(&state_lock);
5085 list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
5086 if (victims)
5087 list_move(&dp->dl_recall_lru, victims);
5088 if (++count == max)
5089 break;
5090 }
5091 return count;
5092 }
5093
5094 u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
5095 {
5096 struct nfs4_delegation *dp, *next;
5097 LIST_HEAD(victims);
5098 u64 count;
5099
5100 spin_lock(&state_lock);
5101 count = nfsd_find_all_delegations(clp, max, &victims);
5102 spin_unlock(&state_lock);
5103
5104 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
5105 revoke_delegation(dp);
5106
5107 return count;
5108 }
5109
5110 u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
5111 {
5112 struct nfs4_delegation *dp, *next;
5113 LIST_HEAD(victims);
5114 u64 count;
5115
5116 spin_lock(&state_lock);
5117 count = nfsd_find_all_delegations(clp, max, &victims);
5118 list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
5119 nfsd_break_one_deleg(dp);
5120 spin_unlock(&state_lock);
5121
5122 return count;
5123 }
5124
5125 u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
5126 {
5127 u64 count = 0;
5128
5129 spin_lock(&state_lock);
5130 count = nfsd_find_all_delegations(clp, max, NULL);
5131 spin_unlock(&state_lock);
5132
5133 nfsd_print_count(clp, count, "delegations");
5134 return count;
5135 }
5136
5137 u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
5138 {
5139 struct nfs4_client *clp, *next;
5140 u64 count = 0;
5141 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
5142
5143 if (!nfsd_netns_ready(nn))
5144 return 0;
5145
5146 list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5147 count += func(clp, max - count);
5148 if ((max != 0) && (count >= max))
5149 break;
5150 }
5151
5152 return count;
5153 }
5154
5155 struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
5156 {
5157 struct nfs4_client *clp;
5158 struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
5159
5160 if (!nfsd_netns_ready(nn))
5161 return NULL;
5162
5163 list_for_each_entry(clp, &nn->client_lru, cl_lru) {
5164 if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
5165 return clp;
5166 }
5167 return NULL;
5168 }
5169
5170 #endif /* CONFIG_NFSD_FAULT_INJECTION */
5171
5172 /*
5173 * Since the lifetime of a delegation isn't limited to that of an open, a
5174 * client may quite reasonably hang on to a delegation as long as it has
5175 * the inode cached. This becomes an obvious problem the first time a
5176 * client's inode cache approaches the size of the server's total memory.
5177 *
5178 * For now we avoid this problem by imposing a hard limit on the number
5179 * of delegations, which varies according to the server's memory size.
5180 */
5181 static void
5182 set_max_delegations(void)
5183 {
5184 /*
5185 * Allow at most 4 delegations per megabyte of RAM. Quick
5186 * estimates suggest that in the worst case (where every delegation
5187 * is for a different inode), a delegation could take about 1.5K,
5188 * giving a worst case usage of about 6% of memory.
5189 */
5190 max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
5191 }
5192
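/*
 * Allocate and initialize the per-net-namespace state tables: clients
 * hashed by clientid (conf/unconf) and indexed by name (rbtrees), plus
 * the owner-string and sessionid hash tables and the LRU lists.
 */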
5193 static int nfs4_state_create_net(struct net *net)
5194 {
5195 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5196 int i;
5197
5198 nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
5199 CLIENT_HASH_SIZE, GFP_KERNEL);
5200 if (!nn->conf_id_hashtbl)
5201 goto err;
5202 nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
5203 CLIENT_HASH_SIZE, GFP_KERNEL);
5204 if (!nn->unconf_id_hashtbl)
5205 goto err_unconf_id;
5206 nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
5207 OWNER_HASH_SIZE, GFP_KERNEL);
5208 if (!nn->ownerstr_hashtbl)
5209 goto err_ownerstr;
5210 nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
5211 SESSION_HASH_SIZE, GFP_KERNEL);
5212 if (!nn->sessionid_hashtbl)
5213 goto err_sessionid;
5214
5215 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5216 INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
5217 INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
5218 }
5219 for (i = 0; i < OWNER_HASH_SIZE; i++)
5220 INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
5221 for (i = 0; i < SESSION_HASH_SIZE; i++)
5222 INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
5223 nn->conf_name_tree = RB_ROOT;
5224 nn->unconf_name_tree = RB_ROOT;
5225 INIT_LIST_HEAD(&nn->client_lru);
5226 INIT_LIST_HEAD(&nn->close_lru);
5227 INIT_LIST_HEAD(&nn->del_recall_lru);
5228 spin_lock_init(&nn->client_lock);
5229
5230 INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
5231 get_net(net);
5232
5233 return 0;
5234
5235 err_sessionid:
5236 kfree(nn->ownerstr_hashtbl);
5237 err_ownerstr:
5238 kfree(nn->unconf_id_hashtbl);
5239 err_unconf_id:
5240 kfree(nn->conf_id_hashtbl);
5241 err:
5242 return -ENOMEM;
5243 }
5244
5245 static void
5246 nfs4_state_destroy_net(struct net *net)
5247 {
5248 int i;
5249 struct nfs4_client *clp = NULL;
5250 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5251
5252 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5253 while (!list_empty(&nn->conf_id_hashtbl[i])) {
5254 clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
5255 destroy_client(clp);
5256 }
5257 }
5258
5259 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5260 while (!list_empty(&nn->unconf_id_hashtbl[i])) {
5261 clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
5262 destroy_client(clp);
5263 }
5264 }
5265
5266 kfree(nn->sessionid_hashtbl);
5267 kfree(nn->ownerstr_hashtbl);
5268 kfree(nn->unconf_id_hashtbl);
5269 kfree(nn->conf_id_hashtbl);
5270 put_net(net);
5271 }
5272
5273 int
5274 nfs4_state_start_net(struct net *net)
5275 {
5276 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5277 int ret;
5278
5279 ret = nfs4_state_create_net(net);
5280 if (ret)
5281 return ret;
5282 nfsd4_client_tracking_init(net);
5283 nn->boot_time = get_seconds();
5284 locks_start_grace(net, &nn->nfsd4_manager);
5285 nn->grace_ended = false;
5286 printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
5287 nn->nfsd4_grace, net);
5288 queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
5289 return 0;
5290 }
5291
5292 /* initialization to perform when the nfsd service is started: */
5293
5294 int
5295 nfs4_state_start(void)
5296 {
5297 int ret;
5298
5299 ret = set_callback_cred();
5300 if (ret)
5301 return -ENOMEM;
5302 laundry_wq = create_singlethread_workqueue("nfsd4");
5303 if (laundry_wq == NULL) {
5304 ret = -ENOMEM;
5305 goto out_recovery;
5306 }
5307 ret = nfsd4_create_callback_queue();
5308 if (ret)
5309 goto out_free_laundry;
5310
5311 set_max_delegations();
5312
5313 return 0;
5314
5315 out_free_laundry:
5316 destroy_workqueue(laundry_wq);
5317 out_recovery:
5318 return ret;
5319 }
5320
5321 void
5322 nfs4_state_shutdown_net(struct net *net)
5323 {
5324 struct nfs4_delegation *dp = NULL;
5325 struct list_head *pos, *next, reaplist;
5326 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5327
5328 cancel_delayed_work_sync(&nn->laundromat_work);
5329 locks_end_grace(&nn->nfsd4_manager);
5330
5331 nfs4_lock_state();
5332 INIT_LIST_HEAD(&reaplist);
5333 spin_lock(&state_lock);
5334 list_for_each_safe(pos, next, &nn->del_recall_lru) {
5335 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5336 list_move(&dp->dl_recall_lru, &reaplist);
5337 }
5338 spin_unlock(&state_lock);
5339 list_for_each_safe(pos, next, &reaplist) {
5340 dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5341 destroy_delegation(dp);
5342 }
5343
5344 nfsd4_client_tracking_exit(net);
5345 nfs4_state_destroy_net(net);
5346 nfs4_unlock_state();
5347 }
5348
5349 void
5350 nfs4_state_shutdown(void)
5351 {
5352 destroy_workqueue(laundry_wq);
5353 nfsd4_destroy_callback_queue();
5354 }
5355
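/*
 * NFSv4.1 "current stateid" plumbing: put_stateid() remembers the stateid
 * an op just produced, and get_stateid() substitutes it when a later op in
 * the same compound passes the special "current" stateid value.
 */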
5356 static void
5357 get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5358 {
5359 if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
5360 memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
5361 }
5362
5363 static void
5364 put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
5365 {
5366 if (cstate->minorversion) {
5367 memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
5368 SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
5369 }
5370 }
5371
5372 void
5373 clear_current_stateid(struct nfsd4_compound_state *cstate)
5374 {
5375 CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
5376 }
5377
5378 /*
5379 * functions to set current state id
5380 */
5381 void
5382 nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5383 {
5384 put_stateid(cstate, &odp->od_stateid);
5385 }
5386
5387 void
5388 nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
5389 {
5390 put_stateid(cstate, &open->op_stateid);
5391 }
5392
5393 void
5394 nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
5395 {
5396 put_stateid(cstate, &close->cl_stateid);
5397 }
5398
5399 void
5400 nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
5401 {
5402 put_stateid(cstate, &lock->lk_resp_stateid);
5403 }
5404
5405 /*
5406 * functions to consume current state id
5407 */
5408
5409 void
5410 nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
5411 {
5412 get_stateid(cstate, &odp->od_stateid);
5413 }
5414
5415 void
5416 nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
5417 {
5418 get_stateid(cstate, &drp->dr_stateid);
5419 }
5420
5421 void
5422 nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
5423 {
5424 get_stateid(cstate, &fsp->fr_stateid);
5425 }
5426
5427 void
5428 nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
5429 {
5430 get_stateid(cstate, &setattr->sa_stateid);
5431 }
5432
5433 void
5434 nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
5435 {
5436 get_stateid(cstate, &close->cl_stateid);
5437 }
5438
5439 void
5440 nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
5441 {
5442 get_stateid(cstate, &locku->lu_stateid);
5443 }
5444
5445 void
5446 nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
5447 {
5448 get_stateid(cstate, &read->rd_stateid);
5449 }
5450
5451 void
5452 nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
5453 {
5454 get_stateid(cstate, &write->wr_stateid);
5455 }