/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "nfs4trace.h"

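/*
 * Release the delegation's credential (if any) and free the structure
 * once the current RCU grace period has elapsed.
 */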
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred) {
                put_rpccred(delegation->cred);
                delegation->cred = NULL;
        }
        kfree_rcu(delegation, rcu);
}

/**
 * nfs_mark_delegation_referenced - set delegation's REFERENCED flag
 * @delegation: delegation to process
 *
 */
void nfs_mark_delegation_referenced(struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);
}

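/*
 * Check whether the inode holds a delegation covering all of the
 * requested open modes and is not currently being returned.
 * Optionally marks the delegation as referenced.
 */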
static int
nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
{
        struct nfs_delegation *delegation;
        int ret = 0;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL && (delegation->type & flags) == flags &&
            !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
                if (mark)
                        nfs_mark_delegation_referenced(delegation);
                ret = 1;
        }
        rcu_read_unlock();
        return ret;
}

/**
 * nfs4_have_delegation - check if inode has a delegation, and mark it
 * NFS_DELEGATION_REFERENCED if there is one
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_have_delegation(struct inode *inode, fmode_t flags)
{
        return nfs4_do_check_delegation(inode, flags, true);
}

/**
 * nfs4_check_delegation - check if inode has a delegation, but do not
 * mark it NFS_DELEGATION_REFERENCED
 * @inode: inode to check
 * @flags: delegation types to check for
 *
 * Returns one if inode has the indicated delegation, otherwise zero.
 */
int nfs4_check_delegation(struct inode *inode, fmode_t flags)
{
        return nfs4_do_check_delegation(inode, flags, false);
}

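/*
 * Reclaim all POSIX and flock locks held under this open context so
 * that they are covered by the new open state rather than by the
 * delegation that is being returned.
 */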
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        struct file_lock_context *flctx = inode->i_flctx;
        struct list_head *list;
        int status = 0;

        if (flctx == NULL)
                goto out;

        list = &flctx->flc_posix;
        spin_lock(&flctx->flc_lock);
restart:
        list_for_each_entry(fl, list, fl_list) {
                if (nfs_file_open_context(fl->fl_file) != ctx)
                        continue;
                spin_unlock(&flctx->flc_lock);
                status = nfs4_lock_delegation_recall(fl, state, stateid);
                if (status < 0)
                        goto out;
                spin_lock(&flctx->flc_lock);
        }
        if (list == &flctx->flc_posix) {
                list = &flctx->flc_flock;
                goto restart;
        }
        spin_unlock(&flctx->flc_lock);
out:
        return status;
}

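/*
 * Walk the inode's open contexts and, for every open state that still
 * relies on the delegation identified by the given stateid, reclaim
 * the open (and any locks) so that it is backed by a regular open
 * stateid instead.
 */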
static int nfs_delegation_claim_opens(struct inode *inode, const nfs4_stateid *stateid)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state_owner *sp;
        struct nfs4_state *state;
        unsigned int seq;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                if (!nfs4_valid_open_stateid(state))
                        continue;
                if (!nfs4_stateid_match(&state->stateid, stateid))
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                sp = state->owner;
                /* Block nfs4_proc_unlck */
                mutex_lock(&sp->so_delegreturn_mutex);
                seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
                err = nfs4_open_delegation_recall(ctx, state, stateid);
                if (!err)
                        err = nfs_delegation_claim_locks(ctx, state, stateid);
                if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
                        err = -EAGAIN;
                mutex_unlock(&sp->so_delegreturn_mutex);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return err;
                goto again;
        }
        spin_unlock(&inode->i_lock);
        return 0;
}

/**
 * nfs_inode_reclaim_delegation - process a delegation reclaim request
 * @inode: inode to process
 * @cred: credential to use for request
 * @res: new delegation state from server
 *
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                                  struct nfs_openres *res)
{
        struct nfs_delegation *delegation;
        struct rpc_cred *oldcred = NULL;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL) {
                        nfs4_stateid_copy(&delegation->stateid, &res->delegation);
                        delegation->type = res->delegation_type;
                        delegation->maxsize = res->maxsize;
                        oldcred = delegation->cred;
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
                        spin_unlock(&delegation->lock);
                        rcu_read_unlock();
                        put_rpccred(oldcred);
                        trace_nfs4_reclaim_delegation(inode, res->delegation_type);
                } else {
                        /* We appear to have raced with a delegation return. */
                        spin_unlock(&delegation->lock);
                        rcu_read_unlock();
                        nfs_inode_set_delegation(inode, cred, res);
                }
        } else {
                rcu_read_unlock();
        }
}

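/*
 * Send DELEGRETURN for the delegation (unless the server has revoked
 * it) and free the local delegation structure.
 */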
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        int res = 0;

        if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
                res = nfs4_proc_delegreturn(inode,
                                delegation->cred,
                                &delegation->stateid,
                                issync);
        nfs_free_delegation(delegation);
        return res;
}

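/*
 * Take a reference to the delegation's inode, or return NULL if the
 * delegation has already been detached from it.
 */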
static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation)
{
        struct inode *inode = NULL;

        spin_lock(&delegation->lock);
        if (delegation->inode != NULL)
                inode = igrab(delegation->inode);
        spin_unlock(&delegation->lock);
        return inode;
}

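/*
 * Mark the inode's delegation as being returned. Returns the delegation
 * if this caller won the right to return it, or NULL if there is no
 * delegation or a return is already in progress.
 * Must be called under rcu_read_lock().
 */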
static struct nfs_delegation *
nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
{
        struct nfs_delegation *ret = NULL;
        struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);

        if (delegation == NULL)
                goto out;
        spin_lock(&delegation->lock);
        if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                ret = delegation;
        spin_unlock(&delegation->lock);
out:
        return ret;
}

static struct nfs_delegation *
nfs_start_delegation_return(struct nfs_inode *nfsi)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        delegation = nfs_start_delegation_return_locked(nfsi);
        rcu_read_unlock();
        return delegation;
}

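/*
 * The return attempt failed: clear the RETURNING flag, re-mark the
 * delegation for return and ask the state manager to retry later.
 */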
static void
nfs_abort_delegation_return(struct nfs_delegation *delegation,
                            struct nfs_client *clp)
{
        spin_lock(&delegation->lock);
        clear_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        spin_unlock(&delegation->lock);
        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
}

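/*
 * Unlink the delegation from both the per-server list and the inode,
 * provided it is still the inode's current delegation. The caller must
 * hold clp->cl_lock. Returns the detached delegation, or NULL.
 */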
static struct nfs_delegation *
nfs_detach_delegation_locked(struct nfs_inode *nfsi,
                struct nfs_delegation *delegation,
                struct nfs_client *clp)
{
        struct nfs_delegation *deleg_cur =
                rcu_dereference_protected(nfsi->delegation,
                                lockdep_is_held(&clp->cl_lock));

        if (deleg_cur == NULL || delegation != deleg_cur)
                return NULL;

        spin_lock(&delegation->lock);
        set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
}

static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi,
                struct nfs_delegation *delegation,
                struct nfs_server *server)
{
        struct nfs_client *clp = server->nfs_client;

        spin_lock(&clp->cl_lock);
        delegation = nfs_detach_delegation_locked(nfsi, delegation, clp);
        spin_unlock(&clp->cl_lock);
        return delegation;
}

static struct nfs_delegation *
nfs_inode_detach_delegation(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_delegation *delegation;

        delegation = nfs_start_delegation_return(nfsi);
        if (delegation == NULL)
                return NULL;
        return nfs_detach_delegation(nfsi, delegation, server);
}

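/*
 * If the server handed out a newer version of the same delegation,
 * update the stateid sequence number and delegation type in place.
 */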
static void
nfs_update_inplace_delegation(struct nfs_delegation *delegation,
                const struct nfs_delegation *update)
{
        if (nfs4_stateid_is_newer(&update->stateid, &delegation->stateid)) {
                delegation->stateid.seqid = update->stateid.seqid;
                smp_wmb();
                delegation->type = update->type;
        }
}

/**
 * nfs_inode_set_delegation - set up a delegation on an inode
 * @inode: inode to which delegation applies
 * @cred: cred to use for subsequent delegation processing
 * @res: new delegation state from server
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation, *old_delegation;
        struct nfs_delegation *freeme = NULL;
        int status = 0;

        delegation = kmalloc(sizeof(*delegation), GFP_NOFS);
        if (delegation == NULL)
                return -ENOMEM;
        nfs4_stateid_copy(&delegation->stateid, &res->delegation);
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = inode->i_version;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;
        delegation->flags = 1<<NFS_DELEGATION_REFERENCED;
        spin_lock_init(&delegation->lock);

        spin_lock(&clp->cl_lock);
        old_delegation = rcu_dereference_protected(nfsi->delegation,
                                        lockdep_is_held(&clp->cl_lock));
        if (old_delegation != NULL) {
                /* Is this an update of the existing delegation? */
                if (nfs4_stateid_match_other(&old_delegation->stateid,
                                &delegation->stateid)) {
                        nfs_update_inplace_delegation(old_delegation,
                                        delegation);
                        goto out;
                }
                /*
                 * Deal with broken servers that hand out two
                 * delegations for the same file.
                 * Allow for upgrades to a WRITE delegation, but
                 * nothing else.
                 */
                dfprintk(FILE, "%s: server %s handed out "
                                "a duplicate delegation!\n",
                                __func__, clp->cl_hostname);
                if (delegation->type == old_delegation->type ||
                    !(delegation->type & FMODE_WRITE)) {
                        freeme = delegation;
                        delegation = NULL;
                        goto out;
                }
                if (test_and_set_bit(NFS_DELEGATION_RETURNING,
                                        &old_delegation->flags))
                        goto out;
                freeme = nfs_detach_delegation_locked(nfsi,
                                old_delegation, clp);
                if (freeme == NULL)
                        goto out;
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;

        /* Ensure we revalidate the attributes and page cache! */
        spin_lock(&inode->i_lock);
        nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
        spin_unlock(&inode->i_lock);
        trace_nfs4_set_delegation(inode, res->delegation_type);

out:
        spin_unlock(&clp->cl_lock);
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        if (freeme != NULL)
                nfs_do_return_delegation(inode, freeme, 0);
        return status;
}

/*
 * Basic procedure for returning a delegation to the server
 */
static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        int err = 0;

        if (delegation == NULL)
                return 0;
        do {
                if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
                        break;
                err = nfs_delegation_claim_opens(inode, &delegation->stateid);
                if (!issync || err != -EAGAIN)
                        break;
                /*
                 * Guard against state recovery
                 */
                err = nfs4_wait_clnt_recover(clp);
        } while (err == 0);

        if (err) {
                nfs_abort_delegation_return(delegation, clp);
                goto out;
        }
        if (!nfs_detach_delegation(nfsi, delegation, NFS_SERVER(inode)))
                goto out;

        err = nfs_do_return_delegation(inode, delegation, issync);
out:
        return err;
}

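/*
 * Decide whether a delegation should be returned now: either it has
 * been explicitly marked for return, or it is marked "return if
 * closed" and the inode no longer has any open files.
 */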
static bool nfs_delegation_need_return(struct nfs_delegation *delegation)
{
        bool ret = false;

        if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
                goto out;
        if (test_and_clear_bit(NFS_DELEGATION_RETURN, &delegation->flags))
                ret = true;
        if (test_and_clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags) && !ret) {
                struct inode *inode;

                spin_lock(&delegation->lock);
                inode = delegation->inode;
                if (inode && list_empty(&NFS_I(inode)->open_files))
                        ret = true;
                spin_unlock(&delegation->lock);
        }
out:
        return ret;
}

/**
 * nfs_client_return_marked_delegations - return previously marked delegations
 * @clp: nfs_client to process
 *
 * Note that this function is designed to be called by the state
 * manager thread. For this reason, it cannot flush the dirty data,
 * since that could deadlock in case of a state recovery error.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_client_return_marked_delegations(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;
        int err = 0;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                                                super_list) {
                        if (!nfs_delegation_need_return(delegation))
                                continue;
                        if (!nfs_sb_active(server->super))
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL) {
                                rcu_read_unlock();
                                nfs_sb_deactive(server->super);
                                goto restart;
                        }
                        delegation = nfs_start_delegation_return_locked(NFS_I(inode));
                        rcu_read_unlock();

                        err = nfs_end_delegation_return(inode, delegation, 0);
                        iput(inode);
                        nfs_sb_deactive(server->super);
                        if (!err)
                                goto restart;
                        set_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state);
                        return err;
                }
        }
        rcu_read_unlock();
        return 0;
}

/**
 * nfs_inode_return_delegation_noreclaim - return delegation, don't reclaim opens
 * @inode: inode to process
 *
 * Does not protect against delegation reclaims, therefore really only safe
 * to be called from nfs4_clear_inode().
 */
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
        struct nfs_delegation *delegation;

        delegation = nfs_inode_detach_delegation(inode);
        if (delegation != NULL)
                nfs_do_return_delegation(inode, delegation, 0);
}

/**
 * nfs4_inode_return_delegation - synchronously return a delegation
 * @inode: inode to process
 *
 * This routine will always flush any dirty data to disk on the
 * assumption that if we need to return the delegation, then
 * we should stop caching.
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs4_inode_return_delegation(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int err = 0;

        nfs_wb_all(inode);
        delegation = nfs_start_delegation_return(nfsi);
        if (delegation != NULL)
                err = nfs_end_delegation_return(inode, delegation, 1);
        return err;
}

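/*
 * Mark a delegation to be returned once the last open file on the
 * inode is closed, and tell the state manager there is work to do.
 */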
static void nfs_mark_return_if_closed_delegation(struct nfs_server *server,
                struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

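/*
 * Mark a delegation for immediate return and tell the state manager
 * there is work to do.
 */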
static void nfs_mark_return_delegation(struct nfs_server *server,
                struct nfs_delegation *delegation)
{
        set_bit(NFS_DELEGATION_RETURN, &delegation->flags);
        set_bit(NFS4CLNT_DELEGRETURN, &server->nfs_client->cl_state);
}

static bool nfs_server_mark_return_all_delegations(struct nfs_server *server)
{
        struct nfs_delegation *delegation;
        bool ret = false;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                nfs_mark_return_delegation(server, delegation);
                ret = true;
        }
        return ret;
}

static void nfs_client_mark_return_all_delegations(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_server_mark_return_all_delegations(server);
        rcu_read_unlock();
}

static void nfs_delegation_run_state_manager(struct nfs_client *clp)
{
        if (test_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state))
                nfs4_schedule_state_manager(clp);
}

/**
 * nfs_expire_all_delegations
 * @clp: client to process
 *
 */
void nfs_expire_all_delegations(struct nfs_client *clp)
{
        nfs_client_mark_return_all_delegations(clp);
        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_server_return_all_delegations - return delegations for one superblock
 * @server: nfs_server to process
 *
 */
void nfs_server_return_all_delegations(struct nfs_server *server)
{
        struct nfs_client *clp = server->nfs_client;
        bool need_wait;

        if (clp == NULL)
                return;

        rcu_read_lock();
        need_wait = nfs_server_mark_return_all_delegations(server);
        rcu_read_unlock();

        if (need_wait) {
                nfs4_schedule_state_manager(clp);
                nfs4_wait_clnt_recover(clp);
        }
}

static void nfs_mark_return_unused_delegation_types(struct nfs_server *server,
                                                    fmode_t flags)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if ((delegation->type == (FMODE_READ|FMODE_WRITE)) && !(flags & FMODE_WRITE))
                        continue;
                if (delegation->type & flags)
                        nfs_mark_return_if_closed_delegation(server, delegation);
        }
}

static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *clp,
                                                           fmode_t flags)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_unused_delegation_types(server, flags);
        rcu_read_unlock();
}

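/*
 * Mark the inode's delegation as revoked by the server and schedule its
 * return; the REVOKED flag ensures no DELEGRETURN is actually sent.
 */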
static void nfs_revoke_delegation(struct inode *inode)
{
        struct nfs_delegation *delegation;

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation != NULL) {
                set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
                nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
        }
        rcu_read_unlock();
}

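/**
 * nfs_remove_bad_delegation - handle a delegation that is unusable
 * @inode: inode to process
 *
 * The delegation is revoked and detached from the inode, and any open
 * or lock state that was relying on it is scheduled for recovery.
 */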
void nfs_remove_bad_delegation(struct inode *inode)
{
        struct nfs_delegation *delegation;

        nfs_revoke_delegation(inode);
        delegation = nfs_inode_detach_delegation(inode);
        if (delegation) {
                nfs_inode_find_state_and_recover(inode, &delegation->stateid);
                nfs_free_delegation(delegation);
        }
}
EXPORT_SYMBOL_GPL(nfs_remove_bad_delegation);

/**
 * nfs_expire_unused_delegation_types
 * @clp: client to process
 * @flags: delegation types to expire
 *
 */
void nfs_expire_unused_delegation_types(struct nfs_client *clp, fmode_t flags)
{
        nfs_client_mark_return_unused_delegation_types(clp, flags);
        nfs_delegation_run_state_manager(clp);
}

static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                if (test_and_clear_bit(NFS_DELEGATION_REFERENCED, &delegation->flags))
                        continue;
                nfs_mark_return_if_closed_delegation(server, delegation);
        }
}

/**
 * nfs_expire_unreferenced_delegations - Eliminate unused delegations
 * @clp: nfs_client to process
 *
 */
void nfs_expire_unreferenced_delegations(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_mark_return_unreferenced_delegations(server);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
}

/**
 * nfs_async_inode_return_delegation - asynchronously return a delegation
 * @inode: inode to process
 * @stateid: state ID information
 *
 * Returns zero on success, or a negative errno value.
 */
int nfs_async_inode_return_delegation(struct inode *inode,
                                      const nfs4_stateid *stateid)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct nfs_client *clp = server->nfs_client;
        struct nfs_delegation *delegation;

        filemap_flush(inode->i_mapping);

        rcu_read_lock();
        delegation = rcu_dereference(NFS_I(inode)->delegation);
        if (delegation == NULL)
                goto out_enoent;

        if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
                goto out_enoent;
        nfs_mark_return_delegation(server, delegation);
        rcu_read_unlock();

        nfs_delegation_run_state_manager(clp);
        return 0;
out_enoent:
        rcu_read_unlock();
        return -ENOENT;
}

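/*
 * Search one server's delegation list for an inode matching the given
 * filehandle, and return a referenced inode pointer if found.
 */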
static struct inode *
nfs_delegation_find_inode_server(struct nfs_server *server,
                                 const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list) {
                spin_lock(&delegation->lock);
                if (delegation->inode != NULL &&
                    nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                }
                spin_unlock(&delegation->lock);
                if (res != NULL)
                        break;
        }
        return res;
}

/**
 * nfs_delegation_find_inode - retrieve the inode associated with a delegation
 * @clp: client state handle
 * @fhandle: filehandle from a delegation recall
 *
 * Returns pointer to inode matching "fhandle," or NULL if a matching inode
 * cannot be found.
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
                                        const struct nfs_fh *fhandle)
{
        struct nfs_server *server;
        struct inode *res = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                res = nfs_delegation_find_inode_server(server, fhandle);
                if (res != NULL)
                        break;
        }
        rcu_read_unlock();
        return res;
}

static void nfs_delegation_mark_reclaim_server(struct nfs_server *server)
{
        struct nfs_delegation *delegation;

        list_for_each_entry_rcu(delegation, &server->delegations, super_list)
                set_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags);
}

/**
 * nfs_delegation_mark_reclaim - mark all delegations as needing to be reclaimed
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_server *server;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                nfs_delegation_mark_reclaim_server(server);
        rcu_read_unlock();
}

/**
 * nfs_delegation_reap_unclaimed - reap unclaimed delegations after reboot recovery is done
 * @clp: nfs_client to process
 *
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct nfs_server *server;
        struct inode *inode;

restart:
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                list_for_each_entry_rcu(delegation, &server->delegations,
                                                                super_list) {
                        if (test_bit(NFS_DELEGATION_RETURNING,
                                                &delegation->flags))
                                continue;
                        if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
                                                &delegation->flags) == 0)
                                continue;
                        if (!nfs_sb_active(server->super))
                                continue;
                        inode = nfs_delegation_grab_inode(delegation);
                        if (inode == NULL) {
                                rcu_read_unlock();
                                nfs_sb_deactive(server->super);
                                goto restart;
                        }
                        delegation = nfs_start_delegation_return_locked(NFS_I(inode));
                        rcu_read_unlock();
                        if (delegation != NULL) {
                                delegation = nfs_detach_delegation(NFS_I(inode),
                                                delegation, server);
                                if (delegation != NULL)
                                        nfs_free_delegation(delegation);
                        }
                        iput(inode);
                        nfs_sb_deactive(server->super);
                        goto restart;
                }
        }
        rcu_read_unlock();
}

/**
 * nfs_delegations_present - check for existence of delegations
 * @clp: client state handle
 *
 * Returns one if there are any nfs_delegation structures attached
 * to this nfs_client.
 */
int nfs_delegations_present(struct nfs_client *clp)
{
        struct nfs_server *server;
        int ret = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link)
                if (!list_empty(&server->delegations)) {
                        ret = 1;
                        break;
                }
        rcu_read_unlock();
        return ret;
}

/**
 * nfs4_copy_delegation_stateid - Copy inode's state ID information
 * @dst: stateid data structure to fill in
 * @inode: inode to check
 * @flags: delegation type requirement
 *
 * Returns "true" and fills in "dst->data" if inode had a delegation,
 * otherwise "false" is returned.
 */
bool nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode,
                                  fmode_t flags)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        bool ret;

        flags &= FMODE_READ|FMODE_WRITE;
        rcu_read_lock();
        delegation = rcu_dereference(nfsi->delegation);
        ret = (delegation != NULL && (delegation->type & flags) == flags);
        if (ret) {
                nfs4_stateid_copy(dst, &delegation->stateid);
                nfs_mark_delegation_referenced(delegation);
        }
        rcu_read_unlock();
        return ret;
}