Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntlock.c | |
3 | * | |
4 | * Lock handling for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/types.h> | |
11 | #include <linux/time.h> | |
12 | #include <linux/nfs_fs.h> | |
13 | #include <linux/sunrpc/clnt.h> | |
14 | #include <linux/sunrpc/svc.h> | |
15 | #include <linux/lockd/lockd.h> | |
16 | #include <linux/smp_lock.h> | |
17 | ||
18 | #define NLMDBG_FACILITY NLMDBG_CLIENT | |
19 | ||
20 | /* | |
21 | * Local function prototypes | |
22 | */ | |
23 | static int reclaimer(void *ptr); | |
24 | ||
25 | /* | |
26 | * The following functions handle blocking and granting from the | |
27 | * client perspective. | |
28 | */ | |
29 | ||
30 | /* | |
31 | * This is the representation of a blocked client lock. | |
32 | */ | |
/*
 * This is the representation of a blocked client lock: one entry per
 * outstanding NLM blocking-lock request, linked on the global
 * nlm_blocked list so the GRANTED callback can find and wake it.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list (anchored at nlm_blocked) */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* server the lock was requested from */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	u32			b_status;	/* grant callback status; NLM_LCK_BLOCKED while still waiting */
};
41 | ||
4f15e2b1 | 42 | static LIST_HEAD(nlm_blocked); |
1da177e4 LT |
43 | |
44 | /* | |
ecdbf769 | 45 | * Queue up a lock for blocking so that the GRANTED request can see it |
1da177e4 | 46 | */ |
ecdbf769 TM |
47 | int nlmclnt_prepare_block(struct nlm_rqst *req, struct nlm_host *host, struct file_lock *fl) |
48 | { | |
49 | struct nlm_wait *block; | |
50 | ||
51 | BUG_ON(req->a_block != NULL); | |
52 | block = kmalloc(sizeof(*block), GFP_KERNEL); | |
53 | if (block == NULL) | |
54 | return -ENOMEM; | |
55 | block->b_host = host; | |
56 | block->b_lock = fl; | |
57 | init_waitqueue_head(&block->b_wait); | |
58 | block->b_status = NLM_LCK_BLOCKED; | |
59 | ||
60 | list_add(&block->b_list, &nlm_blocked); | |
61 | req->a_block = block; | |
62 | ||
63 | return 0; | |
64 | } | |
65 | ||
66 | void nlmclnt_finish_block(struct nlm_rqst *req) | |
1da177e4 | 67 | { |
ecdbf769 TM |
68 | struct nlm_wait *block = req->a_block; |
69 | ||
70 | if (block == NULL) | |
71 | return; | |
72 | req->a_block = NULL; | |
73 | list_del(&block->b_list); | |
74 | kfree(block); | |
75 | } | |
1da177e4 | 76 | |
ecdbf769 TM |
77 | /* |
78 | * Block on a lock | |
79 | */ | |
/*
 * Block on a lock.
 *
 * Sleeps until the server's GRANTED callback flips block->b_status away
 * from NLM_LCK_BLOCKED, until @timeout (jiffies) expires, or until a
 * signal arrives. Returns the wait_event_interruptible_timeout() result:
 * remaining jiffies on wakeup, 0 on timeout, negative if interrupted.
 * On a grant, the callback status is copied into req->a_res.status.
 */
long nlmclnt_block(struct nlm_rqst *req, long timeout)
{
	struct nlm_wait *block = req->a_block;
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (!req->a_args.block)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != NLM_LCK_BLOCKED,
			timeout);

	/* The callback fired: hand its status to the caller and re-arm
	 * b_status so this entry can be waited on again if needed. */
	if (block->b_status != NLM_LCK_BLOCKED) {
		req->a_res.status = block->b_status;
		block->b_status = NLM_LCK_BLOCKED;
	}

	return ret;
}
110 | ||
111 | /* | |
112 | * The server lockd has called us back to tell us the lock was granted | |
113 | */ | |
114 | u32 | |
115 | nlmclnt_grant(struct nlm_lock *lock) | |
116 | { | |
117 | struct nlm_wait *block; | |
ecdbf769 | 118 | u32 res = nlm_lck_denied; |
1da177e4 LT |
119 | |
120 | /* | |
121 | * Look up blocked request based on arguments. | |
122 | * Warning: must not use cookie to match it! | |
123 | */ | |
4f15e2b1 | 124 | list_for_each_entry(block, &nlm_blocked, b_list) { |
ecdbf769 TM |
125 | if (nlm_compare_locks(block->b_lock, &lock->fl)) { |
126 | /* Alright, we found a lock. Set the return status | |
127 | * and wake up the caller | |
128 | */ | |
129 | block->b_status = NLM_LCK_GRANTED; | |
130 | wake_up(&block->b_wait); | |
131 | res = nlm_granted; | |
132 | } | |
1da177e4 | 133 | } |
ecdbf769 | 134 | return res; |
1da177e4 LT |
135 | } |
136 | ||
137 | /* | |
138 | * The following procedures deal with the recovery of locks after a | |
139 | * server crash. | |
140 | */ | |
141 | ||
142 | /* | |
143 | * Mark the locks for reclaiming. | |
144 | * FIXME: In 2.5 we don't want to iterate through any global file_lock_list. | |
145 | * Maintain NLM lock reclaiming lists in the nlm_host instead. | |
146 | */ | |
147 | static | |
148 | void nlmclnt_mark_reclaim(struct nlm_host *host) | |
149 | { | |
150 | struct file_lock *fl; | |
151 | struct inode *inode; | |
152 | struct list_head *tmp; | |
153 | ||
154 | list_for_each(tmp, &file_lock_list) { | |
155 | fl = list_entry(tmp, struct file_lock, fl_link); | |
156 | ||
157 | inode = fl->fl_file->f_dentry->d_inode; | |
158 | if (inode->i_sb->s_magic != NFS_SUPER_MAGIC) | |
159 | continue; | |
9b5b1f5b TM |
160 | if (fl->fl_u.nfs_fl.owner == NULL) |
161 | continue; | |
1da177e4 LT |
162 | if (fl->fl_u.nfs_fl.owner->host != host) |
163 | continue; | |
164 | if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_GRANTED)) | |
165 | continue; | |
166 | fl->fl_u.nfs_fl.flags |= NFS_LCK_RECLAIM; | |
167 | } | |
168 | } | |
169 | ||
170 | /* | |
171 | * Someone has sent us an SM_NOTIFY. Ensure we bind to the new port number, | |
172 | * that we mark locks for reclaiming, and that we bump the pseudo NSM state. | |
173 | */ | |
174 | static inline | |
175 | void nlmclnt_prepare_reclaim(struct nlm_host *host, u32 newstate) | |
176 | { | |
177 | host->h_monitored = 0; | |
178 | host->h_nsmstate = newstate; | |
179 | host->h_state++; | |
180 | host->h_nextrebind = 0; | |
181 | nlm_rebind_host(host); | |
182 | nlmclnt_mark_reclaim(host); | |
183 | dprintk("NLM: reclaiming locks for host %s", host->h_name); | |
184 | } | |
185 | ||
186 | /* | |
187 | * Reclaim all locks on server host. We do this by spawning a separate | |
188 | * reclaimer thread. | |
189 | */ | |
190 | void | |
191 | nlmclnt_recovery(struct nlm_host *host, u32 newstate) | |
192 | { | |
193 | if (host->h_reclaiming++) { | |
194 | if (host->h_nsmstate == newstate) | |
195 | return; | |
196 | nlmclnt_prepare_reclaim(host, newstate); | |
197 | } else { | |
198 | nlmclnt_prepare_reclaim(host, newstate); | |
199 | nlm_get_host(host); | |
200 | __module_get(THIS_MODULE); | |
201 | if (kernel_thread(reclaimer, host, CLONE_KERNEL) < 0) | |
202 | module_put(THIS_MODULE); | |
203 | } | |
204 | } | |
205 | ||
/*
 * Kernel thread that re-acquires (reclaims) every lock previously held
 * on @ptr (a struct nlm_host) after the server restarted, then wakes
 * any processes still blocked on that host so they retry.
 * Started by nlmclnt_recovery(); exits via module_put_and_exit().
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct list_head *tmp;
	struct file_lock *fl;
	struct inode *inode;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up();

	/* First, reclaim all locks that have been marked. */
restart:
	list_for_each(tmp, &file_lock_list) {
		fl = list_entry(tmp, struct file_lock, fl_link);

		/* Skip anything that is not an NLM lock owned by this host
		 * and marked for reclaim by nlmclnt_mark_reclaim(). */
		inode = fl->fl_file->f_dentry->d_inode;
		if (inode->i_sb->s_magic != NFS_SUPER_MAGIC)
			continue;
		if (fl->fl_u.nfs_fl.owner == NULL)
			continue;
		if (fl->fl_u.nfs_fl.owner->host != host)
			continue;
		if (!(fl->fl_u.nfs_fl.flags & NFS_LCK_RECLAIM))
			continue;

		fl->fl_u.nfs_fl.flags &= ~NFS_LCK_RECLAIM;
		nlmclnt_reclaim(host, fl);
		if (signalled())
			break;
		/* nlmclnt_reclaim() may have slept, so the list could have
		 * changed under us — restart the walk from the head. */
		goto restart;
	}

	host->h_reclaiming = 0;

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = NLM_LCK_DENIED_GRACE_PERIOD;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}