Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
08e0e7c8 | 2 | * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
3 | * |
4 | * This software may be freely redistributed under the terms of the | |
5 | * GNU General Public License. | |
6 | * | |
7 | * You should have received a copy of the GNU General Public License | |
8 | * along with this program; if not, write to the Free Software | |
9 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
10 | * | |
11 | * Authors: David Woodhouse <dwmw2@cambridge.redhat.com> | |
12 | * David Howells <dhowells@redhat.com> | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/kernel.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/init.h> | |
08e0e7c8 | 19 | #include <linux/circ_buf.h> |
1da177e4 | 20 | #include "internal.h" |
08e0e7c8 DH |
21 | |
/* interval (in seconds) between vnode status updates; only referenced by the
 * compiled-out updater at the bottom of this file */
unsigned afs_vnode_update_timeout = 10;

/* number of free slots in a server's deferred callback-break ring buffer
 * (cb_break[] is a power-of-2 sized circular buffer) */
#define afs_breakring_space(server) \
	CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail,	\
		   ARRAY_SIZE((server)->cb_break))

//static void afs_callback_updater(struct work_struct *);

/* single-threaded workqueue on which broken-callback revalidation and
 * deferred callback-break dispatch are run */
static struct workqueue_struct *afs_callback_update_worker;
1da177e4 | 31 | |
1da177e4 LT |
32 | /* |
33 | * allow the fileserver to request callback state (re-)initialisation | |
34 | */ | |
08e0e7c8 | 35 | void afs_init_callback_state(struct afs_server *server) |
1da177e4 | 36 | { |
08e0e7c8 | 37 | struct afs_vnode *vnode; |
1da177e4 | 38 | |
08e0e7c8 | 39 | _enter("{%p}", server); |
1da177e4 | 40 | |
1da177e4 LT |
41 | spin_lock(&server->cb_lock); |
42 | ||
08e0e7c8 DH |
43 | /* kill all the promises on record from this server */ |
44 | while (!RB_EMPTY_ROOT(&server->cb_promises)) { | |
45 | vnode = rb_entry(server->cb_promises.rb_node, | |
46 | struct afs_vnode, cb_promise); | |
47 | printk("\nUNPROMISE on %p\n", vnode); | |
48 | rb_erase(&vnode->cb_promise, &server->cb_promises); | |
49 | vnode->cb_promised = false; | |
50 | } | |
1da177e4 | 51 | |
08e0e7c8 DH |
52 | spin_unlock(&server->cb_lock); |
53 | _leave(""); | |
54 | } | |
1da177e4 | 55 | |
08e0e7c8 DH |
/*
 * handle the data invalidation side of a callback being broken
 * - runs on afs_callback_update_worker; queued by afs_break_callback()
 */
void afs_broken_callback_work(struct work_struct *work)
{
	struct afs_vnode *vnode =
		container_of(work, struct afs_vnode, cb_broken_work);

	_enter("");

	/* a deleted vnode has nothing left to revalidate */
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		return;

	/* we're only interested in dealing with a broken callback on *this*
	 * vnode and only if no-one else has dealt with it yet */
	if (!mutex_trylock(&vnode->cb_broken_lock))
		return; /* someone else is dealing with it */

	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		/* a directory's cached permission data may now be stale */
		if (S_ISDIR(vnode->vfs_inode.i_mode))
			afs_clear_permits(vnode);

		/* refetch the status; on failure just give up - presumably
		 * a later break or access retries (TODO confirm) */
		if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
			goto out;

		if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
			goto out;

		/* if the vnode's data version number changed then its contents
		 * are different */
		if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
			_debug("zap data");
			invalidate_remote_inode(&vnode->vfs_inode);
		}
	}

out:
	mutex_unlock(&vnode->cb_broken_lock);

	/* avoid the potential race whereby the mutex_trylock() in this
	 * function happens again between the clear_bit() and the
	 * mutex_unlock() */
	if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
		_debug("requeue");
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
	}
	_leave("");
}
104 | ||
/*
 * actually break a callback
 * - marks the vnode broken, withdraws its promise from the server's tree and
 *   queues the revalidation work
 */
static void afs_break_callback(struct afs_server *server,
			       struct afs_vnode *vnode)
{
	_enter("");

	set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);

	/* unlocked fast-path check; cb_promised is rechecked under cb_lock
	 * below in case the promise was withdrawn concurrently */
	if (vnode->cb_promised) {
		spin_lock(&vnode->lock);

		_debug("break callback");

		spin_lock(&server->cb_lock);
		if (vnode->cb_promised) {
			rb_erase(&vnode->cb_promise, &server->cb_promises);
			vnode->cb_promised = false;
		}
		spin_unlock(&server->cb_lock);

		/* hand revalidation off to afs_broken_callback_work() */
		queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
		spin_unlock(&vnode->lock);
	}
}
131 | ||
/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_server *server,
				   struct afs_fid *fid)
{
	struct afs_vnode *vnode;
	struct rb_node *p;

	/* binary-search the server's vnode tree, keyed on
	 * (vid, vnode, unique) in that order */
	_debug("find");
	spin_lock(&server->fs_lock);
	p = server->fs_vnodes.rb_node;
	while (p) {
		vnode = rb_entry(p, struct afs_vnode, server_rb);
		if (fid->vid < vnode->fid.vid)
			p = p->rb_left;
		else if (fid->vid > vnode->fid.vid)
			p = p->rb_right;
		else if (fid->vnode < vnode->fid.vnode)
			p = p->rb_left;
		else if (fid->vnode > vnode->fid.vnode)
			p = p->rb_right;
		else if (fid->unique < vnode->fid.unique)
			p = p->rb_left;
		else if (fid->unique > vnode->fid.unique)
			p = p->rb_right;
		else
			goto found;
	}

	/* not found so we just ignore it (it may have moved to another
	 * server) */
not_available:
	_debug("not avail");
	spin_unlock(&server->fs_lock);
	_leave("");
	return;

found:
	_debug("found");
	ASSERTCMP(server, ==, vnode->server);

	/* pin the inode before dropping fs_lock; if igrab() fails the inode
	 * is being torn down, so treat the vnode as unavailable */
	if (!igrab(AFS_VNODE_TO_I(vnode)))
		goto not_available;
	spin_unlock(&server->fs_lock);

	afs_break_callback(server, vnode);
	iput(&vnode->vfs_inode);
	_leave("");
}
1da177e4 | 185 | |
1da177e4 LT |
186 | /* |
187 | * allow the fileserver to break callback promises | |
188 | */ | |
08e0e7c8 DH |
189 | void afs_break_callbacks(struct afs_server *server, size_t count, |
190 | struct afs_callback callbacks[]) | |
1da177e4 | 191 | { |
08e0e7c8 | 192 | _enter("%p,%zu,", server, count); |
1da177e4 | 193 | |
08e0e7c8 DH |
194 | ASSERT(server != NULL); |
195 | ASSERTCMP(count, <=, AFSCBMAX); | |
1da177e4 | 196 | |
08e0e7c8 | 197 | for (; count > 0; callbacks++, count--) { |
1da177e4 LT |
198 | _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }", |
199 | callbacks->fid.vid, | |
200 | callbacks->fid.vnode, | |
201 | callbacks->fid.unique, | |
202 | callbacks->version, | |
203 | callbacks->expiry, | |
204 | callbacks->type | |
205 | ); | |
08e0e7c8 DH |
206 | afs_break_one_callback(server, &callbacks->fid); |
207 | } | |
208 | ||
209 | _leave(""); | |
210 | return; | |
211 | } | |
1da177e4 | 212 | |
08e0e7c8 DH |
/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 * - the caller must ensure the break ring has space
 *   (afs_breakring_space() != 0) before calling
 */
static void afs_do_give_up_callback(struct afs_server *server,
				    struct afs_vnode *vnode)
{
	struct afs_callback *cb;

	_enter("%p,%p", server, vnode);

	/* copy the callback details into the next free ring slot */
	cb = &server->cb_break[server->cb_break_head];
	cb->fid = vnode->fid;
	cb->version = vnode->cb_version;
	cb->expiry = vnode->cb_expiry;
	cb->type = vnode->cb_type;
	/* make the slot contents visible before publishing the new head
	 * index to the consumer */
	smp_wmb();
	server->cb_break_head =
		(server->cb_break_head + 1) &
		(ARRAY_SIZE(server->cb_break) - 1);

	/* defer the breaking of callbacks to try and collect as many as
	 * possible to ship in one operation */
	switch (atomic_inc_return(&server->cb_break_n)) {
	case 1 ... AFSCBMAX - 1:
		queue_delayed_work(afs_callback_update_worker,
				   &server->cb_break_work, HZ * 2);
		break;
	case AFSCBMAX:
		/* a full batch accumulated - ship it immediately */
		afs_flush_callback_breaks(server);
		break;
	default:
		break;
	}

	/* withdraw the promise now that it's queued for return */
	ASSERT(server->cb_promises.rb_node != NULL);
	rb_erase(&vnode->cb_promise, &server->cb_promises);
	vnode->cb_promised = false;
	_leave("");
}
253 | ||
/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 * - may sleep waiting for space in the server's callback-break ring
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
	struct afs_server *server = vnode->server;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%d", vnode->cb_promised);

	_debug("GIVE UP INODE %p", &vnode->vfs_inode);

	if (!vnode->cb_promised) {
		_leave(" [not promised]");
		return;
	}

	ASSERT(server != NULL);

	spin_lock(&server->cb_lock);
	/* if the break ring is full, sleep until a slot frees up or the
	 * promise disappears by some other means */
	if (vnode->cb_promised && afs_breakring_space(server) == 0) {
		add_wait_queue(&server->cb_break_waitq, &myself);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!vnode->cb_promised ||
			    afs_breakring_space(server) != 0)
				break;
			/* drop the lock across the sleep so the ring can be
			 * drained by the dispatcher */
			spin_unlock(&server->cb_lock);
			schedule();
			spin_lock(&server->cb_lock);
		}
		remove_wait_queue(&server->cb_break_waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* of course, it's always possible for the server to break this vnode's
	 * callback first... */
	if (vnode->cb_promised)
		afs_do_give_up_callback(server, vnode);

	spin_unlock(&server->cb_lock);
	_leave("");
}
299 | ||
300 | /* | |
301 | * dispatch a deferred give up callbacks operation | |
302 | */ | |
303 | void afs_dispatch_give_up_callbacks(struct work_struct *work) | |
304 | { | |
305 | struct afs_server *server = | |
306 | container_of(work, struct afs_server, cb_break_work.work); | |
307 | ||
308 | _enter(""); | |
309 | ||
310 | /* tell the fileserver to discard the callback promises it has | |
311 | * - in the event of ENOMEM or some other error, we just forget that we | |
312 | * had callbacks entirely, and the server will call us later to break | |
313 | * them | |
314 | */ | |
315 | afs_fs_give_up_callbacks(server, &afs_async_call); | |
316 | } | |
317 | ||
/*
 * flush the outstanding callback breaks on a server
 * - cancels any pending delayed dispatch and requeues it to run immediately
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
	cancel_delayed_work(&server->cb_break_work);
	queue_delayed_work(afs_callback_update_worker,
			   &server->cb_break_work, 0);
}
327 | ||
#if 0
/*
 * update a bunch of callbacks
 *
 * NOTE(review): this function is compiled out and has bit-rotted - it
 * references identifiers that do not exist in this file (`vl', `vldb',
 * `afs_vnode_update_worker', `afs_vnode_update', `afs_put_vnode') and mixes
 * vnode and volume-location concepts; it would need substantial rework
 * before it could be enabled
 */
static void afs_callback_updater(struct work_struct *work)
{
	struct afs_server *server;
	struct afs_vnode *vnode, *xvnode;
	time_t now;
	long timeout;
	int ret;

	server = container_of(work, struct afs_server, updater);

	_enter("");

	now = get_seconds();

	/* find the first vnode to update */
	spin_lock(&server->cb_lock);
	for (;;) {
		if (RB_EMPTY_ROOT(&server->cb_promises)) {
			spin_unlock(&server->cb_lock);
			_leave(" [nothing]");
			return;
		}

		vnode = rb_entry(rb_first(&server->cb_promises),
				 struct afs_vnode, cb_promise);
		if (atomic_read(&vnode->usage) > 0)
			break;
		/* discard unused vnodes rather than updating them */
		rb_erase(&vnode->cb_promise, &server->cb_promises);
		vnode->cb_promised = false;
	}

	/* nothing due yet - reschedule for when the earliest record expires */
	timeout = vnode->update_at - now;
	if (timeout > 0) {
		queue_delayed_work(afs_vnode_update_worker,
				   &afs_vnode_update, timeout * HZ);
		spin_unlock(&server->cb_lock);
		_leave(" [nothing]");
		return;
	}

	list_del_init(&vnode->update);
	atomic_inc(&vnode->usage);
	spin_unlock(&server->cb_lock);

	/* we can now perform the update */
	_debug("update %s", vnode->vldb.name);
	vnode->state = AFS_VL_UPDATING;
	vnode->upd_rej_cnt = 0;
	vnode->upd_busy_cnt = 0;

	ret = afs_vnode_update_record(vl, &vldb);
	switch (ret) {
	case 0:
		afs_vnode_apply_update(vl, &vldb);
		vnode->state = AFS_VL_UPDATING;
		break;
	case -ENOMEDIUM:
		vnode->state = AFS_VL_VOLUME_DELETED;
		break;
	default:
		vnode->state = AFS_VL_UNCERTAIN;
		break;
	}

	/* and then reschedule */
	_debug("reschedule");
	vnode->update_at = get_seconds() + afs_vnode_update_timeout;

	spin_lock(&server->cb_lock);

	if (!list_empty(&server->cb_promises)) {
		/* next update in 10 minutes, but wait at least 1 second more
		 * than the newest record already queued so that we don't spam
		 * the VL server suddenly with lots of requests
		 */
		xvnode = list_entry(server->cb_promises.prev,
				    struct afs_vnode, update);
		if (vnode->update_at <= xvnode->update_at)
			vnode->update_at = xvnode->update_at + 1;
		xvnode = list_entry(server->cb_promises.next,
				    struct afs_vnode, update);
		timeout = xvnode->update_at - now;
		if (timeout < 0)
			timeout = 0;
	} else {
		timeout = afs_vnode_update_timeout;
	}

	list_add_tail(&vnode->update, &server->cb_promises);

	_debug("timeout %ld", timeout);
	queue_delayed_work(afs_vnode_update_worker,
			   &afs_vnode_update, timeout * HZ);
	spin_unlock(&server->cb_lock);
	afs_put_vnode(vl);
}
#endif
429 | ||
430 | /* | |
431 | * initialise the callback update process | |
432 | */ | |
433 | int __init afs_callback_update_init(void) | |
434 | { | |
435 | afs_callback_update_worker = | |
436 | create_singlethread_workqueue("kafs_callbackd"); | |
437 | return afs_callback_update_worker ? 0 : -ENOMEM; | |
ec26815a | 438 | } |
1da177e4 | 439 | |
/*
 * shut down the callback update process
 * - destroy_workqueue() drains any remaining queued work before freeing
 */
void __exit afs_callback_update_kill(void)
{
	destroy_workqueue(afs_callback_update_worker);
}