FS-Cache: Out of line fscache_operation_init()
fs/fscache/operation.c
/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_operation_init - Do basic initialisation of an operation
 * @op: The operation to initialise
 * @processor: The processor function to assign
 * @release: The release function to assign
 *
 * Do basic initialisation of an operation.  The caller must still set the
 * flags and the object as needed.
 */
void fscache_operation_init(struct fscache_operation *op,
			    fscache_operation_processor_t processor,
			    fscache_operation_release_t release)
{
	INIT_WORK(&op->work, fscache_op_work_func);
	atomic_set(&op->usage, 1);
	op->state = FSCACHE_OP_ST_INITIALISED;
	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->processor = processor;
	op->release = release;
	INIT_LIST_HEAD(&op->pend_link);
}
EXPORT_SYMBOL(fscache_operation_init);
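
/*
 * Usage sketch (illustrative only, not part of this file): a caller embeds
 * struct fscache_operation in a larger record, initialises it, then picks
 * the dispatch type before submission.  The my_* names are hypothetical.
 */
#if 0
struct my_op {
	struct fscache_operation op;
	void *private_data;
};

static struct my_op *my_alloc_op(void)
{
	struct my_op *mop = kzalloc(sizeof(*mop), GFP_NOIO);

	if (!mop)
		return NULL;
	fscache_operation_init(&mop->op, my_processor, my_release);
	mop->op.flags = FSCACHE_OP_ASYNC;	/* run via the thread pool */
	return mop;
}
#endif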

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERT(fscache_object_is_available(op->object));
	ASSERTCMP(atomic_read(&op->usage), >, 0);
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_ASYNC:
		_debug("queue async");
		atomic_inc(&op->usage);
		if (!queue_work(fscache_op_wq, &op->work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		pr_err("Unexpected op type %lx\n", op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
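
/*
 * Usage sketch (hypothetical, not compiled): an FSCACHE_OP_MYTHREAD op is
 * never queued on the workqueue; the submitting thread waits for the
 * FSCACHE_OP_WAITING bit to clear and then performs the work itself.
 */
#if 0
	fscache_operation_init(&mop->op, NULL, my_release);
	mop->op.flags = FSCACHE_OP_MYTHREAD | (1UL << FSCACHE_OP_WAITING);

	if (fscache_submit_op(object, &mop->op) == 0)
		wait_on_bit(&mop->op.flags, FSCACHE_OP_WAITING,
			    TASK_UNINTERRUPTIBLE);
#endif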

/*
 * start an op running
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);

	op->state = FSCACHE_OP_ST_IN_PROGRESS;
	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 const struct fscache_state *ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id, object->state->name);
	kdebug("objstate=%s [%s]", object->state->name, ostate->name);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}

		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_in_progress > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
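
/*
 * Usage sketch (hypothetical): an exclusive op (an invalidation, for
 * instance) is built like any other op, with the EXCLUSIVE bit set so that
 * it drains ahead of, and blocks, ordinary reads and writes.
 */
#if 0
	fscache_operation_init(op, NULL, NULL);
	op->flags = FSCACHE_OP_MYTHREAD | (1UL << FSCACHE_OP_EXCLUSIVE);

	if (fscache_submit_exclusive_op(object, op) < 0)
		my_handle_rejection(op);	/* hypothetical error path */
#endif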

/*
 * submit an operation for an object
 * - operations may only be submitted while the object is in one of the
 *   following states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	const struct fscache_state *ostate;
	unsigned long flags;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ostate = object->state;
	smp_rmb();

	op->state = FSCACHE_OP_ST_PENDING;
	flags = READ_ONCE(object->flags);
	if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
		fscache_stat(&fscache_n_op_rejected);
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else if (unlikely(fscache_cache_is_broken(object))) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -EIO;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	} else {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		op->state = FSCACHE_OP_ST_CANCELLED;
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
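
/*
 * Usage sketch (hypothetical caller): both submit paths cancel the op on
 * failure, so the caller only has to drop its own remaining reference.
 */
#if 0
	ret = fscache_submit_op(object, &mop->op);
	if (ret < 0) {
		/* -ENOBUFS: object dying or unavailable; -EIO: cache is
		 * broken.  The op is already cancelled; just put our ref. */
		fscache_put_operation(&mop->op);
		return ret;
	}
#endif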

/*
 * queue an object for withdrawal on error, aborting all following
 * asynchronous operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * Jump start the operation processing on an object.  The caller must hold
 * object->lock.
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}
		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the object */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op,
		      void (*do_cancel)(struct fscache_operation *),
		      bool cancel_in_progress_op)
{
	struct fscache_object *object = op->object;
	bool put = false;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
	ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (op->state == FSCACHE_OP_ST_PENDING) {
		ASSERT(!list_empty(&op->pend_link));
		list_del_init(&op->pend_link);
		put = true;
		fscache_stat(&fscache_n_op_cancelled);
		if (do_cancel)
			do_cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
		fscache_stat(&fscache_n_op_cancelled);
		if (do_cancel)
			do_cancel(op);
		op->state = FSCACHE_OP_ST_CANCELLED;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		ret = 0;
	}

	if (put)
		fscache_put_operation(op);
	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
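
/*
 * Usage sketch (hypothetical): withdrawing an op that may not have started.
 * 0 means the op was cancelled; -EBUSY means it already completed (or is in
 * progress and cancel_in_progress_op was false).
 */
#if 0
	if (fscache_cancel_op(&mop->op, my_do_cancel, false) == -EBUSY)
		my_wait_for_completion(mop);	/* hypothetical fallback */
#endif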

/*
 * Cancel all pending operations on an object
 */
void fscache_cancel_all_ops(struct fscache_object *object)
{
	struct fscache_operation *op;

	_enter("OBJ%x", object->debug_id);

	spin_lock(&object->lock);

	while (!list_empty(&object->pending_ops)) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);

		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
		op->state = FSCACHE_OP_ST_CANCELLED;

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		cond_resched_lock(&object->lock);
	}

	spin_unlock(&object->lock);
	_leave("");
}

/*
 * Record the completion or cancellation of an in-progress operation.
 */
void fscache_op_complete(struct fscache_operation *op, bool cancelled)
{
	struct fscache_object *object = op->object;

	_enter("OBJ%x", object->debug_id);

	ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
	ASSERTCMP(object->n_in_progress, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_exclusive, >, 0);
	ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
		    object->n_in_progress, ==, 1);

	spin_lock(&object->lock);

	op->state = cancelled ?
		FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE;

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
		object->n_exclusive--;
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	spin_unlock(&object->lock);
	_leave("");
}
EXPORT_SYMBOL(fscache_op_complete);
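
/*
 * Usage sketch (hypothetical processor): an op's processor runs from
 * fscache_op_work_func(), records its outcome with fscache_op_complete()
 * and must not drop the workqueue's ref; the work function does that.
 */
#if 0
static void my_processor(struct fscache_operation *op)
{
	int error = my_do_io(op);	/* hypothetical I/O helper */

	fscache_op_complete(op, error < 0);	/* cancelled if it failed */
}
#endif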

/*
 * release a reference on an operation
 * - the op is destroyed on the final put, and the object is notified
 *   (FSCACHE_OBJECT_EV_CLEARED) if this was its last outstanding op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	_debug("PUT OP");
	ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE,
		    op->state, ==, FSCACHE_OP_ST_CANCELLED);
	op->state = FSCACHE_OP_ST_DEAD;

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);
	if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
		fscache_unuse_cookie(object);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);

	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;
		spin_lock(&object->lock);

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);
		ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * execute an operation using fscache_op_wq to provide processing context -
 * the caller holds a ref to this object, so we don't need to hold one
 */
void fscache_op_work_func(struct work_struct *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(op);

	_leave("");
}