/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015, Intel Corporation.
 *
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * osc cache management.
 *
 * Author: Jinshan Xiong <jinshan.xiong@whamcloud.com>
 */

#define DEBUG_SUBSYSTEM S_OSC

#include "osc_cl_internal.h"
#include "osc_internal.h"

static int extent_debug; /* set it to be true for more debug */

static void osc_update_pending(struct osc_object *obj, int cmd, int delta);
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
			   int state);
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
			      struct osc_async_page *oap, int sent, int rc);
static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd);
static int osc_refresh_count(const struct lu_env *env,
			     struct osc_async_page *oap, int cmd);
static int osc_io_unplug_async(const struct lu_env *env,
			       struct client_obd *cli, struct osc_object *osc);
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
			   unsigned int lost_grant);

static void osc_extent_tree_dump0(int level, struct osc_object *obj,
				  const char *func, int line);
#define osc_extent_tree_dump(lvl, obj) \
	osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)

/** \addtogroup osc
 * @{
 */

/* ------------------ osc extent ------------------ */
static inline char *ext_flags(struct osc_extent *ext, char *flags)
{
	char *buf = flags;

	*buf++ = ext->oe_rw ? 'r' : 'w';
	if (ext->oe_intree)
		*buf++ = 'i';
	if (ext->oe_srvlock)
		*buf++ = 's';
	if (ext->oe_hp)
		*buf++ = 'h';
	if (ext->oe_urgent)
		*buf++ = 'u';
	if (ext->oe_memalloc)
		*buf++ = 'm';
	if (ext->oe_trunc_pending)
		*buf++ = 't';
	if (ext->oe_fsync_wait)
		*buf++ = 'Y';
	*buf = 0;
	return flags;
}
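
/*
 * Illustrative example of the encoding above: a dirty write extent that is
 * linked into the object's extent tree and marked urgent dumps as "wiu",
 * while a server-locked read extent waiting for fsync dumps as "rsY".
 */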

static inline char list_empty_marker(struct list_head *list)
{
	return list_empty(list) ? '-' : '+';
}

#define EXTSTR "[%lu -> %lu/%lu]"
#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
static const char *oes_strings[] = {
	"inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };

#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do {			\
	struct osc_extent *__ext = (extent);				\
	char __buf[16];							\
									\
	CDEBUG(lvl,							\
	       "extent %p@{" EXTSTR ", "				\
	       "[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt,	\
	       /* ----- extent part 0 ----- */				\
	       __ext, EXTPARA(__ext),					\
	       /* ----- part 1 ----- */					\
	       atomic_read(&__ext->oe_refc),				\
	       atomic_read(&__ext->oe_users),				\
	       list_empty_marker(&__ext->oe_link),			\
	       oes_strings[__ext->oe_state], ext_flags(__ext, __buf),	\
	       __ext->oe_obj,						\
	       /* ----- part 2 ----- */					\
	       __ext->oe_grants, __ext->oe_nr_pages,			\
	       list_empty_marker(&__ext->oe_pages),			\
	       waitqueue_active(&__ext->oe_waitq) ? '+' : '-',		\
	       __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner,	\
	       /* ----- part 4 ----- */					\
	       ## __VA_ARGS__);						\
} while (0)

#undef EASSERTF
#define EASSERTF(expr, ext, fmt, args...) do {				\
	if (!(expr)) {							\
		OSC_EXTENT_DUMP(D_ERROR, (ext), fmt, ##args);		\
		osc_extent_tree_dump(D_ERROR, (ext)->oe_obj);		\
		LASSERT(expr);						\
	}								\
} while (0)

#undef EASSERT
#define EASSERT(expr, ext) EASSERTF(expr, ext, "\n")

static inline struct osc_extent *rb_extent(struct rb_node *n)
{
	if (!n)
		return NULL;

	return container_of(n, struct osc_extent, oe_node);
}

static inline struct osc_extent *next_extent(struct osc_extent *ext)
{
	if (!ext)
		return NULL;

	LASSERT(ext->oe_intree);
	return rb_extent(rb_next(&ext->oe_node));
}

static inline struct osc_extent *prev_extent(struct osc_extent *ext)
{
	if (!ext)
		return NULL;

	LASSERT(ext->oe_intree);
	return rb_extent(rb_prev(&ext->oe_node));
}

static inline struct osc_extent *first_extent(struct osc_object *obj)
{
	return rb_extent(rb_first(&obj->oo_root));
}

/* object must be locked by caller. */
static int osc_extent_sanity_check0(struct osc_extent *ext,
				    const char *func, const int line)
{
	struct osc_object *obj = ext->oe_obj;
	struct osc_async_page *oap;
	int page_count;
	int rc = 0;

	if (!osc_object_is_locked(obj)) {
		rc = 9;
		goto out;
	}

	if (ext->oe_state >= OES_STATE_MAX) {
		rc = 10;
		goto out;
	}

	if (atomic_read(&ext->oe_refc) <= 0) {
		rc = 20;
		goto out;
	}

	if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users)) {
		rc = 30;
		goto out;
	}

	switch (ext->oe_state) {
	case OES_INV:
		if (ext->oe_nr_pages > 0 || !list_empty(&ext->oe_pages))
			rc = 35;
		else
			rc = 0;
		goto out;
	case OES_ACTIVE:
		if (atomic_read(&ext->oe_users) == 0) {
			rc = 40;
			goto out;
		}
		if (ext->oe_hp) {
			rc = 50;
			goto out;
		}
		if (ext->oe_fsync_wait && !ext->oe_urgent) {
			rc = 55;
			goto out;
		}
		break;
	case OES_CACHE:
		if (ext->oe_grants == 0) {
			rc = 60;
			goto out;
		}
		if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp) {
			rc = 65;
			goto out;
		}
		/* fall through */
	default:
		if (atomic_read(&ext->oe_users) > 0) {
			rc = 70;
			goto out;
		}
	}

	if (ext->oe_max_end < ext->oe_end || ext->oe_end < ext->oe_start) {
		rc = 80;
		goto out;
	}

	if (!ext->oe_osclock && ext->oe_grants > 0) {
		rc = 90;
		goto out;
	}

	if (ext->oe_osclock) {
		struct cl_lock_descr *descr;

		descr = &ext->oe_osclock->cll_descr;
		if (!(descr->cld_start <= ext->oe_start &&
		      descr->cld_end >= ext->oe_max_end)) {
			rc = 100;
			goto out;
		}
	}

	if (ext->oe_nr_pages > ext->oe_mppr) {
		rc = 105;
		goto out;
	}

	/* Do not verify page list if extent is in RPC. This is because an
	 * in-RPC extent is supposed to be exclusively accessible w/o lock.
	 */
	if (ext->oe_state > OES_CACHE) {
		rc = 0;
		goto out;
	}

	if (!extent_debug) {
		rc = 0;
		goto out;
	}

	page_count = 0;
	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		pgoff_t index = oap2cl_page(oap)->cp_index;

		++page_count;
		if (index > ext->oe_end || index < ext->oe_start) {
			rc = 110;
			goto out;
		}
	}
	if (page_count != ext->oe_nr_pages) {
		rc = 120;
		goto out;
	}

out:
	if (rc != 0)
		OSC_EXTENT_DUMP(D_ERROR, ext,
				"%s:%d sanity check %p failed with rc = %d\n",
				func, line, ext, rc);
	return rc;
}

#define sanity_check_nolock(ext) \
	osc_extent_sanity_check0(ext, __func__, __LINE__)

#define sanity_check(ext) ({						\
	int __res;							\
	osc_object_lock((ext)->oe_obj);					\
	__res = sanity_check_nolock(ext);				\
	osc_object_unlock((ext)->oe_obj);				\
	__res;								\
})

/**
 * sanity check - make sure there are no overlapping extents in the tree.
 */
static int osc_extent_is_overlapped(struct osc_object *obj,
				    struct osc_extent *ext)
{
	struct osc_extent *tmp;

	LASSERT(osc_object_is_locked(obj));

	if (!extent_debug)
		return 0;

	for (tmp = first_extent(obj); tmp; tmp = next_extent(tmp)) {
		if (tmp == ext)
			continue;
		if (tmp->oe_end >= ext->oe_start &&
		    tmp->oe_start <= ext->oe_end)
			return 1;
	}
	return 0;
}

static void osc_extent_state_set(struct osc_extent *ext, int state)
{
	LASSERT(osc_object_is_locked(ext->oe_obj));
	LASSERT(state >= OES_INV && state < OES_STATE_MAX);

	/* Never try to sanity check a state changing extent :-) */
	/* LASSERT(sanity_check_nolock(ext) == 0); */

	/* TODO: validate the state machine */
	ext->oe_state = state;
	wake_up_all(&ext->oe_waitq);
}

static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
{
	struct osc_extent *ext;

	ext = kmem_cache_alloc(osc_extent_kmem, GFP_NOFS | __GFP_ZERO);
	if (!ext)
		return NULL;

	RB_CLEAR_NODE(&ext->oe_node);
	ext->oe_obj = obj;
	atomic_set(&ext->oe_refc, 1);
	atomic_set(&ext->oe_users, 0);
	INIT_LIST_HEAD(&ext->oe_link);
	ext->oe_state = OES_INV;
	INIT_LIST_HEAD(&ext->oe_pages);
	init_waitqueue_head(&ext->oe_waitq);
	ext->oe_osclock = NULL;

	return ext;
}

static void osc_extent_free(struct osc_extent *ext)
{
	kmem_cache_free(osc_extent_kmem, ext);
}

static struct osc_extent *osc_extent_get(struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) >= 0);
	atomic_inc(&ext->oe_refc);
	return ext;
}

static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) > 0);
	if (atomic_dec_and_test(&ext->oe_refc)) {
		LASSERT(list_empty(&ext->oe_link));
		LASSERT(atomic_read(&ext->oe_users) == 0);
		LASSERT(ext->oe_state == OES_INV);
		LASSERT(!ext->oe_intree);

		if (ext->oe_osclock) {
			cl_lock_put(env, ext->oe_osclock);
			ext->oe_osclock = NULL;
		}
		osc_extent_free(ext);
	}
}

/**
 * osc_extent_put_trust() is a special version of osc_extent_put() for use
 * when it's known that the caller is not the last user. This addresses the
 * problem of lacking a lu_env ;-).
 */
static void osc_extent_put_trust(struct osc_extent *ext)
{
	LASSERT(atomic_read(&ext->oe_refc) > 1);
	LASSERT(osc_object_is_locked(ext->oe_obj));
	atomic_dec(&ext->oe_refc);
}

/**
 * Return the extent which includes pgoff @index, or return the greatest
 * previous extent in the tree.
 */
static struct osc_extent *osc_extent_search(struct osc_object *obj,
					    pgoff_t index)
{
	struct rb_node *n = obj->oo_root.rb_node;
	struct osc_extent *tmp, *p = NULL;

	LASSERT(osc_object_is_locked(obj));
	while (n) {
		tmp = rb_extent(n);
		if (index < tmp->oe_start) {
			n = n->rb_left;
		} else if (index > tmp->oe_end) {
			p = rb_extent(n);
			n = n->rb_right;
		} else {
			return tmp;
		}
	}
	return p;
}
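
/*
 * Example: if the tree holds extents [0 -> 15] and [32 -> 47] (page
 * indices), osc_extent_search(obj, 40) returns [32 -> 47],
 * osc_extent_search(obj, 20) returns the preceding extent [0 -> 15],
 * and a search below the first extent returns NULL.
 */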

/*
 * Return the extent covering @index, otherwise return NULL.
 * caller must have held object lock.
 */
static struct osc_extent *osc_extent_lookup(struct osc_object *obj,
					    pgoff_t index)
{
	struct osc_extent *ext;

	ext = osc_extent_search(obj, index);
	if (ext && ext->oe_start <= index && index <= ext->oe_end)
		return osc_extent_get(ext);
	return NULL;
}

/* caller must have held object lock. */
static void osc_extent_insert(struct osc_object *obj, struct osc_extent *ext)
{
	struct rb_node **n = &obj->oo_root.rb_node;
	struct rb_node *parent = NULL;
	struct osc_extent *tmp;

	LASSERT(ext->oe_intree == 0);
	LASSERT(ext->oe_obj == obj);
	LASSERT(osc_object_is_locked(obj));
	while (*n) {
		tmp = rb_extent(*n);
		parent = *n;

		if (ext->oe_end < tmp->oe_start)
			n = &(*n)->rb_left;
		else if (ext->oe_start > tmp->oe_end)
			n = &(*n)->rb_right;
		else
			EASSERTF(0, tmp, EXTSTR, EXTPARA(ext));
	}
	rb_link_node(&ext->oe_node, parent, n);
	rb_insert_color(&ext->oe_node, &obj->oo_root);
	osc_extent_get(ext);
	ext->oe_intree = 1;
}

/* caller must have held object lock. */
static void osc_extent_erase(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(osc_object_is_locked(obj));
	if (ext->oe_intree) {
		rb_erase(&ext->oe_node, &obj->oo_root);
		ext->oe_intree = 0;
		/* rbtree held a refcount */
		osc_extent_put_trust(ext);
	}
}

static struct osc_extent *osc_extent_hold(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(osc_object_is_locked(obj));
	LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
	if (ext->oe_state == OES_CACHE) {
		osc_extent_state_set(ext, OES_ACTIVE);
		osc_update_pending(obj, OBD_BRW_WRITE, -ext->oe_nr_pages);
	}
	atomic_inc(&ext->oe_users);
	list_del_init(&ext->oe_link);
	return osc_extent_get(ext);
}

static void __osc_extent_remove(struct osc_extent *ext)
{
	LASSERT(osc_object_is_locked(ext->oe_obj));
	LASSERT(list_empty(&ext->oe_pages));
	osc_extent_erase(ext);
	list_del_init(&ext->oe_link);
	osc_extent_state_set(ext, OES_INV);
	OSC_EXTENT_DUMP(D_CACHE, ext, "destroyed.\n");
}

static void osc_extent_remove(struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	osc_object_lock(obj);
	__osc_extent_remove(ext);
	osc_object_unlock(obj);
}

/**
 * This function is used to merge extents to get better performance. It checks
 * if @cur and @victim are contiguous at chunk level.
 */
static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
			    struct osc_extent *victim)
{
	struct osc_object *obj = cur->oe_obj;
	pgoff_t chunk_start;
	pgoff_t chunk_end;
	int ppc_bits;

	LASSERT(cur->oe_state == OES_CACHE);
	LASSERT(osc_object_is_locked(obj));
	if (!victim)
		return -EINVAL;

	if (victim->oe_state != OES_CACHE || victim->oe_fsync_wait)
		return -EBUSY;

	if (cur->oe_max_end != victim->oe_max_end)
		return -ERANGE;

	LASSERT(cur->oe_osclock == victim->oe_osclock);
	ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
	chunk_start = cur->oe_start >> ppc_bits;
	chunk_end = cur->oe_end >> ppc_bits;
	if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
	    chunk_end + 1 != victim->oe_start >> ppc_bits)
		return -ERANGE;

	OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);

	cur->oe_start = min(cur->oe_start, victim->oe_start);
	cur->oe_end = max(cur->oe_end, victim->oe_end);
	cur->oe_grants += victim->oe_grants;
	cur->oe_nr_pages += victim->oe_nr_pages;
	/* only the following bits are needed to merge */
	cur->oe_urgent |= victim->oe_urgent;
	cur->oe_memalloc |= victim->oe_memalloc;
	list_splice_init(&victim->oe_pages, &cur->oe_pages);
	list_del_init(&victim->oe_link);
	victim->oe_nr_pages = 0;

	osc_extent_get(victim);
	__osc_extent_remove(victim);
	osc_extent_put(env, victim);

	OSC_EXTENT_DUMP(D_CACHE, cur, "after merging %p.\n", victim);
	return 0;
}
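
/*
 * Illustrative example of the chunk-contiguity test above: with 64KiB chunks
 * and 4KiB pages (ppc_bits = 4), cur = [48 -> 63] occupies chunk 3 and
 * victim = [32 -> 47] occupies chunk 2; chunk_start (3) equals victim's end
 * chunk (2) + 1, so the two merge into [32 -> 63]. Extents whose chunks
 * leave a gap fail with -ERANGE.
 */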

/**
 * Drop user count of osc_extent, and unplug IO asynchronously.
 */
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
	struct osc_object *obj = ext->oe_obj;

	LASSERT(atomic_read(&ext->oe_users) > 0);
	LASSERT(sanity_check(ext) == 0);
	LASSERT(ext->oe_grants > 0);

	if (atomic_dec_and_lock(&ext->oe_users, &obj->oo_lock)) {
		LASSERT(ext->oe_state == OES_ACTIVE);
		if (ext->oe_trunc_pending) {
			/* a truncate process is waiting for this extent.
			 * This may happen due to a race, check
			 * osc_cache_truncate_start().
			 */
			osc_extent_state_set(ext, OES_TRUNC);
			ext->oe_trunc_pending = 0;
		} else {
			osc_extent_state_set(ext, OES_CACHE);
			osc_update_pending(obj, OBD_BRW_WRITE,
					   ext->oe_nr_pages);

			/* try to merge the previous and next extent. */
			osc_extent_merge(env, ext, prev_extent(ext));
			osc_extent_merge(env, ext, next_extent(ext));

			if (ext->oe_urgent)
				list_move_tail(&ext->oe_link,
					       &obj->oo_urgent_exts);
		}
		osc_object_unlock(obj);

		osc_io_unplug_async(env, osc_cli(obj), obj);
	}
	osc_extent_put(env, ext);
}

static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
{
	return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
}

/**
 * Find or create an extent which includes @index, core function to manage
 * extent tree.
 */
static struct osc_extent *osc_extent_find(const struct lu_env *env,
					  struct osc_object *obj, pgoff_t index,
					  int *grants)
{
	struct client_obd *cli = osc_cli(obj);
	struct cl_lock *lock;
	struct osc_extent *cur;
	struct osc_extent *ext;
	struct osc_extent *conflict = NULL;
	struct osc_extent *found = NULL;
	pgoff_t chunk;
	pgoff_t max_end;
	int max_pages; /* max_pages_per_rpc */
	int chunksize;
	int ppc_bits; /* pages per chunk bits */
	int chunk_mask;
	int rc;

	cur = osc_extent_alloc(obj);
	if (!cur)
		return ERR_PTR(-ENOMEM);

	lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
	LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);

	LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
	ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	chunk_mask = ~((1 << ppc_bits) - 1);
	chunksize = 1 << cli->cl_chunkbits;
	chunk = index >> ppc_bits;
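
	/* Worked example (illustrative values): with cl_chunkbits = 16 and
	 * PAGE_CACHE_SHIFT = 12, ppc_bits = 4, chunk_mask = ~0xf and
	 * chunksize = 64KiB; for index = 21 this gives chunk = 1, and below
	 * cur->oe_start = 21 & ~0xf = 16 and cur->oe_end = 31, i.e. the new
	 * extent initially covers exactly the chunk containing the page.
	 */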

	/* align end to RPC edge; RPC size may not be a power-of-2 integer. */
	max_pages = cli->cl_max_pages_per_rpc;
	LASSERT((max_pages & ~chunk_mask) == 0);
	max_end = index - (index % max_pages) + max_pages - 1;
	max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);

	/* initialize new extent by parameters so far */
	cur->oe_max_end = max_end;
	cur->oe_start = index & chunk_mask;
	cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
	if (cur->oe_start < lock->cll_descr.cld_start)
		cur->oe_start = lock->cll_descr.cld_start;
	if (cur->oe_end > max_end)
		cur->oe_end = max_end;
	cur->oe_osclock = lock;
	cur->oe_grants = 0;
	cur->oe_mppr = max_pages;

	/* grants has been allocated by caller */
	LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
		 "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
	LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR, EXTPARA(cur));

restart:
	osc_object_lock(obj);
	ext = osc_extent_search(obj, cur->oe_start);
	if (!ext)
		ext = first_extent(obj);
	while (ext) {
		loff_t ext_chk_start = ext->oe_start >> ppc_bits;
		loff_t ext_chk_end = ext->oe_end >> ppc_bits;

		LASSERT(sanity_check_nolock(ext) == 0);
		if (chunk > ext_chk_end + 1)
			break;

		/* if covered by different locks, no chance to match */
		if (lock != ext->oe_osclock) {
			EASSERTF(!overlapped(ext, cur), ext,
				 EXTSTR, EXTPARA(cur));

			ext = next_extent(ext);
			continue;
		}

		/* discontiguous chunks? */
		if (chunk + 1 < ext_chk_start) {
			ext = next_extent(ext);
			continue;
		}

		/* ok, from now on, ext and cur have these attrs:
		 * 1. covered by the same lock
		 * 2. contiguous at chunk level or overlapping.
		 */

		if (overlapped(ext, cur)) {
			/* cur is the minimum unit, so overlapping means
			 * full containment.
			 */
			EASSERTF((ext->oe_start <= cur->oe_start &&
				  ext->oe_end >= cur->oe_end),
				 ext, EXTSTR, EXTPARA(cur));

			if (ext->oe_state > OES_CACHE || ext->oe_fsync_wait) {
				/* for simplicity, we wait for this extent to
				 * finish before going forward.
				 */
				conflict = osc_extent_get(ext);
				break;
			}

			found = osc_extent_hold(ext);
			break;
		}

		/* non-overlapped extent */
		if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
			/* we can't do anything for a non OES_CACHE extent, or
			 * if there is someone waiting for this extent to be
			 * flushed, try next one.
			 */
			ext = next_extent(ext);
			continue;
		}

		/* check if they belong to the same rpc slot before trying to
		 * merge. the extents are not overlapped and contiguous at
		 * chunk level to get here.
		 */
		if (ext->oe_max_end != max_end) {
			/* if they don't belong to the same RPC slot or
			 * max_pages_per_rpc has ever changed, do not merge.
			 */
			ext = next_extent(ext);
			continue;
		}

		/* it's required that an extent must be contiguous at chunk
		 * level so that we know the whole extent is covered by grant
		 * (the pages in the extent are NOT required to be contiguous).
		 * Otherwise, it would be too difficult to know which
		 * chunks have grants allocated.
		 */

		/* try to do front merge - extend ext's start */
		if (chunk + 1 == ext_chk_start) {
			/* ext must be chunk size aligned */
			EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);

			/* pull ext's start back to cover cur */
			ext->oe_start = cur->oe_start;
			ext->oe_grants += chunksize;
			*grants -= chunksize;

			found = osc_extent_hold(ext);
		} else if (chunk == ext_chk_end + 1) {
			/* rear merge */
			ext->oe_end = cur->oe_end;
			ext->oe_grants += chunksize;
			*grants -= chunksize;

			/* try to merge with the next one because we just fill
			 * in a gap
			 */
			if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
				/* we can save extent tax from next extent */
				*grants += cli->cl_extent_tax;

			found = osc_extent_hold(ext);
		}
		if (found)
			break;

		ext = next_extent(ext);
	}

	osc_extent_tree_dump(D_CACHE, obj);
	if (found) {
		LASSERT(!conflict);
		if (!IS_ERR(found)) {
			LASSERT(found->oe_osclock == cur->oe_osclock);
			OSC_EXTENT_DUMP(D_CACHE, found,
					"found caching ext for %lu.\n", index);
		}
	} else if (!conflict) {
		/* create a new extent */
		EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
		cur->oe_grants = chunksize + cli->cl_extent_tax;
		*grants -= cur->oe_grants;
		LASSERT(*grants >= 0);

		cur->oe_state = OES_CACHE;
		found = osc_extent_hold(cur);
		osc_extent_insert(obj, cur);
		OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
				index, lock->cll_descr.cld_end);
	}
	osc_object_unlock(obj);

	if (conflict) {
		LASSERT(!found);

		/* wait for IO to finish. Note that the conflicting extent
		 * cannot be an OES_TRUNC extent.
		 */
		rc = osc_extent_wait(env, conflict, OES_INV);
		osc_extent_put(env, conflict);
		conflict = NULL;
		if (rc < 0) {
			found = ERR_PTR(rc);
			goto out;
		}

		goto restart;
	}

out:
	osc_extent_put(env, cur);
	LASSERT(*grants >= 0);
	return found;
}

/**
 * Called when IO is finished to an extent.
 */
int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
		      int sent, int rc)
{
	struct client_obd *cli = osc_cli(ext->oe_obj);
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	int nr_pages = ext->oe_nr_pages;
	int lost_grant = 0;
	int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
	__u64 last_off = 0;
	int last_count = -1;
	int last_page_off = 0;

	OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");

	ext->oe_rc = rc ?: ext->oe_nr_pages;
	EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
	list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
				 oap_pending_item) {
		list_del_init(&oap->oap_rpc_item);
		list_del_init(&oap->oap_pending_item);
		if (last_off <= oap->oap_obj_off) {
			last_off = oap->oap_obj_off;
			last_count = oap->oap_count;
			last_page_off = oap->oap_page_off;
		}

		--ext->oe_nr_pages;
		osc_ap_completion(env, cli, oap, sent, rc);
	}
	EASSERT(ext->oe_nr_pages == 0, ext);

	if (!sent) {
		lost_grant = ext->oe_grants;
	} else if (blocksize < PAGE_CACHE_SIZE &&
		   last_count != PAGE_CACHE_SIZE) {
		/* For short writes we shouldn't count parts of pages that
		 * span a whole chunk on the OST side, or our accounting goes
		 * wrong. Should match the code in filter_grant_check.
		 *
		 * Note: |oap| is stale after the list walk above, so use the
		 * offset/count recorded for the last page instead.
		 */
		int offset = last_page_off & ~CFS_PAGE_MASK;
		int count = last_count + (offset & (blocksize - 1));
		int end = (offset + last_count) & (blocksize - 1);

		if (end)
			count += blocksize - end;

		lost_grant = PAGE_CACHE_SIZE - count;
	}
	if (ext->oe_grants > 0)
		osc_free_grant(cli, nr_pages, lost_grant);

	osc_extent_remove(ext);
	/* put the refcount for RPC */
	osc_extent_put(env, ext);
	return 0;
}

static int extent_wait_cb(struct osc_extent *ext, int state)
{
	int ret;

	osc_object_lock(ext->oe_obj);
	ret = ext->oe_state == state;
	osc_object_unlock(ext->oe_obj);

	return ret;
}

/**
 * Wait for the extent's state to become @state.
 */
static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
			   int state)
{
	struct osc_object *obj = ext->oe_obj;
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
						  LWI_ON_SIGNAL_NOOP, NULL);
	int rc = 0;

	osc_object_lock(obj);
	LASSERT(sanity_check_nolock(ext) == 0);
	/* `Kick' this extent only if the caller is waiting for it to be
	 * written out.
	 */
	if (state == OES_INV && !ext->oe_urgent && !ext->oe_hp &&
	    !ext->oe_trunc_pending) {
		if (ext->oe_state == OES_ACTIVE) {
			ext->oe_urgent = 1;
		} else if (ext->oe_state == OES_CACHE) {
			ext->oe_urgent = 1;
			osc_extent_hold(ext);
			rc = 1;
		}
	}
	osc_object_unlock(obj);
	if (rc == 1)
		osc_extent_release(env, ext);

	/* wait for the extent until its state becomes @state */
	rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
	if (rc == -ETIMEDOUT) {
		OSC_EXTENT_DUMP(D_ERROR, ext,
				"%s: wait ext to %d timedout, recovery in progress?\n",
				osc_export(obj)->exp_obd->obd_name, state);

		lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
		rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
				  &lwi);
	}
	if (rc == 0 && ext->oe_rc < 0)
		rc = ext->oe_rc;
	return rc;
}

/**
 * Discard pages with index greater than @trunc_index. If @ext is overlapped
 * with @trunc_index, then partial truncate happens.
 */
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
			       bool partial)
{
	struct cl_env_nest nest;
	struct lu_env *env;
	struct cl_io *io;
	struct osc_object *obj = ext->oe_obj;
	struct client_obd *cli = osc_cli(obj);
	struct osc_async_page *oap;
	struct osc_async_page *tmp;
	int pages_in_chunk = 0;
	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	__u64 trunc_chunk = trunc_index >> ppc_bits;
	int grants = 0;
	int nr_pages = 0;
	int rc = 0;

	LASSERT(sanity_check(ext) == 0);
	EASSERT(ext->oe_state == OES_TRUNC, ext);
	EASSERT(!ext->oe_urgent, ext);

	/* Request new lu_env.
	 * We can't use that env from osc_cache_truncate_start() because
	 * it's from lov_io_sub and not fully initialized.
	 */
	env = cl_env_nested_get(&nest);
	io = &osc_env_info(env)->oti_io;
	io->ci_obj = cl_object_top(osc2cl(obj));
	rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
	if (rc < 0)
		goto out;

	/* discard all pages with index greater than trunc_index */
	list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
				 oap_pending_item) {
		struct cl_page *sub = oap2cl_page(oap);
		struct cl_page *page = cl_page_top(sub);

		LASSERT(list_empty(&oap->oap_rpc_item));

		/* only discard the pages with their index greater than
		 * trunc_index, and ...
		 */
		if (sub->cp_index < trunc_index ||
		    (sub->cp_index == trunc_index && partial)) {
			/* accounting how many pages remain in the chunk
			 * so that we can calculate grants correctly.
			 */
			if (sub->cp_index >> ppc_bits == trunc_chunk)
				++pages_in_chunk;
			continue;
		}

		list_del_init(&oap->oap_pending_item);

		cl_page_get(page);
		lu_ref_add(&page->cp_reference, "truncate", current);

		if (cl_page_own(env, io, page) == 0) {
			cl_page_unmap(env, io, page);
			cl_page_discard(env, io, page);
			cl_page_disown(env, io, page);
		} else {
			LASSERT(page->cp_state == CPS_FREEING);
			LASSERT(0);
		}

		lu_ref_del(&page->cp_reference, "truncate", current);
		cl_page_put(env, page);

		--ext->oe_nr_pages;
		++nr_pages;
	}
	EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
		      ext->oe_nr_pages == 0),
		 ext, "trunc_index %lu, partial %d\n", trunc_index, partial);

	osc_object_lock(obj);
	if (ext->oe_nr_pages == 0) {
		LASSERT(pages_in_chunk == 0);
		grants = ext->oe_grants;
		ext->oe_grants = 0;
	} else { /* calculate how many grants we can free */
		int chunks = (ext->oe_end >> ppc_bits) - trunc_chunk;
		pgoff_t last_index;

		/* if there are no pages in this chunk, we can also free grants
		 * for the last chunk
		 */
		if (pages_in_chunk == 0) {
			/* if this is the 1st chunk and no pages in this chunk,
			 * ext->oe_nr_pages must be zero, so we should be in
			 * the other if-clause.
			 */
			LASSERT(trunc_chunk > 0);
			--trunc_chunk;
			++chunks;
		}

		/* this is what we can free from this extent */
		grants = chunks << cli->cl_chunkbits;
		ext->oe_grants -= grants;
		last_index = ((trunc_chunk + 1) << ppc_bits) - 1;
		ext->oe_end = min(last_index, ext->oe_max_end);
		LASSERT(ext->oe_end >= ext->oe_start);
		LASSERT(ext->oe_grants > 0);
	}
	osc_object_unlock(obj);

	if (grants > 0 || nr_pages > 0)
		osc_free_grant(cli, nr_pages, grants);

out:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
	return rc;
}

/**
 * This function is used to make the extent prepared for transfer.
 * A race with flushing page - ll_writepage() has to be handled cautiously.
 */
static int osc_extent_make_ready(const struct lu_env *env,
				 struct osc_extent *ext)
{
	struct osc_async_page *oap;
	struct osc_async_page *last = NULL;
	struct osc_object *obj = ext->oe_obj;
	int page_count = 0;
	int rc;

	/* we're going to grab page lock, so object lock must not be taken. */
	LASSERT(sanity_check(ext) == 0);
	/* in locking state, any process should not touch this extent. */
	EASSERT(ext->oe_state == OES_LOCKING, ext);
	EASSERT(ext->oe_owner, ext);

	OSC_EXTENT_DUMP(D_CACHE, ext, "make ready\n");

	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		++page_count;
		if (!last || last->oap_obj_off < oap->oap_obj_off)
			last = oap;

		/* checking ASYNC_READY is race safe */
		if ((oap->oap_async_flags & ASYNC_READY) != 0)
			continue;

		rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
		switch (rc) {
		case 0:
			spin_lock(&oap->oap_lock);
			oap->oap_async_flags |= ASYNC_READY;
			spin_unlock(&oap->oap_lock);
			break;
		case -EALREADY:
			LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
			break;
		default:
			LASSERTF(0, "unknown return code: %d\n", rc);
		}
	}

	LASSERT(page_count == ext->oe_nr_pages);
	LASSERT(last);
	/* the last page is the only one whose transfer count must be
	 * refreshed against the size of the file.
	 */
	if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
		last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
		LASSERT(last->oap_count > 0);
		LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE);
		last->oap_async_flags |= ASYNC_COUNT_STABLE;
	}

	/* for the rest of the pages, we don't need to call osc_refresh_count()
	 * because it's known they are not the last page
	 */
	list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
		if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
			oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
			oap->oap_async_flags |= ASYNC_COUNT_STABLE;
		}
	}

	osc_object_lock(obj);
	osc_extent_state_set(ext, OES_RPC);
	osc_object_unlock(obj);
	/* get a refcount for RPC. */
	osc_extent_get(ext);

	return 0;
}

/**
 * Quick and simple version of osc_extent_find(). This function is frequently
 * called to expand the extent for the same IO. To expand the extent, the
 * page index must be in the same or next chunk of ext->oe_end.
 */
static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
{
	struct osc_object *obj = ext->oe_obj;
	struct client_obd *cli = osc_cli(obj);
	struct osc_extent *next;
	int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
	pgoff_t chunk = index >> ppc_bits;
	pgoff_t end_chunk;
	pgoff_t end_index;
	int chunksize = 1 << cli->cl_chunkbits;
	int rc = 0;

	LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
	osc_object_lock(obj);
	LASSERT(sanity_check_nolock(ext) == 0);
	end_chunk = ext->oe_end >> ppc_bits;
	if (chunk > end_chunk + 1) {
		rc = -ERANGE;
		goto out;
	}

	if (end_chunk >= chunk) {
		rc = 0;
		goto out;
	}

	LASSERT(end_chunk + 1 == chunk);
	/* try to expand this extent to cover @index */
	end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);

	next = next_extent(ext);
	if (next && next->oe_start <= end_index) {
		/* complex mode - overlapped with the next extent,
		 * this case will be handled by osc_extent_find()
		 */
		rc = -EAGAIN;
		goto out;
	}

	ext->oe_end = end_index;
	ext->oe_grants += chunksize;
	*grants -= chunksize;
	LASSERT(*grants >= 0);
	EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
		 "overlapped after expanding for %lu.\n", index);

out:
	osc_object_unlock(obj);
	return rc;
}
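
/*
 * Example (illustrative values): with ppc_bits = 4 (16 pages per chunk), an
 * extent ending at page 31 (end_chunk = 1) can be expanded for index 35
 * (chunk = 2, the next chunk): oe_end grows to min(oe_max_end, 47) and one
 * more chunk of grant is consumed from *grants. Index 50 (chunk 3) fails
 * with -ERANGE and the caller has to take the osc_extent_find() path.
 */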

static void osc_extent_tree_dump0(int level, struct osc_object *obj,
				  const char *func, int line)
{
	struct osc_extent *ext;
	int cnt;

	CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
	       obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);

	/* osc_object_lock(obj); */
	cnt = 1;
	for (ext = first_extent(obj); ext; ext = next_extent(ext))
		OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);

	cnt = 1;
	list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
		OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
	/* osc_object_unlock(obj); */
}

/* ------------------ osc extent end ------------------ */

static inline int osc_is_ready(struct osc_object *osc)
{
	return !list_empty(&osc->oo_ready_item) ||
	       !list_empty(&osc->oo_hp_ready_item);
}

#define OSC_IO_DEBUG(OSC, STR, args...)					\
	CDEBUG(D_CACHE, "obj %p ready %d|%c|%c wr %d|%c|%c rd %d|%c " STR, \
	       (OSC), osc_is_ready(OSC),				\
	       list_empty_marker(&(OSC)->oo_hp_ready_item),		\
	       list_empty_marker(&(OSC)->oo_ready_item),		\
	       atomic_read(&(OSC)->oo_nr_writes),			\
	       list_empty_marker(&(OSC)->oo_hp_exts),			\
	       list_empty_marker(&(OSC)->oo_urgent_exts),		\
	       atomic_read(&(OSC)->oo_nr_reads),			\
	       list_empty_marker(&(OSC)->oo_reading_exts),		\
	       ##args)

static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page *page = cl_page_top(oap2cl_page(oap));
	int result;

	LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */

	result = cl_page_make_ready(env, page, CRT_WRITE);
	if (result == 0)
		opg->ops_submit_time = cfs_time_current();
	return result;
}

static int osc_refresh_count(const struct lu_env *env,
			     struct osc_async_page *oap, int cmd)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page *page = oap2cl_page(oap);
	struct cl_object *obj;
	struct cl_attr *attr = &osc_env_info(env)->oti_attr;

	int result;
	loff_t kms;

	/* readpage queues with _COUNT_STABLE, shouldn't get here. */
	LASSERT(!(cmd & OBD_BRW_READ));
	obj = opg->ops_cl.cpl_obj;

	cl_object_attr_lock(obj);
	result = cl_object_attr_get(env, obj, attr);
	cl_object_attr_unlock(obj);
	if (result < 0)
		return result;
	kms = attr->cat_kms;
	if (cl_offset(obj, page->cp_index) >= kms)
		/* catch race with truncate */
		return 0;
	else if (cl_offset(obj, page->cp_index + 1) > kms)
		/* catch sub-page write at end of file */
		return kms % PAGE_CACHE_SIZE;
	else
		return PAGE_CACHE_SIZE;
}
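
/*
 * Example (illustrative numbers): with 4KiB pages and kms (known minimum
 * size) = 10000, the page at index 2 covers file bytes [8192, 12288); since
 * 8192 < 10000 < 12288, osc_refresh_count() returns 10000 % 4096 = 1808,
 * i.e. only the bytes up to the end of file are transferred. A page wholly
 * below kms gets the full page size; one wholly at or beyond kms gets 0.
 */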

static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
			  int cmd, int rc)
{
	struct osc_page *opg = oap2osc_page(oap);
	struct cl_page *page = cl_page_top(oap2cl_page(oap));
	struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
	enum cl_req_type crt;
	int srvlock;

	cmd &= ~OBD_BRW_NOQUOTA;
	LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
	LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
	LASSERT(opg->ops_transfer_pinned);

	/*
	 * page->cp_req can be NULL if io submission failed before
	 * cl_req was allocated.
	 */
	if (page->cp_req)
		cl_req_page_done(env, page);
	LASSERT(!page->cp_req);

	crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
	/* Clear opg->ops_transfer_pinned before VM lock is released. */
	opg->ops_transfer_pinned = 0;

	spin_lock(&obj->oo_seatbelt);
	LASSERT(opg->ops_submitter);
	LASSERT(!list_empty(&opg->ops_inflight));
	list_del_init(&opg->ops_inflight);
	opg->ops_submitter = NULL;
	spin_unlock(&obj->oo_seatbelt);

	opg->ops_submit_time = 0;
	srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;

	/* statistics */
	if (rc == 0 && srvlock) {
		struct lu_device *ld = opg->ops_cl.cpl_obj->co_lu.lo_dev;
		struct osc_stats *stats = &lu2osc_dev(ld)->od_stats;
		int bytes = oap->oap_count;

		if (crt == CRT_READ)
			stats->os_lockless_reads += bytes;
		else
			stats->os_lockless_writes += bytes;
	}

	/*
	 * This has to be the last operation with the page, as locks are
	 * released in cl_page_completion() and nothing except for the
	 * reference counter protects page from concurrent reclaim.
	 */
	lu_ref_del(&page->cp_reference, "transfer", page);

	cl_page_completion(env, page, crt, rc);

	return 0;
}

#define OSC_DUMP_GRANT(cli, fmt, args...) do {				\
	struct client_obd *__tmp = (cli);				\
	CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d "	\
	       "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
	       __tmp->cl_import->imp_obd->obd_name,			\
	       __tmp->cl_dirty, __tmp->cl_dirty_max,			\
	       atomic_read(&obd_dirty_pages), obd_max_dirty_pages,	\
	       __tmp->cl_lost_grant, __tmp->cl_avail_grant,		\
	       __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
} while (0)

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
				    struct brw_page *pga)
{
	assert_spin_locked(&cli->cl_loi_list_lock.lock);
	LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
	atomic_inc(&obd_dirty_pages);
	cli->cl_dirty += PAGE_CACHE_SIZE;
	pga->flag |= OBD_BRW_FROM_GRANT;
	CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
	       PAGE_CACHE_SIZE, pga, pga->pg);
	osc_update_next_shrink(cli);
}

/* the companion to osc_consume_write_grant, called when a brw has completed.
 * must be called with the loi lock held.
 */
static void osc_release_write_grant(struct client_obd *cli,
				    struct brw_page *pga)
{
	assert_spin_locked(&cli->cl_loi_list_lock.lock);
	if (!(pga->flag & OBD_BRW_FROM_GRANT))
		return;

	pga->flag &= ~OBD_BRW_FROM_GRANT;
	atomic_dec(&obd_dirty_pages);
	cli->cl_dirty -= PAGE_CACHE_SIZE;
	if (pga->flag & OBD_BRW_NOCACHE) {
		pga->flag &= ~OBD_BRW_NOCACHE;
		atomic_dec(&obd_dirty_transit_pages);
		cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
	}
}

/**
 * To avoid sleeping with the object lock held, it's good for us to allocate
 * enough grants before entering into the critical section.
 *
 * client_obd_list_lock held by caller
 */
static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
{
	int rc = -EDQUOT;

	if (cli->cl_avail_grant >= bytes) {
		cli->cl_avail_grant -= bytes;
		cli->cl_reserved_grant += bytes;
		rc = 0;
	}
	return rc;
}

static void __osc_unreserve_grant(struct client_obd *cli,
				  unsigned int reserved, unsigned int unused)
{
	/* it's quite normal for us to get more grant than reserved.
	 * Consider the case where two extents get merged by adding a new
	 * chunk: we save one extent tax. If the extent tax is greater than
	 * one chunk, we can save even more grant by adding a new chunk.
	 */
	cli->cl_reserved_grant -= reserved;
	if (unused > reserved) {
		cli->cl_avail_grant += reserved;
		cli->cl_lost_grant += unused - reserved;
	} else {
		cli->cl_avail_grant += unused;
	}
}
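
/*
 * Example: if 2 chunks of grant were reserved but the write only ended up
 * needing 1 (unused == 1 chunk <= reserved == 2 chunks), the unused chunk
 * goes straight back to cl_avail_grant. If a merge saved an extent tax so
 * that unused exceeds reserved, the excess is remembered in cl_lost_grant
 * and returned to the OST later.
 */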

static void osc_unreserve_grant(struct client_obd *cli,
				unsigned int reserved, unsigned int unused)
{
	client_obd_list_lock(&cli->cl_loi_list_lock);
	__osc_unreserve_grant(cli, reserved, unused);
	if (unused > 0)
		osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
}

/**
 * Free grant after IO is finished or canceled.
 *
 * @lost_grant is used to remember how many grants we have allocated but not
 * used; we should return these grants to the OST. There are two cases where
 * grants can be lost:
 * 1. truncate;
 * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
 *    written. In this case the OST may use fewer chunks to serve this partial
 *    write. OSTs don't actually know the page size on the client side, so
 *    clients have to calculate lost grant by the blocksize on the OST.
 *    See filter_grant_check() for details.
 */
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
			   unsigned int lost_grant)
{
	int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	atomic_sub(nr_pages, &obd_dirty_pages);
	cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT;
	cli->cl_lost_grant += lost_grant;
	if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
		/* borrow some grant from truncate to avoid the case that
		 * truncate uses up all avail grant
		 */
		cli->cl_lost_grant -= grant;
		cli->cl_avail_grant += grant;
	}
	osc_wake_cache_waiters(cli);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
	       lost_grant, cli->cl_lost_grant,
	       cli->cl_avail_grant, cli->cl_dirty);
}
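
/*
 * Worked example for case 2 of the comment above (illustrative numbers):
 * with a 4096-byte client page, an OST blocksize of 1024 and a 100-byte
 * write at page offset 0, the OST only consumes one 1024-byte block, so
 * count rounds up to 1024 and lost_grant = 4096 - 1024 = 3072 bytes are
 * reported back; see the short-write handling in osc_extent_finish().
 */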

/**
 * The companion to osc_enter_cache(), called when @oap is no longer part of
 * the dirty accounting due to error.
 */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
	client_obd_list_lock(&cli->cl_loi_list_lock);
	osc_release_write_grant(cli, &oap->oap_brw_page);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
}

/**
 * Non-blocking version of osc_enter_cache() that consumes grant only when it
 * is available.
 */
static int osc_enter_cache_try(struct client_obd *cli,
			       struct osc_async_page *oap,
			       int bytes, int transient)
{
	int rc;

	OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);

	rc = osc_reserve_grant(cli, bytes);
	if (rc < 0)
		return 0;

	if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max &&
	    atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
		osc_consume_write_grant(cli, &oap->oap_brw_page);
		if (transient) {
			cli->cl_dirty_transit += PAGE_CACHE_SIZE;
			atomic_inc(&obd_dirty_transit_pages);
			oap->oap_brw_flags |= OBD_BRW_NOCACHE;
		}
		rc = 1;
	} else {
		__osc_unreserve_grant(cli, bytes, bytes);
		rc = 0;
	}
	return rc;
}

static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
	int rc;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	rc = list_empty(&ocw->ocw_entry);
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	return rc;
}

/**
 * The main entry to reserve dirty page accounting. Usually the grant reserved
 * in this function will be freed in bulk in osc_free_grant() unless it fails
 * to add the osc cache, in which case it will be freed in osc_exit_cache().
 *
 * The process will be put to sleep if it has already run out of grant.
 */
static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
			   struct osc_async_page *oap, int bytes)
{
	struct osc_object *osc = oap->oap_obj;
	struct lov_oinfo *loi = osc->oo_oinfo;
	struct osc_cache_waiter ocw;
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	int rc = -EDQUOT;

	OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);

	client_obd_list_lock(&cli->cl_loi_list_lock);

	/* force the caller to try sync io. this can jump the list
	 * of queued writes and create a discontiguous rpc stream
	 */
	if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
	    cli->cl_dirty_max < PAGE_CACHE_SIZE ||
	    cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
		rc = -EDQUOT;
		goto out;
	}

	/* Hopefully normal case - cache space and write credits available */
	if (osc_enter_cache_try(cli, oap, bytes, 0)) {
		rc = 0;
		goto out;
	}

	/* We can get here for two reasons: too many dirty pages in cache, or
	 * run out of grants. In both cases we should write dirty pages out.
	 * Adding a cache waiter will trigger urgent write-out no matter what
	 * RPC size will be.
	 * The exit condition is no available grants and no dirty pages cached,
	 * which really means there is no space on the OST.
	 */
	init_waitqueue_head(&ocw.ocw_waitq);
	ocw.ocw_oap = oap;
	ocw.ocw_grant = bytes;
	while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
		list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
		ocw.ocw_rc = 0;
		client_obd_list_unlock(&cli->cl_loi_list_lock);

		osc_io_unplug_async(env, cli, NULL);

		CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
		       cli->cl_import->imp_obd->obd_name, &ocw, oap);

		rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);

		client_obd_list_lock(&cli->cl_loi_list_lock);

		/* l_wait_event is interrupted by signal */
		if (rc < 0) {
			list_del_init(&ocw.ocw_entry);
			goto out;
		}

		LASSERT(list_empty(&ocw.ocw_entry));
		rc = ocw.ocw_rc;

		if (rc != -EDQUOT)
			goto out;
		if (osc_enter_cache_try(cli, oap, bytes, 0)) {
			rc = 0;
			goto out;
		}
	}
out:
	client_obd_list_unlock(&cli->cl_loi_list_lock);
	OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
	return rc;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
	struct list_head *l, *tmp;
	struct osc_cache_waiter *ocw;

	list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
		ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
		list_del_init(&ocw->ocw_entry);

		ocw->ocw_rc = -EDQUOT;
		/* we can't dirty more */
		if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) ||
		    (atomic_read(&obd_dirty_pages) + 1 >
		     obd_max_dirty_pages)) {
			CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
			       cli->cl_dirty,
			       cli->cl_dirty_max, obd_max_dirty_pages);
			goto wakeup;
		}

		ocw->ocw_rc = 0;
		if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
			ocw->ocw_rc = -EDQUOT;

wakeup:
		CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
		       ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);

		wake_up(&ocw->ocw_waitq);
	}
}

static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
{
	int hprpc = !!list_empty(&osc->oo_hp_exts);

	return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
}

/* This maintains the lists of pending pages to read/write for a given object
 * (lop). This is used by osc_check_rpcs->osc_next_obj() and osc_list_maint()
 * to quickly find objects that are ready to send an RPC.
 */
static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
			 int cmd)
{
	int invalid_import = 0;

	/* if we have an invalid import we want to drain the queued pages
	 * by forcing them through rpcs that immediately fail and complete
	 * the pages. recovery relies on this to empty the queued pages
	 * before canceling the locks and evicting down the llite pages
	 */
	if (!cli->cl_import || cli->cl_import->imp_invalid)
		invalid_import = 1;

	if (cmd & OBD_BRW_WRITE) {
		if (atomic_read(&osc->oo_nr_writes) == 0)
			return 0;
		if (invalid_import) {
			CDEBUG(D_CACHE, "invalid import forcing RPC\n");
			return 1;
		}
		if (!list_empty(&osc->oo_hp_exts)) {
			CDEBUG(D_CACHE, "high prio request forcing RPC\n");
			return 1;
		}
		if (!list_empty(&osc->oo_urgent_exts)) {
			CDEBUG(D_CACHE, "urgent request forcing RPC\n");
			return 1;
		}
		/* trigger a write rpc stream as long as there are dirtiers
		 * waiting for space. as they're waiting, they're not going to
		 * create more pages to coalesce with what's waiting.
		 */
		if (!list_empty(&cli->cl_cache_waiters)) {
			CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
			return 1;
		}
		if (atomic_read(&osc->oo_nr_writes) >=
		    cli->cl_max_pages_per_rpc)
			return 1;
	} else {
		if (atomic_read(&osc->oo_nr_reads) == 0)
			return 0;
		if (invalid_import) {
			CDEBUG(D_CACHE, "invalid import forcing RPC\n");
			return 1;
		}
		/* all reads are urgent. */
		if (!list_empty(&osc->oo_reading_exts))
			return 1;
	}

	return 0;
}

static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
{
	struct client_obd *cli = osc_cli(obj);

	if (cmd & OBD_BRW_WRITE) {
		atomic_add(delta, &obj->oo_nr_writes);
		atomic_add(delta, &cli->cl_pending_w_pages);
		LASSERT(atomic_read(&obj->oo_nr_writes) >= 0);
	} else {
		atomic_add(delta, &obj->oo_nr_reads);
		atomic_add(delta, &cli->cl_pending_r_pages);
		LASSERT(atomic_read(&obj->oo_nr_reads) >= 0);
	}
	OSC_IO_DEBUG(obj, "update pending cmd %d delta %d.\n", cmd, delta);
}

static int osc_makes_hprpc(struct osc_object *obj)
{
	return !list_empty(&obj->oo_hp_exts);
}

static void on_list(struct list_head *item, struct list_head *list, int should_be_on)
{
	if (list_empty(item) && should_be_on)
		list_add_tail(item, list);
	else if (!list_empty(item) && !should_be_on)
		list_del_init(item);
}

/* maintain the osc's cli list membership invariants so that osc_send_oap_rpc
 * can find pages to build into rpcs quickly
 */
static int __osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
	if (osc_makes_hprpc(osc)) {
		/* HP rpc */
		on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list, 0);
		on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
	} else {
		on_list(&osc->oo_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
		on_list(&osc->oo_ready_item, &cli->cl_loi_ready_list,
			osc_makes_rpc(cli, osc, OBD_BRW_WRITE) ||
			osc_makes_rpc(cli, osc, OBD_BRW_READ));
	}

	on_list(&osc->oo_write_item, &cli->cl_loi_write_list,
		atomic_read(&osc->oo_nr_writes) > 0);

	on_list(&osc->oo_read_item, &cli->cl_loi_read_list,
		atomic_read(&osc->oo_nr_reads) > 0);

	return osc_is_ready(osc);
}

static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
	int is_ready;

	client_obd_list_lock(&cli->cl_loi_list_lock);
	is_ready = __osc_list_maint(cli, osc);
	client_obd_list_unlock(&cli->cl_loi_list_lock);

	return is_ready;
}

/* this is trying to propagate async writeback errors back up to the
 * application. As an async write fails we record the error code for later if
 * the app does an fsync. As long as errors persist we force future rpcs to be
 * sync so that the app can get a sync error and break the cycle of queueing
 * pages for which writeback will fail.
 */
static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
			   int rc)
{
	if (rc) {
		if (!ar->ar_rc)
			ar->ar_rc = rc;

		ar->ar_force_sync = 1;
		ar->ar_min_xid = ptlrpc_sample_next_xid();
		return;
	}

	if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
		ar->ar_force_sync = 0;
}

/* this must be called holding the loi list lock to give coverage to exit_cache,
 * async_flag maintenance, and oap_request
 */
static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
			      struct osc_async_page *oap, int sent, int rc)
{
	struct osc_object *osc = oap->oap_obj;
	struct lov_oinfo *loi = osc->oo_oinfo;
	__u64 xid = 0;

	if (oap->oap_request) {
		xid = ptlrpc_req_xid(oap->oap_request);
		ptlrpc_req_finished(oap->oap_request);
		oap->oap_request = NULL;
	}

	/* As the transfer for this page is being done, clear the flags */
	spin_lock(&oap->oap_lock);
	oap->oap_async_flags = 0;
	spin_unlock(&oap->oap_lock);
	oap->oap_interrupted = 0;

	if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
		client_obd_list_lock(&cli->cl_loi_list_lock);
		osc_process_ar(&cli->cl_ar, xid, rc);
		osc_process_ar(&loi->loi_ar, xid, rc);
		client_obd_list_unlock(&cli->cl_loi_list_lock);
	}

	rc = osc_completion(env, oap, oap->oap_cmd, rc);
	if (rc)
		CERROR("completion on oap %p obj %p returns %d.\n",
		       oap, osc, rc);
}
1844
1845 /**
1846 * Try to add extent to one RPC. We need to think about the following things:
1847 * - # of pages must not be over max_pages_per_rpc
1848 * - extent must be compatible with previous ones
1849 */
1850 static int try_to_add_extent_for_io(struct client_obd *cli,
1851 struct osc_extent *ext, struct list_head *rpclist,
1852 int *pc, unsigned int *max_pages)
1853 {
1854 struct osc_extent *tmp;
1855 struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
1856 struct osc_async_page,
1857 oap_pending_item);
1858
1859 EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
1860 ext);
1861
1862 *max_pages = max(ext->oe_mppr, *max_pages);
1863 if (*pc + ext->oe_nr_pages > *max_pages)
1864 return 0;
1865
1866 list_for_each_entry(tmp, rpclist, oe_link) {
1867 struct osc_async_page *oap2;
1868
1869 oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
1870 oap_pending_item);
1871 EASSERT(tmp->oe_owner == current, tmp);
1872 if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
1873 CDEBUG(D_CACHE,
1874 "Do not permit different types of IO in the same RPC\n");
1875 return 0;
1876 }
1877
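/* extents in one RPC must agree on srvlock and on whether they carry
 * grants; "!a != !b" compares the two fields as booleans
 */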
1878 if (tmp->oe_srvlock != ext->oe_srvlock ||
1879 !tmp->oe_grants != !ext->oe_grants)
1880 return 0;
1881
1882 /* only the first queued extent is checked; remove this break for a strict check of every extent */
1883 break;
1884 }
1885
1886 *pc += ext->oe_nr_pages;
1887 list_move_tail(&ext->oe_link, rpclist);
1888 ext->oe_owner = current;
1889 return 1;
1890 }
1891
1892 /**
1893 * In order to prevent multiple ptlrpcd threads from breaking contiguous
1894 * extents, get_write_extents() takes all appropriate extents atomically.
1895 *
1896 * The following policy is used to collect extents for IO:
1897 * 1. Add as many HP extents as possible;
1898 * 2. Add the first urgent extent in the urgent extent list and take it
1899 * out of that list;
1900 * 3. Add subsequent extents of this urgent extent;
1901 * 4. If the urgent list is not empty, goto 2;
1902 * 5. Traverse the extent tree from the 1st extent;
1903 * 6. Each step above exits early once there is no space left in the RPC.
1904 */
1905 static int get_write_extents(struct osc_object *obj, struct list_head *rpclist)
1906 {
1907 struct client_obd *cli = osc_cli(obj);
1908 struct osc_extent *ext;
1909 int page_count = 0;
1910 unsigned int max_pages = cli->cl_max_pages_per_rpc;
1911
1912 LASSERT(osc_object_is_locked(obj));
1913 while (!list_empty(&obj->oo_hp_exts)) {
1914 ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
1915 oe_link);
1916 LASSERT(ext->oe_state == OES_CACHE);
1917 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1918 &max_pages))
1919 return page_count;
1920 EASSERT(ext->oe_nr_pages <= max_pages, ext);
1921 }
1922 if (page_count == max_pages)
1923 return page_count;
1924
1925 while (!list_empty(&obj->oo_urgent_exts)) {
1926 ext = list_entry(obj->oo_urgent_exts.next,
1927 struct osc_extent, oe_link);
1928 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1929 &max_pages))
1930 return page_count;
1931
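/* an extent that is not in the red-black tree (e.g. one built for
 * sync IO by osc_queue_sync_pages()) has no neighbours to pull in
 */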
1932 if (!ext->oe_intree)
1933 continue;
1934
1935 while ((ext = next_extent(ext)) != NULL) {
1936 if ((ext->oe_state != OES_CACHE) ||
1937 (!list_empty(&ext->oe_link) &&
1938 ext->oe_owner))
1939 continue;
1940
1941 if (!try_to_add_extent_for_io(cli, ext, rpclist,
1942 &page_count, &max_pages))
1943 return page_count;
1944 }
1945 }
1946 if (page_count == max_pages)
1947 return page_count;
1948
1949 ext = first_extent(obj);
1950 while (ext) {
1951 if ((ext->oe_state != OES_CACHE) ||
1952 /* this extent may be already in current rpclist */
1953 (!list_empty(&ext->oe_link) && ext->oe_owner)) {
1954 ext = next_extent(ext);
1955 continue;
1956 }
1957
1958 if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
1959 &max_pages))
1960 return page_count;
1961
1962 ext = next_extent(ext);
1963 }
1964 return page_count;
1965 }
1966
1967 static int
1968 osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
1969 struct osc_object *osc)
1970 __must_hold(osc)
1971 {
1972 LIST_HEAD(rpclist);
1973 struct osc_extent *ext;
1974 struct osc_extent *tmp;
1975 struct osc_extent *first = NULL;
1976 u32 page_count = 0;
1977 int srvlock = 0;
1978 int rc = 0;
1979
1980 LASSERT(osc_object_is_locked(osc));
1981
1982 page_count = get_write_extents(osc, &rpclist);
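/* equi(a, b) asserts logical equivalence: page_count must be zero
 * exactly when rpclist is empty
 */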
1983 LASSERT(equi(page_count == 0, list_empty(&rpclist)));
1984
1985 if (list_empty(&rpclist))
1986 return 0;
1987
1988 osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
1989
1990 list_for_each_entry(ext, &rpclist, oe_link) {
1991 LASSERT(ext->oe_state == OES_CACHE ||
1992 ext->oe_state == OES_LOCK_DONE);
1993 if (ext->oe_state == OES_CACHE)
1994 osc_extent_state_set(ext, OES_LOCKING);
1995 else
1996 osc_extent_state_set(ext, OES_RPC);
1997 }
1998
1999 /* we're going to grab page lock, so release object lock because
2000 * lock order is page lock -> object lock.
2001 */
2002 osc_object_unlock(osc);
2003
2004 list_for_each_entry_safe(ext, tmp, &rpclist, oe_link) {
2005 if (ext->oe_state == OES_LOCKING) {
2006 rc = osc_extent_make_ready(env, ext);
2007 if (unlikely(rc < 0)) {
2008 list_del_init(&ext->oe_link);
2009 osc_extent_finish(env, ext, 0, rc);
2010 continue;
2011 }
2012 }
2013 if (!first) {
2014 first = ext;
2015 srvlock = ext->oe_srvlock;
2016 } else {
2017 LASSERT(srvlock == ext->oe_srvlock);
2018 }
2019 }
2020
2021 if (!list_empty(&rpclist)) {
2022 LASSERT(page_count > 0);
2023 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
2024 LASSERT(list_empty(&rpclist));
2025 }
2026
2027 osc_object_lock(osc);
2028 return rc;
2029 }
2030
2031 /**
2032 * Prepare pages for ASYNC IO and put them in the send queue.
2033 *
2034 * \param env execution environment
2035 * \param cli client obd device
2036 * \param osc osc object to send read RPCs for
2037 * \return zero if no page added to send queue.
2038 * \return 1 if pages successfully added to send queue.
2039 * \return negative on errors.
2040 */
2041 static int
2042 osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
2043 struct osc_object *osc)
2044 __must_hold(osc)
2045 {
2046 struct osc_extent *ext;
2047 struct osc_extent *next;
2048 LIST_HEAD(rpclist);
2049 int page_count = 0;
2050 unsigned int max_pages = cli->cl_max_pages_per_rpc;
2051 int rc = 0;
2052
2053 LASSERT(osc_object_is_locked(osc));
2054 list_for_each_entry_safe(ext, next,
2055 &osc->oo_reading_exts, oe_link) {
2056 EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
2057 if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
2058 &max_pages))
2059 break;
2060 osc_extent_state_set(ext, OES_RPC);
2061 EASSERT(ext->oe_nr_pages <= max_pages, ext);
2062 }
2063 LASSERT(page_count <= max_pages);
2064
2065 osc_update_pending(osc, OBD_BRW_READ, -page_count);
2066
2067 if (!list_empty(&rpclist)) {
2068 osc_object_unlock(osc);
2069
2070 LASSERT(page_count > 0);
2071 rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
2072 LASSERT(list_empty(&rpclist));
2073
2074 osc_object_lock(osc);
2075 }
2076 return rc;
2077 }
2078
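/* detach the first entry from @list and return the osc_object that
 * embeds it; token pasting picks the matching oo_ list member
 */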
2079 #define list_to_obj(list, item) ({ \
2080 struct list_head *__tmp = (list)->next; \
2081 list_del_init(__tmp); \
2082 list_entry(__tmp, struct osc_object, oo_##item); \
2083 })
2084
2085 /* This is called by osc_check_rpcs() to find which objects have pages that
2086 * we could be sending. These lists are maintained by osc_makes_rpc().
2087 */
2088 static struct osc_object *osc_next_obj(struct client_obd *cli)
2089 {
2090 /* First return objects that have blocked locks so that they
2091 * will be flushed quickly and other clients can get the lock,
2092 * then objects which have pages ready to be stuffed into RPCs
2093 */
2094 if (!list_empty(&cli->cl_loi_hp_ready_list))
2095 return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
2096 if (!list_empty(&cli->cl_loi_ready_list))
2097 return list_to_obj(&cli->cl_loi_ready_list, ready_item);
2098
2099 /* then if we have cache waiters, return all objects with queued
2100 * writes. This is especially important when many small files
2101 * have filled up the cache but have not been fired into RPCs because
2102 * they don't pass the nr_pending/object threshold.
2103 */
2104 if (!list_empty(&cli->cl_cache_waiters) &&
2105 !list_empty(&cli->cl_loi_write_list))
2106 return list_to_obj(&cli->cl_loi_write_list, write_item);
2107
2108 /* then return all queued objects when we have an invalid import
2109 * so that they get flushed
2110 */
2111 if (!cli->cl_import || cli->cl_import->imp_invalid) {
2112 if (!list_empty(&cli->cl_loi_write_list))
2113 return list_to_obj(&cli->cl_loi_write_list, write_item);
2114 if (!list_empty(&cli->cl_loi_read_list))
2115 return list_to_obj(&cli->cl_loi_read_list, read_item);
2116 }
2117 return NULL;
2118 }
2119
2120 /* called with the loi list lock held */
2121 static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2122 __must_hold(&cli->cl_loi_list_lock)
2123 {
2124 struct osc_object *osc;
2125 int rc = 0;
2126
2127 while ((osc = osc_next_obj(cli)) != NULL) {
2128 struct cl_object *obj = osc2cl(osc);
2129 struct lu_ref_link link;
2130
2131 OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
2132
2133 if (osc_max_rpc_in_flight(cli, osc)) {
2134 __osc_list_maint(cli, osc);
2135 break;
2136 }
2137
2138 cl_object_get(obj);
2139 client_obd_list_unlock(&cli->cl_loi_list_lock);
2140 lu_object_ref_add_at(&obj->co_lu, &link, "check",
2141 current);
2142
2143 /* Attempt some read/write balancing by alternating between
2144 * reads and writes in an object. The makes_rpc checks here
2145 * would be redundant if we were getting read/write work items
2146 * instead of objects. We don't want send_oap_rpc to drain a
2147 * partial read pending queue when we're given this object to
2148 * do IO on for writes while there are cache waiters.
2149 */
2150 osc_object_lock(osc);
2151 if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
2152 rc = osc_send_write_rpc(env, cli, osc);
2153 if (rc < 0) {
2154 CERROR("Write request failed with %d\n", rc);
2155
2156 /* osc_send_write_rpc failed, mostly because of
2157 * memory pressure.
2158 *
2159 * We must not break out here, because if:
2160 * - a page was submitted by osc_io_submit, so
2161 * that page is locked;
2162 * - no request is in flight; and
2163 * - no subsequent request will be generated,
2164 * then the system would end up in a live-lock
2165 * state: there would be no further chance to
2166 * call osc_io_unplug() or osc_check_rpcs().
2167 * pdflush can't help in this case, because it
2168 * might be blocked grabbing the page lock, as
2169 * mentioned above.
2170 *
2171 * Anyway, continue to drain pages.
2172 */
2173 /* break; */
2174 }
2175 }
2176 if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
2177 rc = osc_send_read_rpc(env, cli, osc);
2178 if (rc < 0)
2179 CERROR("Read request failed with %d\n", rc);
2180 }
2181 osc_object_unlock(osc);
2182
2183 osc_list_maint(cli, osc);
2184 lu_object_ref_del_at(&obj->co_lu, &link, "check",
2185 current);
2186 cl_object_put(env, obj);
2187
2188 client_obd_list_lock(&cli->cl_loi_list_lock);
2189 }
2190 }
2191
2192 static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
2193 struct osc_object *osc, int async)
2194 {
2195 int rc = 0;
2196
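/* osc_list_maint() reports whether the object is ready to send RPCs;
 * if a specific object was given and nothing on it is ready, there is
 * nothing to unplug
 */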
2197 if (osc && osc_list_maint(cli, osc) == 0)
2198 return 0;
2199
2200 if (!async) {
2201 /* disable osc_lru_shrink() temporarily to avoid a
2202 * potential stack overrun problem. LU-2859
2203 */
2204 atomic_inc(&cli->cl_lru_shrinkers);
2205 client_obd_list_lock(&cli->cl_loi_list_lock);
2206 osc_check_rpcs(env, cli);
2207 client_obd_list_unlock(&cli->cl_loi_list_lock);
2208 atomic_dec(&cli->cl_lru_shrinkers);
2209 } else {
2210 CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
2211 LASSERT(cli->cl_writeback_work);
2212 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
2213 }
2214 return rc;
2215 }
2216
2217 static int osc_io_unplug_async(const struct lu_env *env,
2218 struct client_obd *cli, struct osc_object *osc)
2219 {
2220 return osc_io_unplug0(env, cli, osc, 1);
2221 }
2222
2223 void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
2224 struct osc_object *osc)
2225 {
2226 (void)osc_io_unplug0(env, cli, osc, 0);
2227 }
2228
2229 int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
2230 struct page *page, loff_t offset)
2231 {
2232 struct obd_export *exp = osc_export(osc);
2233 struct osc_async_page *oap = &ops->ops_oap;
2234
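/* a NULL page means the caller only wants to know how much space an
 * osc_async_page occupies
 */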
2235 if (!page)
2236 return cfs_size_round(sizeof(*oap));
2237
2238 oap->oap_magic = OAP_MAGIC;
2239 oap->oap_cli = &exp->exp_obd->u.cli;
2240 oap->oap_obj = osc;
2241
2242 oap->oap_page = page;
2243 oap->oap_obj_off = offset;
2244 LASSERT(!(offset & ~CFS_PAGE_MASK));
2245
2246 if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
2247 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2248
2249 INIT_LIST_HEAD(&oap->oap_pending_item);
2250 INIT_LIST_HEAD(&oap->oap_rpc_item);
2251
2252 spin_lock_init(&oap->oap_lock);
2253 CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
2254 oap, page, oap->oap_obj_off);
2255 return 0;
2256 }
2257
2258 int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
2259 struct osc_page *ops)
2260 {
2261 struct osc_io *oio = osc_env_io(env);
2262 struct osc_extent *ext = NULL;
2263 struct osc_async_page *oap = &ops->ops_oap;
2264 struct client_obd *cli = oap->oap_cli;
2265 struct osc_object *osc = oap->oap_obj;
2266 pgoff_t index;
2267 int grants = 0;
2268 int brw_flags = OBD_BRW_ASYNC;
2269 int cmd = OBD_BRW_WRITE;
2270 int need_release = 0;
2271 int rc = 0;
2272
2273 if (oap->oap_magic != OAP_MAGIC)
2274 return -EINVAL;
2275
2276 if (!cli->cl_import || cli->cl_import->imp_invalid)
2277 return -EIO;
2278
2279 if (!list_empty(&oap->oap_pending_item) ||
2280 !list_empty(&oap->oap_rpc_item))
2281 return -EBUSY;
2282
2283 /* Set the OBD_BRW_SRVLOCK before the page is queued. */
2284 brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
2285 if (!client_is_remote(osc_export(osc)) &&
2286 capable(CFS_CAP_SYS_RESOURCE)) {
2287 brw_flags |= OBD_BRW_NOQUOTA;
2288 cmd |= OBD_BRW_NOQUOTA;
2289 }
2290
2291 /* check if the file's owner/group is over quota */
2292 if (!(cmd & OBD_BRW_NOQUOTA)) {
2293 struct cl_object *obj;
2294 struct cl_attr *attr;
2295 unsigned int qid[MAXQUOTAS];
2296
2297 obj = cl_object_top(&osc->oo_cl);
2298 attr = &osc_env_info(env)->oti_attr;
2299
2300 cl_object_attr_lock(obj);
2301 rc = cl_object_attr_get(env, obj, attr);
2302 cl_object_attr_unlock(obj);
2303
2304 qid[USRQUOTA] = attr->cat_uid;
2305 qid[GRPQUOTA] = attr->cat_gid;
2306 if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
2307 rc = -EDQUOT;
2308 if (rc)
2309 return rc;
2310 }
2311
2312 oap->oap_cmd = cmd;
2313 oap->oap_page_off = ops->ops_from;
2314 oap->oap_count = ops->ops_to - ops->ops_from;
2315 oap->oap_async_flags = 0;
2316 oap->oap_brw_flags = brw_flags;
2317
2318 OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
2319 oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
2320
2321 index = oap2cl_page(oap)->cp_index;
2322
2323 /* Add this page to an extent by the following steps:
2324 * 1. if there exists an active extent for this IO, the page can usually
2325 * be added to it, though sometimes the extent must be expanded to
2326 * accommodate the page;
2327 * 2. otherwise, a new extent will be allocated.
2328 */
2329
2330 ext = oio->oi_active;
2331 if (ext && ext->oe_start <= index && ext->oe_max_end >= index) {
2332 /* one chunk plus extent overhead must be enough to write this
2333 * page
2334 */
2335 grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2336 if (ext->oe_end >= index)
2337 grants = 0;
2338 
2339 /* reserve grant if needed; a page the extent already covers needs none */
2340 client_obd_list_lock(&cli->cl_loi_list_lock);
2341 rc = osc_enter_cache_try(cli, oap, grants, 0);
2342 client_obd_list_unlock(&cli->cl_loi_list_lock);
2343 if (rc == 0) { /* try failed */
2344 grants = 0;
2345 need_release = 1;
2346 } else if (ext->oe_end < index) {
2347 int tmp = grants;
2348 /* try to expand this extent */
2349 rc = osc_extent_expand(ext, index, &tmp);
2350 if (rc < 0) {
2351 need_release = 1;
2352 /* don't free reserved grant */
2353 } else {
2354 OSC_EXTENT_DUMP(D_CACHE, ext,
2355 "expanded for %lu.\n", index);
2356 osc_unreserve_grant(cli, grants, tmp);
2357 grants = 0;
2358 }
2359 }
2360 rc = 0;
2361 } else if (ext) {
2362 /* index is located outside of active extent */
2363 need_release = 1;
2364 }
2365 if (need_release) {
2366 osc_extent_release(env, ext);
2367 oio->oi_active = NULL;
2368 ext = NULL;
2369 }
2370
2371 if (!ext) {
2372 int tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
2373
2374 /* try to find new extent to cover this page */
2375 LASSERT(!oio->oi_active);
2376 /* we may have allocated grant for this page if we failed
2377 * to expand the previous active extent.
2378 */
2379 LASSERT(ergo(grants > 0, grants >= tmp));
2380
2381 rc = 0;
2382 if (grants == 0) {
2383 /* we haven't allocated grant for this page. */
2384 rc = osc_enter_cache(env, cli, oap, tmp);
2385 if (rc == 0)
2386 grants = tmp;
2387 }
2388
2389 tmp = grants;
2390 if (rc == 0) {
2391 ext = osc_extent_find(env, osc, index, &tmp);
2392 if (IS_ERR(ext)) {
2393 LASSERT(tmp == grants);
2394 osc_exit_cache(cli, oap);
2395 rc = PTR_ERR(ext);
2396 ext = NULL;
2397 } else {
2398 oio->oi_active = ext;
2399 }
2400 }
2401 if (grants > 0)
2402 osc_unreserve_grant(cli, grants, tmp);
2403 }
2404
2405 LASSERT(ergo(rc == 0, ext));
2406 if (ext) {
2407 EASSERTF(ext->oe_end >= index && ext->oe_start <= index,
2408 ext, "index = %lu.\n", index);
2409 LASSERT((oap->oap_brw_flags & OBD_BRW_FROM_GRANT) != 0);
2410
2411 osc_object_lock(osc);
2412 if (ext->oe_nr_pages == 0)
2413 ext->oe_srvlock = ops->ops_srvlock;
2414 else
2415 LASSERT(ext->oe_srvlock == ops->ops_srvlock);
2416 ++ext->oe_nr_pages;
2417 list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
2418 osc_object_unlock(osc);
2419 }
2420 return rc;
2421 }
2422
2423 int osc_teardown_async_page(const struct lu_env *env,
2424 struct osc_object *obj, struct osc_page *ops)
2425 {
2426 struct osc_async_page *oap = &ops->ops_oap;
2427 struct osc_extent *ext = NULL;
2428 int rc = 0;
2429
2430 LASSERT(oap->oap_magic == OAP_MAGIC);
2431
2432 CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
2433 oap, ops, oap2cl_page(oap)->cp_index);
2434
2435 osc_object_lock(obj);
2436 if (!list_empty(&oap->oap_rpc_item)) {
2437 CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
2438 rc = -EBUSY;
2439 } else if (!list_empty(&oap->oap_pending_item)) {
2440 ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
2441 /* only truncated pages are allowed to be taken out.
2442 * See osc_extent_truncate() and osc_cache_truncate_start()
2443 * for details.
2444 */
2445 if (ext && ext->oe_state != OES_TRUNC) {
2446 OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
2447 oap2cl_page(oap)->cp_index);
2448 rc = -EBUSY;
2449 }
2450 }
2451 osc_object_unlock(obj);
2452 if (ext)
2453 osc_extent_put(env, ext);
2454 return rc;
2455 }
2456
2457 /**
2458 * This is called when a page is picked up by the kernel to be written out.
2459 *
2460 * We should find the corresponding extent and add the whole extent to the
2461 * urgent list. The extent may be being truncated or used; handle it
2462 * carefully.
2463 */
2464 int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
2465 struct osc_page *ops)
2466 {
2467 struct osc_extent *ext = NULL;
2468 struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
2469 struct cl_page *cp = ops->ops_cl.cpl_page;
2470 pgoff_t index = cp->cp_index;
2471 struct osc_async_page *oap = &ops->ops_oap;
2472 bool unplug = false;
2473 int rc = 0;
2474
2475 osc_object_lock(obj);
2476 ext = osc_extent_lookup(obj, index);
2477 if (!ext) {
2478 osc_extent_tree_dump(D_ERROR, obj);
2479 LASSERTF(0, "page index %lu is NOT covered.\n", index);
2480 }
2481
2482 switch (ext->oe_state) {
2483 case OES_RPC:
2484 case OES_LOCK_DONE:
2485 CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
2486 "flush an in-rpc page?\n");
2487 LASSERT(0);
2488 break;
2489 case OES_LOCKING:
2490 /* If we know this extent is being written out, we should abort
2491 * so that the writer can make this page ready. Otherwise, there
2492 * exists a deadlock problem because other process can wait for
2493 * page writeback bit holding page lock; and meanwhile in
2494 * vvp_page_make_ready(), we need to grab page lock before
2495 * really sending the RPC.
2496 */
2497 case OES_TRUNC:
2498 /* race with truncate, page will be redirtied */
2499 case OES_ACTIVE:
2500 /* The extent is active so we need to abort and let the caller
2501 * re-dirty the page. If we continued on here, and we were the
2502 * one making the extent active, we could deadlock waiting for
2503 * the page writeback to clear but it won't because the extent
2504 * is active and won't be written out.
2505 */
2506 rc = -EAGAIN;
2507 goto out;
2508 default:
2509 break;
2510 }
2511
2512 rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
2513 if (rc)
2514 goto out;
2515
2516 spin_lock(&oap->oap_lock);
2517 oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
2518 spin_unlock(&oap->oap_lock);
2519
2520 if (memory_pressure_get())
2521 ext->oe_memalloc = 1;
2522
2523 ext->oe_urgent = 1;
2524 if (ext->oe_state == OES_CACHE) {
2525 OSC_EXTENT_DUMP(D_CACHE, ext,
2526 "flush page %p make it urgent.\n", oap);
2527 if (list_empty(&ext->oe_link))
2528 list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2529 unplug = true;
2530 }
2531 rc = 0;
2532
2533 out:
2534 osc_object_unlock(obj);
2535 osc_extent_put(env, ext);
2536 if (unplug)
2537 osc_io_unplug_async(env, osc_cli(obj), obj);
2538 return rc;
2539 }
2540
2541 /**
2542 * This is called when a sync waiter receives an interruption. Its job is to
2543 * get the caller woken as soon as possible. If its page hasn't been put in an
2544 * RPC yet it can dequeue immediately. Otherwise it has to mark the RPC as
2545 * desiring interruption, which will forcefully complete the RPC once the RPC
2546 * has timed out.
2547 */
2548 int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
2549 {
2550 struct osc_async_page *oap = &ops->ops_oap;
2551 struct osc_object *obj = oap->oap_obj;
2552 struct client_obd *cli = osc_cli(obj);
2553 struct osc_extent *ext;
2554 struct osc_extent *found = NULL;
2555 struct list_head *plist;
2556 pgoff_t index = oap2cl_page(oap)->cp_index;
2557 int rc = -EBUSY;
2558 int cmd;
2559
2560 LASSERT(!oap->oap_interrupted);
2561 oap->oap_interrupted = 1;
2562
2563 /* Find out the caching extent */
2564 osc_object_lock(obj);
2565 if (oap->oap_cmd & OBD_BRW_WRITE) {
2566 plist = &obj->oo_urgent_exts;
2567 cmd = OBD_BRW_WRITE;
2568 } else {
2569 plist = &obj->oo_reading_exts;
2570 cmd = OBD_BRW_READ;
2571 }
2572 list_for_each_entry(ext, plist, oe_link) {
2573 if (ext->oe_start <= index && ext->oe_end >= index) {
2574 LASSERT(ext->oe_state == OES_LOCK_DONE);
2575 /* For OES_LOCK_DONE state extent, it has already held
2576 * a refcount for RPC.
2577 */
2578 found = osc_extent_get(ext);
2579 break;
2580 }
2581 }
2582 if (found) {
2583 list_del_init(&found->oe_link);
2584 osc_update_pending(obj, cmd, -found->oe_nr_pages);
2585 osc_object_unlock(obj);
2586
2587 osc_extent_finish(env, found, 0, -EINTR);
2588 osc_extent_put(env, found);
2589 rc = 0;
2590 } else {
2591 osc_object_unlock(obj);
2592 /* ok, it's been put in an rpc. only one oap gets a request
2593 * reference
2594 */
2595 if (oap->oap_request) {
2596 ptlrpc_mark_interrupted(oap->oap_request);
2597 ptlrpcd_wake(oap->oap_request);
2598 ptlrpc_req_finished(oap->oap_request);
2599 oap->oap_request = NULL;
2600 }
2601 }
2602
2603 osc_list_maint(cli, obj);
2604 return rc;
2605 }
2606
2607 int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
2608 struct list_head *list, int cmd, int brw_flags)
2609 {
2610 struct client_obd *cli = osc_cli(obj);
2611 struct osc_extent *ext;
2612 struct osc_async_page *oap, *tmp;
2613 int page_count = 0;
2614 int mppr = cli->cl_max_pages_per_rpc;
2615 pgoff_t start = CL_PAGE_EOF;
2616 pgoff_t end = 0;
2617
2618 list_for_each_entry(oap, list, oap_pending_item) {
2619 struct cl_page *cp = oap2cl_page(oap);
2620
2621 if (cp->cp_index > end)
2622 end = cp->cp_index;
2623 if (cp->cp_index < start)
2624 start = cp->cp_index;
2625 ++page_count;
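/* (page_count > mppr) evaluates to 0 or 1, so this doubles mppr
 * whenever the page count outgrows it, keeping mppr a power-of-two
 * multiple of cl_max_pages_per_rpc that covers every page queued
 */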
2626 mppr <<= (page_count > mppr);
2627 }
2628
2629 ext = osc_extent_alloc(obj);
2630 if (!ext) {
2631 list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
2632 list_del_init(&oap->oap_pending_item);
2633 osc_ap_completion(env, cli, oap, 0, -ENOMEM);
2634 }
2635 return -ENOMEM;
2636 }
2637
2638 ext->oe_rw = !!(cmd & OBD_BRW_READ);
2639 ext->oe_urgent = 1;
2640 ext->oe_start = start;
2641 ext->oe_end = ext->oe_max_end = end;
2642 ext->oe_obj = obj;
2643 ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
2644 ext->oe_nr_pages = page_count;
2645 ext->oe_mppr = mppr;
2646 list_splice_init(list, &ext->oe_pages);
2647
2648 osc_object_lock(obj);
2649 /* Reuse the initial refcount for RPC, don't drop it */
2650 osc_extent_state_set(ext, OES_LOCK_DONE);
2651 if (cmd & OBD_BRW_WRITE) {
2652 list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
2653 osc_update_pending(obj, OBD_BRW_WRITE, page_count);
2654 } else {
2655 list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
2656 osc_update_pending(obj, OBD_BRW_READ, page_count);
2657 }
2658 osc_object_unlock(obj);
2659
2660 osc_io_unplug_async(env, cli, obj);
2661 return 0;
2662 }
2663
2664 /**
2665 * Called by osc_io_setattr_start() to freeze and destroy covering extents.
2666 */
2667 int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
2668 struct osc_object *obj, __u64 size)
2669 {
2670 struct client_obd *cli = osc_cli(obj);
2671 struct osc_extent *ext;
2672 struct osc_extent *waiting = NULL;
2673 pgoff_t index;
2674 LIST_HEAD(list);
2675 int result = 0;
2676 bool partial;
2677
2678 /* pages at or beyond the page index corresponding to "size" will be truncated */
2679 index = cl_index(osc2cl(obj), size);
2680 partial = size > cl_offset(osc2cl(obj), index);
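/* "partial" means the new size falls inside the page at "index", so
 * that page must be truncated partially rather than dropped outright
 */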
2681
2682 again:
2683 osc_object_lock(obj);
2684 ext = osc_extent_search(obj, index);
2685 if (!ext)
2686 ext = first_extent(obj);
2687 else if (ext->oe_end < index)
2688 ext = next_extent(ext);
2689 while (ext) {
2690 EASSERT(ext->oe_state != OES_TRUNC, ext);
2691
2692 if (ext->oe_state > OES_CACHE || ext->oe_urgent) {
2693 /* if the extent is in the urgent state, a page in it
2694 * must already have been flushed by write_page().
2695 * We have to wait for this extent because we can't
2696 * truncate that page.
2697 */
2698 LASSERT(!ext->oe_hp);
2699 OSC_EXTENT_DUMP(D_CACHE, ext,
2700 "waiting for busy extent\n");
2701 waiting = osc_extent_get(ext);
2702 break;
2703 }
2704
2705 OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
2706
2707 osc_extent_get(ext);
2708 if (ext->oe_state == OES_ACTIVE) {
2709 /* we grab the inode mutex for the write path, but we
2710 * release it before releasing the extent (in
2711 * osc_io_end()), so there is a race window in which an
2712 * extent is still OES_ACTIVE when truncate starts.
2713 */
2714 LASSERT(!ext->oe_trunc_pending);
2715 ext->oe_trunc_pending = 1;
2716 } else {
2717 EASSERT(ext->oe_state == OES_CACHE, ext);
2718 osc_extent_state_set(ext, OES_TRUNC);
2719 osc_update_pending(obj, OBD_BRW_WRITE,
2720 -ext->oe_nr_pages);
2721 }
2722 EASSERT(list_empty(&ext->oe_link), ext);
2723 list_add_tail(&ext->oe_link, &list);
2724
2725 ext = next_extent(ext);
2726 }
2727 osc_object_unlock(obj);
2728
2729 osc_list_maint(cli, obj);
2730
2731 while (!list_empty(&list)) {
2732 int rc;
2733
2734 ext = list_entry(list.next, struct osc_extent, oe_link);
2735 list_del_init(&ext->oe_link);
2736
2737 /* the extent may be in OES_ACTIVE state because the inode
2738 * mutex is released before osc_io_end() in the file write case
2739 */
2740 if (ext->oe_state != OES_TRUNC)
2741 osc_extent_wait(env, ext, OES_TRUNC);
2742
2743 rc = osc_extent_truncate(ext, index, partial);
2744 if (rc < 0) {
2745 if (result == 0)
2746 result = rc;
2747
2748 OSC_EXTENT_DUMP(D_ERROR, ext,
2749 "truncate error %d\n", rc);
2750 } else if (ext->oe_nr_pages == 0) {
2751 osc_extent_remove(ext);
2752 } else {
2753 /* this must be an overlapping extent, which means only
2754 * part of the pages in this extent have been truncated.
2755 */
2756 EASSERTF(ext->oe_start <= index, ext,
2757 "trunc index = %lu/%d.\n", index, partial);
2758 /* fix index to skip this partially truncated extent */
2759 index = ext->oe_end + 1;
2760 partial = false;
2761
2762 /* we need to hold this extent in OES_TRUNC state so
2763 * that no writeback will happen. This is to avoid
2764 * BUG 17397.
2765 */
2766 LASSERT(!oio->oi_trunc);
2767 oio->oi_trunc = osc_extent_get(ext);
2768 OSC_EXTENT_DUMP(D_CACHE, ext,
2769 "trunc at %llu\n", size);
2770 }
2771 osc_extent_put(env, ext);
2772 }
2773 if (waiting) {
2774 int rc;
2775
2776 /* ignore the result of osc_extent_wait; the write initiator
2777 * should take care of it.
2778 */
2779 rc = osc_extent_wait(env, waiting, OES_INV);
2780 if (rc < 0)
2781 OSC_EXTENT_DUMP(D_CACHE, waiting, "error: %d.\n", rc);
2782
2783 osc_extent_put(env, waiting);
2784 waiting = NULL;
2785 goto again;
2786 }
2787 return result;
2788 }
2789
2790 /**
2791 * Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
2792 */
2793 void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
2794 struct osc_object *obj)
2795 {
2796 struct osc_extent *ext = oio->oi_trunc;
2797
2798 oio->oi_trunc = NULL;
2799 if (ext) {
2800 bool unplug = false;
2801
2802 EASSERT(ext->oe_nr_pages > 0, ext);
2803 EASSERT(ext->oe_state == OES_TRUNC, ext);
2804 EASSERT(!ext->oe_urgent, ext);
2805
2806 OSC_EXTENT_DUMP(D_CACHE, ext, "trunc -> cache.\n");
2807 osc_object_lock(obj);
2808 osc_extent_state_set(ext, OES_CACHE);
2809 if (ext->oe_fsync_wait && !ext->oe_urgent) {
2810 ext->oe_urgent = 1;
2811 list_move_tail(&ext->oe_link, &obj->oo_urgent_exts);
2812 unplug = true;
2813 }
2814 osc_update_pending(obj, OBD_BRW_WRITE, ext->oe_nr_pages);
2815 osc_object_unlock(obj);
2816 osc_extent_put(env, ext);
2817
2818 if (unplug)
2819 osc_io_unplug_async(env, osc_cli(obj), obj);
2820 }
2821 }
2822
2823 /**
2824 * Wait for extents in a specific range to be written out.
2825 * The caller must have called osc_cache_writeback_range() to issue the IO,
2826 * otherwise it will take a long time for this function to finish.
2827 *
2828 * The caller must hold the inode mutex, or cancel the exclusive DLM lock, so
2829 * that nobody else can dirty this range of the file while we're waiting for
2830 * the extents to be written.
2831 */
2832 int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
2833 pgoff_t start, pgoff_t end)
2834 {
2835 struct osc_extent *ext;
2836 pgoff_t index = start;
2837 int result = 0;
2838
2839 again:
2840 osc_object_lock(obj);
2841 ext = osc_extent_search(obj, index);
2842 if (!ext)
2843 ext = first_extent(obj);
2844 else if (ext->oe_end < index)
2845 ext = next_extent(ext);
2846 while (ext) {
2847 int rc;
2848
2849 if (ext->oe_start > end)
2850 break;
2851
2852 if (!ext->oe_fsync_wait) {
2853 ext = next_extent(ext);
2854 continue;
2855 }
2856
2857 EASSERT(ergo(ext->oe_state == OES_CACHE,
2858 ext->oe_hp || ext->oe_urgent), ext);
2859 EASSERT(ergo(ext->oe_state == OES_ACTIVE,
2860 !ext->oe_hp && ext->oe_urgent), ext);
2861
2862 index = ext->oe_end + 1;
2863 osc_extent_get(ext);
2864 osc_object_unlock(obj);
2865
2866 rc = osc_extent_wait(env, ext, OES_INV);
2867 if (result == 0)
2868 result = rc;
2869 osc_extent_put(env, ext);
2870 goto again;
2871 }
2872 osc_object_unlock(obj);
2873
2874 OSC_IO_DEBUG(obj, "sync file range.\n");
2875 return result;
2876 }
2877
2878 /**
2879 * Called to write out a range of an osc object.
2880 *
2881 * @hp: should be set if this is caused by lock cancellation;
2882 * @discard: set if dirty pages should be dropped - the file will be deleted
2883 * or truncated; this implies no extent is partially discarded.
2884 *
2885 * Return how many pages will be issued, or an error code if an error occurred.
2886 */
2887 int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
2888 pgoff_t start, pgoff_t end, int hp, int discard)
2889 {
2890 struct osc_extent *ext;
2891 LIST_HEAD(discard_list);
2892 bool unplug = false;
2893 int result = 0;
2894
2895 osc_object_lock(obj);
2896 ext = osc_extent_search(obj, start);
2897 if (!ext)
2898 ext = first_extent(obj);
2899 else if (ext->oe_end < start)
2900 ext = next_extent(ext);
2901 while (ext) {
2902 if (ext->oe_start > end)
2903 break;
2904
2905 ext->oe_fsync_wait = 1;
2906 switch (ext->oe_state) {
2907 case OES_CACHE:
2908 result += ext->oe_nr_pages;
2909 if (!discard) {
2910 struct list_head *list = NULL;
2911
2912 if (hp) {
2913 EASSERT(!ext->oe_hp, ext);
2914 ext->oe_hp = 1;
2915 list = &obj->oo_hp_exts;
2916 } else if (!ext->oe_urgent) {
2917 ext->oe_urgent = 1;
2918 list = &obj->oo_urgent_exts;
2919 }
2920 if (list)
2921 list_move_tail(&ext->oe_link, list);
2922 unplug = true;
2923 } else {
2924 /* the only discarder is lock cancellation, so
2925 * [start, end] must contain this extent
2926 */
2927 EASSERT(ext->oe_start >= start &&
2928 ext->oe_max_end <= end, ext);
2929 osc_extent_state_set(ext, OES_LOCKING);
2930 ext->oe_owner = current;
2931 list_move_tail(&ext->oe_link,
2932 &discard_list);
2933 osc_update_pending(obj, OBD_BRW_WRITE,
2934 -ext->oe_nr_pages);
2935 }
2936 break;
2937 case OES_ACTIVE:
2938 /* It's pretty bad to wait for ACTIVE extents, because
2939 * we don't know how long we will have to wait for them
2940 * to be flushed - they may be blocked awaiting more
2941 * grants. We do this for the correctness of fsync.
2942 */
2943 LASSERT(hp == 0 && discard == 0);
2944 ext->oe_urgent = 1;
2945 break;
2946 case OES_TRUNC:
2947 /* this extent is being truncated, can't do anything
2948 * for it now. it will be set to urgent after truncate
2949 * is finished in osc_cache_truncate_end().
2950 */
2951 default:
2952 break;
2953 }
2954 ext = next_extent(ext);
2955 }
2956 osc_object_unlock(obj);
2957
2958 LASSERT(ergo(!discard, list_empty(&discard_list)));
2959 if (!list_empty(&discard_list)) {
2960 struct osc_extent *tmp;
2961 int rc;
2962
2963 osc_list_maint(osc_cli(obj), obj);
2964 list_for_each_entry_safe(ext, tmp, &discard_list, oe_link) {
2965 list_del_init(&ext->oe_link);
2966 EASSERT(ext->oe_state == OES_LOCKING, ext);
2967
2968 /* Discard caching pages. We don't actually write this
2969 * extent out but we complete it as if we did.
2970 */
2971 rc = osc_extent_make_ready(env, ext);
2972 if (unlikely(rc < 0)) {
2973 OSC_EXTENT_DUMP(D_ERROR, ext,
2974 "make_ready returned %d\n", rc);
2975 if (result >= 0)
2976 result = rc;
2977 }
2978
2979 /* finish the extent as if the pages were sent */
2980 osc_extent_finish(env, ext, 0, 0);
2981 }
2982 }
2983
2984 if (unplug)
2985 osc_io_unplug(env, osc_cli(obj), obj);
2986
2987 if (hp || discard) {
2988 int rc;
2989
2990 rc = osc_cache_wait_range(env, obj, start, end);
2991 if (result >= 0 && rc < 0)
2992 result = rc;
2993 }
2994
2995 OSC_IO_DEBUG(obj, "cache page out.\n");
2996 return result;
2997 }
2998
2999 /** @} osc */