/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client IO.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_fid.h"
#include <linux/list.h>
#include <linux/sched.h>
#include "../include/cl_object.h"
#include "cl_internal.h"

/*****************************************************************************
 *
 * cl_io interface.
 *
 */

#define cl_io_for_each(slice, io) \
	list_for_each_entry((slice), &io->ci_layers, cis_linkage)
#define cl_io_for_each_reverse(slice, io) \
	list_for_each_entry_reverse((slice), &io->ci_layers, cis_linkage)
static inline int cl_io_type_is_valid(enum cl_io_type type)
{
	return CIT_READ <= type && type < CIT_OP_NR;
}

static inline int cl_io_is_loopable(const struct cl_io *io)
{
	return cl_io_type_is_valid(io->ci_type) && io->ci_type != CIT_MISC;
}
/**
 * Returns true iff there is an IO ongoing in the given environment.
 */
int cl_io_is_going(const struct lu_env *env)
{
	return cl_env_info(env)->clt_current_io != NULL;
}
EXPORT_SYMBOL(cl_io_is_going);
/**
 * cl_io invariant that holds at all times when exported cl_io_*() functions
 * are entered and left.
 */
static int cl_io_invariant(const struct cl_io *io)
{
	struct cl_io *up;

	up = io->ci_parent;
	return
		/*
		 * io can own pages only when it is ongoing. Sub-io might
		 * still be in CIS_LOCKED state when top-io is in
		 * CIS_IO_GOING.
		 */
		ergo(io->ci_owned_nr > 0, io->ci_state == CIS_IO_GOING ||
		     (io->ci_state == CIS_LOCKED && up));
}
/**
 * Finalize \a io, by calling cl_io_operations::cio_fini() bottom-to-top.
 */
void cl_io_fini(const struct lu_env *env, struct cl_io *io)
{
	struct cl_io_slice *slice;
	struct cl_thread_info *info;

	LINVRNT(cl_io_type_is_valid(io->ci_type));
	LINVRNT(cl_io_invariant(io));

	while (!list_empty(&io->ci_layers)) {
		slice = container_of(io->ci_layers.prev, struct cl_io_slice,
				     cis_linkage);
		list_del_init(&slice->cis_linkage);
		if (slice->cis_iop->op[io->ci_type].cio_fini)
			slice->cis_iop->op[io->ci_type].cio_fini(env, slice);
		/*
		 * Invalidate slice to catch use after free. This assumes that
		 * slices are allocated within session and can be touched
		 * after ->cio_fini() returns.
		 */
		slice->cis_io = NULL;
	}
	io->ci_state = CIS_FINI;
	info = cl_env_info(env);
	if (info->clt_current_io == io)
		info->clt_current_io = NULL;

	/* sanity check for layout change */
	switch (io->ci_type) {
	case CIT_READ:
	case CIT_WRITE:
		break;
	case CIT_FAULT:
	case CIT_FSYNC:
		LASSERT(!io->ci_need_restart);
		break;
	case CIT_SETATTR:
	case CIT_MISC:
		/* Check ignore layout change conf */
		LASSERT(ergo(io->ci_ignore_layout || !io->ci_verify_layout,
			     !io->ci_need_restart));
		break;
	default:
		LBUG();
	}
}
EXPORT_SYMBOL(cl_io_fini);
static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
		       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_object *scan;
	int result;

	LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
	LINVRNT(cl_io_type_is_valid(iot));
	LINVRNT(cl_io_invariant(io));

	io->ci_type = iot;
	INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
	INIT_LIST_HEAD(&io->ci_lockset.cls_done);
	INIT_LIST_HEAD(&io->ci_layers);

	result = 0;
	cl_object_for_each(scan, obj) {
		if (scan->co_ops->coo_io_init) {
			result = scan->co_ops->coo_io_init(env, scan, io);
			if (result != 0)
				break;
		}
	}
	if (result == 0)
		io->ci_state = CIS_INIT;
	return result;
}
/**
 * Initialize sub-io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * \pre obj != cl_object_top(obj)
 */
int cl_io_sub_init(const struct lu_env *env, struct cl_io *io,
		   enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj != cl_object_top(obj));
	if (!info->clt_current_io)
		info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_sub_init);

/**
 * Initialize \a io, by calling cl_io_operations::cio_init() top-to-bottom.
 *
 * Caller has to call cl_io_fini() after a call to cl_io_init(), no matter
 * what the latter returned.
 *
 * \pre obj == cl_object_top(obj)
 * \pre cl_io_type_is_valid(iot)
 * \post cl_io_type_is_valid(io->ci_type) && io->ci_type == iot
 */
int cl_io_init(const struct lu_env *env, struct cl_io *io,
	       enum cl_io_type iot, struct cl_object *obj)
{
	struct cl_thread_info *info = cl_env_info(env);

	LASSERT(obj == cl_object_top(obj));
	LASSERT(!info->clt_current_io);

	info->clt_current_io = io;
	return cl_io_init0(env, io, iot, obj);
}
EXPORT_SYMBOL(cl_io_init);
/**
 * Initialize read or write io.
 *
 * \pre iot == CIT_READ || iot == CIT_WRITE
 */
int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
		  enum cl_io_type iot, loff_t pos, size_t count)
{
	LINVRNT(iot == CIT_READ || iot == CIT_WRITE);

	LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
			 "io range: %u [%llu, %llu) %u %u\n",
			 iot, (__u64)pos, (__u64)pos + count,
			 io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
	io->u.ci_rw.crw_pos = pos;
	io->u.ci_rw.crw_count = count;
	return cl_io_init(env, io, iot, io->ci_obj);
}
EXPORT_SYMBOL(cl_io_rw_init);
static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
			      const struct cl_lock_descr *d1)
{
	return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
			  lu_object_fid(&d1->cld_obj->co_lu));
}
/**
 * Sort locks in lexicographical order of their (fid, start-offset) pairs.
 */
static void cl_io_locks_sort(struct cl_io *io)
{
	int done = 0;

	/* hidden treasure: bubble sort for now. */
	do {
		struct cl_io_lock_link *curr;
		struct cl_io_lock_link *prev;
		struct cl_io_lock_link *temp;

		done = 1;
		prev = NULL;

		list_for_each_entry_safe(curr, temp,
					 &io->ci_lockset.cls_todo,
					 cill_linkage) {
			if (prev) {
				switch (cl_lock_descr_sort(&prev->cill_descr,
							   &curr->cill_descr)) {
				case 0:
					/*
					 * IMPOSSIBLE: Identical locks are
					 *	       already removed at
					 *	       this point.
					 */
				default:
					LBUG();
				case 1:
					list_move_tail(&curr->cill_linkage,
						       &prev->cill_linkage);
					done = 0;
					continue; /* don't change prev: it's
						   * still "previous"
						   */
				case -1: /* already in order */
					break;
				}
			}
			prev = curr;
		}
	} while (!done);
}
static void cl_lock_descr_merge(struct cl_lock_descr *d0,
				const struct cl_lock_descr *d1)
{
	d0->cld_start = min(d0->cld_start, d1->cld_start);
	d0->cld_end = max(d0->cld_end, d1->cld_end);

	if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
		d0->cld_mode = CLM_WRITE;

	if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
		d0->cld_mode = CLM_GROUP;
}
static int cl_lockset_merge(const struct cl_lockset *set,
			    const struct cl_lock_descr *need)
{
	struct cl_io_lock_link *scan;

	list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
		if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
			continue;

		/* Merge locks for the same object because the ldlm lock server
		 * may expand the lock extent; otherwise there is a deadlock
		 * case if two conflicting locks are queued for the same object
		 * and the lock server expands one lock to overlap the other.
		 * The side effect is that it can generate a multi-stripe lock
		 * that may cause a cascading problem.
		 */
		cl_lock_descr_merge(&scan->cill_descr, need);
		CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
		       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
		       scan->cill_descr.cld_end);
		return 1;
	}
	return 0;
}
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
			   struct cl_lockset *set)
{
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	int result;

	result = 0;
	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		result = cl_lock_request(env, io, &link->cill_lock);
		if (result < 0)
			break;

		list_move(&link->cill_linkage, &set->cls_done);
	}
	return result;
}
/**
 * Takes locks necessary for the current iteration of io.
 *
 * Calls cl_io_operations::cio_lock() top-to-bottom to collect locks required
 * by layers for the current iteration. Then sort locks (to avoid dead-locks),
 * and acquire them, in the order of increasing addresses.
 */
int cl_io_lock(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IT_STARTED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_lock)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_lock(env, scan);
		if (result != 0)
			break;
	}
	if (result == 0) {
		cl_io_locks_sort(io);
		result = cl_lockset_lock(env, io, &io->ci_lockset);
	}
	if (result != 0)
		cl_io_unlock(env, io);
	else
		io->ci_state = CIS_LOCKED;
	return result;
}
EXPORT_SYMBOL(cl_io_lock);
/**
 * Release locks taken by io.
 */
void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
{
	struct cl_lockset *set;
	struct cl_io_lock_link *link;
	struct cl_io_lock_link *temp;
	const struct cl_io_slice *scan;

	LASSERT(cl_io_is_loopable(io));
	LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	set = &io->ci_lockset;

	list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
		list_del_init(&link->cill_linkage);
		if (link->cill_fini)
			link->cill_fini(env, link);
	}

	list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
		list_del_init(&link->cill_linkage);
		cl_lock_release(env, &link->cill_lock);
		if (link->cill_fini)
			link->cill_fini(env, link);
	}

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_unlock)
			scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
	}
	io->ci_state = CIS_UNLOCKED;
	LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
}
EXPORT_SYMBOL(cl_io_unlock);
/**
 * Prepares next iteration of io.
 *
 * Calls cl_io_operations::cio_iter_init() top-to-bottom. This exists to give
 * layers a chance to modify io parameters, e.g., so that lov can restrict io
 * to a single stripe.
 */
int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
	LINVRNT(cl_io_invariant(io));

	result = 0;
	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_iter_init)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_iter_init(env,
								      scan);
		if (result != 0)
			break;
	}
	if (result == 0)
		io->ci_state = CIS_IT_STARTED;
	return result;
}
EXPORT_SYMBOL(cl_io_iter_init);
/**
 * Finalizes io iteration.
 *
 * Calls cl_io_operations::cio_iter_fini() bottom-to-top.
 */
void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_UNLOCKED);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_iter_fini)
			scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
	}
	io->ci_state = CIS_IT_ENDED;
}
EXPORT_SYMBOL(cl_io_iter_fini);
/**
 * Records that read or write io progressed \a nob bytes forward.
 */
static void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io,
			     size_t nob)
{
	const struct cl_io_slice *scan;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
		nob == 0);
	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(cl_io_invariant(io));

	io->u.ci_rw.crw_pos += nob;
	io->u.ci_rw.crw_count -= nob;

	/* layers have to be notified. */
	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_advance)
			scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
								   nob);
	}
}
498 int cl_io_lock_add(const struct lu_env
*env
, struct cl_io
*io
,
499 struct cl_io_lock_link
*link
)
503 if (cl_lockset_merge(&io
->ci_lockset
, &link
->cill_descr
)) {
506 list_add(&link
->cill_linkage
, &io
->ci_lockset
.cls_todo
);
511 EXPORT_SYMBOL(cl_io_lock_add
);
513 static void cl_free_io_lock_link(const struct lu_env
*env
,
514 struct cl_io_lock_link
*link
)
/**
 * Allocates new lock link, and uses it to add a lock to a lockset.
 */
int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
			 struct cl_lock_descr *descr)
{
	struct cl_io_lock_link *link;
	int result;

	link = kzalloc(sizeof(*link), GFP_NOFS);
	if (link) {
		link->cill_descr = *descr;
		link->cill_fini = cl_free_io_lock_link;
		result = cl_io_lock_add(env, io, link);
		if (result) /* lock match */
			link->cill_fini(env, link);
	} else {
		result = -ENOMEM;
	}

	return result;
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
/**
 * Starts io by calling cl_io_operations::cio_start() top-to-bottom.
 */
int cl_io_start(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));

	io->ci_state = CIS_IO_GOING;
	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->op[io->ci_type].cio_start)
			continue;
		result = scan->cis_iop->op[io->ci_type].cio_start(env, scan);
		if (result != 0)
			break;
	}
	if (result >= 0)
		result = 0;
	return result;
}
EXPORT_SYMBOL(cl_io_start);
/**
 * Wait until current io iteration is finished by calling
 * cl_io_operations::cio_end() bottom-to-top.
 */
void cl_io_end(const struct lu_env *env, struct cl_io *io)
{
	const struct cl_io_slice *scan;

	LINVRNT(cl_io_is_loopable(io));
	LINVRNT(io->ci_state == CIS_IO_GOING);
	LINVRNT(cl_io_invariant(io));

	cl_io_for_each_reverse(scan, io) {
		if (scan->cis_iop->op[io->ci_type].cio_end)
			scan->cis_iop->op[io->ci_type].cio_end(env, scan);
		/* TODO: error handling. */
	}
	io->ci_state = CIS_IO_FINISHED;
}
EXPORT_SYMBOL(cl_io_end);
static const struct cl_page_slice *
cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
{
	const struct cl_page_slice *slice;

	slice = cl_page_at(page, ios->cis_obj->co_lu.lo_dev->ld_type);
	return slice;
}
/**
 * Called by read io, when page has to be read from the server.
 *
 * \see cl_io_operations::cio_read_page()
 */
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
		    struct cl_page *page)
{
	const struct cl_io_slice *scan;
	struct cl_2queue *queue;
	int result = 0;

	LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
	LINVRNT(cl_page_is_owned(page, io));
	LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
	LINVRNT(cl_io_invariant(io));

	queue = &io->ci_queue;

	cl_2queue_init(queue);
	/*
	 * ->cio_read_page() methods called in the loop below are supposed to
	 * never block waiting for network (the only subtle point is the
	 * creation of new pages for read-ahead that might result in cache
	 * shrinking, but currently only clean pages are shrunk and this
	 * requires no network io).
	 *
	 * Should this ever start blocking, a retry loop would be needed for
	 * "parallel io" (see CLO_REPEAT loops in cl_lock.c).
	 */
	cl_io_for_each(scan, io) {
		if (scan->cis_iop->cio_read_page) {
			const struct cl_page_slice *slice;

			slice = cl_io_slice_page(scan, page);
			result = scan->cis_iop->cio_read_page(env, scan, slice);
			if (result != 0)
				break;
		}
	}
	if (result == 0 && queue->c2_qin.pl_nr > 0)
		result = cl_io_submit_rw(env, io, CRT_READ, queue);
	/*
	 * Unlock unsent pages in case of error.
	 */
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_2queue_fini(env, queue);
	return result;
}
EXPORT_SYMBOL(cl_io_read_page);
/**
 * Commit a list of contiguous pages into writeback cache.
 *
 * \returns 0 if all pages committed, or errcode if error occurred.
 * \see cl_io_operations::cio_commit_async()
 */
int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
		       struct cl_page_list *queue, int from, int to,
		       cl_commit_cbt cb)
{
	const struct cl_io_slice *scan;
	int result = 0;

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->cio_commit_async)
			continue;
		result = scan->cis_iop->cio_commit_async(env, scan, queue,
							 from, to, cb);
		if (result != 0)
			break;
	}
	return result;
}
EXPORT_SYMBOL(cl_io_commit_async);
/**
 * Submits a list of pages for immediate io.
 *
 * After the function returns, the submitted pages have been moved to the
 * queue->c2_qout queue, while queue->c2_qin contains both the pages that did
 * not need to be submitted and the pages that failed to be submitted.
 *
 * \returns 0 if at least one page was submitted, error code otherwise.
 * \see cl_io_operations::cio_submit()
 */
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
		    enum cl_req_type crt, struct cl_2queue *queue)
{
	const struct cl_io_slice *scan;
	int result = 0;

	cl_io_for_each(scan, io) {
		if (!scan->cis_iop->cio_submit)
			continue;
		result = scan->cis_iop->cio_submit(env, scan, crt, queue);
		if (result != 0)
			break;
	}
	/*
	 * If ->cio_submit() failed, no pages were sent.
	 */
	LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
	return result;
}
EXPORT_SYMBOL(cl_io_submit_rw);
static void cl_page_list_assume(const struct lu_env *env,
				struct cl_io *io, struct cl_page_list *plist);
/**
 * Submit a sync_io and wait for the IO to be finished, or error happens.
 * If \a timeout is zero, it means to wait for the IO unconditionally.
 */
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
		      enum cl_req_type iot, struct cl_2queue *queue,
		      long timeout)
{
	struct cl_sync_io *anchor = &cl_env_info(env)->clt_anchor;
	struct cl_page *pg;
	int rc;

	cl_page_list_for_each(pg, &queue->c2_qin) {
		LASSERT(!pg->cp_sync_io);
		pg->cp_sync_io = anchor;
	}

	cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
	rc = cl_io_submit_rw(env, io, iot, queue);
	if (rc == 0) {
		/*
		 * If some pages weren't sent for any reason (e.g.,
		 * read found up-to-date pages in the cache, or write found
		 * clean pages), count them as completed to avoid infinite
		 * wait.
		 */
		cl_page_list_for_each(pg, &queue->c2_qin) {
			pg->cp_sync_io = NULL;
			cl_sync_io_note(env, anchor, 1);
		}

		/* wait for the IO to be finished. */
		rc = cl_sync_io_wait(env, anchor, timeout);
		cl_page_list_assume(env, io, &queue->c2_qout);
	} else {
		LASSERT(list_empty(&queue->c2_qout.pl_pages));
		cl_page_list_for_each(pg, &queue->c2_qin)
			pg->cp_sync_io = NULL;
	}
	return rc;
}
EXPORT_SYMBOL(cl_io_submit_sync);
/**
 * Pumps io through iterations calling
 *
 *    - cl_io_iter_init()
 *
 *    - cl_io_lock()
 *
 *    - cl_io_start()
 *
 *    - cl_io_end()
 *
 *    - cl_io_unlock()
 *
 *    - cl_io_iter_fini()
 *
 * repeatedly until there is no more io to do.
 */
int cl_io_loop(const struct lu_env *env, struct cl_io *io)
{
	int result = 0;

	LINVRNT(cl_io_is_loopable(io));

	do {
		size_t nob;

		io->ci_continue = 0;
		result = cl_io_iter_init(env, io);
		if (result == 0) {
			nob = io->ci_nob;
			result = cl_io_lock(env, io);
			if (result == 0) {
				/*
				 * Notify layers that locks have been taken,
				 * and do actual i/o.
				 *
				 *   - llite: kms, short read;
				 *   - llite: generic_file_read();
				 */
				result = cl_io_start(env, io);
				/*
				 * Send any remaining pending
				 * io, etc.
				 *
				 *   - llite: ll_rw_stats_tally.
				 */
				cl_io_end(env, io);
				cl_io_unlock(env, io);
				cl_io_rw_advance(env, io, io->ci_nob - nob);
			}
		}
		cl_io_iter_fini(env, io);
	} while (result == 0 && io->ci_continue);
	if (result == 0)
		result = io->ci_result;
	return result < 0 ? result : 0;
}
EXPORT_SYMBOL(cl_io_loop);
/**
 * Adds io slice to the cl_io.
 *
 * This is called by cl_object_operations::coo_io_init() methods to add a
 * per-layer state to the io. New state is added at the end of
 * cl_io::ci_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_req_slice_add(), cl_page_slice_add()
 */
void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
		     struct cl_object *obj,
		     const struct cl_io_operations *ops)
{
	struct list_head *linkage = &slice->cis_linkage;

	LASSERT((!linkage->prev && !linkage->next) ||
		list_empty(linkage));

	list_add_tail(linkage, &io->ci_layers);
	slice->cis_io = io;
	slice->cis_obj = obj;
	slice->cis_iop = ops;
}
EXPORT_SYMBOL(cl_io_slice_add);
/**
 * Initializes page list.
 */
void cl_page_list_init(struct cl_page_list *plist)
{
	plist->pl_nr = 0;
	INIT_LIST_HEAD(&plist->pl_pages);
	plist->pl_owner = current;
}
EXPORT_SYMBOL(cl_page_list_init);
/**
 * Adds a page to a page list.
 */
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
	/* it would be better to check that page is owned by "current" io, but
	 * it is not passed here.
	 */
	LASSERT(page->cp_owner);
	LINVRNT(plist->pl_owner == current);

	LASSERT(list_empty(&page->cp_batch));
	list_add_tail(&page->cp_batch, &plist->pl_pages);
	++plist->pl_nr;
	lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_get(page);
}
EXPORT_SYMBOL(cl_page_list_add);
/**
 * Removes a page from a page list.
 */
void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
		      struct cl_page *page)
{
	LASSERT(plist->pl_nr > 0);
	LASSERT(cl_page_is_vmlocked(env, page));
	LINVRNT(plist->pl_owner == current);

	list_del_init(&page->cp_batch);
	--plist->pl_nr;
	lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
	cl_page_put(env, page);
}
EXPORT_SYMBOL(cl_page_list_del);
/**
 * Moves a page from one page list to another.
 */
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
		       struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	LINVRNT(dst->pl_owner == current);
	LINVRNT(src->pl_owner == current);

	list_move_tail(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);

/**
 * Moves a page from one page list to the head of another list.
 */
void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
			    struct cl_page *page)
{
	LASSERT(src->pl_nr > 0);
	LINVRNT(dst->pl_owner == current);
	LINVRNT(src->pl_owner == current);

	list_move(&page->cp_batch, &dst->pl_pages);
	--src->pl_nr;
	++dst->pl_nr;
	lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
		      src, dst);
}
EXPORT_SYMBOL(cl_page_list_move_head);
/**
 * Splice the cl_page_list, just as list head does.
 */
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
{
	struct cl_page *page;
	struct cl_page *tmp;

	LINVRNT(list->pl_owner == current);
	LINVRNT(head->pl_owner == current);

	cl_page_list_for_each_safe(page, tmp, list)
		cl_page_list_move(head, list, page);
}
EXPORT_SYMBOL(cl_page_list_splice);

void cl_page_disown0(const struct lu_env *env,
		     struct cl_io *io, struct cl_page *pg);
/**
 * Disowns pages in a queue.
 */
void cl_page_list_disown(const struct lu_env *env,
			 struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist) {
		LASSERT(plist->pl_nr > 0);

		list_del_init(&page->cp_batch);
		--plist->pl_nr;
		/*
		 * cl_page_disown0 rather than usual cl_page_disown() is used,
		 * because pages are possibly in CPS_FREEING state already due
		 * to the call to cl_page_list_discard().
		 */
		/*
		 * XXX cl_page_disown0() will fail if page is not locked.
		 */
		cl_page_disown0(env, io, page);
		lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
			      plist);
		cl_page_put(env, page);
	}
}
EXPORT_SYMBOL(cl_page_list_disown);
/**
 * Releases pages from queue.
 */
void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
	struct cl_page *page;
	struct cl_page *temp;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each_safe(page, temp, plist)
		cl_page_list_del(env, plist, page);
	LASSERT(plist->pl_nr == 0);
}
EXPORT_SYMBOL(cl_page_list_fini);
/**
 * Assumes all pages in a queue.
 */
static void cl_page_list_assume(const struct lu_env *env,
				struct cl_io *io, struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);

	cl_page_list_for_each(page, plist)
		cl_page_assume(env, io, page);
}

/**
 * Discards all pages in a queue.
 */
static void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
				 struct cl_page_list *plist)
{
	struct cl_page *page;

	LINVRNT(plist->pl_owner == current);
	cl_page_list_for_each(page, plist)
		cl_page_discard(env, io, page);
}
/**
 * Initialize dual page queue.
 */
void cl_2queue_init(struct cl_2queue *queue)
{
	cl_page_list_init(&queue->c2_qin);
	cl_page_list_init(&queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_init);

/**
 * Disown pages in both lists of a 2-queue.
 */
void cl_2queue_disown(const struct lu_env *env,
		      struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_disown(env, io, &queue->c2_qin);
	cl_page_list_disown(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_disown);

/**
 * Discard (truncate) pages in both lists of a 2-queue.
 */
void cl_2queue_discard(const struct lu_env *env,
		       struct cl_io *io, struct cl_2queue *queue)
{
	cl_page_list_discard(env, io, &queue->c2_qin);
	cl_page_list_discard(env, io, &queue->c2_qout);
}
EXPORT_SYMBOL(cl_2queue_discard);

/**
 * Finalize both page lists of a 2-queue.
 */
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
	cl_page_list_fini(env, &queue->c2_qout);
	cl_page_list_fini(env, &queue->c2_qin);
}
EXPORT_SYMBOL(cl_2queue_fini);
/**
 * Initialize a 2-queue to contain \a page in its incoming page list.
 */
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
	cl_2queue_init(queue);
	/*
	 * Add a page to the incoming page list of 2-queue.
	 */
	cl_page_list_add(&queue->c2_qin, page);
}
EXPORT_SYMBOL(cl_2queue_init_page);
/**
 * Returns top-level io.
 *
 * \see cl_object_top()
 */
struct cl_io *cl_io_top(struct cl_io *io)
{
	while (io->ci_parent)
		io = io->ci_parent;
	return io;
}
EXPORT_SYMBOL(cl_io_top);
/**
 * Adds request slice to the compound request.
 *
 * This is called by cl_device_operations::cdo_req_init() methods to add a
 * per-layer state to the request. New state is added at the end of
 * cl_req::crq_layers list, that is, it is at the bottom of the stack.
 *
 * \see cl_lock_slice_add(), cl_page_slice_add(), cl_io_slice_add()
 */
void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
		      struct cl_device *dev,
		      const struct cl_req_operations *ops)
{
	list_add_tail(&slice->crs_linkage, &req->crq_layers);
	slice->crs_dev = dev;
	slice->crs_ops = ops;
	slice->crs_req = req;
}
EXPORT_SYMBOL(cl_req_slice_add);
static void cl_req_free(const struct lu_env *env, struct cl_req *req)
{
	unsigned int i;

	LASSERT(list_empty(&req->crq_pages));
	LASSERT(req->crq_nrpages == 0);
	LINVRNT(list_empty(&req->crq_layers));
	LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o));

	if (req->crq_o) {
		for (i = 0; i < req->crq_nrobjs; ++i) {
			struct cl_object *obj = req->crq_o[i].ro_obj;

			if (obj) {
				lu_object_ref_del_at(&obj->co_lu,
						     &req->crq_o[i].ro_obj_ref,
						     "cl_req", req);
				cl_object_put(env, obj);
			}
		}
		kfree(req->crq_o);
	}
	kfree(req);
}
static int cl_req_init(const struct lu_env *env, struct cl_req *req,
		       struct cl_page *page)
{
	struct cl_device *dev;
	struct cl_page_slice *slice;
	int result;

	result = 0;
	list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
		dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
		if (dev->cd_ops->cdo_req_init) {
			result = dev->cd_ops->cdo_req_init(env, dev, req);
			if (result != 0)
				break;
		}
	}
	return result;
}
/**
 * Invokes per-request transfer completion call-backs
 * (cl_req_operations::cro_completion()) bottom-to-top.
 */
void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
	struct cl_req_slice *slice;

	/*
	 * for the lack of list_for_each_entry_reverse_safe()...
	 */
	while (!list_empty(&req->crq_layers)) {
		slice = list_entry(req->crq_layers.prev,
				   struct cl_req_slice, crs_linkage);
		list_del_init(&slice->crs_linkage);
		if (slice->crs_ops->cro_completion)
			slice->crs_ops->cro_completion(env, slice, rc);
	}
	cl_req_free(env, req);
}
EXPORT_SYMBOL(cl_req_completion);
/**
 * Allocates new transfer request.
 */
struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
			    enum cl_req_type crt, int nr_objects)
{
	struct cl_req *req;

	LINVRNT(nr_objects > 0);

	req = kzalloc(sizeof(*req), GFP_NOFS);
	if (req) {
		int result;

		req->crq_type = crt;
		INIT_LIST_HEAD(&req->crq_pages);
		INIT_LIST_HEAD(&req->crq_layers);

		req->crq_o = kcalloc(nr_objects, sizeof(req->crq_o[0]),
				     GFP_NOFS);
		if (req->crq_o) {
			req->crq_nrobjs = nr_objects;
			result = cl_req_init(env, req, page);
		} else {
			result = -ENOMEM;
		}
		if (result != 0) {
			cl_req_completion(env, req, result);
			req = ERR_PTR(result);
		}
	} else {
		req = ERR_PTR(-ENOMEM);
	}
	return req;
}
EXPORT_SYMBOL(cl_req_alloc);
/**
 * Adds a page to a request.
 */
void cl_req_page_add(const struct lu_env *env,
		     struct cl_req *req, struct cl_page *page)
{
	struct cl_object *obj;
	struct cl_req_obj *rqo;
	unsigned int i;

	LASSERT(list_empty(&page->cp_flight));
	LASSERT(!page->cp_req);

	CL_PAGE_DEBUG(D_PAGE, env, page, "req %p, %d, %u\n",
		      req, req->crq_type, req->crq_nrpages);

	list_add_tail(&page->cp_flight, &req->crq_pages);
	++req->crq_nrpages;
	page->cp_req = req;
	obj = cl_object_top(page->cp_obj);
	for (i = 0, rqo = req->crq_o; obj != rqo->ro_obj; ++i, ++rqo) {
		if (!rqo->ro_obj) {
			rqo->ro_obj = obj;
			cl_object_get(obj);
			lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
					     "cl_req", req);
			break;
		}
	}
	LASSERT(i < req->crq_nrobjs);
}
EXPORT_SYMBOL(cl_req_page_add);
/**
 * Removes a page from a request.
 */
void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
	struct cl_req *req = page->cp_req;

	LASSERT(!list_empty(&page->cp_flight));
	LASSERT(req->crq_nrpages > 0);

	list_del_init(&page->cp_flight);
	--req->crq_nrpages;
	page->cp_req = NULL;
}
EXPORT_SYMBOL(cl_req_page_done);
/**
 * Notifies layers that request is about to depart by calling
 * cl_req_operations::cro_prep() top-to-bottom.
 */
int cl_req_prep(const struct lu_env *env, struct cl_req *req)
{
	unsigned int i;
	int result;
	const struct cl_req_slice *slice;

	/*
	 * Check that the caller of cl_req_alloc() didn't lie about the number
	 * of objects.
	 */
	for (i = 0; i < req->crq_nrobjs; ++i)
		LASSERT(req->crq_o[i].ro_obj);

	result = 0;
	list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
		if (slice->crs_ops->cro_prep) {
			result = slice->crs_ops->cro_prep(env, slice);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_req_prep);
/**
 * Fills in attributes that are passed to server together with transfer. Only
 * attributes from \a flags may be touched. This can be called multiple times
 * for the same request.
 */
void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
		     struct cl_req_attr *attr, u64 flags)
{
	const struct cl_req_slice *slice;
	struct cl_page *page;
	unsigned int i;

	LASSERT(!list_empty(&req->crq_pages));

	/* Take any page to use as a model. */
	page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);

	for (i = 0; i < req->crq_nrobjs; ++i) {
		list_for_each_entry(slice, &req->crq_layers, crs_linkage) {
			const struct cl_page_slice *scan;
			const struct cl_object *obj;

			scan = cl_page_at(page,
					  slice->crs_dev->cd_lu_dev.ld_type);
			obj = scan->cpl_obj;
			if (slice->crs_ops->cro_attr_set)
				slice->crs_ops->cro_attr_set(env, slice, obj,
							     attr + i, flags);
		}
	}
}
EXPORT_SYMBOL(cl_req_attr_set);
/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
 * wait for the IO to finish.
 */
void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
{
	wake_up_all(&anchor->csi_waitq);

	/* it's safe to nuke or reuse anchor now */
	atomic_set(&anchor->csi_barrier, 0);
}
EXPORT_SYMBOL(cl_sync_io_end);
/**
 * Initialize synchronous io wait anchor
 */
void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
		     void (*end)(const struct lu_env *, struct cl_sync_io *))
{
	memset(anchor, 0, sizeof(*anchor));
	init_waitqueue_head(&anchor->csi_waitq);
	atomic_set(&anchor->csi_sync_nr, nr);
	atomic_set(&anchor->csi_barrier, nr > 0);
	anchor->csi_sync_rc = 0;
	anchor->csi_end_io = end;
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
 * Wait until all IO completes. Transfer completion routine has to call
 * cl_sync_io_note() for every entity.
 */
int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
		    long timeout)
{
	struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
						  NULL, NULL, NULL);
	int rc;

	LASSERT(timeout >= 0);

	rc = l_wait_event(anchor->csi_waitq,
			  atomic_read(&anchor->csi_sync_nr) == 0,
			  &lwi);
	if (rc < 0) {
		CERROR("IO failed: %d, still wait for %d remaining entries\n",
		       rc, atomic_read(&anchor->csi_sync_nr));

		lwi = (struct l_wait_info) { 0 };
		(void)l_wait_event(anchor->csi_waitq,
				   atomic_read(&anchor->csi_sync_nr) == 0,
				   &lwi);
	} else {
		rc = anchor->csi_sync_rc;
	}
	LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);

	/* wait until cl_sync_io_note() has done wakeup */
	while (unlikely(atomic_read(&anchor->csi_barrier) != 0))
		cpu_relax();

	return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
/**
 * Indicate that transfer of a single page completed.
 */
void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
		     int ioret)
{
	if (anchor->csi_sync_rc == 0 && ioret < 0)
		anchor->csi_sync_rc = ioret;
	/*
	 * Synchronous IO done without releasing page lock (e.g., as a part of
	 * ->{prepare,commit}_write(). Completion is used to signal the end of
	 * IO.
	 */
	LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
	if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
		LASSERT(anchor->csi_end_io);
		anchor->csi_end_io(env, anchor);
		/* Can't access anchor any more */
	}
}
EXPORT_SYMBOL(cl_sync_io_note);