/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Lustre Lite I/O page cache routines shared by different kernel revs
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/writeback.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>
/* current_is_kswapd() */
#include <linux/swap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "../include/obd_cksum.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"
/**
 * Finalizes cl-data before exiting typical address_space operation. Dual to
 * ll_cl_init().
 */
static void ll_cl_fini(struct ll_cl_context *lcc)
{
	struct lu_env  *env  = lcc->lcc_env;
	struct cl_io   *io   = lcc->lcc_io;
	struct cl_page *page = lcc->lcc_page;

	LASSERT(lcc->lcc_cookie == current);
	LASSERT(env);

	if (page) {
		lu_ref_del(&page->cp_reference, "cl_io", io);
		cl_page_put(env, page);
	}

	cl_env_put(env, &lcc->lcc_refcheck);
}
/**
 * Initializes common cl-data at the typical address_space operation entry
 * point.
 */
static struct ll_cl_context *ll_cl_init(struct file *file,
					struct page *vmpage, int create)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_object *clob;
	struct ccc_io *cio;
	int refcheck;
	int result = 0;

	clob = ll_i2info(vmpage->mapping->host)->lli_clob;
	LASSERT(clob);

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return ERR_CAST(env);

	lcc = &vvp_env_info(env)->vti_io_ctx;
	memset(lcc, 0, sizeof(*lcc));
	lcc->lcc_env = env;
	lcc->lcc_refcheck = refcheck;
	lcc->lcc_cookie = current;

	cio = ccc_env_io(env);
	io = cio->cui_cl.cis_io;
	if (!io && create) {
		struct inode *inode = vmpage->mapping->host;
		loff_t pos;

		if (inode_trylock(inode)) {
			inode_unlock(inode);

			/* this is too bad. Someone is trying to write the
			 * page w/o holding inode mutex. This means we can
			 * add dirty pages into cache during truncate.
			 */
			CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
			       current->comm);
			return ERR_PTR(-EIO);
		}

		/*
		 * Loop-back driver calls ->prepare_write() methods directly,
		 * bypassing the file system ->write() operation, so the cl_io
		 * has to be created here.
		 */
		io = ccc_env_thread_io(env);
		ll_io_init(io, file, 1);

		/* No lock at all for this kind of IO - we can't do it because
		 * we have held page lock, it would cause deadlock.
		 * XXX: This causes poor performance to loop device - One page
		 *	per RPC.
		 *	In order to get better performance, users should use
		 *	lloop driver instead.
		 */
		io->ci_lockreq = CILR_NEVER;

		pos = vmpage->index << PAGE_SHIFT;

		/* Create a temp IO to serve write. */
		result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
		if (result == 0) {
			cio->cui_fd = LUSTRE_FPRIVATE(file);
			cio->cui_iter = NULL;
			result = cl_io_iter_init(env, io);
			if (result == 0) {
				result = cl_io_lock(env, io);
				if (result == 0)
					result = cl_io_start(env, io);
			}
		} else {
			result = io->ci_result;
		}
	}

	lcc->lcc_io = io;
	if (!io)
		result = -EIO;
	if (result == 0) {
		struct cl_page *page;

		LASSERT(io->ci_state == CIS_IO_GOING);
		LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
		page = cl_page_find(env, clob, vmpage->index, vmpage,
				    CPT_CACHEABLE);
		if (!IS_ERR(page)) {
			lcc->lcc_page = page;
			lu_ref_add(&page->cp_reference, "cl_io", io);
			result = 0;
		} else {
			result = PTR_ERR(page);
		}
	}
	if (result) {
		ll_cl_fini(lcc);
		lcc = ERR_PTR(result);
	}

	CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
	       vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
	       env, io);
	return lcc;
}
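/*
 * Typical call pattern for the two helpers above (illustrative sketch only,
 * grounded in the callers below):
 *
 *	lcc = ll_cl_init(file, vmpage, create);
 *	if (!IS_ERR(lcc)) {
 *		... use lcc->lcc_env, lcc->lcc_io, lcc->lcc_page ...
 *		ll_cl_fini(lcc);
 *	}
 *
 * ll_readpage() follows exactly this shape; the prepare/commit pair splits
 * it across two calls (init in ll_prepare_write(), fini via ll_commit_write()).
 */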
static struct ll_cl_context *ll_cl_get(void)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	int refcheck;

	env = cl_env_get(&refcheck);
	LASSERT(!IS_ERR(env));
	lcc = &vvp_env_info(env)->vti_io_ctx;
	LASSERT(env == lcc->lcc_env);
	LASSERT(current == lcc->lcc_cookie);
	cl_env_put(env, &refcheck);

	/* env was obtained in ll_cl_init(), so it is still usable. */
	return lcc;
}
/**
 * ->prepare_write() address space operation called by generic_file_write()
 * for every page during write.
 */
int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
		     unsigned to)
{
	struct ll_cl_context *lcc;
	int result;

	lcc = ll_cl_init(file, vmpage, 1);
	if (!IS_ERR(lcc)) {
		struct lu_env  *env  = lcc->lcc_env;
		struct cl_io   *io   = lcc->lcc_io;
		struct cl_page *page = lcc->lcc_page;

		cl_page_assume(env, io, page);
		result = cl_io_prepare_write(env, io, page, from, to);
		if (result == 0) {
			/*
			 * Add a reference, so that page is not evicted from
			 * the cache until ->commit_write() is called.
			 */
			cl_page_get(page);
			lu_ref_add(&page->cp_reference, "prepare_write",
				   current);
		} else {
			cl_page_unassume(env, io, page);
			ll_cl_fini(lcc);
		}
		/* returning 0 in prepare assumes commit must be called
		 * afterwards
		 */
	} else {
		result = PTR_ERR(lcc);
	}
	return result;
}
int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
		    unsigned to)
{
	struct ll_cl_context *lcc;
	struct lu_env *env;
	struct cl_io *io;
	struct cl_page *page;
	int result = 0;

	lcc = ll_cl_get();
	env = lcc->lcc_env;
	page = lcc->lcc_page;
	io = lcc->lcc_io;

	LASSERT(cl_page_is_owned(page, io));
	if (from != to) /* handle short write case. */
		result = cl_io_commit_write(env, io, page, from, to);
	if (cl_page_is_owned(page, io))
		cl_page_unassume(env, io, page);

	/*
	 * Release reference acquired by ll_prepare_write().
	 */
	lu_ref_del(&page->cp_reference, "prepare_write", current);
	cl_page_put(env, page);
	ll_cl_fini(lcc);
	return result;
}
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
 * Get readahead pages from the filesystem readahead pool of the client for a
 * thread.
 *
 * \param sbi   superblock for filesystem readahead state ll_ra_info
 * \param ria   per-thread readahead state
 * \param pages number of pages requested for readahead for the thread.
 *
 * WARNING: This algorithm is used to reduce contention on sbi->ll_lock.
 * It should work well if the ra_max_pages is much greater than the single
 * file's read-ahead window, and not too many threads contending for
 * these readahead pages.
 *
 * TODO: There may be a 'global sync problem' if many threads are trying
 * to get an ra budget that is larger than the remaining readahead pages
 * and reach here at exactly the same time. They will compute \a ret to
 * consume the remaining pages, but will fail at atomic_add_return() and
 * get a zero ra window, although there is still ra space remaining. - Jay
 */
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
				     struct ra_io_arg *ria,
				     unsigned long pages)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;
	long ret;

	/* If the read-ahead pages left are less than 1M, do not do
	 * read-ahead, otherwise it will form small read RPCs (< 1M),
	 * which hurt server performance a lot.
	 */
	ret = min(ra->ra_max_pages - atomic_read(&ra->ra_cur_pages), pages);
	if (ret < 0 || ret < min_t(long, PTLRPC_MAX_BRW_PAGES, pages)) {
		ret = 0;
		goto out;
	}

	/* If the non-strided (ria_pages == 0) readahead window
	 * (ria_start + ret) has grown across an RPC boundary, then trim
	 * readahead size by the amount beyond the RPC so it ends on an
	 * RPC boundary. If the readahead window is already ending on
	 * an RPC boundary (beyond_rpc == 0), or smaller than a full
	 * RPC (beyond_rpc < ret) the readahead size is unchanged.
	 * The (beyond_rpc != 0) check is skipped since the conditional
	 * branch is more expensive than subtracting zero from the result.
	 *
	 * Strided read is left unaligned to avoid small fragments beyond
	 * the RPC boundary from needing an extra read RPC.
	 */
	if (ria->ria_pages == 0) {
		long beyond_rpc = (ria->ria_start + ret) % PTLRPC_MAX_BRW_PAGES;

		if (/* beyond_rpc != 0 && */ beyond_rpc < ret)
			ret -= beyond_rpc;
	}

	if (atomic_add_return(ret, &ra->ra_cur_pages) > ra->ra_max_pages) {
		atomic_sub(ret, &ra->ra_cur_pages);
		ret = 0;
	}

out:
	return ret;
}
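/*
 * Worked example of the boundary trim above (illustrative only, assuming
 * 4 KiB pages so that PTLRPC_MAX_BRW_PAGES == 256): with ria_start == 100
 * and ret == 300, the window would end at page 400 and
 * beyond_rpc == 400 % 256 == 144. Since 144 < 300, ret is trimmed to 156
 * and the window ends exactly at page 256, an RPC boundary.
 */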
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;

	atomic_sub(len, &ra->ra_cur_pages);
}
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
{
	LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
	lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
	struct ll_sb_info *sbi = ll_i2sbi(mapping->host);

	ll_ra_stats_inc_sbi(sbi, which);
}
#define RAS_CDEBUG(ras) \
	CDEBUG(D_READA,							     \
	       "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu "   \
	       "csr %lu sf %lu sp %lu sl %lu\n",			     \
	       ras->ras_last_readpage, ras->ras_consecutive_requests,	     \
	       ras->ras_consecutive_pages, ras->ras_window_start,	     \
	       ras->ras_window_len, ras->ras_next_readahead,		     \
	       ras->ras_requests, ras->ras_request_index,		     \
	       ras->ras_consecutive_stride_requests, ras->ras_stride_offset, \
	       ras->ras_stride_pages, ras->ras_stride_length)
static int index_in_window(unsigned long index, unsigned long point,
			   unsigned long before, unsigned long after)
{
	unsigned long start = point - before, end = point + after;

	if (start > point)
		start = 0;
	if (end < point)
		end = ~0;

	return start <= index && index <= end;
}
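/*
 * Example (illustrative): index_in_window(idx, 100, 8, 8) accepts
 * 92 <= idx <= 108; the clamps above keep the window sane when
 * point - before underflows or point + after overflows.
 */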
static struct ll_readahead_state *ll_ras_get(struct file *f)
{
	struct ll_file_data *fd;

	fd = LUSTRE_FPRIVATE(f);
	return &fd->fd_ras;
}
void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
{
	struct ll_readahead_state *ras;

	ras = ll_ras_get(f);

	spin_lock(&ras->ras_lock);
	ras->ras_requests++;
	ras->ras_request_index = 0;
	ras->ras_consecutive_requests++;
	rar->lrr_reader = current;

	list_add(&rar->lrr_linkage, &ras->ras_read_beads);
	spin_unlock(&ras->ras_lock);
}
void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
{
	struct ll_readahead_state *ras;

	ras = ll_ras_get(f);

	spin_lock(&ras->ras_lock);
	list_del_init(&rar->lrr_linkage);
	spin_unlock(&ras->ras_lock);
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
			      struct cl_page_list *queue, struct cl_page *page,
			      struct page *vmpage)
{
	struct ccc_page *cp;
	int rc;

	rc = 0;
	cl_page_assume(env, io, page);
	lu_ref_add(&page->cp_reference, "ra", current);
	cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
	if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
		rc = cl_page_is_under_lock(env, io, page);
		if (rc == -EBUSY) {
			cp->cpg_defer_uptodate = 1;
			cl_page_list_add(queue, page);
			rc = 1;
		} else {
			cl_page_delete(env, page);
			rc = -ENOLCK;
		}
	} else {
		/* skip completed pages */
		cl_page_unassume(env, io, page);
	}
	lu_ref_del(&page->cp_reference, "ra", current);
	cl_page_put(env, page);
	return rc;
}
/**
 * Initiates read-ahead of a page with given index.
 *
 * \retval +ve: page was added to \a queue.
 *
 * \retval -ENOLCK: there is no extent lock for this part of a file, stop
 *		    read-ahead.
 *
 * \retval -ve, 0: page wasn't added to \a queue for other reason.
 */
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
			      struct cl_page_list *queue,
			      pgoff_t index, struct address_space *mapping)
{
	struct page *vmpage;
	struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
	struct cl_page *page;
	enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
	int rc = 0;
	const char *msg = NULL;

	vmpage = grab_cache_page_nowait(mapping, index);
	if (vmpage) {
		/* Check if vmpage was truncated or reclaimed */
		if (vmpage->mapping == mapping) {
			page = cl_page_find(env, clob, vmpage->index,
					    vmpage, CPT_CACHEABLE);
			if (!IS_ERR(page)) {
				rc = cl_read_ahead_page(env, io, queue,
							page, vmpage);
				if (rc == -ENOLCK) {
					which = RA_STAT_FAILED_MATCH;
					msg = "lock match failed";
				}
			} else {
				which = RA_STAT_FAILED_GRAB_PAGE;
				msg = "cl_page_find failed";
			}
		} else {
			which = RA_STAT_WRONG_GRAB_PAGE;
			msg = "g_c_p_n returned invalid page";
		}
		if (rc != 1)
			unlock_page(vmpage);
		put_page(vmpage);
	} else {
		which = RA_STAT_FAILED_GRAB_PAGE;
		msg = "g_c_p_n failed";
	}
	if (msg) {
		ll_ra_stats_inc(mapping, which);
		CDEBUG(D_READA, "%s\n", msg);
	}
	return rc;
}
#define RIA_DEBUG(ria)						      \
	CDEBUG(D_READA, "rs %lu re %lu ro %lu rl %lu rp %lu\n",       \
	       ria->ria_start, ria->ria_end, ria->ria_stoff,	      \
	       ria->ria_length, ria->ria_pages)

/* Limit this to the blocksize instead of PTLRPC_BRW_MAX_SIZE, since we don't
 * know what the actual RPC size is. If this needs to change, it makes more
 * sense to tune the i_blkbits value for the file based on the OSTs it is
 * striped over, rather than having a constant value for all files here.
 */

/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)).
 * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled
 * by default, this should be adjusted corresponding with max_read_ahead_mb
 * and max_read_ahead_per_file_mb, otherwise the readahead budget can be used
 * up quickly, which will affect read performance significantly. See LU-2816.
 */
#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT)
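/*
 * Illustrative numbers: with 4 KiB pages, ONE_MB_BRW_SIZE >> PAGE_SHIFT is
 * 256, so each qualifying request grows the read-ahead window by 256 pages
 * (1 MiB), independent of the inode passed in.
 */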
static inline int stride_io_mode(struct ll_readahead_state *ras)
{
	return ras->ras_consecutive_stride_requests > 1;
}
/* The function calculates how many pages will be read in
 * [off, off + length], in such a stride IO area,
 * stride_offset = st_off, stride_length = st_len,
 * stride_pages = st_pgs
 *
 *   |------------------|*****|------------------|*****|------------|*****|....
 * st_off
 *   |--- st_pgs ---|
 *   |-----   st_len   -----|
 *
 *	How many pages it should read in such pattern
 *	|-------------------------------------------------------------|
 *	off
 *	|<------		  length		      ------->|
 *
 *	=   |<----->|  +  |-------------------------------------| +   |---|
 *	   start_left		   st_pgs * i		      end_left
 */
static unsigned long
stride_pg_count(pgoff_t st_off, unsigned long st_len, unsigned long st_pgs,
		unsigned long off, unsigned long length)
{
	__u64 start = off > st_off ? off - st_off : 0;
	__u64 end = off + length > st_off ? off + length - st_off : 0;
	unsigned long start_left = 0;
	unsigned long end_left = 0;
	unsigned long pg_count;

	if (st_len == 0 || length == 0 || end == 0)
		return length;

	start_left = do_div(start, st_len);
	if (start_left < st_pgs)
		start_left = st_pgs - start_left;
	else
		start_left = 0;

	end_left = do_div(end, st_len);
	if (end_left > st_pgs)
		end_left = st_pgs;

	CDEBUG(D_READA, "start %llu, end %llu start_left %lu end_left %lu\n",
	       start, end, start_left, end_left);

	if (start == end)
		pg_count = end_left - (st_pgs - start_left);
	else
		pg_count = start_left + st_pgs * (end - start - 1) + end_left;

	CDEBUG(D_READA, "st_off %lu, st_len %lu st_pgs %lu off %lu length %lu pgcount %lu\n",
	       st_off, st_len, st_pgs, off, length, pg_count);

	return pg_count;
}
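/*
 * Worked example (illustrative only): st_off = 0, st_len = 8, st_pgs = 2
 * (2 pages of data at the start of every 8-page stride) and a request for
 * [off = 1, off + length = 19): start = 1 gives start_left = 2 - 1 = 1;
 * end = 19 gives quotient 2, remainder 3, so end_left = 2; hence
 * pg_count = 1 + 2 * (2 - 0 - 1) + 2 = 5, matching the pages 1, 8, 9, 16
 * and 17 that actually fall inside the stride pattern.
 */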
static int ria_page_count(struct ra_io_arg *ria)
{
	__u64 length = ria->ria_end >= ria->ria_start ?
		       ria->ria_end - ria->ria_start + 1 : 0;

	return stride_pg_count(ria->ria_stoff, ria->ria_length,
			       ria->ria_pages, ria->ria_start,
			       length);
}
/* Check whether the index is in the defined ra-window */
static int ras_inside_ra_window(unsigned long idx, struct ra_io_arg *ria)
{
	/* If ria_length == ria_pages, it means non-stride I/O mode;
	 * idx should always be inside the read-ahead window in this case.
	 * For stride I/O mode, just check whether the idx is inside
	 * the ria_pages.
	 */
	return ria->ria_length == 0 || ria->ria_length == ria->ria_pages ||
	       (idx >= ria->ria_stoff && (idx - ria->ria_stoff) %
		ria->ria_length < ria->ria_pages);
}
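/*
 * Example (illustrative): with ria_stoff = 0, ria_length = 8 and
 * ria_pages = 2 the window accepts idx 0, 1, 8, 9, 16, 17, ... and rejects
 * the six-page gaps in between; with ria_length == 0 every idx is accepted.
 */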
static int ll_read_ahead_pages(const struct lu_env *env,
			       struct cl_io *io, struct cl_page_list *queue,
			       struct ra_io_arg *ria,
			       unsigned long *reserved_pages,
			       struct address_space *mapping,
			       unsigned long *ra_end)
{
	int rc, count = 0, stride_ria;
	unsigned long page_idx;

	LASSERT(ria);
	RIA_DEBUG(ria);

	stride_ria = ria->ria_length > ria->ria_pages && ria->ria_pages > 0;
	for (page_idx = ria->ria_start;
	     page_idx <= ria->ria_end && *reserved_pages > 0; page_idx++) {
		if (ras_inside_ra_window(page_idx, ria)) {
			/* If the page is inside the read-ahead window */
			rc = ll_read_ahead_page(env, io, queue,
						page_idx, mapping);
			if (rc == 1) {
				(*reserved_pages)--;
				count++;
			} else if (rc == -ENOLCK)
				break;
		} else if (stride_ria) {
			/* If it is not in the read-ahead window, and it is
			 * in stride read-ahead mode, then check whether it
			 * should skip the stride gap.
			 */
			pgoff_t offset;
			/* FIXME: This assertion is only valid for
			 * forward read-ahead; it will be fixed when backward
			 * read-ahead is implemented.
			 */
			LASSERTF(page_idx > ria->ria_stoff, "Invalid page_idx %lu rs %lu re %lu ro %lu rl %lu rp %lu\n",
				 page_idx,
				 ria->ria_start, ria->ria_end, ria->ria_stoff,
				 ria->ria_length, ria->ria_pages);
			offset = page_idx - ria->ria_stoff;
			offset = offset % (ria->ria_length);
			if (offset > ria->ria_pages) {
				page_idx += ria->ria_length - offset;
				CDEBUG(D_READA, "i %lu skip %lu\n", page_idx,
				       ria->ria_length - offset);
			}
		}
	}
	*ra_end = page_idx;
	return count;
}
int ll_readahead(const struct lu_env *env, struct cl_io *io,
		 struct ll_readahead_state *ras, struct address_space *mapping,
		 struct cl_page_list *queue, int flags)
{
	struct vvp_io *vio = vvp_env_io(env);
	struct vvp_thread_info *vti = vvp_env_info(env);
	struct cl_attr *attr = ccc_env_thread_attr(env);
	unsigned long start = 0, end = 0, reserved;
	unsigned long ra_end, len;
	struct inode *inode;
	struct ll_ra_read *bead;
	struct ra_io_arg *ria = &vti->vti_ria;
	struct ll_inode_info *lli;
	struct cl_object *clob;
	int ret = 0;
	__u64 kms;

	inode = mapping->host;
	lli = ll_i2info(inode);
	clob = lli->lli_clob;

	memset(ria, 0, sizeof(*ria));

	cl_object_attr_lock(clob);
	ret = cl_object_attr_get(env, clob, attr);
	cl_object_attr_unlock(clob);

	if (ret != 0)
		return ret;
	kms = attr->cat_kms;
	if (kms == 0) {
		ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
		return 0;
	}

	spin_lock(&ras->ras_lock);
	if (vio->cui_ra_window_set)
		bead = &vio->cui_bead;
	else
		bead = NULL;

	/* Enlarge the RA window to encompass the full read */
	if (bead && ras->ras_window_start + ras->ras_window_len <
	    bead->lrr_start + bead->lrr_count) {
		ras->ras_window_len = bead->lrr_start + bead->lrr_count -
				      ras->ras_window_start;
	}
	/* Reserve a part of the read-ahead window that we'll be issuing */
	if (ras->ras_window_len) {
		start = ras->ras_next_readahead;
		end = ras->ras_window_start + ras->ras_window_len - 1;
	}
	if (end != 0) {
		unsigned long rpc_boundary;
		/*
		 * Align RA window to an optimal boundary.
		 *
		 * XXX This would be better to align to cl_max_pages_per_rpc
		 * instead of PTLRPC_MAX_BRW_PAGES, because the RPC size may
		 * be aligned to the RAID stripe size in the future and that
		 * is more important than the RPC size.
		 */
		/* Note: we only trim the RPC, instead of extending the RPC
		 * to the boundary, to avoid reading too many pages during
		 * RA; because RA is usually speculative, reading too much
		 * data would waste resources.
		 */
		rpc_boundary = (end + 1) & (~(PTLRPC_MAX_BRW_PAGES - 1));
		if (rpc_boundary > 0)
			rpc_boundary--;

		if (rpc_boundary > start)
			end = rpc_boundary;

		/* Truncate RA window to end of file */
		end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT));

		ras->ras_next_readahead = max(end, end + 1);
		RAS_CDEBUG(ras);
	}
	ria->ria_start = start;
	ria->ria_end = end;
	/* If stride I/O mode is detected, get the stride window */
	if (stride_io_mode(ras)) {
		ria->ria_stoff = ras->ras_stride_offset;
		ria->ria_length = ras->ras_stride_length;
		ria->ria_pages = ras->ras_stride_pages;
	}
	spin_unlock(&ras->ras_lock);

	if (end == 0) {
		ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
		return 0;
	}
	len = ria_page_count(ria);
	if (len == 0)
		return 0;

	reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
	if (reserved < len)
		ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);

	CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
	       atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
	       ll_i2sbi(inode)->ll_ra_info.ra_max_pages);

	ret = ll_read_ahead_pages(env, io, queue,
				  ria, &reserved, mapping, &ra_end);

	if (reserved != 0)
		ll_ra_count_put(ll_i2sbi(inode), reserved);

	if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
		ll_ra_stats_inc(mapping, RA_STAT_EOF);

	/* if we didn't get to the end of the region we reserved from
	 * the ras we need to go back and update the ras so that the
	 * next read-ahead tries from where we left off. we only do so
	 * if the region we failed to issue read-ahead on is still ahead
	 * of the app and behind the next index to start read-ahead from.
	 */
	CDEBUG(D_READA, "ra_end %lu end %lu stride end %lu\n",
	       ra_end, end, ria->ria_end);

	if (ra_end != end + 1) {
		spin_lock(&ras->ras_lock);
		if (ra_end < ras->ras_next_readahead &&
		    index_in_window(ra_end, ras->ras_window_start, 0,
				    ras->ras_window_len)) {
			ras->ras_next_readahead = ra_end;
			RAS_CDEBUG(ras);
		}
		spin_unlock(&ras->ras_lock);
	}

	return ret;
}
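/*
 * Illustrative trim from the rpc_boundary logic above (assuming 4 KiB pages,
 * so PTLRPC_MAX_BRW_PAGES == 256): for a window [start = 100, end = 519],
 * rpc_boundary = 520 & ~255 = 512, then 511 after the decrement; since
 * 511 > 100 the window is cut back to end = 511, so the issued read-ahead
 * finishes exactly on an RPC boundary.
 */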
static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
			  unsigned long index)
{
	ras->ras_window_start = index & (~(RAS_INCREASE_STEP(inode) - 1));
}
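/*
 * Example (illustrative, RAS_INCREASE_STEP == 256): index 1000 yields
 * ras_window_start = 1000 & ~255 = 768, i.e. the window start is aligned
 * down to a 1 MiB boundary.
 */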
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_reset(struct inode *inode, struct ll_readahead_state *ras,
		      unsigned long index)
{
	ras->ras_last_readpage = index;
	ras->ras_consecutive_requests = 0;
	ras->ras_consecutive_pages = 0;
	ras->ras_window_len = 0;
	ras_set_start(inode, ras, index);
	ras->ras_next_readahead = max(ras->ras_window_start, index);

	RAS_CDEBUG(ras);
}
/* called with the ras_lock held or from places where it doesn't matter */
static void ras_stride_reset(struct ll_readahead_state *ras)
{
	ras->ras_consecutive_stride_requests = 0;
	ras->ras_stride_length = 0;
	ras->ras_stride_pages = 0;
	RAS_CDEBUG(ras);
}
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
{
	spin_lock_init(&ras->ras_lock);
	ras_reset(inode, ras, 0);
	ras->ras_requests = 0;
	INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
 * Check whether the read request is in the stride window.
 * If it is in the stride window, return 1, otherwise return 0.
 */
static int index_in_stride_window(struct ll_readahead_state *ras,
				  unsigned long index)
{
	unsigned long stride_gap;

	if (ras->ras_stride_length == 0 || ras->ras_stride_pages == 0 ||
	    ras->ras_stride_pages == ras->ras_stride_length)
		return 0;

	stride_gap = index - ras->ras_last_readpage - 1;

	/* If it is a contiguous read */
	if (stride_gap == 0)
		return ras->ras_consecutive_pages + 1 <= ras->ras_stride_pages;

	/* Otherwise check the stride by itself */
	return (ras->ras_stride_length - ras->ras_stride_pages) == stride_gap &&
		ras->ras_consecutive_pages == ras->ras_stride_pages;
}
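/*
 * Example (illustrative): with ras_stride_length = 8, ras_stride_pages = 2,
 * ras_last_readpage = 1 and ras_consecutive_pages = 2, a read at index 8
 * has stride_gap = 6 == 8 - 2, so it matches the stride and returns 1;
 * a read at index 2 (stride_gap = 0) takes the contiguous branch and
 * returns 0, because a third consecutive page no longer fits in the
 * 2-page stride segment.
 */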
static void ras_update_stride_detector(struct ll_readahead_state *ras,
				       unsigned long index)
{
	unsigned long stride_gap = index - ras->ras_last_readpage - 1;

	if (!stride_io_mode(ras) && (stride_gap != 0 ||
	    ras->ras_consecutive_stride_requests == 0)) {
		ras->ras_stride_pages = ras->ras_consecutive_pages;
		ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;
	}
	LASSERT(ras->ras_request_index == 0);
	LASSERT(ras->ras_consecutive_stride_requests == 0);

	if (index <= ras->ras_last_readpage) {
		/* Reset stride window for forward read */
		ras_stride_reset(ras);
		return;
	}

	ras->ras_stride_pages = ras->ras_consecutive_pages;
	ras->ras_stride_length = stride_gap + ras->ras_consecutive_pages;

	RAS_CDEBUG(ras);
}
/* The stride read-ahead window will be increased by inc_len according to
 * the stride I/O pattern.
 */
static void ras_stride_increase_window(struct ll_readahead_state *ras,
				       struct ll_ra_info *ra,
				       unsigned long inc_len)
{
	unsigned long left, step, window_len;
	unsigned long stride_len;

	LASSERT(ras->ras_stride_length > 0);
	LASSERTF(ras->ras_window_start + ras->ras_window_len
		 >= ras->ras_stride_offset, "window_start %lu, window_len %lu stride_offset %lu\n",
		 ras->ras_window_start,
		 ras->ras_window_len, ras->ras_stride_offset);

	stride_len = ras->ras_window_start + ras->ras_window_len -
		     ras->ras_stride_offset;

	left = stride_len % ras->ras_stride_length;
	window_len = ras->ras_window_len - left;

	if (left < ras->ras_stride_pages)
		left += inc_len;
	else
		left = ras->ras_stride_pages + inc_len;

	LASSERT(ras->ras_stride_pages != 0);

	step = left / ras->ras_stride_pages;
	left %= ras->ras_stride_pages;

	window_len += step * ras->ras_stride_length + left;

	if (stride_pg_count(ras->ras_stride_offset, ras->ras_stride_length,
			    ras->ras_stride_pages, ras->ras_stride_offset,
			    window_len) <= ra->ra_max_pages_per_file)
		ras->ras_window_len = window_len;

	RAS_CDEBUG(ras);
}
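/*
 * Worked example (illustrative only): stride_offset = window_start = 0,
 * window_len = 16, stride_length = 8, stride_pages = 2, inc_len = 4.
 * Then left = 16 % 8 = 0 and window_len = 16; since 0 < 2, left becomes
 * 0 + 4 = 4, step = 4 / 2 = 2, left = 0, and the window grows to
 * 16 + 2 * 8 = 32: adding 4 pages of wanted data stretches the window by
 * two full 8-page stride periods.
 */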
static void ras_increase_window(struct inode *inode,
				struct ll_readahead_state *ras,
				struct ll_ra_info *ra)
{
	/* The stretch of the ra-window should be aligned with the max
	 * rpc_size, but the current clio architecture does not support
	 * retrieving such information from the lower layer. FIXME later.
	 */
	if (stride_io_mode(ras))
		ras_stride_increase_window(ras, ra, RAS_INCREASE_STEP(inode));
	else
		ras->ras_window_len = min(ras->ras_window_len +
					  RAS_INCREASE_STEP(inode),
					  ra->ra_max_pages_per_file);
}
void ras_update(struct ll_sb_info *sbi, struct inode *inode,
		struct ll_readahead_state *ras, unsigned long index,
		unsigned hit)
{
	struct ll_ra_info *ra = &sbi->ll_ra_info;
	int zero = 0, stride_detect = 0, ra_miss = 0;

	spin_lock(&ras->ras_lock);

	ll_ra_stats_inc_sbi(sbi, hit ? RA_STAT_HIT : RA_STAT_MISS);

	/* reset the read-ahead window in two cases. First when the app seeks
	 * or reads to some other part of the file. Secondly if we get a
	 * read-ahead miss that we think we've previously issued. This can
	 * be a symptom of there being so many read-ahead pages that the VM
	 * is reclaiming them before we get to them.
	 */
	if (!index_in_window(index, ras->ras_last_readpage, 8, 8)) {
		zero = 1;
		ll_ra_stats_inc_sbi(sbi, RA_STAT_DISTANT_READPAGE);
	} else if (!hit && ras->ras_window_len &&
		   index < ras->ras_next_readahead &&
		   index_in_window(index, ras->ras_window_start, 0,
				   ras->ras_window_len)) {
		ra_miss = 1;
		ll_ra_stats_inc_sbi(sbi, RA_STAT_MISS_IN_WINDOW);
	}

	/* On the second access to a file smaller than the tunable
	 * ra_max_read_ahead_whole_pages trigger RA on all pages in the
	 * file up to ra_max_pages_per_file. This is simply a best effort
	 * and only occurs once per open file. Normal RA behavior is reverted
	 * to for subsequent IO. The mmap case does not increment
	 * ras_requests and thus can never trigger this behavior.
	 */
	if (ras->ras_requests == 2 && !ras->ras_request_index) {
		__u64 kms_pages;

		kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >>
			    PAGE_SHIFT;

		CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages,
		       ra->ra_max_read_ahead_whole_pages,
		       ra->ra_max_pages_per_file);

		if (kms_pages &&
		    kms_pages <= ra->ra_max_read_ahead_whole_pages) {
			ras->ras_window_start = 0;
			ras->ras_last_readpage = 0;
			ras->ras_next_readahead = 0;
			ras->ras_window_len = min(ra->ra_max_pages_per_file,
				ra->ra_max_read_ahead_whole_pages);
			goto out_unlock;
		}
	}
	if (zero) {
		/* check whether it is in stride I/O mode */
		if (!index_in_stride_window(ras, index)) {
			if (ras->ras_consecutive_stride_requests == 0 &&
			    ras->ras_request_index == 0) {
				ras_update_stride_detector(ras, index);
				ras->ras_consecutive_stride_requests++;
			} else {
				ras_stride_reset(ras);
			}
			ras_reset(inode, ras, index);
			ras->ras_consecutive_pages++;
			goto out_unlock;
		} else {
			ras->ras_consecutive_pages = 0;
			ras->ras_consecutive_requests = 0;
			if (++ras->ras_consecutive_stride_requests > 1)
				stride_detect = 1;
			RAS_CDEBUG(ras);
		}
	} else {
		if (ra_miss) {
			if (index_in_stride_window(ras, index) &&
			    stride_io_mode(ras)) {
				/* If stride-RA hit a cache miss, the stride
				 * detector is not reset, to avoid the
				 * overhead of re-detecting the read-ahead
				 * mode.
				 */
				if (index != ras->ras_last_readpage + 1)
					ras->ras_consecutive_pages = 0;
				ras_reset(inode, ras, index);
				RAS_CDEBUG(ras);
			} else {
				/* Reset both stride window and normal RA
				 * window.
				 */
				ras_reset(inode, ras, index);
				ras->ras_consecutive_pages++;
				ras_stride_reset(ras);
				goto out_unlock;
			}
		} else if (stride_io_mode(ras)) {
			/* If this is a contiguous read but currently in
			 * stride I/O mode, check whether the stride step
			 * is still valid; if invalid, reset the stride
			 * ra window.
			 */
			if (!index_in_stride_window(ras, index)) {
				/* Shrink stride read-ahead window to be zero */
				ras_stride_reset(ras);
				ras->ras_window_len = 0;
				ras->ras_next_readahead = index;
			}
		}
	}
	ras->ras_consecutive_pages++;
	ras->ras_last_readpage = index;
	ras_set_start(inode, ras, index);

	if (stride_io_mode(ras))
		/* Since stride read-ahead is sensitive to the offset
		 * of the read-ahead, we use the original offset here,
		 * instead of ras_window_start, which is RPC aligned.
		 */
		ras->ras_next_readahead = max(index, ras->ras_next_readahead);
	else
		ras->ras_next_readahead = max(ras->ras_window_start,
					      ras->ras_next_readahead);
	RAS_CDEBUG(ras);

	/* Trigger RA in the mmap case where ras_consecutive_requests
	 * is not incremented and thus can't be used to trigger RA.
	 */
	if (!ras->ras_window_len && ras->ras_consecutive_pages == 4) {
		ras->ras_window_len = RAS_INCREASE_STEP(inode);
		goto out_unlock;
	}

	/* Initially reset the stride window offset to next_readahead */
	if (ras->ras_consecutive_stride_requests == 2 && stride_detect) {
		/*
		 * Once stride IO mode is detected, next_readahead should be
		 * reset to make sure next_readahead > stride offset
		 */
		ras->ras_next_readahead = max(index, ras->ras_next_readahead);
		ras->ras_stride_offset = index;
		ras->ras_window_len = RAS_INCREASE_STEP(inode);
	}

	/* The initial ras_window_len is set to the request size. To avoid
	 * uselessly reading and discarding pages for random IO the window is
	 * only increased once per consecutive request received.
	 */
	if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
	    !ras->ras_request_index)
		ras_increase_window(inode, ras, ra);
out_unlock:
	RAS_CDEBUG(ras);
	ras->ras_request_index++;
	spin_unlock(&ras->ras_lock);
}
int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
{
	struct inode *inode = vmpage->mapping->host;
	struct ll_inode_info *lli = ll_i2info(inode);
	struct lu_env *env;
	struct cl_io *io;
	struct cl_page *page;
	struct cl_object *clob;
	struct cl_env_nest nest;
	bool redirtied = false;
	bool unlocked = false;
	int result;

	LASSERT(PageLocked(vmpage));
	LASSERT(!PageWriteback(vmpage));

	LASSERT(ll_i2dtexp(inode));

	env = cl_env_nested_get(&nest);
	if (IS_ERR(env)) {
		result = PTR_ERR(env);
		goto out;
	}

	clob = ll_i2info(inode)->lli_clob;
	LASSERT(clob);

	io = ccc_env_thread_io(env);
	io->ci_ignore_layout = 1;
	result = cl_io_init(env, io, CIT_MISC, clob);
	if (result == 0) {
		page = cl_page_find(env, clob, vmpage->index,
				    vmpage, CPT_CACHEABLE);
		if (!IS_ERR(page)) {
			lu_ref_add(&page->cp_reference, "writepage",
				   current);
			cl_page_assume(env, io, page);
			result = cl_page_flush(env, io, page);
			if (result != 0) {
				/*
				 * Re-dirty page on error so it retries write,
				 * but not in case when IO has actually
				 * occurred and completed with an error.
				 */
				if (!PageError(vmpage)) {
					redirty_page_for_writepage(wbc, vmpage);
					result = 0;
					redirtied = true;
				}
			}
			cl_page_disown(env, io, page);
			unlocked = true;
			lu_ref_del(&page->cp_reference,
				   "writepage", current);
			cl_page_put(env, page);
		} else {
			result = PTR_ERR(page);
		}
	}
	cl_io_fini(env, io);

	if (redirtied && wbc->sync_mode == WB_SYNC_ALL) {
		loff_t offset = cl_offset(clob, vmpage->index);

		/* Flush page failed because the extent is being written out.
		 * Wait for the write of the extent to finish to avoid
		 * breaking the kernel, which assumes ->writepage should mark
		 * PageWriteback or clean the page.
		 */
		result = cl_sync_file_range(inode, offset,
					    offset + PAGE_SIZE - 1,
					    CL_FSYNC_LOCAL, 1);
		if (result > 0) {
			/* actually we may have written more than one page.
			 * decreasing this page because the caller will count
			 * it.
			 */
			wbc->nr_to_write -= result - 1;
			result = 0;
		}
	}

	cl_env_nested_put(&nest, env);

out:
	if (result < 0) {
		if (!lli->lli_async_rc)
			lli->lli_async_rc = result;
		SetPageError(vmpage);
		if (!unlocked)
			unlock_page(vmpage);
	}
	return result;
}
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct ll_sb_info *sbi = ll_i2sbi(inode);
	loff_t start;
	loff_t end;
	enum cl_fsync_mode mode;
	int range_whole = 0;
	int result;
	int ignore_layout = 0;

	if (wbc->range_cyclic) {
		start = mapping->writeback_index << PAGE_SHIFT;
		end = OBD_OBJECT_EOF;
	} else {
		start = wbc->range_start;
		end = wbc->range_end;
		if (end == LLONG_MAX) {
			end = OBD_OBJECT_EOF;
			range_whole = start == 0;
		}
	}

	mode = CL_FSYNC_NONE;
	if (wbc->sync_mode == WB_SYNC_ALL)
		mode = CL_FSYNC_LOCAL;

	if (sbi->ll_umounting)
		/* if the mountpoint is being umounted, all pages have to be
		 * evicted to avoid hitting LBUG when truncate_inode_pages()
		 * is called later on.
		 */
		ignore_layout = 1;
	result = cl_sync_file_range(inode, start, end, mode, ignore_layout);
	if (result > 0) {
		wbc->nr_to_write -= result;
		result = 0;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
		if (end == OBD_OBJECT_EOF)
			end = i_size_read(inode);
		mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
	}
	return result;
}
int ll_readpage(struct file *file, struct page *vmpage)
{
	struct ll_cl_context *lcc;
	int result;

	lcc = ll_cl_init(file, vmpage, 0);
	if (!IS_ERR(lcc)) {
		struct lu_env  *env  = lcc->lcc_env;
		struct cl_io   *io   = lcc->lcc_io;
		struct cl_page *page = lcc->lcc_page;

		LASSERT(page->cp_type == CPT_CACHEABLE);
		if (likely(!PageUptodate(vmpage))) {
			cl_page_assume(env, io, page);
			result = cl_io_read_page(env, io, page);
		} else {
			/* Page from a non-object file. */
			unlock_page(vmpage);
			result = 0;
		}
		ll_cl_fini(lcc);
	} else {
		unlock_page(vmpage);
		result = PTR_ERR(lcc);
	}
	return result;
}