Commit | Line | Data |
---|---|---|
d7e09d03 PT |
1 | /* |
2 | * GPL HEADER START | |
3 | * | |
4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 only, | |
8 | * as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
13 | * General Public License version 2 for more details (a copy is included | |
14 | * in the LICENSE file that accompanied this code). | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * version 2 along with this program; If not, see | |
6a5b99a4 | 18 | * http://www.gnu.org/licenses/gpl-2.0.html |
d7e09d03 | 19 | * |
d7e09d03 PT |
20 | * GPL HEADER END |
21 | */ | |
22 | /* | |
23 | * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. | |
24 | * Use is subject to license terms. | |
1dc563a6 AD |
25 | * |
26 | * Copyright (c) 2013, 2015, Intel Corporation. | |
d7e09d03 PT |
27 | */ |
28 | /* | |
29 | * This file is part of Lustre, http://www.lustre.org/ | |
30 | * Lustre is a trademark of Sun Microsystems, Inc. | |
31 | * | |
32 | * Internal definitions for VVP layer. | |
33 | * | |
34 | * Author: Nikita Danilov <nikita.danilov@sun.com> | |
35 | */ | |
36 | ||
37 | #ifndef VVP_INTERNAL_H | |
38 | #define VVP_INTERNAL_H | |
39 | ||
0d345656 | 40 | #include "../include/lustre/lustre_idl.h" |
67a235f5 | 41 | #include "../include/cl_object.h" |
0d345656 JH |
42 | |
43 | enum obd_notify_event; | |
44 | struct inode; | |
45 | struct lov_stripe_md; | |
46 | struct lustre_md; | |
47 | struct obd_capa; | |
48 | struct obd_device; | |
49 | struct obd_export; | |
50 | struct page; | |
51 | ||
10cdef73 JH |
/* specific architecture can implement only part of this list */
enum vvp_io_subtype {
	/** normal IO */
	IO_NORMAL,
	/** io started from splice_{read|write} */
	IO_SPLICE
};
59 | ||
/**
 * IO state private to the VVP layer.
 */
struct vvp_io {
	/** super class */
	struct cl_io_slice     vui_cl;
	/** link into a cl_io lock list — NOTE(review): usage not visible here,
	 * confirm against cl_io lock enqueue paths. */
	struct cl_io_lock_link vui_link;
	/**
	 * I/O vector information to or from which read/write is going.
	 */
	struct iov_iter *vui_iter;
	/**
	 * Total size for the left IO.
	 */
	size_t vui_tot_count;

	/* per-subtype state, selected by vui_io_subtype / the kind of IO */
	union {
		struct vvp_fault_io {
			/**
			 * Inode modification time that is checked across DLM
			 * lock request.
			 */
			time64_t		 ft_mtime;
			struct vm_area_struct	*ft_vma;
			/**
			 * locked page returned from vvp_io
			 */
			struct page		*ft_vmpage;
			/**
			 * kernel fault info
			 */
			struct vm_fault		*ft_vmf;
			/**
			 * fault API used bitflags for return code.
			 */
			unsigned int		 ft_flags;
			/**
			 * check that flags are from filemap_fault
			 */
			bool			 ft_flags_valid;
		} fault;
		struct {
			/* pipe used by splice_{read|write} (IO_SPLICE) */
			struct pipe_inode_info	*vui_pipe;
			unsigned int		 vui_flags;
		} splice;
		struct {
			/* pages queued for write commit */
			struct cl_page_list	 vui_queue;
			/* bytes already written by this IO */
			unsigned long		 vui_written;
			/* intra-page [from, to] range — TODO confirm units */
			int			 vui_from;
			int			 vui_to;
		} write;
	} u;

	enum vvp_io_subtype vui_io_subtype;

	/**
	 * Layout version when this IO is initialized
	 */
	__u32 vui_layout_gen;
	/**
	 * File descriptor against which IO is done.
	 */
	struct ll_file_data *vui_fd;
	struct kiocb *vui_iocb;

	/* Readahead state. */
	pgoff_t vui_ra_start;
	pgoff_t vui_ra_count;
	/* Set when vui_ra_{start,count} have been initialized. */
	bool vui_ra_valid;
};
131 | ||
a37bec74 JH |
132 | extern struct lu_device_type vvp_device_type; |
133 | ||
10cdef73 | 134 | extern struct lu_context_key vvp_session_key; |
9acc4500 | 135 | extern struct lu_context_key vvp_thread_key; |
0d345656 | 136 | |
4a4eee07 | 137 | extern struct kmem_cache *vvp_lock_kmem; |
8c7b0e1a | 138 | extern struct kmem_cache *vvp_object_kmem; |
103b8bda | 139 | extern struct kmem_cache *vvp_req_kmem; |
8c7b0e1a | 140 | |
9acc4500 JH |
/**
 * Per-thread scratch objects for the VVP layer, stored in the lu_env
 * thread context under vvp_thread_key.  The vvp_env_*() helpers below
 * zero and hand out individual members instead of allocating per call.
 */
struct vvp_thread_info {
	struct cl_lock		vti_lock;
	struct cl_lock_descr	vti_descr;
	struct cl_io		vti_io;
	struct cl_attr		vti_attr;
};
147 | ||
9acc4500 | 148 | static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env) |
0d345656 | 149 | { |
9acc4500 | 150 | struct vvp_thread_info *vti; |
0d345656 | 151 | |
9acc4500 JH |
152 | vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key); |
153 | LASSERT(vti); | |
0d345656 | 154 | |
9acc4500 | 155 | return vti; |
0d345656 JH |
156 | } |
157 | ||
9acc4500 | 158 | static inline struct cl_lock *vvp_env_lock(const struct lu_env *env) |
0d345656 | 159 | { |
9acc4500 | 160 | struct cl_lock *lock = &vvp_env_info(env)->vti_lock; |
0d345656 JH |
161 | |
162 | memset(lock, 0, sizeof(*lock)); | |
163 | return lock; | |
164 | } | |
165 | ||
9acc4500 | 166 | static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env) |
0d345656 | 167 | { |
9acc4500 | 168 | struct cl_attr *attr = &vvp_env_info(env)->vti_attr; |
0d345656 JH |
169 | |
170 | memset(attr, 0, sizeof(*attr)); | |
171 | ||
172 | return attr; | |
173 | } | |
174 | ||
9acc4500 | 175 | static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env) |
0d345656 | 176 | { |
9acc4500 | 177 | struct cl_io *io = &vvp_env_info(env)->vti_io; |
0d345656 JH |
178 | |
179 | memset(io, 0, sizeof(*io)); | |
180 | ||
181 | return io; | |
182 | } | |
183 | ||
10cdef73 JH |
/**
 * VVP state attached to the environment's session context under
 * vvp_session_key (\see vvp_env_session()).
 */
struct vvp_session {
	/* IO state handed out by vvp_env_io() */
	struct vvp_io cs_ios;
};
187 | ||
10cdef73 | 188 | static inline struct vvp_session *vvp_env_session(const struct lu_env *env) |
0d345656 | 189 | { |
10cdef73 | 190 | struct vvp_session *ses; |
0d345656 | 191 | |
10cdef73 | 192 | ses = lu_context_key_get(env->le_ses, &vvp_session_key); |
0d345656 JH |
193 | LASSERT(ses); |
194 | ||
195 | return ses; | |
196 | } | |
197 | ||
10cdef73 | 198 | static inline struct vvp_io *vvp_env_io(const struct lu_env *env) |
0d345656 | 199 | { |
10cdef73 | 200 | return &vvp_env_session(env)->cs_ios; |
0d345656 JH |
201 | } |
202 | ||
/**
 * VVP-private object state (historically "ccc"); caches the backing VFS
 * inode in vob_inode for the cl_object stack.
 */
struct vvp_object {
	struct cl_object_header	vob_header;
	struct cl_object	vob_cl;
	struct inode		*vob_inode;

	/**
	 * A list of dirty pages pending IO in the cache. Used by
	 * SOM. Protected by ll_inode_info::lli_lock.
	 *
	 * \see vvp_page::vpg_pending_linkage
	 */
	struct list_head	vob_pending_list;

	/**
	 * Access this counter is protected by inode->i_sem. Now that
	 * the lifetime of transient pages must be covered by inode sem,
	 * we don't need to hold any lock..
	 */
	int			vob_transient_pages;
	/**
	 * Number of outstanding mmaps on this file.
	 *
	 * \see ll_vm_open(), ll_vm_close().
	 */
	atomic_t		vob_mmap_cnt;

	/**
	 * various flags
	 * vob_discard_page_warned
	 *	if pages belonging to this object are discarded when a client
	 *	is evicted, some debug info will be printed, this flag will be set
	 *	during processing the first discarded page, then avoid flooding
	 *	debug message for lots of discarded pages.
	 *
	 * \see ll_dirty_page_discard_warn.
	 */
	unsigned int		vob_discard_page_warned:1;
};
244 | ||
/**
 * VVP-private page state.
 */
struct vvp_page {
	struct cl_page_slice vpg_cl;
	/*
	 * NOTE(review): flag semantics inferred from names — confirm against
	 * vvp_page.c: deferred uptodate / readahead-hit / queued-for-write.
	 */
	unsigned int	vpg_defer_uptodate:1,
			vpg_ra_used:1,
			vpg_write_queued:1;
	/**
	 * Non-empty iff this page is already counted in
	 * vvp_object::vob_pending_list. This list is only used as a flag,
	 * that is, never iterated through, only checked for list_empty(), but
	 * having a list is useful for debugging.
	 */
	struct list_head vpg_pending_linkage;
	/** VM page */
	struct page	*vpg_page;
};
263 | ||
3a52f803 | 264 | static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice) |
0d345656 | 265 | { |
3a52f803 | 266 | return container_of(slice, struct vvp_page, vpg_cl); |
0d345656 JH |
267 | } |
268 | ||
3a52f803 | 269 | static inline pgoff_t vvp_index(struct vvp_page *vvp) |
0d345656 | 270 | { |
3a52f803 | 271 | return vvp->vpg_cl.cpl_index; |
0d345656 JH |
272 | } |
273 | ||
3c95b839 JH |
/**
 * VVP-private device state: the cl_device for the VVP layer, the
 * superblock it serves and the next (lower) cl_device in the stack.
 */
struct vvp_device {
	struct cl_device	vdv_cl;
	struct super_block	*vdv_sb;
	struct cl_device	*vdv_next;
};
279 | ||
4a4eee07 JH |
/**
 * VVP-private lock state: just the cl_lock_slice, no extra fields.
 */
struct vvp_lock {
	struct cl_lock_slice vlk_cl;
};
283 | ||
103b8bda JH |
/**
 * VVP-private transfer request state: just the cl_req_slice.
 */
struct vvp_req {
	struct cl_req_slice vrq_cl;
};
287 | ||
288 | void *ccc_key_init(const struct lu_context *ctx, | |
289 | struct lu_context_key *key); | |
290 | void ccc_key_fini(const struct lu_context *ctx, | |
291 | struct lu_context_key *key, void *data); | |
0d345656 | 292 | |
0d345656 | 293 | void ccc_umount(const struct lu_env *env, struct cl_device *dev); |
0d345656 | 294 | |
3c95b839 JH |
295 | static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv) |
296 | { | |
297 | return &vdv->vdv_cl.cd_lu_dev; | |
298 | } | |
299 | ||
300 | static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d) | |
301 | { | |
302 | return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev); | |
303 | } | |
304 | ||
305 | static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d) | |
306 | { | |
307 | return container_of0(d, struct vvp_device, vdv_cl); | |
308 | } | |
309 | ||
8c7b0e1a JH |
310 | static inline struct vvp_object *cl2vvp(const struct cl_object *obj) |
311 | { | |
312 | return container_of0(obj, struct vvp_object, vob_cl); | |
313 | } | |
314 | ||
315 | static inline struct vvp_object *lu2vvp(const struct lu_object *obj) | |
316 | { | |
317 | return container_of0(obj, struct vvp_object, vob_cl.co_lu); | |
318 | } | |
319 | ||
320 | static inline struct inode *vvp_object_inode(const struct cl_object *obj) | |
321 | { | |
322 | return cl2vvp(obj)->vob_inode; | |
323 | } | |
324 | ||
325 | int vvp_object_invariant(const struct cl_object *obj); | |
326 | struct vvp_object *cl_inode2vvp(struct inode *inode); | |
327 | ||
3a52f803 JH |
328 | static inline struct page *cl2vm_page(const struct cl_page_slice *slice) |
329 | { | |
330 | return cl2vvp_page(slice)->vpg_page; | |
331 | } | |
332 | ||
4a4eee07 JH |
333 | static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice) |
334 | { | |
335 | return container_of(slice, struct vvp_lock, vlk_cl); | |
336 | } | |
337 | ||
0d345656 JH |
/*
 * Compiled-out object invariant check: the sizeof casts type-check the
 * arguments without evaluating them at runtime.
 */
# define CLOBINVRNT(env, clob, expr)					\
	((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
340 | ||
0d345656 JH |
341 | /** |
342 | * New interfaces to get and put lov_stripe_md from lov layer. This violates | |
343 | * layering because lov_stripe_md is supposed to be a private data in lov. | |
344 | * | |
345 | * NB: If you find you have to use these interfaces for your new code, please | |
346 | * think about it again. These interfaces may be removed in the future for | |
347 | * better layering. | |
348 | */ | |
349 | struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj); | |
350 | void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm); | |
351 | int lov_read_and_clear_async_rc(struct cl_object *clob); | |
352 | ||
353 | struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode); | |
354 | void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm); | |
d7e09d03 | 355 | |
77605e41 JX |
356 | int vvp_io_init(const struct lu_env *env, struct cl_object *obj, |
357 | struct cl_io *io); | |
fee6eb50 | 358 | int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io); |
77605e41 JX |
359 | int vvp_lock_init(const struct lu_env *env, struct cl_object *obj, |
360 | struct cl_lock *lock, const struct cl_io *io); | |
e15ba45d | 361 | int vvp_page_init(const struct lu_env *env, struct cl_object *obj, |
7addf402 | 362 | struct cl_page *page, pgoff_t index); |
103b8bda JH |
363 | int vvp_req_init(const struct lu_env *env, struct cl_device *dev, |
364 | struct cl_req *req); | |
d7e09d03 PT |
365 | struct lu_object *vvp_object_alloc(const struct lu_env *env, |
366 | const struct lu_object_header *hdr, | |
367 | struct lu_device *dev); | |
d7e09d03 | 368 | |
5c5af0fc JH |
369 | int vvp_global_init(void); |
370 | void vvp_global_fini(void); | |
371 | ||
2d95f10e | 372 | extern const struct file_operations vvp_dump_pgcache_file_ops; |
d7e09d03 PT |
373 | |
374 | #endif /* VVP_INTERNAL_H */ |