drivers/staging/lustre/lustre/llite/llite_mmap.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uaccess.h>

#include <linux/fs.h>
#include <linux/pagemap.h>

#define DEBUG_SUBSYSTEM S_LLITE

#include "../include/lustre_lite.h"
#include "llite_internal.h"
#include "../include/linux/lustre_compat25.h"
static const struct vm_operations_struct ll_file_vm_ops;

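/**
 * Build an LDLM extent policy covering the file range that backs the
 * mapped range [addr, addr + count) of \a vma.  The start is rounded
 * down and the end rounded up to page boundaries via CFS_PAGE_MASK,
 * since extent locks are managed at page granularity.  For example, a
 * fault at \a addr maps to the page at file offset
 * ((addr - vm_start) & CFS_PAGE_MASK) + (vm_pgoff << PAGE_CACHE_SHIFT).
 */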
void policy_from_vma(ldlm_policy_data_t *policy,
		     struct vm_area_struct *vma, unsigned long addr,
		     size_t count)
{
	policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
				 (vma->vm_pgoff << PAGE_CACHE_SHIFT);
	policy->l_extent.end = (policy->l_extent.start + count - 1) |
			       ~CFS_PAGE_MASK;
}

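/**
 * Find the first VMA in \a mm that overlaps [addr, addr + count), is
 * backed by a Lustre file (its vm_ops is ll_file_vm_ops) and is mapped
 * shared.  The caller must already hold mmap_sem.  Returns NULL if no
 * such VMA exists.
 */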
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
			       size_t count)
{
	struct vm_area_struct *vma, *ret = NULL;

	/* mmap_sem must have been held by caller. */
	LASSERT(!down_write_trylock(&mm->mmap_sem));

	for (vma = find_vma(mm, addr);
	     vma && vma->vm_start < (addr + count); vma = vma->vm_next) {
		if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
		    vma->vm_flags & VM_SHARED) {
			ret = vma;
			break;
		}
	}
	return ret;
}

/**
 * API-independent part of page fault initialization.
 * \param vma - virtual memory area addressed by the page fault
 * \param env_ret - corresponding lu_env for processing
 * \param nest - nesting level
 * \param index - page index corresponding to the fault
 * \param ra_flags - vma readahead flags
 *
 * \return allocated and initialized env for the fault operation
 * \retval EINVAL if the env can't be allocated
 * \return other error codes from cl_io_init
 */
static struct cl_io *
ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
		 struct cl_env_nest *nest, pgoff_t index,
		 unsigned long *ra_flags)
{
	struct file *file = vma->vm_file;
	struct inode *inode = file_inode(file);
	struct cl_io *io;
	struct cl_fault_io *fio;
	struct lu_env *env;
	int rc;

	*env_ret = NULL;
	if (ll_file_nolock(file))
		return ERR_PTR(-EOPNOTSUPP);

	/*
	 * A page fault can occur while Lustre IO is already active for the
	 * current thread, e.g., when doing read/write against a user-level
	 * buffer mapped from a Lustre file.  To avoid stomping on the
	 * existing context, optionally force allocation of a new one.
	 */
	env = cl_env_nested_get(nest);
	if (IS_ERR(env))
		return ERR_PTR(-EINVAL);

	*env_ret = env;

	io = ccc_env_thread_io(env);
	io->ci_obj = ll_i2info(inode)->lli_clob;
	LASSERT(io->ci_obj);

	fio = &io->u.ci_fault;
	fio->ft_index = index;
	fio->ft_executable = vma->vm_flags & VM_EXEC;

	/*
	 * Disable VM_SEQ_READ and use VM_RAND_READ to make sure that the
	 * kernel will not read pages not covered by LDLM locks in
	 * filemap_nopage; we do our own readahead in ll_readpage.
	 */
	if (ra_flags)
		*ra_flags = vma->vm_flags & (VM_RAND_READ | VM_SEQ_READ);
	vma->vm_flags &= ~VM_SEQ_READ;
	vma->vm_flags |= VM_RAND_READ;

	CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
	       fio->ft_index, fio->ft_executable);

	rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
	if (rc == 0) {
		struct ccc_io *cio = ccc_env_io(env);
		struct ll_file_data *fd = LUSTRE_FPRIVATE(file);

		LASSERT(cio->cui_cl.cis_io == io);

		/* mmap lock must be MANDATORY because it has to cache pages. */
		io->ci_lockreq = CILR_MANDATORY;
		cio->cui_fd = fd;
	} else {
		LASSERT(rc < 0);
		cl_io_fini(env, io);
		cl_env_nested_put(nest, env);
		io = ERR_PTR(rc);
	}

	return io;
}

/* Shared code of the page_mkwrite method for RHEL5 and RHEL6. */
static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
			    bool *retry)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio;
	struct cl_env_nest nest;
	int result;
	sigset_t set;
	struct inode *inode;
	struct ll_inode_info *lli;

	io = ll_fault_io_init(vma, &env, &nest, vmpage->index, NULL);
	if (IS_ERR(io)) {
		result = PTR_ERR(io);
		goto out;
	}

	result = io->ci_result;
	if (result < 0)
		goto out_io;

	io->u.ci_fault.ft_mkwrite = 1;
	io->u.ci_fault.ft_writable = 1;

	vio = vvp_env_io(env);
	vio->u.fault.ft_vma = vma;
	vio->u.fault.ft_vmpage = vmpage;

	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

	/* We grab lli_trunc_sem to exclude a concurrent truncate.
	 * Otherwise, we could add dirty pages into the osc cache
	 * while truncate is ongoing.
	 */
	inode = ccc_object_inode(io->ci_obj);
	lli = ll_i2info(inode);
	down_read(&lli->lli_trunc_sem);

	result = cl_io_loop(env, io);

	up_read(&lli->lli_trunc_sem);

	cfs_restore_sigs(set);

	if (result == 0) {
		struct inode *inode = file_inode(vma->vm_file);
		struct ll_inode_info *lli = ll_i2info(inode);

		lock_page(vmpage);
		if (!vmpage->mapping) {
			unlock_page(vmpage);

			/* page was truncated and lock was cancelled, return
			 * ENODATA so that VM_FAULT_NOPAGE will be returned
			 * to handle_mm_fault().
			 */
			if (result == 0)
				result = -ENODATA;
		} else if (!PageDirty(vmpage)) {
			/* Race: the page was cleaned by ptlrpcd after it was
			 * unlocked, so it has to be added back to the dirty
			 * cache.  Otherwise this soon-to-be-dirty page won't
			 * consume any grants; even worse, if the page is
			 * being transferred, the race will break the RPC
			 * checksum.
			 */
			unlock_page(vmpage);

			CDEBUG(D_MMAP, "Race on page_mkwrite %p/%lu, page has been written out, retry.\n",
			       vmpage, vmpage->index);

			*retry = true;
			result = -EAGAIN;
		}

		if (result == 0) {
			spin_lock(&lli->lli_lock);
			lli->lli_flags |= LLIF_DATA_MODIFIED;
			spin_unlock(&lli->lli_lock);
		}
	}

out_io:
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);
out:
	CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
	LASSERT(ergo(result == 0, PageLocked(vmpage)));

	return result;
}

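/*
 * Map an errno result from the fault path onto the VM_FAULT_* codes
 * that the VM fault handler expects.
 */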
static inline int to_fault_error(int result)
{
	switch (result) {
	case 0:
		result = VM_FAULT_LOCKED;
		break;
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}
	return result;
}

/**
 * Lustre implementation of the vm_operations_struct::fault() method,
 * called by the VM to serve a page fault (in both kernel and user space).
 *
 * \param vma - the virtual memory area in which the fault occurred
 * \param vmf - structure describing the fault type and address
 *
 * \return allocated and filled-in _locked_ page for the address
 * \retval VM_FAULT_ERROR on general error
 * \retval NOPAGE_OOM if no memory is available to allocate a new page
 */
static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct lu_env *env;
	struct cl_io *io;
	struct vvp_io *vio = NULL;
	struct page *vmpage;
	unsigned long ra_flags;
	struct cl_env_nest nest;
	int result;
	int fault_ret = 0;

	io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
	if (IS_ERR(io))
		return to_fault_error(PTR_ERR(io));

	result = io->ci_result;
	if (result == 0) {
		vio = vvp_env_io(env);
		vio->u.fault.ft_vma = vma;
		vio->u.fault.ft_vmpage = NULL;
		vio->u.fault.fault.ft_vmf = vmf;
		vio->u.fault.fault.ft_flags = 0;
		vio->u.fault.fault.ft_flags_valid = false;

		result = cl_io_loop(env, io);

		/* ft_flags are only valid if we reached
		 * the call to filemap_fault
		 */
		if (vio->u.fault.fault.ft_flags_valid)
			fault_ret = vio->u.fault.fault.ft_flags;

		vmpage = vio->u.fault.ft_vmpage;
		if (result != 0 && vmpage) {
			page_cache_release(vmpage);
			vmf->page = NULL;
		}
	}
	cl_io_fini(env, io);
	cl_env_nested_put(&nest, env);

	vma->vm_flags |= ra_flags;
	if (result != 0 && !(fault_ret & VM_FAULT_RETRY))
		fault_ret |= to_fault_error(result);

	CDEBUG(D_MMAP, "%s fault %d/%d\n",
	       current->comm, fault_ret, result);
	return fault_ret;
}

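/*
 * Entry point for the ->fault() method.  Blocks all signals except
 * SIGKILL and SIGTERM around ll_fault0(), and retries when the page
 * returned by ll_fault0() was truncated (lost its mapping) before we
 * could lock it.
 */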
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	int result;
	sigset_t set;

	/* Only SIGKILL and SIGTERM are allowed for fault/nopage/mkwrite,
	 * so that the process can be killed by an admin but other signals
	 * do not cause a segfault.
	 */
	set = cfs_block_sigsinv(sigmask(SIGKILL) | sigmask(SIGTERM));

restart:
	result = ll_fault0(vma, vmf);
	LASSERT(!(result & VM_FAULT_LOCKED));
	if (result == 0) {
		struct page *vmpage = vmf->page;

		/* check if this page has been truncated */
		lock_page(vmpage);
		if (unlikely(!vmpage->mapping)) { /* unlucky */
			unlock_page(vmpage);
			page_cache_release(vmpage);
			vmf->page = NULL;

			if (!printed && ++count > 16) {
				CWARN("the page is under heavy contention, maybe your app(%s) needs revising :-)\n",
				      current->comm);
				printed = true;
			}

			goto restart;
		}

		result = VM_FAULT_LOCKED;
	}
	cfs_restore_sigs(set);
	return result;
}

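/*
 * Entry point for the ->page_mkwrite() method.  Loops over
 * ll_page_mkwrite0() while it reports a retryable race, warning once
 * if the page stays heavily contended, then converts the final errno
 * into a VM_FAULT_* code.
 */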
static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	int count = 0;
	bool printed = false;
	bool retry;
	int result;

	do {
		retry = false;
		result = ll_page_mkwrite0(vma, vmf->page, &retry);

		if (!printed && ++count > 16) {
			CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
			      current->comm, vmf->pgoff,
			      file_inode(vma->vm_file)->i_ino);
			printed = true;
		}
	} while (retry);

	switch (result) {
	case 0:
		LASSERT(PageLocked(vmf->page));
		result = VM_FAULT_LOCKED;
		break;
	case -ENODATA:
	case -EFAULT:
		result = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		result = VM_FAULT_OOM;
		break;
	case -EAGAIN:
		result = VM_FAULT_RETRY;
		break;
	default:
		result = VM_FAULT_SIGBUS;
		break;
	}

	return result;
}

/**
 * To avoid cancelling the locks that cover a mmapped region under lock
 * cache pressure, we track the number of mapped VMAs in
 * ccc_object::cob_mmap_cnt.
 */
static void ll_vm_open(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ccc_object *vob = cl_inode2ccc(inode);

	LASSERT(vma->vm_file);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
	atomic_inc(&vob->cob_mmap_cnt);
}

/**
 * Dual to ll_vm_open().
 */
static void ll_vm_close(struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(vma->vm_file);
	struct ccc_object *vob = cl_inode2ccc(inode);

	LASSERT(vma->vm_file);
	atomic_dec(&vob->cob_mmap_cnt);
	LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
}

/* XXX put nice comment here. talk about __free_pte -> dirty pages and
 * nopage's reference passing to the pte
 */
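/*
 * Unmap any user mappings of the byte range [first, last] in @mapping.
 * Adding PAGE_CACHE_SIZE - 1 to @first rounds the start offset up to
 * the next page boundary once unmap_mapping_range() converts it to a
 * page index, so a page only partially inside the range is left mapped.
 * Returns 0 if the mapping was mapped somewhere, -ENOENT otherwise.
 */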
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
	int rc = -ENOENT;

	LASSERTF(last > first, "last %llu first %llu\n", last, first);
	if (mapping_mapped(mapping)) {
		rc = 0;
		unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1,
				    last - first + 1, 0);
	}

	return rc;
}

466
467 static const struct vm_operations_struct ll_file_vm_ops = {
468 .fault = ll_fault,
469 .page_mkwrite = ll_page_mkwrite,
470 .open = ll_vm_open,
471 .close = ll_vm_close,
472 };
473
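/*
 * The ->mmap() file operation for Lustre files: refuse mappings in
 * nolock mode, set up a generic mapping, install ll_file_vm_ops (calling
 * ll_vm_open() ourselves, since vm_ops was not yet set when
 * generic_file_mmap() created the VMA), and glimpse the file size so
 * the mapping starts with up-to-date attributes.
 */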
int ll_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int rc;

	if (ll_file_nolock(file))
		return -EOPNOTSUPP;

	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
	rc = generic_file_mmap(file, vma);
	if (rc == 0) {
		vma->vm_ops = &ll_file_vm_ops;
		vma->vm_ops->open(vma);
		/* update the inode's size and mtime */
		rc = ll_glimpse_size(inode);
	}

	return rc;
}