/* mm/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/init.h>
#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/**
 * struct ashmem_area - The anonymous shared memory area
 * @name: The optional name in /proc/pid/maps
 * @unpinned_list: The list of this area's unpinned ranges
 * @file: The shmem-based backing file
 * @size: The size of the mapping, in bytes
 * @prot_mask: The allowed protection bits, as vm_flags
 *
 * The lifecycle of this structure is from our parent file's open() until
 * its release(). It is also protected by 'ashmem_mutex'.
 *
 * Warning: Mappings do NOT pin this structure; it dies on close().
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN];
	struct list_head unpinned_list;
	struct file *file;
	size_t size;
	unsigned long prot_mask;
};

/**
 * struct ashmem_range - A range of unpinned/evictable pages
 * @lru: The entry in the LRU list
 * @unpinned: The entry in its area's unpinned list
 * @asma: The associated anonymous shared memory area
 * @pgstart: The starting page (inclusive)
 * @pgend: The ending page (inclusive)
 * @purged: The purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 *
 * The lifecycle of this structure is from unpin to pin.
 * It is protected by 'ashmem_mutex'.
 */
struct ashmem_range {
	struct list_head lru;
	struct list_head unpinned;
	struct ashmem_area *asma;
	size_t pgstart;
	size_t pgend;
	unsigned int purged;
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/*
 * long lru_count - The count of pages on our LRU list.
 *
 * This is protected by ashmem_mutex.
 */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;

#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

static inline int page_range_subsumes_range(struct ashmem_range *range,
					    size_t start, size_t end)
{
	return (((range)->pgstart >= (start)) && ((range)->pgend <= (end)));
}

static inline int page_range_subsumed_by_range(struct ashmem_range *range,
					       size_t start, size_t end)
{
	return (((range)->pgstart <= (start)) && ((range)->pgend >= (end)));
}

static inline int page_in_range(struct ashmem_range *range, size_t page)
{
	return (((range)->pgstart <= (page)) && ((range)->pgend >= (page)));
}

static inline int page_range_in_range(struct ashmem_range *range,
				      size_t start, size_t end)
{
	return (page_in_range(range, start) || page_in_range(range, end) ||
		page_range_subsumes_range(range, start, end));
}

static inline int range_before_page(struct ashmem_range *range, size_t page)
{
	return ((range)->pgend < (page));
}

#define PROT_MASK	(PROT_EXEC | PROT_READ | PROT_WRITE)

/**
 * lru_add() - Adds a range of memory to the LRU list
 * @range: The memory range being added.
 *
 * The range is first added to the end (tail) of the LRU list.
 * After this, the size of the range is added to @lru_count.
 */
static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

/**
 * lru_del() - Removes a range of memory from the LRU list
 * @range: The memory range being removed
 *
 * The range is first deleted from the LRU list.
 * After this, the size of the range is subtracted from @lru_count.
 */
static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/**
 * range_alloc() - Allocates and initializes a new ashmem_range structure
 * @asma: The associated ashmem_area
 * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
 * @purged: Initial purge status (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * @start: The starting page (inclusive)
 * @end: The ending page (inclusive)
 *
 * This function is protected by ashmem_mutex.
 *
 * Return: 0 if successful, or -ENOMEM if there is an error
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

/**
 * range_del() - Deletes and deallocates an ashmem_range structure
 * @range: The associated ashmem_range that has previously been allocated
 */
static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/**
 * range_shrink() - Shrinks an ashmem_range
 * @range: The associated ashmem_range being shrunk
 * @start: The starting page of the new range
 * @end: The ending page of the new range
 *
 * This does not modify the data inside the existing range in any way - it
 * simply shrinks the boundaries of the range.
 *
 * Theoretically, with a little tweaking, this could eventually be changed
 * to range_resize, and expand the lru_count if the new range is larger.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
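
/*
 * For example, shrinking an unpurged range that spans pages [2, 9] down to
 * [2, 4] reduces range_size() from 8 pages to 3, so lru_count drops by 5.
 */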

/**
 * ashmem_open() - Opens an Anonymous Shared Memory structure
 * @inode: The backing file's index node (inode)
 * @file: The backing file
 *
 * Please note that the ashmem_area is not returned by this function - it is
 * instead written to "file->private_data".
 *
 * Return: 0 if successful, or another code if unsuccessful.
 */
static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

/**
 * ashmem_release() - Releases an Anonymous Shared Memory structure
 * @ignored: The backing file's inode; it is ignored here
 * @file: The backing file
 *
 * Return: 0 if successful. If it is anything else, go have a coffee and
 * try again.
 */
static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

/**
 * ashmem_read() - Reads a set of bytes from an Ashmem-enabled file
 * @file: The associated backing file.
 * @buf: The userspace buffer the data is written to
 * @len: The number of bytes being read
 * @pos: The position of the first byte to read.
 *
 * Return: The number of bytes read, 0 at end of file, or a negative errno
 * on failure.
 */
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out_unlock;

	if (!asma->file) {
		ret = -EBADF;
		goto out_unlock;
	}

	mutex_unlock(&ashmem_mutex);

	/*
	 * asma and asma->file are used outside the lock here. We assume
	 * once asma->file is set it will never be changed, and will not
	 * be destroyed until all references to the file are dropped and
	 * ashmem_release is called.
	 */
	ret = __vfs_read(asma->file, buf, len, pos);
	if (ret >= 0)
		/* Update backing file pos, since f_ops->read() doesn't */
		asma->file->f_pos = *pos;
	return ret;

out_unlock:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = vfs_llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
		     calc_vm_prot_bits(PROT_MASK, 0))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (IS_ERR(vmfile)) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
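
/*
 * Typical userspace usage of this mmap path (an illustrative sketch only;
 * it assumes the ASHMEM_* ioctl definitions from the uapi header and a
 * /dev/ashmem device node). The name is optional, the size mandatory, and
 * both must be set before the first mmap():
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * Once the backing file exists (after the first successful mmap), neither
 * the name nor the size can be changed.
 */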

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
 *
 * 'nr_to_scan' is the number of objects to scan for freeing.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of pages freed, or SHRINK_STOP/-1 if we
 * cannot proceed without risk of deadlock (due to gfp_mask or lock
 * contention).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;
	unsigned long freed = 0;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	if (!mutex_trylock(&ashmem_mutex))
		return -1;

	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		vfs_fallocate(range->asma->file,
			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			      start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		freed += range_size(range);
		if (--sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);
	return freed;
}

static unsigned long
ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	/*
	 * Note that lru_count is a count of pages on the LRU, not a count
	 * of objects on the list. This means the scan function needs to
	 * return the number of pages freed, not the number of objects
	 * scanned.
	 */
	return lru_count;
}

static struct shrinker ashmem_shrinker = {
	.count_objects = ashmem_shrink_count,
	.scan_objects = ashmem_shrink_scan,
	/*
	 * XXX (dchinner): I wish people would comment on why they need
	 * such significant changes to the default value here
	 */
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int len;
	int ret = 0;
	char local_name[ASHMEM_NAME_LEN];

	/*
	 * Holding the ashmem_mutex while doing a copy_from_user might cause
	 * a data abort which would try to access mmap_sem. If another
	 * thread has invoked ashmem_mmap then it will be holding the
	 * semaphore and will be waiting for ashmem_mutex, thereby leading
	 * to deadlock. We therefore copy the name into a local variable,
	 * which needs no protection, without the mutex held, and later copy
	 * the local variable into the structure member with the lock held.
	 */
	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
	if (len < 0)
		return len;
	if (len == ASHMEM_NAME_LEN)
		local_name[ASHMEM_NAME_LEN - 1] = '\0';
	mutex_lock(&ashmem_mutex);
	/* cannot change an existing mapping's name */
	if (unlikely(asma->file))
		ret = -EINVAL;
	else
		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);

	mutex_unlock(&ashmem_mutex);
	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;
	size_t len;
	/*
	 * Have a local variable to which we'll copy the content
	 * from asma with the lock held. Later we can copy this to user
	 * space safely without holding any locks. So even if we proceed
	 * to wait for mmap_sem, it won't lead to deadlock.
	 */
	char local_name[ASHMEM_NAME_LEN];

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
	} else {
		len = sizeof(ASHMEM_NAME_DEF);
		memcpy(local_name, ASHMEM_NAME_DEF, len);
	}
	mutex_unlock(&ashmem_mutex);

	/*
	 * Now we are just copying from the stack variable to userland.
	 * No lock held.
	 */
	if (unlikely(copy_to_user(name, local_name, len)))
		ret = -EFAULT;
	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
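
/*
 * A concrete instance of case #4 above: given an unpinned range covering
 * pages [0, 9], pinning pages [4, 5] shrinks the existing range to [0, 3]
 * and allocates a new unpinned range covering [6, 9].
 */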

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially unpinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min(range->pgstart, pgstart);
			pgend = max(range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
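
/*
 * Example: with existing unpinned ranges covering pages [8, 9] and [2, 3],
 * unpinning pages [3, 8] widens the request to [2, 9] as it merges, deletes
 * both old ranges, and inserts a single range covering pages [2, 9].
 */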

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
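
/*
 * Illustrative userspace sketch of the pin API (assuming struct ashmem_pin
 * and the ASHMEM_PIN/ASHMEM_UNPIN ioctls from the uapi header; regenerate()
 * stands in for app-specific recovery of purged contents):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };
 *
 *	ioctl(fd, ASHMEM_UNPIN, &pin);
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate(p, 4096);
 *
 * A zero length means "from offset to the end of the region"; offset and
 * length must otherwise be multiples of PAGE_SIZE.
 */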

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = LONG_MAX,
			};
			ret = ashmem_shrink_count(&ashmem_shrinker, &sc);
			ashmem_shrink_scan(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

/* support of 32bit userspace on 64bit platforms */
#ifdef CONFIG_COMPAT
static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case COMPAT_ASHMEM_SET_SIZE:
		cmd = ASHMEM_SET_SIZE;
		break;
	case COMPAT_ASHMEM_SET_PROT_MASK:
		cmd = ASHMEM_SET_PROT_MASK;
		break;
	}
	return ashmem_ioctl(file, cmd, arg);
}
#endif

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = compat_ashmem_ioctl,
#endif
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret = -ENOMEM;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		goto out_free1;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free2;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

out_free2:
	kmem_cache_destroy(ashmem_range_cachep);
out_free1:
	kmem_cache_destroy(ashmem_area_cachep);
out:
	return ret;
}
device_initcall(ashmem_init);