/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 *  Numa awareness, Christoph Lameter, SGI, June 2005
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

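/*
 * All reserved areas are kept on a single list (vmlist), sorted by
 * ascending address and protected by vmlist_lock: lookups take the
 * lock for reading, insertion and removal take it for writing.
 */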
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

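/*
 * The vunmap_*_range() helpers walk the kernel page tables (init_mm)
 * one level at a time and clear every PTE covering the area; the
 * page-table pages themselves are left in place.
 */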
static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
{
	pte_t *pte;

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static inline void vunmap_pmd_range(pud_t *pud, unsigned long addr,
						unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		vunmap_pte_range(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);
}

static inline void vunmap_pud_range(pgd_t *pgd, unsigned long addr,
						unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		vunmap_pmd_range(pud, addr, next);
	} while (pud++, addr = next, addr != end);
}

void unmap_vm_area(struct vm_struct *area)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	flush_cache_vunmap(addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		vunmap_pud_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		struct page *page = **pages;
		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;
		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
		(*pages)++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int vmap_pmd_range(pud_t *pud, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (vmap_pte_range(pmd, addr, next, prot, pages))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int vmap_pud_range(pgd_t *pgd, unsigned long addr,
			unsigned long end, pgprot_t prot, struct page ***pages)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (vmap_pmd_range(pud, addr, next, prot, pages))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

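/*
 * Mirror image of the unmap walk above, except that missing page-table
 * levels are allocated on the way down.  Note that area->size includes
 * the guard page, so the mapped range stops one page short of it.
 */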
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	pgd_t *pgd;
	unsigned long next;
	unsigned long addr = (unsigned long) area->addr;
	unsigned long end = addr + area->size - PAGE_SIZE;
	int err;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = vmap_pud_range(pgd, addr, next, prot, pages);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end, int node)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long align = 1;
	unsigned long addr;

	if (flags & VM_IOREMAP) {
		int bit = fls(size);

		if (bit > IOREMAP_MAX_ORDER)
			bit = IOREMAP_MAX_ORDER;
		else if (bit < PAGE_SHIFT)
			bit = PAGE_SHIFT;

		align = 1ul << bit;
	}
	addr = ALIGN(start, align);
	size = PAGE_ALIGN(size);

	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
	if (unlikely(!area))
		return NULL;

	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;

	write_lock(&vmlist_lock);
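	/*
	 * First-fit search: vmlist is kept sorted by address, so advance
	 * until an aligned gap of at least @size bytes opens up between
	 * two neighbouring areas (or after the last one) below @end.
	 */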
	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr) {
			if ((unsigned long)tmp->addr + tmp->size >= addr)
				addr = ALIGN(tmp->size +
					(unsigned long)tmp->addr, align);
			continue;
		}
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	if (printk_ratelimit())
		printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
	return NULL;
}

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	return __get_vm_area_node(size, flags, start, end, -1);
}

/**
 * get_vm_area  -  reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

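/*
 * Example (a sketch, not code from this file): an architecture's
 * ioremap() typically reserves the virtual range with
 * get_vm_area(size, VM_IOREMAP), maps the physical range at
 * area->addr itself, and tears it down again via remove_vm_area().
 */
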
struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
{
	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
}

/* Caller must hold vmlist_lock */
static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
		if (tmp->addr == addr)
			break;
	}

	return tmp;
}

/* Caller must hold vmlist_lock */
struct vm_struct *__remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;

	/*
	 * Remove the guard page.
	 */
	tmp->size -= PAGE_SIZE;
	return tmp;
}

/**
 * remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct *v;

	write_lock(&vmlist_lock);
	v = __remove_vm_area(addr);
	write_unlock(&vmlist_lock);
	return v;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		WARN_ON(1);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		WARN_ON(1);
		return;
	}

	debug_check_no_locks_freed(addr, area->size);

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			BUG_ON(!area->pages[i]);
			__free_page(area->pages[i]);
		}

		if (area->flags & VM_VPAGES)
			vfree(area->pages);
		else
			kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 * vfree  -  release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().  If @addr is
 * NULL, no operation is performed.
 *
 * Must not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);

/**
 * vunmap  -  release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * Must not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}
EXPORT_SYMBOL(vunmap);

/**
 * vmap  -  map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}
EXPORT_SYMBOL(vmap);
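
/*
 * Example usage (a sketch, not code from this file): stitch two
 * independently allocated pages into one contiguous virtual range:
 *
 *	struct page *pages[2];
 *	void *virt;
 *
 *	pages[0] = alloc_page(GFP_KERNEL);
 *	pages[1] = alloc_page(GFP_KERNEL);
 *	virt = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
 *	...
 *	vunmap(virt);
 */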

/*
 * Allocate the individual order-0 backing pages for @area and map them
 * into the area's virtual range.  The pages[] bookkeeping array may be
 * too large for kmalloc(), in which case it is vmalloc()ed itself; that
 * recursion is strictly bounded (see below).
 */
void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
				pgprot_t prot, int node)
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
		area->flags |= VM_VPAGES;
	} else
		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
	area->pages = pages;
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		if (node < 0)
			area->pages[i] = alloc_page(gfp_mask);
		else
			area->pages[i] = alloc_pages_node(node, gfp_mask, 0);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_node(area, gfp_mask, prot, -1);
}

/**
 * __vmalloc_node  -  allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or -1
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
			int node)
{
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area_node(size, VM_ALLOC, node);
	if (!area)
		return NULL;

	return __vmalloc_area_node(area, gfp_mask, prot, node);
}
EXPORT_SYMBOL(__vmalloc_node);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	return __vmalloc_node(size, gfp_mask, prot, -1);
}
EXPORT_SYMBOL(__vmalloc);

/**
 * vmalloc  -  allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

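/*
 * Example (a sketch, not code from this file): allocate a large buffer
 * that need not be physically contiguous, and free it again.  Neither
 * call may be made from interrupt context.
 *
 *	char *buf = vmalloc(64 * 1024);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */
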
/**
 * vmalloc_user  -  allocate zeroed virtually contiguous memory for userspace
 *
 * @size: allocation size
 *
 * The memory is zeroed so it can be mapped to userspace without
 * leaking data.
 */
void *vmalloc_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

/**
 * vmalloc_node  -  allocate memory on a specific node
 *
 * @size: allocation size
 * @node: numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
}
EXPORT_SYMBOL(vmalloc_node);

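/*
 * Example (a sketch, not code from this file): a NUMA-aware caller
 * keeping a buffer local to the node it runs on:
 *
 *	void *buf = vmalloc_node(16 * PAGE_SIZE, numa_node_id());
 */
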
#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

/**
 * vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user  -  allocate zeroed virtually contiguous 32bit memory
 *
 * @size: allocation size
 *
 * The memory allocated is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	struct vm_struct *area;
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	if (ret) {
		write_lock(&vmlist_lock);
		area = __find_vm_area(ret);
		area->flags |= VM_USERMAP;
		write_unlock(&vmlist_lock);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_32_user);

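/*
 * vread()/vwrite() copy a byte range that may span several vmalloc
 * areas (used by e.g. /dev/kmem).  Gaps between areas read back as
 * zeroes and are skipped on write; the guard page at the end of each
 * area is never touched.
 */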
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

/**
 * remap_vmalloc_range  -  map vmalloc pages to userspace
 *
 * @vma: vma to cover (map full range of vma)
 * @addr: vmalloc memory
 * @pgoff: number of pages into addr before first page to map
 * @returns: 0 for success, -Exxx on failure
 *
 * This function checks that addr is a valid vmalloc'ed area, and
 * that it is big enough to cover the vma.  Will return failure if
 * those criteria aren't met.
 *
 * Similar to remap_pfn_range() (see mm/memory.c).
 */
int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
						unsigned long pgoff)
{
	struct vm_struct *area;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int ret;

	if ((PAGE_SIZE-1) & (unsigned long)addr)
		return -EINVAL;

	read_lock(&vmlist_lock);
	area = __find_vm_area(addr);
	if (!area)
		goto out_einval_locked;

	if (!(area->flags & VM_USERMAP))
		goto out_einval_locked;

	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
		goto out_einval_locked;
	read_unlock(&vmlist_lock);

	addr += pgoff << PAGE_SHIFT;
	do {
		struct page *page = vmalloc_to_page(addr);
		ret = vm_insert_page(vma, uaddr, page);
		if (ret)
			return ret;

		uaddr += PAGE_SIZE;
		addr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
	vma->vm_flags |= VM_RESERVED;

	return ret;
}
EXPORT_SYMBOL(remap_vmalloc_range);
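
/*
 * Example (a sketch, with a hypothetical per-device buffer dev->vbuf
 * that was allocated with vmalloc_user()): a driver mmap handler can
 * back a userspace mapping with it in one call:
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, dev->vbuf, vma->vm_pgoff);
 *	}
 */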