/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in-use pages.
 * - Tracks whether the page is WC, UC or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
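/*
 * Note: with 4 KiB pages and 8-byte page pointers, NUM_PAGES_TO_ALLOC
 * works out to 512 pages, i.e. a 2 MiB refill/free batch.
 */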
#define IS_UNDEFINED	(0)
#define IS_WC		(1<<1)
#define IS_UC		(1<<2)
#define IS_CACHED	(1<<3)
#define IS_DMA32	(1<<4)

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are actively
 * in use. The other pools can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must
 * be used with the irqsave/irqrestore variants because the pool allocator
 * may be called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is important: it
 * matches the order in which the TTM pages were put back.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 * Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting structure that keeps track of an allocated page along
 * with its DMA address.
 * @page_list: The link into the pool's inuse_list or free_list.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing this allocation.
 * @dma: The bus address of the page. If the page is not allocated
 * via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only
 * place where they may change is a sysfs store. They won't have an
 * immediate effect anyway, so forcing serialized access to them is
 * pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'.
 * @dev: The 'struct device' associated with the 'pool'.
 * @pool: The 'struct dma_pool' associated with the 'dev'.
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools.
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total number of pools in existence.
 * @mm_shrink: The structure used by register_shrinker/unregister_shrinker.
 * @kobj: Sysfs kobject through which the pool options are exposed.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kB to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
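/*
 * The three attributes above take and report values in kB. For example
 * (exact sysfs path depends on where the TTM memory-global kobject is
 * registered):
 *	echo 16384 > .../dma_pool/pool_max_size
 * caps each pool's free list at 16 MiB worth of pages.
 */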

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

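/*
 * Without the x86 set_pages_array_* helpers we fall back to the AGP
 * mapping helpers below. Note that AGP only distinguishes mapped vs.
 * unmapped, so the WC and UC variants end up identical.
 */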
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
	kfree(d_page);
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr) {
		if (is_vmalloc_addr(d_page->vaddr))
			d_page->p = vmalloc_to_page(d_page->vaddr);
		else
			d_page->p = virt_to_page(d_page->vaddr);
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
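/*
 * For example, a TTM with TTM_PAGE_FLAG_DMA32 set and a caching state of
 * tt_uncached maps to IS_UC | IS_DMA32, i.e. the POOL_IS_UC_DMA32 pool.
 */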

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from the pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages at a time.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free all pages
 * in the pool
 * @use_static: safe to use the static buffer
 * @return the number of pages that remain to be freed.
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We pick the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
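/*
 * Callers of ttm_dma_page_pool_free(): the shrinker and the pool teardown
 * path pass use_static = true (both run under _manager->lock, so the
 * static buffer cannot be used concurrently); ttm_dma_unpopulate() passes
 * use_static = false.
 */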

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * This destructor is run when the 'struct device' is freed, albeit
 * the pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					   enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_* calls b/c when dma_pool_destroy is called
	 * the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					   enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver, both of which end up taking
	 * a kref on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM
	 * (put_pages) and call the dev_res destructors: ttm_dma_pool_release.
	 * The nice thing is that at that point in time there are no pages
	 * associated with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. Pages that
 * did change their caching state stay on the d_pages list and are put
 * back into the pool by the caller.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages and put them on the 'd_pages' list, setting
 * their caching state to match the pool type.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32, so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages available in the pool's free list.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages at the head of the free list. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return zero on success, or -ENOMEM if no page was available.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->cpu_address[index] = d_page->vaddr;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the TTM page array holds ttm->num_pages correctly cached
 * pages; on failure a negative error code (-ENOMEM, etc.) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
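/*
 * A typical driver-side sequence (sketch; 'bdev' and the surrounding
 * error handling are hypothetical):
 *
 *	struct ttm_dma_tt *dma_tt = ...;
 *
 *	ret = ttm_dma_populate(dma_tt, bdev->dev);
 *	if (ret)
 *		return ret;
 *	...
 *	ttm_dma_unpopulate(dma_tt, bdev->dev);
 */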

/* Put all pages on the pages list back into the correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array matches list and count number of pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->cpu_address[i] = 0;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
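	/* seeks = 1 (below DEFAULT_SEEKS) marks these pages as cheap to
	 * reclaim, so the VM shrinks the pools relatively aggressively. */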
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				pool->name, pool->nrefills,
				pool->nfrees, pool->npages_in_use,
				pool->npages_free,
				pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
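/*
 * Illustrative debugfs output (values made up):
 *
 *          pool      refills   pages freed    inuse available     name
 *      wc dma32            4             0      512       64 radeon 0000:01:00.0
 */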

#endif