/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                               atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return -ERESTART;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bdev->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                    (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&bdev->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&bdev->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int put_count = 0;
        int ret;

        spin_lock(&bdev->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&bdev->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        spin_lock(&bdev->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
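
/*
 * Illustrative usage sketch (not part of this file): a caller that needs
 * temporary exclusive access to a buffer object brackets its work with
 * ttm_bo_reserve()/ttm_bo_unreserve().  The bo pointer here stands for a
 * hypothetical, already-referenced buffer object:
 *
 *      int err = ttm_bo_reserve(bo, true, false, false, 0);
 *      if (err)
 *              return err;
 *      ... inspect or modify bo->mem, validate, etc. ...
 *      ttm_bo_unreserve(bo);
 *
 * Reserving also removes the object from the LRU lists; unreserving puts
 * it back and wakes any waiters on the event queue.
 */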

/*
 * Call with bo->mutex held.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, bdev->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        bdev->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {

                        struct ttm_mem_reg *old_mem = &bo->mem;
                        uint32_t save_flags = old_mem->placement;

                        *old_mem = *mem;
                        mem->mm_node = NULL;
                        ttm_flag_masked(&save_flags, mem->placement,
                                        TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }

        }

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        }

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}
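
/*
 * Note on the move path above (a summary, not a new mechanism): when both
 * the old and the new memory type are non-fixed, the cheap unbind/rebind
 * path ttm_bo_move_ttm() is used; otherwise a driver-provided move
 * callback is preferred, and ttm_bo_move_memcpy() is the last-resort
 * fallback.
 */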

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&bdev->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&bdev->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                return 0;
        }

        spin_lock(&bdev->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&bdev->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&bdev->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&bdev->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&bdev->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&bdev->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&bdev->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&bdev->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&bdev->lru_lock);

        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
                        bool interruptible, bool no_wait)
{
        int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        uint32_t proposed_placement;

        if (bo->mem.mem_type != mem_type)
                goto out;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTART) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        proposed_placement = bdev->driver->evict_flags(bo);

        ret = ttm_bo_mem_space(bo, proposed_placement,
                               &evict_mem, interruptible, no_wait);
        if (unlikely(ret != 0 && ret != -ERESTART))
                ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
                                       &evict_mem, interruptible, no_wait);

        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                goto out;
        }

        spin_lock(&bdev->lru_lock);
        if (evict_mem.mm_node) {
                drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        spin_unlock(&bdev->lru_lock);
        bo->evicted = true;
out:
        return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem,
                                  uint32_t mem_type,
                                  bool interruptible, bool no_wait)
{
        struct drm_mm_node *node;
        struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int put_count = 0;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&man->manager);
        if (unlikely(ret != 0))
                return ret;

        spin_lock(&bdev->lru_lock);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (list_empty(lru))
                        break;

                entry = list_first_entry(lru, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);

                ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
                                            false, 0);

                if (likely(ret == 0))
                        put_count = ttm_bo_del_from_lru(entry);

                spin_unlock(&bdev->lru_lock);

                if (unlikely(ret != 0))
                        return ret;

                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);

                ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

                ttm_bo_unreserve(entry);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                if (ret)
                        return ret;

                spin_lock(&bdev->lru_lock);
        } while (1);

        if (!node) {
                spin_unlock(&bdev->lru_lock);
                return -ENOMEM;
        }

        node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                spin_unlock(&bdev->lru_lock);
                goto retry_pre_get;
        }

        spin_unlock(&bdev->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     uint32_t proposed_placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        uint32_t num_prios = bdev->driver->num_mem_type_prio;
        const uint32_t *prios = bdev->driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_eagain = false;
        struct drm_mm_node *node = NULL;
        int ret;

        mem->mm_node = NULL;
        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type, proposed_placement,
                                               &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        do {
                                ret = drm_mm_pre_get(&man->manager);
                                if (unlikely(ret))
                                        return ret;

                                spin_lock(&bdev->lru_lock);
                                node = drm_mm_search_free(&man->manager,
                                                          mem->num_pages,
                                                          mem->page_alignment,
                                                          1);
                                if (unlikely(!node)) {
                                        spin_unlock(&bdev->lru_lock);
                                        break;
                                }
                                node = drm_mm_get_block_atomic(node,
                                                               mem->num_pages,
                                                               mem->page_alignment);
                                spin_unlock(&bdev->lru_lock);
                        } while (!node);
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        num_prios = bdev->driver->num_mem_busy_prio;
        prios = bdev->driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                if (!man->has_type)
                        continue;

                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          proposed_placement, &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);

                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);

                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }

                if (ret == -ERESTART)
                        has_eagain = true;
        }

        ret = (has_eagain) ? -ERESTART : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
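
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver asking for VRAM with a TT fallback would encode both memory
 * types in the proposed placement and let ttm_bo_mem_space() pick the
 * first priority-ordered type with room:
 *
 *      struct ttm_mem_reg mem = bo->mem;
 *      mem.mm_node = NULL;
 *      ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TT |
 *                             TTM_PL_MASK_CACHING, &mem, true, false);
 *
 * On success, mem.mem_type, mem.placement and (for managed types)
 * mem.mm_node describe the reserved space.
 */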

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        ret = wait_event_interruptible(bo->event_queue,
                                       atomic_read(&bo->cpu_writers) == 0);

        if (ret == -ERESTARTSYS)
                ret = -ERESTART;

        return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       uint32_t proposed_placement,
                       bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (ret)
                return ret;

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;

        /*
         * Determine where to move the buffer.
         */

        ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
                               interruptible, no_wait);
        if (ret)
                goto out_unlock;

        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&bdev->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&bdev->lru_lock);
        }
        return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
                             struct ttm_mem_reg *mem)
{
        if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
                return 0;
        if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
                return 0;

        return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
                               uint32_t proposed_placement,
                               bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        bo->proposed_placement = proposed_placement;

        TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
                  (unsigned long)proposed_placement,
                  (unsigned long)bo->mem.placement);

        /*
         * Check whether we need to move buffer.
         */

        if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
                ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
                                         interruptible, no_wait);
                if (ret) {
                        if (ret != -ERESTART)
                                printk(KERN_ERR TTM_PFX
                                       "Failed moving buffer. "
                                       "Proposed placement 0x%08x\n",
                                       bo->proposed_placement);
                        if (ret == -ENOMEM)
                                printk(KERN_ERR TTM_PFX
                                       "Out of aperture space or "
                                       "DRM memory quota.\n");
                        return ret;
                }
        }

        /*
         * We might need to add a TTM.
         */

        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        /*
         * Validation has succeeded, move the access and other
         * non-mapping-related flag bits from the proposed flags to
         * the active flags.
         */

        ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
                        ~TTM_PL_MASK_MEMTYPE);

        return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
                       uint32_t set_flags, uint32_t clr_flags)
{
        uint32_t new_mask = set_flags | clr_flags;

        if ((bo->type == ttm_bo_type_user) &&
            (clr_flags & TTM_PL_FLAG_CACHED)) {
                printk(KERN_ERR TTM_PFX
                       "User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                if (new_mask & TTM_PL_FLAG_NO_EVICT) {
                        printk(KERN_ERR TTM_PFX "Need to be root to modify"
                               " NO_EVICT status.\n");
                        return -EINVAL;
                }

                if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
                    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                        printk(KERN_ERR TTM_PFX
                               "Incompatible memory specification"
                               " for NO_EVICT buffer.\n");
                        return -EINVAL;
                }
        }
        return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                           struct ttm_buffer_object *bo,
                           unsigned long size,
                           enum ttm_bo_type type,
                           uint32_t flags,
                           uint32_t page_alignment,
                           unsigned long buffer_start,
                           bool interruptible,
                           struct file *persistant_swap_storage,
                           size_t acc_size,
                           void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;

        ret = ttm_bo_check_placement(bo, flags, 0ULL);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * If no caching attributes are set, accept any form of caching.
         */

        if ((flags & TTM_PL_MASK_CACHING) == 0)
                flags |= TTM_PL_MASK_CACHING;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */

        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return bdev->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                             unsigned long size,
                             enum ttm_bo_type type,
                             uint32_t flags,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             bool interruptible,
                             struct file *persistant_swap_storage,
                             struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        int ret;
        struct ttm_mem_global *mem_glob = bdev->mem_glob;

        size_t acc_size =
            ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size, false);
                return -ENOMEM;
        }

        ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
                                     page_alignment, buffer_start,
                                     interruptible,
                                     persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
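
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * creating a 64 KiB kernel buffer in cached system memory with
 * ttm_buffer_object_create().  The bdev pointer is assumed to be an
 * already-initialized struct ttm_bo_device.
 *
 *      struct ttm_buffer_object *bo;
 *      int ret = ttm_buffer_object_create(bdev, 65536, ttm_bo_type_kernel,
 *                                         TTM_PL_FLAG_SYSTEM |
 *                                         TTM_PL_FLAG_CACHED,
 *                                         0, 0, false, NULL, &bo);
 *      if (ret)
 *              return ret;
 *      ...
 *      ttm_bo_unref(&bo);
 */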

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
                             uint32_t mem_type, bool allow_errors)
{
        int ret;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (ret && allow_errors)
                goto out;

        if (bo->mem.mem_type == mem_type)
                ret = ttm_bo_evict(bo, mem_type, false, false);

        if (ret) {
                if (allow_errors) {
                        goto out;
                } else {
                        ret = 0;
                        printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
                }
        }

out:
        return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   struct list_head *head,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_buffer_object *entry;
        int ret;
        int put_count;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&bdev->lru_lock);

        while (!list_empty(head)) {
                entry = list_first_entry(head, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);
                ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                put_count = ttm_bo_del_from_lru(entry);
                spin_unlock(&bdev->lru_lock);
                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);
                BUG_ON(ret);
                ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                ttm_bo_unreserve(entry);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                spin_lock(&bdev->lru_lock);
        }

        spin_unlock(&bdev->lru_lock);

        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

                spin_lock(&bdev->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&bdev->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_offset, unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return ret;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&bdev->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&bdev->lru_lock);

        ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        __free_page(bdev->dummy_read_page);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

/*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_driver_finish in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_mem_global *mem_glob,
                       struct ttm_bo_driver *driver, uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        bdev->dummy_read_page = NULL;
        rwlock_init(&bdev->vm_lock);
        spin_lock_init(&bdev->lru_lock);

        bdev->driver = driver;
        bdev->mem_glob = mem_glob;

        memset(bdev->man, 0, sizeof(bdev->man));

        bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
        if (unlikely(bdev->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
        if (unlikely(ret != 0))
                goto out_err1;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_err2;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        INIT_LIST_HEAD(&bdev->swap_lru);
        bdev->dev_mapping = NULL;
        bdev->need_dma32 = need_dma32;
        ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_err2;
        }

        bdev->ttm_bo_extra_size =
            ttm_round_pot(sizeof(struct ttm_tt)) +
            ttm_round_pot(sizeof(struct ttm_backend));

        bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
            ttm_round_pot(sizeof(struct ttm_buffer_object));

        return 0;
out_err2:
        ttm_bo_clean_mm(bdev, 0);
out_err1:
        __free_page(bdev->dummy_read_page);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
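
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * a driver would typically initialize a bo device at load time and then
 * register its managed memory types, e.g. a 256 MiB VRAM range:
 *
 *      ret = ttm_bo_device_init(&dev_priv->bdev, mem_glob,
 *                               &my_bo_driver, DRM_FILE_PAGE_OFFSET, false);
 *      if (ret)
 *              return ret;
 *      ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 0,
 *                           (256 * 1024 * 1024) >> PAGE_SHIFT);
 *
 * Here dev_priv, mem_glob, my_bo_driver and DRM_FILE_PAGE_OFFSET are all
 * driver-defined; p_offset and p_size are in pages.
 */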

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                      struct ttm_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        if (ttm_mem_reg_is_pci(bdev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {

                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bo->lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                        return ret;
                }
                spin_lock(&bo->lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait)
{
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
                else if (interruptible) {
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
                                return -ERESTART;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
                }
        }
        return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve instead of ttm_bo_block_reservation
         * makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bo->lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
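
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * CPU writes to a buffer the GPU may still be using are bracketed with
 * the synccpu grab/release pair, which waits for GPU idle and tracks
 * active writers via the cpu_writers count:
 *
 *      ret = ttm_bo_synccpu_write_grab(bo, false);
 *      if (ret)
 *              return ret;
 *      ... CPU writes to the (separately mapped) buffer contents ...
 *      ttm_bo_synccpu_write_release(bo);
 */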

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_device *bdev =
            container_of(shrink, struct ttm_bo_device, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&bdev->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&bdev->swap_lru))) {
                        spin_unlock(&bdev->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&bdev->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                /**
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
                 * we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&bdev->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&bdev->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&bdev->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        /**
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /**
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

        /**
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->shrink) == 0)
                ;
}