drivers/gpu/drm/ttm/ttm_bo.c
1/**************************************************************************
2 *
3 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27/*
28 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
29 */
30/* Notes:
31 *
32 * We store bo pointer in drm_mm_node struct so we know which bo owns a
33 * specific node. There is no protection on the pointer, thus to make
34 * sure things don't go berserk you have to access this pointer while
35 * holding the global lru lock and make sure anytime you free a node you
36 * reset the pointer to NULL.
37 */
38
39#include "ttm/ttm_module.h"
40#include "ttm/ttm_bo_driver.h"
41#include "ttm/ttm_placement.h"
42#include <linux/jiffies.h>
43#include <linux/slab.h>
44#include <linux/sched.h>
45#include <linux/mm.h>
46#include <linux/file.h>
47#include <linux/module.h>
48
49#define TTM_ASSERT_LOCKED(param)
50#define TTM_DEBUG(fmt, arg...)
51#define TTM_BO_HASH_ORDER 13
52
53static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
54static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
55static void ttm_bo_global_kobj_release(struct kobject *kobj);
56
57static struct attribute ttm_bo_count = {
58 .name = "bo_count",
59 .mode = S_IRUGO
60};
61
62static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
63{
64 int i;
65
66 for (i = 0; i <= TTM_PL_PRIV5; i++)
67 if (flags & (1 << i)) {
68 *mem_type = i;
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
75{
76 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
77
78 printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
79 printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
80 printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
81 printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
82 printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
83 printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
84 man->available_caching);
85 printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
86 man->default_caching);
87 if (mem_type != TTM_PL_SYSTEM) {
88 spin_lock(&bdev->glob->lru_lock);
89 drm_mm_debug_table(&man->manager, TTM_PFX);
90 spin_unlock(&bdev->glob->lru_lock);
91 }
92}
93
94static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
95 struct ttm_placement *placement)
96{
97 int i, ret, mem_type;
98
99 printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
100 bo, bo->mem.num_pages, bo->mem.size >> 10,
101 bo->mem.size >> 20);
102 for (i = 0; i < placement->num_placement; i++) {
103 ret = ttm_mem_type_from_flags(placement->placement[i],
104 &mem_type);
105 if (ret)
106 return;
107 printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
108 i, placement->placement[i], mem_type);
109 ttm_mem_type_debug(bo->bdev, mem_type);
110 }
111}
112
113static ssize_t ttm_bo_global_show(struct kobject *kobj,
114 struct attribute *attr,
115 char *buffer)
116{
117 struct ttm_bo_global *glob =
118 container_of(kobj, struct ttm_bo_global, kobj);
119
120 return snprintf(buffer, PAGE_SIZE, "%lu\n",
121 (unsigned long) atomic_read(&glob->bo_count));
122}
123
124static struct attribute *ttm_bo_global_attrs[] = {
125 &ttm_bo_count,
126 NULL
127};
128
129static const struct sysfs_ops ttm_bo_global_ops = {
130 .show = &ttm_bo_global_show
131};
132
133static struct kobj_type ttm_bo_glob_kobj_type = {
134 .release = &ttm_bo_global_kobj_release,
135 .sysfs_ops = &ttm_bo_global_ops,
136 .default_attrs = ttm_bo_global_attrs
137};
138
139
140static inline uint32_t ttm_bo_type_flags(unsigned type)
141{
142 return 1 << (type);
143}
144
145static void ttm_bo_release_list(struct kref *list_kref)
146{
147 struct ttm_buffer_object *bo =
148 container_of(list_kref, struct ttm_buffer_object, list_kref);
149 struct ttm_bo_device *bdev = bo->bdev;
150
151 BUG_ON(atomic_read(&bo->list_kref.refcount));
152 BUG_ON(atomic_read(&bo->kref.refcount));
153 BUG_ON(atomic_read(&bo->cpu_writers));
154 BUG_ON(bo->sync_obj != NULL);
155 BUG_ON(bo->mem.mm_node != NULL);
156 BUG_ON(!list_empty(&bo->lru));
157 BUG_ON(!list_empty(&bo->ddestroy));
158
159 if (bo->ttm)
160 ttm_tt_destroy(bo->ttm);
161 atomic_dec(&bo->glob->bo_count);
162 if (bo->destroy)
163 bo->destroy(bo);
164 else {
165 ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
166 kfree(bo);
167 }
168}
169
170int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
171{
172
173 if (interruptible) {
174 int ret = 0;
175
176 ret = wait_event_interruptible(bo->event_queue,
177 atomic_read(&bo->reserved) == 0);
178 if (unlikely(ret != 0))
179 return ret;
180 } else {
181 wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
182 }
183 return 0;
184}
185EXPORT_SYMBOL(ttm_bo_wait_unreserved);
186
187static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
188{
189 struct ttm_bo_device *bdev = bo->bdev;
190 struct ttm_mem_type_manager *man;
191
192 BUG_ON(!atomic_read(&bo->reserved));
193
194 if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
195
196 BUG_ON(!list_empty(&bo->lru));
197
198 man = &bdev->man[bo->mem.mem_type];
199 list_add_tail(&bo->lru, &man->lru);
200 kref_get(&bo->list_kref);
201
202 if (bo->ttm != NULL) {
203 list_add_tail(&bo->swap, &bo->glob->swap_lru);
204 kref_get(&bo->list_kref);
205 }
206 }
207}
208
209/**
210 * Call with the lru_lock held.
211 */
212
213static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
214{
215 int put_count = 0;
216
217 if (!list_empty(&bo->swap)) {
218 list_del_init(&bo->swap);
219 ++put_count;
220 }
221 if (!list_empty(&bo->lru)) {
222 list_del_init(&bo->lru);
223 ++put_count;
224 }
225
226 /*
227 * TODO: Add a driver hook to delete from
228 * driver-specific LRU's here.
229 */
230
231 return put_count;
232}
233
234int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
235 bool interruptible,
236 bool no_wait, bool use_sequence, uint32_t sequence)
237{
238 struct ttm_bo_global *glob = bo->glob;
239 int ret;
240
241 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
242 if (use_sequence && bo->seq_valid &&
243 (sequence - bo->val_seq < (1 << 31))) {
244 return -EAGAIN;
245 }
246
247 if (no_wait)
248 return -EBUSY;
249
250 spin_unlock(&glob->lru_lock);
251 ret = ttm_bo_wait_unreserved(bo, interruptible);
252 spin_lock(&glob->lru_lock);
253
254 if (unlikely(ret))
255 return ret;
256 }
257
258 if (use_sequence) {
259 bo->val_seq = sequence;
260 bo->seq_valid = true;
261 } else {
262 bo->seq_valid = false;
263 }
264
265 return 0;
266}
267EXPORT_SYMBOL(ttm_bo_reserve);
268
269static void ttm_bo_ref_bug(struct kref *list_kref)
270{
271 BUG();
272}
273
274int ttm_bo_reserve(struct ttm_buffer_object *bo,
275 bool interruptible,
276 bool no_wait, bool use_sequence, uint32_t sequence)
277{
278 struct ttm_bo_global *glob = bo->glob;
279 int put_count = 0;
280 int ret;
281
282 spin_lock(&glob->lru_lock);
283 ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
284 sequence);
285 if (likely(ret == 0))
286 put_count = ttm_bo_del_from_lru(bo);
287 spin_unlock(&glob->lru_lock);
288
289 while (put_count--)
290 kref_put(&bo->list_kref, ttm_bo_ref_bug);
291
292 return ret;
293}
294
295void ttm_bo_unreserve(struct ttm_buffer_object *bo)
296{
297 struct ttm_bo_global *glob = bo->glob;
298
299 spin_lock(&glob->lru_lock);
300 ttm_bo_add_to_lru(bo);
301 atomic_set(&bo->reserved, 0);
302 wake_up_all(&bo->event_queue);
303 spin_unlock(&glob->lru_lock);
304}
305EXPORT_SYMBOL(ttm_bo_unreserve);
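/*
 * Usage sketch (illustrative only, not taken from a specific driver):
 * callers typically bracket access to a buffer object with a reservation:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (ret == 0) {
 *		... touch or validate the buffer ...
 *		ttm_bo_unreserve(bo);
 *	}
 */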
306
307/*
308 * Call bo->mutex locked.
309 */
310static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
311{
312 struct ttm_bo_device *bdev = bo->bdev;
313 struct ttm_bo_global *glob = bo->glob;
314 int ret = 0;
315 uint32_t page_flags = 0;
316
317 TTM_ASSERT_LOCKED(&bo->mutex);
318 bo->ttm = NULL;
319
320 if (bdev->need_dma32)
321 page_flags |= TTM_PAGE_FLAG_DMA32;
322
323 switch (bo->type) {
324 case ttm_bo_type_device:
325 if (zero_alloc)
326 page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
327 case ttm_bo_type_kernel:
328 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
329 page_flags, glob->dummy_read_page);
330 if (unlikely(bo->ttm == NULL))
331 ret = -ENOMEM;
332 break;
333 case ttm_bo_type_user:
334 bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
335 page_flags | TTM_PAGE_FLAG_USER,
336 glob->dummy_read_page);
337 if (unlikely(bo->ttm == NULL)) {
338 ret = -ENOMEM;
339 break;
340 }
341
342 ret = ttm_tt_set_user(bo->ttm, current,
343 bo->buffer_start, bo->num_pages);
344 if (unlikely(ret != 0))
345 ttm_tt_destroy(bo->ttm);
346 break;
347 default:
348 printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
349 ret = -EINVAL;
350 break;
351 }
352
353 return ret;
354}
355
356static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
357 struct ttm_mem_reg *mem,
358 bool evict, bool interruptible,
359 bool no_wait_reserve, bool no_wait_gpu)
360{
361 struct ttm_bo_device *bdev = bo->bdev;
362 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
363 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
364 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
365 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
366 int ret = 0;
367
368 if (old_is_pci || new_is_pci ||
369 ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
370 ttm_bo_unmap_virtual(bo);
371
372 /*
373 * Create and bind a ttm if required.
374 */
375
376 if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
377 ret = ttm_bo_add_ttm(bo, false);
378 if (ret)
379 goto out_err;
380
381 ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
382 if (ret)
383 goto out_err;
384
385 if (mem->mem_type != TTM_PL_SYSTEM) {
386 ret = ttm_tt_bind(bo->ttm, mem);
387 if (ret)
388 goto out_err;
389 }
390
391 if (bo->mem.mem_type == TTM_PL_SYSTEM) {
392 bo->mem = *mem;
393 mem->mm_node = NULL;
394 goto moved;
395 }
396
397 }
398
399 if (bdev->driver->move_notify)
400 bdev->driver->move_notify(bo, mem);
401
402 if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
403 !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
404 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
405 else if (bdev->driver->move)
406 ret = bdev->driver->move(bo, evict, interruptible,
407 no_wait_reserve, no_wait_gpu, mem);
408 else
409 ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
410
411 if (ret)
412 goto out_err;
413
414moved:
415 if (bo->evicted) {
416 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
417 if (ret)
418 printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
419 bo->evicted = false;
420 }
421
422 if (bo->mem.mm_node) {
423 spin_lock(&bo->lock);
424 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
425 bdev->man[bo->mem.mem_type].gpu_offset;
426 bo->cur_placement = bo->mem.placement;
427 spin_unlock(&bo->lock);
428 } else
429 bo->offset = 0;
430
431 return 0;
432
433out_err:
434 new_man = &bdev->man[bo->mem.mem_type];
435 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
436 ttm_tt_unbind(bo->ttm);
437 ttm_tt_destroy(bo->ttm);
438 bo->ttm = NULL;
439 }
440
441 return ret;
442}
443
444/**
445 * If bo idle, remove from delayed- and lru lists, and unref.
446 * If not idle, and already on delayed list, do nothing.
447 * If not idle, and not on delayed list, put on delayed list,
448 * up the list_kref and schedule a delayed list check.
449 */
450
451static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
452{
453 struct ttm_bo_device *bdev = bo->bdev;
454 struct ttm_bo_global *glob = bo->glob;
455 struct ttm_bo_driver *driver = bdev->driver;
456 int ret;
457
458 spin_lock(&bo->lock);
459 (void) ttm_bo_wait(bo, false, false, !remove_all);
460
461 if (!bo->sync_obj) {
462 int put_count;
463
464 spin_unlock(&bo->lock);
465
466 spin_lock(&glob->lru_lock);
467 put_count = ttm_bo_del_from_lru(bo);
468
469 ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
470 BUG_ON(ret);
471 if (bo->ttm)
472 ttm_tt_unbind(bo->ttm);
473
474 if (!list_empty(&bo->ddestroy)) {
475 list_del_init(&bo->ddestroy);
476 ++put_count;
477 }
478 if (bo->mem.mm_node) {
479 bo->mem.mm_node->private = NULL;
480 drm_mm_put_block(bo->mem.mm_node);
481 bo->mem.mm_node = NULL;
482 }
483 spin_unlock(&glob->lru_lock);
484
485 atomic_set(&bo->reserved, 0);
486
487 while (put_count--)
488 kref_put(&bo->list_kref, ttm_bo_ref_bug);
489
490 return 0;
491 }
492
493 spin_lock(&glob->lru_lock);
494 if (list_empty(&bo->ddestroy)) {
495 void *sync_obj = bo->sync_obj;
496 void *sync_obj_arg = bo->sync_obj_arg;
497
498 kref_get(&bo->list_kref);
499 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
500 spin_unlock(&glob->lru_lock);
501 spin_unlock(&bo->lock);
502
503 if (sync_obj)
504 driver->sync_obj_flush(sync_obj, sync_obj_arg);
505 schedule_delayed_work(&bdev->wq,
506 ((HZ / 100) < 1) ? 1 : HZ / 100);
507 ret = 0;
508
509 } else {
510 spin_unlock(&glob->lru_lock);
511 spin_unlock(&bo->lock);
512 ret = -EBUSY;
513 }
514
515 return ret;
516}
517
518/**
519 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
520 * encountered buffers.
521 */
522
523static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
524{
525 struct ttm_bo_global *glob = bdev->glob;
526 struct ttm_buffer_object *entry = NULL;
527 int ret = 0;
528
529 spin_lock(&glob->lru_lock);
530 if (list_empty(&bdev->ddestroy))
531 goto out_unlock;
532
533 entry = list_first_entry(&bdev->ddestroy,
534 struct ttm_buffer_object, ddestroy);
535 kref_get(&entry->list_kref);
536
537 for (;;) {
538 struct ttm_buffer_object *nentry = NULL;
539
540 if (entry->ddestroy.next != &bdev->ddestroy) {
541 nentry = list_first_entry(&entry->ddestroy,
542 struct ttm_buffer_object, ddestroy);
543 kref_get(&nentry->list_kref);
544 }
545
546 spin_unlock(&glob->lru_lock);
547 ret = ttm_bo_cleanup_refs(entry, remove_all);
548 kref_put(&entry->list_kref, ttm_bo_release_list);
549 entry = nentry;
550
551 if (ret || !entry)
552 goto out;
553
554 spin_lock(&glob->lru_lock);
555 if (list_empty(&entry->ddestroy))
556 break;
557 }
558
559out_unlock:
560 spin_unlock(&glob->lru_lock);
561out:
562 if (entry)
563 kref_put(&entry->list_kref, ttm_bo_release_list);
564 return ret;
565}
566
567static void ttm_bo_delayed_workqueue(struct work_struct *work)
568{
569 struct ttm_bo_device *bdev =
570 container_of(work, struct ttm_bo_device, wq.work);
571
572 if (ttm_bo_delayed_delete(bdev, false)) {
573 schedule_delayed_work(&bdev->wq,
574 ((HZ / 100) < 1) ? 1 : HZ / 100);
575 }
576}
577
578static void ttm_bo_release(struct kref *kref)
579{
580 struct ttm_buffer_object *bo =
581 container_of(kref, struct ttm_buffer_object, kref);
582 struct ttm_bo_device *bdev = bo->bdev;
583
584 if (likely(bo->vm_node != NULL)) {
585 rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
586 drm_mm_put_block(bo->vm_node);
587 bo->vm_node = NULL;
588 }
589 write_unlock(&bdev->vm_lock);
590 ttm_bo_cleanup_refs(bo, false);
591 kref_put(&bo->list_kref, ttm_bo_release_list);
592 write_lock(&bdev->vm_lock);
593}
594
595void ttm_bo_unref(struct ttm_buffer_object **p_bo)
596{
597 struct ttm_buffer_object *bo = *p_bo;
598 struct ttm_bo_device *bdev = bo->bdev;
599
600 *p_bo = NULL;
601 write_lock(&bdev->vm_lock);
602 kref_put(&bo->kref, ttm_bo_release);
603 write_unlock(&bdev->vm_lock);
604}
605EXPORT_SYMBOL(ttm_bo_unref);
606
607static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
608 bool no_wait_reserve, bool no_wait_gpu)
609{
610 struct ttm_bo_device *bdev = bo->bdev;
611 struct ttm_bo_global *glob = bo->glob;
612 struct ttm_mem_reg evict_mem;
613 struct ttm_placement placement;
614 int ret = 0;
615
616 spin_lock(&bo->lock);
617 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
618 spin_unlock(&bo->lock);
619
620 if (unlikely(ret != 0)) {
621 if (ret != -ERESTARTSYS) {
622 printk(KERN_ERR TTM_PFX
623 "Failed to expire sync object before "
624 "buffer eviction.\n");
625 }
626 goto out;
627 }
628
629 BUG_ON(!atomic_read(&bo->reserved));
630
631 evict_mem = bo->mem;
632 evict_mem.mm_node = NULL;
633 evict_mem.bus.io_reserved = false;
634
635 placement.fpfn = 0;
636 placement.lpfn = 0;
637 placement.num_placement = 0;
638 placement.num_busy_placement = 0;
639 bdev->driver->evict_flags(bo, &placement);
640 ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
641 no_wait_reserve, no_wait_gpu);
642 if (ret) {
643 if (ret != -ERESTARTSYS) {
644 printk(KERN_ERR TTM_PFX
645 "Failed to find memory space for "
646 "buffer 0x%p eviction.\n", bo);
647 ttm_bo_mem_space_debug(bo, &placement);
648 }
649 goto out;
650 }
651
652 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
653 no_wait_reserve, no_wait_gpu);
654 if (ret) {
655 if (ret != -ERESTARTSYS)
656 printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
657 spin_lock(&glob->lru_lock);
658 if (evict_mem.mm_node) {
659 evict_mem.mm_node->private = NULL;
660 drm_mm_put_block(evict_mem.mm_node);
661 evict_mem.mm_node = NULL;
662 }
663 spin_unlock(&glob->lru_lock);
664 goto out;
665 }
666 bo->evicted = true;
667out:
668 return ret;
669}
670
671static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
672 uint32_t mem_type,
673 bool interruptible, bool no_wait_reserve,
674 bool no_wait_gpu)
675{
676 struct ttm_bo_global *glob = bdev->glob;
677 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
678 struct ttm_buffer_object *bo;
679 int ret, put_count = 0;
680
681retry:
682 spin_lock(&glob->lru_lock);
683 if (list_empty(&man->lru)) {
684 spin_unlock(&glob->lru_lock);
685 return -EBUSY;
686 }
687
ca262a99
JG
688 bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
689 kref_get(&bo->list_kref);
690
691 ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
692
693 if (unlikely(ret == -EBUSY)) {
694 spin_unlock(&glob->lru_lock);
695 if (likely(!no_wait_gpu))
696 ret = ttm_bo_wait_unreserved(bo, interruptible);
697
698 kref_put(&bo->list_kref, ttm_bo_release_list);
699
700 /**
701 * We *need* to retry after releasing the lru lock.
702 */
703
704 if (unlikely(ret != 0))
705 return ret;
706 goto retry;
707 }
708
709 put_count = ttm_bo_del_from_lru(bo);
710 spin_unlock(&glob->lru_lock);
711
712 BUG_ON(ret != 0);
713
714 while (put_count--)
715 kref_put(&bo->list_kref, ttm_bo_ref_bug);
716
717 ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
718 ttm_bo_unreserve(bo);
719
720 kref_put(&bo->list_kref, ttm_bo_release_list);
721 return ret;
722}
723
724static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
725 struct ttm_mem_type_manager *man,
726 struct ttm_placement *placement,
727 struct ttm_mem_reg *mem,
728 struct drm_mm_node **node)
729{
730 struct ttm_bo_global *glob = bo->glob;
731 unsigned long lpfn;
732 int ret;
733
734 lpfn = placement->lpfn;
735 if (!lpfn)
736 lpfn = man->size;
737 *node = NULL;
738 do {
739 ret = drm_mm_pre_get(&man->manager);
740 if (unlikely(ret))
741 return ret;
742
743 spin_lock(&glob->lru_lock);
744 *node = drm_mm_search_free_in_range(&man->manager,
745 mem->num_pages, mem->page_alignment,
746 placement->fpfn, lpfn, 1);
747 if (unlikely(*node == NULL)) {
748 spin_unlock(&glob->lru_lock);
749 return 0;
750 }
751 *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
752 mem->page_alignment,
753 placement->fpfn,
754 lpfn);
755 spin_unlock(&glob->lru_lock);
756 } while (*node == NULL);
757 return 0;
758}
759
760/**
761 * Repeatedly evict memory from the LRU for @mem_type until we create enough
762 * space, or we've evicted everything and there isn't enough space.
763 */
764static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
765 uint32_t mem_type,
766 struct ttm_placement *placement,
767 struct ttm_mem_reg *mem,
768 bool interruptible,
769 bool no_wait_reserve,
770 bool no_wait_gpu)
771{
772 struct ttm_bo_device *bdev = bo->bdev;
773 struct ttm_bo_global *glob = bdev->glob;
774 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
775 struct drm_mm_node *node;
776 int ret;
777
778 do {
779 ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
780 if (unlikely(ret != 0))
781 return ret;
782 if (node)
783 break;
784 spin_lock(&glob->lru_lock);
785 if (list_empty(&man->lru)) {
786 spin_unlock(&glob->lru_lock);
ba4e7d97 787 break;
788 }
789 spin_unlock(&glob->lru_lock);
790 ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
791 no_wait_reserve, no_wait_gpu);
792 if (unlikely(ret != 0))
793 return ret;
794 } while (1);
795 if (node == NULL)
796 return -ENOMEM;
797 mem->mm_node = node;
798 mem->mem_type = mem_type;
799 return 0;
800}
801
802static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
803 uint32_t cur_placement,
804 uint32_t proposed_placement)
805{
806 uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
807 uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
808
809 /**
810 * Keep current caching if possible.
811 */
812
813 if ((cur_placement & caching) != 0)
814 result |= (cur_placement & caching);
815 else if ((man->default_caching & caching) != 0)
816 result |= man->default_caching;
817 else if ((TTM_PL_FLAG_CACHED & caching) != 0)
818 result |= TTM_PL_FLAG_CACHED;
819 else if ((TTM_PL_FLAG_WC & caching) != 0)
820 result |= TTM_PL_FLAG_WC;
821 else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
822 result |= TTM_PL_FLAG_UNCACHED;
823
824 return result;
825}
826
827static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
828 bool disallow_fixed,
829 uint32_t mem_type,
830 uint32_t proposed_placement,
831 uint32_t *masked_placement)
832{
833 uint32_t cur_flags = ttm_bo_type_flags(mem_type);
834
835 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
836 return false;
837
838 if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
839 return false;
840
841 if ((proposed_placement & man->available_caching) == 0)
842 return false;
843
844 cur_flags |= (proposed_placement & man->available_caching);
845
846 *masked_placement = cur_flags;
847 return true;
848}
849
850/**
851 * Creates space for memory region @mem according to its type.
852 *
853 * This function first searches for free space in compatible memory types in
854 * the priority order defined by the driver. If free space isn't found, then
855 * ttm_bo_mem_force_space is attempted in priority order to evict and find
856 * space.
857 */
858int ttm_bo_mem_space(struct ttm_buffer_object *bo,
859 struct ttm_placement *placement,
860 struct ttm_mem_reg *mem,
861 bool interruptible, bool no_wait_reserve,
862 bool no_wait_gpu)
863{
864 struct ttm_bo_device *bdev = bo->bdev;
865 struct ttm_mem_type_manager *man;
866 uint32_t mem_type = TTM_PL_SYSTEM;
867 uint32_t cur_flags = 0;
868 bool type_found = false;
869 bool type_ok = false;
870 bool has_erestartsys = false;
871 struct drm_mm_node *node = NULL;
872 int i, ret;
873
874 mem->mm_node = NULL;
875 for (i = 0; i < placement->num_placement; ++i) {
876 ret = ttm_mem_type_from_flags(placement->placement[i],
877 &mem_type);
878 if (ret)
879 return ret;
880 man = &bdev->man[mem_type];
881
882 type_ok = ttm_bo_mt_compatible(man,
883 bo->type == ttm_bo_type_user,
884 mem_type,
885 placement->placement[i],
886 &cur_flags);
887
888 if (!type_ok)
889 continue;
890
891 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
892 cur_flags);
893 /*
894 * Use the access and other non-mapping-related flag bits from
895 * the memory placement flags to the current flags
896 */
897 ttm_flag_masked(&cur_flags, placement->placement[i],
898 ~TTM_PL_MASK_MEMTYPE);
899
900 if (mem_type == TTM_PL_SYSTEM)
901 break;
902
903 if (man->has_type && man->use_type) {
904 type_found = true;
905 ret = ttm_bo_man_get_node(bo, man, placement, mem,
906 &node);
907 if (unlikely(ret))
908 return ret;
909 }
910 if (node)
911 break;
912 }
913
914 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
915 mem->mm_node = node;
916 mem->mem_type = mem_type;
917 mem->placement = cur_flags;
918 if (node)
919 node->private = bo;
920 return 0;
921 }
922
923 if (!type_found)
924 return -EINVAL;
925
926 for (i = 0; i < placement->num_busy_placement; ++i) {
927 ret = ttm_mem_type_from_flags(placement->busy_placement[i],
928 &mem_type);
929 if (ret)
930 return ret;
931 man = &bdev->man[mem_type];
932 if (!man->has_type)
933 continue;
934 if (!ttm_bo_mt_compatible(man,
935 bo->type == ttm_bo_type_user,
936 mem_type,
937 placement->busy_placement[i],
938 &cur_flags))
939 continue;
940
941 cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
942 cur_flags);
ca262a99
JG
943 /*
944 * Use the access and other non-mapping-related flag bits from
945 * the memory placement flags to the current flags
946 */
947 ttm_flag_masked(&cur_flags, placement->busy_placement[i],
948 ~TTM_PL_MASK_MEMTYPE);
949
950
951 if (mem_type == TTM_PL_SYSTEM) {
952 mem->mem_type = mem_type;
953 mem->placement = cur_flags;
954 mem->mm_node = NULL;
955 return 0;
956 }
957
958 ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
959 interruptible, no_wait_reserve, no_wait_gpu);
960 if (ret == 0 && mem->mm_node) {
961 mem->placement = cur_flags;
962 mem->mm_node->private = bo;
963 return 0;
964 }
965 if (ret == -ERESTARTSYS)
966 has_erestartsys = true;
967 }
968 ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
969 return ret;
970}
971EXPORT_SYMBOL(ttm_bo_mem_space);
972
973int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
974{
975 if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
976 return -EBUSY;
977
978 return wait_event_interruptible(bo->event_queue,
979 atomic_read(&bo->cpu_writers) == 0);
980}
981EXPORT_SYMBOL(ttm_bo_wait_cpu);
982
983int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
984 struct ttm_placement *placement,
985 bool interruptible, bool no_wait_reserve,
986 bool no_wait_gpu)
987{
988 struct ttm_bo_global *glob = bo->glob;
989 int ret = 0;
990 struct ttm_mem_reg mem;
991
992 BUG_ON(!atomic_read(&bo->reserved));
993
994 /*
995 * FIXME: It's possible to pipeline buffer moves.
996 * Have the driver move function wait for idle when necessary,
997 * instead of doing it here.
998 */
999 spin_lock(&bo->lock);
1000 ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
1001 spin_unlock(&bo->lock);
1002 if (ret)
1003 return ret;
1004 mem.num_pages = bo->num_pages;
1005 mem.size = mem.num_pages << PAGE_SHIFT;
1006 mem.page_alignment = bo->mem.page_alignment;
1007 mem.bus.io_reserved = false;
1008 /*
1009 * Determine where to move the buffer.
1010 */
1011 ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
1012 if (ret)
1013 goto out_unlock;
1014 ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
1015out_unlock:
1016 if (ret && mem.mm_node) {
1017 spin_lock(&glob->lru_lock);
1018 mem.mm_node->private = NULL;
1019 drm_mm_put_block(mem.mm_node);
1020 spin_unlock(&glob->lru_lock);
1021 }
1022 return ret;
1023}
1024
1025static int ttm_bo_mem_compat(struct ttm_placement *placement,
1026 struct ttm_mem_reg *mem)
1027{
1028 int i;
1029 struct drm_mm_node *node = mem->mm_node;
1030
1031 if (node && placement->lpfn != 0 &&
1032 (node->start < placement->fpfn ||
1033 node->start + node->size > placement->lpfn))
1034 return -1;
1035
1036 for (i = 0; i < placement->num_placement; i++) {
1037 if ((placement->placement[i] & mem->placement &
1038 TTM_PL_MASK_CACHING) &&
1039 (placement->placement[i] & mem->placement &
1040 TTM_PL_MASK_MEM))
1041 return i;
1042 }
1043 return -1;
1044}
1045
1046int ttm_bo_validate(struct ttm_buffer_object *bo,
1047 struct ttm_placement *placement,
1048 bool interruptible, bool no_wait_reserve,
1049 bool no_wait_gpu)
1050{
1051 int ret;
1052
1053 BUG_ON(!atomic_read(&bo->reserved));
1054 /* Check that range is valid */
1055 if (placement->lpfn || placement->fpfn)
1056 if (placement->fpfn > placement->lpfn ||
1057 (placement->lpfn - placement->fpfn) < bo->num_pages)
1058 return -EINVAL;
1059 /*
1060 * Check whether we need to move buffer.
1061 */
1062 ret = ttm_bo_mem_compat(placement, &bo->mem);
1063 if (ret < 0) {
1064 ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
1065 if (ret)
1066 return ret;
1067 } else {
1068 /*
1069 * Use the access and other non-mapping-related flag bits from
1070 * the compatible memory placement flags to the active flags
1071 */
1072 ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
1073 ~TTM_PL_MASK_MEMTYPE);
1074 }
1075 /*
1076 * We might need to add a TTM.
1077 */
1078 if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
1079 ret = ttm_bo_add_ttm(bo, true);
1080 if (ret)
1081 return ret;
1082 }
1083 return 0;
1084}
1085EXPORT_SYMBOL(ttm_bo_validate);
1086
1087int ttm_bo_check_placement(struct ttm_buffer_object *bo,
1088 struct ttm_placement *placement)
1089{
1090 int i;
1091
1092 if (placement->fpfn || placement->lpfn) {
1093 if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
1094 printk(KERN_ERR TTM_PFX "Page number range too small "
1095 "Need %lu pages, range is [%u, %u]\n",
1096 bo->mem.num_pages, placement->fpfn,
1097 placement->lpfn);
1098 return -EINVAL;
1099 }
1100 }
1101 for (i = 0; i < placement->num_placement; i++) {
1102 if (!capable(CAP_SYS_ADMIN)) {
1103 if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
1104 printk(KERN_ERR TTM_PFX "Need to be root to "
1105 "modify NO_EVICT status.\n");
1106 return -EINVAL;
1107 }
1108 }
1109 }
1110 for (i = 0; i < placement->num_busy_placement; i++) {
1111 if (!capable(CAP_SYS_ADMIN)) {
1112 if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
1113 printk(KERN_ERR TTM_PFX "Need to be root to "
1114 "modify NO_EVICT status.\n");
1115 return -EINVAL;
1116 }
1117 }
1118 }
1119 return 0;
1120}
1121
1122int ttm_bo_init(struct ttm_bo_device *bdev,
1123 struct ttm_buffer_object *bo,
1124 unsigned long size,
1125 enum ttm_bo_type type,
1126 struct ttm_placement *placement,
1127 uint32_t page_alignment,
1128 unsigned long buffer_start,
1129 bool interruptible,
1130 struct file *persistant_swap_storage,
1131 size_t acc_size,
1132 void (*destroy) (struct ttm_buffer_object *))
1133{
1134 int ret = 0;
1135 unsigned long num_pages;
1136
1137 size += buffer_start & ~PAGE_MASK;
1138 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1139 if (num_pages == 0) {
1140 printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
1141 return -EINVAL;
1142 }
1143 bo->destroy = destroy;
1144
1145 spin_lock_init(&bo->lock);
1146 kref_init(&bo->kref);
1147 kref_init(&bo->list_kref);
1148 atomic_set(&bo->cpu_writers, 0);
1149 atomic_set(&bo->reserved, 1);
1150 init_waitqueue_head(&bo->event_queue);
1151 INIT_LIST_HEAD(&bo->lru);
1152 INIT_LIST_HEAD(&bo->ddestroy);
1153 INIT_LIST_HEAD(&bo->swap);
1154 bo->bdev = bdev;
1155 bo->glob = bdev->glob;
1156 bo->type = type;
1157 bo->num_pages = num_pages;
1158 bo->mem.size = num_pages << PAGE_SHIFT;
1159 bo->mem.mem_type = TTM_PL_SYSTEM;
1160 bo->mem.num_pages = bo->num_pages;
1161 bo->mem.mm_node = NULL;
1162 bo->mem.page_alignment = page_alignment;
1163 bo->mem.bus.io_reserved = false;
1164 bo->buffer_start = buffer_start & PAGE_MASK;
1165 bo->priv_flags = 0;
1166 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1167 bo->seq_valid = false;
1168 bo->persistant_swap_storage = persistant_swap_storage;
1169 bo->acc_size = acc_size;
1170 atomic_inc(&bo->glob->bo_count);
1171
1172 ret = ttm_bo_check_placement(bo, placement);
1173 if (unlikely(ret != 0))
1174 goto out_err;
1175
1176 /*
1177 * For ttm_bo_type_device buffers, allocate
1178 * address space from the device.
1179 */
1180 if (bo->type == ttm_bo_type_device) {
1181 ret = ttm_bo_setup_vm(bo);
1182 if (ret)
1183 goto out_err;
1184 }
1185
1186 ret = ttm_bo_validate(bo, placement, interruptible, false, false);
1187 if (ret)
1188 goto out_err;
1189
1190 ttm_bo_unreserve(bo);
1191 return 0;
1192
1193out_err:
1194 ttm_bo_unreserve(bo);
1195 ttm_bo_unref(&bo);
1196
1197 return ret;
1198}
1199EXPORT_SYMBOL(ttm_bo_init);
1200
1201static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
1202 unsigned long num_pages)
1203{
1204 size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
1205 PAGE_MASK;
1206
1207 return glob->ttm_bo_size + 2 * page_array_size;
1208}
1209
1210int ttm_bo_create(struct ttm_bo_device *bdev,
1211 unsigned long size,
1212 enum ttm_bo_type type,
1213 struct ttm_placement *placement,
1214 uint32_t page_alignment,
1215 unsigned long buffer_start,
1216 bool interruptible,
1217 struct file *persistant_swap_storage,
1218 struct ttm_buffer_object **p_bo)
1219{
1220 struct ttm_buffer_object *bo;
1221 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1222 int ret;
1223
1224 size_t acc_size =
1225 ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
1226 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1227 if (unlikely(ret != 0))
1228 return ret;
1229
1230 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1231
1232 if (unlikely(bo == NULL)) {
1233 ttm_mem_global_free(mem_glob, acc_size);
1234 return -ENOMEM;
1235 }
1236
1237 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1238 buffer_start, interruptible,
1239 persistant_swap_storage, acc_size, NULL);
1240 if (likely(ret == 0))
1241 *p_bo = bo;
1242
1243 return ret;
1244}
1245
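/*
 * Usage sketch (illustrative only; "bdev" and "placement" are assumed to
 * have been set up by the driver): a kernel-owned buffer can be created
 * and later released roughly as follows:
 *
 *	struct ttm_buffer_object *bo;
 *	int ret = ttm_bo_create(bdev, size, ttm_bo_type_kernel, &placement,
 *				0, 0, false, NULL, &bo);
 *	if (ret == 0)
 *		ttm_bo_unref(&bo);
 */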
1246static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1247 unsigned mem_type, bool allow_errors)
1248{
1249 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1250 struct ttm_bo_global *glob = bdev->glob;
1251 int ret;
1252
1253 /*
1254 * Can't use standard list traversal since we're unlocking.
1255 */
1256
1257 spin_lock(&glob->lru_lock);
1258 while (!list_empty(&man->lru)) {
1259 spin_unlock(&glob->lru_lock);
1260 ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
1261 if (ret) {
1262 if (allow_errors) {
1263 return ret;
1264 } else {
1265 printk(KERN_ERR TTM_PFX
1266 "Cleanup eviction failed\n");
1267 }
1268 }
1269 spin_lock(&glob->lru_lock);
1270 }
1271 spin_unlock(&glob->lru_lock);
1272 return 0;
1273}
1274
1275int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1276{
1277 struct ttm_bo_global *glob = bdev->glob;
1278 struct ttm_mem_type_manager *man;
1279 int ret = -EINVAL;
1280
1281 if (mem_type >= TTM_NUM_MEM_TYPES) {
1282 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
1283 return ret;
1284 }
1285 man = &bdev->man[mem_type];
1286
1287 if (!man->has_type) {
1288 printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
1289 "memory manager type %u\n", mem_type);
1290 return ret;
1291 }
1292
1293 man->use_type = false;
1294 man->has_type = false;
1295
1296 ret = 0;
1297 if (mem_type > 0) {
1298 ttm_bo_force_list_clean(bdev, mem_type, false);
1299
1300 spin_lock(&glob->lru_lock);
1301 if (drm_mm_clean(&man->manager))
1302 drm_mm_takedown(&man->manager);
1303 else
1304 ret = -EBUSY;
1305
1306 spin_unlock(&glob->lru_lock);
1307 }
1308
1309 return ret;
1310}
1311EXPORT_SYMBOL(ttm_bo_clean_mm);
1312
1313int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1314{
1315 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1316
1317 if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
1318 printk(KERN_ERR TTM_PFX
1319 "Illegal memory manager memory type %u.\n",
1320 mem_type);
1321 return -EINVAL;
1322 }
1323
1324 if (!man->has_type) {
1325 printk(KERN_ERR TTM_PFX
1326 "Memory type %u has not been initialized.\n",
1327 mem_type);
1328 return 0;
1329 }
1330
1331 return ttm_bo_force_list_clean(bdev, mem_type, true);
1332}
1333EXPORT_SYMBOL(ttm_bo_evict_mm);
1334
1335int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1336 unsigned long p_size)
1337{
1338 int ret = -EINVAL;
1339 struct ttm_mem_type_manager *man;
1340
1341 if (type >= TTM_NUM_MEM_TYPES) {
1342 printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
1343 return ret;
1344 }
1345
1346 man = &bdev->man[type];
1347 if (man->has_type) {
1348 printk(KERN_ERR TTM_PFX
1349 "Memory manager already initialized for type %d\n",
1350 type);
1351 return ret;
1352 }
1353
1354 ret = bdev->driver->init_mem_type(bdev, type, man);
1355 if (ret)
1356 return ret;
1357
1358 ret = 0;
1359 if (type != TTM_PL_SYSTEM) {
1360 if (!p_size) {
1361 printk(KERN_ERR TTM_PFX
1362 "Zero size memory manager type %d\n",
1363 type);
1364 return ret;
1365 }
1366 ret = drm_mm_init(&man->manager, 0, p_size);
1367 if (ret)
1368 return ret;
1369 }
1370 man->has_type = true;
1371 man->use_type = true;
1372 man->size = p_size;
1373
1374 INIT_LIST_HEAD(&man->lru);
1375
1376 return 0;
1377}
1378EXPORT_SYMBOL(ttm_bo_init_mm);
1379
1380static void ttm_bo_global_kobj_release(struct kobject *kobj)
1381{
1382 struct ttm_bo_global *glob =
1383 container_of(kobj, struct ttm_bo_global, kobj);
1384
1385 ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
1386 __free_page(glob->dummy_read_page);
1387 kfree(glob);
1388}
1389
1390void ttm_bo_global_release(struct ttm_global_reference *ref)
1391{
1392 struct ttm_bo_global *glob = ref->object;
1393
1394 kobject_del(&glob->kobj);
1395 kobject_put(&glob->kobj);
1396}
1397EXPORT_SYMBOL(ttm_bo_global_release);
1398
1399int ttm_bo_global_init(struct ttm_global_reference *ref)
1400{
1401 struct ttm_bo_global_ref *bo_ref =
1402 container_of(ref, struct ttm_bo_global_ref, ref);
1403 struct ttm_bo_global *glob = ref->object;
1404 int ret;
1405
1406 mutex_init(&glob->device_list_mutex);
1407 spin_lock_init(&glob->lru_lock);
1408 glob->mem_glob = bo_ref->mem_glob;
1409 glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
1410
1411 if (unlikely(glob->dummy_read_page == NULL)) {
1412 ret = -ENOMEM;
1413 goto out_no_drp;
1414 }
1415
1416 INIT_LIST_HEAD(&glob->swap_lru);
1417 INIT_LIST_HEAD(&glob->device_list);
1418
1419 ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
1420 ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
1421 if (unlikely(ret != 0)) {
1422 printk(KERN_ERR TTM_PFX
1423 "Could not register buffer object swapout.\n");
1424 goto out_no_shrink;
1425 }
1426
1427 glob->ttm_bo_extra_size =
1428 ttm_round_pot(sizeof(struct ttm_tt)) +
1429 ttm_round_pot(sizeof(struct ttm_backend));
1430
1431 glob->ttm_bo_size = glob->ttm_bo_extra_size +
1432 ttm_round_pot(sizeof(struct ttm_buffer_object));
1433
1434 atomic_set(&glob->bo_count, 0);
1435
1436 ret = kobject_init_and_add(
1437 &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
1438 if (unlikely(ret != 0))
1439 kobject_put(&glob->kobj);
1440 return ret;
1441out_no_shrink:
1442 __free_page(glob->dummy_read_page);
1443out_no_drp:
1444 kfree(glob);
1445 return ret;
1446}
1447EXPORT_SYMBOL(ttm_bo_global_init);
1448
1449
1450int ttm_bo_device_release(struct ttm_bo_device *bdev)
1451{
1452 int ret = 0;
1453 unsigned i = TTM_NUM_MEM_TYPES;
1454 struct ttm_mem_type_manager *man;
1455 struct ttm_bo_global *glob = bdev->glob;
1456
1457 while (i--) {
1458 man = &bdev->man[i];
1459 if (man->has_type) {
1460 man->use_type = false;
1461 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1462 ret = -EBUSY;
1463 printk(KERN_ERR TTM_PFX
1464 "DRM memory manager type %d "
1465 "is not clean.\n", i);
1466 }
1467 man->has_type = false;
1468 }
1469 }
1470
1471 mutex_lock(&glob->device_list_mutex);
1472 list_del(&bdev->device_list);
1473 mutex_unlock(&glob->device_list_mutex);
1474
1475 if (!cancel_delayed_work(&bdev->wq))
1476 flush_scheduled_work();
1477
1478 while (ttm_bo_delayed_delete(bdev, true))
1479 ;
1480
1481 spin_lock(&glob->lru_lock);
1482 if (list_empty(&bdev->ddestroy))
1483 TTM_DEBUG("Delayed destroy list was clean\n");
1484
1485 if (list_empty(&bdev->man[0].lru))
1486 TTM_DEBUG("Swap list was clean\n");
1487 spin_unlock(&glob->lru_lock);
1488
1489 BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
1490 write_lock(&bdev->vm_lock);
1491 drm_mm_takedown(&bdev->addr_space_mm);
1492 write_unlock(&bdev->vm_lock);
1493
1494 return ret;
1495}
1496EXPORT_SYMBOL(ttm_bo_device_release);
1497
1498int ttm_bo_device_init(struct ttm_bo_device *bdev,
1499 struct ttm_bo_global *glob,
1500 struct ttm_bo_driver *driver,
1501 uint64_t file_page_offset,
1502 bool need_dma32)
1503{
1504 int ret = -EINVAL;
1505
1506 rwlock_init(&bdev->vm_lock);
1507 bdev->driver = driver;
1508
1509 memset(bdev->man, 0, sizeof(bdev->man));
1510
1511 /*
1512 * Initialize the system memory buffer type.
1513 * Other types need to be driver / IOCTL initialized.
1514 */
1515 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1516 if (unlikely(ret != 0))
1517 goto out_no_sys;
1518
1519 bdev->addr_space_rb = RB_ROOT;
1520 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1521 if (unlikely(ret != 0))
1522 goto out_no_addr_mm;
1523
1524 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
1525 bdev->nice_mode = true;
1526 INIT_LIST_HEAD(&bdev->ddestroy);
1527 bdev->dev_mapping = NULL;
1528 bdev->glob = glob;
1529 bdev->need_dma32 = need_dma32;
1530
1531 mutex_lock(&glob->device_list_mutex);
1532 list_add_tail(&bdev->device_list, &glob->device_list);
1533 mutex_unlock(&glob->device_list_mutex);
1534
1535 return 0;
1536out_no_addr_mm:
1537 ttm_bo_clean_mm(bdev, 0);
1538out_no_sys:
1539 return ret;
1540}
1541EXPORT_SYMBOL(ttm_bo_device_init);
1542
1543/*
1544 * buffer object vm functions.
1545 */
1546
1547bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1548{
1549 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1550
1551 if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
1552 if (mem->mem_type == TTM_PL_SYSTEM)
1553 return false;
1554
1555 if (man->flags & TTM_MEMTYPE_FLAG_CMA)
1556 return false;
1557
1558 if (mem->placement & TTM_PL_FLAG_CACHED)
1559 return false;
1560 }
1561 return true;
1562}
1563
1564void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
1565{
1566 struct ttm_bo_device *bdev = bo->bdev;
1567 loff_t offset = (loff_t) bo->addr_space_offset;
1568 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
1569
1570 if (!bdev->dev_mapping)
1571 return;
1572 unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
1573 ttm_mem_io_free(bdev, &bo->mem);
1574}
1575EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1576
1577static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
1578{
1579 struct ttm_bo_device *bdev = bo->bdev;
1580 struct rb_node **cur = &bdev->addr_space_rb.rb_node;
1581 struct rb_node *parent = NULL;
1582 struct ttm_buffer_object *cur_bo;
1583 unsigned long offset = bo->vm_node->start;
1584 unsigned long cur_offset;
1585
1586 while (*cur) {
1587 parent = *cur;
1588 cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
1589 cur_offset = cur_bo->vm_node->start;
1590 if (offset < cur_offset)
1591 cur = &parent->rb_left;
1592 else if (offset > cur_offset)
1593 cur = &parent->rb_right;
1594 else
1595 BUG();
1596 }
1597
1598 rb_link_node(&bo->vm_rb, parent, cur);
1599 rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
1600}
1601
1602/**
1603 * ttm_bo_setup_vm:
1604 *
1605 * @bo: the buffer to allocate address space for
1606 *
1607 * Allocate address space in the drm device so that applications
1608 * can mmap the buffer and access the contents. This only
1609 * applies to ttm_bo_type_device objects as others are not
1610 * placed in the drm device address space.
1611 */
1612
1613static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
1614{
1615 struct ttm_bo_device *bdev = bo->bdev;
1616 int ret;
1617
1618retry_pre_get:
1619 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1620 if (unlikely(ret != 0))
1621 return ret;
1622
1623 write_lock(&bdev->vm_lock);
1624 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1625 bo->mem.num_pages, 0, 0);
1626
1627 if (unlikely(bo->vm_node == NULL)) {
1628 ret = -ENOMEM;
1629 goto out_unlock;
1630 }
1631
1632 bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
1633 bo->mem.num_pages, 0);
1634
1635 if (unlikely(bo->vm_node == NULL)) {
1636 write_unlock(&bdev->vm_lock);
1637 goto retry_pre_get;
1638 }
1639
1640 ttm_bo_vm_insert_rb(bo);
1641 write_unlock(&bdev->vm_lock);
1642 bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
1643
1644 return 0;
1645out_unlock:
1646 write_unlock(&bdev->vm_lock);
1647 return ret;
1648}
1649
1650int ttm_bo_wait(struct ttm_buffer_object *bo,
1651 bool lazy, bool interruptible, bool no_wait)
1652{
1653 struct ttm_bo_driver *driver = bo->bdev->driver;
1654 void *sync_obj;
1655 void *sync_obj_arg;
1656 int ret = 0;
1657
1658 if (likely(bo->sync_obj == NULL))
1659 return 0;
1660
1661 while (bo->sync_obj) {
1662
1663 if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
1664 void *tmp_obj = bo->sync_obj;
1665 bo->sync_obj = NULL;
1666 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1667 spin_unlock(&bo->lock);
1668 driver->sync_obj_unref(&tmp_obj);
1669 spin_lock(&bo->lock);
1670 continue;
1671 }
1672
1673 if (no_wait)
1674 return -EBUSY;
1675
1676 sync_obj = driver->sync_obj_ref(bo->sync_obj);
1677 sync_obj_arg = bo->sync_obj_arg;
1678 spin_unlock(&bo->lock);
1679 ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
1680 lazy, interruptible);
1681 if (unlikely(ret != 0)) {
1682 driver->sync_obj_unref(&sync_obj);
1683 spin_lock(&bo->lock);
1684 return ret;
1685 }
1686 spin_lock(&bo->lock);
1687 if (likely(bo->sync_obj == sync_obj &&
1688 bo->sync_obj_arg == sync_obj_arg)) {
1689 void *tmp_obj = bo->sync_obj;
1690 bo->sync_obj = NULL;
1691 clear_bit(TTM_BO_PRIV_FLAG_MOVING,
1692 &bo->priv_flags);
1693 spin_unlock(&bo->lock);
1694 driver->sync_obj_unref(&sync_obj);
1695 driver->sync_obj_unref(&tmp_obj);
1696 spin_lock(&bo->lock);
1697 } else {
1698 spin_unlock(&bo->lock);
1699 driver->sync_obj_unref(&sync_obj);
1700 spin_lock(&bo->lock);
1701 }
1702 }
1703 return 0;
1704}
1705EXPORT_SYMBOL(ttm_bo_wait);
1706
1707void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
1708{
1709 atomic_set(&bo->reserved, 0);
1710 wake_up_all(&bo->event_queue);
1711}
1712
1713int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
1714 bool no_wait)
1715{
1716 int ret;
1717
1718 while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
1719 if (no_wait)
1720 return -EBUSY;
1721 else if (interruptible) {
1722 ret = wait_event_interruptible
1723 (bo->event_queue, atomic_read(&bo->reserved) == 0);
1724 if (unlikely(ret != 0))
1725 return ret;
1726 } else {
1727 wait_event(bo->event_queue,
1728 atomic_read(&bo->reserved) == 0);
1729 }
1730 }
1731 return 0;
1732}
1733
1734int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
1735{
1736 int ret = 0;
1737
1738 /*
1739 * Using ttm_bo_reserve instead of ttm_bo_block_reservation
1740 * makes sure the lru lists are updated.
1741 */
1742
1743 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
1744 if (unlikely(ret != 0))
1745 return ret;
1746 spin_lock(&bo->lock);
1747 ret = ttm_bo_wait(bo, false, true, no_wait);
1748 spin_unlock(&bo->lock);
1749 if (likely(ret == 0))
1750 atomic_inc(&bo->cpu_writers);
1751 ttm_bo_unreserve(bo);
1752 return ret;
1753}
1754EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
1755
1756void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
1757{
1758 if (atomic_dec_and_test(&bo->cpu_writers))
1759 wake_up_all(&bo->event_queue);
1760}
1761EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
1762
1763/**
1764 * A buffer object shrink method that tries to swap out the first
1765 * buffer object on the bo_global::swap_lru list.
1766 */
1767
1768static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1769{
1770 struct ttm_bo_global *glob =
1771 container_of(shrink, struct ttm_bo_global, shrink);
1772 struct ttm_buffer_object *bo;
1773 int ret = -EBUSY;
1774 int put_count;
1775 uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
1776
1777 spin_lock(&glob->lru_lock);
1778 while (ret == -EBUSY) {
1779 if (unlikely(list_empty(&glob->swap_lru))) {
1780 spin_unlock(&glob->lru_lock);
1781 return -EBUSY;
1782 }
1783
1784 bo = list_first_entry(&glob->swap_lru,
1785 struct ttm_buffer_object, swap);
1786 kref_get(&bo->list_kref);
1787
1788 /**
1789 * Reserve buffer. Since we unlock while sleeping, we need
1790 * to re-check that nobody removed us from the swap-list while
1791 * we slept.
1792 */
1793
1794 ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
1795 if (unlikely(ret == -EBUSY)) {
1796 spin_unlock(&glob->lru_lock);
1797 ttm_bo_wait_unreserved(bo, false);
1798 kref_put(&bo->list_kref, ttm_bo_release_list);
1799 spin_lock(&glob->lru_lock);
1800 }
1801 }
1802
1803 BUG_ON(ret != 0);
1804 put_count = ttm_bo_del_from_lru(bo);
1805 spin_unlock(&glob->lru_lock);
1806
1807 while (put_count--)
1808 kref_put(&bo->list_kref, ttm_bo_ref_bug);
1809
1810 /**
1811 * Wait for GPU, then move to system cached.
1812 */
1813
1814 spin_lock(&bo->lock);
1815 ret = ttm_bo_wait(bo, false, false, false);
1816 spin_unlock(&bo->lock);
1817
1818 if (unlikely(ret != 0))
1819 goto out;
1820
1821 if ((bo->mem.placement & swap_placement) != swap_placement) {
1822 struct ttm_mem_reg evict_mem;
1823
1824 evict_mem = bo->mem;
1825 evict_mem.mm_node = NULL;
1826 evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
1827 evict_mem.mem_type = TTM_PL_SYSTEM;
1828
1829 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
1830 false, false, false);
1831 if (unlikely(ret != 0))
1832 goto out;
1833 }
1834
1835 ttm_bo_unmap_virtual(bo);
1836
1837 /**
1838 * Swap out. Buffer will be swapped in again as soon as
1839 * anyone tries to access a ttm page.
1840 */
1841
1842 if (bo->bdev->driver->swap_notify)
1843 bo->bdev->driver->swap_notify(bo);
1844
1845 ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
1846out:
1847
1848 /**
1849 *
1850 * Unreserve without putting on LRU to avoid swapping out an
1851 * already swapped buffer.
1852 */
1853
1854 atomic_set(&bo->reserved, 0);
1855 wake_up_all(&bo->event_queue);
1856 kref_put(&bo->list_kref, ttm_bo_release_list);
1857 return ret;
1858}
1859
1860void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1861{
1862 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
1863 ;
1864}
1865EXPORT_SYMBOL(ttm_bo_swapout_all);