drm/amdgpu: export reservation_object from dmabuf to ttm (v2)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 8da64245b31b924e85561cad158e80b159f639c6..f25cfed339271f13b620d819111c14fdd4db636f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -127,7 +127,7 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
                        placements[c].fpfn =
                                adev->mc.visible_vram_size >> PAGE_SHIFT;
                        placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-                               TTM_PL_FLAG_VRAM;
+                               TTM_PL_FLAG_VRAM | TTM_PL_FLAG_TOPDOWN;
                }
                placements[c].fpfn = 0;
                placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
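The extra TTM_PL_FLAG_TOPDOWN on the CPU-invisible VRAM placement asks TTM's range manager to allocate from the top of the aperture, so buffers that never need CPU access stop fragmenting the CPU-visible window at the bottom of VRAM. Roughly how the range manager of this era translated the flag into drm_mm search and allocation modes (a paraphrase from memory of ttm_bo_manager.c, not the exact kernel source; mm, node, mem, place and lpfn come from the surrounding function):

	/* Paraphrase: TOPDOWN switches the drm_mm node search and
	 * placement to the top of the managed range. */
	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
		sflags = DRM_MM_SEARCH_BELOW;
		aflags = DRM_MM_CREATE_TOP;
	}

	ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
						  mem->page_alignment, 0,
						  place->fpfn, lpfn,
						  sflags, aflags);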
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                bool kernel, u32 domain, u64 flags,
                                struct sg_table *sg,
                                struct ttm_placement *placement,
+                               struct reservation_object *resv,
                                struct amdgpu_bo **bo_ptr)
 {
        struct amdgpu_bo *bo;
@@ -223,18 +224,6 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        size_t acc_size;
        int r;
 
-       /* VI has a hw bug where VM PTEs have to be allocated in groups of 8.
-        * do this as a temporary workaround
-        */
-       if (!(domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA))) {
-               if (adev->asic_type >= CHIP_TOPAZ) {
-                       if (byte_align & 0x7fff)
-                               byte_align = ALIGN(byte_align, 0x8000);
-                       if (size & 0x7fff)
-                               size = ALIGN(size, 0x8000);
-               }
-       }
-
        page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
        size = ALIGN(size, PAGE_SIZE);
 
@@ -273,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
        /* Kernel allocation are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
                        &bo->placement, page_align, !kernel, NULL,
-                       acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+                       acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
        if (unlikely(r != 0)) {
                return r;
        }
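With resv threaded through, ttm_bo_init() can adopt an externally owned reservation object instead of initializing the BO's embedded one. Paraphrasing TTM's behavior at the time (from memory, not a verbatim quote of ttm_bo.c): when resv is non-NULL, the caller must already hold the reservation lock, and the BO simply points at the shared object:

	/* Paraphrase of ttm_bo_init(): adopt the caller's reservation
	 * object when one is supplied, otherwise use the embedded one. */
	if (resv) {
		bo->resv = resv;
		lockdep_assert_held(&bo->resv->lock.base);
	} else {
		bo->resv = &bo->ttm_resv;
		reservation_object_init(&bo->ttm_resv);
	}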
@@ -287,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
 int amdgpu_bo_create(struct amdgpu_device *adev,
                     unsigned long size, int byte_align,
                     bool kernel, u32 domain, u64 flags,
-                    struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+                    struct sg_table *sg,
+                    struct reservation_object *resv,
+                    struct amdgpu_bo **bo_ptr)
 {
        struct ttm_placement placement = {0};
        struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -298,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
        amdgpu_ttm_placement_init(adev, &placement,
                                  placements, domain, flags);
 
-       return amdgpu_bo_create_restricted(adev, size, byte_align,
-                                          kernel, domain, flags,
-                                          sg,
-                                          &placement,
-                                          bo_ptr);
+       return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+                                          domain, flags, sg, &placement,
+                                          resv, bo_ptr);
 }
 
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
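This is the point of the commit title: with the new resv parameter on amdgpu_bo_create(), the PRIME import path can hand the dma-buf's reservation object straight to TTM, so exporter and importer synchronize on the same set of fences. The import side (amdgpu_prime.c, changed elsewhere in this series) likely looks roughly like this sketch; the list bookkeeping of the real function is omitted:

	struct drm_gem_object *
	amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
					 struct dma_buf_attachment *attach,
					 struct sg_table *sg)
	{
		struct reservation_object *resv = attach->dmabuf->resv;
		struct amdgpu_device *adev = dev->dev_private;
		struct amdgpu_bo *bo;
		int ret;

		/* Create the imported BO against the dma-buf's own
		 * reservation object; its lock must be held across
		 * creation, per the ttm_bo_init() contract above. */
		ww_mutex_lock(&resv->lock, NULL);
		ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE,
				       false, AMDGPU_GEM_DOMAIN_GTT, 0, sg,
				       resv, &bo);
		ww_mutex_unlock(&resv->lock);
		if (ret)
			return ERR_PTR(ret);

		return &bo->gem_base;
	}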
@@ -462,7 +451,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
 {
        /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
-       if (0 && (adev->flags & AMDGPU_IS_APU)) {
+       if (0 && (adev->flags & AMD_IS_APU)) {
                /* Useless to evict on IGP chips */
                return 0;
        }
@@ -478,7 +467,6 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
        }
        dev_err(adev->dev, "Userspace still has active objects !\n");
        list_for_each_entry_safe(bo, n, &adev->gem.objects, list) {
-               mutex_lock(&adev->ddev->struct_mutex);
                dev_err(adev->dev, "%p %p %lu %lu force free\n",
                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
                        *((unsigned long *)&bo->gem_base.refcount));
@@ -486,8 +474,7 @@ void amdgpu_bo_force_delete(struct amdgpu_device *adev)
                list_del_init(&bo->list);
                mutex_unlock(&bo->adev->gem.mutex);
                /* this should unref the ttm bo */
-               drm_gem_object_unreference(&bo->gem_base);
-               mutex_unlock(&adev->ddev->struct_mutex);
+               drm_gem_object_unreference_unlocked(&bo->gem_base);
        }
 }
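Dropping the struct_mutex lock/unlock pair around the final unref works because drm_gem_object_unreference_unlocked() takes the lock itself, and only when the reference count actually drops to zero. Roughly what that helper did in kernels of this vintage (paraphrased from drm_gem.h):

	static inline void
	drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
	{
		if (obj)
			kref_put_mutex(&obj->refcount, drm_gem_object_free,
				       &obj->dev->struct_mutex);
	}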
 
@@ -658,13 +645,13 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
  * @shared: true if fence should be added shared
  *
  */
-void amdgpu_bo_fence(struct amdgpu_bo *bo, struct amdgpu_fence *fence,
+void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
                     bool shared)
 {
        struct reservation_object *resv = bo->tbo.resv;
 
        if (shared)
-               reservation_object_add_shared_fence(resv, &fence->base);
+               reservation_object_add_shared_fence(resv, fence);
        else
-               reservation_object_add_excl_fence(resv, &fence->base);
+               reservation_object_add_excl_fence(resv, fence);
 }
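Taking struct fence (the cross-driver base class) instead of amdgpu_fence means fences that originated in another driver, which is exactly what happens once a reservation object is shared over dma-buf, can be attached to the BO as well. Existing amdgpu callers just pass the embedded base fence, as the removed lines above already did internally; a hypothetical call site:

	/* Hypothetical caller: struct amdgpu_fence embeds the generic
	 * struct fence as ->base, which the reworked helper consumes. */
	static void example_attach_fence(struct amdgpu_bo *bo,
					 struct amdgpu_fence *af)
	{
		amdgpu_bo_fence(bo, &af->base, true); /* shared fence */
	}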