drm/amdgpu: add reference for **fence
author Chunming Zhou <david1.zhou@amd.com>
Wed, 12 Aug 2015 04:58:31 +0000 (12:58 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:51:17 +0000 (16:51 -0400)
Fix the case where the fence is sometimes already released by the time it is
passed back through **fence. Take a reference on it before handing it out.

Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
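
Every hunk below applies the same ownership rule: whichever function hands a
fence out through a "struct fence **" out-parameter (or stores it in
sched_job->s_fence) takes its own reference with fence_get(), and the consumer
balances it with fence_put() once it has taken over or finished with the fence.
A minimal sketch of that rule, using hypothetical function names rather than
the amdgpu helpers actually touched in this patch:

#include <linux/fence.h>

/*
 * Hypothetical example (not part of this patch): a helper that returns a
 * fence through an out-parameter must hand the caller its own reference,
 * otherwise the submission path may drop the last reference before the
 * caller ever looks at it.
 */
static int example_submit(struct fence *hw_fence, struct fence **f)
{
	if (!hw_fence)
		return -EINVAL;

	/* The caller now owns one reference and must fence_put() it. */
	*f = fence_get(hw_fence);
	return 0;
}

static void example_caller(struct fence *hw_fence)
{
	struct fence *f = NULL;

	if (example_submit(hw_fence, &f))
		return;

	/* ... wait on f or hand it to another owner ... */

	/* Drop the reference taken on our behalf. */
	fence_put(f);
}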
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

index f428288d83631b8bd64b90ac15eba654137a6c87..8796938216d6002c7d07cd3bc36dfcdbe5c5e702 100644 (file)
@@ -136,6 +136,7 @@ static void amdgpu_job_work_func(struct work_struct *work)
                sched_job->free_job(sched_job);
        mutex_unlock(&sched_job->job_lock);
        /* after processing job, free memory */
+       fence_put(&sched_job->s_fence->base);
        kfree(sched_job);
 }
 struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
index d2e5f3b90a3c76b05cae70b2fd3fde46c4251ce4..a86e38158afaead697bfbc8b9b9cddb1ea9b77cf 100644 (file)
@@ -133,13 +133,13 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                        return r;
                }
                ibs[num_ibs - 1].sequence = sched_job->s_fence->v_seq;
-               *f = &sched_job->s_fence->base;
+               *f = fence_get(&sched_job->s_fence->base);
                mutex_unlock(&sched_job->job_lock);
        } else {
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
                if (r)
                        return r;
-               *f = &ibs[num_ibs - 1].fence->base;
+               *f = fence_get(&ibs[num_ibs - 1].fence->base);
        }
        return 0;
 }
index e7336a95fe59bef088ab49b6b85ea41d649224a7..68369cf1e3185b022df0174fbc4e9398fa93e82f 100644 (file)
@@ -877,7 +877,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
        if (fence)
                *fence = fence_get(f);
        amdgpu_bo_unref(&bo);
-
+       fence_put(f);
        if (amdgpu_enable_scheduler)
                return 0;
 
index 38660eac67d695c80ca317ee39c4c70726fa8617..33ee6ae28f37b07cecaf8c25797c89c39099c745 100644 (file)
@@ -415,6 +415,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
                goto err;
        if (fence)
                *fence = fence_get(f);
+       fence_put(f);
        if (amdgpu_enable_scheduler)
                return 0;
 err:
@@ -481,6 +482,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                goto err;
        if (fence)
                *fence = fence_get(f);
+       fence_put(f);
        if (amdgpu_enable_scheduler)
                return 0;
 err:
index b3f5d0484980eed771489c9456aea896b80df10b..de882b0db3507acafdc01a7377d1e6f838365c8d 100644 (file)
@@ -366,6 +366,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                                                 &fence);
        if (!r)
                amdgpu_bo_fence(bo, fence, true);
+       fence_put(fence);
        if (amdgpu_enable_scheduler) {
                amdgpu_bo_unreserve(bo);
                return 0;
@@ -495,6 +496,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
                if (r)
                        goto error_free;
                amdgpu_bo_fence(pd, fence, true);
+               fence_put(fence);
        }
 
        if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
@@ -812,6 +814,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                fence_put(*fence);
                *fence = fence_get(f);
        }
+       fence_put(f);
        if (!amdgpu_enable_scheduler) {
                amdgpu_ib_free(adev, ib);
                kfree(ib);
index c3ed5b22d7327217794a22c0f3b5dfa1801db127..2b4242b39b0a0d71dfefa53d95f7323f6dc2a633 100644 (file)
@@ -669,6 +669,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
        }
 
 err1:
+       fence_put(f);
        amdgpu_ib_free(adev, &ib);
 err0:
        amdgpu_wb_free(adev, index);
index ee1c47f9a2b60e2aa6fac2b7aab2a6cf42915d74..9b0cab41367731454f8f2ac9641c2f27069d95fb 100644 (file)
@@ -2698,6 +2698,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
        }
 
 err2:
+       fence_put(f);
        amdgpu_ib_free(adev, &ib);
 err1:
        amdgpu_gfx_scratch_free(adev, scratch);
index a865d96b67af3d5ba56227d2bf049c99fcfd17fc..4b68e6306f40cbeb8dfb8c4633f849d849cdc5bf 100644 (file)
@@ -659,6 +659,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
                r = -EINVAL;
        }
 err2:
+       fence_put(f);
        amdgpu_ib_free(adev, &ib);
 err1:
        amdgpu_gfx_scratch_free(adev, scratch);
index 6de7dc88d53ceaa6dfd3c8dc0167b40a4dc71ce6..9de8104eddeb20965d68834a623f55a5321aa9e4 100644 (file)
@@ -733,6 +733,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
        }
 
 err1:
+       fence_put(f);
        amdgpu_ib_free(adev, &ib);
 err0:
        amdgpu_wb_free(adev, index);
index 963a991fea00bfd262a76197007ec5a739edaf60..029f3455f9f9721cde9c04b05044781a51561dc6 100644 (file)
@@ -853,6 +853,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
                r = -EINVAL;
        }
 err1:
+       fence_put(f);
        amdgpu_ib_free(adev, &ib);
 err0:
        amdgpu_wb_free(adev, index);
index 39577f6c0241e95633e4927c3efab3c588d49dc5..5017c71ba700fc2ad2ca4b53dd5af545df6305e6 100644 (file)
@@ -313,6 +313,7 @@ int amd_sched_push_job(struct amd_gpu_scheduler *sched,
                kfree(job);
                return -EINVAL;
        }
+       fence_get(&(*fence)->base);
        job->s_fence = *fence;
        while (kfifo_in_spinlocked(&c_entity->job_queue, &job, sizeof(void *),
                                   &c_entity->queue_lock) != sizeof(void *)) {