drm/amdgpu: use scheduler user seq instead of previous user seq
author Chunming Zhou <david1.zhou@amd.com>
Tue, 21 Jul 2015 07:13:53 +0000 (15:13 +0800)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 17 Aug 2015 20:50:35 +0000 (16:50 -0400)
Signed-off-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Jammy Zhou <Jammy.Zhou@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

index 557fb60f416b40bcd4d70735576f42bb6f7fac9f..b9be250cb206fbf84488092c954aabd80283bdab 100644
@@ -225,10 +225,16 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
                              struct fence *fence)
 {
        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
-       uint64_t seq = cring->sequence;
-       unsigned idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
-       struct fence *other = cring->fences[idx];
+       uint64_t seq = 0;
+       unsigned idx = 0;
+       struct fence *other = NULL;
 
+       if (amdgpu_enable_scheduler)
+               seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
+       else
+               seq = cring->sequence;
+       idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+       other = cring->fences[idx];
        if (other) {
                signed long r;
                r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
@@ -240,7 +246,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
        spin_lock(&ctx->ring_lock);
        cring->fences[idx] = fence;
-       cring->sequence++;
+       if (!amdgpu_enable_scheduler)
+               cring->sequence++;
        spin_unlock(&ctx->ring_lock);
 
        fence_put(other);
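
For readability, this is how amdgpu_ctx_add_fence() reads once the hunk above is applied, with explanatory comments added. The lines between the two context blocks (the error print and the fence_get() call) are not shown in the diff; they are reconstructed here and should be treated as an assumption, not as part of the patch:

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
	uint64_t seq = 0;
	unsigned idx = 0;
	struct fence *other = NULL;

	/* With the GPU scheduler enabled, the context entity's atomic counter
	 * already holds the user sequence assigned when the job was queued;
	 * otherwise fall back to the legacy per-ring counter. */
	if (amdgpu_enable_scheduler)
		seq = atomic64_read(&cring->c_entity.last_queued_v_seq);
	else
		seq = cring->sequence;

	/* fences[] is a ring buffer keyed by sequence number: before reusing a
	 * slot, wait for its previous occupant, which bounds the in-flight
	 * submissions per ring to AMDGPU_CTX_MAX_CS_PENDING. */
	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
	other = cring->fences[idx];
	if (other) {
		signed long r;
		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)				/* reconstructed */
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);				/* reconstructed */

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	/* The scheduler advances last_queued_v_seq itself when a job is
	 * queued, so only the legacy path bumps the per-ring counter here. */
	if (!amdgpu_enable_scheduler)
		cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;					/* reconstructed */
}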
@@ -253,14 +260,21 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 {
        struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
        struct fence *fence;
+       uint64_t queued_seq;
 
        spin_lock(&ctx->ring_lock);
-       if (seq >= cring->sequence) {
+       if (amdgpu_enable_scheduler)
+               queued_seq = atomic64_read(&cring->c_entity.last_queued_v_seq) + 1;
+       else
+               queued_seq = cring->sequence;
+
+       if (seq >= queued_seq) {
                spin_unlock(&ctx->ring_lock);
                return ERR_PTR(-EINVAL);
        }
 
-       if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+
+       if (seq + AMDGPU_CTX_MAX_CS_PENDING < queued_seq) {
                spin_unlock(&ctx->ring_lock);
                return NULL;
        }
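
Note the asymmetry in the scheduler path above: last_queued_v_seq holds the sequence of the newest submission already queued, so computing queued_seq ("one past the newest") needs the +1, whereas the legacy cring->sequence is already post-incremented by amdgpu_ctx_add_fence(). A minimal standalone sketch of the validity window those two checks implement; MAX_PENDING and classify_seq are illustrative names, not from the driver:

#include <stdint.h>

#define MAX_PENDING 16	/* stand-in for AMDGPU_CTX_MAX_CS_PENDING */

enum seq_status {
	SEQ_INVALID,	/* not submitted yet -> -EINVAL */
	SEQ_RETIRED,	/* slot reused; fence known signaled -> NULL */
	SEQ_PENDING,	/* fences[seq % MAX_PENDING] holds the fence */
};

/* Classify a user-supplied sequence against queued_seq, defined as one past
 * the newest queued submission.  Only sequences in the window
 * [queued_seq - MAX_PENDING, queued_seq - 1] still have a live fence slot;
 * anything older had its slot overwritten, so its fence must have signaled. */
static enum seq_status classify_seq(uint64_t seq, uint64_t queued_seq)
{
	if (seq >= queued_seq)
		return SEQ_INVALID;
	if (seq + MAX_PENDING < queued_seq)
		return SEQ_RETIRED;
	return SEQ_PENDING;
}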