/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: monk liu <monk.liu@amd.com>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
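
/*
 * Initialize a freshly allocated context: set up its reference count and
 * ring lock, allocate one ring of fence slots (amdgpu_sched_jobs entries)
 * per hardware ring, and create a scheduler entity on every ring the
 * device actually exposes.
 */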
static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
{
	unsigned i, j;
	int r;

	memset(ctx, 0, sizeof(*ctx));
	ctx->adev = adev;
	kref_init(&ctx->refcount);
	spin_lock_init(&ctx->ring_lock);
	ctx->fences = kcalloc(amdgpu_sched_jobs * AMDGPU_MAX_RINGS,
			      sizeof(struct fence *), GFP_KERNEL);
	if (!ctx->fences)
		return -ENOMEM;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		ctx->rings[i].sequence = 1;
		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
	}
	/* create context entity for each ring */
	for (i = 0; i < adev->num_rings; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		struct amd_sched_rq *rq;

		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
		r = amd_sched_entity_init(&ring->sched, &ctx->rings[i].entity,
					  rq, amdgpu_sched_jobs);
		if (r)
			break;
	}

	/* unwind the entities created so far if one of them failed */
	if (i < adev->num_rings) {
		for (j = 0; j < i; j++)
			amd_sched_entity_fini(&adev->rings[j]->sched,
					      &ctx->rings[j].entity);
		kfree(ctx->fences);
		return -EINVAL;
	}
	return 0;
}
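
/*
 * Tear down a context: drop every cached fence reference and release the
 * scheduler entity of each ring before the backing storage is freed.
 */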
static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
{
	struct amdgpu_device *adev = ctx->adev;
	unsigned i, j;

	if (!adev)
		return;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
		for (j = 0; j < amdgpu_sched_jobs; ++j)
			fence_put(ctx->rings[i].fences[j]);
	kfree(ctx->fences);

	for (i = 0; i < adev->num_rings; i++)
		amd_sched_entity_fini(&adev->rings[i]->sched,
				      &ctx->rings[i].entity);
}
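
/*
 * Allocate a new context on behalf of a file descriptor and publish it in
 * the per-file IDR; the IDR slot doubles as the userspace context handle.
 */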
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    uint32_t *id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;
	int r;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&mgr->lock);
	r = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&mgr->lock);
		kfree(ctx);
		return r;
	}
	*id = (uint32_t)r;
	r = amdgpu_ctx_init(adev, ctx);
	if (r) {
		idr_remove(&mgr->ctx_handles, *id);
		*id = 0;
		kfree(ctx);
	}
	mutex_unlock(&mgr->lock);
	return r;
}
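
/* kref release callback: runs once the last reference to a context is gone */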
static void amdgpu_ctx_do_release(struct kref *ref)
{
	struct amdgpu_ctx *ctx;

	ctx = container_of(ref, struct amdgpu_ctx, refcount);

	amdgpu_ctx_fini(ctx);

	kfree(ctx);
}
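
/*
 * Remove a context handle from the IDR and drop the reference the handle
 * held; the context itself goes away once all remaining users release it.
 */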
static int amdgpu_ctx_free(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
	struct amdgpu_ctx *ctx;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx) {
		idr_remove(&mgr->ctx_handles, id);
		kref_put(&ctx->refcount, amdgpu_ctx_do_release);
		mutex_unlock(&mgr->lock);
		return 0;
	}
	mutex_unlock(&mgr->lock);
	return -EINVAL;
}
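
/*
 * Report context state to userspace, in particular whether a GPU reset
 * happened since the previous query of the same context.
 */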
static int amdgpu_ctx_query(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv, uint32_t id,
			    union drm_amdgpu_ctx_out *out)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	unsigned reset_counter;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	/* TODO: these two are always zero */
	out->state.flags = 0x0;
	out->state.hangs = 0x0;

	/* determine if a GPU reset has occurred since the last call */
	reset_counter = atomic_read(&adev->gpu_reset_counter);
	/* TODO: this should ideally return NO, GUILTY, or INNOCENT. */
	if (ctx->reset_counter == reset_counter)
		out->state.reset_status = AMDGPU_CTX_NO_RESET;
	else
		out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
	ctx->reset_counter = reset_counter;

	mutex_unlock(&mgr->lock);
	return 0;
}
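
/* Context ioctl entry point: dispatch the alloc/free/query operations */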
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;

	union drm_amdgpu_ctx *args = data;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	r = 0;
	id = args->in.ctx_id;

	switch (args->in.op) {
	case AMDGPU_CTX_OP_ALLOC_CTX:
		r = amdgpu_ctx_alloc(adev, fpriv, &id);
		args->out.alloc.ctx_id = id;
		break;
	case AMDGPU_CTX_OP_FREE_CTX:
		r = amdgpu_ctx_free(fpriv, id);
		break;
	case AMDGPU_CTX_OP_QUERY_STATE:
		r = amdgpu_ctx_query(adev, fpriv, id, &args->out);
		break;
	default:
		return -EINVAL;
	}

	return r;
}
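
/*
 * Look up a context by handle and take a reference on it; the caller must
 * balance this with amdgpu_ctx_put().
 */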
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;

	if (!fpriv)
		return NULL;

	mgr = &fpriv->ctx_mgr;

	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (ctx)
		kref_get(&ctx->refcount);
	mutex_unlock(&mgr->lock);
	return ctx;
}
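
/* Drop a reference taken with amdgpu_ctx_get() */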
int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
{
	if (ctx == NULL)
		return -EINVAL;

	kref_put(&ctx->refcount, amdgpu_ctx_do_release);
	return 0;
}
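
/*
 * Store a new fence in the per-ring slot array and return the sequence
 * number userspace can later use to look it up.  The slot index is
 * seq & (amdgpu_sched_jobs - 1), which assumes amdgpu_sched_jobs is a
 * power of two; if the slot is still occupied, wait out the old fence
 * before overwriting it.
 */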
uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
	unsigned idx = 0;
	struct fence *other = NULL;

	idx = seq & (amdgpu_sched_jobs - 1);
	other = cring->fences[idx];
	if (other) {
		signed long r;

		r = fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
	}

	fence_get(fence);

	spin_lock(&ctx->ring_lock);
	cring->fences[idx] = fence;
	cring->sequence++;
	spin_unlock(&ctx->ring_lock);

	fence_put(other);

	return seq;
}
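
/*
 * Fetch the fence previously stored under seq.  Returns ERR_PTR(-EINVAL)
 * for sequence numbers that have not been emitted yet, NULL when the slot
 * has already been recycled, and a new fence reference otherwise.
 */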
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq)
{
	struct amdgpu_ctx_ring *cring = &ctx->rings[ring->idx];
	struct fence *fence;

	spin_lock(&ctx->ring_lock);

	/* the sequence number has not been emitted yet */
	if (seq >= cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return ERR_PTR(-EINVAL);
	}

	/* the fence slot was already overwritten by a newer submission */
	if (seq + amdgpu_sched_jobs < cring->sequence) {
		spin_unlock(&ctx->ring_lock);
		return NULL;
	}

	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
	spin_unlock(&ctx->ring_lock);

	return fence;
}
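
/* Set up the per-file context manager (lock plus handle IDR) */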
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
{
	mutex_init(&mgr->lock);
	idr_init(&mgr->ctx_handles);
}
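
/*
 * Release every context still registered when the file descriptor closes;
 * a context whose refcount does not drop to zero here still has other
 * users holding it, so complain about the leak.
 */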
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
{
	struct amdgpu_ctx *ctx;
	struct idr *idp;
	uint32_t id;

	idp = &mgr->ctx_handles;

	idr_for_each_entry(idp, ctx, id) {
		if (kref_put(&ctx->refcount, amdgpu_ctx_do_release) != 1)
			DRM_ERROR("ctx %p is still alive\n", ctx);
	}

	idr_destroy(&mgr->ctx_handles);
	mutex_destroy(&mgr->lock);
}