/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
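
/*
 * A BO list collects the buffer objects that a single command submission
 * operates on. Lists are created, updated and destroyed through
 * amdgpu_bo_list_ioctl() and are tracked per file descriptor in an idr
 * hanging off struct amdgpu_fpriv.
 */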
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
				 struct amdgpu_bo_list **result,
				 uint32_t *id)
{
	int r;

	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!*result)
		return -ENOMEM;

	mutex_lock(&fpriv->bo_list_lock);
	/* start handles at 1; handle 0 doubles as "no list" (see OP_DESTROY) */
	r = idr_alloc(&fpriv->bo_list_handles, *result,
		      1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&fpriv->bo_list_lock);
		kfree(*result);
		return r;
	}
	*id = r;

	mutex_init(&(*result)->lock);
	(*result)->num_entries = 0;
	(*result)->array = NULL;

	/* return the list locked; the caller drops the lock with
	 * amdgpu_bo_list_put() once it has filled in the entries */
	mutex_lock(&(*result)->lock);
	mutex_unlock(&fpriv->bo_list_lock);

	return 0;
}
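
/* Remove a list from the handle idr and free it. Taking the list lock
 * before dropping the handle makes sure a concurrent holder returned by
 * amdgpu_bo_list_get() is finished first. */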
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}
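
/* Replace the contents of @list with the BOs described by the userspace
 * @info array: take a reference on every GEM object, derive the preferred
 * and allowed placement domains, restrict userptr BOs to GTT and let
 * GDS/GWS/OA entries override the device-wide default objects. */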
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = info[i].bo_priority;
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			/* userptr BOs must stay in GTT */
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	/* drop the references taken before the failed lookup */
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return -ENOENT;
}
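
/* Look up a list by handle and return it with its lock held; the caller
 * releases it again with amdgpu_bo_list_put(). Returns NULL if the handle
 * is unknown. */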
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}
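
/* Unlock a list previously returned by amdgpu_bo_list_get(). */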
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}
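
/* Clone @src into @dst: take a new reference on every BO and release the
 * references @dst held before. Note that an allocation failure leaves
 * @dst untouched, since the function cannot return an error. */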
void amdgpu_bo_list_copy(struct amdgpu_device *adev,
			 struct amdgpu_bo_list *dst,
			 struct amdgpu_bo_list *src)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	/* drm_calloc_large() already zeroes the array for us */
	array = drm_calloc_large(src->num_entries,
				 sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return;

	/* bulk-copy the entries once, then apply the per-entry fixups;
	 * copying inside the loop would needlessly redo the whole copy
	 * on every iteration */
	memcpy(array, src->array,
	       src->num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < src->num_entries; ++i) {
		array[i].robj = amdgpu_bo_ref(src->array[i].robj);
		if (amdgpu_ttm_tt_has_userptr(array[i].robj->tbo.ttm)) {
			has_userptr = true;
			array[i].prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			array[i].allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		array[i].tv.bo = &array[i].robj->tbo;
		array[i].tv.shared = true;

		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = array[i].robj;
		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = array[i].robj;
		if (array[i].prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = array[i].robj;
	}

	for (i = 0; i < dst->num_entries; ++i)
		amdgpu_bo_unref(&dst->array[i].robj);

	drm_free_large(dst->array);

	dst->gds_obj = gds_obj;
	dst->gws_obj = gws_obj;
	dst->oa_obj = oa_obj;
	dst->has_userptr = has_userptr;
	dst->array = array;
	dst->num_entries = src->num_entries;
}
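
/* Release every BO reference the list holds and free the list itself. */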
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	drm_free_large(list->array);
	kfree(list);
}
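
/* Userspace entry point for AMDGPU_BO_LIST_OP_CREATE/DESTROY/UPDATE.
 * The per-entry size is carried in bo_info_size, so the kernel can accept
 * both older (smaller) and newer (larger) userspace entry layouts; see
 * the copy loop below. */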
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void __user *)(unsigned long)
		args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		/* entry sizes differ: copy the common prefix of each
		 * entry one by one, zero-filling the rest */
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}