2 * Copyright 2015 Advanced Micro Devices, Inc.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
28 * Christian König <deathsimple@vodafone.de>
33 #include "amdgpu_trace.h"
35 #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
36 #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
/*
 * amdgpu_bo_list_create - allocate a new amdgpu_bo_list and register it
 * in the per-file (fpriv) handle IDR under bo_list_lock.
 *
 * NOTE(review): this chunk is a garbled extract — the original file's
 * line numbers are fused into the text and several source lines (the id
 * out-parameter, allocation/IDR error checks, returns) are missing.
 * Code is kept byte-identical; only comments are added.
 */
38 static int amdgpu_bo_list_create(struct amdgpu_fpriv
*fpriv
,
39 struct amdgpu_bo_list
**result
,
/* Zero-initialized list object; the allocation-failure check sits on a
 * line not visible in this extract. */
44 *result
= kzalloc(sizeof(struct amdgpu_bo_list
), GFP_KERNEL
);
/* bo_list_lock serializes handle allocation against lookup/destroy. */
48 mutex_lock(&fpriv
->bo_list_lock
);
/* Allocate an IDR handle mapping back to the new list; the remaining
 * idr_alloc() arguments are on a line missing from this view. */
49 r
= idr_alloc(&fpriv
->bo_list_handles
, *result
,
/* NOTE(review): presumably part of the idr_alloc() failure path — the
 * surrounding error-handling lines are missing from this extract. */
52 mutex_unlock(&fpriv
->bo_list_lock
);
/* Initialize the per-list lock and start with an empty entry array. */
58 mutex_init(&(*result
)->lock
);
59 (*result
)->num_entries
= 0;
60 (*result
)->array
= NULL
;
/* Hand the list back locked; caller presumably drops it via
 * amdgpu_bo_list_put() — TODO confirm against the missing lines. */
62 mutex_lock(&(*result
)->lock
);
63 mutex_unlock(&fpriv
->bo_list_lock
);
/*
 * amdgpu_bo_list_destroy - remove the list with the given handle from
 * the fpriv IDR and free it.
 *
 * NOTE(review): garbled extract — an idr_find() NULL guard that should
 * precede taking list->lock appears to be on a line missing from this
 * view; code kept byte-identical, comments only.
 */
68 static void amdgpu_bo_list_destroy(struct amdgpu_fpriv
*fpriv
, int id
)
70 struct amdgpu_bo_list
*list
;
/* Look the handle up under bo_list_lock so it cannot race with create
 * or concurrent destroy. */
72 mutex_lock(&fpriv
->bo_list_lock
);
73 list
= idr_find(&fpriv
->bo_list_handles
, id
);
/* Take the list lock to drain any concurrent user before removal. */
75 mutex_lock(&list
->lock
);
76 idr_remove(&fpriv
->bo_list_handles
, id
);
77 mutex_unlock(&list
->lock
);
/* Drop all BO references held by the list and free its storage. */
78 amdgpu_bo_list_free(list
);
80 mutex_unlock(&fpriv
->bo_list_lock
);
/*
 * amdgpu_bo_list_set - (re)populate @list from the userspace-provided
 * @info array: look up each GEM handle, take a BO reference, record its
 * priority and userptr state plus any special GDS/GWS/OA objects, then
 * swap the new entry array in for the old one.
 *
 * NOTE(review): garbled extract — lookup/error checks, the userptr
 * accounting block, the final array assignment and the returns sit on
 * lines missing from this view.  Code kept byte-identical; comments only.
 */
83 static int amdgpu_bo_list_set(struct amdgpu_device
*adev
,
84 struct drm_file
*filp
,
85 struct amdgpu_bo_list
*list
,
86 struct drm_amdgpu_bo_list_entry
*info
,
89 struct amdgpu_bo_list_entry
*array
;
/* Default the GDS/GWS/OA objects to the device-global ones; a BO in the
 * list whose preferred domain matches overrides these below. */
90 struct amdgpu_bo
*gds_obj
= adev
->gds
.gds_gfx_bo
;
91 struct amdgpu_bo
*gws_obj
= adev
->gds
.gws_gfx_bo
;
92 struct amdgpu_bo
*oa_obj
= adev
->gds
.oa_gfx_bo
;
94 bool has_userptr
= false;
/* Overflow-checked allocation of the new entry array (drm_malloc_ab);
 * the failure check is on a missing line. */
98 array
= drm_malloc_ab(num_entries
, sizeof(struct amdgpu_bo_list_entry
));
101 memset(array
, 0, num_entries
* sizeof(struct amdgpu_bo_list_entry
));
/* Translate each userspace bo_handle into a referenced amdgpu_bo. */
103 for (i
= 0; i
< num_entries
; ++i
) {
104 struct amdgpu_bo_list_entry
*entry
= &array
[i
];
105 struct drm_gem_object
*gobj
;
106 struct mm_struct
*usermm
;
/* GEM lookup takes its own reference; its NULL check is on a missing
 * line. */
108 gobj
= drm_gem_object_lookup(adev
->ddev
, filp
, info
[i
].bo_handle
);
/* Hold our own BO reference, then drop the lookup reference. */
114 entry
->robj
= amdgpu_bo_ref(gem_to_amdgpu_bo(gobj
));
115 drm_gem_object_unreference_unlocked(gobj
);
/* Clamp the userspace-supplied priority into the supported range. */
116 entry
->priority
= min(info
[i
].bo_priority
,
117 AMDGPU_BO_LIST_MAX_PRIORITY
);
118 usermm
= amdgpu_ttm_tt_get_usermm(entry
->robj
->tbo
.ttm
);
/* Userptr BOs belonging to a foreign mm must be rejected;
 * NOTE(review): the enclosing usermm check, the rejection body and the
 * has_userptr update appear to be on lines missing from this view. */
120 if (usermm
!= current
->mm
) {
/* Pre-fill the TTM validation entry consumed later by the CS path. */
126 entry
->tv
.bo
= &entry
->robj
->tbo
;
127 entry
->tv
.shared
= true;
/* Remember BOs whose preferred domain is one of the special heaps. */
129 if (entry
->robj
->prefered_domains
== AMDGPU_GEM_DOMAIN_GDS
)
130 gds_obj
= entry
->robj
;
131 if (entry
->robj
->prefered_domains
== AMDGPU_GEM_DOMAIN_GWS
)
132 gws_obj
= entry
->robj
;
133 if (entry
->robj
->prefered_domains
== AMDGPU_GEM_DOMAIN_OA
)
134 oa_obj
= entry
->robj
;
136 trace_amdgpu_bo_list_set(list
, entry
->robj
);
/* Success: release the previous contents of the list. */
139 for (i
= 0; i
< list
->num_entries
; ++i
)
140 amdgpu_bo_unref(&list
->array
[i
].robj
);
142 drm_free_large(list
->array
);
/* Publish the new metadata; the "list->array = array" line appears to
 * be missing from this extract. */
144 list
->gds_obj
= gds_obj
;
145 list
->gws_obj
= gws_obj
;
146 list
->oa_obj
= oa_obj
;
147 list
->has_userptr
= has_userptr
;
149 list
->num_entries
= num_entries
;
/* NOTE(review): presumably the error path freeing the new array — the
 * label and the unref loop for already-taken references are on missing
 * lines. */
154 drm_free_large(array
);
/*
 * amdgpu_bo_list_get - look up a BO list by handle and return it with
 * result->lock held; the caller releases it via amdgpu_bo_list_put().
 *
 * NOTE(review): garbled extract — the NULL guard around the idr_find()
 * result appears to be on a line missing from this view.
 */
158 struct amdgpu_bo_list
*
159 amdgpu_bo_list_get(struct amdgpu_fpriv
*fpriv
, int id
)
161 struct amdgpu_bo_list
*result
;
/* Lookup and lock acquisition happen under bo_list_lock so destroy
 * cannot free the list underneath us. */
163 mutex_lock(&fpriv
->bo_list_lock
);
164 result
= idr_find(&fpriv
->bo_list_handles
, id
);
166 mutex_lock(&result
->lock
);
167 mutex_unlock(&fpriv
->bo_list_lock
);
/*
 * amdgpu_bo_list_get_list - emit the list's entries onto @validated,
 * sorted by descending priority via a stable bucket sort.
 *
 * NOTE(review): garbled extract — the second argument of list_add_tail()
 * (the bucket chosen by priority) is on a line missing from this view.
 */
171 void amdgpu_bo_list_get_list(struct amdgpu_bo_list
*list
,
172 struct list_head
*validated
)
174 /* This is based on the bucket sort with O(n) time complexity.
175 * An item with priority "i" is added to bucket[i]. The lists are then
176 * concatenated in descending order.
/* One bucket per priority level (0..AMDGPU_BO_LIST_MAX_PRIORITY). */
178 struct list_head bucket
[AMDGPU_BO_LIST_NUM_BUCKETS
];
181 for (i
= 0; i
< AMDGPU_BO_LIST_NUM_BUCKETS
; i
++)
182 INIT_LIST_HEAD(&bucket
[i
]);
184 /* Since buffers which appear sooner in the relocation list are
185 * likely to be used more often than buffers which appear later
186 * in the list, the sort mustn't change the ordering of buffers
187 * with the same priority, i.e. it must be stable.
189 for (i
= 0; i
< list
->num_entries
; i
++) {
190 unsigned priority
= list
->array
[i
].priority
;
/* Append preserves arrival order within a bucket (stability). */
192 list_add_tail(&list
->array
[i
].tv
.head
,
196 /* Connect the sorted buckets in the output list. */
197 for (i
= 0; i
< AMDGPU_BO_LIST_NUM_BUCKETS
; i
++)
198 list_splice(&bucket
[i
], validated
);
/* amdgpu_bo_list_put - release the lock taken by amdgpu_bo_list_get()
 * (or left held by amdgpu_bo_list_create()). */
201 void amdgpu_bo_list_put(struct amdgpu_bo_list
*list
)
203 mutex_unlock(&list
->lock
);
/*
 * amdgpu_bo_list_free - drop every BO reference held by the list,
 * destroy its lock and free the entry array.
 *
 * NOTE(review): garbled extract — the final free of the list object
 * itself is presumably on a line missing from this view.
 */
206 void amdgpu_bo_list_free(struct amdgpu_bo_list
*list
)
/* Release the reference taken on each entry in amdgpu_bo_list_set(). */
210 for (i
= 0; i
< list
->num_entries
; ++i
)
211 amdgpu_bo_unref(&list
->array
[i
].robj
);
213 mutex_destroy(&list
->lock
);
214 drm_free_large(list
->array
);
/*
 * amdgpu_bo_list_ioctl - DRM_AMDGPU_BO_LIST entry point: CREATE, DESTROY
 * or UPDATE a BO list for the calling file descriptor, then report the
 * handle back to userspace.
 *
 * NOTE(review): garbled extract — allocation checks, error labels,
 * break/return statements and the trailing arguments of several calls
 * are on lines missing from this view.  Code kept byte-identical;
 * comments only.
 */
218 int amdgpu_bo_list_ioctl(struct drm_device
*dev
, void *data
,
219 struct drm_file
*filp
)
221 const uint32_t info_size
= sizeof(struct drm_amdgpu_bo_list_entry
);
223 struct amdgpu_device
*adev
= dev
->dev_private
;
224 struct amdgpu_fpriv
*fpriv
= filp
->driver_priv
;
225 union drm_amdgpu_bo_list
*args
= data
;
226 uint32_t handle
= args
->in
.list_handle
;
/* Userspace pointer to the bo_info array, recovered from the u64 field. */
227 const void __user
*uptr
= (const void*)(long)args
->in
.bo_info_ptr
;
229 struct drm_amdgpu_bo_list_entry
*info
;
230 struct amdgpu_bo_list
*list
;
/* Overflow-checked kernel buffer for the userspace bo_info array; the
 * failure check is on a missing line. */
234 info
= drm_malloc_ab(args
->in
.bo_number
,
235 sizeof(struct drm_amdgpu_bo_list_entry
));
239 /* copy the handle array from userspace to a kernel buffer */
/* Fast path: userspace struct size matches ours — one bulk copy. */
241 if (likely(info_size
== args
->in
.bo_info_size
)) {
242 unsigned long bytes
= args
->in
.bo_number
*
243 args
->in
.bo_info_size
;
/* copy_from_user failure handling is on a missing line. */
245 if (copy_from_user(info
, uptr
, bytes
))
/* Slow path: sizes differ (older/newer userspace); copy only the common
 * prefix of each entry and zero the remainder. */
249 unsigned long bytes
= min(args
->in
.bo_info_size
, info_size
);
252 memset(info
, 0, args
->in
.bo_number
* info_size
);
253 for (i
= 0; i
< args
->in
.bo_number
; ++i
) {
254 if (copy_from_user(&info
[i
], uptr
, bytes
))
/* Advance by the userspace stride, not our struct size. */
257 uptr
+= args
->in
.bo_info_size
;
/* Dispatch on the requested operation. */
261 switch (args
->in
.operation
) {
262 case AMDGPU_BO_LIST_OP_CREATE
:
263 r
= amdgpu_bo_list_create(fpriv
, &list
, &handle
);
/* Populate the freshly created (and still locked) list; trailing
 * arguments and error handling are on missing lines. */
267 r
= amdgpu_bo_list_set(adev
, filp
, list
, info
,
269 amdgpu_bo_list_put(list
);
275 case AMDGPU_BO_LIST_OP_DESTROY
:
276 amdgpu_bo_list_destroy(fpriv
, handle
);
280 case AMDGPU_BO_LIST_OP_UPDATE
:
/* Look up the existing list (returned locked); the NULL/error check is
 * on a missing line. */
282 list
= amdgpu_bo_list_get(fpriv
, handle
);
286 r
= amdgpu_bo_list_set(adev
, filp
, list
, info
,
288 amdgpu_bo_list_put(list
);
/* Success: clear the union and report the handle back to userspace. */
299 memset(args
, 0, sizeof(*args
));
300 args
->out
.list_handle
= handle
;
301 drm_free_large(info
);
/* NOTE(review): presumably the error-path free of the info buffer. */
306 drm_free_large(info
);