Commit | Line | Data |
---|---|---|
d38ceaf9 AD |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * Permission is hereby granted, free of charge, to any person obtaining a | |
6 | * copy of this software and associated documentation files (the | |
7 | * "Software"), to deal in the Software without restriction, including | |
8 | * without limitation the rights to use, copy, modify, merge, publish, | |
9 | * distribute, sub license, and/or sell copies of the Software, and to | |
10 | * permit persons to whom the Software is furnished to do so, subject to | |
11 | * the following conditions: | |
12 | * | |
13 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
14 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
15 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | |
16 | * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | |
17 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | |
18 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | |
19 | * USE OR OTHER DEALINGS IN THE SOFTWARE. | |
20 | * | |
21 | * The above copyright notice and this permission notice (including the | |
22 | * next paragraph) shall be included in all copies or substantial portions | |
23 | * of the Software. | |
24 | * | |
25 | */ | |
26 | /* | |
27 | * Authors: | |
28 | * Christian König <deathsimple@vodafone.de> | |
29 | */ | |
30 | ||
31 | #include <drm/drmP.h> | |
32 | #include "amdgpu.h" | |
ec74407a | 33 | #include "amdgpu_trace.h" |
d38ceaf9 | 34 | |
636ce25c CK |
35 | #define AMDGPU_BO_LIST_MAX_PRIORITY 32u |
36 | #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1) | |
37 | ||
d38ceaf9 AD |
38 | static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv, |
39 | struct amdgpu_bo_list **result, | |
40 | int *id) | |
41 | { | |
42 | int r; | |
43 | ||
44 | *result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL); | |
45 | if (!*result) | |
46 | return -ENOMEM; | |
47 | ||
48 | mutex_lock(&fpriv->bo_list_lock); | |
49 | r = idr_alloc(&fpriv->bo_list_handles, *result, | |
decee87a | 50 | 1, 0, GFP_KERNEL); |
d38ceaf9 AD |
51 | if (r < 0) { |
52 | mutex_unlock(&fpriv->bo_list_lock); | |
53 | kfree(*result); | |
54 | return r; | |
55 | } | |
56 | *id = r; | |
57 | ||
58 | mutex_init(&(*result)->lock); | |
59 | (*result)->num_entries = 0; | |
60 | (*result)->array = NULL; | |
61 | ||
62 | mutex_lock(&(*result)->lock); | |
63 | mutex_unlock(&fpriv->bo_list_lock); | |
64 | ||
65 | return 0; | |
66 | } | |
67 | ||
/* Remove the BO list with IDR handle @id from @fpriv and free it.
 *
 * The lock/unlock pair on list->lock before amdgpu_bo_list_free() makes
 * sure any concurrent holder (amdgpu_bo_list_get()/..._put() critical
 * section) has finished before the list memory is released; the whole
 * sequence runs under bo_list_lock so no new lookup can find the entry
 * after idr_remove(). A no-op for unknown handles.
 */
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		/* Wait for the current holder, unpublish, then free. */
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}
82 | ||
83 | static int amdgpu_bo_list_set(struct amdgpu_device *adev, | |
84 | struct drm_file *filp, | |
85 | struct amdgpu_bo_list *list, | |
86 | struct drm_amdgpu_bo_list_entry *info, | |
87 | unsigned num_entries) | |
88 | { | |
89 | struct amdgpu_bo_list_entry *array; | |
90 | struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo; | |
91 | struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo; | |
92 | struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo; | |
93 | ||
211dff55 | 94 | unsigned last_entry = 0, first_userptr = num_entries; |
d38ceaf9 | 95 | unsigned i; |
cc325d19 | 96 | int r; |
d38ceaf9 AD |
97 | |
98 | array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); | |
99 | if (!array) | |
100 | return -ENOMEM; | |
101 | memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry)); | |
102 | ||
103 | for (i = 0; i < num_entries; ++i) { | |
211dff55 | 104 | struct amdgpu_bo_list_entry *entry; |
d38ceaf9 | 105 | struct drm_gem_object *gobj; |
211dff55 | 106 | struct amdgpu_bo *bo; |
cc325d19 | 107 | struct mm_struct *usermm; |
d38ceaf9 AD |
108 | |
109 | gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle); | |
cc325d19 CK |
110 | if (!gobj) { |
111 | r = -ENOENT; | |
d38ceaf9 | 112 | goto error_free; |
cc325d19 | 113 | } |
d38ceaf9 | 114 | |
211dff55 | 115 | bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj)); |
d38ceaf9 | 116 | drm_gem_object_unreference_unlocked(gobj); |
211dff55 CK |
117 | |
118 | usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm); | |
cc325d19 CK |
119 | if (usermm) { |
120 | if (usermm != current->mm) { | |
211dff55 | 121 | amdgpu_bo_unref(&bo); |
cc325d19 CK |
122 | r = -EPERM; |
123 | goto error_free; | |
124 | } | |
211dff55 CK |
125 | entry = &array[--first_userptr]; |
126 | } else { | |
127 | entry = &array[last_entry++]; | |
cc325d19 | 128 | } |
211dff55 CK |
129 | |
130 | entry->robj = bo; | |
131 | entry->priority = min(info[i].bo_priority, | |
132 | AMDGPU_BO_LIST_MAX_PRIORITY); | |
d38ceaf9 AD |
133 | entry->tv.bo = &entry->robj->tbo; |
134 | entry->tv.shared = true; | |
135 | ||
1ea863fd | 136 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) |
d38ceaf9 | 137 | gds_obj = entry->robj; |
1ea863fd | 138 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS) |
d38ceaf9 | 139 | gws_obj = entry->robj; |
1ea863fd | 140 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) |
d38ceaf9 | 141 | oa_obj = entry->robj; |
ec74407a CK |
142 | |
143 | trace_amdgpu_bo_list_set(list, entry->robj); | |
d38ceaf9 AD |
144 | } |
145 | ||
146 | for (i = 0; i < list->num_entries; ++i) | |
147 | amdgpu_bo_unref(&list->array[i].robj); | |
148 | ||
149 | drm_free_large(list->array); | |
150 | ||
151 | list->gds_obj = gds_obj; | |
152 | list->gws_obj = gws_obj; | |
153 | list->oa_obj = oa_obj; | |
211dff55 | 154 | list->first_userptr = first_userptr; |
d38ceaf9 AD |
155 | list->array = array; |
156 | list->num_entries = num_entries; | |
157 | ||
158 | return 0; | |
159 | ||
160 | error_free: | |
70eacc72 CK |
161 | while (i--) |
162 | amdgpu_bo_unref(&array[i].robj); | |
d38ceaf9 | 163 | drm_free_large(array); |
cc325d19 | 164 | return r; |
d38ceaf9 AD |
165 | } |
166 | ||
167 | struct amdgpu_bo_list * | |
168 | amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id) | |
169 | { | |
170 | struct amdgpu_bo_list *result; | |
171 | ||
172 | mutex_lock(&fpriv->bo_list_lock); | |
173 | result = idr_find(&fpriv->bo_list_handles, id); | |
174 | if (result) | |
175 | mutex_lock(&result->lock); | |
176 | mutex_unlock(&fpriv->bo_list_lock); | |
177 | return result; | |
178 | } | |
179 | ||
636ce25c CK |
180 | void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, |
181 | struct list_head *validated) | |
182 | { | |
183 | /* This is based on the bucket sort with O(n) time complexity. | |
184 | * An item with priority "i" is added to bucket[i]. The lists are then | |
185 | * concatenated in descending order. | |
186 | */ | |
187 | struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS]; | |
188 | unsigned i; | |
189 | ||
190 | for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) | |
191 | INIT_LIST_HEAD(&bucket[i]); | |
192 | ||
193 | /* Since buffers which appear sooner in the relocation list are | |
194 | * likely to be used more often than buffers which appear later | |
195 | * in the list, the sort mustn't change the ordering of buffers | |
196 | * with the same priority, i.e. it must be stable. | |
197 | */ | |
198 | for (i = 0; i < list->num_entries; i++) { | |
199 | unsigned priority = list->array[i].priority; | |
200 | ||
201 | list_add_tail(&list->array[i].tv.head, | |
202 | &bucket[priority]); | |
203 | } | |
204 | ||
205 | /* Connect the sorted buckets in the output list. */ | |
206 | for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++) | |
207 | list_splice(&bucket[i], validated); | |
208 | } | |
209 | ||
/**
 * amdgpu_bo_list_put - release a BO list obtained via amdgpu_bo_list_get()
 * @list: the list to release
 *
 * Only drops the per-list mutex; the list itself stays alive until
 * amdgpu_bo_list_destroy()/amdgpu_bo_list_free().
 */
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}
214 | ||
215 | void amdgpu_bo_list_free(struct amdgpu_bo_list *list) | |
216 | { | |
217 | unsigned i; | |
218 | ||
219 | for (i = 0; i < list->num_entries; ++i) | |
220 | amdgpu_bo_unref(&list->array[i].robj); | |
221 | ||
222 | mutex_destroy(&list->lock); | |
223 | drm_free_large(list->array); | |
224 | kfree(list); | |
225 | } | |
226 | ||
227 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, | |
228 | struct drm_file *filp) | |
229 | { | |
230 | const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); | |
231 | ||
232 | struct amdgpu_device *adev = dev->dev_private; | |
233 | struct amdgpu_fpriv *fpriv = filp->driver_priv; | |
234 | union drm_amdgpu_bo_list *args = data; | |
235 | uint32_t handle = args->in.list_handle; | |
236 | const void __user *uptr = (const void*)(long)args->in.bo_info_ptr; | |
237 | ||
238 | struct drm_amdgpu_bo_list_entry *info; | |
239 | struct amdgpu_bo_list *list; | |
240 | ||
241 | int r; | |
242 | ||
243 | info = drm_malloc_ab(args->in.bo_number, | |
244 | sizeof(struct drm_amdgpu_bo_list_entry)); | |
245 | if (!info) | |
246 | return -ENOMEM; | |
247 | ||
248 | /* copy the handle array from userspace to a kernel buffer */ | |
249 | r = -EFAULT; | |
250 | if (likely(info_size == args->in.bo_info_size)) { | |
251 | unsigned long bytes = args->in.bo_number * | |
252 | args->in.bo_info_size; | |
253 | ||
254 | if (copy_from_user(info, uptr, bytes)) | |
255 | goto error_free; | |
256 | ||
257 | } else { | |
258 | unsigned long bytes = min(args->in.bo_info_size, info_size); | |
259 | unsigned i; | |
260 | ||
261 | memset(info, 0, args->in.bo_number * info_size); | |
262 | for (i = 0; i < args->in.bo_number; ++i) { | |
263 | if (copy_from_user(&info[i], uptr, bytes)) | |
264 | goto error_free; | |
265 | ||
266 | uptr += args->in.bo_info_size; | |
267 | } | |
268 | } | |
269 | ||
270 | switch (args->in.operation) { | |
271 | case AMDGPU_BO_LIST_OP_CREATE: | |
272 | r = amdgpu_bo_list_create(fpriv, &list, &handle); | |
273 | if (r) | |
274 | goto error_free; | |
275 | ||
276 | r = amdgpu_bo_list_set(adev, filp, list, info, | |
277 | args->in.bo_number); | |
278 | amdgpu_bo_list_put(list); | |
279 | if (r) | |
280 | goto error_free; | |
281 | ||
282 | break; | |
283 | ||
284 | case AMDGPU_BO_LIST_OP_DESTROY: | |
285 | amdgpu_bo_list_destroy(fpriv, handle); | |
286 | handle = 0; | |
287 | break; | |
288 | ||
289 | case AMDGPU_BO_LIST_OP_UPDATE: | |
290 | r = -ENOENT; | |
291 | list = amdgpu_bo_list_get(fpriv, handle); | |
292 | if (!list) | |
293 | goto error_free; | |
294 | ||
295 | r = amdgpu_bo_list_set(adev, filp, list, info, | |
296 | args->in.bo_number); | |
297 | amdgpu_bo_list_put(list); | |
298 | if (r) | |
299 | goto error_free; | |
300 | ||
301 | break; | |
302 | ||
303 | default: | |
304 | r = -EINVAL; | |
305 | goto error_free; | |
306 | } | |
307 | ||
308 | memset(args, 0, sizeof(*args)); | |
309 | args->out.list_handle = handle; | |
310 | drm_free_large(info); | |
311 | ||
312 | return 0; | |
313 | ||
314 | error_free: | |
315 | drm_free_large(info); | |
316 | return r; | |
317 | } |