drm/amdgpu: cleanup bo list bucket handling
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

#define AMDGPU_BO_LIST_MAX_PRIORITY	32u
#define AMDGPU_BO_LIST_NUM_BUCKETS	(AMDGPU_BO_LIST_MAX_PRIORITY + 1)

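/*
 * Allocate a new amdgpu_bo_list, publish it in the per-file handle IDR and
 * return it with its lock held; the caller is expected to drop the lock
 * again with amdgpu_bo_list_put() once it is done filling the list.
 */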
static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
				 struct amdgpu_bo_list **result,
				 int *id)
{
	int r;

	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
	if (!*result)
		return -ENOMEM;

	mutex_lock(&fpriv->bo_list_lock);
	r = idr_alloc(&fpriv->bo_list_handles, *result,
		      1, 0, GFP_KERNEL);
	if (r < 0) {
		mutex_unlock(&fpriv->bo_list_lock);
		kfree(*result);
		return r;
	}
	*id = r;

	mutex_init(&(*result)->lock);
	(*result)->num_entries = 0;
	(*result)->array = NULL;

	mutex_lock(&(*result)->lock);
	mutex_unlock(&fpriv->bo_list_lock);

	return 0;
}

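/* Remove the handle from the per-file IDR and free the list it referred to. */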
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}

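/*
 * Replace the contents of the list with the BOs described in info: look up
 * each GEM handle, take a reference on the BO, clamp the priority to
 * AMDGPU_BO_LIST_MAX_PRIORITY, force userptr BOs into GTT and remember any
 * GDS/GWS/OA buffers, then drop the old entries and install the new array.
 */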
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	drm_free_large(array);
	return -ENOENT;
}

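/*
 * Look up a BO list by handle and return it with its lock held, or NULL if
 * the handle is unknown; release it again with amdgpu_bo_list_put().
 */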
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}

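/*
 * Put all entries of the BO list onto the validated list, grouped by
 * priority; entries with equal priority keep their relative order.
 */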
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated)
{
	/* This is based on the bucket sort with O(n) time complexity.
	 * An item with priority "i" is added to bucket[i]. The lists are then
	 * concatenated in descending order.
	 */
	struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
	unsigned i;

	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&bucket[i]);

	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	for (i = 0; i < list->num_entries; i++) {
		unsigned priority = list->array[i].priority;

		list_add_tail(&list->array[i].tv.head,
			      &bucket[priority]);
	}

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
		list_splice(&bucket[i], validated);
}

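/* Release the lock taken by amdgpu_bo_list_get() or amdgpu_bo_list_create(). */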
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}

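/* Drop the references on all entries and free the list itself. */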
void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	mutex_destroy(&list->lock);
	drm_free_large(list->array);
	kfree(list);
}

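/*
 * Handler for the BO list ioctl: copy the array of bo_info entries from
 * userspace (tolerating a bo_info_size that differs from the kernel's view
 * of the struct), then create, update or destroy the per-file BO list and
 * return the handle in args->out.list_handle.
 */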
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;

			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
				       args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}