drivers/gpu/drm/nouveau/nouveau_abi16.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/ioctl.h>
#include <nvif/class.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_gem.h"
#include "nouveau_chan.h"
#include "nouveau_abi16.h"

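/* Return the per-client abi16 state, creating it (and its nvif device object)
 * on first use.  On success this leaves cli->mutex held; callers release it
 * again through nouveau_abi16_put().  Returns NULL, with the mutex dropped,
 * if allocation or device-object creation fails.
 */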
struct nouveau_abi16 *
nouveau_abi16_get(struct drm_file *file_priv, struct drm_device *dev)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	mutex_lock(&cli->mutex);
	if (!cli->abi16) {
		struct nouveau_abi16 *abi16;
		cli->abi16 = abi16 = kzalloc(sizeof(*abi16), GFP_KERNEL);
		if (cli->abi16) {
			struct nv_device_v0 args = {
				.device = ~0ULL,
			};

			INIT_LIST_HEAD(&abi16->channels);

			/* allocate device object targeting client's default
			 * device (ie. the one that belongs to the fd it
			 * opened)
			 */
			if (nvif_device_init(&cli->base.base, NULL,
					     NOUVEAU_ABI16_DEVICE, NV_DEVICE,
					     &args, sizeof(args),
					     &abi16->device) == 0)
				return cli->abi16;

			kfree(cli->abi16);
			cli->abi16 = NULL;
		}

		mutex_unlock(&cli->mutex);
	}
	return cli->abi16;
}

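/* Drop the cli->mutex taken by nouveau_abi16_get() and pass the given return
 * code straight through, so handlers can "return nouveau_abi16_put(abi16, ret)".
 */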
int
nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
{
	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
	mutex_unlock(&cli->mutex);
	return ret;
}

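/* Map the device's chipset family to the matching software object class; the
 * compatibility path in nouveau_abi16_ioctl_grobj_alloc() below uses this when
 * userspace asks for the legacy 0x506e class.
 */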
u16
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TNT:
		return 0x006e;
	case NV_DEVICE_INFO_V0_CELSIUS:
	case NV_DEVICE_INFO_V0_KELVIN:
	case NV_DEVICE_INFO_V0_RANKINE:
	case NV_DEVICE_INFO_V0_CURIE:
		return 0x016e;
	case NV_DEVICE_INFO_V0_TESLA:
		return 0x506e;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
	case NV_DEVICE_INFO_V0_MAXWELL:
		return 0x906e;
	}

	return 0x0000;
}

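/* Release a single notifier allocation: return its space to the channel's
 * notifier heap and free the tracking structure.
 */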
static void
nouveau_abi16_ntfy_fini(struct nouveau_abi16_chan *chan,
			struct nouveau_abi16_ntfy *ntfy)
{
	nvkm_mm_free(&chan->heap, &ntfy->node);
	list_del(&ntfy->head);
	kfree(ntfy);
}

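/* Tear down one abi16 channel: idle the hardware channel, release notifier
 * allocations and the notifier buffer object, then destroy the channel object
 * and return its handle bit to abi16->handles.
 */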
static void
nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
			struct nouveau_abi16_chan *chan)
{
	struct nouveau_abi16_ntfy *ntfy, *temp;

	/* wait for all activity to stop before releasing notify object, which
	 * may still be in use */
	if (chan->chan && chan->ntfy)
		nouveau_channel_idle(chan->chan);

	/* cleanup notifier state */
	list_for_each_entry_safe(ntfy, temp, &chan->notifiers, head) {
		nouveau_abi16_ntfy_fini(chan, ntfy);
	}

	if (chan->ntfy) {
		nouveau_bo_vma_del(chan->ntfy, &chan->ntfy_vma);
		nouveau_bo_unpin(chan->ntfy);
		drm_gem_object_unreference_unlocked(&chan->ntfy->gem);
	}

	if (chan->heap.block_size)
		nvkm_mm_fini(&chan->heap);

	/* destroy channel object, all children will be killed too */
	if (chan->chan) {
		abi16->handles &= ~(1ULL << (chan->chan->object->handle & 0xffff));
		nouveau_channel_del(&chan->chan);
	}

	list_del(&chan->head);
	kfree(chan);
}

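/* Destroy all abi16 state for a client: every remaining channel, then the
 * nvif device object allocated in nouveau_abi16_get().
 */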
void
nouveau_abi16_fini(struct nouveau_abi16 *abi16)
{
	struct nouveau_cli *cli = (void *)nvif_client(&abi16->device.base);
	struct nouveau_abi16_chan *chan, *temp;

	/* cleanup channels */
	list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
		nouveau_abi16_chan_fini(abi16, chan);
	}

	/* destroy the device object */
	nvif_device_fini(&abi16->device);

	kfree(cli->abi16);
	cli->abi16 = NULL;
}

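/* GETPARAM ioctl: report chipset, bus and memory information to userspace.
 * From userspace this is typically reached through libdrm, roughly along the
 * lines of the following sketch (not taken from this tree):
 *
 *	struct drm_nouveau_getparam gp = { .param = NOUVEAU_GETPARAM_CHIPSET_ID };
 *	drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &gp, sizeof(gp));
 *
 * BUS_TYPE reports 0 for AGP, 1 for PCI, 2 for PCIe and 3 for non-PCI devices,
 * matching the checks below.
 */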
int
nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->device;
	struct nvkm_timer *ptimer = nvxx_timer(device);
	struct nvkm_gr *gr = nvxx_gr(device);
	struct drm_nouveau_getparam *getparam = data;

	switch (getparam->param) {
	case NOUVEAU_GETPARAM_CHIPSET_ID:
		getparam->value = device->info.chipset;
		break;
	case NOUVEAU_GETPARAM_PCI_VENDOR:
		if (nv_device_is_pci(nvxx_device(device)))
			getparam->value = dev->pdev->vendor;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_PCI_DEVICE:
		if (nv_device_is_pci(nvxx_device(device)))
			getparam->value = dev->pdev->device;
		else
			getparam->value = 0;
		break;
	case NOUVEAU_GETPARAM_BUS_TYPE:
		if (!nv_device_is_pci(nvxx_device(device)))
			getparam->value = 3;
		else
		if (drm_pci_device_is_agp(dev))
			getparam->value = 0;
		else
		if (!pci_is_pcie(dev->pdev))
			getparam->value = 1;
		else
			getparam->value = 2;
		break;
	case NOUVEAU_GETPARAM_FB_SIZE:
		getparam->value = drm->gem.vram_available;
		break;
	case NOUVEAU_GETPARAM_AGP_SIZE:
		getparam->value = drm->gem.gart_available;
		break;
	case NOUVEAU_GETPARAM_VM_VRAM_BASE:
		getparam->value = 0; /* deprecated */
		break;
	case NOUVEAU_GETPARAM_PTIMER_TIME:
		getparam->value = ptimer->read(ptimer);
		break;
	case NOUVEAU_GETPARAM_HAS_BO_USAGE:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
		getparam->value = 1;
		break;
	case NOUVEAU_GETPARAM_GRAPH_UNITS:
		getparam->value = gr->units ? gr->units(gr) : 0;
		break;
	default:
		NV_PRINTK(debug, cli, "unknown parameter %lld\n", getparam->param);
		return -EINVAL;
	}

	return 0;
}

int
nouveau_abi16_ioctl_setparam(ABI16_IOCTL_ARGS)
{
	return -EINVAL;
}

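/* CHANNEL_ALLOC ioctl: pick a free slot from the abi16->handles bitmap, create
 * the kernel channel object, then set up the per-channel notifier buffer (one
 * GART page, additionally mapped into the client's VM on Tesla and newer) and
 * a small heap for carving notifier objects out of it.
 */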
int
nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_alloc *init = data;
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;
	struct nvif_device *device;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (!drm->channel)
		return nouveau_abi16_put(abi16, -ENODEV);

	device = &abi16->device;

	/* hack to allow channel engine type specification on kepler */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		if (init->fb_ctxdma_handle != ~0)
			init->fb_ctxdma_handle = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		else
			init->fb_ctxdma_handle = init->tt_ctxdma_handle;

		/* allow flips to be executed if this is a graphics channel */
		init->tt_ctxdma_handle = 0;
		if (init->fb_ctxdma_handle == KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR)
			init->tt_ctxdma_handle = 1;
	}

	if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);

	/* allocate "abi16 channel" data and make up a handle for it */
	init->channel = __ffs64(~abi16->handles);
	if (~abi16->handles == 0)
		return nouveau_abi16_put(abi16, -ENOSPC);

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOMEM);

	INIT_LIST_HEAD(&chan->notifiers);
	list_add(&chan->head, &abi16->channels);
	abi16->handles |= (1ULL << init->channel);

	/* create channel object and initialise dma and fence management */
	ret = nouveau_channel_new(drm, device,
				  NOUVEAU_ABI16_CHAN(init->channel),
				  init->fb_ctxdma_handle,
				  init->tt_ctxdma_handle, &chan->chan);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
					NOUVEAU_GEM_DOMAIN_GART;
	else
	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
	else
		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		init->subchan[0].handle = 0x00000000;
		init->subchan[0].grclass = 0x0000;
		init->subchan[1].handle = chan->chan->nvsw.handle;
		init->subchan[1].grclass = 0x506e;
		init->nr_subchan = 2;
	}

	/* Named memory object area */
	ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
			      0, 0, &chan->ntfy);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
	if (ret)
		goto done;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vma_add(chan->ntfy, cli->vm,
					 &chan->ntfy_vma);
		if (ret)
			goto done;
	}

	ret = drm_gem_handle_create(file_priv, &chan->ntfy->gem,
				    &init->notifier_handle);
	if (ret)
		goto done;

	ret = nvkm_mm_init(&chan->heap, 0, PAGE_SIZE, 1);
done:
	if (ret)
		nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, ret);
}

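/* Look up an abi16 channel by the handle userspace was given at allocation
 * time; returns NULL if the client owns no such channel.
 */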
static struct nouveau_abi16_chan *
nouveau_abi16_chan(struct nouveau_abi16 *abi16, int channel)
{
	struct nouveau_abi16_chan *chan;

	list_for_each_entry(chan, &abi16->channels, head) {
		if (chan->chan->object->handle == NOUVEAU_ABI16_CHAN(channel))
			return chan;
	}

	return NULL;
}

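/* CHANNEL_FREE ioctl: tear down a single channel previously created by
 * CHANNEL_ALLOC.
 */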
int
nouveau_abi16_ioctl_channel_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_channel_free *req = data;
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, req->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	nouveau_abi16_chan_fini(abi16, chan);
	return nouveau_abi16_put(abi16, 0);
}

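/* GROBJ_ALLOC ioctl: create an engine object on a channel by forwarding an
 * NVIF "new object" request to the client.  Requests for the legacy 0x506e
 * software class are translated to the chipset's actual class, and quietly
 * treated as successful on Fermi and newer.
 */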
int
nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} args = {
		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
		.ioctl.type = NVIF_IOCTL_V0_NEW,
		.ioctl.path_nr = 3,
		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(init->channel),
		.new.route = NVDRM_OBJECT_ABI16,
		.new.handle = init->handle,
		.new.oclass = init->class,
	};
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_client *client;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	if (init->handle == ~0)
		return nouveau_abi16_put(abi16, -EINVAL);
	client = nvif_client(nvif_object(&abi16->device));

	/* compatibility with userspace that assumes 506e for all chipsets */
	if (init->class == 0x506e) {
		init->class = nouveau_abi16_swclass(drm);
		if (init->class == 0x906e)
			return nouveau_abi16_put(abi16, 0);
	}

	ret = nvif_client_ioctl(client, &args, sizeof(args));
	return nouveau_abi16_put(abi16, ret);
}

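/* NOTIFIEROBJ_ALLOC ioctl: carve a notifier out of the channel's notifier
 * buffer and create a DMA object covering it.  The ctxdma target depends on
 * where the buffer lives: the client VM on Tesla and newer, the AGP aperture
 * when AGP is active, or the buffer's linear address otherwise.  Not supported
 * on Fermi and newer.
 */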
int
nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_notifierobj_alloc *info = data;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
		struct nv_dma_v0 ctxdma;
	} args = {
		.ioctl.owner = NVIF_IOCTL_V0_OWNER_ANY,
		.ioctl.type = NVIF_IOCTL_V0_NEW,
		.ioctl.path_nr = 3,
		.ioctl.path[2] = NOUVEAU_ABI16_CLIENT,
		.ioctl.path[1] = NOUVEAU_ABI16_DEVICE,
		.ioctl.path[0] = NOUVEAU_ABI16_CHAN(info->channel),
		.new.route = NVDRM_OBJECT_ABI16,
		.new.handle = info->handle,
		.new.oclass = NV_DMA_IN_MEMORY,
	};
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_device *device = &abi16->device;
	struct nvif_client *client;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	/* completely unnecessary for these chipsets... */
	if (unlikely(device->info.family >= NV_DEVICE_INFO_V0_FERMI))
		return nouveau_abi16_put(abi16, -EINVAL);
	client = nvif_client(nvif_object(&abi16->device));

	chan = nouveau_abi16_chan(abi16, info->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);

	ntfy = kzalloc(sizeof(*ntfy), GFP_KERNEL);
	if (!ntfy)
		return nouveau_abi16_put(abi16, -ENOMEM);

	list_add(&ntfy->head, &chan->notifiers);
	ntfy->handle = info->handle;

	ret = nvkm_mm_head(&chan->heap, 0, 1, info->size, info->size, 1,
			   &ntfy->node);
	if (ret)
		goto done;

	args.ctxdma.start = ntfy->node->offset;
	args.ctxdma.limit = ntfy->node->offset + ntfy->node->length - 1;
	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
		args.ctxdma.access = NV_DMA_V0_ACCESS_VM;
		args.ctxdma.start += chan->ntfy_vma.offset;
		args.ctxdma.limit += chan->ntfy_vma.offset;
	} else
	if (drm->agp.stat == ENABLED) {
		args.ctxdma.target = NV_DMA_V0_TARGET_AGP;
		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
		args.ctxdma.start += drm->agp.base + chan->ntfy->bo.offset;
		args.ctxdma.limit += drm->agp.base + chan->ntfy->bo.offset;
		client->super = true;
	} else {
		args.ctxdma.target = NV_DMA_V0_TARGET_VM;
		args.ctxdma.access = NV_DMA_V0_ACCESS_RDWR;
		args.ctxdma.start += chan->ntfy->bo.offset;
		args.ctxdma.limit += chan->ntfy->bo.offset;
	}

	ret = nvif_client_ioctl(client, &args, sizeof(args));
	client->super = false;
	if (ret)
		goto done;

	info->offset = ntfy->node->offset;

done:
	if (ret)
		nouveau_abi16_ntfy_fini(chan, ntfy);
	return nouveau_abi16_put(abi16, ret);
}

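/* GPUOBJ_FREE ioctl: idle the channel, delete the named object through NVIF,
 * and release any notifier heap space that was associated with it.
 */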
int
nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
	struct drm_nouveau_gpuobj_free *fini = data;
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.owner = NVDRM_OBJECT_ABI16,
		.ioctl.type = NVIF_IOCTL_V0_DEL,
		.ioctl.path_nr = 4,
		.ioctl.path[3] = NOUVEAU_ABI16_CLIENT,
		.ioctl.path[2] = NOUVEAU_ABI16_DEVICE,
		.ioctl.path[1] = NOUVEAU_ABI16_CHAN(fini->channel),
		.ioctl.path[0] = fini->handle,
	};
	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
	struct nouveau_abi16_chan *chan;
	struct nouveau_abi16_ntfy *ntfy;
	struct nvif_client *client;
	int ret;

	if (unlikely(!abi16))
		return -ENOMEM;

	chan = nouveau_abi16_chan(abi16, fini->channel);
	if (!chan)
		return nouveau_abi16_put(abi16, -ENOENT);
	client = nvif_client(nvif_object(&abi16->device));

	/* synchronize with the user channel and destroy the gpu object */
	nouveau_channel_idle(chan->chan);

	ret = nvif_client_ioctl(client, &args, sizeof(args));
	if (ret)
		return nouveau_abi16_put(abi16, ret);

	/* cleanup extra state if this object was a notifier */
	list_for_each_entry(ntfy, &chan->notifiers, head) {
		if (ntfy->handle == fini->handle) {
			nvkm_mm_free(&chan->heap, &ntfy->node);
			list_del(&ntfy->head);
			break;
		}
	}

	return nouveau_abi16_put(abi16, 0);
}