drm/nouveau/gpuobj: separate allocation from nvkm_object
[deliverable/linux.git] / drivers / gpu / drm / nouveau / nvkm / engine / fifo / g84.c
1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "nv50.h"
25 #include "nv04.h"
26
27 #include <core/client.h>
28 #include <core/engctx.h>
29 #include <core/ramht.h>
30 #include <subdev/mmu.h>
31 #include <subdev/timer.h>
32
33 #include <nvif/class.h>
34 #include <nvif/unpack.h>
35
36 /*******************************************************************************
37 * FIFO channel objects
38 ******************************************************************************/
39
40 static int
41 g84_fifo_context_attach(struct nvkm_object *parent, struct nvkm_object *object)
42 {
43 struct nv50_fifo_base *base = (void *)parent->parent;
44 struct nvkm_gpuobj *ectx = (void *)object;
45 u64 limit = ectx->addr + ectx->size - 1;
46 u64 start = ectx->addr;
47 u32 addr;
48
49 switch (nv_engidx(object->engine)) {
50 case NVDEV_ENGINE_SW : return 0;
51 case NVDEV_ENGINE_GR : addr = 0x0020; break;
52 case NVDEV_ENGINE_VP :
53 case NVDEV_ENGINE_MSPDEC: addr = 0x0040; break;
54 case NVDEV_ENGINE_MSPPP :
55 case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
56 case NVDEV_ENGINE_BSP :
57 case NVDEV_ENGINE_MSVLD : addr = 0x0080; break;
58 case NVDEV_ENGINE_CIPHER:
59 case NVDEV_ENGINE_SEC : addr = 0x00a0; break;
60 case NVDEV_ENGINE_CE0 : addr = 0x00c0; break;
61 default:
62 return -EINVAL;
63 }
64
65 nv_engctx(ectx)->addr = nv_gpuobj(base)->addr >> 12;
66 nvkm_kmap(base->eng);
67 nvkm_wo32(base->eng, addr + 0x00, 0x00190000);
68 nvkm_wo32(base->eng, addr + 0x04, lower_32_bits(limit));
69 nvkm_wo32(base->eng, addr + 0x08, lower_32_bits(start));
70 nvkm_wo32(base->eng, addr + 0x0c, upper_32_bits(limit) << 24 |
71 upper_32_bits(start));
72 nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
73 nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
74 nvkm_done(base->eng);
75 return 0;
76 }
77
/* Detach an engine's context from this channel: ask the hardware to save
 * out the engine state for the channel, then clear the engine's slot in
 * the channel's context table.  On timeout during suspend this is fatal. */
static int
g84_fifo_context_detach(struct nvkm_object *parent, bool suspend,
			struct nvkm_object *object)
{
	struct nv50_fifo *fifo = (void *)parent->engine;
	struct nv50_fifo_base *base = (void *)parent->parent;
	struct nv50_fifo_chan *chan = (void *)parent;
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 addr, save, engn;
	bool done;

	/* Map the engine to its context-table offset (addr) and to its
	 * bit index in the 0x002520 engine-select mask (engn). */
	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_SW    : return 0;
	case NVDEV_ENGINE_GR    : engn = 0; addr = 0x0020; break;
	case NVDEV_ENGINE_VP    :
	case NVDEV_ENGINE_MSPDEC: engn = 3; addr = 0x0040; break;
	case NVDEV_ENGINE_MSPPP :
	case NVDEV_ENGINE_MPEG  : engn = 1; addr = 0x0060; break;
	case NVDEV_ENGINE_BSP   :
	case NVDEV_ENGINE_MSVLD : engn = 5; addr = 0x0080; break;
	case NVDEV_ENGINE_CIPHER:
	case NVDEV_ENGINE_SEC   : engn = 4; addr = 0x00a0; break;
	case NVDEV_ENGINE_CE0   : engn = 2; addr = 0x00c0; break;
	default:
		return -EINVAL;
	}

	/* Select the target engine, then write the channel's instance
	 * address to 0x0032fc to request a context unload.  The register
	 * reads back 0xffffffff until the unload completes; poll for up
	 * to 2ms (nvkm_msec returns negative on timeout). */
	save = nvkm_mask(device, 0x002520, 0x0000003f, 1 << engn);
	nvkm_wr32(device, 0x0032fc, nv_gpuobj(base)->addr >> 12);
	done = nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x0032fc) != 0xffffffff)
			break;
	) >= 0;
	nvkm_wr32(device, 0x002520, save);
	if (!done) {
		nvkm_error(subdev, "channel %d [%s] unload timeout\n",
			   chan->base.chid, nvkm_client_name(chan));
		/* A failed unload is only fatal when suspending; at
		 * runtime we still clear the table entry below. */
		if (suspend)
			return -EBUSY;
	}

	/* Zero the engine's slot in the channel's context table. */
	nvkm_kmap(base->eng);
	nvkm_wo32(base->eng, addr + 0x00, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x04, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x08, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x0c, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x10, 0x00000000);
	nvkm_wo32(base->eng, addr + 0x14, 0x00000000);
	nvkm_done(base->eng);
	return 0;
}
130
131 static int
132 g84_fifo_object_attach(struct nvkm_object *parent,
133 struct nvkm_object *object, u32 handle)
134 {
135 struct nv50_fifo_chan *chan = (void *)parent;
136 u32 context;
137
138 if (nv_iclass(object, NV_GPUOBJ_CLASS))
139 context = nv_gpuobj(object)->node->offset >> 4;
140 else
141 context = 0x00000004; /* just non-zero */
142
143 if (object->engine) {
144 switch (nv_engidx(object->engine)) {
145 case NVDEV_ENGINE_DMAOBJ:
146 case NVDEV_ENGINE_SW : context |= 0x00000000; break;
147 case NVDEV_ENGINE_GR : context |= 0x00100000; break;
148 case NVDEV_ENGINE_MPEG :
149 case NVDEV_ENGINE_MSPPP : context |= 0x00200000; break;
150 case NVDEV_ENGINE_ME :
151 case NVDEV_ENGINE_CE0 : context |= 0x00300000; break;
152 case NVDEV_ENGINE_VP :
153 case NVDEV_ENGINE_MSPDEC: context |= 0x00400000; break;
154 case NVDEV_ENGINE_CIPHER:
155 case NVDEV_ENGINE_SEC :
156 case NVDEV_ENGINE_VIC : context |= 0x00500000; break;
157 case NVDEV_ENGINE_BSP :
158 case NVDEV_ENGINE_MSVLD : context |= 0x00600000; break;
159 default:
160 return -EINVAL;
161 }
162 }
163
164 return nvkm_ramht_insert(chan->ramht, 0, handle, context);
165 }
166
/* Constructor for a DMA-mode (user-managed pushbuffer) FIFO channel.
 * Unpacks the creation args, creates the base channel object, allocates
 * its RAMHT, wires up the g84 context/object hooks, and fills in the
 * channel's RAMFC. */
static int
g84_fifo_chan_ctor_dma(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_dma_v0 v0;
	} *args = data;
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	/* NOTE: nvif_unpack() is a macro that assigns to 'ret' internally,
	 * so 'ret' is initialized by the time the else branch returns it. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %016llx\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
		/* Per-channel VMs aren't supported on this generation. */
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	/* Create the base channel with the full set of engines a g84
	 * channel may target. */
	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG) |
				       (1ULL << NVDEV_ENGINE_ME) |
				       (1ULL << NVDEV_ENGINE_VP) |
				       (1ULL << NVDEV_ENGINE_CIPHER) |
				       (1ULL << NVDEV_ENGINE_SEC) |
				       (1ULL << NVDEV_ENGINE_BSP) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_VIC), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* Report the assigned channel id back to userspace. */
	args->v0.chid = chan->base.chid;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = g84_fifo_context_attach;
	nv_parent(chan)->context_detach = g84_fifo_context_detach;
	nv_parent(chan)->object_attach = g84_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	/* Initialize RAMFC.  0x08/0x10 pairs appear to be the DMA put/get
	 * pointers seeded to the same offset — TODO confirm against the
	 * nv50 RAMFC layout docs. */
	nvkm_kmap(base->ramfc);
	nvkm_wo32(base->ramfc, 0x08, lower_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x0c, upper_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x10, lower_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x14, upper_32_bits(args->v0.offset));
	nvkm_wo32(base->ramfc, 0x3c, 0x003f6078);
	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nvkm_wo32(base->ramfc, 0x4c, 0xffffffff);
	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
	/* 0x80: RAMHT size (log2, biased by 9), search mode, and the
	 * RAMHT's instance offset. */
	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj.node->offset >> 4));
	nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
	nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
	nvkm_done(base->ramfc);
	return 0;
}
241
/* Constructor for a GPFIFO-mode (indirect pushbuffer) FIFO channel.
 * Like the DMA constructor, but RAMFC is programmed with the indirect
 * buffer's offset and log2 length instead of direct put/get pointers. */
static int
g84_fifo_chan_ctor_ind(struct nvkm_object *parent, struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	union {
		struct nv50_channel_gpfifo_v0 v0;
	} *args = data;
	struct nv50_fifo_base *base = (void *)parent;
	struct nv50_fifo_chan *chan;
	u64 ioffset, ilength;
	int ret;

	nvif_ioctl(parent, "create channel gpfifo size %d\n", size);
	/* NOTE: nvif_unpack() is a macro that assigns to 'ret' internally,
	 * so 'ret' is initialized by the time the else branch returns it. */
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel gpfifo vers %d pushbuf %llx "
				   "ioffset %016llx ilength %08x\n",
			   args->v0.version, args->v0.pushbuf, args->v0.ioffset,
			   args->v0.ilength);
		/* Per-channel VMs aren't supported on this generation. */
		if (args->v0.vm)
			return -ENOENT;
	} else
		return ret;

	/* Create the base channel with the full set of engines a g84
	 * channel may target. */
	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0xc00000,
				       0x2000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR) |
				       (1ULL << NVDEV_ENGINE_MPEG) |
				       (1ULL << NVDEV_ENGINE_ME) |
				       (1ULL << NVDEV_ENGINE_VP) |
				       (1ULL << NVDEV_ENGINE_CIPHER) |
				       (1ULL << NVDEV_ENGINE_SEC) |
				       (1ULL << NVDEV_ENGINE_BSP) |
				       (1ULL << NVDEV_ENGINE_MSVLD) |
				       (1ULL << NVDEV_ENGINE_MSPDEC) |
				       (1ULL << NVDEV_ENGINE_MSPPP) |
				       (1ULL << NVDEV_ENGINE_CE0) |
				       (1ULL << NVDEV_ENGINE_VIC), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	/* Report the assigned channel id back to userspace. */
	args->v0.chid = chan->base.chid;

	ret = nvkm_ramht_new(nv_object(chan), nv_object(chan), 0x8000, 16,
			     &chan->ramht);
	if (ret)
		return ret;

	nv_parent(chan)->context_attach = g84_fifo_context_attach;
	nv_parent(chan)->context_detach = g84_fifo_context_detach;
	nv_parent(chan)->object_attach = g84_fifo_object_attach;
	nv_parent(chan)->object_detach = nv50_fifo_object_detach;

	/* Indirect buffer: base address plus log2 of the entry count
	 * (each GPFIFO entry is 8 bytes). */
	ioffset = args->v0.ioffset;
	ilength = order_base_2(args->v0.ilength / 8);

	/* Initialize RAMFC for GPFIFO mode (0x3c gains the 0x40000000
	 * bit relative to DMA mode; 0x50/0x54 hold the indirect buffer). */
	nvkm_kmap(base->ramfc);
	nvkm_wo32(base->ramfc, 0x3c, 0x403f6078);
	nvkm_wo32(base->ramfc, 0x44, 0x01003fff);
	nvkm_wo32(base->ramfc, 0x48, chan->base.pushgpu->node->offset >> 4);
	nvkm_wo32(base->ramfc, 0x50, lower_32_bits(ioffset));
	nvkm_wo32(base->ramfc, 0x54, upper_32_bits(ioffset) | (ilength << 16));
	nvkm_wo32(base->ramfc, 0x60, 0x7fffffff);
	nvkm_wo32(base->ramfc, 0x78, 0x00000000);
	nvkm_wo32(base->ramfc, 0x7c, 0x30000001);
	/* 0x80: RAMHT size (log2, biased by 9), search mode, and the
	 * RAMHT's instance offset. */
	nvkm_wo32(base->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
				     (4 << 24) /* SEARCH_FULL */ |
				     (chan->ramht->gpuobj.node->offset >> 4));
	nvkm_wo32(base->ramfc, 0x88, base->cache->addr >> 10);
	nvkm_wo32(base->ramfc, 0x98, nv_gpuobj(base)->addr >> 12);
	nvkm_done(base->ramfc);
	return 0;
}
318
319 static int
320 g84_fifo_chan_init(struct nvkm_object *object)
321 {
322 struct nv50_fifo *fifo = (void *)object->engine;
323 struct nv50_fifo_base *base = (void *)object->parent;
324 struct nv50_fifo_chan *chan = (void *)object;
325 struct nvkm_gpuobj *ramfc = base->ramfc;
326 struct nvkm_device *device = fifo->base.engine.subdev.device;
327 u32 chid = chan->base.chid;
328 int ret;
329
330 ret = nvkm_fifo_channel_init(&chan->base);
331 if (ret)
332 return ret;
333
334 nvkm_wr32(device, 0x002600 + (chid * 4), 0x80000000 | ramfc->addr >> 8);
335 nv50_fifo_playlist_update(fifo);
336 return 0;
337 }
338
/* Object functions for DMA-mode (G82_CHANNEL_DMA) channels. */
static struct nvkm_ofuncs
g84_fifo_ofuncs_dma = {
	.ctor = g84_fifo_chan_ctor_dma,
	.dtor = nv50_fifo_chan_dtor,
	.init = g84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
350
/* Object functions for GPFIFO-mode (G82_CHANNEL_GPFIFO) channels;
 * only the constructor differs from the DMA variant. */
static struct nvkm_ofuncs
g84_fifo_ofuncs_ind = {
	.ctor = g84_fifo_chan_ctor_ind,
	.dtor = nv50_fifo_chan_dtor,
	.init = g84_fifo_chan_init,
	.fini = nv50_fifo_chan_fini,
	.map = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
362
/* Channel classes userspace may instantiate on this FIFO. */
static struct nvkm_oclass
g84_fifo_sclass[] = {
	{ G82_CHANNEL_DMA, &g84_fifo_ofuncs_dma },
	{ G82_CHANNEL_GPFIFO, &g84_fifo_ofuncs_ind },
	{}
};
369
370 /*******************************************************************************
371 * FIFO context - basically just the instmem reserved for the channel
372 ******************************************************************************/
373
374 static int
375 g84_fifo_context_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
376 struct nvkm_oclass *oclass, void *data, u32 size,
377 struct nvkm_object **pobject)
378 {
379 struct nvkm_device *device = nv_engine(engine)->subdev.device;
380 struct nv50_fifo_base *base;
381 int ret;
382
383 ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x10000,
384 0x1000, NVOBJ_FLAG_HEAP, &base);
385 *pobject = nv_object(base);
386 if (ret)
387 return ret;
388
389 ret = nvkm_gpuobj_new(device, 0x0200, 0, true, &base->base.gpuobj,
390 &base->eng);
391 if (ret)
392 return ret;
393
394 ret = nvkm_gpuobj_new(device, 0x4000, 0, false, &base->base.gpuobj,
395 &base->pgd);
396 if (ret)
397 return ret;
398
399 ret = nvkm_vm_ref(nvkm_client(parent)->vm, &base->vm, base->pgd);
400 if (ret)
401 return ret;
402
403 ret = nvkm_gpuobj_new(device, 0x1000, 0x400, true, &base->base.gpuobj,
404 &base->cache);
405 if (ret)
406 return ret;
407
408 ret = nvkm_gpuobj_new(device, 0x100, 0x100, true, &base->base.gpuobj,
409 &base->ramfc);
410 if (ret)
411 return ret;
412
413 return 0;
414 }
415
/* Engine-context class: the instance memory reserved per channel. */
static struct nvkm_oclass
g84_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x84),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = g84_fifo_context_ctor,
		.dtor = nv50_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};
428
429 /*******************************************************************************
430 * PFIFO engine
431 ******************************************************************************/
432
433 static void
434 g84_fifo_uevent_init(struct nvkm_event *event, int type, int index)
435 {
436 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
437 struct nvkm_device *device = fifo->engine.subdev.device;
438 nvkm_mask(device, 0x002140, 0x40000000, 0x40000000);
439 }
440
441 static void
442 g84_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
443 {
444 struct nvkm_fifo *fifo = container_of(event, typeof(*fifo), uevent);
445 struct nvkm_device *device = fifo->engine.subdev.device;
446 nvkm_mask(device, 0x002140, 0x40000000, 0x00000000);
447 }
448
/* User-event (non-stall interrupt) handlers for this FIFO. */
static const struct nvkm_event_func
g84_fifo_uevent_func = {
	.ctor = nvkm_fifo_uevent_ctor,
	.init = g84_fifo_uevent_init,
	.fini = g84_fifo_uevent_fini,
};
455
456 static int
457 g84_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
458 struct nvkm_oclass *oclass, void *data, u32 size,
459 struct nvkm_object **pobject)
460 {
461 struct nvkm_device *device = (void *)parent;
462 struct nv50_fifo *fifo;
463 int ret;
464
465 ret = nvkm_fifo_create(parent, engine, oclass, 1, 127, &fifo);
466 *pobject = nv_object(fifo);
467 if (ret)
468 return ret;
469
470 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
471 false, &fifo->playlist[0]);
472 if (ret)
473 return ret;
474
475 ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
476 false, &fifo->playlist[1]);
477 if (ret)
478 return ret;
479
480 ret = nvkm_event_init(&g84_fifo_uevent_func, 1, 1, &fifo->base.uevent);
481 if (ret)
482 return ret;
483
484 nv_subdev(fifo)->unit = 0x00000100;
485 nv_subdev(fifo)->intr = nv04_fifo_intr;
486 nv_engine(fifo)->cclass = &g84_fifo_cclass;
487 nv_engine(fifo)->sclass = g84_fifo_sclass;
488 fifo->base.pause = nv04_fifo_pause;
489 fifo->base.start = nv04_fifo_start;
490 return 0;
491 }
492
/* Public engine class for the g84 PFIFO; dtor/init/fini are shared
 * with the nv50 implementation. */
struct nvkm_oclass *
g84_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x84),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = g84_fifo_ctor,
		.dtor = nv50_fifo_dtor,
		.init = nv50_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};
This page took 0.054928 seconds and 6 git commands to generate.