drm/nouveau/device: cleaner abstraction for device resource functions
drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "chan.h"

#include <core/client.h>
#include <core/gpuobj.h>
#include <core/oproxy.h>
#include <subdev/mmu.h>
#include <engine/dma.h>

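/* An object allocated on a FIFO channel: an oproxy wrapping the real engine
 * object, plus the owning channel and the handle returned by the channel's
 * object_ctor hook.
 */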
struct nvkm_fifo_chan_object {
	struct nvkm_oproxy oproxy;
	struct nvkm_fifo_chan *chan;
	int hash;
};

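/* Finalize an engine object on the channel; the engine context is detached
 * from the channel only when the last user of that engine is finalized.
 */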
static int
nvkm_fifo_chan_child_fini(struct nvkm_oproxy *base, bool suspend)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret = 0;

	if (--engn->usecount)
		return 0;

	if (chan->func->engine_fini) {
		ret = chan->func->engine_fini(chan, engine, suspend);
		if (ret) {
			nvif_error(&chan->object,
				   "detach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	if (engn->object) {
		ret = nvkm_object_fini(engn->object, suspend);
		if (ret && suspend)
			return ret;
	}

	nvif_trace(&chan->object, "detached %s\n", name);
	return ret;
}

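/* Initialise an engine object on the channel; the engine context is attached
 * to the channel when the first user of that engine is initialised.
 */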
static int
nvkm_fifo_chan_child_init(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.object->engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	const char *name = nvkm_subdev_name[engine->subdev.index];
	int ret;

	if (engn->usecount++)
		return 0;

	if (engn->object) {
		ret = nvkm_object_init(engn->object);
		if (ret)
			return ret;
	}

	if (chan->func->engine_init) {
		ret = chan->func->engine_init(chan, engine);
		if (ret) {
			nvif_error(&chan->object,
				   "attach %s failed, %d\n", name, ret);
			return ret;
		}
	}

	nvif_trace(&chan->object, "attached %s\n", name);
	return 0;
}

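/* Destroy an engine object on the channel: drop the per-channel handle
 * created by object_ctor and, once the last object for that engine is gone,
 * destroy the engine context as well.
 */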
static void
nvkm_fifo_chan_child_del(struct nvkm_oproxy *base)
{
	struct nvkm_fifo_chan_object *object =
		container_of(base, typeof(*object), oproxy);
	struct nvkm_engine *engine = object->oproxy.base.engine;
	struct nvkm_fifo_chan *chan = object->chan;
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];

	if (chan->func->object_dtor)
		chan->func->object_dtor(chan, object->hash);

	if (!--engn->refcount) {
		if (chan->func->engine_dtor)
			chan->func->engine_dtor(chan, engine);
		nvkm_object_del(&engn->object);
		if (chan->vm)
			atomic_dec(&chan->vm->engref[engine->subdev.index]);
	}
}

static const struct nvkm_oproxy_func
nvkm_fifo_chan_child_func = {
	.dtor[0] = nvkm_fifo_chan_child_del,
	.init[0] = nvkm_fifo_chan_child_init,
	.fini[0] = nvkm_fifo_chan_child_fini,
};

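/* Create an object on the channel.  The first object for a given engine also
 * allocates that engine's channel context (cclass) and attaches it via the
 * channel's engine_ctor hook.
 */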
static int
nvkm_fifo_chan_child_new(const struct nvkm_oclass *oclass, void *data, u32 size,
			 struct nvkm_object **pobject)
{
	struct nvkm_engine *engine = oclass->engine;
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(oclass->parent);
	struct nvkm_fifo_engn *engn = &chan->engn[engine->subdev.index];
	struct nvkm_fifo_chan_object *object;
	int ret = 0;

	if (!(object = kzalloc(sizeof(*object), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&nvkm_fifo_chan_child_func, oclass, &object->oproxy);
	object->chan = chan;
	*pobject = &object->oproxy.base;

	if (!engn->refcount++) {
		struct nvkm_oclass cclass = {
			.client = oclass->client,
			.engine = oclass->engine,
		};

		if (chan->vm)
			atomic_inc(&chan->vm->engref[engine->subdev.index]);

		if (engine->func->fifo.cclass) {
			ret = engine->func->fifo.cclass(chan, &cclass,
							&engn->object);
		} else
		if (engine->func->cclass) {
			ret = nvkm_object_new_(engine->func->cclass, &cclass,
					       NULL, 0, &engn->object);
		}
		if (ret)
			return ret;

		if (chan->func->engine_ctor) {
			ret = chan->func->engine_ctor(chan, oclass->engine,
						      engn->object);
			if (ret)
				return ret;
		}
	}

	ret = oclass->base.ctor(&(const struct nvkm_oclass) {
					.base = oclass->base,
					.engn = oclass->engn,
					.handle = oclass->handle,
					.object = oclass->object,
					.client = oclass->client,
					.parent = engn->object ?
						  engn->object :
						  oclass->parent,
					.engine = engine,
				}, data, size, &object->oproxy.object);
	if (ret)
		return ret;

	if (chan->func->object_ctor) {
		object->hash =
			chan->func->object_ctor(chan, object->oproxy.object);
		if (object->hash < 0)
			return object->hash;
	}

	return 0;
}

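/* Enumerate the object classes (sclass) available on the channel by walking
 * every engine enabled in chan->engines.
 */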
static int
nvkm_fifo_chan_child_get(struct nvkm_object *object, int index,
			 struct nvkm_oclass *oclass)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_engine *engine;
	u64 mask = chan->engines;
	int ret, i, c;

	for (; c = 0, i = __ffs64(mask), mask; mask &= ~(1ULL << i)) {
		if (!(engine = nvkm_device_engine(device, i)))
			continue;
		oclass->engine = engine;
		oclass->base.oclass = 0;

		if (engine->func->fifo.sclass) {
			ret = engine->func->fifo.sclass(oclass, index);
			if (oclass->base.oclass) {
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}

			index -= ret;
			continue;
		}

		while (engine->func->sclass[c].oclass) {
			if (c++ == index) {
				oclass->base = engine->func->sclass[index];
				if (!oclass->base.ctor)
					oclass->base.ctor = nvkm_object_new;
				oclass->ctor = nvkm_fifo_chan_child_new;
				return 0;
			}
		}
		index -= c;
	}

	return -EINVAL;
}

static int
nvkm_fifo_chan_ntfy(struct nvkm_object *object, u32 type,
		    struct nvkm_event **pevent)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (chan->func->ntfy)
		return chan->func->ntfy(chan, type, pevent);
	return -ENODEV;
}

static int
nvkm_fifo_chan_map(struct nvkm_object *object, u64 *addr, u32 *size)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	*addr = chan->addr;
	*size = chan->size;
	return 0;
}

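/* USER register access; the register window is ioremap()ed lazily on first
 * use and unmapped in nvkm_fifo_chan_dtor().
 */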
static int
nvkm_fifo_chan_rd32(struct nvkm_object *object, u64 addr, u32 *data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	*data = ioread32_native(chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_wr32(struct nvkm_object *object, u64 addr, u32 data)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	if (unlikely(!chan->user)) {
		chan->user = ioremap(chan->addr, chan->size);
		if (!chan->user)
			return -ENOMEM;
	}
	if (unlikely(addr + 4 > chan->size))
		return -EINVAL;
	iowrite32_native(data, chan->user + addr);
	return 0;
}

static int
nvkm_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->fini(chan);
	return 0;
}

static int
nvkm_fifo_chan_init(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	chan->func->init(chan);
	return 0;
}

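/* Release the channel id, USER mapping, address space reference, push buffer
 * ctxdma and instance memory.
 */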
static void *
nvkm_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
	struct nvkm_fifo *fifo = chan->fifo;
	void *data = chan->func->dtor(chan);
	unsigned long flags;

	spin_lock_irqsave(&fifo->lock, flags);
	if (!list_empty(&chan->head)) {
		__clear_bit(chan->chid, fifo->mask);
		list_del(&chan->head);
	}
	spin_unlock_irqrestore(&fifo->lock, flags);

	if (chan->user)
		iounmap(chan->user);

	nvkm_vm_ref(NULL, &chan->vm, NULL);

	nvkm_gpuobj_del(&chan->push);
	nvkm_gpuobj_del(&chan->inst);
	return data;
}

static const struct nvkm_object_func
nvkm_fifo_chan_func = {
	.dtor = nvkm_fifo_chan_dtor,
	.init = nvkm_fifo_chan_init,
	.fini = nvkm_fifo_chan_fini,
	.ntfy = nvkm_fifo_chan_ntfy,
	.map = nvkm_fifo_chan_map,
	.rd32 = nvkm_fifo_chan_rd32,
	.wr32 = nvkm_fifo_chan_wr32,
	.sclass = nvkm_fifo_chan_child_get,
};

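/* Common channel constructor: allocates instance memory, binds the push
 * buffer ctxdma, takes a reference on the client's address space, allocates
 * a channel id, and computes the BAR address of the channel's USER registers
 * (base + user * chid).
 */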
int
nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
		    struct nvkm_fifo *fifo, u32 size, u32 align, bool zero,
		    u64 vm, u64 push, u64 engines, int bar, u32 base, u32 user,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_fifo_chan *chan)
{
	struct nvkm_client *client = oclass->client;
	struct nvkm_device *device = fifo->engine.subdev.device;
	struct nvkm_mmu *mmu = device->mmu;
	struct nvkm_dmaobj *dmaobj;
	unsigned long flags;
	int ret;

	nvkm_object_ctor(&nvkm_fifo_chan_func, oclass, &chan->object);
	chan->func = func;
	chan->fifo = fifo;
	chan->engines = engines;
	INIT_LIST_HEAD(&chan->head);

	/* instance memory */
	ret = nvkm_gpuobj_new(device, size, align, zero, NULL, &chan->inst);
	if (ret)
		return ret;

	/* allocate push buffer ctxdma instance */
	if (push) {
		dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
		if (!dmaobj)
			return -ENOENT;

		ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
				       &chan->push);
		if (ret)
			return ret;
	}

	/* channel address space */
	if (!vm && mmu) {
		if (!client->vm || client->vm->mmu == mmu) {
			ret = nvkm_vm_ref(client->vm, &chan->vm, NULL);
			if (ret)
				return ret;
		} else {
			return -EINVAL;
		}
	} else {
		return -ENOENT;
	}

	/* allocate channel id */
	spin_lock_irqsave(&fifo->lock, flags);
	chan->chid = find_first_zero_bit(fifo->mask, NVKM_FIFO_CHID_NR);
	if (chan->chid >= NVKM_FIFO_CHID_NR) {
		spin_unlock_irqrestore(&fifo->lock, flags);
		return -ENOSPC;
	}
	list_add(&chan->head, &fifo->chan);
	__set_bit(chan->chid, fifo->mask);
	spin_unlock_irqrestore(&fifo->lock, flags);

	/* determine address of this channel's user registers */
	chan->addr = device->func->resource_addr(device, bar) +
		     base + user * chan->chid;
	chan->size = user;

	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
	return 0;
}