drm/nouveau/core: remove last printks
[deliverable/linux.git] / drivers / gpu / drm / nouveau / nvkm / engine / fifo / nv04.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem/nv04.h>
#include <subdev/timer.h>

#include <nvif/class.h>
#include <nvif/unpack.h>

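/* per-channel RAMFC layout; field order inferred from the walkers in
 * nv04_fifo_chan_dtor()/nv04_fifo_chan_fini(): { bit width, shift within
 * the RAMFC word, byte offset into the channel's RAMFC entry, shift
 * within the PFIFO register, register address }.  An entry with
 * bits == 0 terminates the list.
 */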
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};

/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/

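/* the context word inserted into RAMHT below encodes: bit 31 valid,
 * bits 30:24 channel id, bits 17:16 target engine (0 = SW/DMAOBJ,
 * 1 = GR, 2 = MPEG), and the object's instance address >> 4 in the
 * low bits.
 */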
int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(fifo->ramht, chid, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}

void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(fifo->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}

int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}

static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %08x "
				   "offset %016llx\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

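	/* seed the channel's RAMFC image: DMA_PUT/GET both start at the
	 * requested offset, DMA_INSTANCE points at the push buffer, and
	 * DMA_FETCH gets 128-byte trigger/size with 8 outstanding requests.
	 */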
	nv_wo32(fifo->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nv_wo32(fifo->ramfc, chan->ramfc + 0x10,
		NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
		NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
		NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
		NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	return 0;
}

void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct ramfc_desc *c = fifo->ramfc_desc;

	do {
		nv_wo32(fifo->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);

	nvkm_fifo_channel_destroy(&chan->base);
}

int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}

int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_gpuobj *fctx = fifo->ramfc;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

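		/* save the CACHE1 register state back into the channel's
		 * RAMFC image, then zero the registers to leave a null
		 * context loaded.
		 */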
		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nv_ro32(fctx, c->ctxp + data) & ~cm);
			nv_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}

static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};

static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};

/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/

int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}

static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};

/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/

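/* pause()/start() bracket sections that need PFIFO quiesced; the IRQ
 * flags saved by nv04_fifo_pause() travel back to nv04_fifo_start()
 * through *pflags.
 */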
void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently, sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
			      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}

void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}

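/* decode bits 31:29 of PFIFO's DMA state word (read from 0x003228 in
 * nv04_fifo_dma_pusher() below) into a readable name for the
 * DMA_PUSHER error messages.
 */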
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}

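/* attempt to service a method from the interrupt handler: method 0x0000
 * binds a new object to a subchannel; anything else is forwarded to the
 * object currently bound there, but only while no hardware engine owns
 * that subchannel (its CACHE1_ENGINE nibble is zero).
 */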
static bool
nv04_fifo_swmthd(struct nv04_fifo *fifo, u32 chid, u32 addr, u32 data)
{
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nv04_fifo_chan *chan = NULL;
	struct nvkm_handle *bind;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	unsigned long flags;
	u32 engine;

	spin_lock_irqsave(&fifo->base.lock, flags);
	if (likely(chid >= fifo->base.min && chid <= fifo->base.max))
		chan = (void *)fifo->base.channel[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000:
		bind = nvkm_namedb_get(nv_namedb(chan), data);
		if (unlikely(!bind))
			break;

		if (nv_engidx(bind->object->engine) == NVDEV_ENGINE_SW) {
			engine = 0x0000000f << (subc * 4);
			chan->subc[subc] = data;
			handled = true;

			nvkm_mask(device, NV04_PFIFO_CACHE1_ENGINE, engine, 0);
		}

		nvkm_namedb_put(bind);
		break;
	default:
		engine = nvkm_rd32(device, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		bind = nvkm_namedb_get(nv_namedb(chan), chan->subc[subc]);
		if (likely(bind)) {
			if (!nv_call(bind->object, mthd, data))
				handled = true;
			nvkm_namedb_put(bind);
		}
		break;
	}

out:
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return handled;
}

static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!nv04_fifo_swmthd(fifo, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

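	/* recover: ack the interrupt, step GET past the offending method
	 * while the pusher is held off, then re-enable pusher and puller.
	 */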
	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}

static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

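	/* report the fault, then recover by skipping whatever remains in
	 * the pushbuffer: advance GET (and the NV50 IB pointers) to match
	 * PUT, clear DMA_STATE and re-enable DMA_PUSH.
	 */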
	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
			   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}

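/* top-level PFIFO interrupt handler; context switching is held off
 * (NV03_PFIFO_CACHES = 0) while servicing, then restored to whatever
 * state it was in beforehand.
 */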
void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}

static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_instmem *imem = nv04_instmem(parent);
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nvkm_ramht_ref(imem->ramht, &fifo->ramht);
	nvkm_gpuobj_ref(imem->ramro, &fifo->ramro);
	nvkm_gpuobj_ref(imem->ramfc, &fifo->ramfc);

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}

void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_gpuobj_ref(NULL, &fifo->ramfc);
	nvkm_gpuobj_ref(NULL, &fifo->ramro);
	nvkm_ramht_ref(NULL, &fifo->ramht);
	nvkm_fifo_destroy(&fifo->base);
}

int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

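	/* point PFIFO at RAMHT/RAMRO/RAMFC; per the shifts below, the RAMHT
	 * word appears to pack search granularity (bits 25:24, 0x03 = 128
	 * bytes), table size (log2(size) - 9 in bits 19:16) and the base
	 * address >> 8.
	 */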
	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
				       ((fifo->ramht->bits - 9) << 16) |
					(fifo->ramht->gpuobj.addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, fifo->ramro->addr >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, fifo->ramfc->addr >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}

struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};