/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv04.h"

#include <core/client.h>
#include <core/engctx.h>
#include <core/handle.h>
#include <core/ramht.h>
#include <subdev/instmem.h>
#include <subdev/timer.h>
#include <engine/sw.h>

#include <nvif/class.h>
#include <nvif/unpack.h>
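/* Per-channel RAMFC save-area layout.  Going by how the entries are
 * consumed in nv04_fifo_chan_fini()/nv04_fifo_chan_dtor() below, each
 * row appears to describe one field: { bits, ctxs, ctxp, regs, regp } =
 * a field 'bits' wide at shift 'ctxs' within the RAMFC word at offset
 * 'ctxp', shadowing shift 'regs' of PFIFO register 'regp'.  Generic
 * code can then save/restore channel state just by walking the table.
 */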
static struct ramfc_desc
nv04_ramfc[] = {
	{ 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
	{ 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
	{ 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
	{ 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
	{ 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
	{ 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
	{ 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
	{ 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
	{}
};
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
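/* Build a RAMHT context word for <handle> on this channel.  From the
 * code below: bit 31 marks the entry valid, bits 30:24 hold the channel
 * id, bits 17:16 select the target engine, and the low bits hold the
 * object's instance address in 16-byte units (or a non-zero dummy value
 * for software objects).
 */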
int
nv04_fifo_object_attach(struct nvkm_object *parent,
			struct nvkm_object *object, u32 handle)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nv04_fifo_chan *chan = (void *)parent;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	u32 context, chid = chan->base.chid;
	int ret;

	if (nv_iclass(object, NV_GPUOBJ_CLASS))
		context = nv_gpuobj(object)->addr >> 4;
	else
		context = 0x00000004; /* just non-zero */

	switch (nv_engidx(object->engine)) {
	case NVDEV_ENGINE_DMAOBJ:
	case NVDEV_ENGINE_SW:
		context |= 0x00000000;
		break;
	case NVDEV_ENGINE_GR:
		context |= 0x00010000;
		break;
	case NVDEV_ENGINE_MPEG:
		context |= 0x00020000;
		break;
	default:
		return -EINVAL;
	}

	context |= 0x80000000; /* valid */
	context |= chid << 24;

	mutex_lock(&nv_subdev(fifo)->mutex);
	ret = nvkm_ramht_insert(imem->ramht, NULL, chid, 0, handle, context);
	mutex_unlock(&nv_subdev(fifo)->mutex);
	return ret;
}
void
nv04_fifo_object_detach(struct nvkm_object *parent, int cookie)
{
	struct nv04_fifo *fifo = (void *)parent->engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;

	mutex_lock(&nv_subdev(fifo)->mutex);
	nvkm_ramht_remove(imem->ramht, cookie);
	mutex_unlock(&nv_subdev(fifo)->mutex);
}
int
nv04_fifo_context_attach(struct nvkm_object *parent,
			 struct nvkm_object *object)
{
	nv_engctx(object)->addr = nvkm_fifo_chan(parent)->chid;
	return 0;
}
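/* DMA channel constructor: validates the nv03_channel_dma_v0 ioctl
 * arguments, creates the base channel object, then seeds this channel's
 * 32-byte RAMFC slot (PUT/GET at the requested offset, push buffer
 * instance, fetch parameters) so PFIFO can context-switch the channel in.
 */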
static int
nv04_fifo_chan_ctor(struct nvkm_object *parent,
		    struct nvkm_object *engine,
		    struct nvkm_oclass *oclass, void *data, u32 size,
		    struct nvkm_object **pobject)
{
	union {
		struct nv03_channel_dma_v0 v0;
	} *args = data;
	struct nv04_fifo *fifo = (void *)engine;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct nv04_fifo_chan *chan;
	int ret;

	nvif_ioctl(parent, "create channel dma size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(parent, "create channel dma vers %d pushbuf %llx "
				   "offset %08x\n", args->v0.version,
			   args->v0.pushbuf, args->v0.offset);
	} else
		return ret;

	ret = nvkm_fifo_channel_create(parent, engine, oclass, 0, 0x800000,
				       0x10000, args->v0.pushbuf,
				       (1ULL << NVDEV_ENGINE_DMAOBJ) |
				       (1ULL << NVDEV_ENGINE_SW) |
				       (1ULL << NVDEV_ENGINE_GR), &chan);
	*pobject = nv_object(chan);
	if (ret)
		return ret;

	args->v0.chid = chan->base.chid;

	nv_parent(chan)->object_attach = nv04_fifo_object_attach;
	nv_parent(chan)->object_detach = nv04_fifo_object_detach;
	nv_parent(chan)->context_attach = nv04_fifo_context_attach;
	chan->ramfc = chan->base.chid * 32;

	nvkm_kmap(imem->ramfc);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x00, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x04, args->v0.offset);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x08, chan->base.pushgpu->addr >> 4);
	nvkm_wo32(imem->ramfc, chan->ramfc + 0x10,
			       NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
			       NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
			       NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
			       NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
	nvkm_done(imem->ramfc);
	return 0;
}
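/* Channel teardown: zero every RAMFC field this channel owns (walking
 * the same ramfc_desc table used for save/restore) before destroying
 * the base channel object.
 */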
void
nv04_fifo_chan_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_instmem *imem = fifo->base.engine.subdev.device->imem;
	struct ramfc_desc *c = fifo->ramfc_desc;

	nvkm_kmap(imem->ramfc);
	do {
		nvkm_wo32(imem->ramfc, chan->ramfc + c->ctxp, 0x00000000);
	} while ((++c)->bits);
	nvkm_done(imem->ramfc);

	nvkm_fifo_channel_destroy(&chan->base);
}
int
nv04_fifo_chan_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	u32 mask = 1 << chan->base.chid;
	unsigned long flags;
	int ret;

	ret = nvkm_fifo_channel_init(&chan->base);
	if (ret)
		return ret;

	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_mask(device, NV04_PFIFO_MODE, mask, mask);
	spin_unlock_irqrestore(&fifo->base.lock, flags);
	return 0;
}
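/* Channel fini: if this channel currently owns CACHE1, its live PFIFO
 * register state is copied back into its RAMFC slot (again driven by
 * the ramfc_desc table) and CACHE1 is pointed at an unused channel id,
 * so suspend/teardown never leaves stale state on the hardware.
 */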
int
nv04_fifo_chan_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_fifo *fifo = (void *)object->engine;
	struct nv04_fifo_chan *chan = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	struct ramfc_desc *c;
	unsigned long flags;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	if (chid == chan->base.chid) {
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		c = fifo->ramfc_desc;
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);

		c = fifo->ramfc_desc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	/* restore normal operation, after disabling dma mode */
	nvkm_mask(device, NV04_PFIFO_MODE, 1 << chan->base.chid, 0);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&fifo->base.lock, flags);

	return nvkm_fifo_channel_fini(&chan->base, suspend);
}
static struct nvkm_ofuncs
nv04_fifo_ofuncs = {
	.ctor = nv04_fifo_chan_ctor,
	.dtor = nv04_fifo_chan_dtor,
	.init = nv04_fifo_chan_init,
	.fini = nv04_fifo_chan_fini,
	.map  = _nvkm_fifo_channel_map,
	.rd32 = _nvkm_fifo_channel_rd32,
	.wr32 = _nvkm_fifo_channel_wr32,
	.ntfy = _nvkm_fifo_channel_ntfy
};
static struct nvkm_oclass
nv04_fifo_sclass[] = {
	{ NV03_CHANNEL_DMA, &nv04_fifo_ofuncs },
	{}
};
/*******************************************************************************
 * FIFO context - basically just the instmem reserved for the channel
 ******************************************************************************/
static int
nv04_fifo_context_ctor(struct nvkm_object *parent,
		       struct nvkm_object *engine,
		       struct nvkm_oclass *oclass, void *data, u32 size,
		       struct nvkm_object **pobject)
{
	struct nv04_fifo_base *base;
	int ret;

	ret = nvkm_fifo_context_create(parent, engine, oclass, NULL, 0x1000,
				       0x1000, NVOBJ_FLAG_HEAP, &base);
	*pobject = nv_object(base);
	if (ret)
		return ret;

	return 0;
}
static struct nvkm_oclass
nv04_fifo_cclass = {
	.handle = NV_ENGCTX(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_context_ctor,
		.dtor = _nvkm_fifo_context_dtor,
		.init = _nvkm_fifo_context_init,
		.fini = _nvkm_fifo_context_fini,
		.rd32 = _nvkm_fifo_context_rd32,
		.wr32 = _nvkm_fifo_context_wr32,
	},
};
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
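/* pause()/start() are used as a pair by callers that need PFIFO quiesced
 * while they poke at channel state, roughly:
 *
 *	unsigned long flags;
 *	fifo->pause(fifo, &flags);
 *	... modify PFIFO/RAMFC state ...
 *	fifo->start(fifo, &flags);
 *
 * pause() returns with fifo->base.lock held and the caller's irq state
 * saved in *pflags; start() releases it.
 */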
void
nv04_fifo_pause(struct nvkm_fifo *obj, unsigned long *pflags)
__acquires(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags;

	spin_lock_irqsave(&fifo->base.lock, flags);
	*pflags = flags;

	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000000);
	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000000);

	/* in some cases the puller may be left in an inconsistent state
	 * if you try to stop it while it's busy translating handles.
	 * sometimes you get a CACHE_ERROR, sometimes it just fails
	 * silently; sending incorrect instance offsets to PGRAPH after
	 * it's started up again.
	 *
	 * to avoid this, we invalidate the most recently calculated
	 * instance.
	 */
	nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0);
		if (!(tmp & NV04_PFIFO_CACHE1_PULL0_HASH_BUSY))
			break;
	);

	if (nvkm_rd32(device, NV04_PFIFO_CACHE1_PULL0) &
		      NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0x00000000);
}
void
nv04_fifo_start(struct nvkm_fifo *obj, unsigned long *pflags)
__releases(fifo->base.lock)
{
	struct nv04_fifo *fifo = container_of(obj, typeof(*fifo), base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	unsigned long flags = *pflags;

	nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0x00000001);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0x00000001);

	spin_unlock_irqrestore(&fifo->base.lock, flags);
}
static const char *
nv_dma_state_err(u32 state)
{
	static const char * const desc[] = {
		"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
		"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
	};
	return desc[(state >> 29) & 0x7];
}
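/* Attempt to handle a method in software.  addr encodes the subchannel
 * in bits 15:13 and the method offset in bits 12:2.  The case ranges
 * below fall through intentionally: binding a subchannel to sw (mthd
 * 0x0000) and handle->instance translation (0x0180-0x01fc) both end up
 * passing the method to nvkm_sw_mthd() when the subchannel isn't bound
 * to a hardware engine.
 */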
static bool
nv04_fifo_swmthd(struct nvkm_device *device, u32 chid, u32 addr, u32 data)
{
	struct nvkm_sw *sw = device->sw;
	const int subc = (addr & 0x0000e000) >> 13;
	const int mthd = (addr & 0x00001ffc);
	const u32 mask = 0x0000000f << (subc * 4);
	u32 engine = nvkm_rd32(device, 0x003280);
	bool handled = false;

	switch (mthd) {
	case 0x0000 ... 0x0000: /* subchannel's engine -> software */
		nvkm_wr32(device, 0x003280, (engine &= ~mask));
	case 0x0180 ... 0x01fc: /* handle -> instance */
		data = nvkm_rd32(device, 0x003258) & 0x0000ffff;
	case 0x0100 ... 0x017c:
	case 0x0200 ... 0x1ffc: /* pass method down to sw */
		if (!(engine & mask) && sw)
			handled = nvkm_sw_mthd(sw, chid, subc, mthd, data);
		break;
	default:
		break;
	}

	return handled;
}
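/* CACHE_ERROR recovery: the faulting method is read back out of CACHE1
 * at the current GET pointer, offered to the sw engine, and then popped
 * by disabling the pusher, bumping GET past the entry (get + 4), and
 * re-enabling everything.
 */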
static void
nv04_fifo_cache_error(struct nv04_fifo *fifo, u32 chid, u32 get)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pull0 = nvkm_rd32(device, 0x003250);
	u32 mthd, data;
	int ptr;

	/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before wrapping on my
	 * G80 chips, but CACHE1 isn't big enough for this much data.. Tests
	 * show that it wraps around to the start at GET=0x800.. No clue as to
	 * why..
	 */
	ptr = (get & 0x7ff) >> 2;

	if (device->card_type < NV_40) {
		mthd = nvkm_rd32(device, NV04_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV04_PFIFO_CACHE1_DATA(ptr));
	} else {
		mthd = nvkm_rd32(device, NV40_PFIFO_CACHE1_METHOD(ptr));
		data = nvkm_rd32(device, NV40_PFIFO_CACHE1_DATA(ptr));
	}

	if (!(pull0 & 0x00000100) ||
	    !nv04_fifo_swmthd(device, chid, mthd, data)) {
		const char *client_name =
			nvkm_client_name_for_fifo_chid(&fifo->base, chid);
		nvkm_error(subdev, "CACHE_ERROR - "
			   "ch %d [%s] subc %d mthd %04x data %08x\n",
			   chid, client_name, (mthd >> 13) & 7, mthd & 0x1ffc,
			   data);
	}

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
	nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) & ~1);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0,
		nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH0) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_HASH, 0);

	nvkm_wr32(device, NV04_PFIFO_CACHE1_DMA_PUSH,
		nvkm_rd32(device, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
}
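/* DMA_PUSHER error: log the pusher state (including the NV50 high-order
 * and indirect-buffer pointers where present), then recover by skipping
 * GET forward to PUT so the pusher stops replaying the bad fetch.
 */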
static void
nv04_fifo_dma_pusher(struct nv04_fifo *fifo, u32 chid)
{
	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 dma_get = nvkm_rd32(device, 0x003244);
	u32 dma_put = nvkm_rd32(device, 0x003240);
	u32 push = nvkm_rd32(device, 0x003220);
	u32 state = nvkm_rd32(device, 0x003228);
	const char *client_name;

	client_name = nvkm_client_name_for_fifo_chid(&fifo->base, chid);

	if (device->card_type == NV_50) {
		u32 ho_get = nvkm_rd32(device, 0x003328);
		u32 ho_put = nvkm_rd32(device, 0x003320);
		u32 ib_get = nvkm_rd32(device, 0x003334);
		u32 ib_put = nvkm_rd32(device, 0x003330);

		nvkm_error(subdev, "DMA_PUSHER - "
			   "ch %d [%s] get %02x%08x put %02x%08x ib_get %08x "
			   "ib_put %08x state %08x (err: %s) push %08x\n",
			   chid, client_name, ho_get, dma_get, ho_put, dma_put,
			   ib_get, ib_put, state, nv_dma_state_err(state),
			   push);

		/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
		nvkm_wr32(device, 0x003364, 0x00000000);
		if (dma_get != dma_put || ho_get != ho_put) {
			nvkm_wr32(device, 0x003244, dma_put);
			nvkm_wr32(device, 0x003328, ho_put);
		} else
		if (ib_get != ib_put)
			nvkm_wr32(device, 0x003334, ib_put);
	} else {
		nvkm_error(subdev, "DMA_PUSHER - ch %d [%s] get %08x put %08x "
				   "state %08x (err: %s) push %08x\n",
			   chid, client_name, dma_get, dma_put, state,
			   nv_dma_state_err(state), push);

		if (dma_get != dma_put)
			nvkm_wr32(device, 0x003244, dma_put);
	}

	nvkm_wr32(device, 0x003228, 0x00000000);
	nvkm_wr32(device, 0x003220, 0x00000001);
	nvkm_wr32(device, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
}
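/* Top-level PFIFO interrupt handler: context switching (CACHES) is held
 * off while interrupts are serviced, known causes are handled and their
 * bits cleared from stat, and anything left over is logged once and
 * masked off so an unknown source can't wedge the machine in an irq
 * storm.
 */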
static void
nv04_fifo_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_device *device = subdev->device;
	struct nv04_fifo *fifo = (void *)subdev;
	u32 mask = nvkm_rd32(device, NV03_PFIFO_INTR_EN_0);
	u32 stat = nvkm_rd32(device, NV03_PFIFO_INTR_0) & mask;
	u32 reassign, chid, get, sem;

	reassign = nvkm_rd32(device, NV03_PFIFO_CACHES) & 1;
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & fifo->base.max;
	get = nvkm_rd32(device, NV03_PFIFO_CACHE1_GET);

	if (stat & NV_PFIFO_INTR_CACHE_ERROR) {
		nv04_fifo_cache_error(fifo, chid, get);
		stat &= ~NV_PFIFO_INTR_CACHE_ERROR;
	}

	if (stat & NV_PFIFO_INTR_DMA_PUSHER) {
		nv04_fifo_dma_pusher(fifo, chid);
		stat &= ~NV_PFIFO_INTR_DMA_PUSHER;
	}

	if (stat & NV_PFIFO_INTR_SEMAPHORE) {
		stat &= ~NV_PFIFO_INTR_SEMAPHORE;
		nvkm_wr32(device, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE);

		sem = nvkm_rd32(device, NV10_PFIFO_CACHE1_SEMAPHORE);
		nvkm_wr32(device, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, get + 4);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}

	if (device->card_type == NV_50) {
		if (stat & 0x00000010) {
			stat &= ~0x00000010;
			nvkm_wr32(device, 0x002100, 0x00000010);
		}

		if (stat & 0x40000000) {
			nvkm_wr32(device, 0x002100, 0x40000000);
			nvkm_fifo_uevent(&fifo->base);
			stat &= ~0x40000000;
		}
	}

	if (stat) {
		nvkm_warn(subdev, "intr %08x\n", stat);
		nvkm_mask(device, NV03_PFIFO_INTR_EN_0, stat, 0x00000000);
		nvkm_wr32(device, NV03_PFIFO_INTR_0, stat);
	}

	nvkm_wr32(device, NV03_PFIFO_CACHES, reassign);
}
static int
nv04_fifo_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nv04_fifo *fifo;
	int ret;

	ret = nvkm_fifo_create(parent, engine, oclass, 0, 15, &fifo);
	*pobject = nv_object(fifo);
	if (ret)
		return ret;

	nv_subdev(fifo)->unit = 0x00000100;
	nv_subdev(fifo)->intr = nv04_fifo_intr;
	nv_engine(fifo)->cclass = &nv04_fifo_cclass;
	nv_engine(fifo)->sclass = nv04_fifo_sclass;
	fifo->base.pause = nv04_fifo_pause;
	fifo->base.start = nv04_fifo_start;
	fifo->ramfc_desc = nv04_ramfc;
	return 0;
}
void
nv04_fifo_dtor(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	nvkm_fifo_destroy(&fifo->base);
}
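/* One-time PFIFO setup.  Note how NV03_PFIFO_RAMHT packs three fields
 * into a single write: the search stride (0x03 = 128 bytes, per the
 * inline comment), the hash table size encoded as log2(entries) - 9,
 * and the table's base address in 256-byte units.
 */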
int
nv04_fifo_init(struct nvkm_object *object)
{
	struct nv04_fifo *fifo = (void *)object;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_instmem *imem = device->imem;
	struct nvkm_ramht *ramht = imem->ramht;
	struct nvkm_memory *ramro = imem->ramro;
	struct nvkm_memory *ramfc = imem->ramfc;
	int ret;

	ret = nvkm_fifo_init(&fifo->base);
	if (ret)
		return ret;

	nvkm_wr32(device, NV04_PFIFO_DELAY_0, 0x000000ff);
	nvkm_wr32(device, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);

	nvkm_wr32(device, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
					    ((ramht->bits - 9) << 16) |
					    (ramht->gpuobj->addr >> 8));
	nvkm_wr32(device, NV03_PFIFO_RAMRO, nvkm_memory_addr(ramro) >> 8);
	nvkm_wr32(device, NV03_PFIFO_RAMFC, nvkm_memory_addr(ramfc) >> 8);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, fifo->base.max);

	nvkm_wr32(device, NV03_PFIFO_INTR_0, 0xffffffff);
	nvkm_wr32(device, NV03_PFIFO_INTR_EN_0, 0xffffffff);

	nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
	nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 1);
	return 0;
}
struct nvkm_oclass *
nv04_fifo_oclass = &(struct nvkm_oclass) {
	.handle = NV_ENGINE(FIFO, 0x04),
	.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_fifo_ctor,
		.dtor = nv04_fifo_dtor,
		.init = nv04_fifo_init,
		.fini = _nvkm_fifo_fini,
	},
};