/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <engine/fifo.h>
26 #include <core/client.h>
27 #include <core/engctx.h>
28 #include <core/enum.h>
29 #include <core/handle.h>
30 #include <subdev/bar.h>
31 #include <subdev/fb.h>
32 #include <subdev/mmu.h>
33 #include <subdev/timer.h>
35 #include <nvif/class.h>
36 #include <nvif/unpack.h>
39 struct nvkm_fifo base
;
41 struct work_struct fault
;
45 struct nvkm_gpuobj
*mem
[2];
47 wait_queue_head_t wait
;
51 struct nvkm_gpuobj
*mem
;
57 struct gf100_fifo_base
{
58 struct nvkm_fifo_base base
;
59 struct nvkm_gpuobj
*pgd
;
63 struct gf100_fifo_chan
{
64 struct nvkm_fifo_chan base
;
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
77 gf100_fifo_runlist_update(struct gf100_fifo
*fifo
)
79 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
80 struct nvkm_device
*device
= subdev
->device
;
81 struct nvkm_bar
*bar
= device
->bar
;
82 struct nvkm_gpuobj
*cur
;
85 mutex_lock(&nv_subdev(fifo
)->mutex
);
86 cur
= fifo
->runlist
.mem
[fifo
->runlist
.active
];
87 fifo
->runlist
.active
= !fifo
->runlist
.active
;
89 for (i
= 0, p
= 0; i
< 128; i
++) {
90 struct gf100_fifo_chan
*chan
= (void *)fifo
->base
.channel
[i
];
91 if (chan
&& chan
->state
== RUNNING
) {
92 nv_wo32(cur
, p
+ 0, i
);
93 nv_wo32(cur
, p
+ 4, 0x00000004);
99 nvkm_wr32(device
, 0x002270, cur
->addr
>> 12);
100 nvkm_wr32(device
, 0x002274, 0x01f00000 | (p
>> 3));
102 if (wait_event_timeout(fifo
->runlist
.wait
,
103 !(nvkm_rd32(device
, 0x00227c) & 0x00100000),
104 msecs_to_jiffies(2000)) == 0)
105 nvkm_error(subdev
, "runlist update timeout\n");
106 mutex_unlock(&nv_subdev(fifo
)->mutex
);
110 gf100_fifo_context_attach(struct nvkm_object
*parent
,
111 struct nvkm_object
*object
)
113 struct nvkm_bar
*bar
= nvkm_bar(parent
);
114 struct gf100_fifo_base
*base
= (void *)parent
->parent
;
115 struct nvkm_engctx
*ectx
= (void *)object
;
119 switch (nv_engidx(object
->engine
)) {
120 case NVDEV_ENGINE_SW
: return 0;
121 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
122 case NVDEV_ENGINE_CE0
: addr
= 0x0230; break;
123 case NVDEV_ENGINE_CE1
: addr
= 0x0240; break;
124 case NVDEV_ENGINE_MSVLD
: addr
= 0x0270; break;
125 case NVDEV_ENGINE_MSPDEC
: addr
= 0x0250; break;
126 case NVDEV_ENGINE_MSPPP
: addr
= 0x0260; break;
131 if (!ectx
->vma
.node
) {
132 ret
= nvkm_gpuobj_map_vm(nv_gpuobj(ectx
), base
->vm
,
133 NV_MEM_ACCESS_RW
, &ectx
->vma
);
137 nv_engctx(ectx
)->addr
= nv_gpuobj(base
)->addr
>> 12;
140 nv_wo32(base
, addr
+ 0x00, lower_32_bits(ectx
->vma
.offset
) | 4);
141 nv_wo32(base
, addr
+ 0x04, upper_32_bits(ectx
->vma
.offset
));
147 gf100_fifo_context_detach(struct nvkm_object
*parent
, bool suspend
,
148 struct nvkm_object
*object
)
150 struct gf100_fifo
*fifo
= (void *)parent
->engine
;
151 struct gf100_fifo_base
*base
= (void *)parent
->parent
;
152 struct gf100_fifo_chan
*chan
= (void *)parent
;
153 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
154 struct nvkm_device
*device
= subdev
->device
;
155 struct nvkm_bar
*bar
= device
->bar
;
158 switch (nv_engidx(object
->engine
)) {
159 case NVDEV_ENGINE_SW
: return 0;
160 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
161 case NVDEV_ENGINE_CE0
: addr
= 0x0230; break;
162 case NVDEV_ENGINE_CE1
: addr
= 0x0240; break;
163 case NVDEV_ENGINE_MSVLD
: addr
= 0x0270; break;
164 case NVDEV_ENGINE_MSPDEC
: addr
= 0x0250; break;
165 case NVDEV_ENGINE_MSPPP
: addr
= 0x0260; break;
170 nvkm_wr32(device
, 0x002634, chan
->base
.chid
);
171 if (nvkm_msec(device
, 2000,
172 if (nvkm_rd32(device
, 0x002634) == chan
->base
.chid
)
175 nvkm_error(subdev
, "channel %d [%s] kick timeout\n",
176 chan
->base
.chid
, nvkm_client_name(chan
));
181 nv_wo32(base
, addr
+ 0x00, 0x00000000);
182 nv_wo32(base
, addr
+ 0x04, 0x00000000);
188 gf100_fifo_chan_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
189 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
190 struct nvkm_object
**pobject
)
193 struct nv50_channel_gpfifo_v0 v0
;
195 struct nvkm_bar
*bar
= nvkm_bar(parent
);
196 struct gf100_fifo
*fifo
= (void *)engine
;
197 struct gf100_fifo_base
*base
= (void *)parent
;
198 struct gf100_fifo_chan
*chan
;
199 u64 usermem
, ioffset
, ilength
;
202 nvif_ioctl(parent
, "create channel gpfifo size %d\n", size
);
203 if (nvif_unpack(args
->v0
, 0, 0, false)) {
204 nvif_ioctl(parent
, "create channel gpfifo vers %d pushbuf %08x "
205 "ioffset %016llx ilength %08x\n",
206 args
->v0
.version
, args
->v0
.pushbuf
, args
->v0
.ioffset
,
211 ret
= nvkm_fifo_channel_create(parent
, engine
, oclass
, 1,
212 fifo
->user
.bar
.offset
, 0x1000,
214 (1ULL << NVDEV_ENGINE_SW
) |
215 (1ULL << NVDEV_ENGINE_GR
) |
216 (1ULL << NVDEV_ENGINE_CE0
) |
217 (1ULL << NVDEV_ENGINE_CE1
) |
218 (1ULL << NVDEV_ENGINE_MSVLD
) |
219 (1ULL << NVDEV_ENGINE_MSPDEC
) |
220 (1ULL << NVDEV_ENGINE_MSPPP
), &chan
);
221 *pobject
= nv_object(chan
);
225 args
->v0
.chid
= chan
->base
.chid
;
227 nv_parent(chan
)->context_attach
= gf100_fifo_context_attach
;
228 nv_parent(chan
)->context_detach
= gf100_fifo_context_detach
;
230 usermem
= chan
->base
.chid
* 0x1000;
231 ioffset
= args
->v0
.ioffset
;
232 ilength
= order_base_2(args
->v0
.ilength
/ 8);
234 for (i
= 0; i
< 0x1000; i
+= 4)
235 nv_wo32(fifo
->user
.mem
, usermem
+ i
, 0x00000000);
237 nv_wo32(base
, 0x08, lower_32_bits(fifo
->user
.mem
->addr
+ usermem
));
238 nv_wo32(base
, 0x0c, upper_32_bits(fifo
->user
.mem
->addr
+ usermem
));
239 nv_wo32(base
, 0x10, 0x0000face);
240 nv_wo32(base
, 0x30, 0xfffff902);
241 nv_wo32(base
, 0x48, lower_32_bits(ioffset
));
242 nv_wo32(base
, 0x4c, upper_32_bits(ioffset
) | (ilength
<< 16));
243 nv_wo32(base
, 0x54, 0x00000002);
244 nv_wo32(base
, 0x84, 0x20400000);
245 nv_wo32(base
, 0x94, 0x30000001);
246 nv_wo32(base
, 0x9c, 0x00000100);
247 nv_wo32(base
, 0xa4, 0x1f1f1f1f);
248 nv_wo32(base
, 0xa8, 0x1f1f1f1f);
249 nv_wo32(base
, 0xac, 0x0000001f);
250 nv_wo32(base
, 0xb8, 0xf8000000);
251 nv_wo32(base
, 0xf8, 0x10003080); /* 0x002310 */
252 nv_wo32(base
, 0xfc, 0x10000010); /* 0x002350 */
258 gf100_fifo_chan_init(struct nvkm_object
*object
)
260 struct nvkm_gpuobj
*base
= nv_gpuobj(object
->parent
);
261 struct gf100_fifo
*fifo
= (void *)object
->engine
;
262 struct gf100_fifo_chan
*chan
= (void *)object
;
263 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
264 u32 chid
= chan
->base
.chid
;
267 ret
= nvkm_fifo_channel_init(&chan
->base
);
271 nvkm_wr32(device
, 0x003000 + (chid
* 8), 0xc0000000 | base
->addr
>> 12);
273 if (chan
->state
== STOPPED
&& (chan
->state
= RUNNING
) == RUNNING
) {
274 nvkm_wr32(device
, 0x003004 + (chid
* 8), 0x001f0001);
275 gf100_fifo_runlist_update(fifo
);
281 static void gf100_fifo_intr_engine(struct gf100_fifo
*fifo
);
284 gf100_fifo_chan_fini(struct nvkm_object
*object
, bool suspend
)
286 struct gf100_fifo
*fifo
= (void *)object
->engine
;
287 struct gf100_fifo_chan
*chan
= (void *)object
;
288 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
289 u32 chid
= chan
->base
.chid
;
291 if (chan
->state
== RUNNING
&& (chan
->state
= STOPPED
) == STOPPED
) {
292 nvkm_mask(device
, 0x003004 + (chid
* 8), 0x00000001, 0x00000000);
293 gf100_fifo_runlist_update(fifo
);
296 gf100_fifo_intr_engine(fifo
);
298 nvkm_wr32(device
, 0x003000 + (chid
* 8), 0x00000000);
299 return nvkm_fifo_channel_fini(&chan
->base
, suspend
);
302 static struct nvkm_ofuncs
303 gf100_fifo_ofuncs
= {
304 .ctor
= gf100_fifo_chan_ctor
,
305 .dtor
= _nvkm_fifo_channel_dtor
,
306 .init
= gf100_fifo_chan_init
,
307 .fini
= gf100_fifo_chan_fini
,
308 .map
= _nvkm_fifo_channel_map
,
309 .rd32
= _nvkm_fifo_channel_rd32
,
310 .wr32
= _nvkm_fifo_channel_wr32
,
311 .ntfy
= _nvkm_fifo_channel_ntfy
314 static struct nvkm_oclass
315 gf100_fifo_sclass
[] = {
316 { FERMI_CHANNEL_GPFIFO
, &gf100_fifo_ofuncs
},
/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
325 gf100_fifo_context_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
326 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
327 struct nvkm_object
**pobject
)
329 struct gf100_fifo_base
*base
;
332 ret
= nvkm_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
333 0x1000, NVOBJ_FLAG_ZERO_ALLOC
|
334 NVOBJ_FLAG_HEAP
, &base
);
335 *pobject
= nv_object(base
);
339 ret
= nvkm_gpuobj_new(nv_object(base
), NULL
, 0x10000, 0x1000, 0,
344 nv_wo32(base
, 0x0200, lower_32_bits(base
->pgd
->addr
));
345 nv_wo32(base
, 0x0204, upper_32_bits(base
->pgd
->addr
));
346 nv_wo32(base
, 0x0208, 0xffffffff);
347 nv_wo32(base
, 0x020c, 0x000000ff);
349 ret
= nvkm_vm_ref(nvkm_client(parent
)->vm
, &base
->vm
, base
->pgd
);
357 gf100_fifo_context_dtor(struct nvkm_object
*object
)
359 struct gf100_fifo_base
*base
= (void *)object
;
360 nvkm_vm_ref(NULL
, &base
->vm
, base
->pgd
);
361 nvkm_gpuobj_ref(NULL
, &base
->pgd
);
362 nvkm_fifo_context_destroy(&base
->base
);
365 static struct nvkm_oclass
366 gf100_fifo_cclass
= {
367 .handle
= NV_ENGCTX(FIFO
, 0xc0),
368 .ofuncs
= &(struct nvkm_ofuncs
) {
369 .ctor
= gf100_fifo_context_ctor
,
370 .dtor
= gf100_fifo_context_dtor
,
371 .init
= _nvkm_fifo_context_init
,
372 .fini
= _nvkm_fifo_context_fini
,
373 .rd32
= _nvkm_fifo_context_rd32
,
374 .wr32
= _nvkm_fifo_context_wr32
,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
383 gf100_fifo_engidx(struct gf100_fifo
*fifo
, u32 engn
)
386 case NVDEV_ENGINE_GR
: engn
= 0; break;
387 case NVDEV_ENGINE_MSVLD
: engn
= 1; break;
388 case NVDEV_ENGINE_MSPPP
: engn
= 2; break;
389 case NVDEV_ENGINE_MSPDEC
: engn
= 3; break;
390 case NVDEV_ENGINE_CE0
: engn
= 4; break;
391 case NVDEV_ENGINE_CE1
: engn
= 5; break;
399 static inline struct nvkm_engine
*
400 gf100_fifo_engine(struct gf100_fifo
*fifo
, u32 engn
)
403 case 0: engn
= NVDEV_ENGINE_GR
; break;
404 case 1: engn
= NVDEV_ENGINE_MSVLD
; break;
405 case 2: engn
= NVDEV_ENGINE_MSPPP
; break;
406 case 3: engn
= NVDEV_ENGINE_MSPDEC
; break;
407 case 4: engn
= NVDEV_ENGINE_CE0
; break;
408 case 5: engn
= NVDEV_ENGINE_CE1
; break;
413 return nvkm_engine(fifo
, engn
);
417 gf100_fifo_recover_work(struct work_struct
*work
)
419 struct gf100_fifo
*fifo
= container_of(work
, typeof(*fifo
), fault
);
420 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
421 struct nvkm_object
*engine
;
426 spin_lock_irqsave(&fifo
->base
.lock
, flags
);
429 spin_unlock_irqrestore(&fifo
->base
.lock
, flags
);
431 for (todo
= mask
; engn
= __ffs64(todo
), todo
; todo
&= ~(1 << engn
))
432 engm
|= 1 << gf100_fifo_engidx(fifo
, engn
);
433 nvkm_mask(device
, 0x002630, engm
, engm
);
435 for (todo
= mask
; engn
= __ffs64(todo
), todo
; todo
&= ~(1 << engn
)) {
436 if ((engine
= (void *)nvkm_engine(fifo
, engn
))) {
437 nv_ofuncs(engine
)->fini(engine
, false);
438 WARN_ON(nv_ofuncs(engine
)->init(engine
));
442 gf100_fifo_runlist_update(fifo
);
443 nvkm_wr32(device
, 0x00262c, engm
);
444 nvkm_mask(device
, 0x002630, engm
, 0x00000000);
448 gf100_fifo_recover(struct gf100_fifo
*fifo
, struct nvkm_engine
*engine
,
449 struct gf100_fifo_chan
*chan
)
451 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
452 struct nvkm_device
*device
= subdev
->device
;
453 u32 chid
= chan
->base
.chid
;
456 nvkm_error(subdev
, "%s engine fault on channel %d, recovering...\n",
457 engine
->subdev
.name
, chid
);
459 nvkm_mask(device
, 0x003004 + (chid
* 0x08), 0x00000001, 0x00000000);
460 chan
->state
= KILLED
;
462 spin_lock_irqsave(&fifo
->base
.lock
, flags
);
463 fifo
->mask
|= 1ULL << nv_engidx(engine
);
464 spin_unlock_irqrestore(&fifo
->base
.lock
, flags
);
465 schedule_work(&fifo
->fault
);
469 gf100_fifo_swmthd(struct gf100_fifo
*fifo
, u32 chid
, u32 mthd
, u32 data
)
471 struct gf100_fifo_chan
*chan
= NULL
;
472 struct nvkm_handle
*bind
;
476 spin_lock_irqsave(&fifo
->base
.lock
, flags
);
477 if (likely(chid
>= fifo
->base
.min
&& chid
<= fifo
->base
.max
))
478 chan
= (void *)fifo
->base
.channel
[chid
];
482 bind
= nvkm_namedb_get_class(nv_namedb(chan
), 0x906e);
484 if (!mthd
|| !nv_call(bind
->object
, mthd
, data
))
486 nvkm_namedb_put(bind
);
490 spin_unlock_irqrestore(&fifo
->base
.lock
, flags
);
494 static const struct nvkm_enum
495 gf100_fifo_sched_reason
[] = {
496 { 0x0a, "CTXSW_TIMEOUT" },
501 gf100_fifo_intr_sched_ctxsw(struct gf100_fifo
*fifo
)
503 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
504 struct nvkm_engine
*engine
;
505 struct gf100_fifo_chan
*chan
;
508 for (engn
= 0; engn
< 6; engn
++) {
509 u32 stat
= nvkm_rd32(device
, 0x002640 + (engn
* 0x04));
510 u32 busy
= (stat
& 0x80000000);
511 u32 save
= (stat
& 0x00100000); /* maybe? */
512 u32 unk0
= (stat
& 0x00040000);
513 u32 unk1
= (stat
& 0x00001000);
514 u32 chid
= (stat
& 0x0000007f);
517 if (busy
&& unk0
&& unk1
) {
518 if (!(chan
= (void *)fifo
->base
.channel
[chid
]))
520 if (!(engine
= gf100_fifo_engine(fifo
, engn
)))
522 gf100_fifo_recover(fifo
, engine
, chan
);
528 gf100_fifo_intr_sched(struct gf100_fifo
*fifo
)
530 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
531 struct nvkm_device
*device
= subdev
->device
;
532 u32 intr
= nvkm_rd32(device
, 0x00254c);
533 u32 code
= intr
& 0x000000ff;
534 const struct nvkm_enum
*en
;
536 en
= nvkm_enum_find(gf100_fifo_sched_reason
, code
);
538 nvkm_error(subdev
, "SCHED_ERROR %02x [%s]\n", code
, en
? en
->name
: "");
542 gf100_fifo_intr_sched_ctxsw(fifo
);
549 static const struct nvkm_enum
550 gf100_fifo_fault_engine
[] = {
551 { 0x00, "PGRAPH", NULL
, NVDEV_ENGINE_GR
},
552 { 0x03, "PEEPHOLE", NULL
, NVDEV_ENGINE_IFB
},
553 { 0x04, "BAR1", NULL
, NVDEV_SUBDEV_BAR
},
554 { 0x05, "BAR3", NULL
, NVDEV_SUBDEV_INSTMEM
},
555 { 0x07, "PFIFO", NULL
, NVDEV_ENGINE_FIFO
},
556 { 0x10, "PMSVLD", NULL
, NVDEV_ENGINE_MSVLD
},
557 { 0x11, "PMSPPP", NULL
, NVDEV_ENGINE_MSPPP
},
558 { 0x13, "PCOUNTER" },
559 { 0x14, "PMSPDEC", NULL
, NVDEV_ENGINE_MSPDEC
},
560 { 0x15, "PCE0", NULL
, NVDEV_ENGINE_CE0
},
561 { 0x16, "PCE1", NULL
, NVDEV_ENGINE_CE1
},
566 static const struct nvkm_enum
567 gf100_fifo_fault_reason
[] = {
568 { 0x00, "PT_NOT_PRESENT" },
569 { 0x01, "PT_TOO_SHORT" },
570 { 0x02, "PAGE_NOT_PRESENT" },
571 { 0x03, "VM_LIMIT_EXCEEDED" },
572 { 0x04, "NO_CHANNEL" },
573 { 0x05, "PAGE_SYSTEM_ONLY" },
574 { 0x06, "PAGE_READ_ONLY" },
575 { 0x0a, "COMPRESSED_SYSRAM" },
576 { 0x0c, "INVALID_STORAGE_TYPE" },
580 static const struct nvkm_enum
581 gf100_fifo_fault_hubclient
[] = {
584 { 0x04, "DISPATCH" },
587 { 0x07, "BAR_READ" },
588 { 0x08, "BAR_WRITE" },
592 { 0x11, "PCOUNTER" },
595 { 0x15, "CCACHE_POST" },
599 static const struct nvkm_enum
600 gf100_fifo_fault_gpcclient
[] = {
609 gf100_fifo_intr_fault(struct gf100_fifo
*fifo
, int unit
)
611 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
612 struct nvkm_device
*device
= subdev
->device
;
613 u32 inst
= nvkm_rd32(device
, 0x002800 + (unit
* 0x10));
614 u32 valo
= nvkm_rd32(device
, 0x002804 + (unit
* 0x10));
615 u32 vahi
= nvkm_rd32(device
, 0x002808 + (unit
* 0x10));
616 u32 stat
= nvkm_rd32(device
, 0x00280c + (unit
* 0x10));
617 u32 gpc
= (stat
& 0x1f000000) >> 24;
618 u32 client
= (stat
& 0x00001f00) >> 8;
619 u32 write
= (stat
& 0x00000080);
620 u32 hub
= (stat
& 0x00000040);
621 u32 reason
= (stat
& 0x0000000f);
622 struct nvkm_object
*engctx
= NULL
, *object
;
623 struct nvkm_engine
*engine
= NULL
;
624 const struct nvkm_enum
*er
, *eu
, *ec
;
627 er
= nvkm_enum_find(gf100_fifo_fault_reason
, reason
);
628 eu
= nvkm_enum_find(gf100_fifo_fault_engine
, unit
);
630 ec
= nvkm_enum_find(gf100_fifo_fault_hubclient
, client
);
632 ec
= nvkm_enum_find(gf100_fifo_fault_gpcclient
, client
);
633 snprintf(gpcid
, sizeof(gpcid
), "GPC%d/", gpc
);
638 case NVDEV_SUBDEV_BAR
:
639 nvkm_mask(device
, 0x001704, 0x00000000, 0x00000000);
641 case NVDEV_SUBDEV_INSTMEM
:
642 nvkm_mask(device
, 0x001714, 0x00000000, 0x00000000);
644 case NVDEV_ENGINE_IFB
:
645 nvkm_mask(device
, 0x001718, 0x00000000, 0x00000000);
648 engine
= nvkm_engine(fifo
, eu
->data2
);
650 engctx
= nvkm_engctx_get(engine
, inst
);
656 "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
657 "reason %02x [%s] on channel %d [%010llx %s]\n",
658 write
? "write" : "read", (u64
)vahi
<< 32 | valo
,
659 unit
, eu
? eu
->name
: "", client
, gpcid
, ec
? ec
->name
: "",
660 reason
, er
? er
->name
: "", -1, (u64
)inst
<< 12,
661 nvkm_client_name(engctx
));
665 switch (nv_mclass(object
)) {
666 case FERMI_CHANNEL_GPFIFO
:
667 gf100_fifo_recover(fifo
, engine
, (void *)object
);
670 object
= object
->parent
;
673 nvkm_engctx_put(engctx
);
676 static const struct nvkm_bitfield
677 gf100_fifo_pbdma_intr
[] = {
678 /* { 0x00008000, "" } seen with null ib push */
679 { 0x00200000, "ILLEGAL_MTHD" },
680 { 0x00800000, "EMPTY_SUBC" },
685 gf100_fifo_intr_pbdma(struct gf100_fifo
*fifo
, int unit
)
687 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
688 struct nvkm_device
*device
= subdev
->device
;
689 u32 stat
= nvkm_rd32(device
, 0x040108 + (unit
* 0x2000));
690 u32 addr
= nvkm_rd32(device
, 0x0400c0 + (unit
* 0x2000));
691 u32 data
= nvkm_rd32(device
, 0x0400c4 + (unit
* 0x2000));
692 u32 chid
= nvkm_rd32(device
, 0x040120 + (unit
* 0x2000)) & 0x7f;
693 u32 subc
= (addr
& 0x00070000) >> 16;
694 u32 mthd
= (addr
& 0x00003ffc);
698 if (stat
& 0x00800000) {
699 if (!gf100_fifo_swmthd(fifo
, chid
, mthd
, data
))
704 nvkm_snprintbf(msg
, sizeof(msg
), gf100_fifo_pbdma_intr
, show
);
705 nvkm_error(subdev
, "PBDMA%d: %08x [%s] ch %d [%s] subc %d "
706 "mthd %04x data %08x\n",
707 unit
, show
, msg
, chid
,
708 nvkm_client_name_for_fifo_chid(&fifo
->base
, chid
),
712 nvkm_wr32(device
, 0x0400c0 + (unit
* 0x2000), 0x80600008);
713 nvkm_wr32(device
, 0x040108 + (unit
* 0x2000), stat
);
717 gf100_fifo_intr_runlist(struct gf100_fifo
*fifo
)
719 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
720 struct nvkm_device
*device
= subdev
->device
;
721 u32 intr
= nvkm_rd32(device
, 0x002a00);
723 if (intr
& 0x10000000) {
724 wake_up(&fifo
->runlist
.wait
);
725 nvkm_wr32(device
, 0x002a00, 0x10000000);
730 nvkm_error(subdev
, "RUNLIST %08x\n", intr
);
731 nvkm_wr32(device
, 0x002a00, intr
);
736 gf100_fifo_intr_engine_unit(struct gf100_fifo
*fifo
, int engn
)
738 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
739 struct nvkm_device
*device
= subdev
->device
;
740 u32 intr
= nvkm_rd32(device
, 0x0025a8 + (engn
* 0x04));
741 u32 inte
= nvkm_rd32(device
, 0x002628);
744 nvkm_wr32(device
, 0x0025a8 + (engn
* 0x04), intr
);
746 for (unkn
= 0; unkn
< 8; unkn
++) {
747 u32 ints
= (intr
>> (unkn
* 0x04)) & inte
;
749 nvkm_fifo_uevent(&fifo
->base
);
753 nvkm_error(subdev
, "ENGINE %d %d %01x",
755 nvkm_mask(device
, 0x002628, ints
, 0);
761 gf100_fifo_intr_engine(struct gf100_fifo
*fifo
)
763 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
764 u32 mask
= nvkm_rd32(device
, 0x0025a4);
766 u32 unit
= __ffs(mask
);
767 gf100_fifo_intr_engine_unit(fifo
, unit
);
768 mask
&= ~(1 << unit
);
773 gf100_fifo_intr(struct nvkm_subdev
*subdev
)
775 struct gf100_fifo
*fifo
= (void *)subdev
;
776 struct nvkm_device
*device
= fifo
->base
.engine
.subdev
.device
;
777 u32 mask
= nvkm_rd32(device
, 0x002140);
778 u32 stat
= nvkm_rd32(device
, 0x002100) & mask
;
780 if (stat
& 0x00000001) {
781 u32 intr
= nvkm_rd32(device
, 0x00252c);
782 nvkm_warn(subdev
, "INTR 00000001: %08x\n", intr
);
783 nvkm_wr32(device
, 0x002100, 0x00000001);
787 if (stat
& 0x00000100) {
788 gf100_fifo_intr_sched(fifo
);
789 nvkm_wr32(device
, 0x002100, 0x00000100);
793 if (stat
& 0x00010000) {
794 u32 intr
= nvkm_rd32(device
, 0x00256c);
795 nvkm_warn(subdev
, "INTR 00010000: %08x\n", intr
);
796 nvkm_wr32(device
, 0x002100, 0x00010000);
800 if (stat
& 0x01000000) {
801 u32 intr
= nvkm_rd32(device
, 0x00258c);
802 nvkm_warn(subdev
, "INTR 01000000: %08x\n", intr
);
803 nvkm_wr32(device
, 0x002100, 0x01000000);
807 if (stat
& 0x10000000) {
808 u32 mask
= nvkm_rd32(device
, 0x00259c);
810 u32 unit
= __ffs(mask
);
811 gf100_fifo_intr_fault(fifo
, unit
);
812 nvkm_wr32(device
, 0x00259c, (1 << unit
));
813 mask
&= ~(1 << unit
);
818 if (stat
& 0x20000000) {
819 u32 mask
= nvkm_rd32(device
, 0x0025a0);
821 u32 unit
= __ffs(mask
);
822 gf100_fifo_intr_pbdma(fifo
, unit
);
823 nvkm_wr32(device
, 0x0025a0, (1 << unit
));
824 mask
&= ~(1 << unit
);
829 if (stat
& 0x40000000) {
830 gf100_fifo_intr_runlist(fifo
);
834 if (stat
& 0x80000000) {
835 gf100_fifo_intr_engine(fifo
);
840 nvkm_error(subdev
, "INTR %08x\n", stat
);
841 nvkm_mask(device
, 0x002140, stat
, 0x00000000);
842 nvkm_wr32(device
, 0x002100, stat
);
847 gf100_fifo_uevent_init(struct nvkm_event
*event
, int type
, int index
)
849 struct nvkm_fifo
*fifo
= container_of(event
, typeof(*fifo
), uevent
);
850 struct nvkm_device
*device
= fifo
->engine
.subdev
.device
;
851 nvkm_mask(device
, 0x002140, 0x80000000, 0x80000000);
855 gf100_fifo_uevent_fini(struct nvkm_event
*event
, int type
, int index
)
857 struct nvkm_fifo
*fifo
= container_of(event
, typeof(*fifo
), uevent
);
858 struct nvkm_device
*device
= fifo
->engine
.subdev
.device
;
859 nvkm_mask(device
, 0x002140, 0x80000000, 0x00000000);
862 static const struct nvkm_event_func
863 gf100_fifo_uevent_func
= {
864 .ctor
= nvkm_fifo_uevent_ctor
,
865 .init
= gf100_fifo_uevent_init
,
866 .fini
= gf100_fifo_uevent_fini
,
870 gf100_fifo_ctor(struct nvkm_object
*parent
, struct nvkm_object
*engine
,
871 struct nvkm_oclass
*oclass
, void *data
, u32 size
,
872 struct nvkm_object
**pobject
)
874 struct gf100_fifo
*fifo
;
877 ret
= nvkm_fifo_create(parent
, engine
, oclass
, 0, 127, &fifo
);
878 *pobject
= nv_object(fifo
);
882 INIT_WORK(&fifo
->fault
, gf100_fifo_recover_work
);
884 ret
= nvkm_gpuobj_new(nv_object(fifo
), NULL
, 0x1000, 0x1000, 0,
885 &fifo
->runlist
.mem
[0]);
889 ret
= nvkm_gpuobj_new(nv_object(fifo
), NULL
, 0x1000, 0x1000, 0,
890 &fifo
->runlist
.mem
[1]);
894 init_waitqueue_head(&fifo
->runlist
.wait
);
896 ret
= nvkm_gpuobj_new(nv_object(fifo
), NULL
, 128 * 0x1000, 0x1000, 0,
901 ret
= nvkm_gpuobj_map(fifo
->user
.mem
, NV_MEM_ACCESS_RW
,
906 ret
= nvkm_event_init(&gf100_fifo_uevent_func
, 1, 1, &fifo
->base
.uevent
);
910 nv_subdev(fifo
)->unit
= 0x00000100;
911 nv_subdev(fifo
)->intr
= gf100_fifo_intr
;
912 nv_engine(fifo
)->cclass
= &gf100_fifo_cclass
;
913 nv_engine(fifo
)->sclass
= gf100_fifo_sclass
;
918 gf100_fifo_dtor(struct nvkm_object
*object
)
920 struct gf100_fifo
*fifo
= (void *)object
;
922 nvkm_gpuobj_unmap(&fifo
->user
.bar
);
923 nvkm_gpuobj_ref(NULL
, &fifo
->user
.mem
);
924 nvkm_gpuobj_ref(NULL
, &fifo
->runlist
.mem
[0]);
925 nvkm_gpuobj_ref(NULL
, &fifo
->runlist
.mem
[1]);
927 nvkm_fifo_destroy(&fifo
->base
);
931 gf100_fifo_init(struct nvkm_object
*object
)
933 struct gf100_fifo
*fifo
= (void *)object
;
934 struct nvkm_subdev
*subdev
= &fifo
->base
.engine
.subdev
;
935 struct nvkm_device
*device
= subdev
->device
;
938 ret
= nvkm_fifo_init(&fifo
->base
);
942 nvkm_wr32(device
, 0x000204, 0xffffffff);
943 nvkm_wr32(device
, 0x002204, 0xffffffff);
945 fifo
->spoon_nr
= hweight32(nvkm_rd32(device
, 0x002204));
946 nvkm_debug(subdev
, "%d PBDMA unit(s)\n", fifo
->spoon_nr
);
948 /* assign engines to PBDMAs */
949 if (fifo
->spoon_nr
>= 3) {
950 nvkm_wr32(device
, 0x002208, ~(1 << 0)); /* PGRAPH */
951 nvkm_wr32(device
, 0x00220c, ~(1 << 1)); /* PVP */
952 nvkm_wr32(device
, 0x002210, ~(1 << 1)); /* PMSPP */
953 nvkm_wr32(device
, 0x002214, ~(1 << 1)); /* PMSVLD */
954 nvkm_wr32(device
, 0x002218, ~(1 << 2)); /* PCE0 */
955 nvkm_wr32(device
, 0x00221c, ~(1 << 1)); /* PCE1 */
959 for (i
= 0; i
< fifo
->spoon_nr
; i
++) {
960 nvkm_mask(device
, 0x04013c + (i
* 0x2000), 0x10000100, 0x00000000);
961 nvkm_wr32(device
, 0x040108 + (i
* 0x2000), 0xffffffff); /* INTR */
962 nvkm_wr32(device
, 0x04010c + (i
* 0x2000), 0xfffffeff); /* INTREN */
965 nvkm_mask(device
, 0x002200, 0x00000001, 0x00000001);
966 nvkm_wr32(device
, 0x002254, 0x10000000 | fifo
->user
.bar
.offset
>> 12);
968 nvkm_wr32(device
, 0x002100, 0xffffffff);
969 nvkm_wr32(device
, 0x002140, 0x7fffffff);
970 nvkm_wr32(device
, 0x002628, 0x00000001); /* ENGINE_INTR_EN */
975 gf100_fifo_oclass
= &(struct nvkm_oclass
) {
976 .handle
= NV_ENGINE(FIFO
, 0xc0),
977 .ofuncs
= &(struct nvkm_ofuncs
) {
978 .ctor
= gf100_fifo_ctor
,
979 .dtor
= gf100_fifo_dtor
,
980 .init
= gf100_fifo_init
,
981 .fini
= _nvkm_fifo_fini
,