/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <core/client.h>
#include <core/handle.h>
#include <core/namedb.h>
#include <core/gpuobj.h>
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
#include <core/enum.h>

#include <subdev/timer.h>
#include <subdev/bar.h>
#include <subdev/vm.h>

#include <engine/dmaobj.h>
#include <engine/fifo.h>
41 #define _(a,b) { (a), ((1ULL << (a)) | (b)) }
46 _(NVDEV_ENGINE_GR
, (1ULL << NVDEV_ENGINE_SW
) |
47 (1ULL << NVDEV_ENGINE_COPY2
)),
48 _(NVDEV_ENGINE_VP
, 0),
49 _(NVDEV_ENGINE_PPP
, 0),
50 _(NVDEV_ENGINE_BSP
, 0),
51 _(NVDEV_ENGINE_COPY0
, 0),
52 _(NVDEV_ENGINE_COPY1
, 0),
53 _(NVDEV_ENGINE_VENC
, 0),
56 #define FIFO_ENGINE_NR ARRAY_SIZE(fifo_engine)
/* Per-runlist-engine state: double-buffered playlist objects plus the index
 * of the playlist most recently submitted to hardware (flipped on each
 * update by nve0_fifo_playlist_update()). */
struct nve0_fifo_engn {
	struct nouveau_gpuobj *playlist[2];
	int cur_playlist;
};
63 struct nve0_fifo_priv
{
64 struct nouveau_fifo base
;
65 struct nve0_fifo_engn engine
[FIFO_ENGINE_NR
];
67 struct nouveau_gpuobj
*mem
;
68 struct nouveau_vma bar
;
73 struct nve0_fifo_base
{
74 struct nouveau_fifo_base base
;
75 struct nouveau_gpuobj
*pgd
;
76 struct nouveau_vm
*vm
;
79 struct nve0_fifo_chan
{
80 struct nouveau_fifo_chan base
;
/*******************************************************************************
 * FIFO channel objects
 ******************************************************************************/
89 nve0_fifo_playlist_update(struct nve0_fifo_priv
*priv
, u32 engine
)
91 struct nouveau_bar
*bar
= nouveau_bar(priv
);
92 struct nve0_fifo_engn
*engn
= &priv
->engine
[engine
];
93 struct nouveau_gpuobj
*cur
;
94 u32 match
= (engine
<< 16) | 0x00000001;
97 mutex_lock(&nv_subdev(priv
)->mutex
);
98 cur
= engn
->playlist
[engn
->cur_playlist
];
99 engn
->cur_playlist
= !engn
->cur_playlist
;
101 for (i
= 0, p
= 0; i
< priv
->base
.max
; i
++) {
102 u32 ctrl
= nv_rd32(priv
, 0x800004 + (i
* 8)) & 0x001f0001;
105 nv_wo32(cur
, p
+ 0, i
);
106 nv_wo32(cur
, p
+ 4, 0x00000000);
111 nv_wr32(priv
, 0x002270, cur
->addr
>> 12);
112 nv_wr32(priv
, 0x002274, (engine
<< 20) | (p
>> 3));
113 if (!nv_wait(priv
, 0x002284 + (engine
* 4), 0x00100000, 0x00000000))
114 nv_error(priv
, "playlist %d update timeout\n", engine
);
115 mutex_unlock(&nv_subdev(priv
)->mutex
);
119 nve0_fifo_context_attach(struct nouveau_object
*parent
,
120 struct nouveau_object
*object
)
122 struct nouveau_bar
*bar
= nouveau_bar(parent
);
123 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
124 struct nouveau_engctx
*ectx
= (void *)object
;
128 switch (nv_engidx(object
->engine
)) {
129 case NVDEV_ENGINE_SW
:
130 case NVDEV_ENGINE_COPY0
:
131 case NVDEV_ENGINE_COPY1
:
132 case NVDEV_ENGINE_COPY2
:
134 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
135 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
136 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
137 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
142 if (!ectx
->vma
.node
) {
143 ret
= nouveau_gpuobj_map_vm(nv_gpuobj(ectx
), base
->vm
,
144 NV_MEM_ACCESS_RW
, &ectx
->vma
);
148 nv_engctx(ectx
)->addr
= nv_gpuobj(base
)->addr
>> 12;
151 nv_wo32(base
, addr
+ 0x00, lower_32_bits(ectx
->vma
.offset
) | 4);
152 nv_wo32(base
, addr
+ 0x04, upper_32_bits(ectx
->vma
.offset
));
158 nve0_fifo_context_detach(struct nouveau_object
*parent
, bool suspend
,
159 struct nouveau_object
*object
)
161 struct nouveau_bar
*bar
= nouveau_bar(parent
);
162 struct nve0_fifo_priv
*priv
= (void *)parent
->engine
;
163 struct nve0_fifo_base
*base
= (void *)parent
->parent
;
164 struct nve0_fifo_chan
*chan
= (void *)parent
;
167 switch (nv_engidx(object
->engine
)) {
168 case NVDEV_ENGINE_SW
: return 0;
169 case NVDEV_ENGINE_COPY0
:
170 case NVDEV_ENGINE_COPY1
:
171 case NVDEV_ENGINE_COPY2
: addr
= 0x0000; break;
172 case NVDEV_ENGINE_GR
: addr
= 0x0210; break;
173 case NVDEV_ENGINE_BSP
: addr
= 0x0270; break;
174 case NVDEV_ENGINE_VP
: addr
= 0x0250; break;
175 case NVDEV_ENGINE_PPP
: addr
= 0x0260; break;
180 nv_wr32(priv
, 0x002634, chan
->base
.chid
);
181 if (!nv_wait(priv
, 0x002634, 0xffffffff, chan
->base
.chid
)) {
182 nv_error(priv
, "channel %d [%s] kick timeout\n",
183 chan
->base
.chid
, nouveau_client_name(chan
));
189 nv_wo32(base
, addr
+ 0x00, 0x00000000);
190 nv_wo32(base
, addr
+ 0x04, 0x00000000);
198 nve0_fifo_chan_ctor(struct nouveau_object
*parent
,
199 struct nouveau_object
*engine
,
200 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
201 struct nouveau_object
**pobject
)
203 struct nouveau_bar
*bar
= nouveau_bar(parent
);
204 struct nve0_fifo_priv
*priv
= (void *)engine
;
205 struct nve0_fifo_base
*base
= (void *)parent
;
206 struct nve0_fifo_chan
*chan
;
207 struct nve0_channel_ind_class
*args
= data
;
208 u64 usermem
, ioffset
, ilength
;
211 if (size
< sizeof(*args
))
214 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
215 if (args
->engine
& (1 << i
)) {
216 if (nouveau_engine(parent
, fifo_engine
[i
].subdev
)) {
217 args
->engine
= (1 << i
);
223 if (i
== FIFO_ENGINE_NR
) {
224 nv_error(priv
, "unsupported engines 0x%08x\n", args
->engine
);
228 ret
= nouveau_fifo_channel_create(parent
, engine
, oclass
, 1,
229 priv
->user
.bar
.offset
, 0x200,
231 fifo_engine
[i
].mask
, &chan
);
232 *pobject
= nv_object(chan
);
236 nv_parent(chan
)->context_attach
= nve0_fifo_context_attach
;
237 nv_parent(chan
)->context_detach
= nve0_fifo_context_detach
;
240 usermem
= chan
->base
.chid
* 0x200;
241 ioffset
= args
->ioffset
;
242 ilength
= order_base_2(args
->ilength
/ 8);
244 for (i
= 0; i
< 0x200; i
+= 4)
245 nv_wo32(priv
->user
.mem
, usermem
+ i
, 0x00000000);
247 nv_wo32(base
, 0x08, lower_32_bits(priv
->user
.mem
->addr
+ usermem
));
248 nv_wo32(base
, 0x0c, upper_32_bits(priv
->user
.mem
->addr
+ usermem
));
249 nv_wo32(base
, 0x10, 0x0000face);
250 nv_wo32(base
, 0x30, 0xfffff902);
251 nv_wo32(base
, 0x48, lower_32_bits(ioffset
));
252 nv_wo32(base
, 0x4c, upper_32_bits(ioffset
) | (ilength
<< 16));
253 nv_wo32(base
, 0x84, 0x20400000);
254 nv_wo32(base
, 0x94, 0x30000001);
255 nv_wo32(base
, 0x9c, 0x00000100);
256 nv_wo32(base
, 0xac, 0x0000001f);
257 nv_wo32(base
, 0xe8, chan
->base
.chid
);
258 nv_wo32(base
, 0xb8, 0xf8000000);
259 nv_wo32(base
, 0xf8, 0x10003080); /* 0x002310 */
260 nv_wo32(base
, 0xfc, 0x10000010); /* 0x002350 */
266 nve0_fifo_chan_init(struct nouveau_object
*object
)
268 struct nouveau_gpuobj
*base
= nv_gpuobj(object
->parent
);
269 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
270 struct nve0_fifo_chan
*chan
= (void *)object
;
271 u32 chid
= chan
->base
.chid
;
274 ret
= nouveau_fifo_channel_init(&chan
->base
);
278 nv_mask(priv
, 0x800004 + (chid
* 8), 0x000f0000, chan
->engine
<< 16);
279 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x80000000 | base
->addr
>> 12);
280 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
281 nve0_fifo_playlist_update(priv
, chan
->engine
);
282 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000400, 0x00000400);
287 nve0_fifo_chan_fini(struct nouveau_object
*object
, bool suspend
)
289 struct nve0_fifo_priv
*priv
= (void *)object
->engine
;
290 struct nve0_fifo_chan
*chan
= (void *)object
;
291 u32 chid
= chan
->base
.chid
;
293 nv_mask(priv
, 0x800004 + (chid
* 8), 0x00000800, 0x00000800);
294 nve0_fifo_playlist_update(priv
, chan
->engine
);
295 nv_wr32(priv
, 0x800000 + (chid
* 8), 0x00000000);
297 return nouveau_fifo_channel_fini(&chan
->base
, suspend
);
300 static struct nouveau_ofuncs
302 .ctor
= nve0_fifo_chan_ctor
,
303 .dtor
= _nouveau_fifo_channel_dtor
,
304 .init
= nve0_fifo_chan_init
,
305 .fini
= nve0_fifo_chan_fini
,
306 .rd32
= _nouveau_fifo_channel_rd32
,
307 .wr32
= _nouveau_fifo_channel_wr32
,
310 static struct nouveau_oclass
311 nve0_fifo_sclass
[] = {
312 { NVE0_CHANNEL_IND_CLASS
, &nve0_fifo_ofuncs
},
/*******************************************************************************
 * FIFO context - instmem heap and vm setup
 ******************************************************************************/
321 nve0_fifo_context_ctor(struct nouveau_object
*parent
,
322 struct nouveau_object
*engine
,
323 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
324 struct nouveau_object
**pobject
)
326 struct nve0_fifo_base
*base
;
329 ret
= nouveau_fifo_context_create(parent
, engine
, oclass
, NULL
, 0x1000,
330 0x1000, NVOBJ_FLAG_ZERO_ALLOC
, &base
);
331 *pobject
= nv_object(base
);
335 ret
= nouveau_gpuobj_new(nv_object(base
), NULL
, 0x10000, 0x1000, 0,
340 nv_wo32(base
, 0x0200, lower_32_bits(base
->pgd
->addr
));
341 nv_wo32(base
, 0x0204, upper_32_bits(base
->pgd
->addr
));
342 nv_wo32(base
, 0x0208, 0xffffffff);
343 nv_wo32(base
, 0x020c, 0x000000ff);
345 ret
= nouveau_vm_ref(nouveau_client(parent
)->vm
, &base
->vm
, base
->pgd
);
353 nve0_fifo_context_dtor(struct nouveau_object
*object
)
355 struct nve0_fifo_base
*base
= (void *)object
;
356 nouveau_vm_ref(NULL
, &base
->vm
, base
->pgd
);
357 nouveau_gpuobj_ref(NULL
, &base
->pgd
);
358 nouveau_fifo_context_destroy(&base
->base
);
361 static struct nouveau_oclass
363 .handle
= NV_ENGCTX(FIFO
, 0xe0),
364 .ofuncs
= &(struct nouveau_ofuncs
) {
365 .ctor
= nve0_fifo_context_ctor
,
366 .dtor
= nve0_fifo_context_dtor
,
367 .init
= _nouveau_fifo_context_init
,
368 .fini
= _nouveau_fifo_context_fini
,
369 .rd32
= _nouveau_fifo_context_rd32
,
370 .wr32
= _nouveau_fifo_context_wr32
,
/*******************************************************************************
 * PFIFO engine
 ******************************************************************************/
378 static const struct nouveau_enum nve0_fifo_fault_unit
[] = {
382 static const struct nouveau_enum nve0_fifo_fault_reason
[] = {
383 { 0x00, "PT_NOT_PRESENT" },
384 { 0x01, "PT_TOO_SHORT" },
385 { 0x02, "PAGE_NOT_PRESENT" },
386 { 0x03, "VM_LIMIT_EXCEEDED" },
387 { 0x04, "NO_CHANNEL" },
388 { 0x05, "PAGE_SYSTEM_ONLY" },
389 { 0x06, "PAGE_READ_ONLY" },
390 { 0x0a, "COMPRESSED_SYSRAM" },
391 { 0x0c, "INVALID_STORAGE_TYPE" },
395 static const struct nouveau_enum nve0_fifo_fault_hubclient
[] = {
399 static const struct nouveau_enum nve0_fifo_fault_gpcclient
[] = {
403 static const struct nouveau_bitfield nve0_fifo_subfifo_intr
[] = {
404 { 0x00200000, "ILLEGAL_MTHD" },
405 { 0x00800000, "EMPTY_SUBC" },
410 nve0_fifo_isr_vm_fault(struct nve0_fifo_priv
*priv
, int unit
)
412 u32 inst
= nv_rd32(priv
, 0x2800 + (unit
* 0x10));
413 u32 valo
= nv_rd32(priv
, 0x2804 + (unit
* 0x10));
414 u32 vahi
= nv_rd32(priv
, 0x2808 + (unit
* 0x10));
415 u32 stat
= nv_rd32(priv
, 0x280c + (unit
* 0x10));
416 u32 client
= (stat
& 0x00001f00) >> 8;
417 const struct nouveau_enum
*en
;
418 struct nouveau_engine
*engine
;
419 struct nouveau_object
*engctx
= NULL
;
421 nv_error(priv
, "PFIFO: %s fault at 0x%010llx [", (stat
& 0x00000080) ?
422 "write" : "read", (u64
)vahi
<< 32 | valo
);
423 nouveau_enum_print(nve0_fifo_fault_reason
, stat
& 0x0000000f);
425 en
= nouveau_enum_print(nve0_fifo_fault_unit
, unit
);
426 if (stat
& 0x00000040) {
428 nouveau_enum_print(nve0_fifo_fault_hubclient
, client
);
430 pr_cont("/GPC%d/", (stat
& 0x1f000000) >> 24);
431 nouveau_enum_print(nve0_fifo_fault_gpcclient
, client
);
434 if (en
&& en
->data2
) {
435 engine
= nouveau_engine(priv
, en
->data2
);
437 engctx
= nouveau_engctx_get(engine
, inst
);
441 pr_cont(" on channel 0x%010llx [%s]\n", (u64
)inst
<< 12,
442 nouveau_client_name(engctx
));
444 nouveau_engctx_put(engctx
);
448 nve0_fifo_swmthd(struct nve0_fifo_priv
*priv
, u32 chid
, u32 mthd
, u32 data
)
450 struct nve0_fifo_chan
*chan
= NULL
;
451 struct nouveau_handle
*bind
;
455 spin_lock_irqsave(&priv
->base
.lock
, flags
);
456 if (likely(chid
>= priv
->base
.min
&& chid
<= priv
->base
.max
))
457 chan
= (void *)priv
->base
.channel
[chid
];
461 bind
= nouveau_namedb_get_class(nv_namedb(chan
), 0x906e);
463 if (!mthd
|| !nv_call(bind
->object
, mthd
, data
))
465 nouveau_namedb_put(bind
);
469 spin_unlock_irqrestore(&priv
->base
.lock
, flags
);
474 nve0_fifo_isr_subfifo_intr(struct nve0_fifo_priv
*priv
, int unit
)
476 u32 stat
= nv_rd32(priv
, 0x040108 + (unit
* 0x2000));
477 u32 addr
= nv_rd32(priv
, 0x0400c0 + (unit
* 0x2000));
478 u32 data
= nv_rd32(priv
, 0x0400c4 + (unit
* 0x2000));
479 u32 chid
= nv_rd32(priv
, 0x040120 + (unit
* 0x2000)) & 0xfff;
480 u32 subc
= (addr
& 0x00070000) >> 16;
481 u32 mthd
= (addr
& 0x00003ffc);
484 if (stat
& 0x00800000) {
485 if (!nve0_fifo_swmthd(priv
, chid
, mthd
, data
))
490 nv_error(priv
, "SUBFIFO%d:", unit
);
491 nouveau_bitfield_print(nve0_fifo_subfifo_intr
, show
);
494 "SUBFIFO%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
496 nouveau_client_name_for_fifo_chid(&priv
->base
, chid
),
500 nv_wr32(priv
, 0x0400c0 + (unit
* 0x2000), 0x80600008);
501 nv_wr32(priv
, 0x040108 + (unit
* 0x2000), stat
);
505 nve0_fifo_intr(struct nouveau_subdev
*subdev
)
507 struct nve0_fifo_priv
*priv
= (void *)subdev
;
508 u32 mask
= nv_rd32(priv
, 0x002140);
509 u32 stat
= nv_rd32(priv
, 0x002100) & mask
;
511 if (stat
& 0x00000100) {
512 nv_warn(priv
, "unknown status 0x00000100\n");
513 nv_wr32(priv
, 0x002100, 0x00000100);
517 if (stat
& 0x10000000) {
518 u32 units
= nv_rd32(priv
, 0x00259c);
523 nve0_fifo_isr_vm_fault(priv
, i
);
527 nv_wr32(priv
, 0x00259c, units
);
531 if (stat
& 0x20000000) {
532 u32 units
= nv_rd32(priv
, 0x0025a0);
537 nve0_fifo_isr_subfifo_intr(priv
, i
);
541 nv_wr32(priv
, 0x0025a0, units
);
545 if (stat
& 0x40000000) {
546 nv_warn(priv
, "unknown status 0x40000000\n");
547 nv_mask(priv
, 0x002a00, 0x00000000, 0x00000000);
551 if (stat
& 0x80000000) {
552 nouveau_event_trigger(priv
->base
.uevent
, 0);
553 nv_wr32(priv
, 0x002100, 0x80000000);
558 nv_fatal(priv
, "unhandled status 0x%08x\n", stat
);
559 nv_wr32(priv
, 0x002100, stat
);
560 nv_wr32(priv
, 0x002140, 0);
565 nve0_fifo_uevent_enable(struct nouveau_event
*event
, int index
)
567 struct nve0_fifo_priv
*priv
= event
->priv
;
568 nv_mask(priv
, 0x002140, 0x80000000, 0x80000000);
572 nve0_fifo_uevent_disable(struct nouveau_event
*event
, int index
)
574 struct nve0_fifo_priv
*priv
= event
->priv
;
575 nv_mask(priv
, 0x002140, 0x80000000, 0x00000000);
579 nve0_fifo_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
580 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
581 struct nouveau_object
**pobject
)
583 struct nve0_fifo_priv
*priv
;
586 ret
= nouveau_fifo_create(parent
, engine
, oclass
, 0, 4095, &priv
);
587 *pobject
= nv_object(priv
);
591 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
592 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
593 0, &priv
->engine
[i
].playlist
[0]);
597 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 0x8000, 0x1000,
598 0, &priv
->engine
[i
].playlist
[1]);
603 ret
= nouveau_gpuobj_new(nv_object(priv
), NULL
, 4096 * 0x200, 0x1000,
604 NVOBJ_FLAG_ZERO_ALLOC
, &priv
->user
.mem
);
608 ret
= nouveau_gpuobj_map(priv
->user
.mem
, NV_MEM_ACCESS_RW
,
613 priv
->base
.uevent
->enable
= nve0_fifo_uevent_enable
;
614 priv
->base
.uevent
->disable
= nve0_fifo_uevent_disable
;
615 priv
->base
.uevent
->priv
= priv
;
617 nv_subdev(priv
)->unit
= 0x00000100;
618 nv_subdev(priv
)->intr
= nve0_fifo_intr
;
619 nv_engine(priv
)->cclass
= &nve0_fifo_cclass
;
620 nv_engine(priv
)->sclass
= nve0_fifo_sclass
;
625 nve0_fifo_dtor(struct nouveau_object
*object
)
627 struct nve0_fifo_priv
*priv
= (void *)object
;
630 nouveau_gpuobj_unmap(&priv
->user
.bar
);
631 nouveau_gpuobj_ref(NULL
, &priv
->user
.mem
);
633 for (i
= 0; i
< FIFO_ENGINE_NR
; i
++) {
634 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[1]);
635 nouveau_gpuobj_ref(NULL
, &priv
->engine
[i
].playlist
[0]);
638 nouveau_fifo_destroy(&priv
->base
);
642 nve0_fifo_init(struct nouveau_object
*object
)
644 struct nve0_fifo_priv
*priv
= (void *)object
;
647 ret
= nouveau_fifo_init(&priv
->base
);
651 /* enable all available PSUBFIFOs */
652 nv_wr32(priv
, 0x000204, 0xffffffff);
653 priv
->spoon_nr
= hweight32(nv_rd32(priv
, 0x000204));
654 nv_debug(priv
, "%d subfifo(s)\n", priv
->spoon_nr
);
657 for (i
= 0; i
< priv
->spoon_nr
; i
++) {
658 nv_mask(priv
, 0x04013c + (i
* 0x2000), 0x10000100, 0x00000000);
659 nv_wr32(priv
, 0x040108 + (i
* 0x2000), 0xffffffff); /* INTR */
660 nv_wr32(priv
, 0x04010c + (i
* 0x2000), 0xfffffeff); /* INTREN */
663 nv_wr32(priv
, 0x002254, 0x10000000 | priv
->user
.bar
.offset
>> 12);
665 nv_wr32(priv
, 0x002a00, 0xffffffff);
666 nv_wr32(priv
, 0x002100, 0xffffffff);
667 nv_wr32(priv
, 0x002140, 0x3fffffff);
671 struct nouveau_oclass
*
672 nve0_fifo_oclass
= &(struct nouveau_oclass
) {
673 .handle
= NV_ENGINE(FIFO
, 0xe0),
674 .ofuncs
= &(struct nouveau_ofuncs
) {
675 .ctor
= nve0_fifo_ctor
,
676 .dtor
= nve0_fifo_dtor
,
677 .init
= nve0_fifo_init
,
678 .fini
= _nouveau_fifo_fini
,