/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
25 #include <core/object.h>
26 #include <core/parent.h>
27 #include <core/handle.h>
28 #include <core/class.h>
30 #include <engine/disp.h>
32 #include <subdev/bios.h>
33 #include <subdev/bios/dcb.h>
34 #include <subdev/bios/disp.h>
35 #include <subdev/bios/init.h>
36 #include <subdev/bios/pll.h>
37 #include <subdev/devinit.h>
38 #include <subdev/fb.h>
39 #include <subdev/timer.h>
43 /*******************************************************************************
44 * EVO DMA channel base class
45 ******************************************************************************/
48 nvd0_disp_dmac_object_attach(struct nouveau_object
*parent
,
49 struct nouveau_object
*object
, u32 name
)
51 struct nv50_disp_base
*base
= (void *)parent
->parent
;
52 struct nv50_disp_chan
*chan
= (void *)parent
;
53 u32 addr
= nv_gpuobj(object
)->node
->offset
;
54 u32 data
= (chan
->chid
<< 27) | (addr
<< 9) | 0x00000001;
55 return nouveau_ramht_insert(base
->ramht
, chan
->chid
, name
, data
);
59 nvd0_disp_dmac_object_detach(struct nouveau_object
*parent
, int cookie
)
61 struct nv50_disp_base
*base
= (void *)parent
->parent
;
62 nouveau_ramht_remove(base
->ramht
, cookie
);
66 nvd0_disp_dmac_init(struct nouveau_object
*object
)
68 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
69 struct nv50_disp_dmac
*dmac
= (void *)object
;
70 int chid
= dmac
->base
.chid
;
73 ret
= nv50_disp_chan_init(&dmac
->base
);
77 /* enable error reporting */
78 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000001 << chid
);
79 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
81 /* initialise channel for dma command submission */
82 nv_wr32(priv
, 0x610494 + (chid
* 0x0010), dmac
->push
);
83 nv_wr32(priv
, 0x610498 + (chid
* 0x0010), 0x00010000);
84 nv_wr32(priv
, 0x61049c + (chid
* 0x0010), 0x00000001);
85 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000010, 0x00000010);
86 nv_wr32(priv
, 0x640000 + (chid
* 0x1000), 0x00000000);
87 nv_wr32(priv
, 0x610490 + (chid
* 0x0010), 0x00000013);
89 /* wait for it to go inactive */
90 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x80000000, 0x00000000)) {
91 nv_error(dmac
, "init: 0x%08x\n",
92 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
100 nvd0_disp_dmac_fini(struct nouveau_object
*object
, bool suspend
)
102 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
103 struct nv50_disp_dmac
*dmac
= (void *)object
;
104 int chid
= dmac
->base
.chid
;
106 /* deactivate channel */
107 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00001010, 0x00001000);
108 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000003, 0x00000000);
109 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x001e0000, 0x00000000)) {
110 nv_error(dmac
, "fini: 0x%08x\n",
111 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
116 /* disable error reporting */
117 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
118 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
120 return nv50_disp_chan_fini(&dmac
->base
, suspend
);
123 /*******************************************************************************
124 * EVO master channel object
125 ******************************************************************************/
128 nvd0_disp_mast_ctor(struct nouveau_object
*parent
,
129 struct nouveau_object
*engine
,
130 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
131 struct nouveau_object
**pobject
)
133 struct nv50_display_mast_class
*args
= data
;
134 struct nv50_disp_dmac
*mast
;
137 if (size
< sizeof(*args
))
140 ret
= nv50_disp_dmac_create_(parent
, engine
, oclass
, args
->pushbuf
,
141 0, sizeof(*mast
), (void **)&mast
);
142 *pobject
= nv_object(mast
);
146 nv_parent(mast
)->object_attach
= nvd0_disp_dmac_object_attach
;
147 nv_parent(mast
)->object_detach
= nvd0_disp_dmac_object_detach
;
152 nvd0_disp_mast_init(struct nouveau_object
*object
)
154 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
155 struct nv50_disp_dmac
*mast
= (void *)object
;
158 ret
= nv50_disp_chan_init(&mast
->base
);
162 /* enable error reporting */
163 nv_mask(priv
, 0x610090, 0x00000001, 0x00000001);
164 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000001);
166 /* initialise channel for dma command submission */
167 nv_wr32(priv
, 0x610494, mast
->push
);
168 nv_wr32(priv
, 0x610498, 0x00010000);
169 nv_wr32(priv
, 0x61049c, 0x00000001);
170 nv_mask(priv
, 0x610490, 0x00000010, 0x00000010);
171 nv_wr32(priv
, 0x640000, 0x00000000);
172 nv_wr32(priv
, 0x610490, 0x01000013);
174 /* wait for it to go inactive */
175 if (!nv_wait(priv
, 0x610490, 0x80000000, 0x00000000)) {
176 nv_error(mast
, "init: 0x%08x\n", nv_rd32(priv
, 0x610490));
184 nvd0_disp_mast_fini(struct nouveau_object
*object
, bool suspend
)
186 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
187 struct nv50_disp_dmac
*mast
= (void *)object
;
189 /* deactivate channel */
190 nv_mask(priv
, 0x610490, 0x00000010, 0x00000000);
191 nv_mask(priv
, 0x610490, 0x00000003, 0x00000000);
192 if (!nv_wait(priv
, 0x610490, 0x001e0000, 0x00000000)) {
193 nv_error(mast
, "fini: 0x%08x\n", nv_rd32(priv
, 0x610490));
198 /* disable error reporting */
199 nv_mask(priv
, 0x610090, 0x00000001, 0x00000000);
200 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000000);
202 return nv50_disp_chan_fini(&mast
->base
, suspend
);
205 struct nouveau_ofuncs
206 nvd0_disp_mast_ofuncs
= {
207 .ctor
= nvd0_disp_mast_ctor
,
208 .dtor
= nv50_disp_dmac_dtor
,
209 .init
= nvd0_disp_mast_init
,
210 .fini
= nvd0_disp_mast_fini
,
211 .rd32
= nv50_disp_chan_rd32
,
212 .wr32
= nv50_disp_chan_wr32
,
215 /*******************************************************************************
216 * EVO sync channel objects
217 ******************************************************************************/
220 nvd0_disp_sync_ctor(struct nouveau_object
*parent
,
221 struct nouveau_object
*engine
,
222 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
223 struct nouveau_object
**pobject
)
225 struct nv50_display_sync_class
*args
= data
;
226 struct nv50_disp_priv
*priv
= (void *)engine
;
227 struct nv50_disp_dmac
*dmac
;
230 if (size
< sizeof(*args
) || args
->head
>= priv
->head
.nr
)
233 ret
= nv50_disp_dmac_create_(parent
, engine
, oclass
, args
->pushbuf
,
234 1 + args
->head
, sizeof(*dmac
),
236 *pobject
= nv_object(dmac
);
240 nv_parent(dmac
)->object_attach
= nvd0_disp_dmac_object_attach
;
241 nv_parent(dmac
)->object_detach
= nvd0_disp_dmac_object_detach
;
245 struct nouveau_ofuncs
246 nvd0_disp_sync_ofuncs
= {
247 .ctor
= nvd0_disp_sync_ctor
,
248 .dtor
= nv50_disp_dmac_dtor
,
249 .init
= nvd0_disp_dmac_init
,
250 .fini
= nvd0_disp_dmac_fini
,
251 .rd32
= nv50_disp_chan_rd32
,
252 .wr32
= nv50_disp_chan_wr32
,
255 /*******************************************************************************
256 * EVO overlay channel objects
257 ******************************************************************************/
260 nvd0_disp_ovly_ctor(struct nouveau_object
*parent
,
261 struct nouveau_object
*engine
,
262 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
263 struct nouveau_object
**pobject
)
265 struct nv50_display_ovly_class
*args
= data
;
266 struct nv50_disp_priv
*priv
= (void *)engine
;
267 struct nv50_disp_dmac
*dmac
;
270 if (size
< sizeof(*args
) || args
->head
>= priv
->head
.nr
)
273 ret
= nv50_disp_dmac_create_(parent
, engine
, oclass
, args
->pushbuf
,
274 5 + args
->head
, sizeof(*dmac
),
276 *pobject
= nv_object(dmac
);
280 nv_parent(dmac
)->object_attach
= nvd0_disp_dmac_object_attach
;
281 nv_parent(dmac
)->object_detach
= nvd0_disp_dmac_object_detach
;
285 struct nouveau_ofuncs
286 nvd0_disp_ovly_ofuncs
= {
287 .ctor
= nvd0_disp_ovly_ctor
,
288 .dtor
= nv50_disp_dmac_dtor
,
289 .init
= nvd0_disp_dmac_init
,
290 .fini
= nvd0_disp_dmac_fini
,
291 .rd32
= nv50_disp_chan_rd32
,
292 .wr32
= nv50_disp_chan_wr32
,
295 /*******************************************************************************
296 * EVO PIO channel base class
297 ******************************************************************************/
/* Thin helper: create the base channel object for a PIO channel.
 * NOTE(review): the extracted text is cut after the first argument line;
 * the `length, pobject` tail is reconstructed — verify against upstream.
 */
static int
nvd0_disp_pioc_create_(struct nouveau_object *parent,
		       struct nouveau_object *engine,
		       struct nouveau_oclass *oclass, int chid,
		       int length, void **pobject)
{
	return nv50_disp_chan_create_(parent, engine, oclass, chid,
				      length, pobject);
}
310 nvd0_disp_pioc_dtor(struct nouveau_object
*object
)
312 struct nv50_disp_pioc
*pioc
= (void *)object
;
313 nv50_disp_chan_destroy(&pioc
->base
);
317 nvd0_disp_pioc_init(struct nouveau_object
*object
)
319 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
320 struct nv50_disp_pioc
*pioc
= (void *)object
;
321 int chid
= pioc
->base
.chid
;
324 ret
= nv50_disp_chan_init(&pioc
->base
);
328 /* enable error reporting */
329 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000001 << chid
);
330 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
332 /* activate channel */
333 nv_wr32(priv
, 0x610490 + (chid
* 0x10), 0x00000001);
334 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00010000)) {
335 nv_error(pioc
, "init: 0x%08x\n",
336 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
344 nvd0_disp_pioc_fini(struct nouveau_object
*object
, bool suspend
)
346 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
347 struct nv50_disp_pioc
*pioc
= (void *)object
;
348 int chid
= pioc
->base
.chid
;
350 nv_mask(priv
, 0x610490 + (chid
* 0x10), 0x00000001, 0x00000000);
351 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00000000)) {
352 nv_error(pioc
, "timeout: 0x%08x\n",
353 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
358 /* disable error reporting */
359 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
360 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
362 return nv50_disp_chan_fini(&pioc
->base
, suspend
);
365 /*******************************************************************************
366 * EVO immediate overlay channel objects
367 ******************************************************************************/
370 nvd0_disp_oimm_ctor(struct nouveau_object
*parent
,
371 struct nouveau_object
*engine
,
372 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
373 struct nouveau_object
**pobject
)
375 struct nv50_display_oimm_class
*args
= data
;
376 struct nv50_disp_priv
*priv
= (void *)engine
;
377 struct nv50_disp_pioc
*pioc
;
380 if (size
< sizeof(*args
) || args
->head
>= priv
->head
.nr
)
383 ret
= nvd0_disp_pioc_create_(parent
, engine
, oclass
, 9 + args
->head
,
384 sizeof(*pioc
), (void **)&pioc
);
385 *pobject
= nv_object(pioc
);
392 struct nouveau_ofuncs
393 nvd0_disp_oimm_ofuncs
= {
394 .ctor
= nvd0_disp_oimm_ctor
,
395 .dtor
= nvd0_disp_pioc_dtor
,
396 .init
= nvd0_disp_pioc_init
,
397 .fini
= nvd0_disp_pioc_fini
,
398 .rd32
= nv50_disp_chan_rd32
,
399 .wr32
= nv50_disp_chan_wr32
,
402 /*******************************************************************************
403 * EVO cursor channel objects
404 ******************************************************************************/
407 nvd0_disp_curs_ctor(struct nouveau_object
*parent
,
408 struct nouveau_object
*engine
,
409 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
410 struct nouveau_object
**pobject
)
412 struct nv50_display_curs_class
*args
= data
;
413 struct nv50_disp_priv
*priv
= (void *)engine
;
414 struct nv50_disp_pioc
*pioc
;
417 if (size
< sizeof(*args
) || args
->head
>= priv
->head
.nr
)
420 ret
= nvd0_disp_pioc_create_(parent
, engine
, oclass
, 13 + args
->head
,
421 sizeof(*pioc
), (void **)&pioc
);
422 *pobject
= nv_object(pioc
);
429 struct nouveau_ofuncs
430 nvd0_disp_curs_ofuncs
= {
431 .ctor
= nvd0_disp_curs_ctor
,
432 .dtor
= nvd0_disp_pioc_dtor
,
433 .init
= nvd0_disp_pioc_init
,
434 .fini
= nvd0_disp_pioc_fini
,
435 .rd32
= nv50_disp_chan_rd32
,
436 .wr32
= nv50_disp_chan_wr32
,
439 /*******************************************************************************
440 * Base display object
441 ******************************************************************************/
444 nvd0_disp_base_scanoutpos(struct nouveau_object
*object
, u32 mthd
,
445 void *data
, u32 size
)
447 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
448 struct nv04_display_scanoutpos
*args
= data
;
449 const int head
= (mthd
& NV50_DISP_MTHD_HEAD
);
450 u32 blanke
, blanks
, total
;
452 if (size
< sizeof(*args
) || head
>= priv
->head
.nr
)
455 total
= nv_rd32(priv
, 0x640414 + (head
* 0x300));
456 blanke
= nv_rd32(priv
, 0x64041c + (head
* 0x300));
457 blanks
= nv_rd32(priv
, 0x640420 + (head
* 0x300));
459 args
->vblanke
= (blanke
& 0xffff0000) >> 16;
460 args
->hblanke
= (blanke
& 0x0000ffff);
461 args
->vblanks
= (blanks
& 0xffff0000) >> 16;
462 args
->hblanks
= (blanks
& 0x0000ffff);
463 args
->vtotal
= ( total
& 0xffff0000) >> 16;
464 args
->htotal
= ( total
& 0x0000ffff);
466 args
->time
[0] = ktime_to_ns(ktime_get());
467 args
->vline
= nv_rd32(priv
, 0x616340 + (head
* 0x800)) & 0xffff;
468 args
->time
[1] = ktime_to_ns(ktime_get()); /* vline read locks hline */
469 args
->hline
= nv_rd32(priv
, 0x616344 + (head
* 0x800)) & 0xffff;
474 nvd0_disp_base_vblank_enable(struct nouveau_event
*event
, int head
)
476 nv_mask(event
->priv
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000001);
480 nvd0_disp_base_vblank_disable(struct nouveau_event
*event
, int head
)
482 nv_mask(event
->priv
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000000);
486 nvd0_disp_base_ctor(struct nouveau_object
*parent
,
487 struct nouveau_object
*engine
,
488 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
489 struct nouveau_object
**pobject
)
491 struct nv50_disp_priv
*priv
= (void *)engine
;
492 struct nv50_disp_base
*base
;
495 ret
= nouveau_parent_create(parent
, engine
, oclass
, 0,
496 priv
->sclass
, 0, &base
);
497 *pobject
= nv_object(base
);
501 priv
->base
.vblank
->priv
= priv
;
502 priv
->base
.vblank
->enable
= nvd0_disp_base_vblank_enable
;
503 priv
->base
.vblank
->disable
= nvd0_disp_base_vblank_disable
;
505 return nouveau_ramht_new(nv_object(base
), nv_object(base
), 0x1000, 0,
510 nvd0_disp_base_dtor(struct nouveau_object
*object
)
512 struct nv50_disp_base
*base
= (void *)object
;
513 nouveau_ramht_ref(NULL
, &base
->ramht
);
514 nouveau_parent_destroy(&base
->base
);
518 nvd0_disp_base_init(struct nouveau_object
*object
)
520 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
521 struct nv50_disp_base
*base
= (void *)object
;
525 ret
= nouveau_parent_init(&base
->base
);
529 /* The below segments of code copying values from one register to
530 * another appear to inform EVO of the display capabilities or
535 for (i
= 0; i
< priv
->head
.nr
; i
++) {
536 tmp
= nv_rd32(priv
, 0x616104 + (i
* 0x800));
537 nv_wr32(priv
, 0x6101b4 + (i
* 0x800), tmp
);
538 tmp
= nv_rd32(priv
, 0x616108 + (i
* 0x800));
539 nv_wr32(priv
, 0x6101b8 + (i
* 0x800), tmp
);
540 tmp
= nv_rd32(priv
, 0x61610c + (i
* 0x800));
541 nv_wr32(priv
, 0x6101bc + (i
* 0x800), tmp
);
545 for (i
= 0; i
< priv
->dac
.nr
; i
++) {
546 tmp
= nv_rd32(priv
, 0x61a000 + (i
* 0x800));
547 nv_wr32(priv
, 0x6101c0 + (i
* 0x800), tmp
);
551 for (i
= 0; i
< priv
->sor
.nr
; i
++) {
552 tmp
= nv_rd32(priv
, 0x61c000 + (i
* 0x800));
553 nv_wr32(priv
, 0x6301c4 + (i
* 0x800), tmp
);
556 /* steal display away from vbios, or something like that */
557 if (nv_rd32(priv
, 0x6100ac) & 0x00000100) {
558 nv_wr32(priv
, 0x6100ac, 0x00000100);
559 nv_mask(priv
, 0x6194e8, 0x00000001, 0x00000000);
560 if (!nv_wait(priv
, 0x6194e8, 0x00000002, 0x00000000)) {
561 nv_error(priv
, "timeout acquiring display\n");
566 /* point at display engine memory area (hash table, objects) */
567 nv_wr32(priv
, 0x610010, (nv_gpuobj(object
->parent
)->addr
>> 8) | 9);
569 /* enable supervisor interrupts, disable everything else */
570 nv_wr32(priv
, 0x610090, 0x00000000);
571 nv_wr32(priv
, 0x6100a0, 0x00000000);
572 nv_wr32(priv
, 0x6100b0, 0x00000307);
574 /* disable underflow reporting, preventing an intermittent issue
575 * on some nve4 boards where the production vbios left this
576 * setting enabled by default.
578 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
580 for (i
= 0; i
< priv
->head
.nr
; i
++)
581 nv_mask(priv
, 0x616308 + (i
* 0x800), 0x00000111, 0x00000010);
587 nvd0_disp_base_fini(struct nouveau_object
*object
, bool suspend
)
589 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
590 struct nv50_disp_base
*base
= (void *)object
;
592 /* disable all interrupts */
593 nv_wr32(priv
, 0x6100b0, 0x00000000);
595 return nouveau_parent_fini(&base
->base
, suspend
);
598 struct nouveau_ofuncs
599 nvd0_disp_base_ofuncs
= {
600 .ctor
= nvd0_disp_base_ctor
,
601 .dtor
= nvd0_disp_base_dtor
,
602 .init
= nvd0_disp_base_init
,
603 .fini
= nvd0_disp_base_fini
,
606 struct nouveau_omthds
607 nvd0_disp_base_omthds
[] = {
608 { HEAD_MTHD(NV50_DISP_SCANOUTPOS
) , nvd0_disp_base_scanoutpos
},
609 { SOR_MTHD(NV50_DISP_SOR_PWR
) , nv50_sor_mthd
},
610 { SOR_MTHD(NVA3_DISP_SOR_HDA_ELD
) , nv50_sor_mthd
},
611 { SOR_MTHD(NV84_DISP_SOR_HDMI_PWR
) , nv50_sor_mthd
},
612 { SOR_MTHD(NV50_DISP_SOR_LVDS_SCRIPT
) , nv50_sor_mthd
},
613 { DAC_MTHD(NV50_DISP_DAC_PWR
) , nv50_dac_mthd
},
614 { DAC_MTHD(NV50_DISP_DAC_LOAD
) , nv50_dac_mthd
},
615 { PIOR_MTHD(NV50_DISP_PIOR_PWR
) , nv50_pior_mthd
},
616 { PIOR_MTHD(NV50_DISP_PIOR_TMDS_PWR
) , nv50_pior_mthd
},
617 { PIOR_MTHD(NV50_DISP_PIOR_DP_PWR
) , nv50_pior_mthd
},
621 static struct nouveau_oclass
622 nvd0_disp_base_oclass
[] = {
623 { NVD0_DISP_CLASS
, &nvd0_disp_base_ofuncs
, nvd0_disp_base_omthds
},
627 static struct nouveau_oclass
628 nvd0_disp_sclass
[] = {
629 { NVD0_DISP_MAST_CLASS
, &nvd0_disp_mast_ofuncs
},
630 { NVD0_DISP_SYNC_CLASS
, &nvd0_disp_sync_ofuncs
},
631 { NVD0_DISP_OVLY_CLASS
, &nvd0_disp_ovly_ofuncs
},
632 { NVD0_DISP_OIMM_CLASS
, &nvd0_disp_oimm_ofuncs
},
633 { NVD0_DISP_CURS_CLASS
, &nvd0_disp_curs_ofuncs
},
637 /*******************************************************************************
638 * Display engine implementation
639 ******************************************************************************/
642 exec_lookup(struct nv50_disp_priv
*priv
, int head
, int outp
, u32 ctrl
,
643 struct dcb_output
*dcb
, u8
*ver
, u8
*hdr
, u8
*cnt
, u8
*len
,
644 struct nvbios_outp
*info
)
646 struct nouveau_bios
*bios
= nouveau_bios(priv
);
647 u16 mask
, type
, data
;
650 type
= DCB_OUTPUT_ANALOG
;
654 switch (ctrl
& 0x00000f00) {
655 case 0x00000000: type
= DCB_OUTPUT_LVDS
; mask
= 1; break;
656 case 0x00000100: type
= DCB_OUTPUT_TMDS
; mask
= 1; break;
657 case 0x00000200: type
= DCB_OUTPUT_TMDS
; mask
= 2; break;
658 case 0x00000500: type
= DCB_OUTPUT_TMDS
; mask
= 3; break;
659 case 0x00000800: type
= DCB_OUTPUT_DP
; mask
= 1; break;
660 case 0x00000900: type
= DCB_OUTPUT_DP
; mask
= 2; break;
662 nv_error(priv
, "unknown SOR mc 0x%08x\n", ctrl
);
665 dcb
->sorconf
.link
= mask
;
668 mask
= 0x00c0 & (mask
<< 6);
669 mask
|= 0x0001 << outp
;
670 mask
|= 0x0100 << head
;
672 data
= dcb_outp_match(bios
, type
, mask
, ver
, hdr
, dcb
);
676 return nvbios_outp_match(bios
, type
, mask
, ver
, hdr
, cnt
, len
, info
);
680 exec_script(struct nv50_disp_priv
*priv
, int head
, int id
)
682 struct nouveau_bios
*bios
= nouveau_bios(priv
);
683 struct nvbios_outp info
;
684 struct dcb_output dcb
;
685 u8 ver
, hdr
, cnt
, len
;
686 u32 ctrl
= 0x00000000;
690 for (outp
= 0; !(ctrl
& (1 << head
)) && outp
< 8; outp
++) {
691 ctrl
= nv_rd32(priv
, 0x640180 + (outp
* 0x20));
692 if (ctrl
& (1 << head
))
699 data
= exec_lookup(priv
, head
, outp
, ctrl
, &dcb
, &ver
, &hdr
, &cnt
, &len
, &info
);
701 struct nvbios_init init
= {
702 .subdev
= nv_subdev(priv
),
704 .offset
= info
.script
[id
],
710 return nvbios_exec(&init
) == 0;
717 exec_clkcmp(struct nv50_disp_priv
*priv
, int head
, int id
,
718 u32 pclk
, struct dcb_output
*dcb
)
720 struct nouveau_bios
*bios
= nouveau_bios(priv
);
721 struct nvbios_outp info1
;
722 struct nvbios_ocfg info2
;
723 u8 ver
, hdr
, cnt
, len
;
724 u32 ctrl
= 0x00000000;
728 for (outp
= 0; !(ctrl
& (1 << head
)) && outp
< 8; outp
++) {
729 ctrl
= nv_rd32(priv
, 0x660180 + (outp
* 0x20));
730 if (ctrl
& (1 << head
))
737 data
= exec_lookup(priv
, head
, outp
, ctrl
, dcb
, &ver
, &hdr
, &cnt
, &len
, &info1
);
742 case DCB_OUTPUT_TMDS
:
743 conf
= (ctrl
& 0x00000f00) >> 8;
747 case DCB_OUTPUT_LVDS
:
748 conf
= priv
->sor
.lvdsconf
;
751 conf
= (ctrl
& 0x00000f00) >> 8;
753 case DCB_OUTPUT_ANALOG
:
759 data
= nvbios_ocfg_match(bios
, data
, conf
, &ver
, &hdr
, &cnt
, &len
, &info2
);
760 if (data
&& id
< 0xff) {
761 data
= nvbios_oclk_match(bios
, info2
.clkcmp
[id
], pclk
);
763 struct nvbios_init init
= {
764 .subdev
= nv_subdev(priv
),
/* Supervisor phase 1: run output script 1 for the head. */
static void
nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 1);
}
/* Supervisor phase 2 (step 0): run output script 2 for the head. */
static void
nvd0_disp_intr_unk2_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 2);
}
792 nvd0_disp_intr_unk2_1(struct nv50_disp_priv
*priv
, int head
)
794 struct nouveau_devinit
*devinit
= nouveau_devinit(priv
);
795 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
797 devinit
->pll_set(devinit
, PLL_VPLL0
+ head
, pclk
);
798 nv_wr32(priv
, 0x612200 + (head
* 0x800), 0x00000000);
802 nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv
*priv
, int head
,
803 struct dcb_output
*outp
)
805 const int or = ffs(outp
->or) - 1;
806 const u32 ctrl
= nv_rd32(priv
, 0x660200 + (or * 0x020));
807 const u32 conf
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
808 const u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
809 const u32 link
= ((ctrl
& 0xf00) == 0x800) ? 0 : 1;
810 const u32 hoff
= (head
* 0x800);
811 const u32 soff
= ( or * 0x800);
812 const u32 loff
= (link
* 0x080) + soff
;
813 const u32 symbol
= 100000;
815 u32 dpctrl
= nv_rd32(priv
, 0x61c10c + loff
) & 0x000f0000;
816 u32 clksor
= nv_rd32(priv
, 0x612300 + soff
);
817 u32 datarate
, link_nr
, link_bw
, bits
;
820 if ((conf
& 0x3c0) == 0x180) bits
= 30;
821 else if ((conf
& 0x3c0) == 0x140) bits
= 24;
823 datarate
= (pclk
* bits
) / 8;
825 if (dpctrl
> 0x00030000) link_nr
= 4;
826 else if (dpctrl
> 0x00010000) link_nr
= 2;
829 link_bw
= (clksor
& 0x007c0000) >> 18;
834 do_div(ratio
, link_nr
* link_bw
);
836 value
= (symbol
- ratio
) * TU
;
838 do_div(value
, symbol
);
839 do_div(value
, symbol
);
844 nv_wr32(priv
, 0x616610 + hoff
, value
);
848 nvd0_disp_intr_unk2_2(struct nv50_disp_priv
*priv
, int head
)
850 struct dcb_output outp
;
851 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
852 u32 conf
= exec_clkcmp(priv
, head
, 0xff, pclk
, &outp
);
856 if (outp
.type
== DCB_OUTPUT_DP
) {
857 u32 sync
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
858 switch ((sync
& 0x000003c0) >> 6) {
859 case 6: pclk
= pclk
* 30 / 8; break;
860 case 5: pclk
= pclk
* 24 / 8; break;
863 pclk
= pclk
* 18 / 8;
867 nouveau_dp_train(&priv
->base
, priv
->sor
.dp
,
871 exec_clkcmp(priv
, head
, 0, pclk
, &outp
);
873 if (outp
.type
== DCB_OUTPUT_ANALOG
) {
874 addr
= 0x612280 + (ffs(outp
.or) - 1) * 0x800;
877 if (outp
.type
== DCB_OUTPUT_DP
)
878 nvd0_disp_intr_unk2_2_tu(priv
, head
, &outp
);
879 addr
= 0x612300 + (ffs(outp
.or) - 1) * 0x800;
880 data
= (conf
& 0x0100) ? 0x00000101 : 0x00000000;
883 nv_mask(priv
, addr
, 0x00000707, data
);
888 nvd0_disp_intr_unk4_0(struct nv50_disp_priv
*priv
, int head
)
890 struct dcb_output outp
;
891 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
892 exec_clkcmp(priv
, head
, 1, pclk
, &outp
);
896 nvd0_disp_intr_supervisor(struct work_struct
*work
)
898 struct nv50_disp_priv
*priv
=
899 container_of(work
, struct nv50_disp_priv
, supervisor
);
903 nv_debug(priv
, "supervisor %08x\n", priv
->super
);
904 for (head
= 0; head
< priv
->head
.nr
; head
++) {
905 mask
[head
] = nv_rd32(priv
, 0x6101d4 + (head
* 0x800));
906 nv_debug(priv
, "head %d: 0x%08x\n", head
, mask
[head
]);
909 if (priv
->super
& 0x00000001) {
910 for (head
= 0; head
< priv
->head
.nr
; head
++) {
911 if (!(mask
[head
] & 0x00001000))
913 nvd0_disp_intr_unk1_0(priv
, head
);
916 if (priv
->super
& 0x00000002) {
917 for (head
= 0; head
< priv
->head
.nr
; head
++) {
918 if (!(mask
[head
] & 0x00001000))
920 nvd0_disp_intr_unk2_0(priv
, head
);
922 for (head
= 0; head
< priv
->head
.nr
; head
++) {
923 if (!(mask
[head
] & 0x00010000))
925 nvd0_disp_intr_unk2_1(priv
, head
);
927 for (head
= 0; head
< priv
->head
.nr
; head
++) {
928 if (!(mask
[head
] & 0x00001000))
930 nvd0_disp_intr_unk2_2(priv
, head
);
933 if (priv
->super
& 0x00000004) {
934 for (head
= 0; head
< priv
->head
.nr
; head
++) {
935 if (!(mask
[head
] & 0x00001000))
937 nvd0_disp_intr_unk4_0(priv
, head
);
941 for (head
= 0; head
< priv
->head
.nr
; head
++)
942 nv_wr32(priv
, 0x6101d4 + (head
* 0x800), 0x00000000);
943 nv_wr32(priv
, 0x6101d0, 0x80000000);
947 nvd0_disp_intr(struct nouveau_subdev
*subdev
)
949 struct nv50_disp_priv
*priv
= (void *)subdev
;
950 u32 intr
= nv_rd32(priv
, 0x610088);
953 if (intr
& 0x00000001) {
954 u32 stat
= nv_rd32(priv
, 0x61008c);
955 nv_wr32(priv
, 0x61008c, stat
);
959 if (intr
& 0x00000002) {
960 u32 stat
= nv_rd32(priv
, 0x61009c);
961 int chid
= ffs(stat
) - 1;
963 u32 mthd
= nv_rd32(priv
, 0x6101f0 + (chid
* 12));
964 u32 data
= nv_rd32(priv
, 0x6101f4 + (chid
* 12));
965 u32 unkn
= nv_rd32(priv
, 0x6101f8 + (chid
* 12));
967 nv_error(priv
, "chid %d mthd 0x%04x data 0x%08x "
969 chid
, (mthd
& 0x0000ffc), data
, mthd
, unkn
);
970 nv_wr32(priv
, 0x61009c, (1 << chid
));
971 nv_wr32(priv
, 0x6101f0 + (chid
* 12), 0x90000000);
977 if (intr
& 0x00100000) {
978 u32 stat
= nv_rd32(priv
, 0x6100ac);
979 if (stat
& 0x00000007) {
980 priv
->super
= (stat
& 0x00000007);
981 schedule_work(&priv
->supervisor
);
982 nv_wr32(priv
, 0x6100ac, priv
->super
);
987 nv_info(priv
, "unknown intr24 0x%08x\n", stat
);
988 nv_wr32(priv
, 0x6100ac, stat
);
994 for (i
= 0; i
< priv
->head
.nr
; i
++) {
995 u32 mask
= 0x01000000 << i
;
997 u32 stat
= nv_rd32(priv
, 0x6100bc + (i
* 0x800));
998 if (stat
& 0x00000001)
999 nouveau_event_trigger(priv
->base
.vblank
, i
);
1000 nv_mask(priv
, 0x6100bc + (i
* 0x800), 0, 0);
1001 nv_rd32(priv
, 0x6100c0 + (i
* 0x800));
1007 nvd0_disp_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
1008 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
1009 struct nouveau_object
**pobject
)
1011 struct nv50_disp_priv
*priv
;
1012 int heads
= nv_rd32(parent
, 0x022448);
1015 ret
= nouveau_disp_create(parent
, engine
, oclass
, heads
,
1016 "PDISP", "display", &priv
);
1017 *pobject
= nv_object(priv
);
1021 nv_engine(priv
)->sclass
= nvd0_disp_base_oclass
;
1022 nv_engine(priv
)->cclass
= &nv50_disp_cclass
;
1023 nv_subdev(priv
)->intr
= nvd0_disp_intr
;
1024 INIT_WORK(&priv
->supervisor
, nvd0_disp_intr_supervisor
);
1025 priv
->sclass
= nvd0_disp_sclass
;
1026 priv
->head
.nr
= heads
;
1029 priv
->dac
.power
= nv50_dac_power
;
1030 priv
->dac
.sense
= nv50_dac_sense
;
1031 priv
->sor
.power
= nv50_sor_power
;
1032 priv
->sor
.hda_eld
= nvd0_hda_eld
;
1033 priv
->sor
.hdmi
= nvd0_hdmi_ctrl
;
1034 priv
->sor
.dp
= &nvd0_sor_dp_func
;
1038 struct nouveau_oclass
*
1039 nvd0_disp_oclass
= &(struct nv50_disp_impl
) {
1040 .base
.base
.handle
= NV_ENGINE(DISP
, 0x90),
1041 .base
.base
.ofuncs
= &(struct nouveau_ofuncs
) {
1042 .ctor
= nvd0_disp_ctor
,
1043 .dtor
= _nouveau_disp_dtor
,
1044 .init
= _nouveau_disp_init
,
1045 .fini
= _nouveau_disp_fini
,