2 * Copyright 2012 Red Hat Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
25 #include <core/object.h>
26 #include <core/client.h>
27 #include <core/parent.h>
28 #include <core/handle.h>
29 #include <nvif/unpack.h>
30 #include <nvif/class.h>
32 #include <engine/disp.h>
34 #include <subdev/bios.h>
35 #include <subdev/bios/dcb.h>
36 #include <subdev/bios/disp.h>
37 #include <subdev/bios/init.h>
38 #include <subdev/bios/pll.h>
39 #include <subdev/devinit.h>
40 #include <subdev/fb.h>
41 #include <subdev/timer.h>
45 /*******************************************************************************
46 * EVO channel base class
47 ******************************************************************************/
50 nvd0_disp_chan_uevent_fini(struct nvkm_event
*event
, int type
, int index
)
52 struct nv50_disp_priv
*priv
= container_of(event
, typeof(*priv
), uevent
);
53 nv_mask(priv
, 0x610090, 0x00000001 << index
, 0x00000000 << index
);
54 nv_wr32(priv
, 0x61008c, 0x00000001 << index
);
58 nvd0_disp_chan_uevent_init(struct nvkm_event
*event
, int types
, int index
)
60 struct nv50_disp_priv
*priv
= container_of(event
, typeof(*priv
), uevent
);
61 nv_wr32(priv
, 0x61008c, 0x00000001 << index
);
62 nv_mask(priv
, 0x610090, 0x00000001 << index
, 0x00000001 << index
);
65 const struct nvkm_event_func
66 nvd0_disp_chan_uevent
= {
67 .ctor
= nv50_disp_chan_uevent_ctor
,
68 .init
= nvd0_disp_chan_uevent_init
,
69 .fini
= nvd0_disp_chan_uevent_fini
,
72 /*******************************************************************************
73 * EVO DMA channel base class
74 ******************************************************************************/
77 nvd0_disp_dmac_object_attach(struct nouveau_object
*parent
,
78 struct nouveau_object
*object
, u32 name
)
80 struct nv50_disp_base
*base
= (void *)parent
->parent
;
81 struct nv50_disp_chan
*chan
= (void *)parent
;
82 u32 addr
= nv_gpuobj(object
)->node
->offset
;
83 u32 data
= (chan
->chid
<< 27) | (addr
<< 9) | 0x00000001;
84 return nouveau_ramht_insert(base
->ramht
, chan
->chid
, name
, data
);
88 nvd0_disp_dmac_object_detach(struct nouveau_object
*parent
, int cookie
)
90 struct nv50_disp_base
*base
= (void *)parent
->parent
;
91 nouveau_ramht_remove(base
->ramht
, cookie
);
95 nvd0_disp_dmac_init(struct nouveau_object
*object
)
97 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
98 struct nv50_disp_dmac
*dmac
= (void *)object
;
99 int chid
= dmac
->base
.chid
;
102 ret
= nv50_disp_chan_init(&dmac
->base
);
106 /* enable error reporting */
107 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
109 /* initialise channel for dma command submission */
110 nv_wr32(priv
, 0x610494 + (chid
* 0x0010), dmac
->push
);
111 nv_wr32(priv
, 0x610498 + (chid
* 0x0010), 0x00010000);
112 nv_wr32(priv
, 0x61049c + (chid
* 0x0010), 0x00000001);
113 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000010, 0x00000010);
114 nv_wr32(priv
, 0x640000 + (chid
* 0x1000), 0x00000000);
115 nv_wr32(priv
, 0x610490 + (chid
* 0x0010), 0x00000013);
117 /* wait for it to go inactive */
118 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x80000000, 0x00000000)) {
119 nv_error(dmac
, "init: 0x%08x\n",
120 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
128 nvd0_disp_dmac_fini(struct nouveau_object
*object
, bool suspend
)
130 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
131 struct nv50_disp_dmac
*dmac
= (void *)object
;
132 int chid
= dmac
->base
.chid
;
134 /* deactivate channel */
135 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00001010, 0x00001000);
136 nv_mask(priv
, 0x610490 + (chid
* 0x0010), 0x00000003, 0x00000000);
137 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x001e0000, 0x00000000)) {
138 nv_error(dmac
, "fini: 0x%08x\n",
139 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
144 /* disable error reporting and completion notification */
145 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
146 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
148 return nv50_disp_chan_fini(&dmac
->base
, suspend
);
151 /*******************************************************************************
152 * EVO master channel object
153 ******************************************************************************/
155 const struct nv50_disp_mthd_list
156 nvd0_disp_core_mthd_base
= {
160 { 0x0080, 0x660080 },
161 { 0x0084, 0x660084 },
162 { 0x0088, 0x660088 },
163 { 0x008c, 0x000000 },
168 const struct nv50_disp_mthd_list
169 nvd0_disp_core_mthd_dac
= {
173 { 0x0180, 0x660180 },
174 { 0x0184, 0x660184 },
175 { 0x0188, 0x660188 },
176 { 0x0190, 0x660190 },
181 const struct nv50_disp_mthd_list
182 nvd0_disp_core_mthd_sor
= {
186 { 0x0200, 0x660200 },
187 { 0x0204, 0x660204 },
188 { 0x0208, 0x660208 },
189 { 0x0210, 0x660210 },
194 const struct nv50_disp_mthd_list
195 nvd0_disp_core_mthd_pior
= {
199 { 0x0300, 0x660300 },
200 { 0x0304, 0x660304 },
201 { 0x0308, 0x660308 },
202 { 0x0310, 0x660310 },
207 static const struct nv50_disp_mthd_list
208 nvd0_disp_core_mthd_head
= {
212 { 0x0400, 0x660400 },
213 { 0x0404, 0x660404 },
214 { 0x0408, 0x660408 },
215 { 0x040c, 0x66040c },
216 { 0x0410, 0x660410 },
217 { 0x0414, 0x660414 },
218 { 0x0418, 0x660418 },
219 { 0x041c, 0x66041c },
220 { 0x0420, 0x660420 },
221 { 0x0424, 0x660424 },
222 { 0x0428, 0x660428 },
223 { 0x042c, 0x66042c },
224 { 0x0430, 0x660430 },
225 { 0x0434, 0x660434 },
226 { 0x0438, 0x660438 },
227 { 0x0440, 0x660440 },
228 { 0x0444, 0x660444 },
229 { 0x0448, 0x660448 },
230 { 0x044c, 0x66044c },
231 { 0x0450, 0x660450 },
232 { 0x0454, 0x660454 },
233 { 0x0458, 0x660458 },
234 { 0x045c, 0x66045c },
235 { 0x0460, 0x660460 },
236 { 0x0468, 0x660468 },
237 { 0x046c, 0x66046c },
238 { 0x0470, 0x660470 },
239 { 0x0474, 0x660474 },
240 { 0x0480, 0x660480 },
241 { 0x0484, 0x660484 },
242 { 0x048c, 0x66048c },
243 { 0x0490, 0x660490 },
244 { 0x0494, 0x660494 },
245 { 0x0498, 0x660498 },
246 { 0x04b0, 0x6604b0 },
247 { 0x04b8, 0x6604b8 },
248 { 0x04bc, 0x6604bc },
249 { 0x04c0, 0x6604c0 },
250 { 0x04c4, 0x6604c4 },
251 { 0x04c8, 0x6604c8 },
252 { 0x04d0, 0x6604d0 },
253 { 0x04d4, 0x6604d4 },
254 { 0x04e0, 0x6604e0 },
255 { 0x04e4, 0x6604e4 },
256 { 0x04e8, 0x6604e8 },
257 { 0x04ec, 0x6604ec },
258 { 0x04f0, 0x6604f0 },
259 { 0x04f4, 0x6604f4 },
260 { 0x04f8, 0x6604f8 },
261 { 0x04fc, 0x6604fc },
262 { 0x0500, 0x660500 },
263 { 0x0504, 0x660504 },
264 { 0x0508, 0x660508 },
265 { 0x050c, 0x66050c },
266 { 0x0510, 0x660510 },
267 { 0x0514, 0x660514 },
268 { 0x0518, 0x660518 },
269 { 0x051c, 0x66051c },
270 { 0x052c, 0x66052c },
271 { 0x0530, 0x660530 },
272 { 0x054c, 0x66054c },
273 { 0x0550, 0x660550 },
274 { 0x0554, 0x660554 },
275 { 0x0558, 0x660558 },
276 { 0x055c, 0x66055c },
281 static const struct nv50_disp_mthd_chan
282 nvd0_disp_core_mthd_chan
= {
286 { "Global", 1, &nvd0_disp_core_mthd_base
},
287 { "DAC", 3, &nvd0_disp_core_mthd_dac
},
288 { "SOR", 8, &nvd0_disp_core_mthd_sor
},
289 { "PIOR", 4, &nvd0_disp_core_mthd_pior
},
290 { "HEAD", 4, &nvd0_disp_core_mthd_head
},
296 nvd0_disp_core_init(struct nouveau_object
*object
)
298 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
299 struct nv50_disp_dmac
*mast
= (void *)object
;
302 ret
= nv50_disp_chan_init(&mast
->base
);
306 /* enable error reporting */
307 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000001);
309 /* initialise channel for dma command submission */
310 nv_wr32(priv
, 0x610494, mast
->push
);
311 nv_wr32(priv
, 0x610498, 0x00010000);
312 nv_wr32(priv
, 0x61049c, 0x00000001);
313 nv_mask(priv
, 0x610490, 0x00000010, 0x00000010);
314 nv_wr32(priv
, 0x640000, 0x00000000);
315 nv_wr32(priv
, 0x610490, 0x01000013);
317 /* wait for it to go inactive */
318 if (!nv_wait(priv
, 0x610490, 0x80000000, 0x00000000)) {
319 nv_error(mast
, "init: 0x%08x\n", nv_rd32(priv
, 0x610490));
327 nvd0_disp_core_fini(struct nouveau_object
*object
, bool suspend
)
329 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
330 struct nv50_disp_dmac
*mast
= (void *)object
;
332 /* deactivate channel */
333 nv_mask(priv
, 0x610490, 0x00000010, 0x00000000);
334 nv_mask(priv
, 0x610490, 0x00000003, 0x00000000);
335 if (!nv_wait(priv
, 0x610490, 0x001e0000, 0x00000000)) {
336 nv_error(mast
, "fini: 0x%08x\n", nv_rd32(priv
, 0x610490));
341 /* disable error reporting and completion notification */
342 nv_mask(priv
, 0x610090, 0x00000001, 0x00000000);
343 nv_mask(priv
, 0x6100a0, 0x00000001, 0x00000000);
345 return nv50_disp_chan_fini(&mast
->base
, suspend
);
348 struct nv50_disp_chan_impl
349 nvd0_disp_core_ofuncs
= {
350 .base
.ctor
= nv50_disp_core_ctor
,
351 .base
.dtor
= nv50_disp_dmac_dtor
,
352 .base
.init
= nvd0_disp_core_init
,
353 .base
.fini
= nvd0_disp_core_fini
,
354 .base
.ntfy
= nv50_disp_chan_ntfy
,
355 .base
.map
= nv50_disp_chan_map
,
356 .base
.rd32
= nv50_disp_chan_rd32
,
357 .base
.wr32
= nv50_disp_chan_wr32
,
359 .attach
= nvd0_disp_dmac_object_attach
,
360 .detach
= nvd0_disp_dmac_object_detach
,
363 /*******************************************************************************
364 * EVO sync channel objects
365 ******************************************************************************/
367 static const struct nv50_disp_mthd_list
368 nvd0_disp_base_mthd_base
= {
372 { 0x0080, 0x661080 },
373 { 0x0084, 0x661084 },
374 { 0x0088, 0x661088 },
375 { 0x008c, 0x66108c },
376 { 0x0090, 0x661090 },
377 { 0x0094, 0x661094 },
378 { 0x00a0, 0x6610a0 },
379 { 0x00a4, 0x6610a4 },
380 { 0x00c0, 0x6610c0 },
381 { 0x00c4, 0x6610c4 },
382 { 0x00c8, 0x6610c8 },
383 { 0x00cc, 0x6610cc },
384 { 0x00e0, 0x6610e0 },
385 { 0x00e4, 0x6610e4 },
386 { 0x00e8, 0x6610e8 },
387 { 0x00ec, 0x6610ec },
388 { 0x00fc, 0x6610fc },
389 { 0x0100, 0x661100 },
390 { 0x0104, 0x661104 },
391 { 0x0108, 0x661108 },
392 { 0x010c, 0x66110c },
393 { 0x0110, 0x661110 },
394 { 0x0114, 0x661114 },
395 { 0x0118, 0x661118 },
396 { 0x011c, 0x66111c },
397 { 0x0130, 0x661130 },
398 { 0x0134, 0x661134 },
399 { 0x0138, 0x661138 },
400 { 0x013c, 0x66113c },
401 { 0x0140, 0x661140 },
402 { 0x0144, 0x661144 },
403 { 0x0148, 0x661148 },
404 { 0x014c, 0x66114c },
405 { 0x0150, 0x661150 },
406 { 0x0154, 0x661154 },
407 { 0x0158, 0x661158 },
408 { 0x015c, 0x66115c },
409 { 0x0160, 0x661160 },
410 { 0x0164, 0x661164 },
411 { 0x0168, 0x661168 },
412 { 0x016c, 0x66116c },
417 static const struct nv50_disp_mthd_list
418 nvd0_disp_base_mthd_image
= {
422 { 0x0400, 0x661400 },
423 { 0x0404, 0x661404 },
424 { 0x0408, 0x661408 },
425 { 0x040c, 0x66140c },
426 { 0x0410, 0x661410 },
431 const struct nv50_disp_mthd_chan
432 nvd0_disp_base_mthd_chan
= {
436 { "Global", 1, &nvd0_disp_base_mthd_base
},
437 { "Image", 2, &nvd0_disp_base_mthd_image
},
442 struct nv50_disp_chan_impl
443 nvd0_disp_base_ofuncs
= {
444 .base
.ctor
= nv50_disp_base_ctor
,
445 .base
.dtor
= nv50_disp_dmac_dtor
,
446 .base
.init
= nvd0_disp_dmac_init
,
447 .base
.fini
= nvd0_disp_dmac_fini
,
448 .base
.ntfy
= nv50_disp_chan_ntfy
,
449 .base
.map
= nv50_disp_chan_map
,
450 .base
.rd32
= nv50_disp_chan_rd32
,
451 .base
.wr32
= nv50_disp_chan_wr32
,
453 .attach
= nvd0_disp_dmac_object_attach
,
454 .detach
= nvd0_disp_dmac_object_detach
,
457 /*******************************************************************************
458 * EVO overlay channel objects
459 ******************************************************************************/
461 static const struct nv50_disp_mthd_list
462 nvd0_disp_ovly_mthd_base
= {
465 { 0x0080, 0x665080 },
466 { 0x0084, 0x665084 },
467 { 0x0088, 0x665088 },
468 { 0x008c, 0x66508c },
469 { 0x0090, 0x665090 },
470 { 0x0094, 0x665094 },
471 { 0x00a0, 0x6650a0 },
472 { 0x00a4, 0x6650a4 },
473 { 0x00b0, 0x6650b0 },
474 { 0x00b4, 0x6650b4 },
475 { 0x00b8, 0x6650b8 },
476 { 0x00c0, 0x6650c0 },
477 { 0x00e0, 0x6650e0 },
478 { 0x00e4, 0x6650e4 },
479 { 0x00e8, 0x6650e8 },
480 { 0x0100, 0x665100 },
481 { 0x0104, 0x665104 },
482 { 0x0108, 0x665108 },
483 { 0x010c, 0x66510c },
484 { 0x0110, 0x665110 },
485 { 0x0118, 0x665118 },
486 { 0x011c, 0x66511c },
487 { 0x0120, 0x665120 },
488 { 0x0124, 0x665124 },
489 { 0x0130, 0x665130 },
490 { 0x0134, 0x665134 },
491 { 0x0138, 0x665138 },
492 { 0x013c, 0x66513c },
493 { 0x0140, 0x665140 },
494 { 0x0144, 0x665144 },
495 { 0x0148, 0x665148 },
496 { 0x014c, 0x66514c },
497 { 0x0150, 0x665150 },
498 { 0x0154, 0x665154 },
499 { 0x0158, 0x665158 },
500 { 0x015c, 0x66515c },
501 { 0x0160, 0x665160 },
502 { 0x0164, 0x665164 },
503 { 0x0168, 0x665168 },
504 { 0x016c, 0x66516c },
505 { 0x0400, 0x665400 },
506 { 0x0408, 0x665408 },
507 { 0x040c, 0x66540c },
508 { 0x0410, 0x665410 },
513 static const struct nv50_disp_mthd_chan
514 nvd0_disp_ovly_mthd_chan
= {
518 { "Global", 1, &nvd0_disp_ovly_mthd_base
},
523 struct nv50_disp_chan_impl
524 nvd0_disp_ovly_ofuncs
= {
525 .base
.ctor
= nv50_disp_ovly_ctor
,
526 .base
.dtor
= nv50_disp_dmac_dtor
,
527 .base
.init
= nvd0_disp_dmac_init
,
528 .base
.fini
= nvd0_disp_dmac_fini
,
529 .base
.ntfy
= nv50_disp_chan_ntfy
,
530 .base
.map
= nv50_disp_chan_map
,
531 .base
.rd32
= nv50_disp_chan_rd32
,
532 .base
.wr32
= nv50_disp_chan_wr32
,
534 .attach
= nvd0_disp_dmac_object_attach
,
535 .detach
= nvd0_disp_dmac_object_detach
,
538 /*******************************************************************************
539 * EVO PIO channel base class
540 ******************************************************************************/
543 nvd0_disp_pioc_init(struct nouveau_object
*object
)
545 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
546 struct nv50_disp_pioc
*pioc
= (void *)object
;
547 int chid
= pioc
->base
.chid
;
550 ret
= nv50_disp_chan_init(&pioc
->base
);
554 /* enable error reporting */
555 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000001 << chid
);
557 /* activate channel */
558 nv_wr32(priv
, 0x610490 + (chid
* 0x10), 0x00000001);
559 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00010000)) {
560 nv_error(pioc
, "init: 0x%08x\n",
561 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
569 nvd0_disp_pioc_fini(struct nouveau_object
*object
, bool suspend
)
571 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
572 struct nv50_disp_pioc
*pioc
= (void *)object
;
573 int chid
= pioc
->base
.chid
;
575 nv_mask(priv
, 0x610490 + (chid
* 0x10), 0x00000001, 0x00000000);
576 if (!nv_wait(priv
, 0x610490 + (chid
* 0x10), 0x00030000, 0x00000000)) {
577 nv_error(pioc
, "timeout: 0x%08x\n",
578 nv_rd32(priv
, 0x610490 + (chid
* 0x10)));
583 /* disable error reporting and completion notification */
584 nv_mask(priv
, 0x610090, 0x00000001 << chid
, 0x00000000);
585 nv_mask(priv
, 0x6100a0, 0x00000001 << chid
, 0x00000000);
587 return nv50_disp_chan_fini(&pioc
->base
, suspend
);
590 /*******************************************************************************
591 * EVO immediate overlay channel objects
592 ******************************************************************************/
594 struct nv50_disp_chan_impl
595 nvd0_disp_oimm_ofuncs
= {
596 .base
.ctor
= nv50_disp_oimm_ctor
,
597 .base
.dtor
= nv50_disp_pioc_dtor
,
598 .base
.init
= nvd0_disp_pioc_init
,
599 .base
.fini
= nvd0_disp_pioc_fini
,
600 .base
.ntfy
= nv50_disp_chan_ntfy
,
601 .base
.map
= nv50_disp_chan_map
,
602 .base
.rd32
= nv50_disp_chan_rd32
,
603 .base
.wr32
= nv50_disp_chan_wr32
,
607 /*******************************************************************************
608 * EVO cursor channel objects
609 ******************************************************************************/
611 struct nv50_disp_chan_impl
612 nvd0_disp_curs_ofuncs
= {
613 .base
.ctor
= nv50_disp_curs_ctor
,
614 .base
.dtor
= nv50_disp_pioc_dtor
,
615 .base
.init
= nvd0_disp_pioc_init
,
616 .base
.fini
= nvd0_disp_pioc_fini
,
617 .base
.ntfy
= nv50_disp_chan_ntfy
,
618 .base
.map
= nv50_disp_chan_map
,
619 .base
.rd32
= nv50_disp_chan_rd32
,
620 .base
.wr32
= nv50_disp_chan_wr32
,
624 /*******************************************************************************
625 * Base display object
626 ******************************************************************************/
629 nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0
)
631 const u32 total
= nv_rd32(priv
, 0x640414 + (head
* 0x300));
632 const u32 blanke
= nv_rd32(priv
, 0x64041c + (head
* 0x300));
633 const u32 blanks
= nv_rd32(priv
, 0x640420 + (head
* 0x300));
635 struct nv04_disp_scanoutpos_v0 v0
;
639 nv_ioctl(object
, "disp scanoutpos size %d\n", size
);
640 if (nvif_unpack(args
->v0
, 0, 0, false)) {
641 nv_ioctl(object
, "disp scanoutpos vers %d\n", args
->v0
.version
);
642 args
->v0
.vblanke
= (blanke
& 0xffff0000) >> 16;
643 args
->v0
.hblanke
= (blanke
& 0x0000ffff);
644 args
->v0
.vblanks
= (blanks
& 0xffff0000) >> 16;
645 args
->v0
.hblanks
= (blanks
& 0x0000ffff);
646 args
->v0
.vtotal
= ( total
& 0xffff0000) >> 16;
647 args
->v0
.htotal
= ( total
& 0x0000ffff);
648 args
->v0
.time
[0] = ktime_to_ns(ktime_get());
649 args
->v0
.vline
= /* vline read locks hline */
650 nv_rd32(priv
, 0x616340 + (head
* 0x800)) & 0xffff;
651 args
->v0
.time
[1] = ktime_to_ns(ktime_get());
653 nv_rd32(priv
, 0x616344 + (head
* 0x800)) & 0xffff;
661 nvd0_disp_main_init(struct nouveau_object
*object
)
663 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
664 struct nv50_disp_base
*base
= (void *)object
;
668 ret
= nouveau_parent_init(&base
->base
);
672 /* The below segments of code copying values from one register to
673 * another appear to inform EVO of the display capabilities or
678 for (i
= 0; i
< priv
->head
.nr
; i
++) {
679 tmp
= nv_rd32(priv
, 0x616104 + (i
* 0x800));
680 nv_wr32(priv
, 0x6101b4 + (i
* 0x800), tmp
);
681 tmp
= nv_rd32(priv
, 0x616108 + (i
* 0x800));
682 nv_wr32(priv
, 0x6101b8 + (i
* 0x800), tmp
);
683 tmp
= nv_rd32(priv
, 0x61610c + (i
* 0x800));
684 nv_wr32(priv
, 0x6101bc + (i
* 0x800), tmp
);
688 for (i
= 0; i
< priv
->dac
.nr
; i
++) {
689 tmp
= nv_rd32(priv
, 0x61a000 + (i
* 0x800));
690 nv_wr32(priv
, 0x6101c0 + (i
* 0x800), tmp
);
694 for (i
= 0; i
< priv
->sor
.nr
; i
++) {
695 tmp
= nv_rd32(priv
, 0x61c000 + (i
* 0x800));
696 nv_wr32(priv
, 0x6301c4 + (i
* 0x800), tmp
);
699 /* steal display away from vbios, or something like that */
700 if (nv_rd32(priv
, 0x6100ac) & 0x00000100) {
701 nv_wr32(priv
, 0x6100ac, 0x00000100);
702 nv_mask(priv
, 0x6194e8, 0x00000001, 0x00000000);
703 if (!nv_wait(priv
, 0x6194e8, 0x00000002, 0x00000000)) {
704 nv_error(priv
, "timeout acquiring display\n");
709 /* point at display engine memory area (hash table, objects) */
710 nv_wr32(priv
, 0x610010, (nv_gpuobj(object
->parent
)->addr
>> 8) | 9);
712 /* enable supervisor interrupts, disable everything else */
713 nv_wr32(priv
, 0x610090, 0x00000000);
714 nv_wr32(priv
, 0x6100a0, 0x00000000);
715 nv_wr32(priv
, 0x6100b0, 0x00000307);
717 /* disable underflow reporting, preventing an intermittent issue
718 * on some nve4 boards where the production vbios left this
719 * setting enabled by default.
721 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
723 for (i
= 0; i
< priv
->head
.nr
; i
++)
724 nv_mask(priv
, 0x616308 + (i
* 0x800), 0x00000111, 0x00000010);
730 nvd0_disp_main_fini(struct nouveau_object
*object
, bool suspend
)
732 struct nv50_disp_priv
*priv
= (void *)object
->engine
;
733 struct nv50_disp_base
*base
= (void *)object
;
735 /* disable all interrupts */
736 nv_wr32(priv
, 0x6100b0, 0x00000000);
738 return nouveau_parent_fini(&base
->base
, suspend
);
741 struct nouveau_ofuncs
742 nvd0_disp_main_ofuncs
= {
743 .ctor
= nv50_disp_main_ctor
,
744 .dtor
= nv50_disp_main_dtor
,
745 .init
= nvd0_disp_main_init
,
746 .fini
= nvd0_disp_main_fini
,
747 .mthd
= nv50_disp_main_mthd
,
748 .ntfy
= nouveau_disp_ntfy
,
751 static struct nouveau_oclass
752 nvd0_disp_main_oclass
[] = {
753 { GF110_DISP
, &nvd0_disp_main_ofuncs
},
757 static struct nouveau_oclass
758 nvd0_disp_sclass
[] = {
759 { GF110_DISP_CORE_CHANNEL_DMA
, &nvd0_disp_core_ofuncs
.base
},
760 { GF110_DISP_BASE_CHANNEL_DMA
, &nvd0_disp_base_ofuncs
.base
},
761 { GF110_DISP_OVERLAY_CONTROL_DMA
, &nvd0_disp_ovly_ofuncs
.base
},
762 { GF110_DISP_OVERLAY
, &nvd0_disp_oimm_ofuncs
.base
},
763 { GF110_DISP_CURSOR
, &nvd0_disp_curs_ofuncs
.base
},
767 /*******************************************************************************
768 * Display engine implementation
769 ******************************************************************************/
772 nvd0_disp_vblank_init(struct nvkm_event
*event
, int type
, int head
)
774 struct nouveau_disp
*disp
= container_of(event
, typeof(*disp
), vblank
);
775 nv_mask(disp
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000001);
779 nvd0_disp_vblank_fini(struct nvkm_event
*event
, int type
, int head
)
781 struct nouveau_disp
*disp
= container_of(event
, typeof(*disp
), vblank
);
782 nv_mask(disp
, 0x6100c0 + (head
* 0x800), 0x00000001, 0x00000000);
785 const struct nvkm_event_func
786 nvd0_disp_vblank_func
= {
787 .ctor
= nouveau_disp_vblank_ctor
,
788 .init
= nvd0_disp_vblank_init
,
789 .fini
= nvd0_disp_vblank_fini
,
792 static struct nvkm_output
*
793 exec_lookup(struct nv50_disp_priv
*priv
, int head
, int or, u32 ctrl
,
794 u32
*data
, u8
*ver
, u8
*hdr
, u8
*cnt
, u8
*len
,
795 struct nvbios_outp
*info
)
797 struct nouveau_bios
*bios
= nouveau_bios(priv
);
798 struct nvkm_output
*outp
;
802 type
= DCB_OUTPUT_ANALOG
;
806 switch (ctrl
& 0x00000f00) {
807 case 0x00000000: type
= DCB_OUTPUT_LVDS
; mask
= 1; break;
808 case 0x00000100: type
= DCB_OUTPUT_TMDS
; mask
= 1; break;
809 case 0x00000200: type
= DCB_OUTPUT_TMDS
; mask
= 2; break;
810 case 0x00000500: type
= DCB_OUTPUT_TMDS
; mask
= 3; break;
811 case 0x00000800: type
= DCB_OUTPUT_DP
; mask
= 1; break;
812 case 0x00000900: type
= DCB_OUTPUT_DP
; mask
= 2; break;
814 nv_error(priv
, "unknown SOR mc 0x%08x\n", ctrl
);
819 mask
= 0x00c0 & (mask
<< 6);
820 mask
|= 0x0001 << or;
821 mask
|= 0x0100 << head
;
823 list_for_each_entry(outp
, &priv
->base
.outp
, head
) {
824 if ((outp
->info
.hasht
& 0xff) == type
&&
825 (outp
->info
.hashm
& mask
) == mask
) {
826 *data
= nvbios_outp_match(bios
, outp
->info
.hasht
,
828 ver
, hdr
, cnt
, len
, info
);
838 static struct nvkm_output
*
839 exec_script(struct nv50_disp_priv
*priv
, int head
, int id
)
841 struct nouveau_bios
*bios
= nouveau_bios(priv
);
842 struct nvkm_output
*outp
;
843 struct nvbios_outp info
;
844 u8 ver
, hdr
, cnt
, len
;
848 for (or = 0; !(ctrl
& (1 << head
)) && or < 8; or++) {
849 ctrl
= nv_rd32(priv
, 0x640180 + (or * 0x20));
850 if (ctrl
& (1 << head
))
857 outp
= exec_lookup(priv
, head
, or, ctrl
, &data
, &ver
, &hdr
, &cnt
, &len
, &info
);
859 struct nvbios_init init
= {
860 .subdev
= nv_subdev(priv
),
862 .offset
= info
.script
[id
],
874 static struct nvkm_output
*
875 exec_clkcmp(struct nv50_disp_priv
*priv
, int head
, int id
, u32 pclk
, u32
*conf
)
877 struct nouveau_bios
*bios
= nouveau_bios(priv
);
878 struct nvkm_output
*outp
;
879 struct nvbios_outp info1
;
880 struct nvbios_ocfg info2
;
881 u8 ver
, hdr
, cnt
, len
;
885 for (or = 0; !(ctrl
& (1 << head
)) && or < 8; or++) {
886 ctrl
= nv_rd32(priv
, 0x660180 + (or * 0x20));
887 if (ctrl
& (1 << head
))
894 outp
= exec_lookup(priv
, head
, or, ctrl
, &data
, &ver
, &hdr
, &cnt
, &len
, &info1
);
898 switch (outp
->info
.type
) {
899 case DCB_OUTPUT_TMDS
:
900 *conf
= (ctrl
& 0x00000f00) >> 8;
904 case DCB_OUTPUT_LVDS
:
905 *conf
= priv
->sor
.lvdsconf
;
908 *conf
= (ctrl
& 0x00000f00) >> 8;
910 case DCB_OUTPUT_ANALOG
:
916 data
= nvbios_ocfg_match(bios
, data
, *conf
, &ver
, &hdr
, &cnt
, &len
, &info2
);
917 if (data
&& id
< 0xff) {
918 data
= nvbios_oclk_match(bios
, info2
.clkcmp
[id
], pclk
);
920 struct nvbios_init init
= {
921 .subdev
= nv_subdev(priv
),
/* Supervisor 1.0 handler: run the output's VBIOS script 1 for @head. */
static void
nvd0_disp_intr_unk1_0(struct nv50_disp_priv *priv, int head)
{
	exec_script(priv, head, 1);
}
943 nvd0_disp_intr_unk2_0(struct nv50_disp_priv
*priv
, int head
)
945 struct nvkm_output
*outp
= exec_script(priv
, head
, 2);
947 /* see note in nv50_disp_intr_unk20_0() */
948 if (outp
&& outp
->info
.type
== DCB_OUTPUT_DP
) {
949 struct nvkm_output_dp
*outpdp
= (void *)outp
;
950 struct nvbios_init init
= {
951 .subdev
= nv_subdev(priv
),
952 .bios
= nouveau_bios(priv
),
955 .offset
= outpdp
->info
.script
[4],
960 atomic_set(&outpdp
->lt
.done
, 0);
965 nvd0_disp_intr_unk2_1(struct nv50_disp_priv
*priv
, int head
)
967 struct nouveau_devinit
*devinit
= nouveau_devinit(priv
);
968 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
970 devinit
->pll_set(devinit
, PLL_VPLL0
+ head
, pclk
);
971 nv_wr32(priv
, 0x612200 + (head
* 0x800), 0x00000000);
975 nvd0_disp_intr_unk2_2_tu(struct nv50_disp_priv
*priv
, int head
,
976 struct dcb_output
*outp
)
978 const int or = ffs(outp
->or) - 1;
979 const u32 ctrl
= nv_rd32(priv
, 0x660200 + (or * 0x020));
980 const u32 conf
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
981 const s32 vactive
= nv_rd32(priv
, 0x660414 + (head
* 0x300)) & 0xffff;
982 const s32 vblanke
= nv_rd32(priv
, 0x66041c + (head
* 0x300)) & 0xffff;
983 const s32 vblanks
= nv_rd32(priv
, 0x660420 + (head
* 0x300)) & 0xffff;
984 const u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
985 const u32 link
= ((ctrl
& 0xf00) == 0x800) ? 0 : 1;
986 const u32 hoff
= (head
* 0x800);
987 const u32 soff
= ( or * 0x800);
988 const u32 loff
= (link
* 0x080) + soff
;
989 const u32 symbol
= 100000;
991 u32 dpctrl
= nv_rd32(priv
, 0x61c10c + loff
);
992 u32 clksor
= nv_rd32(priv
, 0x612300 + soff
);
993 u32 datarate
, link_nr
, link_bw
, bits
;
996 link_nr
= hweight32(dpctrl
& 0x000f0000);
997 link_bw
= (clksor
& 0x007c0000) >> 18;
1000 /* symbols/hblank - algorithm taken from comments in tegra driver */
1001 value
= vblanke
+ vactive
- vblanks
- 7;
1002 value
= value
* link_bw
;
1003 do_div(value
, pclk
);
1004 value
= value
- (3 * !!(dpctrl
& 0x00004000)) - (12 / link_nr
);
1005 nv_mask(priv
, 0x616620 + hoff
, 0x0000ffff, value
);
1007 /* symbols/vblank - algorithm taken from comments in tegra driver */
1008 value
= vblanks
- vblanke
- 25;
1009 value
= value
* link_bw
;
1010 do_div(value
, pclk
);
1011 value
= value
- ((36 / link_nr
) + 3) - 1;
1012 nv_mask(priv
, 0x616624 + hoff
, 0x00ffffff, value
);
1015 if ((conf
& 0x3c0) == 0x180) bits
= 30;
1016 else if ((conf
& 0x3c0) == 0x140) bits
= 24;
1018 datarate
= (pclk
* bits
) / 8;
1022 do_div(ratio
, link_nr
* link_bw
);
1024 value
= (symbol
- ratio
) * TU
;
1026 do_div(value
, symbol
);
1027 do_div(value
, symbol
);
1030 value
|= 0x08000000;
1032 nv_wr32(priv
, 0x616610 + hoff
, value
);
1036 nvd0_disp_intr_unk2_2(struct nv50_disp_priv
*priv
, int head
)
1038 struct nvkm_output
*outp
;
1039 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
1040 u32 conf
, addr
, data
;
1042 outp
= exec_clkcmp(priv
, head
, 0xff, pclk
, &conf
);
1046 /* see note in nv50_disp_intr_unk20_2() */
1047 if (outp
->info
.type
== DCB_OUTPUT_DP
) {
1048 u32 sync
= nv_rd32(priv
, 0x660404 + (head
* 0x300));
1049 switch ((sync
& 0x000003c0) >> 6) {
1050 case 6: pclk
= pclk
* 30; break;
1051 case 5: pclk
= pclk
* 24; break;
1058 if (nvkm_output_dp_train(outp
, pclk
, true))
1059 ERR("link not trained before attach\n");
1061 if (priv
->sor
.magic
)
1062 priv
->sor
.magic(outp
);
1065 exec_clkcmp(priv
, head
, 0, pclk
, &conf
);
1067 if (outp
->info
.type
== DCB_OUTPUT_ANALOG
) {
1068 addr
= 0x612280 + (ffs(outp
->info
.or) - 1) * 0x800;
1071 addr
= 0x612300 + (ffs(outp
->info
.or) - 1) * 0x800;
1072 data
= (conf
& 0x0100) ? 0x00000101 : 0x00000000;
1073 switch (outp
->info
.type
) {
1074 case DCB_OUTPUT_TMDS
:
1075 nv_mask(priv
, addr
, 0x007c0000, 0x00280000);
1078 nvd0_disp_intr_unk2_2_tu(priv
, head
, &outp
->info
);
1085 nv_mask(priv
, addr
, 0x00000707, data
);
1089 nvd0_disp_intr_unk4_0(struct nv50_disp_priv
*priv
, int head
)
1091 u32 pclk
= nv_rd32(priv
, 0x660450 + (head
* 0x300)) / 1000;
1094 exec_clkcmp(priv
, head
, 1, pclk
, &conf
);
1098 nvd0_disp_intr_supervisor(struct work_struct
*work
)
1100 struct nv50_disp_priv
*priv
=
1101 container_of(work
, struct nv50_disp_priv
, supervisor
);
1102 struct nv50_disp_impl
*impl
= (void *)nv_object(priv
)->oclass
;
1106 nv_debug(priv
, "supervisor %d\n", ffs(priv
->super
));
1107 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1108 mask
[head
] = nv_rd32(priv
, 0x6101d4 + (head
* 0x800));
1109 nv_debug(priv
, "head %d: 0x%08x\n", head
, mask
[head
]);
1112 if (priv
->super
& 0x00000001) {
1113 nv50_disp_mthd_chan(priv
, NV_DBG_DEBUG
, 0, impl
->mthd
.core
);
1114 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1115 if (!(mask
[head
] & 0x00001000))
1117 nv_debug(priv
, "supervisor 1.0 - head %d\n", head
);
1118 nvd0_disp_intr_unk1_0(priv
, head
);
1121 if (priv
->super
& 0x00000002) {
1122 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1123 if (!(mask
[head
] & 0x00001000))
1125 nv_debug(priv
, "supervisor 2.0 - head %d\n", head
);
1126 nvd0_disp_intr_unk2_0(priv
, head
);
1128 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1129 if (!(mask
[head
] & 0x00010000))
1131 nv_debug(priv
, "supervisor 2.1 - head %d\n", head
);
1132 nvd0_disp_intr_unk2_1(priv
, head
);
1134 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1135 if (!(mask
[head
] & 0x00001000))
1137 nv_debug(priv
, "supervisor 2.2 - head %d\n", head
);
1138 nvd0_disp_intr_unk2_2(priv
, head
);
1141 if (priv
->super
& 0x00000004) {
1142 for (head
= 0; head
< priv
->head
.nr
; head
++) {
1143 if (!(mask
[head
] & 0x00001000))
1145 nv_debug(priv
, "supervisor 3.0 - head %d\n", head
);
1146 nvd0_disp_intr_unk4_0(priv
, head
);
1150 for (head
= 0; head
< priv
->head
.nr
; head
++)
1151 nv_wr32(priv
, 0x6101d4 + (head
* 0x800), 0x00000000);
1152 nv_wr32(priv
, 0x6101d0, 0x80000000);
1156 nvd0_disp_intr_error(struct nv50_disp_priv
*priv
, int chid
)
1158 const struct nv50_disp_impl
*impl
= (void *)nv_object(priv
)->oclass
;
1159 u32 mthd
= nv_rd32(priv
, 0x6101f0 + (chid
* 12));
1160 u32 data
= nv_rd32(priv
, 0x6101f4 + (chid
* 12));
1161 u32 unkn
= nv_rd32(priv
, 0x6101f8 + (chid
* 12));
1163 nv_error(priv
, "chid %d mthd 0x%04x data 0x%08x "
1165 chid
, (mthd
& 0x0000ffc), data
, mthd
, unkn
);
1168 switch (mthd
& 0xffc) {
1170 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 0,
1178 switch (mthd
& 0xffc) {
1180 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 1,
1188 switch (mthd
& 0xffc) {
1190 nv50_disp_mthd_chan(priv
, NV_DBG_ERROR
, chid
- 5,
1198 nv_wr32(priv
, 0x61009c, (1 << chid
));
1199 nv_wr32(priv
, 0x6101f0 + (chid
* 12), 0x90000000);
1203 nvd0_disp_intr(struct nouveau_subdev
*subdev
)
1205 struct nv50_disp_priv
*priv
= (void *)subdev
;
1206 u32 intr
= nv_rd32(priv
, 0x610088);
1209 if (intr
& 0x00000001) {
1210 u32 stat
= nv_rd32(priv
, 0x61008c);
1212 int chid
= __ffs(stat
); stat
&= ~(1 << chid
);
1213 nv50_disp_chan_uevent_send(priv
, chid
);
1214 nv_wr32(priv
, 0x61008c, 1 << chid
);
1216 intr
&= ~0x00000001;
1219 if (intr
& 0x00000002) {
1220 u32 stat
= nv_rd32(priv
, 0x61009c);
1221 int chid
= ffs(stat
) - 1;
1223 nvd0_disp_intr_error(priv
, chid
);
1224 intr
&= ~0x00000002;
1227 if (intr
& 0x00100000) {
1228 u32 stat
= nv_rd32(priv
, 0x6100ac);
1229 if (stat
& 0x00000007) {
1230 priv
->super
= (stat
& 0x00000007);
1231 schedule_work(&priv
->supervisor
);
1232 nv_wr32(priv
, 0x6100ac, priv
->super
);
1233 stat
&= ~0x00000007;
1237 nv_info(priv
, "unknown intr24 0x%08x\n", stat
);
1238 nv_wr32(priv
, 0x6100ac, stat
);
1241 intr
&= ~0x00100000;
1244 for (i
= 0; i
< priv
->head
.nr
; i
++) {
1245 u32 mask
= 0x01000000 << i
;
1247 u32 stat
= nv_rd32(priv
, 0x6100bc + (i
* 0x800));
1248 if (stat
& 0x00000001)
1249 nouveau_disp_vblank(&priv
->base
, i
);
1250 nv_mask(priv
, 0x6100bc + (i
* 0x800), 0, 0);
1251 nv_rd32(priv
, 0x6100c0 + (i
* 0x800));
1257 nvd0_disp_ctor(struct nouveau_object
*parent
, struct nouveau_object
*engine
,
1258 struct nouveau_oclass
*oclass
, void *data
, u32 size
,
1259 struct nouveau_object
**pobject
)
1261 struct nv50_disp_priv
*priv
;
1262 int heads
= nv_rd32(parent
, 0x022448);
1265 ret
= nouveau_disp_create(parent
, engine
, oclass
, heads
,
1266 "PDISP", "display", &priv
);
1267 *pobject
= nv_object(priv
);
1271 ret
= nvkm_event_init(&nvd0_disp_chan_uevent
, 1, 17, &priv
->uevent
);
1275 nv_engine(priv
)->sclass
= nvd0_disp_main_oclass
;
1276 nv_engine(priv
)->cclass
= &nv50_disp_cclass
;
1277 nv_subdev(priv
)->intr
= nvd0_disp_intr
;
1278 INIT_WORK(&priv
->supervisor
, nvd0_disp_intr_supervisor
);
1279 priv
->sclass
= nvd0_disp_sclass
;
1280 priv
->head
.nr
= heads
;
1283 priv
->dac
.power
= nv50_dac_power
;
1284 priv
->dac
.sense
= nv50_dac_sense
;
1285 priv
->sor
.power
= nv50_sor_power
;
1286 priv
->sor
.hda_eld
= nvd0_hda_eld
;
1287 priv
->sor
.hdmi
= nvd0_hdmi_ctrl
;
1291 struct nouveau_oclass
*
1292 nvd0_disp_outp_sclass
[] = {
1293 &nvd0_sor_dp_impl
.base
.base
,
1297 struct nouveau_oclass
*
1298 nvd0_disp_oclass
= &(struct nv50_disp_impl
) {
1299 .base
.base
.handle
= NV_ENGINE(DISP
, 0x90),
1300 .base
.base
.ofuncs
= &(struct nouveau_ofuncs
) {
1301 .ctor
= nvd0_disp_ctor
,
1302 .dtor
= _nouveau_disp_dtor
,
1303 .init
= _nouveau_disp_init
,
1304 .fini
= _nouveau_disp_fini
,
1306 .base
.vblank
= &nvd0_disp_vblank_func
,
1307 .base
.outp
= nvd0_disp_outp_sclass
,
1308 .mthd
.core
= &nvd0_disp_core_mthd_chan
,
1309 .mthd
.base
= &nvd0_disp_base_mthd_chan
,
1310 .mthd
.ovly
= &nvd0_disp_ovly_mthd_chan
,
1311 .mthd
.prev
= -0x020000,
1312 .head
.scanoutpos
= nvd0_disp_main_scanoutpos
,