drm/i915: add common intel_digital_port_connected function
drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
29#include <linux/slab.h>
30#include <linux/export.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
33#include <drm/drmP.h>
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
38#include "intel_drv.h"
39#include <drm/i915_drm.h>
40#include "i915_drv.h"
41
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
50struct dp_link_dpll {
51 int clock;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { 162000,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { 162000,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
69static const struct dp_link_dpll vlv_dpll[] = {
70 { 162000,
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
76/*
77 * CHV supports eDP 1.4, which has more link rates.
78 * Below only the fixed rates are provided; the variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional division for m2.
83 * m2 is stored in fixed point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
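
/*
 * Worked example for the fixed-point m2 encoding above (illustration only):
 * the 162000 entry has m2_int = 32 and m2_fraction = 1677722, so
 * m2 = (32 << 22) | 1677722 = 0x8000000 | 0x19999a = 0x819999a,
 * which is exactly the .m2 value stored in the table.
 */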
93
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98static const int chv_rates[] = { 162000, 202500, 210000, 216000,
99 243000, 270000, 324000, 405000,
100 420000, 432000, 540000 };
101static const int default_rates[] = { 162000, 270000, 540000 };
102
103/**
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
106 *
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
109 */
110static bool is_edp(struct intel_dp *intel_dp)
111{
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
115}
116
117static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
118{
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121 return intel_dig_port->base.base.dev;
122}
123
124static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125{
126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
127}
128
129static void intel_dp_link_down(struct intel_dp *intel_dp);
130static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
131static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
132static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
133static void vlv_steal_power_sequencer(struct drm_device *dev,
134 enum pipe pipe);
135
136static int
137intel_dp_max_link_bw(struct intel_dp *intel_dp)
138{
139 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
140
141 switch (max_link_bw) {
142 case DP_LINK_BW_1_62:
143 case DP_LINK_BW_2_7:
144 case DP_LINK_BW_5_4:
145 break;
146 default:
147 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
148 max_link_bw);
149 max_link_bw = DP_LINK_BW_1_62;
150 break;
151 }
152 return max_link_bw;
153}
154
155static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
156{
157 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
158 struct drm_device *dev = intel_dig_port->base.base.dev;
159 u8 source_max, sink_max;
160
161 source_max = 4;
162 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
163 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
164 source_max = 2;
165
166 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
167
168 return min(source_max, sink_max);
169}
170
171/*
172 * The units on the numbers in the next two are... bizarre. Examples will
173 * make it clearer; this one parallels an example in the eDP spec.
174 *
175 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176 *
177 * 270000 * 1 * 8 / 10 == 216000
178 *
179 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
181 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182 * 119000. At 18bpp that's 2142000 kilobits per second.
183 *
184 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185 * get the result in decakilobits instead of kilobits.
186 */
187
188static int
189intel_dp_link_required(int pixel_clock, int bpp)
190{
191 return (pixel_clock * bpp + 9) / 10;
192}
193
194static int
195intel_dp_max_data_rate(int max_link_clock, int max_lanes)
196{
197 return (max_link_clock * max_lanes * 8) / 10;
198}
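
/*
 * Continuing the worked example from the comment above: 1680x1050R at
 * 18bpp needs intel_dp_link_required(119000, 18) == 214200 decakilobits,
 * which fits within the intel_dp_max_data_rate(270000, 1) == 216000
 * available on a single 2.7GHz lane.
 */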
199
200static enum drm_mode_status
201intel_dp_mode_valid(struct drm_connector *connector,
202 struct drm_display_mode *mode)
203{
204 struct intel_dp *intel_dp = intel_attached_dp(connector);
205 struct intel_connector *intel_connector = to_intel_connector(connector);
206 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
207 int target_clock = mode->clock;
208 int max_rate, mode_rate, max_lanes, max_link_clock;
209
210 if (is_edp(intel_dp) && fixed_mode) {
211 if (mode->hdisplay > fixed_mode->hdisplay)
212 return MODE_PANEL;
213
214 if (mode->vdisplay > fixed_mode->vdisplay)
215 return MODE_PANEL;
216
217 target_clock = fixed_mode->clock;
218 }
219
220 max_link_clock = intel_dp_max_link_rate(intel_dp);
221 max_lanes = intel_dp_max_lane_count(intel_dp);
222
223 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
224 mode_rate = intel_dp_link_required(target_clock, 18);
225
226 if (mode_rate > max_rate)
227 return MODE_CLOCK_HIGH;
228
229 if (mode->clock < 10000)
230 return MODE_CLOCK_LOW;
231
232 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233 return MODE_H_ILLEGAL;
234
235 return MODE_OK;
236}
237
238uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
239{
240 int i;
241 uint32_t v = 0;
242
243 if (src_bytes > 4)
244 src_bytes = 4;
245 for (i = 0; i < src_bytes; i++)
246 v |= ((uint32_t) src[i]) << ((3-i) * 8);
247 return v;
248}
249
250static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
251{
252 int i;
253 if (dst_bytes > 4)
254 dst_bytes = 4;
255 for (i = 0; i < dst_bytes; i++)
256 dst[i] = src >> ((3-i) * 8);
257}
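
/*
 * The two helpers above are inverses of each other; packing is
 * big-endian, so e.g.
 *
 *	intel_dp_pack_aux((const uint8_t []){ 0x12, 0x34, 0x56, 0x78 }, 4)
 *
 * yields 0x12345678, and intel_dp_unpack_aux(0x12345678, dst, 4)
 * restores the original four bytes.
 */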
258
259/* hrawclk is 1/4 the FSB frequency */
260static int
261intel_hrawclk(struct drm_device *dev)
262{
263 struct drm_i915_private *dev_priv = dev->dev_private;
264 uint32_t clkcfg;
265
266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
267 if (IS_VALLEYVIEW(dev))
268 return 200;
269
270 clkcfg = I915_READ(CLKCFG);
271 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_400:
273 return 100;
274 case CLKCFG_FSB_533:
275 return 133;
276 case CLKCFG_FSB_667:
277 return 166;
278 case CLKCFG_FSB_800:
279 return 200;
280 case CLKCFG_FSB_1067:
281 return 266;
282 case CLKCFG_FSB_1333:
283 return 333;
284 /* these two are just a guess; one of them might be right */
285 case CLKCFG_FSB_1600:
286 case CLKCFG_FSB_1600_ALT:
287 return 400;
288 default:
289 return 133;
290 }
291}
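
/*
 * E.g. a CLKCFG FSB field of CLKCFG_FSB_800 yields 200, i.e. the hrawclk
 * in MHz (one quarter of the 800MHz FSB); the default case conservatively
 * assumes a 533MHz FSB.
 */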
292
293static void
294intel_dp_init_panel_power_sequencer(struct drm_device *dev,
295 struct intel_dp *intel_dp);
296static void
297intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
298 struct intel_dp *intel_dp);
299
300static void pps_lock(struct intel_dp *intel_dp)
301{
302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
303 struct intel_encoder *encoder = &intel_dig_port->base;
304 struct drm_device *dev = encoder->base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum intel_display_power_domain power_domain;
307
308 /*
309 * See vlv_power_sequencer_reset() for why we need
310 * a power domain reference here.
311 */
312 power_domain = intel_display_port_power_domain(encoder);
313 intel_display_power_get(dev_priv, power_domain);
314
315 mutex_lock(&dev_priv->pps_mutex);
316}
317
318static void pps_unlock(struct intel_dp *intel_dp)
319{
320 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
321 struct intel_encoder *encoder = &intel_dig_port->base;
322 struct drm_device *dev = encoder->base.dev;
323 struct drm_i915_private *dev_priv = dev->dev_private;
324 enum intel_display_power_domain power_domain;
325
326 mutex_unlock(&dev_priv->pps_mutex);
327
328 power_domain = intel_display_port_power_domain(encoder);
329 intel_display_power_put(dev_priv, power_domain);
330}
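
/*
 * Usage sketch: any access to the panel power sequencer registers is
 * expected to be bracketed by this pair, e.g.
 *
 *	pps_lock(intel_dp);
 *	... read/modify PP_CONTROL/PP_STATUS ...
 *	pps_unlock(intel_dp);
 *
 * so that both pps_mutex and the required power domain reference are
 * held for the duration of the access.
 */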
331
332static void
333vlv_power_sequencer_kick(struct intel_dp *intel_dp)
334{
335 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
336 struct drm_device *dev = intel_dig_port->base.base.dev;
337 struct drm_i915_private *dev_priv = dev->dev_private;
338 enum pipe pipe = intel_dp->pps_pipe;
339 bool pll_enabled;
340 uint32_t DP;
341
342 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
343 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
344 pipe_name(pipe), port_name(intel_dig_port->port)))
345 return;
346
347 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
348 pipe_name(pipe), port_name(intel_dig_port->port));
349
350 /* Preserve the BIOS-computed detected bit. This is
351 * supposed to be read-only.
352 */
353 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
354 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
355 DP |= DP_PORT_WIDTH(1);
356 DP |= DP_LINK_TRAIN_PAT_1;
357
358 if (IS_CHERRYVIEW(dev))
359 DP |= DP_PIPE_SELECT_CHV(pipe);
360 else if (pipe == PIPE_B)
361 DP |= DP_PIPEB_SELECT;
362
363 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
364
365 /*
366 * The DPLL for the pipe must be enabled for this to work.
367 * So enable it temporarily if it's not already enabled.
368 */
369 if (!pll_enabled)
370 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
371 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
372
373 /*
374 * Similar magic as in intel_dp_enable_port().
375 * We _must_ do this port enable + disable trick
376 * to make this power sequencer lock onto the port.
377 * Otherwise even the VDD force bit won't work.
378 */
379 I915_WRITE(intel_dp->output_reg, DP);
380 POSTING_READ(intel_dp->output_reg);
381
382 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
384
385 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
386 POSTING_READ(intel_dp->output_reg);
387
388 if (!pll_enabled)
389 vlv_force_pll_off(dev, pipe);
390}
391
392static enum pipe
393vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
394{
395 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
396 struct drm_device *dev = intel_dig_port->base.base.dev;
397 struct drm_i915_private *dev_priv = dev->dev_private;
398 struct intel_encoder *encoder;
399 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
400 enum pipe pipe;
401
402 lockdep_assert_held(&dev_priv->pps_mutex);
403
404 /* We should never land here with regular DP ports */
405 WARN_ON(!is_edp(intel_dp));
406
407 if (intel_dp->pps_pipe != INVALID_PIPE)
408 return intel_dp->pps_pipe;
409
410 /*
411 * We don't have a power sequencer currently.
412 * Pick one that's not used by any other port.
413 */
414 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
415 base.head) {
416 struct intel_dp *tmp;
417
418 if (encoder->type != INTEL_OUTPUT_EDP)
419 continue;
420
421 tmp = enc_to_intel_dp(&encoder->base);
422
423 if (tmp->pps_pipe != INVALID_PIPE)
424 pipes &= ~(1 << tmp->pps_pipe);
425 }
426
427 /*
428 * Didn't find one. This should not happen since there
429 * are two power sequencers and up to two eDP ports.
430 */
431 if (WARN_ON(pipes == 0))
432 pipe = PIPE_A;
433 else
434 pipe = ffs(pipes) - 1;
435
436 vlv_steal_power_sequencer(dev, pipe);
437 intel_dp->pps_pipe = pipe;
438
439 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
440 pipe_name(intel_dp->pps_pipe),
441 port_name(intel_dig_port->port));
442
443 /* init power sequencer on this pipe and port */
444 intel_dp_init_panel_power_sequencer(dev, intel_dp);
445 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
446
447 /*
448 * Even VDD force doesn't work until we've made
449 * the power sequencer lock onto the port.
450 */
451 vlv_power_sequencer_kick(intel_dp);
452
453 return intel_dp->pps_pipe;
454}
455
456typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
457 enum pipe pipe);
458
459static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
460 enum pipe pipe)
461{
462 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
463}
464
465static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
466 enum pipe pipe)
467{
468 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
469}
470
471static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
472 enum pipe pipe)
473{
474 return true;
475}
476
477static enum pipe
478vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
479 enum port port,
480 vlv_pipe_check pipe_check)
481{
482 enum pipe pipe;
483
484 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
485 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
486 PANEL_PORT_SELECT_MASK;
487
488 if (port_sel != PANEL_PORT_SELECT_VLV(port))
489 continue;
490
491 if (!pipe_check(dev_priv, pipe))
492 continue;
493
494 return pipe;
495 }
496
497 return INVALID_PIPE;
498}
499
500static void
501vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
502{
503 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
504 struct drm_device *dev = intel_dig_port->base.base.dev;
505 struct drm_i915_private *dev_priv = dev->dev_private;
506 enum port port = intel_dig_port->port;
507
508 lockdep_assert_held(&dev_priv->pps_mutex);
509
510 /* try to find a pipe with this port selected */
511 /* first pick one where the panel is on */
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_has_pp_on);
514 /* didn't find one? pick one where vdd is on */
515 if (intel_dp->pps_pipe == INVALID_PIPE)
516 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
517 vlv_pipe_has_vdd_on);
518 /* didn't find one? pick one with just the correct port */
519 if (intel_dp->pps_pipe == INVALID_PIPE)
520 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
521 vlv_pipe_any);
522
523 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
524 if (intel_dp->pps_pipe == INVALID_PIPE) {
525 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
526 port_name(port));
527 return;
528 }
529
530 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
531 port_name(port), pipe_name(intel_dp->pps_pipe));
532
533 intel_dp_init_panel_power_sequencer(dev, intel_dp);
534 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
535}
536
537void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
538{
539 struct drm_device *dev = dev_priv->dev;
540 struct intel_encoder *encoder;
541
542 if (WARN_ON(!IS_VALLEYVIEW(dev)))
543 return;
544
545 /*
546 * We can't grab pps_mutex here due to deadlock with power_domain
547 * mutex when power_domain functions are called while holding pps_mutex.
548 * That also means that in order to use pps_pipe the code needs to
549 * hold both a power domain reference and pps_mutex, and the power domain
550 * reference get/put must be done while _not_ holding pps_mutex.
551 * pps_{lock,unlock}() do these steps in the correct order, so they
552 * should always be used.
553 */
554
555 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
556 struct intel_dp *intel_dp;
557
558 if (encoder->type != INTEL_OUTPUT_EDP)
559 continue;
560
561 intel_dp = enc_to_intel_dp(&encoder->base);
562 intel_dp->pps_pipe = INVALID_PIPE;
563 }
564}
565
566static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
567{
568 struct drm_device *dev = intel_dp_to_dev(intel_dp);
569
570 if (IS_BROXTON(dev))
571 return BXT_PP_CONTROL(0);
572 else if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_CONTROL;
574 else
575 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
576}
577
578static u32 _pp_stat_reg(struct intel_dp *intel_dp)
579{
580 struct drm_device *dev = intel_dp_to_dev(intel_dp);
581
582 if (IS_BROXTON(dev))
583 return BXT_PP_STATUS(0);
584 else if (HAS_PCH_SPLIT(dev))
585 return PCH_PP_STATUS;
586 else
587 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
588}
589
590/* Reboot notifier handler to shut down panel power, to guarantee the T12
591 timing. This function is only applicable when the panel PM state is not tracked. */
592static int edp_notify_handler(struct notifier_block *this, unsigned long code,
593 void *unused)
594{
595 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
596 edp_notifier);
597 struct drm_device *dev = intel_dp_to_dev(intel_dp);
598 struct drm_i915_private *dev_priv = dev->dev_private;
599 u32 pp_div;
600 u32 pp_ctrl_reg, pp_div_reg;
601
602 if (!is_edp(intel_dp) || code != SYS_RESTART)
603 return 0;
604
605 pps_lock(intel_dp);
606
607 if (IS_VALLEYVIEW(dev)) {
608 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
609
610 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
611 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
612 pp_div = I915_READ(pp_div_reg);
613 pp_div &= PP_REFERENCE_DIVIDER_MASK;
614
615 /* 0x1F write to PP_DIV_REG sets max cycle delay */
616 I915_WRITE(pp_div_reg, pp_div | 0x1F);
617 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
618 msleep(intel_dp->panel_power_cycle_delay);
619 }
620
621 pps_unlock(intel_dp);
622
623 return 0;
624}
625
626static bool edp_have_panel_power(struct intel_dp *intel_dp)
627{
628 struct drm_device *dev = intel_dp_to_dev(intel_dp);
629 struct drm_i915_private *dev_priv = dev->dev_private;
630
631 lockdep_assert_held(&dev_priv->pps_mutex);
632
633 if (IS_VALLEYVIEW(dev) &&
634 intel_dp->pps_pipe == INVALID_PIPE)
635 return false;
636
637 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
638}
639
640static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
641{
642 struct drm_device *dev = intel_dp_to_dev(intel_dp);
643 struct drm_i915_private *dev_priv = dev->dev_private;
644
645 lockdep_assert_held(&dev_priv->pps_mutex);
646
647 if (IS_VALLEYVIEW(dev) &&
648 intel_dp->pps_pipe == INVALID_PIPE)
649 return false;
650
651 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
652}
653
654static void
655intel_dp_check_edp(struct intel_dp *intel_dp)
656{
657 struct drm_device *dev = intel_dp_to_dev(intel_dp);
658 struct drm_i915_private *dev_priv = dev->dev_private;
659
660 if (!is_edp(intel_dp))
661 return;
662
663 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
664 WARN(1, "eDP powered off while attempting aux channel communication.\n");
665 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
666 I915_READ(_pp_stat_reg(intel_dp)),
667 I915_READ(_pp_ctrl_reg(intel_dp)));
668 }
669}
670
671static uint32_t
672intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
673{
674 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
675 struct drm_device *dev = intel_dig_port->base.base.dev;
676 struct drm_i915_private *dev_priv = dev->dev_private;
677 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
678 uint32_t status;
679 bool done;
680
681#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
682 if (has_aux_irq)
683 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
684 msecs_to_jiffies_timeout(10));
685 else
686 done = wait_for_atomic(C, 10) == 0;
687 if (!done)
688 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
689 has_aux_irq);
690#undef C
691
692 return status;
693}
694
695static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
699
700 /*
701 * The clock divider is based on the hrawclk, and should run at
702 * 2MHz. So take the hrawclk value, divide by 2, and use that
703 */
704 return index ? 0 : intel_hrawclk(dev) / 2;
705}
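
/*
 * E.g. with a 200MHz hrawclk the divider is 200 / 2 = 100, giving the
 * desired 200MHz / 100 = 2MHz AUX bit clock.
 */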
706
707static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
708{
709 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
710 struct drm_device *dev = intel_dig_port->base.base.dev;
711 struct drm_i915_private *dev_priv = dev->dev_private;
712
713 if (index)
714 return 0;
715
716 if (intel_dig_port->port == PORT_A) {
717 return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);
718
719 } else {
720 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
721 }
722}
723
724static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
725{
726 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
727 struct drm_device *dev = intel_dig_port->base.base.dev;
728 struct drm_i915_private *dev_priv = dev->dev_private;
729
730 if (intel_dig_port->port == PORT_A) {
731 if (index)
732 return 0;
733 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
734 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
735 /* Workaround for non-ULT HSW */
736 switch (index) {
737 case 0: return 63;
738 case 1: return 72;
739 default: return 0;
740 }
741 } else {
742 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
743 }
744}
745
746static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
747{
748 return index ? 0 : 100;
749}
750
751static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
752{
753 /*
754 * SKL doesn't need us to program the AUX clock divider (Hardware will
755 * derive the clock from CDCLK automatically). We still implement the
756 * get_aux_clock_divider vfunc to plug into the existing code.
757 */
758 return index ? 0 : 1;
759}
760
761static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
762 bool has_aux_irq,
763 int send_bytes,
764 uint32_t aux_clock_divider)
765{
766 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
767 struct drm_device *dev = intel_dig_port->base.base.dev;
768 uint32_t precharge, timeout;
769
770 if (IS_GEN6(dev))
771 precharge = 3;
772 else
773 precharge = 5;
774
775 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
776 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
777 else
778 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
779
780 return DP_AUX_CH_CTL_SEND_BUSY |
781 DP_AUX_CH_CTL_DONE |
782 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
783 DP_AUX_CH_CTL_TIME_OUT_ERROR |
784 timeout |
785 DP_AUX_CH_CTL_RECEIVE_ERROR |
786 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
787 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
789}
790
791static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
792 bool has_aux_irq,
793 int send_bytes,
794 uint32_t unused)
795{
796 return DP_AUX_CH_CTL_SEND_BUSY |
797 DP_AUX_CH_CTL_DONE |
798 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
799 DP_AUX_CH_CTL_TIME_OUT_ERROR |
800 DP_AUX_CH_CTL_TIME_OUT_1600us |
801 DP_AUX_CH_CTL_RECEIVE_ERROR |
802 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
803 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
804}
805
806static int
807intel_dp_aux_ch(struct intel_dp *intel_dp,
808 const uint8_t *send, int send_bytes,
809 uint8_t *recv, int recv_size)
810{
811 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
812 struct drm_device *dev = intel_dig_port->base.base.dev;
813 struct drm_i915_private *dev_priv = dev->dev_private;
814 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
815 uint32_t ch_data = ch_ctl + 4;
816 uint32_t aux_clock_divider;
817 int i, ret, recv_bytes;
818 uint32_t status;
819 int try, clock = 0;
820 bool has_aux_irq = HAS_AUX_IRQ(dev);
821 bool vdd;
822
823 pps_lock(intel_dp);
824
825 /*
826 * We will be called with VDD already enabled for dpcd/edid/oui reads.
827 * In such cases we want to leave VDD enabled and it's up to the upper
828 * layers to turn it off. But for e.g. i2c-dev access we need to turn it on/off
829 * ourselves.
830 */
831 vdd = edp_panel_vdd_on(intel_dp);
832
833 /* dp aux is extremely sensitive to irq latency, hence request the
834 * lowest possible wakeup latency to prevent the cpu from going into
835 * deep sleep states.
836 */
837 pm_qos_update_request(&dev_priv->pm_qos, 0);
838
839 intel_dp_check_edp(intel_dp);
840
841 intel_aux_display_runtime_get(dev_priv);
842
843 /* Try to wait for any previous AUX channel activity */
844 for (try = 0; try < 3; try++) {
845 status = I915_READ_NOTRACE(ch_ctl);
846 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
847 break;
848 msleep(1);
849 }
850
851 if (try == 3) {
852 static u32 last_status = -1;
853 const u32 status = I915_READ(ch_ctl);
854
855 if (status != last_status) {
856 WARN(1, "dp_aux_ch not started status 0x%08x\n",
857 status);
858 last_status = status;
859 }
860
861 ret = -EBUSY;
862 goto out;
863 }
864
865 /* Only 5 data registers! */
866 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
867 ret = -E2BIG;
868 goto out;
869 }
870
871 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
872 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
873 has_aux_irq,
874 send_bytes,
875 aux_clock_divider);
876
877 /* Must try at least 3 times according to DP spec */
878 for (try = 0; try < 5; try++) {
879 /* Load the send data into the aux channel data registers */
880 for (i = 0; i < send_bytes; i += 4)
881 I915_WRITE(ch_data + i,
882 intel_dp_pack_aux(send + i,
883 send_bytes - i));
884
885 /* Send the command and wait for it to complete */
886 I915_WRITE(ch_ctl, send_ctl);
887
888 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
889
890 /* Clear done status and any errors */
891 I915_WRITE(ch_ctl,
892 status |
893 DP_AUX_CH_CTL_DONE |
894 DP_AUX_CH_CTL_TIME_OUT_ERROR |
895 DP_AUX_CH_CTL_RECEIVE_ERROR);
896
897 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
898 continue;
899
900 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
901 * 400us delay required for errors and timeouts
902 * Timeout errors from the HW already meet this
903 * requirement so skip to next iteration
904 */
905 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
906 usleep_range(400, 500);
907 continue;
908 }
909 if (status & DP_AUX_CH_CTL_DONE)
910 goto done;
911 }
912 }
913
914 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
915 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
916 ret = -EBUSY;
917 goto out;
918 }
919
920done:
921 /* Check for timeout or receive error.
922 * Timeouts occur when the sink is not connected
923 */
924 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
925 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
926 ret = -EIO;
927 goto out;
928 }
929
930 /* Timeouts occur when the device isn't connected, so they're
931 * "normal" -- don't fill the kernel log with these */
932 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
933 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
934 ret = -ETIMEDOUT;
935 goto out;
936 }
937
938 /* Unload any bytes sent back from the other side */
939 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
940 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
941 if (recv_bytes > recv_size)
942 recv_bytes = recv_size;
943
944 for (i = 0; i < recv_bytes; i += 4)
945 intel_dp_unpack_aux(I915_READ(ch_data + i),
946 recv + i, recv_bytes - i);
947
948 ret = recv_bytes;
949out:
950 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
951 intel_aux_display_runtime_put(dev_priv);
952
953 if (vdd)
954 edp_panel_vdd_off(intel_dp, false);
955
956 pps_unlock(intel_dp);
957
958 return ret;
959}
960
961#define BARE_ADDRESS_SIZE 3
962#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
963static ssize_t
964intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
965{
966 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
967 uint8_t txbuf[20], rxbuf[20];
968 size_t txsize, rxsize;
969 int ret;
970
971 txbuf[0] = (msg->request << 4) |
972 ((msg->address >> 16) & 0xf);
973 txbuf[1] = (msg->address >> 8) & 0xff;
974 txbuf[2] = msg->address & 0xff;
975 txbuf[3] = msg->size - 1;
976
977 switch (msg->request & ~DP_AUX_I2C_MOT) {
978 case DP_AUX_NATIVE_WRITE:
979 case DP_AUX_I2C_WRITE:
980 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
981 rxsize = 2; /* 0 or 1 data bytes */
982
983 if (WARN_ON(txsize > 20))
984 return -E2BIG;
985
986 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
987
988 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
989 if (ret > 0) {
990 msg->reply = rxbuf[0] >> 4;
991
992 if (ret > 1) {
993 /* Number of bytes written in a short write. */
994 ret = clamp_t(int, rxbuf[1], 0, msg->size);
995 } else {
996 /* Return payload size. */
997 ret = msg->size;
998 }
999 }
1000 break;
1001
1002 case DP_AUX_NATIVE_READ:
1003 case DP_AUX_I2C_READ:
1004 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1005 rxsize = msg->size + 1;
1006
1007 if (WARN_ON(rxsize > 20))
1008 return -E2BIG;
1009
1010 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1011 if (ret > 0) {
1012 msg->reply = rxbuf[0] >> 4;
1013 /*
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1016 *
1017 * Return payload size.
1018 */
1019 ret--;
1020 memcpy(msg->buffer, rxbuf + 1, ret);
1021 }
1022 break;
1023
1024 default:
1025 ret = -EINVAL;
1026 break;
1027 }
1028
1029 return ret;
1030}
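
/*
 * Header encoding example (illustration only): a native AUX read of one
 * byte from DPCD address 0x000 (DP_DPCD_REV) is sent as
 *
 *	txbuf[] = { DP_AUX_NATIVE_READ << 4, 0x00, 0x00, 0x00 }
 *
 * i.e. the request nibble plus a 20-bit address in bytes 0-2, and
 * "requested length - 1" in byte 3.
 */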
1031
1032static void
1033intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1034{
1035 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1036 struct drm_i915_private *dev_priv = dev->dev_private;
1037 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1038 enum port port = intel_dig_port->port;
1039 struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
1040 const char *name = NULL;
1041 uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1042 int ret;
1043
1044 /* On SKL we don't have AUX for port E, so we rely on the VBT to set
1045 * a proper alternate AUX channel.
1046 */
1047 if (IS_SKYLAKE(dev) && port == PORT_E) {
1048 switch (info->alternate_aux_channel) {
1049 case DP_AUX_B:
1050 porte_aux_ctl_reg = DPB_AUX_CH_CTL;
1051 break;
1052 case DP_AUX_C:
1053 porte_aux_ctl_reg = DPC_AUX_CH_CTL;
1054 break;
1055 case DP_AUX_D:
1056 porte_aux_ctl_reg = DPD_AUX_CH_CTL;
1057 break;
1058 case DP_AUX_A:
1059 default:
1060 porte_aux_ctl_reg = DPA_AUX_CH_CTL;
1061 }
1062 }
1063
1064 switch (port) {
1065 case PORT_A:
1066 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
1067 name = "DPDDC-A";
1068 break;
1069 case PORT_B:
1070 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
1071 name = "DPDDC-B";
1072 break;
1073 case PORT_C:
1074 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
1075 name = "DPDDC-C";
1076 break;
1077 case PORT_D:
1078 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
1079 name = "DPDDC-D";
1080 break;
1081 case PORT_E:
1082 intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
1083 name = "DPDDC-E";
1084 break;
1085 default:
1086 BUG();
1087 }
1088
1089 /*
1090 * The AUX_CTL register is usually DP_CTL + 0x10.
1091 *
1092 * On Haswell and Broadwell though:
1093 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1094 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1095 *
1096 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1097 */
1098 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
1099 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
1100
1101 intel_dp->aux.name = name;
1102 intel_dp->aux.dev = dev->dev;
1103 intel_dp->aux.transfer = intel_dp_aux_transfer;
1104
1105 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1106 connector->base.kdev->kobj.name);
1107
1108 ret = drm_dp_aux_register(&intel_dp->aux);
1109 if (ret < 0) {
1110 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1111 name, ret);
1112 return;
1113 }
1114
1115 ret = sysfs_create_link(&connector->base.kdev->kobj,
1116 &intel_dp->aux.ddc.dev.kobj,
1117 intel_dp->aux.ddc.dev.kobj.name);
1118 if (ret < 0) {
1119 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
1120 drm_dp_aux_unregister(&intel_dp->aux);
1121 }
1122}
1123
1124static void
1125intel_dp_connector_unregister(struct intel_connector *intel_connector)
1126{
1127 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1128
1129 if (!intel_connector->mst_port)
1130 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1131 intel_dp->aux.ddc.dev.kobj.name);
1132 intel_connector_unregister(intel_connector);
1133}
1134
1135static void
1136skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1137{
1138 u32 ctrl1;
1139
1140 memset(&pipe_config->dpll_hw_state, 0,
1141 sizeof(pipe_config->dpll_hw_state));
1142
1143 pipe_config->ddi_pll_sel = SKL_DPLL0;
1144 pipe_config->dpll_hw_state.cfgcr1 = 0;
1145 pipe_config->dpll_hw_state.cfgcr2 = 0;
1146
1147 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1148 switch (pipe_config->port_clock / 2) {
1149 case 81000:
1150 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1151 SKL_DPLL0);
1152 break;
1153 case 135000:
1154 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1155 SKL_DPLL0);
1156 break;
1157 case 270000:
1158 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1159 SKL_DPLL0);
1160 break;
1161 case 162000:
1162 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1163 SKL_DPLL0);
1164 break;
1165 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640 MHz,
1166 which results in a CDCLK change. Need to handle the CDCLK change by
1167 disabling pipes and re-enabling them */
1168 case 108000:
1169 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1170 SKL_DPLL0);
1171 break;
1172 case 216000:
1173 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1174 SKL_DPLL0);
1175 break;
1176
1177 }
1178 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1179}
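
/*
 * The switch above is keyed on half the port clock since the DP link is
 * double data rate; e.g. a 270000 (2.7Gbps per lane) port_clock selects
 * DPLL_CTRL1_LINK_RATE_1350, i.e. a 1.35GHz PLL frequency.
 */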
1180
1181static void
1182hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1183{
1184 memset(&pipe_config->dpll_hw_state, 0,
1185 sizeof(pipe_config->dpll_hw_state));
1186
1187 switch (pipe_config->port_clock / 2) {
1188 case 81000:
1189 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1190 break;
1191 case 135000:
1192 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1193 break;
1194 case 270000:
1195 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1196 break;
1197 }
1198}
1199
1200static int
1201intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1202{
1203 if (intel_dp->num_sink_rates) {
1204 *sink_rates = intel_dp->sink_rates;
1205 return intel_dp->num_sink_rates;
1206 }
1207
1208 *sink_rates = default_rates;
1209
1210 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1211}
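
/*
 * The shift above maps the legacy bw codes onto prefix lengths of
 * default_rates: DP_LINK_BW_1_62 (0x06) -> 1 rate, DP_LINK_BW_2_7
 * (0x0a) -> 2 rates, DP_LINK_BW_5_4 (0x14) -> all 3 rates.
 */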
1212
1213static int
1214intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
1215{
1216 if (IS_BROXTON(dev)) {
1217 *source_rates = bxt_rates;
1218 return ARRAY_SIZE(bxt_rates);
1219 } else if (IS_SKYLAKE(dev)) {
1220 *source_rates = skl_rates;
1221 return ARRAY_SIZE(skl_rates);
1222 } else if (IS_CHERRYVIEW(dev)) {
1223 *source_rates = chv_rates;
1224 return ARRAY_SIZE(chv_rates);
1225 }
1226
1227 *source_rates = default_rates;
1228
1229 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1230 /* WaDisableHBR2:skl */
1231 return (DP_LINK_BW_2_7 >> 3) + 1;
1232 else if (INTEL_INFO(dev)->gen >= 8 ||
1233 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1234 return (DP_LINK_BW_5_4 >> 3) + 1;
1235 else
1236 return (DP_LINK_BW_2_7 >> 3) + 1;
1237}
1238
1239static void
1240intel_dp_set_clock(struct intel_encoder *encoder,
1241 struct intel_crtc_state *pipe_config)
1242{
1243 struct drm_device *dev = encoder->base.dev;
1244 const struct dp_link_dpll *divisor = NULL;
1245 int i, count = 0;
1246
1247 if (IS_G4X(dev)) {
1248 divisor = gen4_dpll;
1249 count = ARRAY_SIZE(gen4_dpll);
1250 } else if (HAS_PCH_SPLIT(dev)) {
1251 divisor = pch_dpll;
1252 count = ARRAY_SIZE(pch_dpll);
1253 } else if (IS_CHERRYVIEW(dev)) {
1254 divisor = chv_dpll;
1255 count = ARRAY_SIZE(chv_dpll);
1256 } else if (IS_VALLEYVIEW(dev)) {
1257 divisor = vlv_dpll;
1258 count = ARRAY_SIZE(vlv_dpll);
1259 }
1260
1261 if (divisor && count) {
1262 for (i = 0; i < count; i++) {
1263 if (pipe_config->port_clock == divisor[i].clock) {
1264 pipe_config->dpll = divisor[i].dpll;
1265 pipe_config->clock_set = true;
1266 break;
1267 }
1268 }
1269 }
1270}
1271
1272static int intersect_rates(const int *source_rates, int source_len,
1273 const int *sink_rates, int sink_len,
1274 int *common_rates)
1275{
1276 int i = 0, j = 0, k = 0;
1277
1278 while (i < source_len && j < sink_len) {
1279 if (source_rates[i] == sink_rates[j]) {
1280 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1281 return k;
1282 common_rates[k] = source_rates[i];
1283 ++k;
1284 ++i;
1285 ++j;
1286 } else if (source_rates[i] < sink_rates[j]) {
1287 ++i;
1288 } else {
1289 ++j;
1290 }
1291 }
1292 return k;
1293}
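
/*
 * Both input arrays must be sorted in ascending order for this merge to
 * work; e.g. intersecting default_rates with skl_rates yields
 * { 162000, 270000, 540000 }.
 */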
1294
1295static int intel_dp_common_rates(struct intel_dp *intel_dp,
1296 int *common_rates)
1297{
1298 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1299 const int *source_rates, *sink_rates;
1300 int source_len, sink_len;
1301
1302 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1303 source_len = intel_dp_source_rates(dev, &source_rates);
1304
1305 return intersect_rates(source_rates, source_len,
1306 sink_rates, sink_len,
1307 common_rates);
1308}
1309
1310static void snprintf_int_array(char *str, size_t len,
1311 const int *array, int nelem)
1312{
1313 int i;
1314
1315 str[0] = '\0';
1316
1317 for (i = 0; i < nelem; i++) {
1318 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1319 if (r >= len)
1320 return;
1321 str += r;
1322 len -= r;
1323 }
1324}
1325
1326static void intel_dp_print_rates(struct intel_dp *intel_dp)
1327{
1328 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1329 const int *source_rates, *sink_rates;
1330 int source_len, sink_len, common_len;
1331 int common_rates[DP_MAX_SUPPORTED_RATES];
1332 char str[128]; /* FIXME: too big for stack? */
1333
1334 if ((drm_debug & DRM_UT_KMS) == 0)
1335 return;
1336
1337 source_len = intel_dp_source_rates(dev, &source_rates);
1338 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1339 DRM_DEBUG_KMS("source rates: %s\n", str);
1340
1341 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1342 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1343 DRM_DEBUG_KMS("sink rates: %s\n", str);
1344
1345 common_len = intel_dp_common_rates(intel_dp, common_rates);
1346 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1347 DRM_DEBUG_KMS("common rates: %s\n", str);
1348}
1349
1350static int rate_to_index(int find, const int *rates)
1351{
1352 int i = 0;
1353
1354 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1355 if (find == rates[i])
1356 break;
1357
1358 return i;
1359}
1360
1361int
1362intel_dp_max_link_rate(struct intel_dp *intel_dp)
1363{
1364 int rates[DP_MAX_SUPPORTED_RATES] = {};
1365 int len;
1366
1367 len = intel_dp_common_rates(intel_dp, rates);
1368 if (WARN_ON(len <= 0))
1369 return 162000;
1370
1371 return rates[rate_to_index(0, rates) - 1];
1372}
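
/*
 * Note that rates[] above is zero-initialized, so rate_to_index(0, rates)
 * finds the first unused slot (or DP_MAX_SUPPORTED_RATES when the array
 * is full); the entry just before it is the highest common rate, since
 * the common rates are sorted in ascending order.
 */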
1373
1374int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1375{
1376 return rate_to_index(rate, intel_dp->sink_rates);
1377}
1378
1379static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1380 uint8_t *link_bw, uint8_t *rate_select)
1381{
1382 if (intel_dp->num_sink_rates) {
1383 *link_bw = 0;
1384 *rate_select =
1385 intel_dp_rate_select(intel_dp, port_clock);
1386 } else {
1387 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1388 *rate_select = 0;
1389 }
1390}
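
/*
 * E.g. for a sink without a DPCD rate table, a port_clock of 270000
 * yields link_bw == DP_LINK_BW_2_7 (0x0a, i.e. 270000 / 27000) with
 * rate_select == 0; eDP 1.4 sinks with a rate table use the table index
 * in rate_select instead, with link_bw == 0.
 */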
1391
1392bool
1393intel_dp_compute_config(struct intel_encoder *encoder,
1394 struct intel_crtc_state *pipe_config)
1395{
1396 struct drm_device *dev = encoder->base.dev;
1397 struct drm_i915_private *dev_priv = dev->dev_private;
1398 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1400 enum port port = dp_to_dig_port(intel_dp)->port;
1401 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1402 struct intel_connector *intel_connector = intel_dp->attached_connector;
1403 int lane_count, clock;
1404 int min_lane_count = 1;
1405 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1406 /* Conveniently, the link BW constants become indices with a shift... */
1407 int min_clock = 0;
1408 int max_clock;
1409 int bpp, mode_rate;
1410 int link_avail, link_clock;
1411 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1412 int common_len;
1413 uint8_t link_bw, rate_select;
1414
1415 common_len = intel_dp_common_rates(intel_dp, common_rates);
1416
1417 /* No common link rates between source and sink */
1418 WARN_ON(common_len <= 0);
1419
1420 max_clock = common_len - 1;
1421
1422 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1423 pipe_config->has_pch_encoder = true;
1424
1425 pipe_config->has_dp_encoder = true;
1426 pipe_config->has_drrs = false;
1427 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1428
1429 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1430 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1431 adjusted_mode);
1432
1433 if (INTEL_INFO(dev)->gen >= 9) {
1434 int ret;
1435 ret = skl_update_scaler_crtc(pipe_config);
1436 if (ret)
1437 return false; /* this function returns bool, not an errno */
1438 }
1439
1440 if (!HAS_PCH_SPLIT(dev))
1441 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1442 intel_connector->panel.fitting_mode);
1443 else
1444 intel_pch_panel_fitting(intel_crtc, pipe_config,
1445 intel_connector->panel.fitting_mode);
1446 }
1447
1448 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1449 return false;
1450
1451 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1452 "max bw %d pixel clock %iKHz\n",
1453 max_lane_count, common_rates[max_clock],
1454 adjusted_mode->crtc_clock);
1455
1456 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1457 * bpc in between. */
1458 bpp = pipe_config->pipe_bpp;
1459 if (is_edp(intel_dp)) {
1460
1461 /* Get bpp from VBT only for panels that don't have bpp in EDID */
1462 if (intel_connector->base.display_info.bpc == 0 &&
1463 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1464 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1465 dev_priv->vbt.edp_bpp);
1466 bpp = dev_priv->vbt.edp_bpp;
1467 }
1468
1469 /*
1470 * Use the maximum clock and number of lanes the eDP panel
1471 * advertises being capable of. The panels are generally
1472 * designed to support only a single clock and lane
1473 * configuration, and typically these values correspond to the
1474 * native resolution of the panel.
1475 */
1476 min_lane_count = max_lane_count;
1477 min_clock = max_clock;
1478 }
1479
1480 for (; bpp >= 6*3; bpp -= 2*3) {
1481 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1482 bpp);
1483
1484 for (clock = min_clock; clock <= max_clock; clock++) {
1485 for (lane_count = min_lane_count;
1486 lane_count <= max_lane_count;
1487 lane_count <<= 1) {
1488
1489 link_clock = common_rates[clock];
1490 link_avail = intel_dp_max_data_rate(link_clock,
1491 lane_count);
1492
1493 if (mode_rate <= link_avail) {
1494 goto found;
1495 }
1496 }
1497 }
1498 }
1499
1500 return false;
1501
1502found:
1503 if (intel_dp->color_range_auto) {
1504 /*
1505 * See:
1506 * CEA-861-E - 5.1 Default Encoding Parameters
1507 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1508 */
1509 pipe_config->limited_color_range =
1510 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1511 } else {
1512 pipe_config->limited_color_range =
1513 intel_dp->limited_color_range;
1514 }
1515
1516 pipe_config->lane_count = lane_count;
1517
1518 pipe_config->pipe_bpp = bpp;
1519 pipe_config->port_clock = common_rates[clock];
1520
1521 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1522 &link_bw, &rate_select);
1523
1524 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1525 link_bw, rate_select, pipe_config->lane_count,
1526 pipe_config->port_clock, bpp);
1527 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1528 mode_rate, link_avail);
1529
1530 intel_link_compute_m_n(bpp, lane_count,
1531 adjusted_mode->crtc_clock,
1532 pipe_config->port_clock,
1533 &pipe_config->dp_m_n);
1534
1535 if (intel_connector->panel.downclock_mode != NULL &&
1536 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1537 pipe_config->has_drrs = true;
1538 intel_link_compute_m_n(bpp, lane_count,
1539 intel_connector->panel.downclock_mode->clock,
1540 pipe_config->port_clock,
1541 &pipe_config->dp_m2_n2);
1542 }
1543
1544 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1545 skl_edp_set_pll_config(pipe_config);
1546 else if (IS_BROXTON(dev))
1547 /* handled in ddi */;
1548 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1549 hsw_dp_set_ddi_pll_sel(pipe_config);
1550 else
1551 intel_dp_set_clock(encoder, pipe_config);
1552
1553 return true;
1554}
1555
1556static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
1557{
1558 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1559 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1560 struct drm_device *dev = crtc->base.dev;
1561 struct drm_i915_private *dev_priv = dev->dev_private;
1562 u32 dpa_ctl;
1563
1564 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1565 crtc->config->port_clock);
1566 dpa_ctl = I915_READ(DP_A);
1567 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1568
1569 if (crtc->config->port_clock == 162000) {
1570 /* For a long time we've carried around an ILK-DevA w/a for the
1571 * 160MHz clock. If we're really unlucky, it's still required.
1572 */
1573 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
1574 dpa_ctl |= DP_PLL_FREQ_160MHZ;
1575 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
1576 } else {
1577 dpa_ctl |= DP_PLL_FREQ_270MHZ;
1578 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
1579 }
1580
1581 I915_WRITE(DP_A, dpa_ctl);
1582
1583 POSTING_READ(DP_A);
1584 udelay(500);
1585}
1586
1587void intel_dp_set_link_params(struct intel_dp *intel_dp,
1588 const struct intel_crtc_state *pipe_config)
1589{
1590 intel_dp->link_rate = pipe_config->port_clock;
1591 intel_dp->lane_count = pipe_config->lane_count;
1592}
1593
1594static void intel_dp_prepare(struct intel_encoder *encoder)
1595{
1596 struct drm_device *dev = encoder->base.dev;
1597 struct drm_i915_private *dev_priv = dev->dev_private;
1598 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1599 enum port port = dp_to_dig_port(intel_dp)->port;
1600 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1601 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1602
1603 intel_dp_set_link_params(intel_dp, crtc->config);
1604
1605 /*
1606 * There are four kinds of DP registers:
1607 *
1608 * IBX PCH
1609 * SNB CPU
1610 * IVB CPU
1611 * CPT PCH
1612 *
1613 * IBX PCH and CPU are the same for almost everything,
1614 * except that the CPU DP PLL is configured in this
1615 * register
1616 *
1617 * CPT PCH is quite different, having many bits moved
1618 * to the TRANS_DP_CTL register instead. That
1619 * configuration happens (oddly) in ironlake_pch_enable
1620 */
1621
1622 /* Preserve the BIOS-computed detected bit. This is
1623 * supposed to be read-only.
1624 */
1625 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1626
1627 /* Handle DP bits in common between all three register formats */
1628 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1629 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1630
1631 if (crtc->config->has_audio)
1632 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1633
1634 /* Split out the IBX/CPU vs CPT settings */
1635
1636 if (IS_GEN7(dev) && port == PORT_A) {
1637 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1638 intel_dp->DP |= DP_SYNC_HS_HIGH;
1639 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1640 intel_dp->DP |= DP_SYNC_VS_HIGH;
1641 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1642
1643 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1644 intel_dp->DP |= DP_ENHANCED_FRAMING;
1645
1646 intel_dp->DP |= crtc->pipe << 29;
1647 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1648 u32 trans_dp;
1649
1650 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1651
1652 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1653 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1654 trans_dp |= TRANS_DP_ENH_FRAMING;
1655 else
1656 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1657 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1658 } else {
1659 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1660 crtc->config->limited_color_range)
1661 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1662
1663 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1664 intel_dp->DP |= DP_SYNC_HS_HIGH;
1665 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1666 intel_dp->DP |= DP_SYNC_VS_HIGH;
1667 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1668
1669 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1670 intel_dp->DP |= DP_ENHANCED_FRAMING;
1671
1672 if (IS_CHERRYVIEW(dev))
1673 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1674 else if (crtc->pipe == PIPE_B)
1675 intel_dp->DP |= DP_PIPEB_SELECT;
1676 }
1677}
1678
1679#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1680#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1681
1682#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1683#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1684
1685#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1686#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1687
1688static void wait_panel_status(struct intel_dp *intel_dp,
1689 u32 mask,
1690 u32 value)
1691{
1692 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 u32 pp_stat_reg, pp_ctrl_reg;
1695
1696 lockdep_assert_held(&dev_priv->pps_mutex);
1697
1698 pp_stat_reg = _pp_stat_reg(intel_dp);
1699 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1700
1701 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1702 mask, value,
1703 I915_READ(pp_stat_reg),
1704 I915_READ(pp_ctrl_reg));
1705
1706 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1707 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1708 I915_READ(pp_stat_reg),
1709 I915_READ(pp_ctrl_reg));
1710 }
1711
1712 DRM_DEBUG_KMS("Wait complete\n");
1713}
1714
1715static void wait_panel_on(struct intel_dp *intel_dp)
1716{
1717 DRM_DEBUG_KMS("Wait for panel power on\n");
1718 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1719}
1720
1721static void wait_panel_off(struct intel_dp *intel_dp)
1722{
1723 DRM_DEBUG_KMS("Wait for panel power off time\n");
1724 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1725}
1726
1727static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1728{
1729 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1730
1731 /* If we disabled the VDD override bit last, we have to do the
1732 * manual wait. */
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1734 intel_dp->panel_power_cycle_delay);
1735
1736 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1737}
1738
1739static void wait_backlight_on(struct intel_dp *intel_dp)
1740{
1741 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1742 intel_dp->backlight_on_delay);
1743}
1744
1745static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1746{
1747 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1748 intel_dp->backlight_off_delay);
1749}
1750
1751/* Read the current pp_control value, unlocking the register if it
1752 * is locked
1753 */
1754
1755static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1756{
1757 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1758 struct drm_i915_private *dev_priv = dev->dev_private;
1759 u32 control;
1760
1761 lockdep_assert_held(&dev_priv->pps_mutex);
1762
1763 control = I915_READ(_pp_ctrl_reg(intel_dp));
1764 if (!IS_BROXTON(dev)) {
1765 control &= ~PANEL_UNLOCK_MASK;
1766 control |= PANEL_UNLOCK_REGS;
1767 }
1768 return control;
1769}
1770
1771/*
1772 * Must be paired with edp_panel_vdd_off().
1773 * Must hold pps_mutex around the whole on/off sequence.
1774 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1775 */
1776static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1777{
1778 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1779 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1780 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1781 struct drm_i915_private *dev_priv = dev->dev_private;
1782 enum intel_display_power_domain power_domain;
1783 u32 pp;
1784 u32 pp_stat_reg, pp_ctrl_reg;
1785 bool need_to_disable = !intel_dp->want_panel_vdd;
1786
1787 lockdep_assert_held(&dev_priv->pps_mutex);
1788
1789 if (!is_edp(intel_dp))
1790 return false;
1791
1792 cancel_delayed_work(&intel_dp->panel_vdd_work);
1793 intel_dp->want_panel_vdd = true;
1794
1795 if (edp_have_panel_vdd(intel_dp))
1796 return need_to_disable;
1797
1798 power_domain = intel_display_port_power_domain(intel_encoder);
1799 intel_display_power_get(dev_priv, power_domain);
1800
1801 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1802 port_name(intel_dig_port->port));
1803
1804 if (!edp_have_panel_power(intel_dp))
1805 wait_panel_power_cycle(intel_dp);
1806
1807 pp = ironlake_get_pp_control(intel_dp);
1808 pp |= EDP_FORCE_VDD;
1809
1810 pp_stat_reg = _pp_stat_reg(intel_dp);
1811 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1812
1813 I915_WRITE(pp_ctrl_reg, pp);
1814 POSTING_READ(pp_ctrl_reg);
1815 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1816 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1817 /*
1818 * If the panel wasn't on, delay before accessing aux channel
1819 */
1820 if (!edp_have_panel_power(intel_dp)) {
1821 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1822 port_name(intel_dig_port->port));
1823 msleep(intel_dp->panel_power_up_delay);
1824 }
1825
1826 return need_to_disable;
1827}
1828
1829/*
1830 * Must be paired with intel_edp_panel_vdd_off() or
1831 * intel_edp_panel_off().
1832 * Nested calls to these functions are not allowed since
1833 * we drop the lock. Caller must use some higher level
1834 * locking to prevent nested calls from other threads.
1835 */
1836void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1837{
1838 bool vdd;
1839
1840 if (!is_edp(intel_dp))
1841 return;
1842
1843 pps_lock(intel_dp);
1844 vdd = edp_panel_vdd_on(intel_dp);
1845 pps_unlock(intel_dp);
1846
1847 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1848 port_name(dp_to_dig_port(intel_dp)->port));
1849}
1850
1851static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1852{
1853 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1854 struct drm_i915_private *dev_priv = dev->dev_private;
1855 struct intel_digital_port *intel_dig_port =
1856 dp_to_dig_port(intel_dp);
1857 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1858 enum intel_display_power_domain power_domain;
1859 u32 pp;
1860 u32 pp_stat_reg, pp_ctrl_reg;
1861
1862 lockdep_assert_held(&dev_priv->pps_mutex);
1863
1864 WARN_ON(intel_dp->want_panel_vdd);
1865
1866 if (!edp_have_panel_vdd(intel_dp))
1867 return;
1868
1869 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1870 port_name(intel_dig_port->port));
1871
1872 pp = ironlake_get_pp_control(intel_dp);
1873 pp &= ~EDP_FORCE_VDD;
1874
1875 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1876 pp_stat_reg = _pp_stat_reg(intel_dp);
1877
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
1880
1881 /* Make sure sequencer is idle before allowing subsequent activity */
1882 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1883 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1884
1885 if ((pp & POWER_TARGET_ON) == 0)
1886 intel_dp->last_power_cycle = jiffies;
1887
1888 power_domain = intel_display_port_power_domain(intel_encoder);
1889 intel_display_power_put(dev_priv, power_domain);
1890}
1891
1892static void edp_panel_vdd_work(struct work_struct *__work)
1893{
1894 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1895 struct intel_dp, panel_vdd_work);
1896
1897 pps_lock(intel_dp);
1898 if (!intel_dp->want_panel_vdd)
1899 edp_panel_vdd_off_sync(intel_dp);
1900 pps_unlock(intel_dp);
1901}
1902
1903static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1904{
1905 unsigned long delay;
1906
1907 /*
1908 * Queue the timer to fire a long time from now (relative to the power
1909 * down delay) to keep the panel power up across a sequence of
1910 * operations.
1911 */
1912 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1913 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1914}
1915
1916/*
1917 * Must be paired with edp_panel_vdd_on().
1918 * Must hold pps_mutex around the whole on/off sequence.
1919 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1920 */
1921static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1922{
1923 struct drm_i915_private *dev_priv =
1924 intel_dp_to_dev(intel_dp)->dev_private;
1925
1926 lockdep_assert_held(&dev_priv->pps_mutex);
1927
1928 if (!is_edp(intel_dp))
1929 return;
1930
1931 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1932 port_name(dp_to_dig_port(intel_dp)->port));
1933
1934 intel_dp->want_panel_vdd = false;
1935
1936 if (sync)
1937 edp_panel_vdd_off_sync(intel_dp);
1938 else
1939 edp_panel_vdd_schedule_off(intel_dp);
1940}
1941
1942static void edp_panel_on(struct intel_dp *intel_dp)
1943{
1944 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1945 struct drm_i915_private *dev_priv = dev->dev_private;
1946 u32 pp;
1947 u32 pp_ctrl_reg;
1948
1949 lockdep_assert_held(&dev_priv->pps_mutex);
1950
1951 if (!is_edp(intel_dp))
1952 return;
1953
1954 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1955 port_name(dp_to_dig_port(intel_dp)->port));
1956
1957 if (WARN(edp_have_panel_power(intel_dp),
1958 "eDP port %c panel power already on\n",
1959 port_name(dp_to_dig_port(intel_dp)->port)))
1960 return;
1961
1962 wait_panel_power_cycle(intel_dp);
1963
1964 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1965 pp = ironlake_get_pp_control(intel_dp);
1966 if (IS_GEN5(dev)) {
1967 /* ILK workaround: disable reset around power sequence */
1968 pp &= ~PANEL_POWER_RESET;
1969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
1971 }
1972
1973 pp |= POWER_TARGET_ON;
1974 if (!IS_GEN5(dev))
1975 pp |= PANEL_POWER_RESET;
1976
1977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
1979
1980 wait_panel_on(intel_dp);
1981 intel_dp->last_power_on = jiffies;
1982
1983 if (IS_GEN5(dev)) {
1984 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1985 I915_WRITE(pp_ctrl_reg, pp);
1986 POSTING_READ(pp_ctrl_reg);
1987 }
1988}
1989
1990void intel_edp_panel_on(struct intel_dp *intel_dp)
1991{
1992 if (!is_edp(intel_dp))
1993 return;
1994
1995 pps_lock(intel_dp);
1996 edp_panel_on(intel_dp);
1997 pps_unlock(intel_dp);
1998}
1999
2001static void edp_panel_off(struct intel_dp *intel_dp)
2002{
2003 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2004 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2006 struct drm_i915_private *dev_priv = dev->dev_private;
2007 enum intel_display_power_domain power_domain;
2008 u32 pp;
2009 u32 pp_ctrl_reg;
2010
2011 lockdep_assert_held(&dev_priv->pps_mutex);
2012
2013 if (!is_edp(intel_dp))
2014 return;
2015
2016 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2017 port_name(dp_to_dig_port(intel_dp)->port));
2018
2019 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2020 port_name(dp_to_dig_port(intel_dp)->port));
2021
2022 pp = ironlake_get_pp_control(intel_dp);
2023 /* We need to switch off panel power _and_ force vdd, for otherwise some
2024 * panels get very unhappy and cease to work. */
2025 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2026 EDP_BLC_ENABLE);
2027
2028 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2029
2030 intel_dp->want_panel_vdd = false;
2031
2032 I915_WRITE(pp_ctrl_reg, pp);
2033 POSTING_READ(pp_ctrl_reg);
2034
2035 intel_dp->last_power_cycle = jiffies;
2036 wait_panel_off(intel_dp);
2037
2038 /* We got a reference when we enabled the VDD. */
2039 power_domain = intel_display_port_power_domain(intel_encoder);
2040 intel_display_power_put(dev_priv, power_domain);
2041}
2042
2043void intel_edp_panel_off(struct intel_dp *intel_dp)
2044{
2045 if (!is_edp(intel_dp))
2046 return;
2047
2048 pps_lock(intel_dp);
2049 edp_panel_off(intel_dp);
2050 pps_unlock(intel_dp);
2051}
2052
2053/* Enable backlight in the panel power control. */
2054static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2055{
2056 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2057 struct drm_device *dev = intel_dig_port->base.base.dev;
2058 struct drm_i915_private *dev_priv = dev->dev_private;
2059 u32 pp;
2060 u32 pp_ctrl_reg;
2061
2062 /*
2063 * If we enable the backlight right away following a panel power
2064 * on, we may see slight flicker as the panel syncs with the eDP
2065 * link. So delay a bit to make sure the image is solid before
2066 * allowing it to appear.
2067 */
2068 wait_backlight_on(intel_dp);
2069
2070 pps_lock(intel_dp);
2071
2072 pp = ironlake_get_pp_control(intel_dp);
2073 pp |= EDP_BLC_ENABLE;
2074
2075 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2076
2077 I915_WRITE(pp_ctrl_reg, pp);
2078 POSTING_READ(pp_ctrl_reg);
2079
2080 pps_unlock(intel_dp);
2081}
2082
2083/* Enable backlight PWM and backlight PP control. */
2084void intel_edp_backlight_on(struct intel_dp *intel_dp)
2085{
2086 if (!is_edp(intel_dp))
2087 return;
2088
2089 DRM_DEBUG_KMS("\n");
2090
2091 intel_panel_enable_backlight(intel_dp->attached_connector);
2092 _intel_edp_backlight_on(intel_dp);
2093}
2094
2095/* Disable backlight in the panel power control. */
2096static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2097{
2098 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 u32 pp;
2101 u32 pp_ctrl_reg;
2102
2103 if (!is_edp(intel_dp))
2104 return;
2105
2106 pps_lock(intel_dp);
2107
2108 pp = ironlake_get_pp_control(intel_dp);
2109 pp &= ~EDP_BLC_ENABLE;
2110
2111 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2112
2113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
2115
2116 pps_unlock(intel_dp);
2117
2118 intel_dp->last_backlight_off = jiffies;
2119 edp_wait_backlight_off(intel_dp);
2120}
2121
2122/* Disable backlight PP control and backlight PWM. */
2123void intel_edp_backlight_off(struct intel_dp *intel_dp)
2124{
2125 if (!is_edp(intel_dp))
2126 return;
2127
2128 DRM_DEBUG_KMS("\n");
2129
2130 _intel_edp_backlight_off(intel_dp);
2131 intel_panel_disable_backlight(intel_dp->attached_connector);
2132}
2133
2134/*
2135 * Hook for controlling the panel power control backlight through the bl_power
2136 * sysfs attribute. Take care to handle multiple calls.
2137 */
2138static void intel_edp_backlight_power(struct intel_connector *connector,
2139 bool enable)
2140{
2141 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2142 bool is_enabled;
2143
2144 pps_lock(intel_dp);
2145 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2146 pps_unlock(intel_dp);
2147
2148 if (is_enabled == enable)
2149 return;
2150
2151 DRM_DEBUG_KMS("panel power control backlight %s\n",
2152 enable ? "enable" : "disable");
2153
2154 if (enable)
2155 _intel_edp_backlight_on(intel_dp);
2156 else
2157 _intel_edp_backlight_off(intel_dp);
2158}
2159
2160static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2161{
2162 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2163 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2164 struct drm_device *dev = crtc->dev;
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 u32 dpa_ctl;
2167
2168 assert_pipe_disabled(dev_priv,
2169 to_intel_crtc(crtc)->pipe);
2170
2171 DRM_DEBUG_KMS("\n");
2172 dpa_ctl = I915_READ(DP_A);
2173 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2174 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2175
2176 /* We don't adjust intel_dp->DP while tearing down the link, to
2177 * facilitate link retraining (e.g. after hotplug). Hence clear all
2178 * enable bits here to ensure that we don't enable too much. */
2179 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2180 intel_dp->DP |= DP_PLL_ENABLE;
2181 I915_WRITE(DP_A, intel_dp->DP);
2182 POSTING_READ(DP_A);
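	/* Give the eDP PLL time to lock before continuing. */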
2183 udelay(200);
2184}
2185
2186static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2187{
2188 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2189 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2190 struct drm_device *dev = crtc->dev;
2191 struct drm_i915_private *dev_priv = dev->dev_private;
2192 u32 dpa_ctl;
2193
2194 assert_pipe_disabled(dev_priv,
2195 to_intel_crtc(crtc)->pipe);
2196
2197 dpa_ctl = I915_READ(DP_A);
2198 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2199 "dp pll off, should be on\n");
2200 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2201
2202 /* We can't rely on the value tracked for the DP register in
2203 * intel_dp->DP because link_down must not change that (otherwise link
2204 * re-training will fail). */
2205 dpa_ctl &= ~DP_PLL_ENABLE;
2206 I915_WRITE(DP_A, dpa_ctl);
2207 POSTING_READ(DP_A);
2208 udelay(200);
2209}
2210
2211/* If the sink supports it, try to set the power state appropriately */
2212void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2213{
2214 int ret, i;
2215
2216 /* Should have a valid DPCD by this point */
2217 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2218 return;
2219
2220 if (mode != DRM_MODE_DPMS_ON) {
2221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D3);
2223 } else {
2224 /*
2225 * When turning on, we need to retry for 1ms to give the sink
2226 * time to wake up.
2227 */
2228 for (i = 0; i < 3; i++) {
2229 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2230 DP_SET_POWER_D0);
2231 if (ret == 1)
2232 break;
2233 msleep(1);
2234 }
2235 }
2236
2237 if (ret != 1)
2238 DRM_DEBUG_KMS("failed to %s sink power state\n",
2239 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2240}
2241
2242static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2243 enum pipe *pipe)
2244{
2245 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2246 enum port port = dp_to_dig_port(intel_dp)->port;
2247 struct drm_device *dev = encoder->base.dev;
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2249 enum intel_display_power_domain power_domain;
2250 u32 tmp;
2251
2252 power_domain = intel_display_port_power_domain(encoder);
2253 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2254 return false;
2255
2256 tmp = I915_READ(intel_dp->output_reg);
2257
2258 if (!(tmp & DP_PORT_EN))
2259 return false;
2260
2261 if (IS_GEN7(dev) && port == PORT_A) {
2262 *pipe = PORT_TO_PIPE_CPT(tmp);
2263 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2264 enum pipe p;
2265
2266 for_each_pipe(dev_priv, p) {
2267 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2268 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2269 *pipe = p;
2270 return true;
2271 }
2272 }
2273
2274 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2275 intel_dp->output_reg);
2276 } else if (IS_CHERRYVIEW(dev)) {
2277 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2278 } else {
2279 *pipe = PORT_TO_PIPE(tmp);
2280 }
2281
2282 return true;
2283}
2284
2285static void intel_dp_get_config(struct intel_encoder *encoder,
2286 struct intel_crtc_state *pipe_config)
2287{
2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2289 u32 tmp, flags = 0;
2290 struct drm_device *dev = encoder->base.dev;
2291 struct drm_i915_private *dev_priv = dev->dev_private;
2292 enum port port = dp_to_dig_port(intel_dp)->port;
2293 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2294 int dotclock;
2295
2296 tmp = I915_READ(intel_dp->output_reg);
2297
2298 pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
2299
2300 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2301 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2302
2303 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2304 flags |= DRM_MODE_FLAG_PHSYNC;
2305 else
2306 flags |= DRM_MODE_FLAG_NHSYNC;
2307
2308 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2309 flags |= DRM_MODE_FLAG_PVSYNC;
2310 else
2311 flags |= DRM_MODE_FLAG_NVSYNC;
2312 } else {
2313 if (tmp & DP_SYNC_HS_HIGH)
2314 flags |= DRM_MODE_FLAG_PHSYNC;
2315 else
2316 flags |= DRM_MODE_FLAG_NHSYNC;
2317
2318 if (tmp & DP_SYNC_VS_HIGH)
2319 flags |= DRM_MODE_FLAG_PVSYNC;
2320 else
2321 flags |= DRM_MODE_FLAG_NVSYNC;
2322 }
2323
2324 pipe_config->base.adjusted_mode.flags |= flags;
2325
2326 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2327 tmp & DP_COLOR_RANGE_16_235)
2328 pipe_config->limited_color_range = true;
2329
2330 pipe_config->has_dp_encoder = true;
2331
2332 pipe_config->lane_count =
2333 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2334
2335 intel_dp_get_m_n(crtc, pipe_config);
2336
2337 if (port == PORT_A) {
2338 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2339 pipe_config->port_clock = 162000;
2340 else
2341 pipe_config->port_clock = 270000;
2342 }
2343
2344 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2345 &pipe_config->dp_m_n);
2346
2347 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2348 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2349
2350 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2351
2352 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2353 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2354 /*
2355 * This is a big fat ugly hack.
2356 *
2357 * Some machines in UEFI boot mode provide us a VBT that has 18
2358 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2359 * unknown we fail to light up. Yet the same BIOS boots up with
2360 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2361 * max, not what it tells us to use.
2362 *
2363 * Note: This will still be broken if the eDP panel is not lit
2364 * up by the BIOS, and thus we can't get the mode at module
2365 * load.
2366 */
2367 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2368 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2369 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2370 }
2371}
2372
2373static void intel_disable_dp(struct intel_encoder *encoder)
2374{
2375 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2376 struct drm_device *dev = encoder->base.dev;
2377 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2378
2379 if (crtc->config->has_audio)
2380 intel_audio_codec_disable(encoder);
2381
2382 if (HAS_PSR(dev) && !HAS_DDI(dev))
2383 intel_psr_disable(intel_dp);
2384
2385 /* Make sure the panel is off before trying to change the mode. But also
2386 * ensure that we have vdd while we switch off the panel. */
2387 intel_edp_panel_vdd_on(intel_dp);
2388 intel_edp_backlight_off(intel_dp);
2389 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2390 intel_edp_panel_off(intel_dp);
2391
2392 /* disable the port before the pipe on g4x */
2393 if (INTEL_INFO(dev)->gen < 5)
2394 intel_dp_link_down(intel_dp);
2395}
2396
2397static void ilk_post_disable_dp(struct intel_encoder *encoder)
2398{
2399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2400 enum port port = dp_to_dig_port(intel_dp)->port;
2401
2402 intel_dp_link_down(intel_dp);
2403 if (port == PORT_A)
2404 ironlake_edp_pll_off(intel_dp);
2405}
2406
2407static void vlv_post_disable_dp(struct intel_encoder *encoder)
2408{
2409 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2410
2411 intel_dp_link_down(intel_dp);
2412}
2413
2414static void chv_post_disable_dp(struct intel_encoder *encoder)
2415{
2416 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2417 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2418 struct drm_device *dev = encoder->base.dev;
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420 struct intel_crtc *intel_crtc =
2421 to_intel_crtc(encoder->base.crtc);
2422 enum dpio_channel ch = vlv_dport_to_channel(dport);
2423 enum pipe pipe = intel_crtc->pipe;
2424 u32 val;
2425
2426 intel_dp_link_down(intel_dp);
2427
2428 mutex_lock(&dev_priv->sb_lock);
2429
2430 /* Propagate soft reset to data lane reset */
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2432 val |= CHV_PCS_REQ_SOFTRESET_EN;
2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2434
2435 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2436 val |= CHV_PCS_REQ_SOFTRESET_EN;
2437 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2438
2439 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2440 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2441 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2442
2443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2444 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2446
2447 mutex_unlock(&dev_priv->sb_lock);
2448}
2449
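/*
 * Program the requested training pattern into the source. Three register
 * layouts exist: DDI platforms program DP_TP_CTL; gen7 CPU eDP (port A)
 * and CPT/PPT PCH ports use the _CPT link training bits in the port
 * register; everything else uses the legacy bits (with a CHV-specific
 * mask on CHV).
 */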
2450static void
2451_intel_dp_set_link_train(struct intel_dp *intel_dp,
2452 uint32_t *DP,
2453 uint8_t dp_train_pat)
2454{
2455 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2456 struct drm_device *dev = intel_dig_port->base.base.dev;
2457 struct drm_i915_private *dev_priv = dev->dev_private;
2458 enum port port = intel_dig_port->port;
2459
2460 if (HAS_DDI(dev)) {
2461 uint32_t temp = I915_READ(DP_TP_CTL(port));
2462
2463 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2464 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2465 else
2466 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2467
2468 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2469 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2470 case DP_TRAINING_PATTERN_DISABLE:
2471 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2473 break;
2474 case DP_TRAINING_PATTERN_1:
2475 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2476 break;
2477 case DP_TRAINING_PATTERN_2:
2478 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2479 break;
2480 case DP_TRAINING_PATTERN_3:
2481 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2482 break;
2483 }
2484 I915_WRITE(DP_TP_CTL(port), temp);
2486 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2487 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2488 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2489
2490 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2491 case DP_TRAINING_PATTERN_DISABLE:
2492 *DP |= DP_LINK_TRAIN_OFF_CPT;
2493 break;
2494 case DP_TRAINING_PATTERN_1:
2495 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2496 break;
2497 case DP_TRAINING_PATTERN_2:
2498 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2499 break;
2500 case DP_TRAINING_PATTERN_3:
2501 DRM_ERROR("DP training pattern 3 not supported\n");
2502 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2503 break;
2504 }
2506 } else {
2507 if (IS_CHERRYVIEW(dev))
2508 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2509 else
2510 *DP &= ~DP_LINK_TRAIN_MASK;
2511
2512 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2513 case DP_TRAINING_PATTERN_DISABLE:
2514 *DP |= DP_LINK_TRAIN_OFF;
2515 break;
2516 case DP_TRAINING_PATTERN_1:
2517 *DP |= DP_LINK_TRAIN_PAT_1;
2518 break;
2519 case DP_TRAINING_PATTERN_2:
2520 *DP |= DP_LINK_TRAIN_PAT_2;
2521 break;
2522 case DP_TRAINING_PATTERN_3:
2523 if (IS_CHERRYVIEW(dev)) {
2524 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2525 } else {
2526 DRM_ERROR("DP training pattern 3 not supported\n");
2527 *DP |= DP_LINK_TRAIN_PAT_2;
2528 }
2529 break;
2530 }
2531 }
2532}
2533
2534static void intel_dp_enable_port(struct intel_dp *intel_dp)
2535{
2536 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2537 struct drm_i915_private *dev_priv = dev->dev_private;
2538
2539 /* enable with pattern 1 (as per spec) */
2540 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2541 DP_TRAINING_PATTERN_1);
2542
2543 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2544 POSTING_READ(intel_dp->output_reg);
2545
2546 /*
2547 * Magic for VLV/CHV. We _must_ first set up the register
2548 * without actually enabling the port, and then do another
2549 * write to enable the port. Otherwise link training will
2550 * fail when the power sequencer is freshly used for this port.
2551 */
2552 intel_dp->DP |= DP_PORT_EN;
2553
2554 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2555 POSTING_READ(intel_dp->output_reg);
2556}
2557
2558static void intel_enable_dp(struct intel_encoder *encoder)
2559{
2560 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2561 struct drm_device *dev = encoder->base.dev;
2562 struct drm_i915_private *dev_priv = dev->dev_private;
2563 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2564 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2565 unsigned int lane_mask = 0x0;
2566
2567 if (WARN_ON(dp_reg & DP_PORT_EN))
2568 return;
2569
2570 pps_lock(intel_dp);
2571
2572 if (IS_VALLEYVIEW(dev))
2573 vlv_init_panel_power_sequencer(intel_dp);
2574
2575 intel_dp_enable_port(intel_dp);
2576
2577 edp_panel_vdd_on(intel_dp);
2578 edp_panel_on(intel_dp);
2579 edp_panel_vdd_off(intel_dp, true);
2580
2581 pps_unlock(intel_dp);
2582
2583 if (IS_VALLEYVIEW(dev))
2584 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2585 lane_mask);
2586
2587 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2588 intel_dp_start_link_train(intel_dp);
2589 intel_dp_complete_link_train(intel_dp);
2590 intel_dp_stop_link_train(intel_dp);
2591
2592 if (crtc->config->has_audio) {
2593 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2594 pipe_name(crtc->pipe));
2595 intel_audio_codec_enable(encoder);
2596 }
2597}
2598
2599static void g4x_enable_dp(struct intel_encoder *encoder)
2600{
2601 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2602
2603 intel_enable_dp(encoder);
2604 intel_edp_backlight_on(intel_dp);
2605}
2606
2607static void vlv_enable_dp(struct intel_encoder *encoder)
2608{
2609 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2610
2611 intel_edp_backlight_on(intel_dp);
2612 intel_psr_enable(intel_dp);
2613}
2614
2615static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2616{
2617 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2618 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2619
2620 intel_dp_prepare(encoder);
2621
2622 /* Only ilk+ has port A */
2623 if (dport->port == PORT_A) {
2624 ironlake_set_pll_cpu_edp(intel_dp);
2625 ironlake_edp_pll_on(intel_dp);
2626 }
2627}
2628
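/*
 * Disconnect the power sequencer currently tracked in intel_dp->pps_pipe
 * from its port: synchronously turn off any VDD we still hold, then clear
 * the PPS port select so the sequencer is no longer associated with this
 * port.
 */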
2629static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2630{
2631 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2632 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2633 enum pipe pipe = intel_dp->pps_pipe;
2634 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2635
2636 edp_panel_vdd_off_sync(intel_dp);
2637
2638 /*
2639 * VLV seems to get confused when multiple power sequencers
2640 * have the same port selected (even if only one has power/vdd
2641 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2642 * CHV, on the other hand, doesn't seem to mind having the same port
2643 * selected in multiple power sequencers, but let's always clear the
2644 * port select when logically disconnecting a power sequencer
2645 * from a port.
2646 */
2647 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2648 pipe_name(pipe), port_name(intel_dig_port->port));
2649 I915_WRITE(pp_on_reg, 0);
2650 POSTING_READ(pp_on_reg);
2651
2652 intel_dp->pps_pipe = INVALID_PIPE;
2653}
2654
2655static void vlv_steal_power_sequencer(struct drm_device *dev,
2656 enum pipe pipe)
2657{
2658 struct drm_i915_private *dev_priv = dev->dev_private;
2659 struct intel_encoder *encoder;
2660
2661 lockdep_assert_held(&dev_priv->pps_mutex);
2662
2663 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2664 return;
2665
2666 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2667 base.head) {
2668 struct intel_dp *intel_dp;
2669 enum port port;
2670
2671 if (encoder->type != INTEL_OUTPUT_EDP)
2672 continue;
2673
2674 intel_dp = enc_to_intel_dp(&encoder->base);
2675 port = dp_to_dig_port(intel_dp)->port;
2676
2677 if (intel_dp->pps_pipe != pipe)
2678 continue;
2679
2680 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2681 pipe_name(pipe), port_name(port));
2682
2683 WARN(encoder->base.crtc,
2684 "stealing pipe %c power sequencer from active eDP port %c\n",
2685 pipe_name(pipe), port_name(port));
2686
2687 /* make sure vdd is off before we steal it */
2688 vlv_detach_power_sequencer(intel_dp);
2689 }
2690}
2691
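/*
 * Bind the panel power sequencer of the current pipe to this eDP port:
 * detach whatever sequencer the port used before, steal the new pipe's
 * sequencer from any other port that holds it, and then (re)initialize
 * the PPS state and registers for this port.
 */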
2692static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2693{
2694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2695 struct intel_encoder *encoder = &intel_dig_port->base;
2696 struct drm_device *dev = encoder->base.dev;
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2699
2700 lockdep_assert_held(&dev_priv->pps_mutex);
2701
2702 if (!is_edp(intel_dp))
2703 return;
2704
2705 if (intel_dp->pps_pipe == crtc->pipe)
2706 return;
2707
2708 /*
2709 * If another power sequencer was being used on this
2710 * port previously make sure to turn off vdd there while
2711 * we still have control of it.
2712 */
2713 if (intel_dp->pps_pipe != INVALID_PIPE)
2714 vlv_detach_power_sequencer(intel_dp);
2715
2716 /*
2717 * We may be stealing the power
2718 * sequencer from another port.
2719 */
2720 vlv_steal_power_sequencer(dev, crtc->pipe);
2721
2722 /* now it's all ours */
2723 intel_dp->pps_pipe = crtc->pipe;
2724
2725 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2726 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2727
2728 /* init power sequencer on this pipe and port */
2729 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2730 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2731}
2732
2733static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2734{
2735 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2736 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2737 struct drm_device *dev = encoder->base.dev;
2738 struct drm_i915_private *dev_priv = dev->dev_private;
2739 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2740 enum dpio_channel port = vlv_dport_to_channel(dport);
2741 int pipe = intel_crtc->pipe;
2742 u32 val;
2743
2744 mutex_lock(&dev_priv->sb_lock);
2745
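	/* XXX: the DW8 value read below is immediately discarded (val is reset to 0) */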
2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2747 val = 0;
2748 if (pipe)
2749 val |= (1<<21);
2750 else
2751 val &= ~(1<<21);
2752 val |= 0x001000c4;
2753 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2754 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2755 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2756
2757 mutex_unlock(&dev_priv->sb_lock);
2758
2759 intel_enable_dp(encoder);
2760}
2761
2762static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2763{
2764 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2765 struct drm_device *dev = encoder->base.dev;
2766 struct drm_i915_private *dev_priv = dev->dev_private;
2767 struct intel_crtc *intel_crtc =
2768 to_intel_crtc(encoder->base.crtc);
2769 enum dpio_channel port = vlv_dport_to_channel(dport);
2770 int pipe = intel_crtc->pipe;
2771
2772 intel_dp_prepare(encoder);
2773
2774 /* Program Tx lane resets to default */
2775 mutex_lock(&dev_priv->sb_lock);
2776 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2777 DPIO_PCS_TX_LANE2_RESET |
2778 DPIO_PCS_TX_LANE1_RESET);
2779 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2780 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2781 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2782 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2783 DPIO_PCS_CLK_SOFT_RESET);
2784
2785 /* Fix up inter-pair skew failure */
2786 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2787 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2788 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2789 mutex_unlock(&dev_priv->sb_lock);
2790}
2791
2792static void chv_pre_enable_dp(struct intel_encoder *encoder)
2793{
2794 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2795 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2796 struct drm_device *dev = encoder->base.dev;
2797 struct drm_i915_private *dev_priv = dev->dev_private;
2798 struct intel_crtc *intel_crtc =
2799 to_intel_crtc(encoder->base.crtc);
2800 enum dpio_channel ch = vlv_dport_to_channel(dport);
2801 int pipe = intel_crtc->pipe;
2802 int data, i, stagger;
2803 u32 val;
2804
2805 mutex_lock(&dev_priv->sb_lock);
2806
2807 /* allow hardware to manage TX FIFO reset source */
2808 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2809 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2810 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2811
2812 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2813 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2814 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2815
2816 /* Deassert soft data lane reset */
2817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2818 val |= CHV_PCS_REQ_SOFTRESET_EN;
2819 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2820
2821 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2822 val |= CHV_PCS_REQ_SOFTRESET_EN;
2823 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2826 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2828
2829 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2830 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2831 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2832
2833 /* Program the optimal Tx lane latency settings */
2834 for (i = 0; i < 4; i++) {
2835 /* Set the upar bit */
2836 data = (i == 1) ? 0x0 : 0x1;
2837 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2838 data << DPIO_UPAR_SHIFT);
2839 }
2840
2841 /* Data lane stagger programming */
2842 if (intel_crtc->config->port_clock > 270000)
2843 stagger = 0x18;
2844 else if (intel_crtc->config->port_clock > 135000)
2845 stagger = 0xd;
2846 else if (intel_crtc->config->port_clock > 67500)
2847 stagger = 0x7;
2848 else if (intel_crtc->config->port_clock > 33750)
2849 stagger = 0x4;
2850 else
2851 stagger = 0x2;
2852
2853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2854 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2856
2857 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2858 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2860
2861 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2862 DPIO_LANESTAGGER_STRAP(stagger) |
2863 DPIO_LANESTAGGER_STRAP_OVRD |
2864 DPIO_TX1_STAGGER_MASK(0x1f) |
2865 DPIO_TX1_STAGGER_MULT(6) |
2866 DPIO_TX2_STAGGER_MULT(0));
2867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(7) |
2873 DPIO_TX2_STAGGER_MULT(5));
2874
2875 mutex_unlock(&dev_priv->sb_lock);
2876
2877 intel_enable_dp(encoder);
2878}
2879
2880static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2881{
2882 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2883 struct drm_device *dev = encoder->base.dev;
2884 struct drm_i915_private *dev_priv = dev->dev_private;
2885 struct intel_crtc *intel_crtc =
2886 to_intel_crtc(encoder->base.crtc);
2887 enum dpio_channel ch = vlv_dport_to_channel(dport);
2888 enum pipe pipe = intel_crtc->pipe;
2889 u32 val;
2890
2891 intel_dp_prepare(encoder);
2892
2893 mutex_lock(&dev_priv->sb_lock);
2894
2895 /* program left/right clock distribution */
2896 if (pipe != PIPE_B) {
2897 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2898 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2899 if (ch == DPIO_CH0)
2900 val |= CHV_BUFLEFTENA1_FORCE;
2901 if (ch == DPIO_CH1)
2902 val |= CHV_BUFRIGHTENA1_FORCE;
2903 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2904 } else {
2905 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2906 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2907 if (ch == DPIO_CH0)
2908 val |= CHV_BUFLEFTENA2_FORCE;
2909 if (ch == DPIO_CH1)
2910 val |= CHV_BUFRIGHTENA2_FORCE;
2911 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2912 }
2913
2914 /* program clock channel usage */
2915 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2916 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2917 if (pipe != PIPE_B)
2918 val &= ~CHV_PCS_USEDCLKCHANNEL;
2919 else
2920 val |= CHV_PCS_USEDCLKCHANNEL;
2921 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2922
2923 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2924 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2925 if (pipe != PIPE_B)
2926 val &= ~CHV_PCS_USEDCLKCHANNEL;
2927 else
2928 val |= CHV_PCS_USEDCLKCHANNEL;
2929 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2930
2931 /*
2932 * This is a bit weird since generally the CL
2933 * matches the pipe, but here we need to
2934 * pick the CL based on the port.
2935 */
2936 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2937 if (pipe != PIPE_B)
2938 val &= ~CHV_CMN_USEDCLKCHANNEL;
2939 else
2940 val |= CHV_CMN_USEDCLKCHANNEL;
2941 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2942
2943 mutex_unlock(&dev_priv->sb_lock);
2944}
2945
2946static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2947{
2948 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2949 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2950 u32 val;
2951
2952 mutex_lock(&dev_priv->sb_lock);
2953
2954 /* disable left/right clock distribution */
2955 if (pipe != PIPE_B) {
2956 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2957 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2958 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2959 } else {
2960 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2961 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2962 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2963 }
2964
2965 mutex_unlock(&dev_priv->sb_lock);
2966}
2967
2968/*
2969 * Native read with retry for link status and receiver capability reads for
2970 * cases where the sink may still be asleep.
2971 *
2972 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2973 * supposed to retry 3 times per the spec.
2974 */
2975static ssize_t
2976intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2977 void *buffer, size_t size)
2978{
2979 ssize_t ret;
2980 int i;
2981
2982 /*
2983 * Sometimes we just get the same incorrect byte repeated
2984 * over the entire buffer. Doing just one throwaway read
2985 * initially seems to "solve" it.
2986 */
2987 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2988
2989 for (i = 0; i < 3; i++) {
2990 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2991 if (ret == size)
2992 return ret;
2993 msleep(1);
2994 }
2995
2996 return ret;
2997}
2998
2999/*
3000 * Fetch AUX CH registers 0x202 - 0x207 which contain
3001 * link status information
3002 */
3003static bool
3004intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3005{
3006 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3007 DP_LANE0_1_STATUS,
3008 link_status,
3009 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3010}
3011
3012/* These are source-specific values. */
3013static uint8_t
3014intel_dp_voltage_max(struct intel_dp *intel_dp)
3015{
3016 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3017 struct drm_i915_private *dev_priv = dev->dev_private;
3018 enum port port = dp_to_dig_port(intel_dp)->port;
3019
3020 if (IS_BROXTON(dev))
3021 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3022 else if (INTEL_INFO(dev)->gen >= 9) {
3023 if (dev_priv->edp_low_vswing && port == PORT_A)
3024 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3025 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3026 } else if (IS_VALLEYVIEW(dev))
3027 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3028 else if (IS_GEN7(dev) && port == PORT_A)
3029 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3030 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3031 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3032 else
3033 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3034}
3035
3036static uint8_t
3037intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3038{
3039 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3040 enum port port = dp_to_dig_port(intel_dp)->port;
3041
3042 if (INTEL_INFO(dev)->gen >= 9) {
3043 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3044 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3045 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3046 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3047 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3049 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3052 default:
3053 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3054 }
3055 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3056 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3064 default:
3065 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3066 }
3067 } else if (IS_VALLEYVIEW(dev)) {
3068 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3069 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3070 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3071 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3072 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3074 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3075 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3076 default:
3077 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3078 }
3079 } else if (IS_GEN7(dev) && port == PORT_A) {
3080 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3082 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3084 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3085 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3086 default:
3087 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3088 }
3089 } else {
3090 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3092 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3093 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3094 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3096 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3097 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3098 default:
3099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3100 }
3101 }
3102}
3103
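/*
 * Translate the requested vswing/pre-emphasis levels into VLV DPIO
 * demph/uniqtranscale/preemph register values and program them through
 * the sideband. Returns 0 in all cases; unsupported combinations bail
 * out before touching the PHY.
 */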
3104static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3105{
3106 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3107 struct drm_i915_private *dev_priv = dev->dev_private;
3108 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3109 struct intel_crtc *intel_crtc =
3110 to_intel_crtc(dport->base.base.crtc);
3111 unsigned long demph_reg_value, preemph_reg_value,
3112 uniqtranscale_reg_value;
3113 uint8_t train_set = intel_dp->train_set[0];
3114 enum dpio_channel port = vlv_dport_to_channel(dport);
3115 int pipe = intel_crtc->pipe;
3116
3117 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3118 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3119 preemph_reg_value = 0x0004000;
3120 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122 demph_reg_value = 0x2B405555;
3123 uniqtranscale_reg_value = 0x552AB83A;
3124 break;
3125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3126 demph_reg_value = 0x2B404040;
3127 uniqtranscale_reg_value = 0x5548B83A;
3128 break;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3130 demph_reg_value = 0x2B245555;
3131 uniqtranscale_reg_value = 0x5560B83A;
3132 break;
3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3134 demph_reg_value = 0x2B405555;
3135 uniqtranscale_reg_value = 0x5598DA3A;
3136 break;
3137 default:
3138 return 0;
3139 }
3140 break;
3141 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3142 preemph_reg_value = 0x0002000;
3143 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3145 demph_reg_value = 0x2B404040;
3146 uniqtranscale_reg_value = 0x5552B83A;
3147 break;
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3149 demph_reg_value = 0x2B404848;
3150 uniqtranscale_reg_value = 0x5580B83A;
3151 break;
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3153 demph_reg_value = 0x2B404040;
3154 uniqtranscale_reg_value = 0x55ADDA3A;
3155 break;
3156 default:
3157 return 0;
3158 }
3159 break;
3160 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3161 preemph_reg_value = 0x0000000;
3162 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3164 demph_reg_value = 0x2B305555;
3165 uniqtranscale_reg_value = 0x5570B83A;
3166 break;
3167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3168 demph_reg_value = 0x2B2B4040;
3169 uniqtranscale_reg_value = 0x55ADDA3A;
3170 break;
3171 default:
3172 return 0;
3173 }
3174 break;
3175 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3176 preemph_reg_value = 0x0006000;
3177 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3178 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3179 demph_reg_value = 0x1B405555;
3180 uniqtranscale_reg_value = 0x55ADDA3A;
3181 break;
3182 default:
3183 return 0;
3184 }
3185 break;
3186 default:
3187 return 0;
3188 }
3189
3190 mutex_lock(&dev_priv->sb_lock);
3191 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3192 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3193 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3194 uniqtranscale_reg_value);
3195 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3196 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3197 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3198 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3199 mutex_unlock(&dev_priv->sb_lock);
3200
3201 return 0;
3202}
3203
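/*
 * The unique transition scale is only needed for the maximum voltage
 * swing with no pre-emphasis (the 1200 mV case flagged by the FIXME in
 * chv_signal_levels()).
 */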
3204static bool chv_need_uniq_trans_scale(uint8_t train_set)
3205{
3206 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3207 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3208}
3209
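/*
 * CHV counterpart of vlv_signal_levels(): convert the training levels to
 * deemph/margin values, program them per lane via the sideband, and kick
 * off the PHY's swing calculation.
 */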
3210static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3211{
3212 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3213 struct drm_i915_private *dev_priv = dev->dev_private;
3214 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3215 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3216 u32 deemph_reg_value, margin_reg_value, val;
3217 uint8_t train_set = intel_dp->train_set[0];
3218 enum dpio_channel ch = vlv_dport_to_channel(dport);
3219 enum pipe pipe = intel_crtc->pipe;
3220 int i;
3221
3222 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3223 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3224 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3226 deemph_reg_value = 128;
3227 margin_reg_value = 52;
3228 break;
3229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3230 deemph_reg_value = 128;
3231 margin_reg_value = 77;
3232 break;
3233 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3234 deemph_reg_value = 128;
3235 margin_reg_value = 102;
3236 break;
3237 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3238 deemph_reg_value = 128;
3239 margin_reg_value = 154;
3240 /* FIXME: extra programming needed for the 1200 mV swing case */
3241 break;
3242 default:
3243 return 0;
3244 }
3245 break;
3246 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3247 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3248 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3249 deemph_reg_value = 85;
3250 margin_reg_value = 78;
3251 break;
3252 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3253 deemph_reg_value = 85;
3254 margin_reg_value = 116;
3255 break;
3256 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3257 deemph_reg_value = 85;
3258 margin_reg_value = 154;
3259 break;
3260 default:
3261 return 0;
3262 }
3263 break;
3264 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3265 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3267 deemph_reg_value = 64;
3268 margin_reg_value = 104;
3269 break;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3271 deemph_reg_value = 64;
3272 margin_reg_value = 154;
3273 break;
3274 default:
3275 return 0;
3276 }
3277 break;
3278 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3279 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3281 deemph_reg_value = 43;
3282 margin_reg_value = 154;
3283 break;
3284 default:
3285 return 0;
3286 }
3287 break;
3288 default:
3289 return 0;
3290 }
3291
3292 mutex_lock(&dev_priv->sb_lock);
3293
3294 /* Clear calc init */
3295 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3296 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3297 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3298 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3300
3301 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3302 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3303 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3304 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3305 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3306
3307 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3308 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3309 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3310 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3311
3312 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3313 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3314 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3316
3317 /* Program swing deemph */
3318 for (i = 0; i < 4; i++) {
3319 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3320 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3321 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3322 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3323 }
3324
3325 /* Program swing margin */
3326 for (i = 0; i < 4; i++) {
3327 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3328
3329 val &= ~DPIO_SWING_MARGIN000_MASK;
3330 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3331
3332 /*
3333 * Supposedly this value shouldn't matter when unique transition
3334 * scale is disabled, but in fact it does matter. Let's just
3335 * always program the same value and hope it's OK.
3336 */
3337 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3338 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3339
3340 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3341 }
3342
3343 /*
3344 * The document said it needs to set bit 27 for ch0 and bit 26
3345 * for ch1. Might be a typo in the doc.
3346 * For now, for this unique transition scale selection, set bit
3347 * 27 for ch0 and ch1.
3348 */
3349 for (i = 0; i < 4; i++) {
3350 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3351 if (chv_need_uniq_trans_scale(train_set))
3352 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3353 else
3354 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3355 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3356 }
3357
3358 /* Start swing calculation */
3359 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3360 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3361 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3362
3363 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3364 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3365 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3366
3367 /* LRC Bypass */
3368 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3369 val |= DPIO_LRC_BYPASS;
3370 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3371
3372 mutex_unlock(&dev_priv->sb_lock);
3373
3374 return 0;
3375}
3376
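/*
 * Compute the next train_set from the sink's adjust requests: take the
 * maximum voltage swing and pre-emphasis requested across all active
 * lanes, clamp to what the source supports, and flag when the maximum
 * has been reached.
 */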
3377static void
3378intel_get_adjust_train(struct intel_dp *intel_dp,
3379 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3380{
3381 uint8_t v = 0;
3382 uint8_t p = 0;
3383 int lane;
3384 uint8_t voltage_max;
3385 uint8_t preemph_max;
3386
3387 for (lane = 0; lane < intel_dp->lane_count; lane++) {
3388 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3389 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
3390
3391 if (this_v > v)
3392 v = this_v;
3393 if (this_p > p)
3394 p = this_p;
3395 }
3396
3397 voltage_max = intel_dp_voltage_max(intel_dp);
3398 if (v >= voltage_max)
3399 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
3400
3401 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3402 if (p >= preemph_max)
3403 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
3404
3405 for (lane = 0; lane < 4; lane++)
3406 intel_dp->train_set[lane] = v | p;
3407}
3408
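/* Map train_set to the gen4 DP port register's voltage/pre-emphasis bits. */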
3409static uint32_t
3410gen4_signal_levels(uint8_t train_set)
3411{
3412 uint32_t signal_levels = 0;
3413
3414 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3416 default:
3417 signal_levels |= DP_VOLTAGE_0_4;
3418 break;
3419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3420 signal_levels |= DP_VOLTAGE_0_6;
3421 break;
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3423 signal_levels |= DP_VOLTAGE_0_8;
3424 break;
3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3426 signal_levels |= DP_VOLTAGE_1_2;
3427 break;
3428 }
3429 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3430 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3431 default:
3432 signal_levels |= DP_PRE_EMPHASIS_0;
3433 break;
3434 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3435 signal_levels |= DP_PRE_EMPHASIS_3_5;
3436 break;
3437 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3438 signal_levels |= DP_PRE_EMPHASIS_6;
3439 break;
3440 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3441 signal_levels |= DP_PRE_EMPHASIS_9_5;
3442 break;
3443 }
3444 return signal_levels;
3445}
3446
3447/* Gen6's DP voltage swing and pre-emphasis control */
3448static uint32_t
3449gen6_edp_signal_levels(uint8_t train_set)
3450{
3451 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3452 DP_TRAIN_PRE_EMPHASIS_MASK);
3453 switch (signal_levels) {
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3456 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3458 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3459 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3461 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3462 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3464 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3467 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3468 default:
3469 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3470 "0x%x\n", signal_levels);
3471 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3472 }
3473}
3474
3475/* Gen7's DP voltage swing and pre-emphasis control */
3476static uint32_t
3477gen7_edp_signal_levels(uint8_t train_set)
3478{
3479 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3480 DP_TRAIN_PRE_EMPHASIS_MASK);
3481 switch (signal_levels) {
3482 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3483 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3485 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3486 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3487 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3488
3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3490 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3492 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3493
3494 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3495 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3496 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3497 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3498
3499 default:
3500 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3501 "0x%x\n", signal_levels);
3502 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3503 }
3504}
3505
3506/* Properly updates "DP" with the correct signal levels. */
3507static void
3508intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3509{
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3511 enum port port = intel_dig_port->port;
3512 struct drm_device *dev = intel_dig_port->base.base.dev;
3513 uint32_t signal_levels, mask = 0;
3514 uint8_t train_set = intel_dp->train_set[0];
3515
3516 if (HAS_DDI(dev)) {
3517 signal_levels = ddi_signal_levels(intel_dp);
3518
3519 if (IS_BROXTON(dev))
3520 signal_levels = 0;
3521 else
3522 mask = DDI_BUF_EMP_MASK;
3523 } else if (IS_CHERRYVIEW(dev)) {
3524 signal_levels = chv_signal_levels(intel_dp);
3525 } else if (IS_VALLEYVIEW(dev)) {
3526 signal_levels = vlv_signal_levels(intel_dp);
3527 } else if (IS_GEN7(dev) && port == PORT_A) {
3528 signal_levels = gen7_edp_signal_levels(train_set);
3529 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3530 } else if (IS_GEN6(dev) && port == PORT_A) {
3531 signal_levels = gen6_edp_signal_levels(train_set);
3532 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3533 } else {
3534 signal_levels = gen4_signal_levels(train_set);
3535 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3536 }
3537
3538 if (mask)
3539 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3540
3541 DRM_DEBUG_KMS("Using vswing level %d\n",
3542 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3543 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3544 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3545 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3546
3547 *DP = (*DP & ~mask) | signal_levels;
3548}
3549
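/*
 * Set the training pattern on both ends of the link: program the source
 * port register first, then mirror the pattern (and, unless training is
 * being disabled, the per-lane drive settings) to the sink starting at
 * DP_TRAINING_PATTERN_SET.
 */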
3550static bool
3551intel_dp_set_link_train(struct intel_dp *intel_dp,
3552 uint32_t *DP,
3553 uint8_t dp_train_pat)
3554{
3555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3556 struct drm_i915_private *dev_priv =
3557 to_i915(intel_dig_port->base.base.dev);
3558 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3559 int ret, len;
3560
3561 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3562
3563 I915_WRITE(intel_dp->output_reg, *DP);
3564 POSTING_READ(intel_dp->output_reg);
3565
3566 buf[0] = dp_train_pat;
3567 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
3568 DP_TRAINING_PATTERN_DISABLE) {
3569 /* don't write DP_TRAINING_LANEx_SET on disable */
3570 len = 1;
3571 } else {
3572 /* The DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3573 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3574 len = intel_dp->lane_count + 1;
3575 }
3576
3577 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3578 buf, len);
3579
3580 return ret == len;
3581}
3582
3583static bool
3584intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3585 uint8_t dp_train_pat)
3586{
3587 if (!intel_dp->train_set_valid)
3588 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
3589 intel_dp_set_signal_levels(intel_dp, DP);
3590 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3591}
3592
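/*
 * Apply the voltage swing/pre-emphasis adjustments requested by the sink:
 * recompute the drive settings from the link status, program them on the
 * source and write them back to DP_TRAINING_LANEx_SET.
 */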
3593static bool
3594intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3595 const uint8_t link_status[DP_LINK_STATUS_SIZE])
3596{
3597 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3598 struct drm_i915_private *dev_priv =
3599 to_i915(intel_dig_port->base.base.dev);
3600 int ret;
3601
3602 intel_get_adjust_train(intel_dp, link_status);
3603 intel_dp_set_signal_levels(intel_dp, DP);
3604
3605 I915_WRITE(intel_dp->output_reg, *DP);
3606 POSTING_READ(intel_dp->output_reg);
3607
3608 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3609 intel_dp->train_set, intel_dp->lane_count);
3610
3611 return ret == intel_dp->lane_count;
3612}
3613
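/*
 * On DDI platforms, switch DP_TP_CTL to the idle training pattern and,
 * except on port A, wait for the idle patterns to be sent. No-op on
 * non-DDI platforms.
 */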
3614static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3615{
3616 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3617 struct drm_device *dev = intel_dig_port->base.base.dev;
3618 struct drm_i915_private *dev_priv = dev->dev_private;
3619 enum port port = intel_dig_port->port;
3620 uint32_t val;
3621
3622 if (!HAS_DDI(dev))
3623 return;
3624
3625 val = I915_READ(DP_TP_CTL(port));
3626 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3627 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3628 I915_WRITE(DP_TP_CTL(port), val);
3629
3630 /*
3631 * On PORT_A we can only have eDP in SST mode. There, the only reason
3632 * we need to set idle transmission mode is to work around a HW issue
3633 * where we enable the pipe while not in idle link-training mode.
3634 * In this case there is a requirement to wait for a minimum number of
3635 * idle patterns to be sent.
3636 */
3637 if (port == PORT_A)
3638 return;
3639
3640 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3641 1))
3642 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3643}
3644
3645 /* Enable the corresponding port and start training with pattern 1 */
3646void
3647intel_dp_start_link_train(struct intel_dp *intel_dp)
3648{
3649 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
3650 struct drm_device *dev = encoder->dev;
3651 int i;
3652 uint8_t voltage;
3653 int voltage_tries, loop_tries;
3654 uint32_t DP = intel_dp->DP;
3655 uint8_t link_config[2];
3656 uint8_t link_bw, rate_select;
3657
3658 if (HAS_DDI(dev))
3659 intel_ddi_prepare_link_retrain(encoder);
3660
3661 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
3662 &link_bw, &rate_select);
3663
3664 /* Write the link configuration data */
3665 link_config[0] = link_bw;
3666 link_config[1] = intel_dp->lane_count;
3667 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3668 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
3669 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
3670 if (intel_dp->num_sink_rates)
3671 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3672 &rate_select, 1);
3673
3674 link_config[0] = 0;
3675 link_config[1] = DP_SET_ANSI_8B10B;
3676 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
3677
3678 DP |= DP_PORT_EN;
3679
3680 /* clock recovery */
3681 if (!intel_dp_reset_link_train(intel_dp, &DP,
3682 DP_TRAINING_PATTERN_1 |
3683 DP_LINK_SCRAMBLING_DISABLE)) {
3684 DRM_ERROR("failed to enable link training\n");
3685 return;
3686 }
3687
3688 voltage = 0xff;
3689 voltage_tries = 0;
3690 loop_tries = 0;
3691 for (;;) {
3692 uint8_t link_status[DP_LINK_STATUS_SIZE];
3693
3694 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
3695 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3696 DRM_ERROR("failed to get link status\n");
3697 break;
3698 }
3699
3700 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
3701 DRM_DEBUG_KMS("clock recovery OK\n");
3702 break;
3703 }
3704
3705 /*
3706 * if we used previously trained voltage and pre-emphasis values
3707 * and we don't get clock recovery, reset link training values
3708 */
3709 if (intel_dp->train_set_valid) {
3710 DRM_DEBUG_KMS("clock recovery not ok, reset");
3711 /* clear the flag as we are not reusing train set */
3712 intel_dp->train_set_valid = false;
3713 if (!intel_dp_reset_link_train(intel_dp, &DP,
3714 DP_TRAINING_PATTERN_1 |
3715 DP_LINK_SCRAMBLING_DISABLE)) {
3716 DRM_ERROR("failed to enable link training\n");
3717 return;
3718 }
3719 continue;
3720 }
3721
3722 /* Check to see if we've tried the max voltage */
3723 for (i = 0; i < intel_dp->lane_count; i++)
3724 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
3725 break;
3726 if (i == intel_dp->lane_count) {
3727 ++loop_tries;
3728 if (loop_tries == 5) {
3729 DRM_ERROR("too many full retries, give up\n");
3730 break;
3731 }
3732 intel_dp_reset_link_train(intel_dp, &DP,
3733 DP_TRAINING_PATTERN_1 |
3734 DP_LINK_SCRAMBLING_DISABLE);
3735 voltage_tries = 0;
3736 continue;
3737 }
3738
3739 /* Check to see if we've tried the same voltage 5 times */
3740 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
3741 ++voltage_tries;
3742 if (voltage_tries == 5) {
3743 DRM_ERROR("too many voltage retries, give up\n");
3744 break;
3745 }
3746 } else
3747 voltage_tries = 0;
3748 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
3749
3750 /* Update training set as requested by target */
3751 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3752 DRM_ERROR("failed to update link training\n");
3753 break;
3754 }
3755 }
3756
3757 intel_dp->DP = DP;
3758}
3759
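/*
 * Channel equalization phase of link training: keep adjusting the drive
 * settings until the sink reports channel EQ done, restarting clock
 * recovery if the link drops out from under us.
 */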
3760void
3761intel_dp_complete_link_train(struct intel_dp *intel_dp)
3762{
3763 bool channel_eq = false;
3764 int tries, cr_tries;
3765 uint32_t DP = intel_dp->DP;
3766 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3767
3768 /* Training Pattern 3 for HBR2 or DP 1.2 sinks that support it */
3769 if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
3770 training_pattern = DP_TRAINING_PATTERN_3;
3771
3772 /* channel equalization */
3773 if (!intel_dp_set_link_train(intel_dp, &DP,
3774 training_pattern |
3775 DP_LINK_SCRAMBLING_DISABLE)) {
3776 DRM_ERROR("failed to start channel equalization\n");
3777 return;
3778 }
3779
3780 tries = 0;
3781 cr_tries = 0;
3782 channel_eq = false;
3783 for (;;) {
3784 uint8_t link_status[DP_LINK_STATUS_SIZE];
3785
3786 if (cr_tries > 5) {
3787 DRM_ERROR("failed to train DP, aborting\n");
3788 break;
3789 }
3790
3791 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
3792 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3793 DRM_ERROR("failed to get link status\n");
3794 break;
3795 }
3796
3797 /* Make sure clock is still ok */
3798 if (!drm_dp_clock_recovery_ok(link_status,
3799 intel_dp->lane_count)) {
3800 intel_dp->train_set_valid = false;
3801 intel_dp_start_link_train(intel_dp);
3802 intel_dp_set_link_train(intel_dp, &DP,
3803 training_pattern |
3804 DP_LINK_SCRAMBLING_DISABLE);
3805 cr_tries++;
3806 continue;
3807 }
3808
3809 if (drm_dp_channel_eq_ok(link_status,
3810 intel_dp->lane_count)) {
3811 channel_eq = true;
3812 break;
3813 }
3814
3815 /* Try 5 times, then try clock recovery if that fails */
3816 if (tries > 5) {
3817 intel_dp->train_set_valid = false;
3818 intel_dp_start_link_train(intel_dp);
3819 intel_dp_set_link_train(intel_dp, &DP,
3820 training_pattern |
3821 DP_LINK_SCRAMBLING_DISABLE);
3822 tries = 0;
3823 cr_tries++;
3824 continue;
3825 }
3826
3827 /* Update training set as requested by target */
3828 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3829 DRM_ERROR("failed to update link training\n");
3830 break;
3831 }
3832 ++tries;
3833 }
3834
3835 intel_dp_set_idle_link_train(intel_dp);
3836
3837 intel_dp->DP = DP;
3838
3839 if (channel_eq) {
3840 intel_dp->train_set_valid = true;
3841 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
3842 }
3843}
3844
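/* End link training and switch the link back to normal operation. */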
3845void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3846{
3847 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3848 DP_TRAINING_PATTERN_DISABLE);
3849}
3850
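/*
 * Shut the link down on pre-DDI platforms: idle the link, disable the
 * port (applying the IBX transcoder A workaround where needed) and wait
 * out the panel power down delay.
 */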
3851static void
3852intel_dp_link_down(struct intel_dp *intel_dp)
3853{
3854 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3855 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3856 enum port port = intel_dig_port->port;
3857 struct drm_device *dev = intel_dig_port->base.base.dev;
3858 struct drm_i915_private *dev_priv = dev->dev_private;
3859 uint32_t DP = intel_dp->DP;
3860
3861 if (WARN_ON(HAS_DDI(dev)))
3862 return;
3863
3864 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3865 return;
3866
3867 DRM_DEBUG_KMS("\n");
3868
3869 if ((IS_GEN7(dev) && port == PORT_A) ||
3870 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3871 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3872 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3873 } else {
3874 if (IS_CHERRYVIEW(dev))
3875 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3876 else
3877 DP &= ~DP_LINK_TRAIN_MASK;
3878 DP |= DP_LINK_TRAIN_PAT_IDLE;
3879 }
3880 I915_WRITE(intel_dp->output_reg, DP);
3881 POSTING_READ(intel_dp->output_reg);
3882
3883 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3884 I915_WRITE(intel_dp->output_reg, DP);
3885 POSTING_READ(intel_dp->output_reg);
3886
3887 /*
3888 * HW workaround for IBX, we need to move the port
3889 * to transcoder A after disabling it to allow the
3890 * matching HDMI port to be enabled on transcoder A.
3891 */
3892 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3893 /* always enable with pattern 1 (as per spec) */
3894 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3895 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3896 I915_WRITE(intel_dp->output_reg, DP);
3897 POSTING_READ(intel_dp->output_reg);
3898
3899 DP &= ~DP_PORT_EN;
3900 I915_WRITE(intel_dp->output_reg, DP);
3901 POSTING_READ(intel_dp->output_reg);
3902 }
3903
3904 msleep(intel_dp->panel_power_down_delay);
3905}
3906
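/*
 * Read and cache the sink's receiver capabilities, including PSR, TPS3,
 * intermediate link rate and downstream port information. Returns false
 * if the DPCD is unreadable or not present.
 */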
3907static bool
3908intel_dp_get_dpcd(struct intel_dp *intel_dp)
3909{
3910 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3911 struct drm_device *dev = dig_port->base.base.dev;
3912 struct drm_i915_private *dev_priv = dev->dev_private;
3913 uint8_t rev;
3914
3915 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3916 sizeof(intel_dp->dpcd)) < 0)
3917 return false; /* aux transfer failed */
3918
3919 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3920
3921 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3922 return false; /* DPCD not present */
3923
3924 /* Check if the panel supports PSR */
3925 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3926 if (is_edp(intel_dp)) {
3927 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3928 intel_dp->psr_dpcd,
3929 sizeof(intel_dp->psr_dpcd));
3930 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3931 dev_priv->psr.sink_support = true;
3932 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3933 }
3934
3935 if (INTEL_INFO(dev)->gen >= 9 &&
3936 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3937 uint8_t frame_sync_cap;
3938
3939 dev_priv->psr.sink_support = true;
3940 intel_dp_dpcd_read_wake(&intel_dp->aux,
3941 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3942 &frame_sync_cap, 1);
3943 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3944 /* PSR2 needs frame sync as well */
3945 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3946 DRM_DEBUG_KMS("PSR2 %s on sink",
3947 dev_priv->psr.psr2_support ? "supported" : "not supported");
3948 }
3949 }
3950
3951 /* Training Pattern 3 support, both source and sink */
3952 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3953 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3954 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3955 intel_dp->use_tps3 = true;
3956 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3957 } else
3958 intel_dp->use_tps3 = false;
3959
3960 /* Intermediate frequency support */
3961 if (is_edp(intel_dp) &&
3962 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3963 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3964 (rev >= 0x03)) { /* eDP 1.4 or higher */
3965 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3966 int i;
3967
3968 intel_dp_dpcd_read_wake(&intel_dp->aux,
3969 DP_SUPPORTED_LINK_RATES,
3970 sink_rates,
3971 sizeof(sink_rates));
3972
3973 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3974 int val = le16_to_cpu(sink_rates[i]);
3975
3976 if (val == 0)
3977 break;
3978
3979 /* Value read is in 200 kHz units while the drm clock is stored in deca-kHz */
3980 intel_dp->sink_rates[i] = (val * 200) / 10;
3981 }
3982 intel_dp->num_sink_rates = i;
3983 }
3984
3985 intel_dp_print_rates(intel_dp);
3986
3987 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3988 DP_DWN_STRM_PORT_PRESENT))
3989 return true; /* native DP sink */
3990
3991 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3992 return true; /* no per-port downstream info */
3993
3994 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3995 intel_dp->downstream_ports,
3996 DP_MAX_DOWNSTREAM_PORTS) < 0)
3997 return false; /* downstream port status fetch failed */
3998
3999 return true;
4000}
4001
4002static void
4003intel_dp_probe_oui(struct intel_dp *intel_dp)
4004{
4005 u8 buf[3];
4006
4007 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4008 return;
4009
4010 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
4011 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4012 buf[0], buf[1], buf[2]);
4013
4014 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
4015 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4016 buf[0], buf[1], buf[2]);
4017}
4018
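/*
 * Probe DP_MSTM_CAP and bring the MST topology manager in or out of MST
 * mode accordingly. Returns true if the sink is now driven in MST mode.
 */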
4019static bool
4020intel_dp_probe_mst(struct intel_dp *intel_dp)
4021{
4022 u8 buf[1];
4023
4024 if (!intel_dp->can_mst)
4025 return false;
4026
4027 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4028 return false;
4029
4030 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4031 if (buf[0] & DP_MST_CAP) {
4032 DRM_DEBUG_KMS("Sink is MST capable\n");
4033 intel_dp->is_mst = true;
4034 } else {
4035 DRM_DEBUG_KMS("Sink is not MST capable\n");
4036 intel_dp->is_mst = false;
4037 }
4038 }
4039
4040 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4041 return intel_dp->is_mst;
4042}
4043
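/*
 * Stop the sink CRC calculation by clearing DP_TEST_SINK_START and
 * re-enable IPS, which is kept off while CRCs are being captured.
 */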
4044static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
4045{
4046 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4047 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4048 u8 buf;
4049 int ret = 0;
4050
4051 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4052 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4053 ret = -EIO;
4054 goto out;
4055 }
4056
4057 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4058 buf & ~DP_TEST_SINK_START) < 0) {
4059 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4060 ret = -EIO;
4061 goto out;
4062 }
4063
4064 intel_dp->sink_crc.started = false;
4065 out:
4066 hsw_enable_ips(intel_crtc);
4067 return ret;
4068}
4069
4070static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4071{
4072 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4073 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4074 u8 buf;
4075 int ret;
4076
4077 if (intel_dp->sink_crc.started) {
4078 ret = intel_dp_sink_crc_stop(intel_dp);
4079 if (ret)
4080 return ret;
4081 }
4082
4083 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4084 return -EIO;
4085
4086 if (!(buf & DP_TEST_CRC_SUPPORTED))
4087 return -ENOTTY;
4088
4089 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4090
4091 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4092 return -EIO;
4093
4094 hsw_disable_ips(intel_crtc);
4095
4096 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4097 buf | DP_TEST_SINK_START) < 0) {
4098 hsw_enable_ips(intel_crtc);
4099 return -EIO;
4100 }
4101
4102 intel_dp->sink_crc.started = true;
4103 return 0;
4104}
4105
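/*
 * Fetch a 6-byte CRC of the transmitted frame from the sink's test
 * registers, retrying for up to 6 vblanks until the test counter and CRC
 * indicate a fresh value.
 */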
4106int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4107{
4108 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4109 struct drm_device *dev = dig_port->base.base.dev;
4110 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4111 u8 buf;
4112 int count, ret;
4113 int attempts = 6;
4114 bool old_equal_new;
4115
4116 ret = intel_dp_sink_crc_start(intel_dp);
4117 if (ret)
4118 return ret;
4119
4120 do {
4121 intel_wait_for_vblank(dev, intel_crtc->pipe);
4122
4123 if (drm_dp_dpcd_readb(&intel_dp->aux,
4124 DP_TEST_SINK_MISC, &buf) < 0) {
4125 ret = -EIO;
4126 goto stop;
4127 }
4128 count = buf & DP_TEST_COUNT_MASK;
4129
4130 /*
4131 * Count might be reset during the loop. In this case the
4132 * last known count needs to be reset as well.
4133 */
4134 if (count == 0)
4135 intel_dp->sink_crc.last_count = 0;
4136
4137 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4138 ret = -EIO;
4139 goto stop;
4140 }
4141
4142 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4143 !memcmp(intel_dp->sink_crc.last_crc, crc,
4144 6 * sizeof(u8)));
4145
4146 } while (--attempts && (count == 0 || old_equal_new));
4147
4148 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4149 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
4150
4151 if (attempts == 0) {
4152 if (old_equal_new) {
4153 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4154 } else {
4155 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4156 ret = -ETIMEDOUT;
4157 goto stop;
4158 }
4159 }
4160
4161stop:
4162 intel_dp_sink_crc_stop(intel_dp);
4163 return ret;
4164}
4165
4166static bool
4167intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4168{
4169 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4170 DP_DEVICE_SERVICE_IRQ_VECTOR,
4171 sink_irq_vector, 1) == 1;
4172}
4173
4174static bool
4175intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4176{
4177 int ret;
4178
4179 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4180 DP_SINK_COUNT_ESI,
4181 sink_irq_vector, 14);
4182 if (ret != 14)
4183 return false;
4184
4185 return true;
4186}
4187
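/*
 * Handlers for DP automated compliance test requests. Apart from the
 * EDID read test these are placeholders for now.
 */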
4188static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4189{
4190 uint8_t test_result = DP_TEST_ACK;
4191 return test_result;
4192}
4193
4194static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4195{
4196 uint8_t test_result = DP_TEST_NAK;
4197 return test_result;
4198}
4199
4200static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4201{
4202 uint8_t test_result = DP_TEST_NAK;
4203 struct intel_connector *intel_connector = intel_dp->attached_connector;
4204 struct drm_connector *connector = &intel_connector->base;
4205
4206 if (intel_connector->detect_edid == NULL ||
4207 connector->edid_corrupt ||
4208 intel_dp->aux.i2c_defer_count > 6) {
4209 /* Check EDID read for NACKs, DEFERs and corruption
4210 * (DP CTS 1.2 Core r1.1)
4211 * 4.2.2.4 : Failed EDID read, I2C_NAK
4212 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4213 * 4.2.2.6 : EDID corruption detected
4214 * Use failsafe mode for all cases
4215 */
4216 if (intel_dp->aux.i2c_nack_count > 0 ||
4217 intel_dp->aux.i2c_defer_count > 0)
4218 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4219 intel_dp->aux.i2c_nack_count,
4220 intel_dp->aux.i2c_defer_count);
4221 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4222 } else {
4223 struct edid *block = intel_connector->detect_edid;
4224
4225 /* We have to write the checksum
4226 * of the last block read
4227 */
4228 block += intel_connector->detect_edid->extensions;
4229
4230 if (!drm_dp_dpcd_write(&intel_dp->aux,
4231 DP_TEST_EDID_CHECKSUM,
4232 &block->checksum,
4233 1))
4234 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4235
4236 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4237 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4238 }
4239
4240 /* Set test active flag here so userspace doesn't interrupt things */
4241 intel_dp->compliance_test_active = 1;
4242
4243 return test_result;
4244}
4245
4246static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4247{
4248 uint8_t test_result = DP_TEST_NAK;
4249 return test_result;
4250}
4251
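/*
 * Dispatch a DP_TEST_REQUEST from the sink to the matching autotest
 * handler and write the result back to DP_TEST_RESPONSE.
 */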
4252static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4253{
4254 uint8_t response = DP_TEST_NAK;
4255 uint8_t rxdata = 0;
4256 int status = 0;
4257
4258 intel_dp->compliance_test_active = 0;
4259 intel_dp->compliance_test_type = 0;
4260 intel_dp->compliance_test_data = 0;
4261
4262 intel_dp->aux.i2c_nack_count = 0;
4263 intel_dp->aux.i2c_defer_count = 0;
4264
4265 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4266 if (status <= 0) {
4267 DRM_DEBUG_KMS("Could not read test request from sink\n");
4268 goto update_status;
4269 }
4270
4271 switch (rxdata) {
4272 case DP_TEST_LINK_TRAINING:
4273 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4274 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4275 response = intel_dp_autotest_link_training(intel_dp);
4276 break;
4277 case DP_TEST_LINK_VIDEO_PATTERN:
4278 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4279 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4280 response = intel_dp_autotest_video_pattern(intel_dp);
4281 break;
4282 case DP_TEST_LINK_EDID_READ:
4283 DRM_DEBUG_KMS("EDID test requested\n");
4284 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4285 response = intel_dp_autotest_edid(intel_dp);
4286 break;
4287 case DP_TEST_LINK_PHY_TEST_PATTERN:
4288 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4289 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4290 response = intel_dp_autotest_phy_pattern(intel_dp);
4291 break;
4292 default:
4293 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4294 break;
4295 }
4296
4297update_status:
4298 status = drm_dp_dpcd_write(&intel_dp->aux,
4299 DP_TEST_RESPONSE,
4300 &response, 1);
4301 if (status <= 0)
4302 DRM_DEBUG_KMS("Could not write test response to sink\n");
4303}
4304
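/*
 * Service an MST sink interrupt: retrain the link if channel EQ has
 * failed, then let the topology manager process and ack the ESI. Returns
 * a negative error code if we should drop out of MST mode.
 */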
4305static int
4306intel_dp_check_mst_status(struct intel_dp *intel_dp)
4307{
4308 bool bret;
4309
4310 if (intel_dp->is_mst) {
4311 u8 esi[16] = { 0 };
4312 int ret = 0;
4313 int retry;
4314 bool handled;
4315 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4316go_again:
4317 if (bret == true) {
4318
4319 /* check link status - esi[10] = 0x200c */
4320 if (intel_dp->active_mst_links &&
4321 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4322 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4323 intel_dp_start_link_train(intel_dp);
4324 intel_dp_complete_link_train(intel_dp);
4325 intel_dp_stop_link_train(intel_dp);
4326 }
4327
4328 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4329 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4330
4331 if (handled) {
4332 for (retry = 0; retry < 3; retry++) {
4333 int wret;
4334 wret = drm_dp_dpcd_write(&intel_dp->aux,
4335 DP_SINK_COUNT_ESI+1,
4336 &esi[1], 3);
4337 if (wret == 3) {
4338 break;
4339 }
4340 }
4341
4342 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4343 if (bret == true) {
4344 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4345 goto go_again;
4346 }
4347 } else
4348 ret = 0;
4349
4350 return ret;
4351 } else {
4352 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4353 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4354 intel_dp->is_mst = false;
4355 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4356 /* send a hotplug event */
4357 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4358 }
4359 }
4360 return -EINVAL;
4361}
4362
4363/*
4364 * According to DP spec
4365 * 5.1.2:
4366 * 1. Read DPCD
4367 * 2. Configure link according to Receiver Capabilities
4368 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4369 * 4. Check link status on receipt of hot-plug interrupt
4370 */
4371static void
4372intel_dp_check_link_status(struct intel_dp *intel_dp)
4373{
4374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4375 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4376 u8 sink_irq_vector;
4377 u8 link_status[DP_LINK_STATUS_SIZE];
4378
4379 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4380
4381 if (!intel_encoder->base.crtc)
4382 return;
4383
4384 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4385 return;
4386
4387 /* Try to read receiver status if the link appears to be up */
4388 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4389 return;
4390 }
4391
4392 /* Now read the DPCD to see if it's actually running */
4393 if (!intel_dp_get_dpcd(intel_dp)) {
4394 return;
4395 }
4396
4397 /* Try to read the source of the interrupt */
4398 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4399 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4400 /* Clear interrupt source */
4401 drm_dp_dpcd_writeb(&intel_dp->aux,
4402 DP_DEVICE_SERVICE_IRQ_VECTOR,
4403 sink_irq_vector);
4404
4405 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4406 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4407 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4408 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4409 }
4410
4411 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4412 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4413 intel_encoder->base.name);
4414 intel_dp_start_link_train(intel_dp);
4415 intel_dp_complete_link_train(intel_dp);
4416 intel_dp_stop_link_train(intel_dp);
4417 }
4418}
4419
4420/* XXX this is probably wrong for multiple downstream ports */
4421static enum drm_connector_status
4422intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4423{
4424 uint8_t *dpcd = intel_dp->dpcd;
4425 uint8_t type;
4426
4427 if (!intel_dp_get_dpcd(intel_dp))
4428 return connector_status_disconnected;
4429
4430 /* if there's no downstream port, we're done */
4431 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4432 return connector_status_connected;
4433
4434 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4435 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4436 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4437 uint8_t reg;
4438
4439 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4440 &reg, 1) < 0)
4441 return connector_status_unknown;
4442
4443 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4444 : connector_status_disconnected;
4445 }
4446
4447 /* If no HPD, poke DDC gently */
4448 if (drm_probe_ddc(&intel_dp->aux.ddc))
4449 return connector_status_connected;
4450
4451 /* Well we tried, say unknown for unreliable port types */
4452 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4453 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4454 if (type == DP_DS_PORT_TYPE_VGA ||
4455 type == DP_DS_PORT_TYPE_NON_EDID)
4456 return connector_status_unknown;
4457 } else {
4458 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4459 DP_DWN_STRM_PORT_TYPE_MASK;
4460 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4461 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4462 return connector_status_unknown;
4463 }
4464
4465 /* Anything else is out of spec, warn and ignore */
4466 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4467 return connector_status_disconnected;
4468}
4469
4470static enum drm_connector_status
4471edp_detect(struct intel_dp *intel_dp)
4472{
4473 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4474 enum drm_connector_status status;
4475
4476 status = intel_panel_detect(dev);
4477 if (status == connector_status_unknown)
4478 status = connector_status_connected;
4479
4480 return status;
4481}
4482
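/*
 * Live state check for PCH ports: test the port's hotplug bit in SDEISR,
 * using the IBX or CPT bit layout as appropriate. Port A (eDP) is always
 * reported as connected.
 */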
4483static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4484 struct intel_digital_port *port)
4485{
4486 u32 bit;
4487
4488 if (HAS_PCH_IBX(dev_priv->dev)) {
4489 switch (port->port) {
4490 case PORT_A:
4491 return true;
4492 case PORT_B:
4493 bit = SDE_PORTB_HOTPLUG;
4494 break;
4495 case PORT_C:
4496 bit = SDE_PORTC_HOTPLUG;
4497 break;
4498 case PORT_D:
4499 bit = SDE_PORTD_HOTPLUG;
4500 break;
4501 default:
4502 MISSING_CASE(port->port);
4503 return false;
4504 }
4505 } else {
4506 switch (port->port) {
4507 case PORT_A:
4508 return true;
4509 case PORT_B:
4510 bit = SDE_PORTB_HOTPLUG_CPT;
4511 break;
4512 case PORT_C:
4513 bit = SDE_PORTC_HOTPLUG_CPT;
4514 break;
4515 case PORT_D:
4516 bit = SDE_PORTD_HOTPLUG_CPT;
4517 break;
4518 default:
4519 MISSING_CASE(port->port);
4520 return false;
4521 }
4522 }
4523
4524 return I915_READ(SDEISR) & bit;
4525}
4526
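/*
 * Live state check for gmch platforms: test the port's live status bit
 * in PORT_HOTPLUG_STAT, which sits at different positions on VLV than
 * on g4x.
 */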
4527static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4528 struct intel_digital_port *port)
4529{
4530 uint32_t bit;
4531
4532 if (IS_VALLEYVIEW(dev_priv)) {
4533 switch (port->port) {
4534 case PORT_B:
4535 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4536 break;
4537 case PORT_C:
4538 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4539 break;
4540 case PORT_D:
4541 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4542 break;
4543 default:
4544 MISSING_CASE(port->port);
4545 return false;
4546 }
4547 } else {
4548 switch (port->port) {
4549 case PORT_B:
4550 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4551 break;
4552 case PORT_C:
4553 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4554 break;
4555 case PORT_D:
4556 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4557 break;
4558 default:
4559 MISSING_CASE(port->port);
4560 return false;
4561 }
4562 }
4563
4564 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4565}
4566
4567/*
4568 * intel_digital_port_connected - is the specified port connected?
4569 * @dev_priv: i915 private structure
4570 * @port: the port to test
4571 *
4572 * Return %true if @port is connected, %false otherwise.
4573 */
4574static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4575 struct intel_digital_port *port)
4576{
4577 if (HAS_PCH_SPLIT(dev_priv))
4578 return ibx_digital_port_connected(dev_priv, port);
4579 else
4580 return g4x_digital_port_connected(dev_priv, port);
4581}
4582
4583static enum drm_connector_status
4584ironlake_dp_detect(struct intel_dp *intel_dp)
4585{
4586 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4587 struct drm_i915_private *dev_priv = dev->dev_private;
4588 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4589
4590 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
4591 return connector_status_disconnected;
4592
4593 return intel_dp_detect_dpcd(intel_dp);
4594}
4595
4596static enum drm_connector_status
4597g4x_dp_detect(struct intel_dp *intel_dp)
4598{
4599 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4600 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4601
4602 /* Can't disconnect eDP, but you can close the lid... */
4603 if (is_edp(intel_dp)) {
4604 enum drm_connector_status status;
4605
4606 status = intel_panel_detect(dev);
4607 if (status == connector_status_unknown)
4608 status = connector_status_connected;
4609 return status;
4610 }
4611
4612 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
4613 return connector_status_disconnected;
4614
4615 return intel_dp_detect_dpcd(intel_dp);
4616}
4617
4618static struct edid *
4619intel_dp_get_edid(struct intel_dp *intel_dp)
4620{
4621 struct intel_connector *intel_connector = intel_dp->attached_connector;
4622
4623 /* use cached edid if we have one */
4624 if (intel_connector->edid) {
4625 /* invalid edid */
4626 if (IS_ERR(intel_connector->edid))
4627 return NULL;
4628
4629 return drm_edid_duplicate(intel_connector->edid);
4630 } else
4631 return drm_get_edid(&intel_connector->base,
4632 &intel_dp->aux.ddc);
4633}
4634
4635static void
4636intel_dp_set_edid(struct intel_dp *intel_dp)
4637{
4638 struct intel_connector *intel_connector = intel_dp->attached_connector;
4639 struct edid *edid;
4640
4641 edid = intel_dp_get_edid(intel_dp);
4642 intel_connector->detect_edid = edid;
4643
4644 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4645 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4646 else
4647 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4648}
4649
4650static void
4651intel_dp_unset_edid(struct intel_dp *intel_dp)
4652{
4653 struct intel_connector *intel_connector = intel_dp->attached_connector;
4654
4655 kfree(intel_connector->detect_edid);
4656 intel_connector->detect_edid = NULL;
4657
4658 intel_dp->has_audio = false;
4659}
4660
4661static enum intel_display_power_domain
4662intel_dp_power_get(struct intel_dp *dp)
4663{
4664 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4665 enum intel_display_power_domain power_domain;
4666
4667 power_domain = intel_display_port_power_domain(encoder);
4668 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4669
4670 return power_domain;
4671}
4672
4673static void
4674intel_dp_power_put(struct intel_dp *dp,
4675 enum intel_display_power_domain power_domain)
4676{
4677 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4678 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
4679}
4680
4681static enum drm_connector_status
4682intel_dp_detect(struct drm_connector *connector, bool force)
4683{
4684 struct intel_dp *intel_dp = intel_attached_dp(connector);
4685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4686 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4687 struct drm_device *dev = connector->dev;
4688 enum drm_connector_status status;
4689 enum intel_display_power_domain power_domain;
4690 bool ret;
4691 u8 sink_irq_vector;
4692
4693 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4694 connector->base.id, connector->name);
4695 intel_dp_unset_edid(intel_dp);
4696
4697 if (intel_dp->is_mst) {
4698 /* MST devices are disconnected from a monitor POV */
4699 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4700 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4701 return connector_status_disconnected;
4702 }
4703
4704 power_domain = intel_dp_power_get(intel_dp);
4705
4706 /* Can't disconnect eDP, but you can close the lid... */
4707 if (is_edp(intel_dp))
4708 status = edp_detect(intel_dp);
4709 else if (HAS_PCH_SPLIT(dev))
4710 status = ironlake_dp_detect(intel_dp);
4711 else
4712 status = g4x_dp_detect(intel_dp);
4713 if (status != connector_status_connected)
4714 goto out;
4715
4716 intel_dp_probe_oui(intel_dp);
4717
4718 ret = intel_dp_probe_mst(intel_dp);
4719 if (ret) {
4720 /* if we are in MST mode then this connector
4721 * won't appear connected or have anything with EDID on it */
4722 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4723 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4724 status = connector_status_disconnected;
4725 goto out;
4726 }
4727
4728 intel_dp_set_edid(intel_dp);
4729
4730 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4731 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4732 status = connector_status_connected;
4733
4734 /* Try to read the source of the interrupt */
4735 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4736 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4737 /* Clear interrupt source */
4738 drm_dp_dpcd_writeb(&intel_dp->aux,
4739 DP_DEVICE_SERVICE_IRQ_VECTOR,
4740 sink_irq_vector);
4741
4742 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4743 intel_dp_handle_test_request(intel_dp);
4744 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4745 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4746 }
4747
4748out:
4749 intel_dp_power_put(intel_dp, power_domain);
4750 return status;
4751}
4752
4753static void
4754intel_dp_force(struct drm_connector *connector)
4755{
4756 struct intel_dp *intel_dp = intel_attached_dp(connector);
4757 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4758 enum intel_display_power_domain power_domain;
4759
4760 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4761 connector->base.id, connector->name);
4762 intel_dp_unset_edid(intel_dp);
4763
4764 if (connector->status != connector_status_connected)
4765 return;
4766
4767 power_domain = intel_dp_power_get(intel_dp);
4768
4769 intel_dp_set_edid(intel_dp);
4770
4771 intel_dp_power_put(intel_dp, power_domain);
4772
4773 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4774 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4775}
4776
4777static int intel_dp_get_modes(struct drm_connector *connector)
4778{
4779 struct intel_connector *intel_connector = to_intel_connector(connector);
4780 struct edid *edid;
4781
4782 edid = intel_connector->detect_edid;
4783 if (edid) {
4784 int ret = intel_connector_update_modes(connector, edid);
4785 if (ret)
4786 return ret;
4787 }
4788
4789 /* if eDP has no EDID, fall back to fixed mode */
4790 if (is_edp(intel_attached_dp(connector)) &&
4791 intel_connector->panel.fixed_mode) {
4792 struct drm_display_mode *mode;
4793
4794 mode = drm_mode_duplicate(connector->dev,
4795 intel_connector->panel.fixed_mode);
4796 if (mode) {
4797 drm_mode_probed_add(connector, mode);
4798 return 1;
4799 }
4800 }
4801
4802 return 0;
4803}
4804
4805static bool
4806intel_dp_detect_audio(struct drm_connector *connector)
4807{
4808 bool has_audio = false;
4809 struct edid *edid;
4810
4811 edid = to_intel_connector(connector)->detect_edid;
4812 if (edid)
4813 has_audio = drm_detect_monitor_audio(edid);
4814
4815 return has_audio;
4816}
4817
4818static int
4819intel_dp_set_property(struct drm_connector *connector,
4820 struct drm_property *property,
4821 uint64_t val)
4822{
4823 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4824 struct intel_connector *intel_connector = to_intel_connector(connector);
4825 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4826 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4827 int ret;
4828
4829 ret = drm_object_property_set_value(&connector->base, property, val);
4830 if (ret)
4831 return ret;
4832
4833 if (property == dev_priv->force_audio_property) {
4834 int i = val;
4835 bool has_audio;
4836
4837 if (i == intel_dp->force_audio)
4838 return 0;
4839
4840 intel_dp->force_audio = i;
4841
4842 if (i == HDMI_AUDIO_AUTO)
4843 has_audio = intel_dp_detect_audio(connector);
4844 else
4845 has_audio = (i == HDMI_AUDIO_ON);
4846
4847 if (has_audio == intel_dp->has_audio)
4848 return 0;
4849
4850 intel_dp->has_audio = has_audio;
4851 goto done;
4852 }
4853
4854 if (property == dev_priv->broadcast_rgb_property) {
4855 bool old_auto = intel_dp->color_range_auto;
4856 bool old_range = intel_dp->limited_color_range;
4857
4858 switch (val) {
4859 case INTEL_BROADCAST_RGB_AUTO:
4860 intel_dp->color_range_auto = true;
4861 break;
4862 case INTEL_BROADCAST_RGB_FULL:
4863 intel_dp->color_range_auto = false;
4864 intel_dp->limited_color_range = false;
4865 break;
4866 case INTEL_BROADCAST_RGB_LIMITED:
4867 intel_dp->color_range_auto = false;
4868 intel_dp->limited_color_range = true;
4869 break;
4870 default:
4871 return -EINVAL;
4872 }
4873
4874 if (old_auto == intel_dp->color_range_auto &&
4875 old_range == intel_dp->limited_color_range)
4876 return 0;
4877
4878 goto done;
4879 }
4880
4881 if (is_edp(intel_dp) &&
4882 property == connector->dev->mode_config.scaling_mode_property) {
4883 if (val == DRM_MODE_SCALE_NONE) {
4884 DRM_DEBUG_KMS("no scaling not supported\n");
4885 return -EINVAL;
4886 }
4887
4888 if (intel_connector->panel.fitting_mode == val) {
4889 /* the eDP scaling property is not changed */
4890 return 0;
4891 }
4892 intel_connector->panel.fitting_mode = val;
4893
4894 goto done;
4895 }
4896
4897 return -EINVAL;
4898
4899done:
4900 if (intel_encoder->base.crtc)
4901 intel_crtc_restore_mode(intel_encoder->base.crtc);
4902
4903 return 0;
4904}
4905
4906static void
4907intel_dp_connector_destroy(struct drm_connector *connector)
4908{
4909 struct intel_connector *intel_connector = to_intel_connector(connector);
4910
4911 kfree(intel_connector->detect_edid);
4912
4913 if (!IS_ERR_OR_NULL(intel_connector->edid))
4914 kfree(intel_connector->edid);
4915
4916 /* Can't call is_edp() since the encoder may have been destroyed
4917 * already. */
4918 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4919 intel_panel_fini(&intel_connector->panel);
4920
4921 drm_connector_cleanup(connector);
4922 kfree(connector);
4923}
4924
4925void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4926{
4927 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4928 struct intel_dp *intel_dp = &intel_dig_port->dp;
4929
4930 drm_dp_aux_unregister(&intel_dp->aux);
4931 intel_dp_mst_encoder_cleanup(intel_dig_port);
4932 if (is_edp(intel_dp)) {
4933 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4934 /*
4935 * vdd might still be enabled due to the delayed vdd off.
4936 * Make sure vdd is actually turned off here.
4937 */
4938 pps_lock(intel_dp);
4939 edp_panel_vdd_off_sync(intel_dp);
4940 pps_unlock(intel_dp);
4941
4942 if (intel_dp->edp_notifier.notifier_call) {
4943 unregister_reboot_notifier(&intel_dp->edp_notifier);
4944 intel_dp->edp_notifier.notifier_call = NULL;
4945 }
4946 }
4947 drm_encoder_cleanup(encoder);
4948 kfree(intel_dig_port);
4949}
4950
4951static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4952{
4953 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4954
4955 if (!is_edp(intel_dp))
4956 return;
4957
4958 /*
4959 * vdd might still be enabled due to the delayed vdd off.
4960 * Make sure vdd is actually turned off here.
4961 */
4962 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4963 pps_lock(intel_dp);
4964 edp_panel_vdd_off_sync(intel_dp);
4965 pps_unlock(intel_dp);
4966}
4967
4968static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4969{
4970 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4971 struct drm_device *dev = intel_dig_port->base.base.dev;
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4973 enum intel_display_power_domain power_domain;
4974
4975 lockdep_assert_held(&dev_priv->pps_mutex);
4976
4977 if (!edp_have_panel_vdd(intel_dp))
4978 return;
4979
4980 /*
4981 * The VDD bit needs a power domain reference, so if the bit is
4982 * already enabled when we boot or resume, grab this reference and
4983 * schedule a vdd off, so we don't hold on to the reference
4984 * indefinitely.
4985 */
4986 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4987 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4988 intel_display_power_get(dev_priv, power_domain);
4989
4990 edp_panel_vdd_schedule_off(intel_dp);
4991}
4992
4993static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4994{
4995 struct intel_dp *intel_dp;
4996
4997 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4998 return;
4999
5000 intel_dp = enc_to_intel_dp(encoder);
5001
5002 pps_lock(intel_dp);
5003
5004 /*
5005 * Read out the current power sequencer assignment,
5006 * in case the BIOS did something with it.
5007 */
5008 if (IS_VALLEYVIEW(encoder->dev))
5009 vlv_initial_power_sequencer_setup(intel_dp);
5010
5011 intel_edp_panel_vdd_sanitize(intel_dp);
5012
5013 pps_unlock(intel_dp);
5014}
5015
5016static const struct drm_connector_funcs intel_dp_connector_funcs = {
5017 .dpms = drm_atomic_helper_connector_dpms,
5018 .detect = intel_dp_detect,
5019 .force = intel_dp_force,
5020 .fill_modes = drm_helper_probe_single_connector_modes,
5021 .set_property = intel_dp_set_property,
5022 .atomic_get_property = intel_connector_atomic_get_property,
5023 .destroy = intel_dp_connector_destroy,
5024 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5025 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5026};
5027
5028static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5029 .get_modes = intel_dp_get_modes,
5030 .mode_valid = intel_dp_mode_valid,
5031 .best_encoder = intel_best_encoder,
5032};
5033
5034static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5035 .reset = intel_dp_encoder_reset,
5036 .destroy = intel_dp_encoder_destroy,
5037};
5038
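/*
 * HPD handler for DP ports: long pulses re-probe the sink (DPCD, OUI,
 * MST capability), short pulses service MST interrupts or re-check the
 * link status. Falls out of MST mode if the device has disappeared.
 */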
5039enum irqreturn
5040intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5041{
5042 struct intel_dp *intel_dp = &intel_dig_port->dp;
5043 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5044 struct drm_device *dev = intel_dig_port->base.base.dev;
5045 struct drm_i915_private *dev_priv = dev->dev_private;
5046 enum intel_display_power_domain power_domain;
5047 enum irqreturn ret = IRQ_NONE;
5048
5049 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5050 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5051
5052 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5053 /*
5054 * vdd off can generate a long pulse on eDP which
5055 * would require vdd on to handle it, and thus we
5056 * would end up in an endless cycle of
5057 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5058 */
5059 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5060 port_name(intel_dig_port->port));
5061 return IRQ_HANDLED;
5062 }
5063
5064 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5065 port_name(intel_dig_port->port),
5066 long_hpd ? "long" : "short");
5067
5068 power_domain = intel_display_port_power_domain(intel_encoder);
5069 intel_display_power_get(dev_priv, power_domain);
5070
5071 if (long_hpd) {
5072 /* indicate that we need to restart link training */
5073 intel_dp->train_set_valid = false;
5074
5075 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5076 goto mst_fail;
5077
5078 if (!intel_dp_get_dpcd(intel_dp)) {
5079 goto mst_fail;
5080 }
5081
5082 intel_dp_probe_oui(intel_dp);
5083
5084 if (!intel_dp_probe_mst(intel_dp))
5085 goto mst_fail;
5086
5087 } else {
5088 if (intel_dp->is_mst) {
5089 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5090 goto mst_fail;
5091 }
5092
5093 if (!intel_dp->is_mst) {
5094 /*
5095 * we'll check the link status via the normal hot plug path later -
5096 * but for short hpds we should check it now
5097 */
5098 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5099 intel_dp_check_link_status(intel_dp);
5100 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5101 }
5102 }
5103
5104 ret = IRQ_HANDLED;
5105
5106 goto put_power;
5107mst_fail:
5108 /* if we were in MST mode, and the device is not there, get out of MST mode */
5109 if (intel_dp->is_mst) {
5110 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5111 intel_dp->is_mst = false;
5112 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5113 }
5114put_power:
5115 intel_display_power_put(dev_priv, power_domain);
5116
5117 return ret;
5118}
5119
5120/* Return which DP Port should be selected for Transcoder DP control */
5121int
5122intel_trans_dp_port_sel(struct drm_crtc *crtc)
5123{
5124 struct drm_device *dev = crtc->dev;
5125 struct intel_encoder *intel_encoder;
5126 struct intel_dp *intel_dp;
5127
5128 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5129 intel_dp = enc_to_intel_dp(&intel_encoder->base);
5130
5131 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5132 intel_encoder->type == INTEL_OUTPUT_EDP)
5133 return intel_dp->output_reg;
5134 }
5135
5136 return -1;
5137}
5138
5139 /* check the VBT to see whether the given port is used for eDP */
5140bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5141{
5142 struct drm_i915_private *dev_priv = dev->dev_private;
5143 union child_device_config *p_child;
5144 int i;
5145 static const short port_mapping[] = {
5146 [PORT_B] = PORT_IDPB,
5147 [PORT_C] = PORT_IDPC,
5148 [PORT_D] = PORT_IDPD,
5149 };
5150
5151 if (port == PORT_A)
5152 return true;
5153
5154 if (!dev_priv->vbt.child_dev_num)
5155 return false;
5156
5157 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5158 p_child = dev_priv->vbt.child_dev + i;
5159
5160 if (p_child->common.dvo_port == port_mapping[port] &&
5161 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5162 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5163 return true;
5164 }
5165 return false;
5166}
5167
5168void
5169intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5170{
5171 struct intel_connector *intel_connector = to_intel_connector(connector);
5172
5173 intel_attach_force_audio_property(connector);
5174 intel_attach_broadcast_rgb_property(connector);
5175 intel_dp->color_range_auto = true;
5176
5177 if (is_edp(intel_dp)) {
5178 drm_mode_create_scaling_mode_property(connector->dev);
5179 drm_object_attach_property(
5180 &connector->base,
5181 connector->dev->mode_config.scaling_mode_property,
5182 DRM_MODE_SCALE_ASPECT);
5183 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5184 }
5185}
5186
5187static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5188{
5189 intel_dp->last_power_cycle = jiffies;
5190 intel_dp->last_power_on = jiffies;
5191 intel_dp->last_backlight_off = jiffies;
5192}
5193
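/*
 * Determine the panel power sequencer delays from the current register
 * settings and the VBT, taking the max of the two and falling back to
 * the eDP spec limits when both are unset.
 */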
5194static void
5195intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5196 struct intel_dp *intel_dp)
5197{
5198 struct drm_i915_private *dev_priv = dev->dev_private;
5199 struct edp_power_seq cur, vbt, spec,
5200 *final = &intel_dp->pps_delays;
5201 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5202 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
5203
5204 lockdep_assert_held(&dev_priv->pps_mutex);
5205
5206 /* already initialized? */
5207 if (final->t11_t12 != 0)
5208 return;
5209
5210 if (IS_BROXTON(dev)) {
5211 /*
5212 * TODO: BXT has 2 sets of PPS registers.
5213 * The correct register for Broxton needs to be identified
5214 * using the VBT. Hardcoding for now.
5215 */
5216 pp_ctrl_reg = BXT_PP_CONTROL(0);
5217 pp_on_reg = BXT_PP_ON_DELAYS(0);
5218 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5219 } else if (HAS_PCH_SPLIT(dev)) {
5220 pp_ctrl_reg = PCH_PP_CONTROL;
5221 pp_on_reg = PCH_PP_ON_DELAYS;
5222 pp_off_reg = PCH_PP_OFF_DELAYS;
5223 pp_div_reg = PCH_PP_DIVISOR;
5224 } else {
5225 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5226
5227 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5228 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5229 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5230 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5231 }
5232
5233 /* Workaround: Need to write PP_CONTROL with the unlock key as
5234 * the very first thing. */
5235 pp_ctl = ironlake_get_pp_control(intel_dp);
5236
5237 pp_on = I915_READ(pp_on_reg);
5238 pp_off = I915_READ(pp_off_reg);
5239 if (!IS_BROXTON(dev)) {
5240 I915_WRITE(pp_ctrl_reg, pp_ctl);
5241 pp_div = I915_READ(pp_div_reg);
5242 }
5243
5244 /* Pull timing values out of registers */
5245 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5246 PANEL_POWER_UP_DELAY_SHIFT;
5247
5248 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5249 PANEL_LIGHT_ON_DELAY_SHIFT;
5250
5251 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5252 PANEL_LIGHT_OFF_DELAY_SHIFT;
5253
5254 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5255 PANEL_POWER_DOWN_DELAY_SHIFT;
5256
5257 if (IS_BROXTON(dev)) {
5258 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5259 BXT_POWER_CYCLE_DELAY_SHIFT;
5260 if (tmp > 0)
5261 cur.t11_t12 = (tmp - 1) * 1000;
5262 else
5263 cur.t11_t12 = 0;
5264 } else {
5265 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5266 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5267 }
5268
5269 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5270 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5271
5272 vbt = dev_priv->vbt.edp_pps;
5273
5274 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5275 * our hw here, which are all in 100usec. */
5276 spec.t1_t3 = 210 * 10;
5277 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5278 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5279 spec.t10 = 500 * 10;
5280 /* This one is special and actually in units of 100ms, but zero
5281 * based in the hw (so we need to add 100 ms). But the sw vbt
5282 * table multiplies it by 1000 to make it in units of 100usec,
5283 * too. */
5284 spec.t11_t12 = (510 + 100) * 10;
5285
5286 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5287 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5288
5289 /* Use the max of the register settings and vbt. If both are
5290 * unset, fall back to the spec limits. */
5291#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5292 spec.field : \
5293 max(cur.field, vbt.field))
5294 assign_final(t1_t3);
5295 assign_final(t8);
5296 assign_final(t9);
5297 assign_final(t10);
5298 assign_final(t11_t12);
5299#undef assign_final
5300
5301#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5302 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5303 intel_dp->backlight_on_delay = get_delay(t8);
5304 intel_dp->backlight_off_delay = get_delay(t9);
5305 intel_dp->panel_power_down_delay = get_delay(t10);
5306 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5307#undef get_delay
5308
5309 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5310 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5311 intel_dp->panel_power_cycle_delay);
5312
5313 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5314 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5315}
5316
5317static void
5318intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5319 struct intel_dp *intel_dp)
5320{
5321 struct drm_i915_private *dev_priv = dev->dev_private;
5322 u32 pp_on, pp_off, pp_div, port_sel = 0;
5323 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5324 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
5325 enum port port = dp_to_dig_port(intel_dp)->port;
5326 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5327
5328 lockdep_assert_held(&dev_priv->pps_mutex);
5329
5330 if (IS_BROXTON(dev)) {
5331 /*
5332 * TODO: BXT has 2 sets of PPS registers.
5333 * The correct register for Broxton needs to be identified
5334 * using the VBT. Hardcoding for now.
5335 */
5336 pp_ctrl_reg = BXT_PP_CONTROL(0);
5337 pp_on_reg = BXT_PP_ON_DELAYS(0);
5338 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5339
5340 } else if (HAS_PCH_SPLIT(dev)) {
5341 pp_on_reg = PCH_PP_ON_DELAYS;
5342 pp_off_reg = PCH_PP_OFF_DELAYS;
5343 pp_div_reg = PCH_PP_DIVISOR;
5344 } else {
5345 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5346
5347 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5348 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5349 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5350 }
5351
5352 /*
5353 * And finally store the new values in the power sequencer. The
5354 * backlight delays are set to 1 because we do manual waits on them. For
5355 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5356 * we'll end up waiting for the backlight off delay twice: once when we
5357 * do the manual sleep, and once when we disable the panel and wait for
5358 * the PP_STATUS bit to become zero.
5359 */
5360 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5361 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5362 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5363 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5364 /* Compute the divisor for the pp clock, simply match the Bspec
5365 * formula. */
5366 if (IS_BROXTON(dev)) {
5367 pp_div = I915_READ(pp_ctrl_reg);
5368 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5369 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5370 << BXT_POWER_CYCLE_DELAY_SHIFT);
5371 } else {
5372 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5373 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5374 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5375 }
5376
5377 /* Haswell doesn't have any port selection bits for the panel
5378 * power sequencer any more. */
5379 if (IS_VALLEYVIEW(dev)) {
5380 port_sel = PANEL_PORT_SELECT_VLV(port);
5381 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5382 if (port == PORT_A)
5383 port_sel = PANEL_PORT_SELECT_DPA;
5384 else
5385 port_sel = PANEL_PORT_SELECT_DPD;
5386 }
5387
5388 pp_on |= port_sel;
5389
5390 I915_WRITE(pp_on_reg, pp_on);
5391 I915_WRITE(pp_off_reg, pp_off);
5392 if (IS_BROXTON(dev))
5393 I915_WRITE(pp_ctrl_reg, pp_div);
5394 else
5395 I915_WRITE(pp_div_reg, pp_div);
5396
5397 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5398 I915_READ(pp_on_reg),
5399 I915_READ(pp_off_reg),
5400 IS_BROXTON(dev) ?
5401 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5402 I915_READ(pp_div_reg));
5403}
5404
5405/**
5406 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5407 * @dev: DRM device
5408 * @refresh_rate: RR to be programmed
5409 *
5410 * This function gets called when refresh rate (RR) has to be changed from
5411 * one frequency to another. Switches can be between high and low RR
5412 * supported by the panel or to any other RR based on media playback (in
5413 * this case, RR value needs to be passed from user space).
5414 *
5415 * The caller of this function needs to take a lock on dev_priv->drrs.
5416 */
5417static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5418{
5419 struct drm_i915_private *dev_priv = dev->dev_private;
5420 struct intel_encoder *encoder;
5421 struct intel_digital_port *dig_port = NULL;
5422 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5423 struct intel_crtc_state *config = NULL;
5424 struct intel_crtc *intel_crtc = NULL;
5425 u32 reg, val;
5426 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5427
5428 if (refresh_rate <= 0) {
5429 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5430 return;
5431 }
5432
5433 if (intel_dp == NULL) {
5434 DRM_DEBUG_KMS("DRRS not supported.\n");
5435 return;
5436 }
5437
5438 /*
5439 * FIXME: This needs proper synchronization with psr state for some
5440 * platforms that cannot have PSR and DRRS enabled at the same time.
5441 */
5442
5443 dig_port = dp_to_dig_port(intel_dp);
5444 encoder = &dig_port->base;
5445 intel_crtc = to_intel_crtc(encoder->base.crtc);
5446
5447 if (!intel_crtc) {
5448 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5449 return;
5450 }
5451
5452 config = intel_crtc->config;
5453
5454 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5455 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5456 return;
5457 }
5458
5459 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5460 refresh_rate)
5461 index = DRRS_LOW_RR;
5462
5463 if (index == dev_priv->drrs.refresh_rate_type) {
5464 DRM_DEBUG_KMS(
5465 "DRRS requested for previously set RR...ignoring\n");
5466 return;
5467 }
5468
5469 if (!intel_crtc->active) {
5470 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5471 return;
5472 }
5473
5474 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5475 switch (index) {
5476 case DRRS_HIGH_RR:
5477 intel_dp_set_m_n(intel_crtc, M1_N1);
5478 break;
5479 case DRRS_LOW_RR:
5480 intel_dp_set_m_n(intel_crtc, M2_N2);
5481 break;
5482 case DRRS_MAX_RR:
5483 default:
5484 DRM_ERROR("Unsupported refreshrate type\n");
5485 }
5486 } else if (INTEL_INFO(dev)->gen > 6) {
5487 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5488 val = I915_READ(reg);
5489
5490 if (index > DRRS_HIGH_RR) {
5491 if (IS_VALLEYVIEW(dev))
5492 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5493 else
5494 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5495 } else {
5496 if (IS_VALLEYVIEW(dev))
5497 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5498 else
5499 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5500 }
5501 I915_WRITE(reg, val);
5502 }
5503
5504 dev_priv->drrs.refresh_rate_type = index;
5505
5506 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5507}
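/*
 * Usage sketch (illustrative only, mirroring intel_edp_drrs_downclock_work()
 * below): callers hold drrs.mutex and pass the target vrefresh, e.g. to
 * drop to the panel's downclocked rate:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv->dev,
 *				intel_dp->attached_connector->panel.
 *				downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */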
5508
5509/**
5510 * intel_edp_drrs_enable - init DRRS struct if supported
5511 * @intel_dp: DP struct
5512 *
5513 * Initializes busy_frontbuffer_bits and drrs.dp.
5514 */
5515void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5516{
5517 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5518 struct drm_i915_private *dev_priv = dev->dev_private;
5519 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5520 struct drm_crtc *crtc = dig_port->base.base.crtc;
5521 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5522
5523 if (!intel_crtc->config->has_drrs) {
5524 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5525 return;
5526 }
5527
5528 mutex_lock(&dev_priv->drrs.mutex);
5529 if (WARN_ON(dev_priv->drrs.dp)) {
5530 DRM_ERROR("DRRS already enabled\n");
5531 goto unlock;
5532 }
5533
5534 dev_priv->drrs.busy_frontbuffer_bits = 0;
5535
5536 dev_priv->drrs.dp = intel_dp;
5537
5538unlock:
5539 mutex_unlock(&dev_priv->drrs.mutex);
5540}
5541
5542/**
5543 * intel_edp_drrs_disable - Disable DRRS
5544 * @intel_dp: DP struct
5545 * If a downclock is active, this restores the high refresh rate first.
5546 */
5547void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5548{
5549 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5550 struct drm_i915_private *dev_priv = dev->dev_private;
5551 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5552 struct drm_crtc *crtc = dig_port->base.base.crtc;
5553 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5554
5555 if (!intel_crtc->config->has_drrs)
5556 return;
5557
5558 mutex_lock(&dev_priv->drrs.mutex);
5559 if (!dev_priv->drrs.dp) {
5560 mutex_unlock(&dev_priv->drrs.mutex);
5561 return;
5562 }
5563
5564 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5565 intel_dp_set_drrs_state(dev_priv->dev,
5566 intel_dp->attached_connector->panel.
5567 fixed_mode->vrefresh);
5568
5569 dev_priv->drrs.dp = NULL;
5570 mutex_unlock(&dev_priv->drrs.mutex);
5571
5572 cancel_delayed_work_sync(&dev_priv->drrs.work);
5573}
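/*
 * Note: intel_edp_drrs_enable() and intel_edp_drrs_disable() are expected
 * to be called as a matched pair from the encoder enable/disable paths
 * (the WARN_ON in the enable path guards against a double enable). A
 * hedged sketch of the pairing:
 *
 *	intel_edp_drrs_enable(intel_dp);	(modeset enable path)
 *	...					(idleness DRRS active)
 *	intel_edp_drrs_disable(intel_dp);	(modeset disable path)
 */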
5574
5575static void intel_edp_drrs_downclock_work(struct work_struct *work)
5576{
5577 struct drm_i915_private *dev_priv =
5578 container_of(work, typeof(*dev_priv), drrs.work.work);
5579 struct intel_dp *intel_dp;
5580
5581 mutex_lock(&dev_priv->drrs.mutex);
5582
5583 intel_dp = dev_priv->drrs.dp;
5584
5585 if (!intel_dp)
5586 goto unlock;
5587
5588 /*
5589 * The delayed work can race with an invalidate, hence we need to
5590 * recheck.
5591 */
5592
5593 if (dev_priv->drrs.busy_frontbuffer_bits)
5594 goto unlock;
5595
5596 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5597 intel_dp_set_drrs_state(dev_priv->dev,
5598 intel_dp->attached_connector->panel.
5599 downclock_mode->vrefresh);
5600
5601unlock:
5602 mutex_unlock(&dev_priv->drrs.mutex);
5603}
5604
5605/**
5606 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5607 * @dev: DRM device
5608 * @frontbuffer_bits: frontbuffer plane tracking bits
5609 *
5610 * This function gets called every time rendering on the given planes starts.
5611 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5612 *
5613 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5614 */
5615void intel_edp_drrs_invalidate(struct drm_device *dev,
5616 unsigned frontbuffer_bits)
5617{
5618 struct drm_i915_private *dev_priv = dev->dev_private;
5619 struct drm_crtc *crtc;
5620 enum pipe pipe;
5621
5622 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5623 return;
5624
5625 cancel_delayed_work(&dev_priv->drrs.work);
5626
5627 mutex_lock(&dev_priv->drrs.mutex);
5628 if (!dev_priv->drrs.dp) {
5629 mutex_unlock(&dev_priv->drrs.mutex);
5630 return;
5631 }
5632
5633 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5634 pipe = to_intel_crtc(crtc)->pipe;
5635
5636 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5637 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5638
5639 /* invalidate means busy screen hence upclock */
5640 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5641 intel_dp_set_drrs_state(dev_priv->dev,
5642 dev_priv->drrs.dp->attached_connector->panel.
5643 fixed_mode->vrefresh);
5644
5645 mutex_unlock(&dev_priv->drrs.mutex);
5646}
5647
5648/**
5649 * intel_edp_drrs_flush - Restart Idleness DRRS
5650 * @dev: DRM device
5651 * @frontbuffer_bits: frontbuffer plane tracking bits
5652 *
5653 * This function gets called every time rendering on the given planes has
5654 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5655 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted if no
5656 * other planes are dirty.
5657 *
5658 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5659 */
5660void intel_edp_drrs_flush(struct drm_device *dev,
5661 unsigned frontbuffer_bits)
5662{
5663 struct drm_i915_private *dev_priv = dev->dev_private;
5664 struct drm_crtc *crtc;
5665 enum pipe pipe;
5666
5667 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5668 return;
5669
5670 cancel_delayed_work(&dev_priv->drrs.work);
5671
5672 mutex_lock(&dev_priv->drrs.mutex);
5673 if (!dev_priv->drrs.dp) {
5674 mutex_unlock(&dev_priv->drrs.mutex);
5675 return;
5676 }
5677
5678 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5679 pipe = to_intel_crtc(crtc)->pipe;
5680
5681 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5682 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5683
5684 /* flush means busy screen hence upclock */
5685 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5686 intel_dp_set_drrs_state(dev_priv->dev,
5687 dev_priv->drrs.dp->attached_connector->panel.
5688 fixed_mode->vrefresh);
5689
5690 /*
5691 * flush also means no more activity hence schedule downclock, if all
5692 * other fbs are quiescent too
5693 */
5694 if (!dev_priv->drrs.busy_frontbuffer_bits)
5695 schedule_delayed_work(&dev_priv->drrs.work,
5696 msecs_to_jiffies(1000));
5697 mutex_unlock(&dev_priv->drrs.mutex);
5698}
5699
5700/**
5701 * DOC: Display Refresh Rate Switching (DRRS)
5702 *
5703 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5704 * which enables switching between low and high refresh rates,
5705 * dynamically, based on the usage scenario. This feature is applicable
5706 * to internal panels.
5707 *
5708 * Indication that the panel supports DRRS is given by the panel EDID, which
5709 * would list multiple refresh rates for one resolution.
5710 *
5711 * DRRS is of two types - static and seamless.
5712 * Static DRRS involves changing the refresh rate (RR) via a full modeset
5713 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5714 * Seamless DRRS involves changing RR without any visual effect to the user
5715 * and can be used during normal system usage. This is done by programming
5716 * certain registers.
5717 *
5718 * Support for static/seamless DRRS may be indicated in the VBT based on
5719 * inputs from the panel spec.
5720 *
5721 * DRRS saves power by switching to low RR based on usage scenarios.
5722 *
5723 * eDP DRRS:-
5724 * The implementation is based on frontbuffer tracking implementation.
5725 * When there is a disturbance on the screen triggered by user activity or a
5726 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5727 * When there is no movement on screen, after a timeout of 1 second, a switch
5728 * to low RR is made.
5729 * For integration with frontbuffer tracking code,
5730 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5731 *
5732 * DRRS can be further extended to support other internal panels and also
5733 * the scenario of video playback wherein RR is set based on the rate
5734 * requested by userspace.
5735 */
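/*
 * Call-flow sketch for the frontbuffer tracking integration described in
 * the DOC section above (descriptive only; the callers live in the
 * frontbuffer tracking code):
 *
 *	rendering/flip starts -> intel_edp_drrs_invalidate(dev, bits)
 *				 (upclock: LOW_RR -> HIGH_RR)
 *	rendering/flip done   -> intel_edp_drrs_flush(dev, bits)
 *				 (upclock, then arm the 1 second idle timer)
 *	1 second of idleness  -> intel_edp_drrs_downclock_work()
 *				 (downclock: HIGH_RR -> LOW_RR)
 */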
5736
5737/**
5738 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5739 * @intel_connector: eDP connector
5740 * @fixed_mode: preferred mode of panel
5741 *
5742 * This function is called only once at driver load to initialize the
5743 * basic DRRS state.
5744 *
5745 * Returns:
5746 * The downclock mode if the panel supports it, else NULL.
5747 * DRRS support is determined by the presence of a downclock mode (apart
5748 * from the VBT setting).
5749 */
5750static struct drm_display_mode *
5751intel_dp_drrs_init(struct intel_connector *intel_connector,
5752 struct drm_display_mode *fixed_mode)
5753{
5754 struct drm_connector *connector = &intel_connector->base;
5755 struct drm_device *dev = connector->dev;
5756 struct drm_i915_private *dev_priv = dev->dev_private;
5757 struct drm_display_mode *downclock_mode = NULL;
5758
5759 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5760 mutex_init(&dev_priv->drrs.mutex);
5761
5762 if (INTEL_INFO(dev)->gen <= 6) {
5763 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5764 return NULL;
5765 }
5766
5767 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5768 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5769 return NULL;
5770 }
5771
5772 downclock_mode = intel_find_panel_downclock(dev, fixed_mode,
5773 connector);
5774
5775 if (!downclock_mode) {
5776 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5777 return NULL;
5778 }
5779
5780 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5781
5782 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5783 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5784 return downclock_mode;
5785}
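/*
 * The downclock mode returned above is consumed in intel_edp_init_connector()
 * below: it is handed to intel_panel_init() together with the fixed mode,
 * and its vrefresh becomes the LOW_RR target that intel_dp_set_drrs_state()
 * switches to.
 */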
5786
5787static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5788 struct intel_connector *intel_connector)
5789{
5790 struct drm_connector *connector = &intel_connector->base;
5791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5792 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5793 struct drm_device *dev = intel_encoder->base.dev;
5794 struct drm_i915_private *dev_priv = dev->dev_private;
5795 struct drm_display_mode *fixed_mode = NULL;
5796 struct drm_display_mode *downclock_mode = NULL;
5797 bool has_dpcd;
5798 struct drm_display_mode *scan;
5799 struct edid *edid;
5800 enum pipe pipe = INVALID_PIPE;
5801
5802 if (!is_edp(intel_dp))
5803 return true;
5804
5805 pps_lock(intel_dp);
5806 intel_edp_panel_vdd_sanitize(intel_dp);
5807 pps_unlock(intel_dp);
5808
5809 /* Cache DPCD and EDID for edp. */
5810 has_dpcd = intel_dp_get_dpcd(intel_dp);
5811
5812 if (has_dpcd) {
5813 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5814 dev_priv->no_aux_handshake =
5815 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5816 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5817 } else {
5818 /* if this fails, presume the device is a ghost */
5819 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5820 return false;
5821 }
5822
5823 /* We now know it's not a ghost, init power sequence regs. */
5824 pps_lock(intel_dp);
5825 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5826 pps_unlock(intel_dp);
5827
5828 mutex_lock(&dev->mode_config.mutex);
5829 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5830 if (edid) {
5831 if (drm_add_edid_modes(connector, edid)) {
5832 drm_mode_connector_update_edid_property(connector,
5833 edid);
5834 drm_edid_to_eld(connector, edid);
5835 } else {
5836 kfree(edid);
5837 edid = ERR_PTR(-EINVAL);
5838 }
5839 } else {
5840 edid = ERR_PTR(-ENOENT);
5841 }
5842 intel_connector->edid = edid;
5843
5844 /* prefer fixed mode from EDID if available */
5845 list_for_each_entry(scan, &connector->probed_modes, head) {
5846 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5847 fixed_mode = drm_mode_duplicate(dev, scan);
5848 downclock_mode = intel_dp_drrs_init(
5849 intel_connector, fixed_mode);
5850 break;
5851 }
5852 }
5853
5854 /* fallback to VBT if available for eDP */
5855 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5856 fixed_mode = drm_mode_duplicate(dev,
5857 dev_priv->vbt.lfp_lvds_vbt_mode);
5858 if (fixed_mode)
5859 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5860 }
5861 mutex_unlock(&dev->mode_config.mutex);
5862
5863 if (IS_VALLEYVIEW(dev)) {
5864 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5865 register_reboot_notifier(&intel_dp->edp_notifier);
5866
5867 /*
5868 * Figure out the current pipe for the initial backlight setup.
5869 * If the current pipe isn't valid, try the PPS pipe, and if that
5870 * fails just assume pipe A.
5871 */
5872 if (IS_CHERRYVIEW(dev))
5873 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5874 else
5875 pipe = PORT_TO_PIPE(intel_dp->DP);
5876
5877 if (pipe != PIPE_A && pipe != PIPE_B)
5878 pipe = intel_dp->pps_pipe;
5879
5880 if (pipe != PIPE_A && pipe != PIPE_B)
5881 pipe = PIPE_A;
5882
5883 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5884 pipe_name(pipe));
5885 }
5886
5887 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5888 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5889 intel_panel_setup_backlight(connector, pipe);
5890
5891 return true;
5892}
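/*
 * Summary of the eDP connector init sequence above (descriptive only):
 * sanitize any leftover VDD state, cache the DPCD (treating a failure as
 * a ghost panel), program the panel power sequencer registers, pick a
 * fixed mode from the EDID (probing for a DRRS downclock mode) or fall
 * back to the VBT mode, then initialize the panel and backlight.
 */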
5893
5894bool
5895intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5896 struct intel_connector *intel_connector)
5897{
5898 struct drm_connector *connector = &intel_connector->base;
5899 struct intel_dp *intel_dp = &intel_dig_port->dp;
5900 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5901 struct drm_device *dev = intel_encoder->base.dev;
5902 struct drm_i915_private *dev_priv = dev->dev_private;
5903 enum port port = intel_dig_port->port;
5904 int type;
5905
5906 intel_dp->pps_pipe = INVALID_PIPE;
5907
5908 /* intel_dp vfuncs */
5909 if (INTEL_INFO(dev)->gen >= 9)
5910 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5911 else if (IS_VALLEYVIEW(dev))
5912 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5913 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5914 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5915 else if (HAS_PCH_SPLIT(dev))
5916 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5917 else
5918 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5919
5920 if (INTEL_INFO(dev)->gen >= 9)
5921 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5922 else
5923 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5924
5925 /* Preserve the current hw state. */
5926 intel_dp->DP = I915_READ(intel_dp->output_reg);
5927 intel_dp->attached_connector = intel_connector;
5928
5929 if (intel_dp_is_edp(dev, port))
5930 type = DRM_MODE_CONNECTOR_eDP;
5931 else
5932 type = DRM_MODE_CONNECTOR_DisplayPort;
5933
5934 /*
5935 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5936 * for DP the encoder type can be set by the caller to
5937 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5938 */
5939 if (type == DRM_MODE_CONNECTOR_eDP)
5940 intel_encoder->type = INTEL_OUTPUT_EDP;
5941
5942 /* eDP only on port B and/or C on vlv/chv */
5943 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5944 port != PORT_B && port != PORT_C))
5945 return false;
5946
5947 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5948 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5949 port_name(port));
5950
5951 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5952 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5953
5954 connector->interlace_allowed = true;
5955 connector->doublescan_allowed = 0;
5956
5957 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5958 edp_panel_vdd_work);
5959
5960 intel_connector_attach_encoder(intel_connector, intel_encoder);
5961 drm_connector_register(connector);
5962
5963 if (HAS_DDI(dev))
5964 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5965 else
5966 intel_connector->get_hw_state = intel_connector_get_hw_state;
5967 intel_connector->unregister = intel_dp_connector_unregister;
5968
5969 /* Set up the hotplug pin. */
5970 switch (port) {
5971 case PORT_A:
5972 intel_encoder->hpd_pin = HPD_PORT_A;
5973 break;
5974 case PORT_B:
5975 intel_encoder->hpd_pin = HPD_PORT_B;
5976 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
5977 intel_encoder->hpd_pin = HPD_PORT_A;
5978 break;
5979 case PORT_C:
5980 intel_encoder->hpd_pin = HPD_PORT_C;
5981 break;
5982 case PORT_D:
5983 intel_encoder->hpd_pin = HPD_PORT_D;
5984 break;
5985 default:
5986 BUG();
5987 }
5988
5989 if (is_edp(intel_dp)) {
5990 pps_lock(intel_dp);
5991 intel_dp_init_panel_power_timestamps(intel_dp);
5992 if (IS_VALLEYVIEW(dev))
5993 vlv_initial_power_sequencer_setup(intel_dp);
5994 else
5995 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5996 pps_unlock(intel_dp);
5997 }
5998
5999 intel_dp_aux_init(intel_dp, intel_connector);
6000
6001 /* init MST on ports that can support it */
6002 if (HAS_DP_MST(dev) &&
6003 (port == PORT_B || port == PORT_C || port == PORT_D))
6004 intel_dp_mst_encoder_init(intel_dig_port,
6005 intel_connector->base.base.id);
6006
6007 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6008 drm_dp_aux_unregister(&intel_dp->aux);
6009 if (is_edp(intel_dp)) {
6010 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
6011 /*
6012 * vdd might still be enabled due to the delayed vdd off.
6013 * Make sure vdd is actually turned off here.
6014 */
6015 pps_lock(intel_dp);
6016 edp_panel_vdd_off_sync(intel_dp);
6017 pps_unlock(intel_dp);
6018 }
6019 drm_connector_unregister(connector);
6020 drm_connector_cleanup(connector);
6021 return false;
6022 }
6023
6024 intel_dp_add_properties(intel_dp, connector);
6025
6026 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
6027 * written with 0xd. Failure to do so will result in spurious interrupts
6028 * being generated on the port when a cable is not attached.
6029 */
6030 if (IS_G4X(dev) && !IS_GM45(dev)) {
6031 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6032 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6033 }
6034
6035 i915_debugfs_connector_add(connector);
6036
6037 return true;
6038}
6039
6040void
6041intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6042{
6043 struct drm_i915_private *dev_priv = dev->dev_private;
6044 struct intel_digital_port *intel_dig_port;
6045 struct intel_encoder *intel_encoder;
6046 struct drm_encoder *encoder;
6047 struct intel_connector *intel_connector;
6048
6049 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6050 if (!intel_dig_port)
6051 return;
6052
6053 intel_connector = intel_connector_alloc();
6054 if (!intel_connector) {
6055 kfree(intel_dig_port);
6056 return;
6057 }
6058
6059 intel_encoder = &intel_dig_port->base;
6060 encoder = &intel_encoder->base;
6061
6062 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6063 DRM_MODE_ENCODER_TMDS);
6064
6065 intel_encoder->compute_config = intel_dp_compute_config;
6066 intel_encoder->disable = intel_disable_dp;
6067 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6068 intel_encoder->get_config = intel_dp_get_config;
6069 intel_encoder->suspend = intel_dp_encoder_suspend;
6070 if (IS_CHERRYVIEW(dev)) {
6071 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6072 intel_encoder->pre_enable = chv_pre_enable_dp;
6073 intel_encoder->enable = vlv_enable_dp;
6074 intel_encoder->post_disable = chv_post_disable_dp;
6075 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6076 } else if (IS_VALLEYVIEW(dev)) {
6077 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6078 intel_encoder->pre_enable = vlv_pre_enable_dp;
6079 intel_encoder->enable = vlv_enable_dp;
6080 intel_encoder->post_disable = vlv_post_disable_dp;
6081 } else {
6082 intel_encoder->pre_enable = g4x_pre_enable_dp;
6083 intel_encoder->enable = g4x_enable_dp;
6084 if (INTEL_INFO(dev)->gen >= 5)
6085 intel_encoder->post_disable = ilk_post_disable_dp;
6086 }
6087
6088 intel_dig_port->port = port;
6089 intel_dig_port->dp.output_reg = output_reg;
6090
6091 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6092 if (IS_CHERRYVIEW(dev)) {
6093 if (port == PORT_D)
6094 intel_encoder->crtc_mask = 1 << 2;
6095 else
6096 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6097 } else {
6098 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6099 }
6100 intel_encoder->cloneable = 0;
6101
6102 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6103 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6104
6105 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6106 drm_encoder_cleanup(encoder);
6107 kfree(intel_dig_port);
6108 kfree(intel_connector);
6109 }
6110}
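/*
 * Example usage (a sketch; the real call sites live in the display core's
 * output setup code): each DP port is registered once at init time with
 * its control register and port identifier, e.g.
 *
 *	intel_dp_init(dev, PCH_DP_B, PORT_B);
 *
 * On allocation failure the function simply returns and the port is left
 * unregistered.
 */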
6111
6112void intel_dp_mst_suspend(struct drm_device *dev)
6113{
6114 struct drm_i915_private *dev_priv = dev->dev_private;
6115 int i;
6116
6117 /* disable MST */
6118 for (i = 0; i < I915_MAX_PORTS; i++) {
6119 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6120 if (!intel_dig_port)
6121 continue;
6122
6123 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6124 if (!intel_dig_port->dp.can_mst)
6125 continue;
6126 if (intel_dig_port->dp.is_mst)
6127 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6128 }
6129 }
6130}
6131
6132void intel_dp_mst_resume(struct drm_device *dev)
6133{
6134 struct drm_i915_private *dev_priv = dev->dev_private;
6135 int i;
6136
6137 for (i = 0; i < I915_MAX_PORTS; i++) {
6138 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6139 if (!intel_dig_port)
6140 continue;
6141 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6142 int ret;
6143
6144 if (!intel_dig_port->dp.can_mst)
6145 continue;
6146
6147 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6148 if (ret != 0) {
6149 intel_dp_check_mst_status(&intel_dig_port->dp);
6150 }
6151 }
6152 }
6153}