drm/i915: set CDCLK if DPLL0 enabled during resuming from S3
[deliverable/linux.git] drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below we list only the fixed rates; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
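
/*
 * Illustrative note (editorial addition, not part of the original source):
 * unpacking the CHV fixed-point m2 values above with the 22-bit split
 * described in the comment, e.g. for the 1.62 GHz entry:
 *
 *   m2          = 0x819999a
 *   m2_int      = 0x819999a >> 22              = 32
 *   m2_fraction = 0x819999a & ((1 << 22) - 1)  = 0x19999a = 1677722
 *
 * which matches the per-entry comments on chv_dpll[].
 */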

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

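/*
 * Illustrative note (editorial addition): on a DDI platform where port A
 * was configured without DDI_A_4_LANES, source_max above becomes 2, so
 * even a sink whose DPCD advertises 4 lanes is clamped to min(2, 4) = 2
 * lanes by the function above.
 */
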
/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

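/*
 * Worked example (editorial addition, not in the original source): a
 * 1920x1080@60 mode has ->clock ~= 148500 kHz, so at the 18bpp used by
 * intel_dp_mode_valid() below:
 *
 *   mode_rate = intel_dp_link_required(148500, 18)
 *             = (148500 * 18 + 9) / 10 = 267300
 *
 * while a 2.7 GHz x 4 lane link gives
 *
 *   max_rate  = intel_dp_max_data_rate(270000, 4)
 *             = 270000 * 4 * 8 / 10 = 864000
 *
 * so the mode fits (267300 <= 864000), in the same decakilobit units
 * described above.
 */
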
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

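/*
 * Illustrative note (editorial addition): intel_dp_pack_aux() packs bytes
 * MSB-first into a 32-bit word, e.g. packing {0x11, 0x22, 0x33} (3 bytes)
 * yields 0x11223300, and intel_dp_unpack_aux(0x11223300, dst, 3) recovers
 * {0x11, 0x22, 0x33}. This is the byte order used when loading and
 * unloading the AUX channel data registers in intel_dp_aux_ch() below.
 */
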
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

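/*
 * Illustrative note (editorial addition): with both PIPE_A and PIPE_B
 * available (pipes = 0b11) and another eDP port already owning pipe A,
 * the mask becomes 0b10, and ffs(0b10) - 1 = 1 selects PIPE_B as this
 * port's power sequencer in the function above.
 */
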
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shut down panel power so that the panel's T12
 * timing is guaranteed. This is only applicable when the panel PM state is
 * not being tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

DL
758static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
759 bool has_aux_irq,
760 int send_bytes,
761 uint32_t aux_clock_divider)
762{
763 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
764 struct drm_device *dev = intel_dig_port->base.base.dev;
765 uint32_t precharge, timeout;
766
767 if (IS_GEN6(dev))
768 precharge = 3;
769 else
770 precharge = 5;
771
772 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
773 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
774 else
775 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
776
777 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 778 DP_AUX_CH_CTL_DONE |
5ed12a19 779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 781 timeout |
788d4433 782 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 785 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
786}
787
b9ca5fad
DL
788static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
789 bool has_aux_irq,
790 int send_bytes,
791 uint32_t unused)
792{
793 return DP_AUX_CH_CTL_SEND_BUSY |
794 DP_AUX_CH_CTL_DONE |
795 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR |
797 DP_AUX_CH_CTL_TIME_OUT_1600us |
798 DP_AUX_CH_CTL_RECEIVE_ERROR |
799 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
801}
802
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

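/*
 * Illustrative note (editorial addition): the txbuf[] header built above
 * follows the DP AUX framing - the request code in the top nibble of byte 0
 * together with a 20-bit address, then a length-minus-one byte. For a
 * 16-byte native read at DPCD address 0x00000:
 *
 *   txbuf[0] = (DP_AUX_NATIVE_READ << 4) | 0x0   (address bits 19:16)
 *   txbuf[1] = 0x00, txbuf[2] = 0x00             (address bits 15:0)
 *   txbuf[3] = 16 - 1 = 0x0f
 */
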
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (link_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

static void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (link_bw) {
	case DP_LINK_BW_1_62:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case DP_LINK_BW_2_7:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case DP_LINK_BW_5_4:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(dev))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

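/*
 * Worked example (editorial addition): intersect_rates() assumes both
 * arrays are sorted ascending. Intersecting skl_rates[] with
 * default_rates[] (the sink fallback) yields { 162000, 270000, 540000 },
 * so the resulting common_rates array is sorted ascending as well -
 * intel_dp_max_link_rate() further below relies on that ordering.
 */
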
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

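/*
 * Illustrative note (editorial addition): rates[] above is zero-initialized
 * and then filled with 'len' ascending entries, so rate_to_index(0, rates)
 * finds the first unused slot (== len) and rates[len - 1] is the highest
 * common rate, e.g. { 162000, 270000, 540000, 0, ... } -> 540000.
 */
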
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	if (intel_dp->num_sink_rates) {
		intel_dp->link_bw = 0;
		intel_dp->rate_select =
			intel_dp_rate_select(intel_dp, common_rates[clock]);
	} else {
		intel_dp->link_bw =
			drm_dp_link_rate_to_bw_code(common_rates[clock]);
		intel_dp->rate_select = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, common_rates[clock]);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

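/*
 * Worked example (editorial addition): for a non-eDP sink showing a
 * 1920x1080@60 mode with common_rates = { 162000, 270000, 540000 }, 4 lanes
 * and pipe_bpp = 24, the search above evaluates
 *
 *   mode_rate = (148500 * 24 + 9) / 10 = 356400
 *
 * and first succeeds at clock = 162000, lane_count = 4, since
 * link_avail = 162000 * 4 * 8 / 10 = 518400 >= 356400. The lane count is
 * widened within a link rate before the next (higher) rate is tried, and
 * bpp is only reduced once no rate/lane combination fits.
 */
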
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 * 	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}

ffd6749d
PZ
1671#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1672#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1673
1a5ef5b7
PZ
1674#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1675#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1676
ffd6749d
PZ
1677#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1678#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1679
4be73780 1680static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1681 u32 mask,
1682 u32 value)
bd943159 1683{
30add22d 1684 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1685 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1686 u32 pp_stat_reg, pp_ctrl_reg;
1687
e39b999a
VS
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
bf13e81b
JN
1690 pp_stat_reg = _pp_stat_reg(intel_dp);
1691 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1692
99ea7127 1693 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1694 mask, value,
1695 I915_READ(pp_stat_reg),
1696 I915_READ(pp_ctrl_reg));
32ce697c 1697
453c5420 1698 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1699 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1700 I915_READ(pp_stat_reg),
1701 I915_READ(pp_ctrl_reg));
32ce697c 1702 }
54c136d4
CW
1703
1704 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1705}
32ce697c 1706
4be73780 1707static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1708{
1709 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1710 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1711}
1712
4be73780 1713static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1714{
1715 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1716 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1717}
1718
4be73780 1719static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1720{
1721 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1722
1723 /* When we disable the VDD override bit last, we have to do the manual
1724 * wait. */
1725 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1726 intel_dp->panel_power_cycle_delay);
1727
4be73780 1728 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1729}
1730
4be73780 1731static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1732{
1733 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1734 intel_dp->backlight_on_delay);
1735}
1736
4be73780 1737static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1738{
1739 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1740 intel_dp->backlight_off_delay);
1741}
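/*
 * Editor's note -- illustrative sketch only, not part of the original
 * file: the delay helpers above all share one pattern.  A jiffies
 * timestamp is recorded when an event happens (panel power on,
 * backlight off, ...) and, before the next dependent step, we sleep
 * only for whatever part of the required delay has not yet elapsed.
 * A minimal stand-alone version of that idea (hypothetical name):
 *
 *	static void example_wait_remaining_ms(unsigned long event_jiffies,
 *					      int delay_ms)
 *	{
 *		unsigned long target = event_jiffies + msecs_to_jiffies(delay_ms);
 *
 *		if (time_before(jiffies, target))
 *			msleep(jiffies_to_msecs(target - jiffies));
 *	}
 */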
99ea7127 1742
832dd3c1
KP
1743/* Read the current pp_control value, unlocking the register if it
1744 * is locked
1745 */
1746
453c5420 1747static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1748{
453c5420
JB
1749 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1750 struct drm_i915_private *dev_priv = dev->dev_private;
1751 u32 control;
832dd3c1 1752
e39b999a
VS
1753 lockdep_assert_held(&dev_priv->pps_mutex);
1754
bf13e81b 1755 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1756 if (!IS_BROXTON(dev)) {
1757 control &= ~PANEL_UNLOCK_MASK;
1758 control |= PANEL_UNLOCK_REGS;
1759 }
832dd3c1 1760 return control;
bd943159
KP
1761}
1762
951468f3
VS
1763/*
1764 * Must be paired with edp_panel_vdd_off().
1765 * Must hold pps_mutex around the whole on/off sequence.
1766 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1767 */
1e0560e0 1768static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1769{
30add22d 1770 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1771 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1772 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1773 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1774 enum intel_display_power_domain power_domain;
5d613501 1775 u32 pp;
453c5420 1776 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1777 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1778
e39b999a
VS
1779 lockdep_assert_held(&dev_priv->pps_mutex);
1780
97af61f5 1781 if (!is_edp(intel_dp))
adddaaf4 1782 return false;
bd943159 1783
2c623c11 1784 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1785 intel_dp->want_panel_vdd = true;
99ea7127 1786
4be73780 1787 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1788 return need_to_disable;
b0665d57 1789
4e6e1a54
ID
1790 power_domain = intel_display_port_power_domain(intel_encoder);
1791 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1792
3936fcf4
VS
1793 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1794 port_name(intel_dig_port->port));
bd943159 1795
4be73780
DV
1796 if (!edp_have_panel_power(intel_dp))
1797 wait_panel_power_cycle(intel_dp);
99ea7127 1798
453c5420 1799 pp = ironlake_get_pp_control(intel_dp);
5d613501 1800 pp |= EDP_FORCE_VDD;
ebf33b18 1801
bf13e81b
JN
1802 pp_stat_reg = _pp_stat_reg(intel_dp);
1803 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1804
1805 I915_WRITE(pp_ctrl_reg, pp);
1806 POSTING_READ(pp_ctrl_reg);
1807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1808 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1809 /*
1810 * If the panel wasn't on, delay before accessing the aux channel
1811 */
4be73780 1812 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1813 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1814 port_name(intel_dig_port->port));
f01eca2e 1815 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1816 }
adddaaf4
JN
1817
1818 return need_to_disable;
1819}
1820
951468f3
VS
1821/*
1822 * Must be paired with intel_edp_panel_vdd_off() or
1823 * intel_edp_panel_off().
1824 * Nested calls to these functions are not allowed since
1825 * we drop the lock. Caller must use some higher level
1826 * locking to prevent nested calls from other threads.
1827 */
b80d6c78 1828void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1829{
c695b6b6 1830 bool vdd;
adddaaf4 1831
c695b6b6
VS
1832 if (!is_edp(intel_dp))
1833 return;
1834
773538e8 1835 pps_lock(intel_dp);
c695b6b6 1836 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1837 pps_unlock(intel_dp);
c695b6b6 1838
e2c719b7 1839 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1840 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1841}
1842
4be73780 1843static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1844{
30add22d 1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1846 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1847 struct intel_digital_port *intel_dig_port =
1848 dp_to_dig_port(intel_dp);
1849 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1850 enum intel_display_power_domain power_domain;
5d613501 1851 u32 pp;
453c5420 1852 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1853
e39b999a 1854 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1855
15e899a0 1856 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1857
15e899a0 1858 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1859 return;
b0665d57 1860
3936fcf4
VS
1861 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1862 port_name(intel_dig_port->port));
bd943159 1863
be2c9196
VS
1864 pp = ironlake_get_pp_control(intel_dp);
1865 pp &= ~EDP_FORCE_VDD;
453c5420 1866
be2c9196
VS
1867 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1868 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1869
be2c9196
VS
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
90791a5c 1872
be2c9196
VS
1873 /* Make sure sequencer is idle before allowing subsequent activity */
1874 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1875 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1876
be2c9196
VS
1877 if ((pp & POWER_TARGET_ON) == 0)
1878 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1879
be2c9196
VS
1880 power_domain = intel_display_port_power_domain(intel_encoder);
1881 intel_display_power_put(dev_priv, power_domain);
bd943159 1882}
5d613501 1883
4be73780 1884static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1885{
1886 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1887 struct intel_dp, panel_vdd_work);
bd943159 1888
773538e8 1889 pps_lock(intel_dp);
15e899a0
VS
1890 if (!intel_dp->want_panel_vdd)
1891 edp_panel_vdd_off_sync(intel_dp);
773538e8 1892 pps_unlock(intel_dp);
bd943159
KP
1893}
1894
aba86890
ID
1895static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1896{
1897 unsigned long delay;
1898
1899 /*
1900 * Queue the timer to fire a long time from now (relative to the power
1901 * down delay) to keep the panel power up across a sequence of
1902 * operations.
1903 */
1904 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1905 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1906}
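/*
 * Editor's note (worked example, value assumed): with a typical eDP
 * panel_power_cycle_delay of 500 ms, the work item above would be queued
 * 5 * 500 ms = 2.5 s out, so a burst of back-to-back AUX/PPS operations
 * keeps VDD up instead of cycling it after every single access.
 */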
1907
951468f3
VS
1908/*
1909 * Must be paired with edp_panel_vdd_on().
1910 * Must hold pps_mutex around the whole on/off sequence.
1911 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1912 */
4be73780 1913static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1914{
e39b999a
VS
1915 struct drm_i915_private *dev_priv =
1916 intel_dp_to_dev(intel_dp)->dev_private;
1917
1918 lockdep_assert_held(&dev_priv->pps_mutex);
1919
97af61f5
KP
1920 if (!is_edp(intel_dp))
1921 return;
5d613501 1922
e2c719b7 1923 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1924 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1925
bd943159
KP
1926 intel_dp->want_panel_vdd = false;
1927
aba86890 1928 if (sync)
4be73780 1929 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1930 else
1931 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1932}
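/*
 * Editor's note -- hedged usage sketch, not taken verbatim from this
 * file: per the pairing rules documented above, a caller that needs VDD
 * around a sequence of accesses is expected to do roughly
 *
 *	pps_lock(intel_dp);
 *	vdd = edp_panel_vdd_on(intel_dp);
 *	... AUX / panel power accesses ...
 *	edp_panel_vdd_off(intel_dp, false);
 *	pps_unlock(intel_dp);
 *
 * with the whole on/off pair under pps_mutex; the "sync" argument picks
 * between dropping VDD immediately and deferring it to panel_vdd_work.
 */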
1933
9f0fb5be 1934static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1935{
30add22d 1936 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1937 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1938 u32 pp;
453c5420 1939 u32 pp_ctrl_reg;
9934c132 1940
9f0fb5be
VS
1941 lockdep_assert_held(&dev_priv->pps_mutex);
1942
97af61f5 1943 if (!is_edp(intel_dp))
bd943159 1944 return;
99ea7127 1945
3936fcf4
VS
1946 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1948
e7a89ace
VS
1949 if (WARN(edp_have_panel_power(intel_dp),
1950 "eDP port %c panel power already on\n",
1951 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1952 return;
9934c132 1953
4be73780 1954 wait_panel_power_cycle(intel_dp);
37c6c9b0 1955
bf13e81b 1956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1957 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1958 if (IS_GEN5(dev)) {
1959 /* ILK workaround: disable reset around power sequence */
1960 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1961 I915_WRITE(pp_ctrl_reg, pp);
1962 POSTING_READ(pp_ctrl_reg);
05ce1a49 1963 }
37c6c9b0 1964
1c0ae80a 1965 pp |= POWER_TARGET_ON;
99ea7127
KP
1966 if (!IS_GEN5(dev))
1967 pp |= PANEL_POWER_RESET;
1968
453c5420
JB
1969 I915_WRITE(pp_ctrl_reg, pp);
1970 POSTING_READ(pp_ctrl_reg);
9934c132 1971
4be73780 1972 wait_panel_on(intel_dp);
dce56b3c 1973 intel_dp->last_power_on = jiffies;
9934c132 1974
05ce1a49
KP
1975 if (IS_GEN5(dev)) {
1976 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1977 I915_WRITE(pp_ctrl_reg, pp);
1978 POSTING_READ(pp_ctrl_reg);
05ce1a49 1979 }
9f0fb5be 1980}
e39b999a 1981
9f0fb5be
VS
1982void intel_edp_panel_on(struct intel_dp *intel_dp)
1983{
1984 if (!is_edp(intel_dp))
1985 return;
1986
1987 pps_lock(intel_dp);
1988 edp_panel_on(intel_dp);
773538e8 1989 pps_unlock(intel_dp);
9934c132
JB
1990}
1991
9f0fb5be
VS
1992
1993static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1994{
4e6e1a54
ID
1995 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1996 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1997 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1998 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1999 enum intel_display_power_domain power_domain;
99ea7127 2000 u32 pp;
453c5420 2001 u32 pp_ctrl_reg;
9934c132 2002
9f0fb5be
VS
2003 lockdep_assert_held(&dev_priv->pps_mutex);
2004
97af61f5
KP
2005 if (!is_edp(intel_dp))
2006 return;
37c6c9b0 2007
3936fcf4
VS
2008 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2009 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2010
3936fcf4
VS
2011 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2012 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2013
453c5420 2014 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2015 /* We need to switch off panel power _and_ force vdd, for otherwise some
2016 * panels get very unhappy and cease to work. */
b3064154
PJ
2017 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2018 EDP_BLC_ENABLE);
453c5420 2019
bf13e81b 2020 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2021
849e39f5
PZ
2022 intel_dp->want_panel_vdd = false;
2023
453c5420
JB
2024 I915_WRITE(pp_ctrl_reg, pp);
2025 POSTING_READ(pp_ctrl_reg);
9934c132 2026
dce56b3c 2027 intel_dp->last_power_cycle = jiffies;
4be73780 2028 wait_panel_off(intel_dp);
849e39f5
PZ
2029
2030 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2031 power_domain = intel_display_port_power_domain(intel_encoder);
2032 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2033}
e39b999a 2034
9f0fb5be
VS
2035void intel_edp_panel_off(struct intel_dp *intel_dp)
2036{
2037 if (!is_edp(intel_dp))
2038 return;
e39b999a 2039
9f0fb5be
VS
2040 pps_lock(intel_dp);
2041 edp_panel_off(intel_dp);
773538e8 2042 pps_unlock(intel_dp);
9934c132
JB
2043}
2044
1250d107
JN
2045/* Enable backlight in the panel power control. */
2046static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2047{
da63a9f2
PZ
2048 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2049 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2050 struct drm_i915_private *dev_priv = dev->dev_private;
2051 u32 pp;
453c5420 2052 u32 pp_ctrl_reg;
32f9d658 2053
01cb9ea6
JB
2054 /*
2055 * If we enable the backlight right away following a panel power
2056 * on, we may see slight flicker as the panel syncs with the eDP
2057 * link. So delay a bit to make sure the image is solid before
2058 * allowing it to appear.
2059 */
4be73780 2060 wait_backlight_on(intel_dp);
e39b999a 2061
773538e8 2062 pps_lock(intel_dp);
e39b999a 2063
453c5420 2064 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2065 pp |= EDP_BLC_ENABLE;
453c5420 2066
bf13e81b 2067 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2068
2069 I915_WRITE(pp_ctrl_reg, pp);
2070 POSTING_READ(pp_ctrl_reg);
e39b999a 2071
773538e8 2072 pps_unlock(intel_dp);
32f9d658
ZW
2073}
2074
1250d107
JN
2075/* Enable backlight PWM and backlight PP control. */
2076void intel_edp_backlight_on(struct intel_dp *intel_dp)
2077{
2078 if (!is_edp(intel_dp))
2079 return;
2080
2081 DRM_DEBUG_KMS("\n");
2082
2083 intel_panel_enable_backlight(intel_dp->attached_connector);
2084 _intel_edp_backlight_on(intel_dp);
2085}
2086
2087/* Disable backlight in the panel power control. */
2088static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2089{
30add22d 2090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2091 struct drm_i915_private *dev_priv = dev->dev_private;
2092 u32 pp;
453c5420 2093 u32 pp_ctrl_reg;
32f9d658 2094
f01eca2e
KP
2095 if (!is_edp(intel_dp))
2096 return;
2097
773538e8 2098 pps_lock(intel_dp);
e39b999a 2099
453c5420 2100 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2101 pp &= ~EDP_BLC_ENABLE;
453c5420 2102
bf13e81b 2103 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2104
2105 I915_WRITE(pp_ctrl_reg, pp);
2106 POSTING_READ(pp_ctrl_reg);
f7d2323c 2107
773538e8 2108 pps_unlock(intel_dp);
e39b999a
VS
2109
2110 intel_dp->last_backlight_off = jiffies;
f7d2323c 2111 edp_wait_backlight_off(intel_dp);
1250d107 2112}
f7d2323c 2113
1250d107
JN
2114/* Disable backlight PP control and backlight PWM. */
2115void intel_edp_backlight_off(struct intel_dp *intel_dp)
2116{
2117 if (!is_edp(intel_dp))
2118 return;
2119
2120 DRM_DEBUG_KMS("\n");
f7d2323c 2121
1250d107 2122 _intel_edp_backlight_off(intel_dp);
f7d2323c 2123 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2124}
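/*
 * Editor's note (hedged observation): the enable and disable paths above
 * are deliberately mirrored -- on enable the PWM backlight is brought up
 * before EDP_BLC_ENABLE is set in the panel power control, while on
 * disable EDP_BLC_ENABLE is cleared first and the PWM is shut off last.
 */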
a4fc5ed6 2125
73580fb7
JN
2126/*
2127 * Hook for controlling the panel power control backlight through the bl_power
2128 * sysfs attribute. Take care to handle multiple calls.
2129 */
2130static void intel_edp_backlight_power(struct intel_connector *connector,
2131 bool enable)
2132{
2133 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2134 bool is_enabled;
2135
773538e8 2136 pps_lock(intel_dp);
e39b999a 2137 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2138 pps_unlock(intel_dp);
73580fb7
JN
2139
2140 if (is_enabled == enable)
2141 return;
2142
23ba9373
JN
2143 DRM_DEBUG_KMS("panel power control backlight %s\n",
2144 enable ? "enable" : "disable");
73580fb7
JN
2145
2146 if (enable)
2147 _intel_edp_backlight_on(intel_dp);
2148 else
2149 _intel_edp_backlight_off(intel_dp);
2150}
2151
2bd2ad64 2152static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2153{
da63a9f2
PZ
2154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2155 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2156 struct drm_device *dev = crtc->dev;
d240f20f
JB
2157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 u32 dpa_ctl;
2159
2bd2ad64
DV
2160 assert_pipe_disabled(dev_priv,
2161 to_intel_crtc(crtc)->pipe);
2162
d240f20f
JB
2163 DRM_DEBUG_KMS("\n");
2164 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2165 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2166 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2167
2168 /* We don't adjust intel_dp->DP while tearing down the link, to
2169 * facilitate link retraining (e.g. after hotplug). Hence clear all
2170 * enable bits here to ensure that we don't enable too much. */
2171 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2172 intel_dp->DP |= DP_PLL_ENABLE;
2173 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2174 POSTING_READ(DP_A);
2175 udelay(200);
d240f20f
JB
2176}
2177
2bd2ad64 2178static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2179{
da63a9f2
PZ
2180 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2181 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2182 struct drm_device *dev = crtc->dev;
d240f20f
JB
2183 struct drm_i915_private *dev_priv = dev->dev_private;
2184 u32 dpa_ctl;
2185
2bd2ad64
DV
2186 assert_pipe_disabled(dev_priv,
2187 to_intel_crtc(crtc)->pipe);
2188
d240f20f 2189 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2190 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2191 "dp pll off, should be on\n");
2192 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2193
2194 /* We can't rely on the value tracked for the DP register in
2195 * intel_dp->DP because link_down must not change that (otherwise link
2196 * re-training will fail). */
298b0b39 2197 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2198 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2199 POSTING_READ(DP_A);
d240f20f
JB
2200 udelay(200);
2201}
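/*
 * Editor's note -- hedged summary of how the two PLL helpers above are
 * used later in this file: only the CPU eDP port (port A) has this PLL,
 * and both helpers require the pipe to be disabled, so the pattern is
 * roughly
 *
 *	pre_enable:   if (port == PORT_A) { ironlake_set_pll_cpu_edp(); ironlake_edp_pll_on(); }
 *	post_disable: intel_dp_link_down(); if (port == PORT_A) ironlake_edp_pll_off();
 */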
2202
c7ad3810 2203/* If the sink supports it, try to set the power state appropriately */
c19b0669 2204void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2205{
2206 int ret, i;
2207
2208 /* Should have a valid DPCD by this point */
2209 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2210 return;
2211
2212 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2213 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2214 DP_SET_POWER_D3);
c7ad3810
JB
2215 } else {
2216 /*
2217 * When turning on, we need to retry for 1ms to give the sink
2218 * time to wake up.
2219 */
2220 for (i = 0; i < 3; i++) {
9d1a1031
JN
2221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D0);
c7ad3810
JB
2223 if (ret == 1)
2224 break;
2225 msleep(1);
2226 }
2227 }
f9cac721
JN
2228
2229 if (ret != 1)
2230 DRM_DEBUG_KMS("failed to %s sink power state\n",
2231 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2232}
2233
19d8fe15
DV
2234static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2235 enum pipe *pipe)
d240f20f 2236{
19d8fe15 2237 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2238 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2239 struct drm_device *dev = encoder->base.dev;
2240 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2241 enum intel_display_power_domain power_domain;
2242 u32 tmp;
2243
2244 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2245 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2246 return false;
2247
2248 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2249
2250 if (!(tmp & DP_PORT_EN))
2251 return false;
2252
39e5fa88 2253 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2254 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2255 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2256 enum pipe p;
19d8fe15 2257
adc289d7
VS
2258 for_each_pipe(dev_priv, p) {
2259 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2260 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2261 *pipe = p;
19d8fe15
DV
2262 return true;
2263 }
2264 }
19d8fe15 2265
4a0833ec
DV
2266 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2267 intel_dp->output_reg);
39e5fa88
VS
2268 } else if (IS_CHERRYVIEW(dev)) {
2269 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2270 } else {
2271 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2272 }
d240f20f 2273
19d8fe15
DV
2274 return true;
2275}
d240f20f 2276
045ac3b5 2277static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2278 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2279{
2280 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2281 u32 tmp, flags = 0;
63000ef6
XZ
2282 struct drm_device *dev = encoder->base.dev;
2283 struct drm_i915_private *dev_priv = dev->dev_private;
2284 enum port port = dp_to_dig_port(intel_dp)->port;
2285 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2286 int dotclock;
045ac3b5 2287
9ed109a7 2288 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2289
2290 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2291
39e5fa88
VS
2292 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2293 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2294 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2295 flags |= DRM_MODE_FLAG_PHSYNC;
2296 else
2297 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2298
39e5fa88 2299 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2300 flags |= DRM_MODE_FLAG_PVSYNC;
2301 else
2302 flags |= DRM_MODE_FLAG_NVSYNC;
2303 } else {
39e5fa88 2304 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2305 flags |= DRM_MODE_FLAG_PHSYNC;
2306 else
2307 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2308
39e5fa88 2309 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2310 flags |= DRM_MODE_FLAG_PVSYNC;
2311 else
2312 flags |= DRM_MODE_FLAG_NVSYNC;
2313 }
045ac3b5 2314
2d112de7 2315 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2316
8c875fca
VS
2317 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2318 tmp & DP_COLOR_RANGE_16_235)
2319 pipe_config->limited_color_range = true;
2320
eb14cb74
VS
2321 pipe_config->has_dp_encoder = true;
2322
2323 intel_dp_get_m_n(crtc, pipe_config);
2324
18442d08 2325 if (port == PORT_A) {
f1f644dc
JB
2326 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2327 pipe_config->port_clock = 162000;
2328 else
2329 pipe_config->port_clock = 270000;
2330 }
18442d08
VS
2331
2332 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2333 &pipe_config->dp_m_n);
2334
2335 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2336 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2337
2d112de7 2338 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2339
c6cd2ee2
JN
2340 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2341 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2342 /*
2343 * This is a big fat ugly hack.
2344 *
2345 * Some machines in UEFI boot mode provide us a VBT that has 18
2346 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2347 * unknown we fail to light up. Yet the same BIOS boots up with
2348 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2349 * max, not what it tells us to use.
2350 *
2351 * Note: This will still be broken if the eDP panel is not lit
2352 * up by the BIOS, and thus we can't get the mode at module
2353 * load.
2354 */
2355 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2356 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2357 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2358 }
045ac3b5
JB
2359}
2360
e8cb4558 2361static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2362{
e8cb4558 2363 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2364 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2365 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2366
6e3c9717 2367 if (crtc->config->has_audio)
495a5bb8 2368 intel_audio_codec_disable(encoder);
6cb49835 2369
b32c6f48
RV
2370 if (HAS_PSR(dev) && !HAS_DDI(dev))
2371 intel_psr_disable(intel_dp);
2372
6cb49835
DV
2373 /* Make sure the panel is off before trying to change the mode. But also
2374 * ensure that we have vdd while we switch off the panel. */
24f3e092 2375 intel_edp_panel_vdd_on(intel_dp);
4be73780 2376 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2377 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2378 intel_edp_panel_off(intel_dp);
3739850b 2379
08aff3fe
VS
2380 /* disable the port before the pipe on g4x */
2381 if (INTEL_INFO(dev)->gen < 5)
3739850b 2382 intel_dp_link_down(intel_dp);
d240f20f
JB
2383}
2384
08aff3fe 2385static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2386{
2bd2ad64 2387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2388 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2389
49277c31 2390 intel_dp_link_down(intel_dp);
08aff3fe
VS
2391 if (port == PORT_A)
2392 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2393}
2394
2395static void vlv_post_disable_dp(struct intel_encoder *encoder)
2396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2398
2399 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2400}
2401
580d3811
VS
2402static void chv_post_disable_dp(struct intel_encoder *encoder)
2403{
2404 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2405 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2406 struct drm_device *dev = encoder->base.dev;
2407 struct drm_i915_private *dev_priv = dev->dev_private;
2408 struct intel_crtc *intel_crtc =
2409 to_intel_crtc(encoder->base.crtc);
2410 enum dpio_channel ch = vlv_dport_to_channel(dport);
2411 enum pipe pipe = intel_crtc->pipe;
2412 u32 val;
2413
2414 intel_dp_link_down(intel_dp);
2415
a580516d 2416 mutex_lock(&dev_priv->sb_lock);
580d3811
VS
2417
2418 /* Propagate soft reset to data lane reset */
97fd4d5c 2419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2420 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2421 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2422
97fd4d5c
VS
2423 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2424 val |= CHV_PCS_REQ_SOFTRESET_EN;
2425 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2426
2427 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2428 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2429 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2430
2431 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2432 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811 2434
a580516d 2435 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2436}
2437
7b13b58a
VS
2438static void
2439_intel_dp_set_link_train(struct intel_dp *intel_dp,
2440 uint32_t *DP,
2441 uint8_t dp_train_pat)
2442{
2443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2444 struct drm_device *dev = intel_dig_port->base.base.dev;
2445 struct drm_i915_private *dev_priv = dev->dev_private;
2446 enum port port = intel_dig_port->port;
2447
2448 if (HAS_DDI(dev)) {
2449 uint32_t temp = I915_READ(DP_TP_CTL(port));
2450
2451 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2452 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2453 else
2454 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2455
2456 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2457 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2458 case DP_TRAINING_PATTERN_DISABLE:
2459 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2460
2461 break;
2462 case DP_TRAINING_PATTERN_1:
2463 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2464 break;
2465 case DP_TRAINING_PATTERN_2:
2466 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2467 break;
2468 case DP_TRAINING_PATTERN_3:
2469 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2470 break;
2471 }
2472 I915_WRITE(DP_TP_CTL(port), temp);
2473
39e5fa88
VS
2474 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2475 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2476 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2477
2478 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2479 case DP_TRAINING_PATTERN_DISABLE:
2480 *DP |= DP_LINK_TRAIN_OFF_CPT;
2481 break;
2482 case DP_TRAINING_PATTERN_1:
2483 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2484 break;
2485 case DP_TRAINING_PATTERN_2:
2486 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2487 break;
2488 case DP_TRAINING_PATTERN_3:
2489 DRM_ERROR("DP training pattern 3 not supported\n");
2490 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2491 break;
2492 }
2493
2494 } else {
2495 if (IS_CHERRYVIEW(dev))
2496 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2497 else
2498 *DP &= ~DP_LINK_TRAIN_MASK;
2499
2500 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2501 case DP_TRAINING_PATTERN_DISABLE:
2502 *DP |= DP_LINK_TRAIN_OFF;
2503 break;
2504 case DP_TRAINING_PATTERN_1:
2505 *DP |= DP_LINK_TRAIN_PAT_1;
2506 break;
2507 case DP_TRAINING_PATTERN_2:
2508 *DP |= DP_LINK_TRAIN_PAT_2;
2509 break;
2510 case DP_TRAINING_PATTERN_3:
2511 if (IS_CHERRYVIEW(dev)) {
2512 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2513 } else {
2514 DRM_ERROR("DP training pattern 3 not supported\n");
2515 *DP |= DP_LINK_TRAIN_PAT_2;
2516 }
2517 break;
2518 }
2519 }
2520}
2521
2522static void intel_dp_enable_port(struct intel_dp *intel_dp)
2523{
2524 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2525 struct drm_i915_private *dev_priv = dev->dev_private;
2526
7b13b58a
VS
2527 /* enable with pattern 1 (as per spec) */
2528 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2529 DP_TRAINING_PATTERN_1);
2530
2531 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2532 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2533
2534 /*
2535 * Magic for VLV/CHV. We _must_ first set up the register
2536 * without actually enabling the port, and then do another
2537 * write to enable the port. Otherwise link training will
2538 * fail when the power sequencer is freshly used for this port.
2539 */
2540 intel_dp->DP |= DP_PORT_EN;
2541
2542 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2543 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2544}
2545
e8cb4558 2546static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2547{
e8cb4558
DV
2548 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2549 struct drm_device *dev = encoder->base.dev;
2550 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2551 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2552 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2553 unsigned int lane_mask = 0x0;
5d613501 2554
0c33d8d7
DV
2555 if (WARN_ON(dp_reg & DP_PORT_EN))
2556 return;
5d613501 2557
093e3f13
VS
2558 pps_lock(intel_dp);
2559
2560 if (IS_VALLEYVIEW(dev))
2561 vlv_init_panel_power_sequencer(intel_dp);
2562
7b13b58a 2563 intel_dp_enable_port(intel_dp);
093e3f13
VS
2564
2565 edp_panel_vdd_on(intel_dp);
2566 edp_panel_on(intel_dp);
2567 edp_panel_vdd_off(intel_dp, true);
2568
2569 pps_unlock(intel_dp);
2570
61234fa5 2571 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2572 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2573 lane_mask);
61234fa5 2574
f01eca2e 2575 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2576 intel_dp_start_link_train(intel_dp);
33a34e4e 2577 intel_dp_complete_link_train(intel_dp);
3ab9c637 2578 intel_dp_stop_link_train(intel_dp);
c1dec79a 2579
6e3c9717 2580 if (crtc->config->has_audio) {
c1dec79a
JN
2581 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2582 pipe_name(crtc->pipe));
2583 intel_audio_codec_enable(encoder);
2584 }
ab1f90f9 2585}
89b667f8 2586
ecff4f3b
JN
2587static void g4x_enable_dp(struct intel_encoder *encoder)
2588{
828f5c6e
JN
2589 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2590
ecff4f3b 2591 intel_enable_dp(encoder);
4be73780 2592 intel_edp_backlight_on(intel_dp);
ab1f90f9 2593}
89b667f8 2594
ab1f90f9
JN
2595static void vlv_enable_dp(struct intel_encoder *encoder)
2596{
828f5c6e
JN
2597 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2598
4be73780 2599 intel_edp_backlight_on(intel_dp);
b32c6f48 2600 intel_psr_enable(intel_dp);
d240f20f
JB
2601}
2602
ecff4f3b 2603static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2604{
2605 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2606 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2607
8ac33ed3
DV
2608 intel_dp_prepare(encoder);
2609
d41f1efb
DV
2610 /* Only ilk+ has port A */
2611 if (dport->port == PORT_A) {
2612 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2613 ironlake_edp_pll_on(intel_dp);
d41f1efb 2614 }
ab1f90f9
JN
2615}
2616
83b84597
VS
2617static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2618{
2619 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2620 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2621 enum pipe pipe = intel_dp->pps_pipe;
2622 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2623
2624 edp_panel_vdd_off_sync(intel_dp);
2625
2626 /*
2627 * VLV seems to get confused when multiple power sequencers
2628 * have the same port selected (even if only one has power/vdd
2629 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2630 * CHV, on the other hand, doesn't seem to mind having the same port
2631 * selected in multiple power sequencers, but let's always clear the
2632 * port select when logically disconnecting a power sequencer
2633 * from a port.
2634 */
2635 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2636 pipe_name(pipe), port_name(intel_dig_port->port));
2637 I915_WRITE(pp_on_reg, 0);
2638 POSTING_READ(pp_on_reg);
2639
2640 intel_dp->pps_pipe = INVALID_PIPE;
2641}
2642
a4a5d2f8
VS
2643static void vlv_steal_power_sequencer(struct drm_device *dev,
2644 enum pipe pipe)
2645{
2646 struct drm_i915_private *dev_priv = dev->dev_private;
2647 struct intel_encoder *encoder;
2648
2649 lockdep_assert_held(&dev_priv->pps_mutex);
2650
ac3c12e4
VS
2651 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2652 return;
2653
a4a5d2f8
VS
2654 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2655 base.head) {
2656 struct intel_dp *intel_dp;
773538e8 2657 enum port port;
a4a5d2f8
VS
2658
2659 if (encoder->type != INTEL_OUTPUT_EDP)
2660 continue;
2661
2662 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2663 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2664
2665 if (intel_dp->pps_pipe != pipe)
2666 continue;
2667
2668 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2669 pipe_name(pipe), port_name(port));
a4a5d2f8 2670
e02f9a06 2671 WARN(encoder->base.crtc,
034e43c6
VS
2672 "stealing pipe %c power sequencer from active eDP port %c\n",
2673 pipe_name(pipe), port_name(port));
a4a5d2f8 2674
a4a5d2f8 2675 /* make sure vdd is off before we steal it */
83b84597 2676 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2677 }
2678}
2679
2680static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2681{
2682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2683 struct intel_encoder *encoder = &intel_dig_port->base;
2684 struct drm_device *dev = encoder->base.dev;
2685 struct drm_i915_private *dev_priv = dev->dev_private;
2686 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2687
2688 lockdep_assert_held(&dev_priv->pps_mutex);
2689
093e3f13
VS
2690 if (!is_edp(intel_dp))
2691 return;
2692
a4a5d2f8
VS
2693 if (intel_dp->pps_pipe == crtc->pipe)
2694 return;
2695
2696 /*
2697 * If another power sequencer was being used on this
2698 * port previously make sure to turn off vdd there while
2699 * we still have control of it.
2700 */
2701 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2702 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2703
2704 /*
2705 * We may be stealing the power
2706 * sequencer from another port.
2707 */
2708 vlv_steal_power_sequencer(dev, crtc->pipe);
2709
2710 /* now it's all ours */
2711 intel_dp->pps_pipe = crtc->pipe;
2712
2713 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2714 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2715
2716 /* init power sequencer on this pipe and port */
36b5f425
VS
2717 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2718 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2719}
2720
ab1f90f9 2721static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2722{
2bd2ad64 2723 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2724 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2725 struct drm_device *dev = encoder->base.dev;
89b667f8 2726 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2727 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2728 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2729 int pipe = intel_crtc->pipe;
2730 u32 val;
a4fc5ed6 2731
a580516d 2732 mutex_lock(&dev_priv->sb_lock);
89b667f8 2733
ab3c759a 2734 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2735 val = 0;
2736 if (pipe)
2737 val |= (1<<21);
2738 else
2739 val &= ~(1<<21);
2740 val |= 0x001000c4;
ab3c759a
CML
2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2742 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2743 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2744
a580516d 2745 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2746
2747 intel_enable_dp(encoder);
89b667f8
JB
2748}
2749
ecff4f3b 2750static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2751{
2752 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2753 struct drm_device *dev = encoder->base.dev;
2754 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2755 struct intel_crtc *intel_crtc =
2756 to_intel_crtc(encoder->base.crtc);
e4607fcf 2757 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2758 int pipe = intel_crtc->pipe;
89b667f8 2759
8ac33ed3
DV
2760 intel_dp_prepare(encoder);
2761
89b667f8 2762 /* Program Tx lane resets to default */
a580516d 2763 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2764 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2765 DPIO_PCS_TX_LANE2_RESET |
2766 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2767 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2768 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2769 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2770 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2771 DPIO_PCS_CLK_SOFT_RESET);
2772
2773 /* Fix up inter-pair skew failure */
ab3c759a
CML
2774 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2775 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2776 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2777 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2778}
2779
e4a1d846
CML
2780static void chv_pre_enable_dp(struct intel_encoder *encoder)
2781{
2782 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2783 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2784 struct drm_device *dev = encoder->base.dev;
2785 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2786 struct intel_crtc *intel_crtc =
2787 to_intel_crtc(encoder->base.crtc);
2788 enum dpio_channel ch = vlv_dport_to_channel(dport);
2789 int pipe = intel_crtc->pipe;
2e523e98 2790 int data, i, stagger;
949c1d43 2791 u32 val;
e4a1d846 2792
a580516d 2793 mutex_lock(&dev_priv->sb_lock);
949c1d43 2794
570e2a74
VS
2795 /* allow hardware to manage TX FIFO reset source */
2796 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2797 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2798 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2799
2800 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2801 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2802 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2803
949c1d43 2804 /* Deassert soft data lane reset */
97fd4d5c 2805 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2806 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2808
2809 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2810 val |= CHV_PCS_REQ_SOFTRESET_EN;
2811 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2812
2813 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2814 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2815 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2816
97fd4d5c 2817 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2818 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2819 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2820
2821 /* Program Tx lane latency optimal setting */
e4a1d846 2822 for (i = 0; i < 4; i++) {
e4a1d846
CML
2823 /* Set the upar bit */
2824 data = (i == 1) ? 0x0 : 0x1;
2825 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2826 data << DPIO_UPAR_SHIFT);
2827 }
2828
2829 /* Data lane stagger programming */
2e523e98
VS
2830 if (intel_crtc->config->port_clock > 270000)
2831 stagger = 0x18;
2832 else if (intel_crtc->config->port_clock > 135000)
2833 stagger = 0xd;
2834 else if (intel_crtc->config->port_clock > 67500)
2835 stagger = 0x7;
2836 else if (intel_crtc->config->port_clock > 33750)
2837 stagger = 0x4;
2838 else
2839 stagger = 0x2;
2840
2841 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2842 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2843 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2844
2845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2846 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2847 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2848
2849 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2850 DPIO_LANESTAGGER_STRAP(stagger) |
2851 DPIO_LANESTAGGER_STRAP_OVRD |
2852 DPIO_TX1_STAGGER_MASK(0x1f) |
2853 DPIO_TX1_STAGGER_MULT(6) |
2854 DPIO_TX2_STAGGER_MULT(0));
2855
2856 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2857 DPIO_LANESTAGGER_STRAP(stagger) |
2858 DPIO_LANESTAGGER_STRAP_OVRD |
2859 DPIO_TX1_STAGGER_MASK(0x1f) |
2860 DPIO_TX1_STAGGER_MULT(7) |
2861 DPIO_TX2_STAGGER_MULT(5));
e4a1d846 2862
a580516d 2863 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2864
e4a1d846 2865 intel_enable_dp(encoder);
e4a1d846
CML
2866}
2867
9197c88b
VS
2868static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2869{
2870 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2871 struct drm_device *dev = encoder->base.dev;
2872 struct drm_i915_private *dev_priv = dev->dev_private;
2873 struct intel_crtc *intel_crtc =
2874 to_intel_crtc(encoder->base.crtc);
2875 enum dpio_channel ch = vlv_dport_to_channel(dport);
2876 enum pipe pipe = intel_crtc->pipe;
2877 u32 val;
2878
625695f8
VS
2879 intel_dp_prepare(encoder);
2880
a580516d 2881 mutex_lock(&dev_priv->sb_lock);
9197c88b 2882
b9e5ac3c
VS
2883 /* program left/right clock distribution */
2884 if (pipe != PIPE_B) {
2885 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2886 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2887 if (ch == DPIO_CH0)
2888 val |= CHV_BUFLEFTENA1_FORCE;
2889 if (ch == DPIO_CH1)
2890 val |= CHV_BUFRIGHTENA1_FORCE;
2891 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2892 } else {
2893 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2894 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2895 if (ch == DPIO_CH0)
2896 val |= CHV_BUFLEFTENA2_FORCE;
2897 if (ch == DPIO_CH1)
2898 val |= CHV_BUFRIGHTENA2_FORCE;
2899 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2900 }
2901
9197c88b
VS
2902 /* program clock channel usage */
2903 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2904 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2905 if (pipe != PIPE_B)
2906 val &= ~CHV_PCS_USEDCLKCHANNEL;
2907 else
2908 val |= CHV_PCS_USEDCLKCHANNEL;
2909 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2910
2911 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2912 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2913 if (pipe != PIPE_B)
2914 val &= ~CHV_PCS_USEDCLKCHANNEL;
2915 else
2916 val |= CHV_PCS_USEDCLKCHANNEL;
2917 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2918
2919 /*
2920 * This is a bit weird since generally CL
2921 * matches the pipe, but here we need to
2922 * pick the CL based on the port.
2923 */
2924 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2925 if (pipe != PIPE_B)
2926 val &= ~CHV_CMN_USEDCLKCHANNEL;
2927 else
2928 val |= CHV_CMN_USEDCLKCHANNEL;
2929 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2930
a580516d 2931 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2932}
2933
a4fc5ed6 2934/*
df0c237d
JB
2935 * Native read with retry for link status and receiver capability reads for
2936 * cases where the sink may still be asleep.
9d1a1031
JN
2937 *
2938 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2939 * supposed to retry 3 times per the spec.
a4fc5ed6 2940 */
9d1a1031
JN
2941static ssize_t
2942intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2943 void *buffer, size_t size)
a4fc5ed6 2944{
9d1a1031
JN
2945 ssize_t ret;
2946 int i;
61da5fab 2947
f6a19066
VS
2948 /*
2949 * Sometimes we just get the same incorrect byte repeated
2950 * over the entire buffer. Doing just one throw-away read
2951 * initially seems to "solve" it.
2952 */
2953 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2954
61da5fab 2955 for (i = 0; i < 3; i++) {
9d1a1031
JN
2956 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2957 if (ret == size)
2958 return ret;
61da5fab
JB
2959 msleep(1);
2960 }
a4fc5ed6 2961
9d1a1031 2962 return ret;
a4fc5ed6
KP
2963}
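/*
 * Editor's illustrative sketch (not from the original file): callers use
 * the wake/retry helper above just like a plain DPCD read, e.g. to fetch
 * the receiver capability block.  DP_DPCD_REV and DP_RECEIVER_CAP_SIZE
 * are standard DRM DPCD symbols; the surrounding code is hypothetical.
 *
 *	uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
 *				    dpcd, sizeof(dpcd)) != sizeof(dpcd))
 *		return false;	// sink never woke up or the read failed
 */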
2964
2965/*
2966 * Fetch AUX CH registers 0x202 - 0x207 which contain
2967 * link status information
2968 */
2969static bool
93f62dad 2970intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2971{
9d1a1031
JN
2972 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2973 DP_LANE0_1_STATUS,
2974 link_status,
2975 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2976}
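/*
 * Editor's note -- hedged usage sketch: a typical consumer of the 6-byte
 * status block fetched above validates it with the shared DRM helpers,
 * along the lines of
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *
 *	if (!intel_dp_get_link_status(intel_dp, link_status))
 *		return;
 *	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
 *		// channel EQ lost: retrain the link
 *
 * drm_dp_channel_eq_ok() and drm_dp_clock_recovery_ok() are existing
 * drm_dp_helper functions; the control flow here is illustrative only.
 */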
2977
1100244e 2978/* These are source-specific values. */
a4fc5ed6 2979static uint8_t
1a2eb460 2980intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2981{
30add22d 2982 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2983 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2984 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2985
9314726b
VK
2986 if (IS_BROXTON(dev))
2987 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2988 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2989 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2990 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2991 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2992 } else if (IS_VALLEYVIEW(dev))
bd60018a 2993 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2994 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2995 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2996 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2997 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2998 else
bd60018a 2999 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3000}
3001
3002static uint8_t
3003intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3004{
30add22d 3005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3006 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3007
5a9d1f1a
DL
3008 if (INTEL_INFO(dev)->gen >= 9) {
3009 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3010 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3011 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3013 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3014 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3015 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3016 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3017 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3018 default:
3019 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3020 }
3021 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3022 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3024 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3026 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3028 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3030 default:
bd60018a 3031 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3032 }
e2fa6fba
P
3033 } else if (IS_VALLEYVIEW(dev)) {
3034 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3035 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3036 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3038 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3039 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3040 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3042 default:
bd60018a 3043 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3044 }
bc7d38a4 3045 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3046 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3048 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3050 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3051 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3052 default:
bd60018a 3053 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3054 }
3055 } else {
3056 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3057 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3058 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3060 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3061 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3062 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3064 default:
bd60018a 3065 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3066 }
a4fc5ed6
KP
3067 }
3068}
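/*
 * Editor's note -- simplified, hypothetical sketch of how the two limits
 * above are typically applied: the sink-requested levels are clamped to
 * the platform maxima and the corresponding "max reached" DPCD bits are
 * set, roughly
 *
 *	uint8_t v = requested_voltage_swing;
 *	uint8_t p = requested_pre_emphasis;
 *	uint8_t v_max = intel_dp_voltage_max(intel_dp);
 *
 *	if (v >= v_max)
 *		v = v_max | DP_TRAIN_MAX_SWING_REACHED;
 *	if (p >= intel_dp_pre_emphasis_max(intel_dp, v))
 *		p = intel_dp_pre_emphasis_max(intel_dp, v) |
 *			DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
 *
 *	train_set = v | p;
 */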
3069
5829975c 3070static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3071{
3072 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3075 struct intel_crtc *intel_crtc =
3076 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3077 unsigned long demph_reg_value, preemph_reg_value,
3078 uniqtranscale_reg_value;
3079 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3080 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3081 int pipe = intel_crtc->pipe;
e2fa6fba
P
3082
3083 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3084 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3085 preemph_reg_value = 0x0004000;
3086 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3088 demph_reg_value = 0x2B405555;
3089 uniqtranscale_reg_value = 0x552AB83A;
3090 break;
bd60018a 3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3092 demph_reg_value = 0x2B404040;
3093 uniqtranscale_reg_value = 0x5548B83A;
3094 break;
bd60018a 3095 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3096 demph_reg_value = 0x2B245555;
3097 uniqtranscale_reg_value = 0x5560B83A;
3098 break;
bd60018a 3099 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3100 demph_reg_value = 0x2B405555;
3101 uniqtranscale_reg_value = 0x5598DA3A;
3102 break;
3103 default:
3104 return 0;
3105 }
3106 break;
bd60018a 3107 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3108 preemph_reg_value = 0x0002000;
3109 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3111 demph_reg_value = 0x2B404040;
3112 uniqtranscale_reg_value = 0x5552B83A;
3113 break;
bd60018a 3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3115 demph_reg_value = 0x2B404848;
3116 uniqtranscale_reg_value = 0x5580B83A;
3117 break;
bd60018a 3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3119 demph_reg_value = 0x2B404040;
3120 uniqtranscale_reg_value = 0x55ADDA3A;
3121 break;
3122 default:
3123 return 0;
3124 }
3125 break;
bd60018a 3126 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3127 preemph_reg_value = 0x0000000;
3128 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3130 demph_reg_value = 0x2B305555;
3131 uniqtranscale_reg_value = 0x5570B83A;
3132 break;
bd60018a 3133 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3134 demph_reg_value = 0x2B2B4040;
3135 uniqtranscale_reg_value = 0x55ADDA3A;
3136 break;
3137 default:
3138 return 0;
3139 }
3140 break;
bd60018a 3141 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3142 preemph_reg_value = 0x0006000;
3143 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3144 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3145 demph_reg_value = 0x1B405555;
3146 uniqtranscale_reg_value = 0x55ADDA3A;
3147 break;
3148 default:
3149 return 0;
3150 }
3151 break;
3152 default:
3153 return 0;
3154 }
3155
a580516d 3156 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3157 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3158 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3159 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3160 uniqtranscale_reg_value);
ab3c759a
CML
3161 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3162 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3164 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3165 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3166
3167 return 0;
3168}
3169
5829975c 3170static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3171{
3172 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3173 struct drm_i915_private *dev_priv = dev->dev_private;
3174 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3175 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3176 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3177 uint8_t train_set = intel_dp->train_set[0];
3178 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3179 enum pipe pipe = intel_crtc->pipe;
3180 int i;
e4a1d846
CML
3181
3182 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3183 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3184 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3185 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3186 deemph_reg_value = 128;
3187 margin_reg_value = 52;
3188 break;
bd60018a 3189 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3190 deemph_reg_value = 128;
3191 margin_reg_value = 77;
3192 break;
bd60018a 3193 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3194 deemph_reg_value = 128;
3195 margin_reg_value = 102;
3196 break;
bd60018a 3197 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3198 deemph_reg_value = 128;
3199 margin_reg_value = 154;
3200 /* FIXME extra to set for 1200 */
3201 break;
3202 default:
3203 return 0;
3204 }
3205 break;
bd60018a 3206 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3207 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3208 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3209 deemph_reg_value = 85;
3210 margin_reg_value = 78;
3211 break;
bd60018a 3212 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3213 deemph_reg_value = 85;
3214 margin_reg_value = 116;
3215 break;
bd60018a 3216 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3217 deemph_reg_value = 85;
3218 margin_reg_value = 154;
3219 break;
3220 default:
3221 return 0;
3222 }
3223 break;
bd60018a 3224 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3225 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3227 deemph_reg_value = 64;
3228 margin_reg_value = 104;
3229 break;
bd60018a 3230 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3231 deemph_reg_value = 64;
3232 margin_reg_value = 154;
3233 break;
3234 default:
3235 return 0;
3236 }
3237 break;
bd60018a 3238 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3239 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3241 deemph_reg_value = 43;
3242 margin_reg_value = 154;
3243 break;
3244 default:
3245 return 0;
3246 }
3247 break;
3248 default:
3249 return 0;
3250 }
3251
a580516d 3252 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3253
3254 /* Clear calc init */
1966e59e
VS
3255 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3256 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3257 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3258 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3259 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3260
3261 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3262 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3263 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3264 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3265 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3266
a02ef3c7
VS
3267 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3268 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3269 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3270 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3271
3272 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3273 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3274 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3275 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3276
e4a1d846 3277 /* Program swing deemph */
f72df8db
VS
3278 for (i = 0; i < 4; i++) {
3279 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3280 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3281 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3282 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3283 }
e4a1d846
CML
3284
3285 /* Program swing margin */
f72df8db
VS
3286 for (i = 0; i < 4; i++) {
3287 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3288 val &= ~DPIO_SWING_MARGIN000_MASK;
3289 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3290 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3291 }
e4a1d846
CML
3292
3293 /* Disable unique transition scale */
f72df8db
VS
3294 for (i = 0; i < 4; i++) {
3295 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3296 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3297 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3298 }
e4a1d846
CML
3299
3300 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3301 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3302 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3303 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3304
3305 /*
3306		 * The document says to set bit 27 for ch0 and bit 26 for ch1;
3307		 * this might be a typo in the doc.
3308 * For now, for this unique transition scale selection, set bit
3309 * 27 for ch0 and ch1.
3310 */
f72df8db
VS
3311 for (i = 0; i < 4; i++) {
3312 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3313 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3314 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3315 }
e4a1d846 3316
f72df8db
VS
3317 for (i = 0; i < 4; i++) {
3318 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3319 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3320 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3321 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3322 }
e4a1d846
CML
3323 }
3324
3325 /* Start swing calculation */
1966e59e
VS
3326 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3327 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3328 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3329
3330 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3331 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3332 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3333
3334 /* LRC Bypass */
3335 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3336 val |= DPIO_LRC_BYPASS;
3337 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3338
a580516d 3339 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3340
3341 return 0;
3342}
3343
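/*
 * Compute the next drive settings requested by the sink: take the highest
 * voltage swing and pre-emphasis requested across the active lanes, clamp
 * them to the source's maxima (flagging MAX_*_REACHED), and apply the same
 * value to every lane in train_set[].
 */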
a4fc5ed6 3344static void
0301b3ac
JN
3345intel_get_adjust_train(struct intel_dp *intel_dp,
3346 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3347{
3348 uint8_t v = 0;
3349 uint8_t p = 0;
3350 int lane;
1a2eb460
KP
3351 uint8_t voltage_max;
3352 uint8_t preemph_max;
a4fc5ed6 3353
33a34e4e 3354 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3355 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3356 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3357
3358 if (this_v > v)
3359 v = this_v;
3360 if (this_p > p)
3361 p = this_p;
3362 }
3363
1a2eb460 3364 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3365 if (v >= voltage_max)
3366 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3367
1a2eb460
KP
3368 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3369 if (p >= preemph_max)
3370 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3371
3372 for (lane = 0; lane < 4; lane++)
33a34e4e 3373 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3374}
3375
3376static uint32_t
5829975c 3377gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3378{
3cf2efb1 3379 uint32_t signal_levels = 0;
a4fc5ed6 3380
3cf2efb1 3381 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3383 default:
3384 signal_levels |= DP_VOLTAGE_0_4;
3385 break;
bd60018a 3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3387 signal_levels |= DP_VOLTAGE_0_6;
3388 break;
bd60018a 3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3390 signal_levels |= DP_VOLTAGE_0_8;
3391 break;
bd60018a 3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3393 signal_levels |= DP_VOLTAGE_1_2;
3394 break;
3395 }
3cf2efb1 3396 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3397 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3398 default:
3399 signal_levels |= DP_PRE_EMPHASIS_0;
3400 break;
bd60018a 3401 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3402 signal_levels |= DP_PRE_EMPHASIS_3_5;
3403 break;
bd60018a 3404 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3405 signal_levels |= DP_PRE_EMPHASIS_6;
3406 break;
bd60018a 3407 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3408 signal_levels |= DP_PRE_EMPHASIS_9_5;
3409 break;
3410 }
3411 return signal_levels;
3412}
3413
e3421a18
ZW
3414/* Gen6's DP voltage swing and pre-emphasis control */
3415static uint32_t
5829975c 3416gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3417{
3c5a62b5
YL
3418 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3419 DP_TRAIN_PRE_EMPHASIS_MASK);
3420 switch (signal_levels) {
bd60018a
SJ
3421 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3423 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3425 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3428 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3431 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3434 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3435 default:
3c5a62b5
YL
3436		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3437			      " 0x%x\n", signal_levels);
3438 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3439 }
3440}
3441
1a2eb460
KP
3442/* Gen7's DP voltage swing and pre-emphasis control */
3443static uint32_t
5829975c 3444gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3445{
3446 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3447 DP_TRAIN_PRE_EMPHASIS_MASK);
3448 switch (signal_levels) {
bd60018a 3449 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3450 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3452 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3453 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3454 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3455
bd60018a 3456 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3457 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3459 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3460
bd60018a 3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3462 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3464 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3465
3466 default:
3467		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3468			      " 0x%x\n", signal_levels);
3469 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3470 }
3471}
3472
f0a3424e
PZ
3473/* Properly updates "DP" with the correct signal levels. */
3474static void
3475intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3476{
3477 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3478 enum port port = intel_dig_port->port;
f0a3424e 3479 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3480 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3481 uint8_t train_set = intel_dp->train_set[0];
3482
f8896f5d
DW
3483 if (HAS_DDI(dev)) {
3484 signal_levels = ddi_signal_levels(intel_dp);
3485
3486 if (IS_BROXTON(dev))
3487 signal_levels = 0;
3488 else
3489 mask = DDI_BUF_EMP_MASK;
e4a1d846 3490 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3491 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3492 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3493 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3494 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3495 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3496 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3497 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3498 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3499 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3500 } else {
5829975c 3501 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3502 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3503 }
3504
96fb9f9b
VK
3505 if (mask)
3506 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3507
3508 DRM_DEBUG_KMS("Using vswing level %d\n",
3509 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3510 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3511 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3512 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3513
3514 *DP = (*DP & ~mask) | signal_levels;
3515}
3516
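/*
 * Program the requested training pattern into the port register and mirror
 * it to the sink: DP_TRAINING_PATTERN_SET plus, unless training is being
 * disabled, the per-lane drive settings in DP_TRAINING_LANEx_SET are written
 * in a single AUX transaction.
 */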
a4fc5ed6 3517static bool
ea5b213a 3518intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3519 uint32_t *DP,
58e10eb9 3520 uint8_t dp_train_pat)
a4fc5ed6 3521{
174edf1f
PZ
3522 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3523 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3524 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3525 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3526 int ret, len;
a4fc5ed6 3527
7b13b58a 3528 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3529
70aff66c 3530 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3531 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3532
2cdfe6c8
JN
3533 buf[0] = dp_train_pat;
3534 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3535 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3536 /* don't write DP_TRAINING_LANEx_SET on disable */
3537 len = 1;
3538 } else {
3539 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3540 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3541 len = intel_dp->lane_count + 1;
47ea7542 3542 }
a4fc5ed6 3543
9d1a1031
JN
3544 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3545 buf, len);
2cdfe6c8
JN
3546
3547 return ret == len;
a4fc5ed6
KP
3548}
3549
70aff66c
JN
3550static bool
3551intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3552 uint8_t dp_train_pat)
3553{
4e96c977
MK
3554 if (!intel_dp->train_set_valid)
3555 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3556 intel_dp_set_signal_levels(intel_dp, DP);
3557 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3558}
3559
3560static bool
3561intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3562 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3563{
3564 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3565 struct drm_device *dev = intel_dig_port->base.base.dev;
3566 struct drm_i915_private *dev_priv = dev->dev_private;
3567 int ret;
3568
3569 intel_get_adjust_train(intel_dp, link_status);
3570 intel_dp_set_signal_levels(intel_dp, DP);
3571
3572 I915_WRITE(intel_dp->output_reg, *DP);
3573 POSTING_READ(intel_dp->output_reg);
3574
9d1a1031
JN
3575 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3576 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3577
3578 return ret == intel_dp->lane_count;
3579}
3580
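/*
 * On DDI platforms, switch the port to emitting idle patterns between link
 * training and normal pixel output; no-op elsewhere.
 */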
3ab9c637
ID
3581static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3582{
3583 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3584 struct drm_device *dev = intel_dig_port->base.base.dev;
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3586 enum port port = intel_dig_port->port;
3587 uint32_t val;
3588
3589 if (!HAS_DDI(dev))
3590 return;
3591
3592 val = I915_READ(DP_TP_CTL(port));
3593 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3594 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3595 I915_WRITE(DP_TP_CTL(port), val);
3596
3597 /*
3598	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3599	 * we need to set idle transmission mode is to work around a HW issue
3600	 * where we enable the pipe while not in idle link-training mode.
3601	 * In this case there is a requirement to wait for a minimum number of
3602 * idle patterns to be sent.
3603 */
3604 if (port == PORT_A)
3605 return;
3606
3607 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3608 1))
3609 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3610}
3611
33a34e4e 3612/* Enable corresponding port and start training pattern 1 */
c19b0669 3613void
33a34e4e 3614intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3615{
da63a9f2 3616 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3617 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3618 int i;
3619 uint8_t voltage;
cdb0e95b 3620 int voltage_tries, loop_tries;
ea5b213a 3621 uint32_t DP = intel_dp->DP;
6aba5b6c 3622 uint8_t link_config[2];
a4fc5ed6 3623
affa9354 3624 if (HAS_DDI(dev))
c19b0669
PZ
3625 intel_ddi_prepare_link_retrain(encoder);
3626
3cf2efb1 3627 /* Write the link configuration data */
6aba5b6c
JN
3628 link_config[0] = intel_dp->link_bw;
3629 link_config[1] = intel_dp->lane_count;
3630 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3631 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3632 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3633 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3634 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3635 &intel_dp->rate_select, 1);
6aba5b6c
JN
3636
3637 link_config[0] = 0;
3638 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3639 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3640
3641 DP |= DP_PORT_EN;
1a2eb460 3642
70aff66c
JN
3643 /* clock recovery */
3644 if (!intel_dp_reset_link_train(intel_dp, &DP,
3645 DP_TRAINING_PATTERN_1 |
3646 DP_LINK_SCRAMBLING_DISABLE)) {
3647 DRM_ERROR("failed to enable link training\n");
3648 return;
3649 }
3650
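	/*
	 * Clock recovery loop: keep applying the sink's requested drive
	 * settings until it reports clock recovery, giving up after five
	 * attempts at the same voltage or five full resets at max swing.
	 */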
a4fc5ed6 3651 voltage = 0xff;
cdb0e95b
KP
3652 voltage_tries = 0;
3653 loop_tries = 0;
a4fc5ed6 3654 for (;;) {
70aff66c 3655 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3656
a7c9655f 3657 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3658 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3659 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3660 break;
93f62dad 3661 }
a4fc5ed6 3662
01916270 3663 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3664 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3665 break;
3666 }
3667
4e96c977
MK
3668 /*
3669 * if we used previously trained voltage and pre-emphasis values
3670 * and we don't get clock recovery, reset link training values
3671 */
3672 if (intel_dp->train_set_valid) {
3673			DRM_DEBUG_KMS("clock recovery not ok, reset\n");
3674 /* clear the flag as we are not reusing train set */
3675 intel_dp->train_set_valid = false;
3676 if (!intel_dp_reset_link_train(intel_dp, &DP,
3677 DP_TRAINING_PATTERN_1 |
3678 DP_LINK_SCRAMBLING_DISABLE)) {
3679 DRM_ERROR("failed to enable link training\n");
3680 return;
3681 }
3682 continue;
3683 }
3684
3cf2efb1
CW
3685 /* Check to see if we've tried the max voltage */
3686 for (i = 0; i < intel_dp->lane_count; i++)
3687 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3688 break;
3b4f819d 3689 if (i == intel_dp->lane_count) {
b06fbda3
DV
3690 ++loop_tries;
3691 if (loop_tries == 5) {
3def84b3 3692 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3693 break;
3694 }
70aff66c
JN
3695 intel_dp_reset_link_train(intel_dp, &DP,
3696 DP_TRAINING_PATTERN_1 |
3697 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3698 voltage_tries = 0;
3699 continue;
3700 }
a4fc5ed6 3701
3cf2efb1 3702 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3703 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3704 ++voltage_tries;
b06fbda3 3705 if (voltage_tries == 5) {
3def84b3 3706 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3707 break;
3708 }
3709 } else
3710 voltage_tries = 0;
3711 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3712
70aff66c
JN
3713 /* Update training set as requested by target */
3714 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3715 DRM_ERROR("failed to update link training\n");
3716 break;
3717 }
a4fc5ed6
KP
3718 }
3719
33a34e4e
JB
3720 intel_dp->DP = DP;
3721}
3722
c19b0669 3723void
33a34e4e
JB
3724intel_dp_complete_link_train(struct intel_dp *intel_dp)
3725{
33a34e4e 3726 bool channel_eq = false;
37f80975 3727 int tries, cr_tries;
33a34e4e 3728 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3729 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3730
3731	/* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3732 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3733 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3734
a4fc5ed6 3735 /* channel equalization */
70aff66c 3736 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3737 training_pattern |
70aff66c
JN
3738 DP_LINK_SCRAMBLING_DISABLE)) {
3739 DRM_ERROR("failed to start channel equalization\n");
3740 return;
3741 }
3742
a4fc5ed6 3743 tries = 0;
37f80975 3744 cr_tries = 0;
a4fc5ed6
KP
3745 channel_eq = false;
3746 for (;;) {
70aff66c 3747 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3748
37f80975
JB
3749 if (cr_tries > 5) {
3750 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3751 break;
3752 }
3753
a7c9655f 3754 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3755 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3756 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3757 break;
70aff66c 3758 }
a4fc5ed6 3759
37f80975 3760 /* Make sure clock is still ok */
01916270 3761 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3762 intel_dp->train_set_valid = false;
37f80975 3763 intel_dp_start_link_train(intel_dp);
70aff66c 3764 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3765 training_pattern |
70aff66c 3766 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3767 cr_tries++;
3768 continue;
3769 }
3770
1ffdff13 3771 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3772 channel_eq = true;
3773 break;
3774 }
a4fc5ed6 3775
37f80975
JB
3776 /* Try 5 times, then try clock recovery if that fails */
3777 if (tries > 5) {
4e96c977 3778 intel_dp->train_set_valid = false;
37f80975 3779 intel_dp_start_link_train(intel_dp);
70aff66c 3780 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3781 training_pattern |
70aff66c 3782 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3783 tries = 0;
3784 cr_tries++;
3785 continue;
3786 }
a4fc5ed6 3787
70aff66c
JN
3788 /* Update training set as requested by target */
3789 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3790 DRM_ERROR("failed to update link training\n");
3791 break;
3792 }
3cf2efb1 3793 ++tries;
869184a6 3794 }
3cf2efb1 3795
3ab9c637
ID
3796 intel_dp_set_idle_link_train(intel_dp);
3797
3798 intel_dp->DP = DP;
3799
4e96c977 3800 if (channel_eq) {
5fa836a9 3801 intel_dp->train_set_valid = true;
07f42258 3802 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3803 }
3ab9c637
ID
3804}
3805
3806void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3807{
70aff66c 3808 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3809 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3810}
3811
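/*
 * Shut the DP port down: drop the link into the idle training pattern, then
 * clear the port enable and audio output bits. Includes the IBX workaround
 * of briefly re-enabling the port on transcoder A so the matching HDMI port
 * can later be enabled there.
 */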
3812static void
ea5b213a 3813intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3814{
da63a9f2 3815 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3816 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3817 enum port port = intel_dig_port->port;
da63a9f2 3818 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3819 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3820 uint32_t DP = intel_dp->DP;
a4fc5ed6 3821
bc76e320 3822 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3823 return;
3824
0c33d8d7 3825 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3826 return;
3827
28c97730 3828 DRM_DEBUG_KMS("\n");
32f9d658 3829
39e5fa88
VS
3830 if ((IS_GEN7(dev) && port == PORT_A) ||
3831 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3832 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3833 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3834 } else {
aad3d14d
VS
3835 if (IS_CHERRYVIEW(dev))
3836 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3837 else
3838 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3839 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3840 }
1612c8bd 3841 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3842 POSTING_READ(intel_dp->output_reg);
5eb08b69 3843
1612c8bd
VS
3844 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3845 I915_WRITE(intel_dp->output_reg, DP);
3846 POSTING_READ(intel_dp->output_reg);
3847
3848 /*
3849 * HW workaround for IBX, we need to move the port
3850 * to transcoder A after disabling it to allow the
3851 * matching HDMI port to be enabled on transcoder A.
3852 */
3853 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3854 /* always enable with pattern 1 (as per spec) */
3855 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3856 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3857 I915_WRITE(intel_dp->output_reg, DP);
3858 POSTING_READ(intel_dp->output_reg);
3859
3860 DP &= ~DP_PORT_EN;
5bddd17f 3861 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3862 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3863 }
3864
f01eca2e 3865 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3866}
3867
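/*
 * Read the sink's receiver capability field into intel_dp->dpcd and cache
 * derived state: PSR/PSR2 support, whether training pattern 3 can be used,
 * eDP 1.4 intermediate link rates and downstream port info. Returns false
 * if the AUX read fails or no DPCD is present.
 */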
26d61aad
KP
3868static bool
3869intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3870{
a031d709
RV
3871 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3872 struct drm_device *dev = dig_port->base.base.dev;
3873 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3874 uint8_t rev;
a031d709 3875
9d1a1031
JN
3876 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3877 sizeof(intel_dp->dpcd)) < 0)
edb39244 3878 return false; /* aux transfer failed */
92fd8fd1 3879
a8e98153 3880 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3881
edb39244
AJ
3882 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3883 return false; /* DPCD not present */
3884
2293bb5c
SK
3885 /* Check if the panel supports PSR */
3886 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3887 if (is_edp(intel_dp)) {
9d1a1031
JN
3888 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3889 intel_dp->psr_dpcd,
3890 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3891 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3892 dev_priv->psr.sink_support = true;
50003939 3893 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3894 }
474d1ec4
SJ
3895
3896 if (INTEL_INFO(dev)->gen >= 9 &&
3897 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3898 uint8_t frame_sync_cap;
3899
3900 dev_priv->psr.sink_support = true;
3901 intel_dp_dpcd_read_wake(&intel_dp->aux,
3902 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3903 &frame_sync_cap, 1);
3904 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3905 /* PSR2 needs frame sync as well */
3906 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3907			DRM_DEBUG_KMS("PSR2 %s on sink\n",
3908 dev_priv->psr.psr2_support ? "supported" : "not supported");
3909 }
50003939
JN
3910 }
3911
ed63baaf
TS
3912	/* Training Pattern 3 support: only Intel platforms that support HBR2
3913	 * also support TP3, hence the source HBR2 check is used along with the
3914	 * DPCD check to ensure TP3 can be enabled.
3915	 * SKL < B0: due to its WaDisableHBR2, it is the only exception where TP3
3916	 * is supported but still not enabled.
3917 */
06ea66b6 3918 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611 3919 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
ed63baaf 3920 intel_dp_source_supports_hbr2(dev)) {
06ea66b6 3921 intel_dp->use_tps3 = true;
f8d8a672 3922 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3923 } else
3924 intel_dp->use_tps3 = false;
3925
fc0f8e25
SJ
3926 /* Intermediate frequency support */
3927 if (is_edp(intel_dp) &&
3928 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3929 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3930	    (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3931 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3932 int i;
3933
fc0f8e25
SJ
3934 intel_dp_dpcd_read_wake(&intel_dp->aux,
3935 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3936 sink_rates,
3937 sizeof(sink_rates));
ea2d8a42 3938
94ca719e
VS
3939 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3940 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3941
3942 if (val == 0)
3943 break;
3944
af77b974
SJ
3945			/* Value read is in units of 200 kHz; drm clock is stored in deca-kHz */
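			/* e.g. a raw value of 8100 (8100 * 200 kHz = 1.62 GHz) is stored as 162000 */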
3946 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3947 }
94ca719e 3948 intel_dp->num_sink_rates = i;
fc0f8e25 3949 }
0336400e
VS
3950
3951 intel_dp_print_rates(intel_dp);
3952
edb39244
AJ
3953 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3954 DP_DWN_STRM_PORT_PRESENT))
3955 return true; /* native DP sink */
3956
3957 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3958 return true; /* no per-port downstream info */
3959
9d1a1031
JN
3960 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3961 intel_dp->downstream_ports,
3962 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3963 return false; /* downstream port status fetch failed */
3964
3965 return true;
92fd8fd1
KP
3966}
3967
0d198328
AJ
3968static void
3969intel_dp_probe_oui(struct intel_dp *intel_dp)
3970{
3971 u8 buf[3];
3972
3973 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3974 return;
3975
9d1a1031 3976 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3977 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3978 buf[0], buf[1], buf[2]);
3979
9d1a1031 3980 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3981 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3982 buf[0], buf[1], buf[2]);
3983}
3984
0e32b39c
DA
3985static bool
3986intel_dp_probe_mst(struct intel_dp *intel_dp)
3987{
3988 u8 buf[1];
3989
3990 if (!intel_dp->can_mst)
3991 return false;
3992
3993 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3994 return false;
3995
0e32b39c
DA
3996 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3997 if (buf[0] & DP_MST_CAP) {
3998 DRM_DEBUG_KMS("Sink is MST capable\n");
3999 intel_dp->is_mst = true;
4000 } else {
4001 DRM_DEBUG_KMS("Sink is not MST capable\n");
4002 intel_dp->is_mst = false;
4003 }
4004 }
0e32b39c
DA
4005
4006 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4007 return intel_dp->is_mst;
4008}
4009
082dcc7c 4010static void intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4011{
082dcc7c
RV
4012 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4013 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4014 u8 buf;
d2e216d0 4015
082dcc7c
RV
4016 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4017 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4018 return;
4373f0f2
PZ
4019 }
4020
082dcc7c
RV
4021 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4022 buf & ~DP_TEST_SINK_START) < 0)
4023 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
d2e216d0 4024
082dcc7c
RV
4025 hsw_enable_ips(intel_crtc);
4026}
4027
4028static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4029{
4030 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4031 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4032 u8 buf;
4033
4034 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4035 return -EIO;
4036
4037 if (!(buf & DP_TEST_CRC_SUPPORTED))
4038 return -ENOTTY;
4039
4040 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4041 return -EIO;
4042
4043 hsw_disable_ips(intel_crtc);
1dda5f93 4044
9d1a1031 4045 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4046 buf | DP_TEST_SINK_START) < 0) {
4047 hsw_enable_ips(intel_crtc);
4048 return -EIO;
4373f0f2
PZ
4049 }
4050
082dcc7c
RV
4051 return 0;
4052}
4053
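/*
 * Retrieve a sink-computed CRC of the transmitted frame: start the TEST_SINK
 * CRC engine, wait up to six vblanks for TEST_COUNT to advance, then read
 * the six CRC bytes from DP_TEST_CRC_R_CR.
 */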
4054int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4055{
4056 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4057 struct drm_device *dev = dig_port->base.base.dev;
4058 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4059 u8 buf;
4060 int test_crc_count;
4061 int attempts = 6;
4062 int ret;
4063
4064 ret = intel_dp_sink_crc_start(intel_dp);
4065 if (ret)
4066 return ret;
4067
4373f0f2
PZ
4068 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0) {
4069 ret = -EIO;
afe0d67e 4070 goto stop;
4373f0f2 4071 }
d2e216d0 4072
ad9dc91b 4073 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4074
ad9dc91b 4075 do {
1dda5f93 4076 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4077 DP_TEST_SINK_MISC, &buf) < 0) {
4078 ret = -EIO;
afe0d67e 4079 goto stop;
4373f0f2 4080 }
ad9dc91b
RV
4081 intel_wait_for_vblank(dev, intel_crtc->pipe);
4082 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4083
4084 if (attempts == 0) {
90bd1f46 4085 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4373f0f2 4086 ret = -ETIMEDOUT;
afe0d67e 4087 goto stop;
ad9dc91b 4088 }
d2e216d0 4089
082dcc7c 4090 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
4373f0f2 4091 ret = -EIO;
afe0d67e 4092stop:
082dcc7c 4093 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4094 return ret;
d2e216d0
RV
4095}
4096
a60f0e38
JB
4097static bool
4098intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4099{
9d1a1031
JN
4100 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4101 DP_DEVICE_SERVICE_IRQ_VECTOR,
4102 sink_irq_vector, 1) == 1;
a60f0e38
JB
4103}
4104
0e32b39c
DA
4105static bool
4106intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4107{
4108 int ret;
4109
4110 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_SINK_COUNT_ESI,
4112 sink_irq_vector, 14);
4113 if (ret != 14)
4114 return false;
4115
4116 return true;
4117}
4118
c5d5ab7a
TP
4119static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4120{
4121 uint8_t test_result = DP_TEST_ACK;
4122 return test_result;
4123}
4124
4125static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4126{
4127 uint8_t test_result = DP_TEST_NAK;
4128 return test_result;
4129}
4130
4131static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4132{
c5d5ab7a 4133 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4134 struct intel_connector *intel_connector = intel_dp->attached_connector;
4135 struct drm_connector *connector = &intel_connector->base;
4136
4137 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4138 connector->edid_corrupt ||
559be30c
TP
4139 intel_dp->aux.i2c_defer_count > 6) {
4140 /* Check EDID read for NACKs, DEFERs and corruption
4141 * (DP CTS 1.2 Core r1.1)
4142 * 4.2.2.4 : Failed EDID read, I2C_NAK
4143 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4144 * 4.2.2.6 : EDID corruption detected
4145 * Use failsafe mode for all cases
4146 */
4147 if (intel_dp->aux.i2c_nack_count > 0 ||
4148 intel_dp->aux.i2c_defer_count > 0)
4149 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4150 intel_dp->aux.i2c_nack_count,
4151 intel_dp->aux.i2c_defer_count);
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4153 } else {
f79b468e
TS
4154 struct edid *block = intel_connector->detect_edid;
4155
4156 /* We have to write the checksum
4157 * of the last block read
4158 */
4159 block += intel_connector->detect_edid->extensions;
4160
559be30c
TP
4161 if (!drm_dp_dpcd_write(&intel_dp->aux,
4162 DP_TEST_EDID_CHECKSUM,
f79b468e 4163 &block->checksum,
5a1cc655 4164 1))
559be30c
TP
4165 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4166
4167 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4168 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4169 }
4170
4171 /* Set test active flag here so userspace doesn't interrupt things */
4172 intel_dp->compliance_test_active = 1;
4173
c5d5ab7a
TP
4174 return test_result;
4175}
4176
4177static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4178{
c5d5ab7a
TP
4179 uint8_t test_result = DP_TEST_NAK;
4180 return test_result;
4181}
4182
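/*
 * Service a DP compliance automated test request: read DPCD TEST_REQUEST,
 * dispatch to the matching autotest handler, and write the resulting
 * ACK/NAK back to DP_TEST_RESPONSE.
 */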
4183static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4184{
4185 uint8_t response = DP_TEST_NAK;
4186 uint8_t rxdata = 0;
4187 int status = 0;
4188
559be30c 4189 intel_dp->compliance_test_active = 0;
c5d5ab7a 4190 intel_dp->compliance_test_type = 0;
559be30c
TP
4191 intel_dp->compliance_test_data = 0;
4192
c5d5ab7a
TP
4193 intel_dp->aux.i2c_nack_count = 0;
4194 intel_dp->aux.i2c_defer_count = 0;
4195
4196 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4197 if (status <= 0) {
4198 DRM_DEBUG_KMS("Could not read test request from sink\n");
4199 goto update_status;
4200 }
4201
4202 switch (rxdata) {
4203 case DP_TEST_LINK_TRAINING:
4204 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4205 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4206 response = intel_dp_autotest_link_training(intel_dp);
4207 break;
4208 case DP_TEST_LINK_VIDEO_PATTERN:
4209 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4211 response = intel_dp_autotest_video_pattern(intel_dp);
4212 break;
4213 case DP_TEST_LINK_EDID_READ:
4214 DRM_DEBUG_KMS("EDID test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4216 response = intel_dp_autotest_edid(intel_dp);
4217 break;
4218 case DP_TEST_LINK_PHY_TEST_PATTERN:
4219 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4221 response = intel_dp_autotest_phy_pattern(intel_dp);
4222 break;
4223 default:
4224 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4225 break;
4226 }
4227
4228update_status:
4229 status = drm_dp_dpcd_write(&intel_dp->aux,
4230 DP_TEST_RESPONSE,
4231 &response, 1);
4232 if (status <= 0)
4233 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4234}
4235
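/*
 * Handle an MST short pulse: read the sink's ESI block, retrain the link if
 * channel EQ has dropped, let the topology manager process any MST messages,
 * and acknowledge handled events (retrying the ESI write up to three times).
 */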
0e32b39c
DA
4236static int
4237intel_dp_check_mst_status(struct intel_dp *intel_dp)
4238{
4239 bool bret;
4240
4241 if (intel_dp->is_mst) {
4242 u8 esi[16] = { 0 };
4243 int ret = 0;
4244 int retry;
4245 bool handled;
4246 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4247go_again:
4248 if (bret == true) {
4249
4250 /* check link status - esi[10] = 0x200c */
4251 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4252 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4253 intel_dp_start_link_train(intel_dp);
4254 intel_dp_complete_link_train(intel_dp);
4255 intel_dp_stop_link_train(intel_dp);
4256 }
4257
6f34cc39 4258 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4259 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4260
4261 if (handled) {
4262 for (retry = 0; retry < 3; retry++) {
4263 int wret;
4264 wret = drm_dp_dpcd_write(&intel_dp->aux,
4265 DP_SINK_COUNT_ESI+1,
4266 &esi[1], 3);
4267 if (wret == 3) {
4268 break;
4269 }
4270 }
4271
4272 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4273 if (bret == true) {
6f34cc39 4274 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4275 goto go_again;
4276 }
4277 } else
4278 ret = 0;
4279
4280 return ret;
4281 } else {
4282 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4283 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4284 intel_dp->is_mst = false;
4285 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4286 /* send a hotplug event */
4287 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4288 }
4289 }
4290 return -EINVAL;
4291}
4292
a4fc5ed6
KP
4293/*
4294 * According to DP spec
4295 * 5.1.2:
4296 * 1. Read DPCD
4297 * 2. Configure link according to Receiver Capabilities
4298 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4299 * 4. Check link status on receipt of hot-plug interrupt
4300 */
a5146200 4301static void
ea5b213a 4302intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4303{
5b215bcf 4304 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4305 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4306 u8 sink_irq_vector;
93f62dad 4307 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4308
5b215bcf
DA
4309 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4310
e02f9a06 4311 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4312 return;
4313
1a125d8a
ID
4314 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4315 return;
4316
92fd8fd1 4317 /* Try to read receiver status if the link appears to be up */
93f62dad 4318 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4319 return;
4320 }
4321
92fd8fd1 4322 /* Now read the DPCD to see if it's actually running */
26d61aad 4323 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4324 return;
4325 }
4326
a60f0e38
JB
4327 /* Try to read the source of the interrupt */
4328 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4329 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4330 /* Clear interrupt source */
9d1a1031
JN
4331 drm_dp_dpcd_writeb(&intel_dp->aux,
4332 DP_DEVICE_SERVICE_IRQ_VECTOR,
4333 sink_irq_vector);
a60f0e38
JB
4334
4335 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4336 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4337 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4338 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4339 }
4340
1ffdff13 4341 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4342 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4343 intel_encoder->base.name);
33a34e4e
JB
4344 intel_dp_start_link_train(intel_dp);
4345 intel_dp_complete_link_train(intel_dp);
3ab9c637 4346 intel_dp_stop_link_train(intel_dp);
33a34e4e 4347 }
a4fc5ed6 4348}
a4fc5ed6 4349
caf9ab24 4350/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4351static enum drm_connector_status
26d61aad 4352intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4353{
caf9ab24 4354 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4355 uint8_t type;
4356
4357 if (!intel_dp_get_dpcd(intel_dp))
4358 return connector_status_disconnected;
4359
4360 /* if there's no downstream port, we're done */
4361 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4362 return connector_status_connected;
caf9ab24
AJ
4363
4364 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4365 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4366 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4367 uint8_t reg;
9d1a1031
JN
4368
4369 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4370 &reg, 1) < 0)
caf9ab24 4371 return connector_status_unknown;
9d1a1031 4372
23235177
AJ
4373 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4374 : connector_status_disconnected;
caf9ab24
AJ
4375 }
4376
4377 /* If no HPD, poke DDC gently */
0b99836f 4378 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4379 return connector_status_connected;
caf9ab24
AJ
4380
4381 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4382 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4383 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4384 if (type == DP_DS_PORT_TYPE_VGA ||
4385 type == DP_DS_PORT_TYPE_NON_EDID)
4386 return connector_status_unknown;
4387 } else {
4388 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4389 DP_DWN_STRM_PORT_TYPE_MASK;
4390 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4391 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4392 return connector_status_unknown;
4393 }
caf9ab24
AJ
4394
4395 /* Anything else is out of spec, warn and ignore */
4396 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4397 return connector_status_disconnected;
71ba9000
AJ
4398}
4399
d410b56d
CW
4400static enum drm_connector_status
4401edp_detect(struct intel_dp *intel_dp)
4402{
4403 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4404 enum drm_connector_status status;
4405
4406 status = intel_panel_detect(dev);
4407 if (status == connector_status_unknown)
4408 status = connector_status_connected;
4409
4410 return status;
4411}
4412
5eb08b69 4413static enum drm_connector_status
a9756bb5 4414ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4415{
30add22d 4416 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4417 struct drm_i915_private *dev_priv = dev->dev_private;
4418 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4419
1b469639
DL
4420 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4421 return connector_status_disconnected;
4422
26d61aad 4423 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4424}
4425
2a592bec
DA
4426static int g4x_digital_port_connected(struct drm_device *dev,
4427 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4428{
a4fc5ed6 4429 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4430 uint32_t bit;
5eb08b69 4431
232a6ee9
TP
4432 if (IS_VALLEYVIEW(dev)) {
4433 switch (intel_dig_port->port) {
4434 case PORT_B:
4435 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4436 break;
4437 case PORT_C:
4438 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4439 break;
4440 case PORT_D:
4441 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4442 break;
4443 default:
2a592bec 4444 return -EINVAL;
232a6ee9
TP
4445 }
4446 } else {
4447 switch (intel_dig_port->port) {
4448 case PORT_B:
4449 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4450 break;
4451 case PORT_C:
4452 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4453 break;
4454 case PORT_D:
4455 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4456 break;
4457 default:
2a592bec 4458 return -EINVAL;
232a6ee9 4459 }
a4fc5ed6
KP
4460 }
4461
10f76a38 4462 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4463 return 0;
4464 return 1;
4465}
4466
4467static enum drm_connector_status
4468g4x_dp_detect(struct intel_dp *intel_dp)
4469{
4470 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4471 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4472 int ret;
4473
4474 /* Can't disconnect eDP, but you can close the lid... */
4475 if (is_edp(intel_dp)) {
4476 enum drm_connector_status status;
4477
4478 status = intel_panel_detect(dev);
4479 if (status == connector_status_unknown)
4480 status = connector_status_connected;
4481 return status;
4482 }
4483
4484 ret = g4x_digital_port_connected(dev, intel_dig_port);
4485 if (ret == -EINVAL)
4486 return connector_status_unknown;
4487 else if (ret == 0)
a4fc5ed6
KP
4488 return connector_status_disconnected;
4489
26d61aad 4490 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4491}
4492
8c241fef 4493static struct edid *
beb60608 4494intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4495{
beb60608 4496 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4497
9cd300e0
JN
4498 /* use cached edid if we have one */
4499 if (intel_connector->edid) {
9cd300e0
JN
4500 /* invalid edid */
4501 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4502 return NULL;
4503
55e9edeb 4504 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4505 } else
4506 return drm_get_edid(&intel_connector->base,
4507 &intel_dp->aux.ddc);
4508}
8c241fef 4509
beb60608
CW
4510static void
4511intel_dp_set_edid(struct intel_dp *intel_dp)
4512{
4513 struct intel_connector *intel_connector = intel_dp->attached_connector;
4514 struct edid *edid;
8c241fef 4515
beb60608
CW
4516 edid = intel_dp_get_edid(intel_dp);
4517 intel_connector->detect_edid = edid;
4518
4519 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4520 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4521 else
4522 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4523}
4524
beb60608
CW
4525static void
4526intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4527{
beb60608 4528 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4529
beb60608
CW
4530 kfree(intel_connector->detect_edid);
4531 intel_connector->detect_edid = NULL;
9cd300e0 4532
beb60608
CW
4533 intel_dp->has_audio = false;
4534}
d6f24d0f 4535
beb60608
CW
4536static enum intel_display_power_domain
4537intel_dp_power_get(struct intel_dp *dp)
4538{
4539 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4540 enum intel_display_power_domain power_domain;
4541
4542 power_domain = intel_display_port_power_domain(encoder);
4543 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4544
4545 return power_domain;
4546}
d6f24d0f 4547
beb60608
CW
4548static void
4549intel_dp_power_put(struct intel_dp *dp,
4550 enum intel_display_power_domain power_domain)
4551{
4552 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4553 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4554}
4555
a9756bb5
ZW
4556static enum drm_connector_status
4557intel_dp_detect(struct drm_connector *connector, bool force)
4558{
4559 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4560 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4561 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4562 struct drm_device *dev = connector->dev;
a9756bb5 4563 enum drm_connector_status status;
671dedd2 4564 enum intel_display_power_domain power_domain;
0e32b39c 4565 bool ret;
09b1eb13 4566 u8 sink_irq_vector;
a9756bb5 4567
164c8598 4568 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4569 connector->base.id, connector->name);
beb60608 4570 intel_dp_unset_edid(intel_dp);
164c8598 4571
0e32b39c
DA
4572 if (intel_dp->is_mst) {
4573 /* MST devices are disconnected from a monitor POV */
4574 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4575 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4576 return connector_status_disconnected;
0e32b39c
DA
4577 }
4578
beb60608 4579 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4580
d410b56d
CW
4581 /* Can't disconnect eDP, but you can close the lid... */
4582 if (is_edp(intel_dp))
4583 status = edp_detect(intel_dp);
4584 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4585 status = ironlake_dp_detect(intel_dp);
4586 else
4587 status = g4x_dp_detect(intel_dp);
4588 if (status != connector_status_connected)
c8c8fb33 4589 goto out;
a9756bb5 4590
0d198328
AJ
4591 intel_dp_probe_oui(intel_dp);
4592
0e32b39c
DA
4593 ret = intel_dp_probe_mst(intel_dp);
4594 if (ret) {
4595		/* if we are in MST mode then this connector
4596		 * won't appear connected or have anything with EDID on it */
4597 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4598 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4599 status = connector_status_disconnected;
4600 goto out;
4601 }
4602
beb60608 4603 intel_dp_set_edid(intel_dp);
a9756bb5 4604
d63885da
PZ
4605 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4606 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4607 status = connector_status_connected;
4608
09b1eb13
TP
4609 /* Try to read the source of the interrupt */
4610 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4611 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4612 /* Clear interrupt source */
4613 drm_dp_dpcd_writeb(&intel_dp->aux,
4614 DP_DEVICE_SERVICE_IRQ_VECTOR,
4615 sink_irq_vector);
4616
4617 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4618 intel_dp_handle_test_request(intel_dp);
4619 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4620 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4621 }
4622
c8c8fb33 4623out:
beb60608 4624 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4625 return status;
a4fc5ed6
KP
4626}
4627
beb60608
CW
4628static void
4629intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4630{
df0e9248 4631 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4632 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4633 enum intel_display_power_domain power_domain;
a4fc5ed6 4634
beb60608
CW
4635 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4636 connector->base.id, connector->name);
4637 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4638
beb60608
CW
4639 if (connector->status != connector_status_connected)
4640 return;
671dedd2 4641
beb60608
CW
4642 power_domain = intel_dp_power_get(intel_dp);
4643
4644 intel_dp_set_edid(intel_dp);
4645
4646 intel_dp_power_put(intel_dp, power_domain);
4647
4648 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4649 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4650}
4651
4652static int intel_dp_get_modes(struct drm_connector *connector)
4653{
4654 struct intel_connector *intel_connector = to_intel_connector(connector);
4655 struct edid *edid;
4656
4657 edid = intel_connector->detect_edid;
4658 if (edid) {
4659 int ret = intel_connector_update_modes(connector, edid);
4660 if (ret)
4661 return ret;
4662 }
32f9d658 4663
f8779fda 4664 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4665 if (is_edp(intel_attached_dp(connector)) &&
4666 intel_connector->panel.fixed_mode) {
f8779fda 4667 struct drm_display_mode *mode;
beb60608
CW
4668
4669 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4670 intel_connector->panel.fixed_mode);
f8779fda 4671 if (mode) {
32f9d658
ZW
4672 drm_mode_probed_add(connector, mode);
4673 return 1;
4674 }
4675 }
beb60608 4676
32f9d658 4677 return 0;
a4fc5ed6
KP
4678}
4679
1aad7ac0
CW
4680static bool
4681intel_dp_detect_audio(struct drm_connector *connector)
4682{
1aad7ac0 4683 bool has_audio = false;
beb60608 4684 struct edid *edid;
1aad7ac0 4685
beb60608
CW
4686 edid = to_intel_connector(connector)->detect_edid;
4687 if (edid)
1aad7ac0 4688 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4689
1aad7ac0
CW
4690 return has_audio;
4691}
4692
f684960e
CW
4693static int
4694intel_dp_set_property(struct drm_connector *connector,
4695 struct drm_property *property,
4696 uint64_t val)
4697{
e953fd7b 4698 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4699 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4700 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4701 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4702 int ret;
4703
662595df 4704 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4705 if (ret)
4706 return ret;
4707
3f43c48d 4708 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4709 int i = val;
4710 bool has_audio;
4711
4712 if (i == intel_dp->force_audio)
f684960e
CW
4713 return 0;
4714
1aad7ac0 4715 intel_dp->force_audio = i;
f684960e 4716
c3e5f67b 4717 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4718 has_audio = intel_dp_detect_audio(connector);
4719 else
c3e5f67b 4720 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4721
4722 if (has_audio == intel_dp->has_audio)
f684960e
CW
4723 return 0;
4724
1aad7ac0 4725 intel_dp->has_audio = has_audio;
f684960e
CW
4726 goto done;
4727 }
4728
e953fd7b 4729 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4730 bool old_auto = intel_dp->color_range_auto;
4731 uint32_t old_range = intel_dp->color_range;
4732
55bc60db
VS
4733 switch (val) {
4734 case INTEL_BROADCAST_RGB_AUTO:
4735 intel_dp->color_range_auto = true;
4736 break;
4737 case INTEL_BROADCAST_RGB_FULL:
4738 intel_dp->color_range_auto = false;
4739 intel_dp->color_range = 0;
4740 break;
4741 case INTEL_BROADCAST_RGB_LIMITED:
4742 intel_dp->color_range_auto = false;
4743 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4744 break;
4745 default:
4746 return -EINVAL;
4747 }
ae4edb80
DV
4748
4749 if (old_auto == intel_dp->color_range_auto &&
4750 old_range == intel_dp->color_range)
4751 return 0;
4752
e953fd7b
CW
4753 goto done;
4754 }
4755
53b41837
YN
4756 if (is_edp(intel_dp) &&
4757 property == connector->dev->mode_config.scaling_mode_property) {
4758 if (val == DRM_MODE_SCALE_NONE) {
4759 DRM_DEBUG_KMS("no scaling not supported\n");
4760 return -EINVAL;
4761 }
4762
4763 if (intel_connector->panel.fitting_mode == val) {
4764 /* the eDP scaling property is not changed */
4765 return 0;
4766 }
4767 intel_connector->panel.fitting_mode = val;
4768
4769 goto done;
4770 }
4771
f684960e
CW
4772 return -EINVAL;
4773
4774done:
c0c36b94
CW
4775 if (intel_encoder->base.crtc)
4776 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4777
4778 return 0;
4779}
4780
a4fc5ed6 4781static void
73845adf 4782intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4783{
1d508706 4784 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4785
10e972d3 4786 kfree(intel_connector->detect_edid);
beb60608 4787
9cd300e0
JN
4788 if (!IS_ERR_OR_NULL(intel_connector->edid))
4789 kfree(intel_connector->edid);
4790
acd8db10
PZ
4791 /* Can't call is_edp() since the encoder may have been destroyed
4792 * already. */
4793 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4794 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4795
a4fc5ed6 4796 drm_connector_cleanup(connector);
55f78c43 4797 kfree(connector);
a4fc5ed6
KP
4798}
4799
00c09d70 4800void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4801{
da63a9f2
PZ
4802 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4803 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4804
4f71d0cb 4805 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4806 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4807 if (is_edp(intel_dp)) {
4808 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4809 /*
4811 * vdd might still be enabled due to the delayed vdd off.
4811 * Make sure vdd is actually turned off here.
4812 */
773538e8 4813 pps_lock(intel_dp);
4be73780 4814 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4815 pps_unlock(intel_dp);
4816
01527b31
CT
4817 if (intel_dp->edp_notifier.notifier_call) {
4818 unregister_reboot_notifier(&intel_dp->edp_notifier);
4819 intel_dp->edp_notifier.notifier_call = NULL;
4820 }
bd943159 4821 }
c8bd0e49 4822 drm_encoder_cleanup(encoder);
da63a9f2 4823 kfree(intel_dig_port);
24d05927
DV
4824}
4825
07f9cd0b
ID
4826static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4827{
4828 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4829
4830 if (!is_edp(intel_dp))
4831 return;
4832
951468f3
VS
4833 /*
4834 * vdd might still be enabled due to the delayed vdd off.
4835 * Make sure vdd is actually turned off here.
4836 */
afa4e53a 4837 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4838 pps_lock(intel_dp);
07f9cd0b 4839 edp_panel_vdd_off_sync(intel_dp);
773538e8 4840 pps_unlock(intel_dp);
07f9cd0b
ID
4841}
4842
49e6bc51
VS
4843static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4844{
4845 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4846 struct drm_device *dev = intel_dig_port->base.base.dev;
4847 struct drm_i915_private *dev_priv = dev->dev_private;
4848 enum intel_display_power_domain power_domain;
4849
4850 lockdep_assert_held(&dev_priv->pps_mutex);
4851
4852 if (!edp_have_panel_vdd(intel_dp))
4853 return;
4854
4855 /*
4856 * The VDD bit needs a power domain reference, so if the bit is
4857 * already enabled when we boot or resume, grab this reference and
4858 * schedule a vdd off, so we don't hold on to the reference
4859 * indefinitely.
4860 */
4861 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4862 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4863 intel_display_power_get(dev_priv, power_domain);
4864
4865 edp_panel_vdd_schedule_off(intel_dp);
4866}
4867
6d93c0c4
ID
4868static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4869{
49e6bc51
VS
4870 struct intel_dp *intel_dp;
4871
4872 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4873 return;
4874
4875 intel_dp = enc_to_intel_dp(encoder);
4876
4877 pps_lock(intel_dp);
4878
4879 /*
4880 * Read out the current power sequencer assignment,
4881 * in case the BIOS did something with it.
4882 */
4883 if (IS_VALLEYVIEW(encoder->dev))
4884 vlv_initial_power_sequencer_setup(intel_dp);
4885
4886 intel_edp_panel_vdd_sanitize(intel_dp);
4887
4888 pps_unlock(intel_dp);
6d93c0c4
ID
4889}
4890
a4fc5ed6 4891static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4892 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4893 .detect = intel_dp_detect,
beb60608 4894 .force = intel_dp_force,
a4fc5ed6 4895 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4896 .set_property = intel_dp_set_property,
2545e4a6 4897 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4898 .destroy = intel_dp_connector_destroy,
c6f95f27 4899 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4900 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4901};
4902
4903static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4904 .get_modes = intel_dp_get_modes,
4905 .mode_valid = intel_dp_mode_valid,
df0e9248 4906 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4907};
4908
a4fc5ed6 4909static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4910 .reset = intel_dp_encoder_reset,
24d05927 4911 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4912};
4913
b2c5c181 4914enum irqreturn
13cf5504
DA
4915intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4916{
4917 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4918 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4919 struct drm_device *dev = intel_dig_port->base.base.dev;
4920 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4921 enum intel_display_power_domain power_domain;
b2c5c181 4922 enum irqreturn ret = IRQ_NONE;
1c767b33 4923
0e32b39c
DA
4924 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4925 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4926
7a7f84cc
VS
4927 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4928 /*
4929 * vdd off can generate a long pulse on eDP which
4930 * would require vdd on to handle it, and thus we
4931 * would end up in an endless cycle of
4932 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4933 */
4934 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4935 port_name(intel_dig_port->port));
a8b3d52f 4936 return IRQ_HANDLED;
7a7f84cc
VS
4937 }
4938
26fbb774
VS
4939 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4940 port_name(intel_dig_port->port),
0e32b39c 4941 long_hpd ? "long" : "short");
13cf5504 4942
1c767b33
ID
4943 power_domain = intel_display_port_power_domain(intel_encoder);
4944 intel_display_power_get(dev_priv, power_domain);
4945
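	/*
	 * A long pulse signals a plug/unplug event, so the sink is fully
	 * re-probed below (DPCD, OUI, MST capability). A short pulse is the
	 * sink asking for attention, so only the MST sideband messages or
	 * the link status are checked.
	 */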
0e32b39c 4946 if (long_hpd) {
5fa836a9
MK
4947 /* indicate that we need to restart link training */
4948 intel_dp->train_set_valid = false;
2a592bec
DA
4949
4950 if (HAS_PCH_SPLIT(dev)) {
4951 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4952 goto mst_fail;
4953 } else {
4954 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4955 goto mst_fail;
4956 }
0e32b39c
DA
4957
4958 if (!intel_dp_get_dpcd(intel_dp)) {
4959 goto mst_fail;
4960 }
4961
4962 intel_dp_probe_oui(intel_dp);
4963
4964 if (!intel_dp_probe_mst(intel_dp))
4965 goto mst_fail;
4966
4967 } else {
4968 if (intel_dp->is_mst) {
1c767b33 4969 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4970 goto mst_fail;
4971 }
4972
4973 if (!intel_dp->is_mst) {
4974 /*
4975 * we'll check the link status via the normal hot plug path later -
4976 * but for short hpds we should check it now
4977 */
5b215bcf 4978 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4979 intel_dp_check_link_status(intel_dp);
5b215bcf 4980 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4981 }
4982 }
b2c5c181
DV
4983
4984 ret = IRQ_HANDLED;
4985
1c767b33 4986 goto put_power;
0e32b39c
DA
4987mst_fail:
4988 /* if we were in MST mode, and the device is not there, get out of MST mode */
4989 if (intel_dp->is_mst) {
4990 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4991 intel_dp->is_mst = false;
4992 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4993 }
1c767b33
ID
4994put_power:
4995 intel_display_power_put(dev_priv, power_domain);
4996
4997 return ret;
13cf5504
DA
4998}
4999
e3421a18
ZW
5000/* Return which DP Port should be selected for Transcoder DP control */
5001int
0206e353 5002intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5003{
5004 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5005 struct intel_encoder *intel_encoder;
5006 struct intel_dp *intel_dp;
e3421a18 5007
fa90ecef
PZ
5008 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5009 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5010
fa90ecef
PZ
5011 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5012 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5013 return intel_dp->output_reg;
e3421a18 5014 }
ea5b213a 5015
e3421a18
ZW
5016 return -1;
5017}
5018
36e83a18 5019/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5020bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5021{
5022 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5023 union child_device_config *p_child;
36e83a18 5024 int i;
5d8a7752
VS
5025 static const short port_mapping[] = {
5026 [PORT_B] = PORT_IDPB,
5027 [PORT_C] = PORT_IDPC,
5028 [PORT_D] = PORT_IDPD,
5029 };
36e83a18 5030
3b32a35b
VS
5031 if (port == PORT_A)
5032 return true;
5033
41aa3448 5034 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5035 return false;
5036
41aa3448
RV
5037 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5038 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5039
5d8a7752 5040 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5041 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5042 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5043 return true;
5044 }
5045 return false;
5046}
5047
0e32b39c 5048void
f684960e
CW
5049intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5050{
53b41837
YN
5051 struct intel_connector *intel_connector = to_intel_connector(connector);
5052
3f43c48d 5053 intel_attach_force_audio_property(connector);
e953fd7b 5054 intel_attach_broadcast_rgb_property(connector);
55bc60db 5055 intel_dp->color_range_auto = true;
53b41837
YN
5056
5057 if (is_edp(intel_dp)) {
5058 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5059 drm_object_attach_property(
5060 &connector->base,
53b41837 5061 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5062 DRM_MODE_SCALE_ASPECT);
5063 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5064 }
f684960e
CW
5065}
5066
dada1a9f
ID
5067static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5068{
5069 intel_dp->last_power_cycle = jiffies;
5070 intel_dp->last_power_on = jiffies;
5071 intel_dp->last_backlight_off = jiffies;
5072}
5073
67a54566
DV
5074static void
5075intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5076 struct intel_dp *intel_dp)
67a54566
DV
5077{
5078 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5079 struct edp_power_seq cur, vbt, spec,
5080 *final = &intel_dp->pps_delays;
b0a08bec
VK
5081 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5082 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5083
e39b999a
VS
5084 lockdep_assert_held(&dev_priv->pps_mutex);
5085
81ddbc69
VS
5086 /* already initialized? */
5087 if (final->t11_t12 != 0)
5088 return;
5089
b0a08bec
VK
5090 if (IS_BROXTON(dev)) {
5091 /*
5092 * TODO: BXT has 2 sets of PPS registers.
5093 * The correct register set for Broxton needs to be identified
5094 * using the VBT; hardcoded for now.
5095 */
5096 pp_ctrl_reg = BXT_PP_CONTROL(0);
5097 pp_on_reg = BXT_PP_ON_DELAYS(0);
5098 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5099 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5100 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5101 pp_on_reg = PCH_PP_ON_DELAYS;
5102 pp_off_reg = PCH_PP_OFF_DELAYS;
5103 pp_div_reg = PCH_PP_DIVISOR;
5104 } else {
bf13e81b
JN
5105 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5106
5107 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5108 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5109 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5110 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5111 }
67a54566
DV
5112
5113 /* Workaround: Need to write PP_CONTROL with the unlock key as
5114 * the very first thing. */
b0a08bec 5115 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5116
453c5420
JB
5117 pp_on = I915_READ(pp_on_reg);
5118 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5119 if (!IS_BROXTON(dev)) {
5120 I915_WRITE(pp_ctrl_reg, pp_ctl);
5121 pp_div = I915_READ(pp_div_reg);
5122 }
67a54566
DV
5123
5124 /* Pull timing values out of registers */
5125 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5126 PANEL_POWER_UP_DELAY_SHIFT;
5127
5128 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5129 PANEL_LIGHT_ON_DELAY_SHIFT;
5130
5131 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5132 PANEL_LIGHT_OFF_DELAY_SHIFT;
5133
5134 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5135 PANEL_POWER_DOWN_DELAY_SHIFT;
5136
b0a08bec
VK
5137 if (IS_BROXTON(dev)) {
5138 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5139 BXT_POWER_CYCLE_DELAY_SHIFT;
5140 if (tmp > 0)
5141 cur.t11_t12 = (tmp - 1) * 1000;
5142 else
5143 cur.t11_t12 = 0;
5144 } else {
5145 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5146 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5147 }
67a54566
DV
5148
5149 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5150 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5151
41aa3448 5152 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5153
5154 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5155 * our hw here, which are all in 100usec. */
5156 spec.t1_t3 = 210 * 10;
5157 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5158 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5159 spec.t10 = 500 * 10;
5160 /* This one is special and actually in units of 100ms, but zero
5161 * based in the hw (so we need to add 100 ms). But the sw vbt
5162 * table multiplies it by 1000 to make it in units of 100usec,
5163 * too. */
5164 spec.t11_t12 = (510 + 100) * 10;
5165
5166 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5167 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5168
5169 /* Use the max of the register settings and vbt. If both are
5170 * unset, fall back to the spec limits. */
36b5f425 5171#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5172 spec.field : \
5173 max(cur.field, vbt.field))
5174 assign_final(t1_t3);
5175 assign_final(t8);
5176 assign_final(t9);
5177 assign_final(t10);
5178 assign_final(t11_t12);
5179#undef assign_final
5180
36b5f425 5181#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5182 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5183 intel_dp->backlight_on_delay = get_delay(t8);
5184 intel_dp->backlight_off_delay = get_delay(t9);
5185 intel_dp->panel_power_down_delay = get_delay(t10);
5186 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5187#undef get_delay
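	/*
	 * The merged delays above are still in the hardware's 100 us units;
	 * get_delay() rounds them up to milliseconds, e.g. a t1_t3 of 2100
	 * (the 210 ms spec limit) becomes a panel_power_up_delay of 210.
	 */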
5188
f30d26e4
JN
5189 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5190 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5191 intel_dp->panel_power_cycle_delay);
5192
5193 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5194 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5195}
5196
5197static void
5198intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5199 struct intel_dp *intel_dp)
f30d26e4
JN
5200{
5201 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5202 u32 pp_on, pp_off, pp_div, port_sel = 0;
5203 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5204 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5205 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5206 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5207
e39b999a 5208 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5209
b0a08bec
VK
5210 if (IS_BROXTON(dev)) {
5211 /*
5212 * TODO: BXT has 2 sets of PPS registers.
5213 * The correct register set for Broxton needs to be identified
5214 * using the VBT; hardcoded for now.
5215 */
5216 pp_ctrl_reg = BXT_PP_CONTROL(0);
5217 pp_on_reg = BXT_PP_ON_DELAYS(0);
5218 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5219
5220 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5221 pp_on_reg = PCH_PP_ON_DELAYS;
5222 pp_off_reg = PCH_PP_OFF_DELAYS;
5223 pp_div_reg = PCH_PP_DIVISOR;
5224 } else {
bf13e81b
JN
5225 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5226
5227 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5228 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5229 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5230 }
5231
b2f19d1a
PZ
5232 /*
5233 * And finally store the new values in the power sequencer. The
5234 * backlight delays are set to 1 because we do manual waits on them. For
5235 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5236 * we'll end up waiting for the backlight off delay twice: once when we
5237 * do the manual sleep, and once when we disable the panel and wait for
5238 * the PP_STATUS bit to become zero.
5239 */
f30d26e4 5240 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5241 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5242 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5243 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5244 /* Compute the divisor for the pp clock, simply match the Bspec
5245 * formula. */
b0a08bec
VK
5246 if (IS_BROXTON(dev)) {
5247 pp_div = I915_READ(pp_ctrl_reg);
5248 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5249 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5250 << BXT_POWER_CYCLE_DELAY_SHIFT);
5251 } else {
5252 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5253 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5254 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5255 }
67a54566
DV
5256
5257 /* Haswell doesn't have any port selection bits for the panel
5258 * power sequencer any more. */
bc7d38a4 5259 if (IS_VALLEYVIEW(dev)) {
ad933b56 5260 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5261 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5262 if (port == PORT_A)
a24c144c 5263 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5264 else
a24c144c 5265 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5266 }
5267
453c5420
JB
5268 pp_on |= port_sel;
5269
5270 I915_WRITE(pp_on_reg, pp_on);
5271 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5272 if (IS_BROXTON(dev))
5273 I915_WRITE(pp_ctrl_reg, pp_div);
5274 else
5275 I915_WRITE(pp_div_reg, pp_div);
67a54566 5276
67a54566 5277 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5278 I915_READ(pp_on_reg),
5279 I915_READ(pp_off_reg),
b0a08bec
VK
5280 IS_BROXTON(dev) ?
5281 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5282 I915_READ(pp_div_reg));
f684960e
CW
5283}
5284
b33a2815
VK
5285/**
5286 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5287 * @dev: DRM device
5288 * @refresh_rate: RR to be programmed
5289 *
5290 * This function gets called when refresh rate (RR) has to be changed from
5291 * one frequency to another. Switches can be between high and low RR
5292 * supported by the panel or to any other RR based on media playback (in
5293 * this case, RR value needs to be passed from user space).
5294 *
5295 * The caller of this function needs to take a lock on dev_priv->drrs.
5296 */
96178eeb 5297static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5298{
5299 struct drm_i915_private *dev_priv = dev->dev_private;
5300 struct intel_encoder *encoder;
96178eeb
VK
5301 struct intel_digital_port *dig_port = NULL;
5302 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5303 struct intel_crtc_state *config = NULL;
439d7ac0 5304 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5305 u32 reg, val;
96178eeb 5306 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5307
5308 if (refresh_rate <= 0) {
5309 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5310 return;
5311 }
5312
96178eeb
VK
5313 if (intel_dp == NULL) {
5314 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5315 return;
5316 }
5317
1fcc9d1c 5318 /*
e4d59f6b
RV
5319 * FIXME: This needs proper synchronization with psr state for some
5320 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5321 */
439d7ac0 5322
96178eeb
VK
5323 dig_port = dp_to_dig_port(intel_dp);
5324 encoder = &dig_port->base;
723f9aab 5325 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5326
5327 if (!intel_crtc) {
5328 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5329 return;
5330 }
5331
6e3c9717 5332 config = intel_crtc->config;
439d7ac0 5333
96178eeb 5334 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5335 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5336 return;
5337 }
5338
96178eeb
VK
5339 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5340 refresh_rate)
439d7ac0
PB
5341 index = DRRS_LOW_RR;
5342
96178eeb 5343 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5344 DRM_DEBUG_KMS(
5345 "DRRS requested for previously set RR...ignoring\n");
5346 return;
5347 }
5348
5349 if (!intel_crtc->active) {
5350 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5351 return;
5352 }
5353
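	/*
	 * Gen8+ (other than CHV) switch the refresh rate by selecting
	 * between the precomputed M1/N1 and M2/N2 link M/N values; older
	 * platforms (and CHV) toggle the EDP_RR_MODE_SWITCH bit in PIPECONF.
	 */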
44395bfe 5354 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5355 switch (index) {
5356 case DRRS_HIGH_RR:
5357 intel_dp_set_m_n(intel_crtc, M1_N1);
5358 break;
5359 case DRRS_LOW_RR:
5360 intel_dp_set_m_n(intel_crtc, M2_N2);
5361 break;
5362 case DRRS_MAX_RR:
5363 default:
5364 DRM_ERROR("Unsupported refreshrate type\n");
5365 }
5366 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5367 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5368 val = I915_READ(reg);
a4c30b1d 5369
439d7ac0 5370 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5371 if (IS_VALLEYVIEW(dev))
5372 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5373 else
5374 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5375 } else {
6fa7aec1
VK
5376 if (IS_VALLEYVIEW(dev))
5377 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5378 else
5379 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5380 }
5381 I915_WRITE(reg, val);
5382 }
5383
4e9ac947
VK
5384 dev_priv->drrs.refresh_rate_type = index;
5385
5386 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5387}
5388
b33a2815
VK
5389/**
5390 * intel_edp_drrs_enable - init drrs struct if supported
5391 * @intel_dp: DP struct
5392 *
5393 * Initializes frontbuffer_bits and drrs.dp
5394 */
c395578e
VK
5395void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5396{
5397 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5398 struct drm_i915_private *dev_priv = dev->dev_private;
5399 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5400 struct drm_crtc *crtc = dig_port->base.base.crtc;
5401 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5402
5403 if (!intel_crtc->config->has_drrs) {
5404 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5405 return;
5406 }
5407
5408 mutex_lock(&dev_priv->drrs.mutex);
5409 if (WARN_ON(dev_priv->drrs.dp)) {
5410 DRM_ERROR("DRRS already enabled\n");
5411 goto unlock;
5412 }
5413
5414 dev_priv->drrs.busy_frontbuffer_bits = 0;
5415
5416 dev_priv->drrs.dp = intel_dp;
5417
5418unlock:
5419 mutex_unlock(&dev_priv->drrs.mutex);
5420}
5421
b33a2815
VK
5422/**
5423 * intel_edp_drrs_disable - Disable DRRS
5424 * @intel_dp: DP struct
5425 *
5426 */
c395578e
VK
5427void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5428{
5429 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5430 struct drm_i915_private *dev_priv = dev->dev_private;
5431 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5432 struct drm_crtc *crtc = dig_port->base.base.crtc;
5433 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5434
5435 if (!intel_crtc->config->has_drrs)
5436 return;
5437
5438 mutex_lock(&dev_priv->drrs.mutex);
5439 if (!dev_priv->drrs.dp) {
5440 mutex_unlock(&dev_priv->drrs.mutex);
5441 return;
5442 }
5443
5444 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5445 intel_dp_set_drrs_state(dev_priv->dev,
5446 intel_dp->attached_connector->panel.
5447 fixed_mode->vrefresh);
5448
5449 dev_priv->drrs.dp = NULL;
5450 mutex_unlock(&dev_priv->drrs.mutex);
5451
5452 cancel_delayed_work_sync(&dev_priv->drrs.work);
5453}
5454
4e9ac947
VK
5455static void intel_edp_drrs_downclock_work(struct work_struct *work)
5456{
5457 struct drm_i915_private *dev_priv =
5458 container_of(work, typeof(*dev_priv), drrs.work.work);
5459 struct intel_dp *intel_dp;
5460
5461 mutex_lock(&dev_priv->drrs.mutex);
5462
5463 intel_dp = dev_priv->drrs.dp;
5464
5465 if (!intel_dp)
5466 goto unlock;
5467
439d7ac0 5468 /*
4e9ac947
VK
5469 * The delayed work can race with an invalidate hence we need to
5470 * recheck.
439d7ac0
PB
5471 */
5472
4e9ac947
VK
5473 if (dev_priv->drrs.busy_frontbuffer_bits)
5474 goto unlock;
439d7ac0 5475
4e9ac947
VK
5476 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5477 intel_dp_set_drrs_state(dev_priv->dev,
5478 intel_dp->attached_connector->panel.
5479 downclock_mode->vrefresh);
439d7ac0 5480
4e9ac947 5481unlock:
4e9ac947 5482 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5483}
5484
b33a2815 5485/**
0ddfd203 5486 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5487 * @dev: DRM device
5488 * @frontbuffer_bits: frontbuffer plane tracking bits
5489 *
0ddfd203
R
5490 * This function gets called every time rendering on the given planes starts.
5491 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5492 *
5493 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5494 */
a93fad0f
VK
5495void intel_edp_drrs_invalidate(struct drm_device *dev,
5496 unsigned frontbuffer_bits)
5497{
5498 struct drm_i915_private *dev_priv = dev->dev_private;
5499 struct drm_crtc *crtc;
5500 enum pipe pipe;
5501
9da7d693 5502 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5503 return;
5504
88f933a8 5505 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5506
a93fad0f 5507 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5508 if (!dev_priv->drrs.dp) {
5509 mutex_unlock(&dev_priv->drrs.mutex);
5510 return;
5511 }
5512
a93fad0f
VK
5513 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5514 pipe = to_intel_crtc(crtc)->pipe;
5515
c1d038c6
DV
5516 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5517 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5518
0ddfd203 5519 /* invalidate means busy screen hence upclock */
c1d038c6 5520 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5521 intel_dp_set_drrs_state(dev_priv->dev,
5522 dev_priv->drrs.dp->attached_connector->panel.
5523 fixed_mode->vrefresh);
a93fad0f 5524
a93fad0f
VK
5525 mutex_unlock(&dev_priv->drrs.mutex);
5526}
5527
b33a2815 5528/**
0ddfd203 5529 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5530 * @dev: DRM device
5531 * @frontbuffer_bits: frontbuffer plane tracking bits
5532 *
0ddfd203
R
5533 * This function gets called every time rendering on the given planes has
5534 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5535 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5536 * if no other planes are dirty.
b33a2815
VK
5537 *
5538 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5539 */
a93fad0f
VK
5540void intel_edp_drrs_flush(struct drm_device *dev,
5541 unsigned frontbuffer_bits)
5542{
5543 struct drm_i915_private *dev_priv = dev->dev_private;
5544 struct drm_crtc *crtc;
5545 enum pipe pipe;
5546
9da7d693 5547 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5548 return;
5549
88f933a8 5550 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5551
a93fad0f 5552 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5553 if (!dev_priv->drrs.dp) {
5554 mutex_unlock(&dev_priv->drrs.mutex);
5555 return;
5556 }
5557
a93fad0f
VK
5558 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5559 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5560
5561 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5562 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5563
0ddfd203 5564 /* flush means busy screen hence upclock */
c1d038c6 5565 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5566 intel_dp_set_drrs_state(dev_priv->dev,
5567 dev_priv->drrs.dp->attached_connector->panel.
5568 fixed_mode->vrefresh);
5569
5570 /*
5571 * flush also means no more activity hence schedule downclock, if all
5572 * other fbs are quiescent too
5573 */
5574 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5575 schedule_delayed_work(&dev_priv->drrs.work,
5576 msecs_to_jiffies(1000));
5577 mutex_unlock(&dev_priv->drrs.mutex);
5578}
5579
b33a2815
VK
5580/**
5581 * DOC: Display Refresh Rate Switching (DRRS)
5582 *
5583 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5584 * which enables switching between low and high refresh rates,
5585 * dynamically, based on the usage scenario. This feature is applicable
5586 * for internal panels.
5587 *
5588 * Indication that the panel supports DRRS is given by the panel EDID, which
5589 * would list multiple refresh rates for one resolution.
5590 *
5591 * DRRS is of 2 types - static and seamless.
5592 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5593 * (may appear as a blink on screen) and is used in dock/undock scenarios.
5594 * Seamless DRRS involves changing RR without any visual effect to the user
5595 * and can be used during normal system usage. This is done by programming
5596 * certain registers.
5597 *
5598 * Support for static/seamless DRRS may be indicated in the VBT based on
5599 * inputs from the panel spec.
5600 *
5601 * DRRS saves power by switching to low RR based on usage scenarios.
5602 *
5603 * eDP DRRS:-
5604 * The implementation is based on frontbuffer tracking implementation.
5605 * When there is a disturbance on the screen triggered by user activity or a
5606 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5607 * When there is no movement on screen, after a timeout of 1 second, a switch
5608 * to low RR is made.
5609 * For integration with frontbuffer tracking code,
5610 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5611 *
5612 * DRRS can be further extended to support other internal panels and also
5613 * the scenario of video playback wherein RR is set based on the rate
5614 * requested by userspace.
5615 */
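
/*
 * Typical eDP DRRS flow with frontbuffer tracking: rendering starts ->
 * intel_edp_drrs_invalidate() forces the high refresh rate; rendering or a
 * flip completes -> intel_edp_drrs_flush() keeps the high rate and arms the
 * 1 second idleness work, which drops back to the low refresh rate via
 * intel_dp_set_drrs_state() once no frontbuffer bits remain busy.
 */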
5616
5617/**
5618 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5619 * @intel_connector: eDP connector
5620 * @fixed_mode: preferred mode of panel
5621 *
5622 * This function is called only once at driver load to initialize basic
5623 * DRRS stuff.
5624 *
5625 * Returns:
5626 * Downclock mode if panel supports it, else return NULL.
5627 * DRRS support is determined by the presence of downclock mode (apart
5628 * from VBT setting).
5629 */
4f9db5b5 5630static struct drm_display_mode *
96178eeb
VK
5631intel_dp_drrs_init(struct intel_connector *intel_connector,
5632 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5633{
5634 struct drm_connector *connector = &intel_connector->base;
96178eeb 5635 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5636 struct drm_i915_private *dev_priv = dev->dev_private;
5637 struct drm_display_mode *downclock_mode = NULL;
5638
9da7d693
DV
5639 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5640 mutex_init(&dev_priv->drrs.mutex);
5641
4f9db5b5
PB
5642 if (INTEL_INFO(dev)->gen <= 6) {
5643 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5644 return NULL;
5645 }
5646
5647 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5648 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5649 return NULL;
5650 }
5651
5652 downclock_mode = intel_find_panel_downclock
5653 (dev, fixed_mode, connector);
5654
5655 if (!downclock_mode) {
a1d26342 5656 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5657 return NULL;
5658 }
5659
96178eeb 5660 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5661
96178eeb 5662 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5663 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5664 return downclock_mode;
5665}
5666
ed92f0b2 5667static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5668 struct intel_connector *intel_connector)
ed92f0b2
PZ
5669{
5670 struct drm_connector *connector = &intel_connector->base;
5671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5672 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5673 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5674 struct drm_i915_private *dev_priv = dev->dev_private;
5675 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5676 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5677 bool has_dpcd;
5678 struct drm_display_mode *scan;
5679 struct edid *edid;
6517d273 5680 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5681
5682 if (!is_edp(intel_dp))
5683 return true;
5684
49e6bc51
VS
5685 pps_lock(intel_dp);
5686 intel_edp_panel_vdd_sanitize(intel_dp);
5687 pps_unlock(intel_dp);
63635217 5688
ed92f0b2 5689 /* Cache DPCD and EDID for edp. */
ed92f0b2 5690 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5691
5692 if (has_dpcd) {
5693 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5694 dev_priv->no_aux_handshake =
5695 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5696 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5697 } else {
5698 /* if this fails, presume the device is a ghost */
5699 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5700 return false;
5701 }
5702
5703 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5704 pps_lock(intel_dp);
36b5f425 5705 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5706 pps_unlock(intel_dp);
ed92f0b2 5707
060c8778 5708 mutex_lock(&dev->mode_config.mutex);
0b99836f 5709 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5710 if (edid) {
5711 if (drm_add_edid_modes(connector, edid)) {
5712 drm_mode_connector_update_edid_property(connector,
5713 edid);
5714 drm_edid_to_eld(connector, edid);
5715 } else {
5716 kfree(edid);
5717 edid = ERR_PTR(-EINVAL);
5718 }
5719 } else {
5720 edid = ERR_PTR(-ENOENT);
5721 }
5722 intel_connector->edid = edid;
5723
5724 /* prefer fixed mode from EDID if available */
5725 list_for_each_entry(scan, &connector->probed_modes, head) {
5726 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5727 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5728 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5729 intel_connector, fixed_mode);
ed92f0b2
PZ
5730 break;
5731 }
5732 }
5733
5734 /* fallback to VBT if available for eDP */
5735 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5736 fixed_mode = drm_mode_duplicate(dev,
5737 dev_priv->vbt.lfp_lvds_vbt_mode);
5738 if (fixed_mode)
5739 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5740 }
060c8778 5741 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5742
01527b31
CT
5743 if (IS_VALLEYVIEW(dev)) {
5744 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5745 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5746
5747 /*
5748 * Figure out the current pipe for the initial backlight setup.
5749 * If the current pipe isn't valid, try the PPS pipe, and if that
5750 * fails just assume pipe A.
5751 */
5752 if (IS_CHERRYVIEW(dev))
5753 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5754 else
5755 pipe = PORT_TO_PIPE(intel_dp->DP);
5756
5757 if (pipe != PIPE_A && pipe != PIPE_B)
5758 pipe = intel_dp->pps_pipe;
5759
5760 if (pipe != PIPE_A && pipe != PIPE_B)
5761 pipe = PIPE_A;
5762
5763 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5764 pipe_name(pipe));
01527b31
CT
5765 }
5766
4f9db5b5 5767 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5768 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5769 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5770
5771 return true;
5772}
5773
16c25533 5774bool
f0fec3f2
PZ
5775intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5776 struct intel_connector *intel_connector)
a4fc5ed6 5777{
f0fec3f2
PZ
5778 struct drm_connector *connector = &intel_connector->base;
5779 struct intel_dp *intel_dp = &intel_dig_port->dp;
5780 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5781 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5782 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5783 enum port port = intel_dig_port->port;
0b99836f 5784 int type;
a4fc5ed6 5785
a4a5d2f8
VS
5786 intel_dp->pps_pipe = INVALID_PIPE;
5787
ec5b01dd 5788 /* intel_dp vfuncs */
b6b5e383
DL
5789 if (INTEL_INFO(dev)->gen >= 9)
5790 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5791 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5792 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5793 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5794 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5795 else if (HAS_PCH_SPLIT(dev))
5796 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5797 else
5798 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5799
b9ca5fad
DL
5800 if (INTEL_INFO(dev)->gen >= 9)
5801 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5802 else
5803 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5804
0767935e
DV
5805 /* Preserve the current hw state. */
5806 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5807 intel_dp->attached_connector = intel_connector;
3d3dc149 5808
3b32a35b 5809 if (intel_dp_is_edp(dev, port))
b329530c 5810 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5811 else
5812 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5813
f7d24902
ID
5814 /*
5815 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5816 * for DP the encoder type can be set by the caller to
5817 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5818 */
5819 if (type == DRM_MODE_CONNECTOR_eDP)
5820 intel_encoder->type = INTEL_OUTPUT_EDP;
5821
c17ed5b5
VS
5822 /* eDP only on port B and/or C on vlv/chv */
5823 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5824 port != PORT_B && port != PORT_C))
5825 return false;
5826
e7281eab
ID
5827 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5828 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5829 port_name(port));
5830
b329530c 5831 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5832 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5833
a4fc5ed6
KP
5834 connector->interlace_allowed = true;
5835 connector->doublescan_allowed = 0;
5836
f0fec3f2 5837 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5838 edp_panel_vdd_work);
a4fc5ed6 5839
df0e9248 5840 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5841 drm_connector_register(connector);
a4fc5ed6 5842
affa9354 5843 if (HAS_DDI(dev))
bcbc889b
PZ
5844 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5845 else
5846 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5847 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5848
0b99836f 5849 /* Set up the hotplug pin. */
ab9d7c30
PZ
5850 switch (port) {
5851 case PORT_A:
1d843f9d 5852 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5853 break;
5854 case PORT_B:
1d843f9d 5855 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5856 break;
5857 case PORT_C:
1d843f9d 5858 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5859 break;
5860 case PORT_D:
1d843f9d 5861 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5862 break;
26951caf
XZ
5863 case PORT_E:
5864 intel_encoder->hpd_pin = HPD_PORT_E;
5865 break;
ab9d7c30 5866 default:
ad1c0b19 5867 BUG();
5eb08b69
ZW
5868 }
5869
dada1a9f 5870 if (is_edp(intel_dp)) {
773538e8 5871 pps_lock(intel_dp);
1e74a324
VS
5872 intel_dp_init_panel_power_timestamps(intel_dp);
5873 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5874 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5875 else
36b5f425 5876 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5877 pps_unlock(intel_dp);
dada1a9f 5878 }
0095e6dc 5879
9d1a1031 5880 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5881
0e32b39c 5882 /* init MST on ports that can support it */
0c9b3715
JN
5883 if (HAS_DP_MST(dev) &&
5884 (port == PORT_B || port == PORT_C || port == PORT_D))
5885 intel_dp_mst_encoder_init(intel_dig_port,
5886 intel_connector->base.base.id);
0e32b39c 5887
36b5f425 5888 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5889 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5890 if (is_edp(intel_dp)) {
5891 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5892 /*
5893 * vdd might still be enabled due to the delayed vdd off.
5894 * Make sure vdd is actually turned off here.
5895 */
773538e8 5896 pps_lock(intel_dp);
4be73780 5897 edp_panel_vdd_off_sync(intel_dp);
773538e8 5898 pps_unlock(intel_dp);
15b1d171 5899 }
34ea3d38 5900 drm_connector_unregister(connector);
b2f246a8 5901 drm_connector_cleanup(connector);
16c25533 5902 return false;
b2f246a8 5903 }
32f9d658 5904
f684960e
CW
5905 intel_dp_add_properties(intel_dp, connector);
5906
a4fc5ed6
KP
5907 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5908 * 0xd. Failure to do so will result in spurious interrupts being
5909 * generated on the port when a cable is not attached.
5910 */
5911 if (IS_G4X(dev) && !IS_GM45(dev)) {
5912 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5913 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5914 }
16c25533 5915
aa7471d2
JN
5916 i915_debugfs_connector_add(connector);
5917
16c25533 5918 return true;
a4fc5ed6 5919}
f0fec3f2
PZ
5920
5921void
5922intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5923{
13cf5504 5924 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5925 struct intel_digital_port *intel_dig_port;
5926 struct intel_encoder *intel_encoder;
5927 struct drm_encoder *encoder;
5928 struct intel_connector *intel_connector;
5929
b14c5679 5930 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5931 if (!intel_dig_port)
5932 return;
5933
08d9bc92 5934 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5935 if (!intel_connector) {
5936 kfree(intel_dig_port);
5937 return;
5938 }
5939
5940 intel_encoder = &intel_dig_port->base;
5941 encoder = &intel_encoder->base;
5942
5943 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5944 DRM_MODE_ENCODER_TMDS);
5945
5bfe2ac0 5946 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5947 intel_encoder->disable = intel_disable_dp;
00c09d70 5948 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5949 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5950 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5951 if (IS_CHERRYVIEW(dev)) {
9197c88b 5952 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5953 intel_encoder->pre_enable = chv_pre_enable_dp;
5954 intel_encoder->enable = vlv_enable_dp;
580d3811 5955 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5956 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5957 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5958 intel_encoder->pre_enable = vlv_pre_enable_dp;
5959 intel_encoder->enable = vlv_enable_dp;
49277c31 5960 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5961 } else {
ecff4f3b
JN
5962 intel_encoder->pre_enable = g4x_pre_enable_dp;
5963 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5964 if (INTEL_INFO(dev)->gen >= 5)
5965 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5966 }
f0fec3f2 5967
174edf1f 5968 intel_dig_port->port = port;
f0fec3f2
PZ
5969 intel_dig_port->dp.output_reg = output_reg;
5970
00c09d70 5971 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5972 if (IS_CHERRYVIEW(dev)) {
5973 if (port == PORT_D)
5974 intel_encoder->crtc_mask = 1 << 2;
5975 else
5976 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5977 } else {
5978 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5979 }
bc079e8b 5980 intel_encoder->cloneable = 0;
f0fec3f2 5981
13cf5504 5982 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 5983 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 5984
15b1d171
PZ
5985 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5986 drm_encoder_cleanup(encoder);
5987 kfree(intel_dig_port);
b2f246a8 5988 kfree(intel_connector);
15b1d171 5989 }
f0fec3f2 5990}
0e32b39c
DA
5991
5992void intel_dp_mst_suspend(struct drm_device *dev)
5993{
5994 struct drm_i915_private *dev_priv = dev->dev_private;
5995 int i;
5996
5997 /* disable MST */
5998 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 5999 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6000 if (!intel_dig_port)
6001 continue;
6002
6003 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6004 if (!intel_dig_port->dp.can_mst)
6005 continue;
6006 if (intel_dig_port->dp.is_mst)
6007 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6008 }
6009 }
6010}
6011
6012void intel_dp_mst_resume(struct drm_device *dev)
6013{
6014 struct drm_i915_private *dev_priv = dev->dev_private;
6015 int i;
6016
6017 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6018 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6019 if (!intel_dig_port)
6020 continue;
6021 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6022 int ret;
6023
6024 if (!intel_dig_port->dp.can_mst)
6025 continue;
6026
6027 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6028 if (ret != 0) {
6029 intel_dp_check_mst_status(&intel_dig_port->dp);
6030 }
6031 }
6032 }
6033}