drm/i915: Clean up the CPT DP .get_hw_state() port readout
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf
CML
50struct dp_link_dpll {
51 int link_bw;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
56 { DP_LINK_BW_1_62,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { DP_LINK_BW_2_7,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
63 { DP_LINK_BW_1_62,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { DP_LINK_BW_2_7,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5
CML
69static const struct dp_link_dpll vlv_dpll[] = {
70 { DP_LINK_BW_1_62,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
72 { DP_LINK_BW_2_7,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
85 */
86 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63
SJ
93
94static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15 95 324000, 432000, 540000 };
fe51bfb9
VS
96static const int chv_rates[] = { 162000, 202500, 210000, 216000,
97 243000, 270000, 324000, 405000,
98 420000, 432000, 540000 };
f4896f15 99static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 100
cfcb0fc9
JB
101/**
102 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
103 * @intel_dp: DP struct
104 *
105 * If a CPU or PCH DP output is attached to an eDP panel, this function
106 * will return true, and false otherwise.
107 */
108static bool is_edp(struct intel_dp *intel_dp)
109{
da63a9f2
PZ
110 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
111
112 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
113}
114
68b4d824 115static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 116{
68b4d824
ID
117 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
118
119 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
120}
121
df0e9248
CW
122static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
123{
fa90ecef 124 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
125}
126
ea5b213a 127static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 128static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 129static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 130static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
131static void vlv_steal_power_sequencer(struct drm_device *dev,
132 enum pipe pipe);
a4fc5ed6 133
ed4e9c1d
VS
134static int
135intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 136{
7183dc29 137 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
138
139 switch (max_link_bw) {
140 case DP_LINK_BW_1_62:
141 case DP_LINK_BW_2_7:
1db10e28 142 case DP_LINK_BW_5_4:
d4eead50 143 break;
a4fc5ed6 144 default:
d4eead50
ID
145 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
146 max_link_bw);
a4fc5ed6
KP
147 max_link_bw = DP_LINK_BW_1_62;
148 break;
149 }
150 return max_link_bw;
151}
152
eeb6324d
PZ
153static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
154{
155 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
156 struct drm_device *dev = intel_dig_port->base.base.dev;
157 u8 source_max, sink_max;
158
159 source_max = 4;
160 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
161 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
162 source_max = 2;
163
164 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
165
166 return min(source_max, sink_max);
167}
168
cd9dde44
AJ
169/*
170 * The units on the numbers in the next two are... bizarre. Examples will
171 * make it clearer; this one parallels an example in the eDP spec.
172 *
173 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 *
175 * 270000 * 1 * 8 / 10 == 216000
176 *
177 * The actual data capacity of that configuration is 2.16Gbit/s, so the
178 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
179 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
180 * 119000. At 18bpp that's 2142000 kilobits per second.
181 *
182 * Thus the strange-looking division by 10 in intel_dp_link_required, to
183 * get the result in decakilobits instead of kilobits.
184 */
185
a4fc5ed6 186static int
c898261c 187intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 188{
cd9dde44 189 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
190}
191
fe27d53e
DA
192static int
193intel_dp_max_data_rate(int max_link_clock, int max_lanes)
194{
195 return (max_link_clock * max_lanes * 8) / 10;
196}
197
c19de8eb 198static enum drm_mode_status
a4fc5ed6
KP
199intel_dp_mode_valid(struct drm_connector *connector,
200 struct drm_display_mode *mode)
201{
df0e9248 202 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
203 struct intel_connector *intel_connector = to_intel_connector(connector);
204 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
205 int target_clock = mode->clock;
206 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 207
dd06f90e
JN
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
210 return MODE_PANEL;
211
dd06f90e 212 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 213 return MODE_PANEL;
03afc4a2
DV
214
215 target_clock = fixed_mode->clock;
7de56f43
ZY
216 }
217
50fec21a 218 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 219 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
224 if (mode_rate > max_rate)
c4867936 225 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
0af78a2b
DV
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
a4fc5ed6
KP
233 return MODE_OK;
234}
235
a4f1289e 236uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
237{
238 int i;
239 uint32_t v = 0;
240
241 if (src_bytes > 4)
242 src_bytes = 4;
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 return v;
246}
247
c2af70e2 248static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
249{
250 int i;
251 if (dst_bytes > 4)
252 dst_bytes = 4;
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
255}
256
fb0f8fbf
KP
257/* hrawclock is 1/4 the FSB frequency */
258static int
259intel_hrawclk(struct drm_device *dev)
260{
261 struct drm_i915_private *dev_priv = dev->dev_private;
262 uint32_t clkcfg;
263
9473c8f4
VP
264 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
265 if (IS_VALLEYVIEW(dev))
266 return 200;
267
fb0f8fbf
KP
268 clkcfg = I915_READ(CLKCFG);
269 switch (clkcfg & CLKCFG_FSB_MASK) {
270 case CLKCFG_FSB_400:
271 return 100;
272 case CLKCFG_FSB_533:
273 return 133;
274 case CLKCFG_FSB_667:
275 return 166;
276 case CLKCFG_FSB_800:
277 return 200;
278 case CLKCFG_FSB_1067:
279 return 266;
280 case CLKCFG_FSB_1333:
281 return 333;
282 /* these two are just a guess; one of them might be right */
283 case CLKCFG_FSB_1600:
284 case CLKCFG_FSB_1600_ALT:
285 return 400;
286 default:
287 return 133;
288 }
289}
290
bf13e81b
JN
291static void
292intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 293 struct intel_dp *intel_dp);
bf13e81b
JN
294static void
295intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 296 struct intel_dp *intel_dp);
bf13e81b 297
773538e8
VS
298static void pps_lock(struct intel_dp *intel_dp)
299{
300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 struct intel_encoder *encoder = &intel_dig_port->base;
302 struct drm_device *dev = encoder->base.dev;
303 struct drm_i915_private *dev_priv = dev->dev_private;
304 enum intel_display_power_domain power_domain;
305
306 /*
307 * See vlv_power_sequencer_reset() why we need
308 * a power domain reference here.
309 */
310 power_domain = intel_display_port_power_domain(encoder);
311 intel_display_power_get(dev_priv, power_domain);
312
313 mutex_lock(&dev_priv->pps_mutex);
314}
315
316static void pps_unlock(struct intel_dp *intel_dp)
317{
318 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
319 struct intel_encoder *encoder = &intel_dig_port->base;
320 struct drm_device *dev = encoder->base.dev;
321 struct drm_i915_private *dev_priv = dev->dev_private;
322 enum intel_display_power_domain power_domain;
323
324 mutex_unlock(&dev_priv->pps_mutex);
325
326 power_domain = intel_display_port_power_domain(encoder);
327 intel_display_power_put(dev_priv, power_domain);
328}
329
961a0db0
VS
330static void
331vlv_power_sequencer_kick(struct intel_dp *intel_dp)
332{
333 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
334 struct drm_device *dev = intel_dig_port->base.base.dev;
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 337 bool pll_enabled;
961a0db0
VS
338 uint32_t DP;
339
340 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
341 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
342 pipe_name(pipe), port_name(intel_dig_port->port)))
343 return;
344
345 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
346 pipe_name(pipe), port_name(intel_dig_port->port));
347
348 /* Preserve the BIOS-computed detected bit. This is
349 * supposed to be read-only.
350 */
351 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
352 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
353 DP |= DP_PORT_WIDTH(1);
354 DP |= DP_LINK_TRAIN_PAT_1;
355
356 if (IS_CHERRYVIEW(dev))
357 DP |= DP_PIPE_SELECT_CHV(pipe);
358 else if (pipe == PIPE_B)
359 DP |= DP_PIPEB_SELECT;
360
d288f65f
VS
361 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
362
363 /*
364 * The DPLL for the pipe must be enabled for this to work.
365 * So enable temporarily it if it's not already enabled.
366 */
367 if (!pll_enabled)
368 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
369 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
370
961a0db0
VS
371 /*
372 * Similar magic as in intel_dp_enable_port().
373 * We _must_ do this port enable + disable trick
374 * to make this power seqeuencer lock onto the port.
375 * Otherwise even VDD force bit won't work.
376 */
377 I915_WRITE(intel_dp->output_reg, DP);
378 POSTING_READ(intel_dp->output_reg);
379
380 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
381 POSTING_READ(intel_dp->output_reg);
382
383 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
384 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
385
386 if (!pll_enabled)
387 vlv_force_pll_off(dev, pipe);
961a0db0
VS
388}
389
bf13e81b
JN
390static enum pipe
391vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
392{
393 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
394 struct drm_device *dev = intel_dig_port->base.base.dev;
395 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
396 struct intel_encoder *encoder;
397 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 398 enum pipe pipe;
bf13e81b 399
e39b999a 400 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 401
a8c3344e
VS
402 /* We should never land here with regular DP ports */
403 WARN_ON(!is_edp(intel_dp));
404
a4a5d2f8
VS
405 if (intel_dp->pps_pipe != INVALID_PIPE)
406 return intel_dp->pps_pipe;
407
408 /*
409 * We don't have power sequencer currently.
410 * Pick one that's not used by other ports.
411 */
412 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
413 base.head) {
414 struct intel_dp *tmp;
415
416 if (encoder->type != INTEL_OUTPUT_EDP)
417 continue;
418
419 tmp = enc_to_intel_dp(&encoder->base);
420
421 if (tmp->pps_pipe != INVALID_PIPE)
422 pipes &= ~(1 << tmp->pps_pipe);
423 }
424
425 /*
426 * Didn't find one. This should not happen since there
427 * are two power sequencers and up to two eDP ports.
428 */
429 if (WARN_ON(pipes == 0))
a8c3344e
VS
430 pipe = PIPE_A;
431 else
432 pipe = ffs(pipes) - 1;
a4a5d2f8 433
a8c3344e
VS
434 vlv_steal_power_sequencer(dev, pipe);
435 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
436
437 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
438 pipe_name(intel_dp->pps_pipe),
439 port_name(intel_dig_port->port));
440
441 /* init power sequencer on this pipe and port */
36b5f425
VS
442 intel_dp_init_panel_power_sequencer(dev, intel_dp);
443 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 444
961a0db0
VS
445 /*
446 * Even vdd force doesn't work until we've made
447 * the power sequencer lock in on the port.
448 */
449 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
450
451 return intel_dp->pps_pipe;
452}
453
6491ab27
VS
454typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
455 enum pipe pipe);
456
457static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
461}
462
463static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
467}
468
469static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
470 enum pipe pipe)
471{
472 return true;
473}
bf13e81b 474
a4a5d2f8 475static enum pipe
6491ab27
VS
476vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
477 enum port port,
478 vlv_pipe_check pipe_check)
a4a5d2f8
VS
479{
480 enum pipe pipe;
bf13e81b 481
bf13e81b
JN
482 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
483 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
484 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
485
486 if (port_sel != PANEL_PORT_SELECT_VLV(port))
487 continue;
488
6491ab27
VS
489 if (!pipe_check(dev_priv, pipe))
490 continue;
491
a4a5d2f8 492 return pipe;
bf13e81b
JN
493 }
494
a4a5d2f8
VS
495 return INVALID_PIPE;
496}
497
498static void
499vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
500{
501 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
502 struct drm_device *dev = intel_dig_port->base.base.dev;
503 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
504 enum port port = intel_dig_port->port;
505
506 lockdep_assert_held(&dev_priv->pps_mutex);
507
508 /* try to find a pipe with this port selected */
6491ab27
VS
509 /* first pick one where the panel is on */
510 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
511 vlv_pipe_has_pp_on);
512 /* didn't find one? pick one where vdd is on */
513 if (intel_dp->pps_pipe == INVALID_PIPE)
514 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
515 vlv_pipe_has_vdd_on);
516 /* didn't find one? pick one with just the correct port */
517 if (intel_dp->pps_pipe == INVALID_PIPE)
518 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
519 vlv_pipe_any);
a4a5d2f8
VS
520
521 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
522 if (intel_dp->pps_pipe == INVALID_PIPE) {
523 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
524 port_name(port));
525 return;
bf13e81b
JN
526 }
527
a4a5d2f8
VS
528 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
529 port_name(port), pipe_name(intel_dp->pps_pipe));
530
36b5f425
VS
531 intel_dp_init_panel_power_sequencer(dev, intel_dp);
532 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
533}
534
773538e8
VS
535void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
536{
537 struct drm_device *dev = dev_priv->dev;
538 struct intel_encoder *encoder;
539
540 if (WARN_ON(!IS_VALLEYVIEW(dev)))
541 return;
542
543 /*
544 * We can't grab pps_mutex here due to deadlock with power_domain
545 * mutex when power_domain functions are called while holding pps_mutex.
546 * That also means that in order to use pps_pipe the code needs to
547 * hold both a power domain reference and pps_mutex, and the power domain
548 * reference get/put must be done while _not_ holding pps_mutex.
549 * pps_{lock,unlock}() do these steps in the correct order, so one
550 * should use them always.
551 */
552
553 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
554 struct intel_dp *intel_dp;
555
556 if (encoder->type != INTEL_OUTPUT_EDP)
557 continue;
558
559 intel_dp = enc_to_intel_dp(&encoder->base);
560 intel_dp->pps_pipe = INVALID_PIPE;
561 }
bf13e81b
JN
562}
563
564static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
565{
566 struct drm_device *dev = intel_dp_to_dev(intel_dp);
567
568 if (HAS_PCH_SPLIT(dev))
569 return PCH_PP_CONTROL;
570 else
571 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
572}
573
574static u32 _pp_stat_reg(struct intel_dp *intel_dp)
575{
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577
578 if (HAS_PCH_SPLIT(dev))
579 return PCH_PP_STATUS;
580 else
581 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
582}
583
01527b31
CT
584/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
585 This function only applicable when panel PM state is not to be tracked */
586static int edp_notify_handler(struct notifier_block *this, unsigned long code,
587 void *unused)
588{
589 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
590 edp_notifier);
591 struct drm_device *dev = intel_dp_to_dev(intel_dp);
592 struct drm_i915_private *dev_priv = dev->dev_private;
593 u32 pp_div;
594 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
595
596 if (!is_edp(intel_dp) || code != SYS_RESTART)
597 return 0;
598
773538e8 599 pps_lock(intel_dp);
e39b999a 600
01527b31 601 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
602 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
603
01527b31
CT
604 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
605 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
606 pp_div = I915_READ(pp_div_reg);
607 pp_div &= PP_REFERENCE_DIVIDER_MASK;
608
609 /* 0x1F write to PP_DIV_REG sets max cycle delay */
610 I915_WRITE(pp_div_reg, pp_div | 0x1F);
611 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
612 msleep(intel_dp->panel_power_cycle_delay);
613 }
614
773538e8 615 pps_unlock(intel_dp);
e39b999a 616
01527b31
CT
617 return 0;
618}
619
4be73780 620static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 621{
30add22d 622 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
623 struct drm_i915_private *dev_priv = dev->dev_private;
624
e39b999a
VS
625 lockdep_assert_held(&dev_priv->pps_mutex);
626
9a42356b
VS
627 if (IS_VALLEYVIEW(dev) &&
628 intel_dp->pps_pipe == INVALID_PIPE)
629 return false;
630
bf13e81b 631 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
632}
633
4be73780 634static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
637 struct drm_i915_private *dev_priv = dev->dev_private;
638
e39b999a
VS
639 lockdep_assert_held(&dev_priv->pps_mutex);
640
9a42356b
VS
641 if (IS_VALLEYVIEW(dev) &&
642 intel_dp->pps_pipe == INVALID_PIPE)
643 return false;
644
773538e8 645 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
646}
647
9b984dae
KP
648static void
649intel_dp_check_edp(struct intel_dp *intel_dp)
650{
30add22d 651 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 652 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 653
9b984dae
KP
654 if (!is_edp(intel_dp))
655 return;
453c5420 656
4be73780 657 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
658 WARN(1, "eDP powered off while attempting aux channel communication.\n");
659 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
660 I915_READ(_pp_stat_reg(intel_dp)),
661 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
662 }
663}
664
9ee32fea
DV
665static uint32_t
666intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
667{
668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
669 struct drm_device *dev = intel_dig_port->base.base.dev;
670 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 671 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
672 uint32_t status;
673 bool done;
674
ef04f00d 675#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 676 if (has_aux_irq)
b18ac466 677 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 678 msecs_to_jiffies_timeout(10));
9ee32fea
DV
679 else
680 done = wait_for_atomic(C, 10) == 0;
681 if (!done)
682 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
683 has_aux_irq);
684#undef C
685
686 return status;
687}
688
ec5b01dd 689static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 690{
174edf1f
PZ
691 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
692 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 693
ec5b01dd
DL
694 /*
695 * The clock divider is based off the hrawclk, and would like to run at
696 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 697 */
ec5b01dd
DL
698 return index ? 0 : intel_hrawclk(dev) / 2;
699}
700
701static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
702{
703 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
704 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 705 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
706
707 if (index)
708 return 0;
709
710 if (intel_dig_port->port == PORT_A) {
469d4b2a 711 return DIV_ROUND_UP(dev_priv->display.get_display_clock_speed(dev), 2000);
ec5b01dd
DL
712 } else {
713 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
714 }
715}
716
717static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
718{
719 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
720 struct drm_device *dev = intel_dig_port->base.base.dev;
721 struct drm_i915_private *dev_priv = dev->dev_private;
722
723 if (intel_dig_port->port == PORT_A) {
724 if (index)
725 return 0;
1652d19e 726 return DIV_ROUND_CLOSEST(dev_priv->display.get_display_clock_speed(dev), 2000);
2c55c336
JN
727 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
728 /* Workaround for non-ULT HSW */
bc86625a
CW
729 switch (index) {
730 case 0: return 63;
731 case 1: return 72;
732 default: return 0;
733 }
ec5b01dd 734 } else {
bc86625a 735 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 736 }
b84a1cf8
RV
737}
738
ec5b01dd
DL
739static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
740{
741 return index ? 0 : 100;
742}
743
b6b5e383
DL
744static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
745{
746 /*
747 * SKL doesn't need us to program the AUX clock divider (Hardware will
748 * derive the clock from CDCLK automatically). We still implement the
749 * get_aux_clock_divider vfunc to plug-in into the existing code.
750 */
751 return index ? 0 : 1;
752}
753
5ed12a19
DL
754static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
755 bool has_aux_irq,
756 int send_bytes,
757 uint32_t aux_clock_divider)
758{
759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
760 struct drm_device *dev = intel_dig_port->base.base.dev;
761 uint32_t precharge, timeout;
762
763 if (IS_GEN6(dev))
764 precharge = 3;
765 else
766 precharge = 5;
767
768 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
769 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
770 else
771 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
772
773 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 774 DP_AUX_CH_CTL_DONE |
5ed12a19 775 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 776 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 777 timeout |
788d4433 778 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
779 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
780 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 781 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
782}
783
b9ca5fad
DL
784static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
785 bool has_aux_irq,
786 int send_bytes,
787 uint32_t unused)
788{
789 return DP_AUX_CH_CTL_SEND_BUSY |
790 DP_AUX_CH_CTL_DONE |
791 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
792 DP_AUX_CH_CTL_TIME_OUT_ERROR |
793 DP_AUX_CH_CTL_TIME_OUT_1600us |
794 DP_AUX_CH_CTL_RECEIVE_ERROR |
795 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
796 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
797}
798
b84a1cf8
RV
799static int
800intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 801 const uint8_t *send, int send_bytes,
b84a1cf8
RV
802 uint8_t *recv, int recv_size)
803{
804 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
805 struct drm_device *dev = intel_dig_port->base.base.dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
807 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
808 uint32_t ch_data = ch_ctl + 4;
bc86625a 809 uint32_t aux_clock_divider;
b84a1cf8
RV
810 int i, ret, recv_bytes;
811 uint32_t status;
5ed12a19 812 int try, clock = 0;
4e6b788c 813 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
814 bool vdd;
815
773538e8 816 pps_lock(intel_dp);
e39b999a 817
72c3500a
VS
818 /*
819 * We will be called with VDD already enabled for dpcd/edid/oui reads.
820 * In such cases we want to leave VDD enabled and it's up to upper layers
821 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
822 * ourselves.
823 */
1e0560e0 824 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
825
826 /* dp aux is extremely sensitive to irq latency, hence request the
827 * lowest possible wakeup latency and so prevent the cpu from going into
828 * deep sleep states.
829 */
830 pm_qos_update_request(&dev_priv->pm_qos, 0);
831
832 intel_dp_check_edp(intel_dp);
5eb08b69 833
c67a470b
PZ
834 intel_aux_display_runtime_get(dev_priv);
835
11bee43e
JB
836 /* Try to wait for any previous AUX channel activity */
837 for (try = 0; try < 3; try++) {
ef04f00d 838 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
839 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
840 break;
841 msleep(1);
842 }
843
844 if (try == 3) {
845 WARN(1, "dp_aux_ch not started status 0x%08x\n",
846 I915_READ(ch_ctl));
9ee32fea
DV
847 ret = -EBUSY;
848 goto out;
4f7f7b7e
CW
849 }
850
46a5ae9f
PZ
851 /* Only 5 data registers! */
852 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
853 ret = -E2BIG;
854 goto out;
855 }
856
ec5b01dd 857 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
858 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
859 has_aux_irq,
860 send_bytes,
861 aux_clock_divider);
5ed12a19 862
bc86625a
CW
863 /* Must try at least 3 times according to DP spec */
864 for (try = 0; try < 5; try++) {
865 /* Load the send data into the aux channel data registers */
866 for (i = 0; i < send_bytes; i += 4)
867 I915_WRITE(ch_data + i,
a4f1289e
RV
868 intel_dp_pack_aux(send + i,
869 send_bytes - i));
bc86625a
CW
870
871 /* Send the command and wait for it to complete */
5ed12a19 872 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
873
874 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
875
876 /* Clear done status and any errors */
877 I915_WRITE(ch_ctl,
878 status |
879 DP_AUX_CH_CTL_DONE |
880 DP_AUX_CH_CTL_TIME_OUT_ERROR |
881 DP_AUX_CH_CTL_RECEIVE_ERROR);
882
74ebf294 883 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 884 continue;
74ebf294
TP
885
886 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
887 * 400us delay required for errors and timeouts
888 * Timeout errors from the HW already meet this
889 * requirement so skip to next iteration
890 */
891 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
892 usleep_range(400, 500);
bc86625a 893 continue;
74ebf294 894 }
bc86625a
CW
895 if (status & DP_AUX_CH_CTL_DONE)
896 break;
897 }
4f7f7b7e 898 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
899 break;
900 }
901
a4fc5ed6 902 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 903 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
904 ret = -EBUSY;
905 goto out;
a4fc5ed6
KP
906 }
907
908 /* Check for timeout or receive error.
909 * Timeouts occur when the sink is not connected
910 */
a5b3da54 911 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 912 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
913 ret = -EIO;
914 goto out;
a5b3da54 915 }
1ae8c0a5
KP
916
917 /* Timeouts occur when the device isn't connected, so they're
918 * "normal" -- don't fill the kernel log with these */
a5b3da54 919 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 920 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
921 ret = -ETIMEDOUT;
922 goto out;
a4fc5ed6
KP
923 }
924
925 /* Unload any bytes sent back from the other side */
926 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
927 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
928 if (recv_bytes > recv_size)
929 recv_bytes = recv_size;
0206e353 930
4f7f7b7e 931 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
932 intel_dp_unpack_aux(I915_READ(ch_data + i),
933 recv + i, recv_bytes - i);
a4fc5ed6 934
9ee32fea
DV
935 ret = recv_bytes;
936out:
937 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 938 intel_aux_display_runtime_put(dev_priv);
9ee32fea 939
884f19e9
JN
940 if (vdd)
941 edp_panel_vdd_off(intel_dp, false);
942
773538e8 943 pps_unlock(intel_dp);
e39b999a 944
9ee32fea 945 return ret;
a4fc5ed6
KP
946}
947
a6c8aff0
JN
948#define BARE_ADDRESS_SIZE 3
949#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
950static ssize_t
951intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 952{
9d1a1031
JN
953 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
954 uint8_t txbuf[20], rxbuf[20];
955 size_t txsize, rxsize;
a4fc5ed6 956 int ret;
a4fc5ed6 957
d2d9cbbd
VS
958 txbuf[0] = (msg->request << 4) |
959 ((msg->address >> 16) & 0xf);
960 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
961 txbuf[2] = msg->address & 0xff;
962 txbuf[3] = msg->size - 1;
46a5ae9f 963
9d1a1031
JN
964 switch (msg->request & ~DP_AUX_I2C_MOT) {
965 case DP_AUX_NATIVE_WRITE:
966 case DP_AUX_I2C_WRITE:
a6c8aff0 967 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 968 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 969
9d1a1031
JN
970 if (WARN_ON(txsize > 20))
971 return -E2BIG;
a4fc5ed6 972
9d1a1031 973 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 978
a1ddefd8
JN
979 if (ret > 1) {
980 /* Number of bytes written in a short write. */
981 ret = clamp_t(int, rxbuf[1], 0, msg->size);
982 } else {
983 /* Return payload size. */
984 ret = msg->size;
985 }
9d1a1031
JN
986 }
987 break;
46a5ae9f 988
9d1a1031
JN
989 case DP_AUX_NATIVE_READ:
990 case DP_AUX_I2C_READ:
a6c8aff0 991 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 992 rxsize = msg->size + 1;
a4fc5ed6 993
9d1a1031
JN
994 if (WARN_ON(rxsize > 20))
995 return -E2BIG;
a4fc5ed6 996
9d1a1031
JN
997 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
998 if (ret > 0) {
999 msg->reply = rxbuf[0] >> 4;
1000 /*
1001 * Assume happy day, and copy the data. The caller is
1002 * expected to check msg->reply before touching it.
1003 *
1004 * Return payload size.
1005 */
1006 ret--;
1007 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1008 }
9d1a1031
JN
1009 break;
1010
1011 default:
1012 ret = -EINVAL;
1013 break;
a4fc5ed6 1014 }
f51a44b9 1015
9d1a1031 1016 return ret;
a4fc5ed6
KP
1017}
1018
9d1a1031
JN
1019static void
1020intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1021{
1022 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1023 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1024 enum port port = intel_dig_port->port;
0b99836f 1025 const char *name = NULL;
ab2c0672
DA
1026 int ret;
1027
33ad6626
JN
1028 switch (port) {
1029 case PORT_A:
1030 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1031 name = "DPDDC-A";
ab2c0672 1032 break;
33ad6626
JN
1033 case PORT_B:
1034 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1035 name = "DPDDC-B";
ab2c0672 1036 break;
33ad6626
JN
1037 case PORT_C:
1038 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1039 name = "DPDDC-C";
ab2c0672 1040 break;
33ad6626
JN
1041 case PORT_D:
1042 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1043 name = "DPDDC-D";
33ad6626
JN
1044 break;
1045 default:
1046 BUG();
ab2c0672
DA
1047 }
1048
1b1aad75
DL
1049 /*
1050 * The AUX_CTL register is usually DP_CTL + 0x10.
1051 *
1052 * On Haswell and Broadwell though:
1053 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1054 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1055 *
1056 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1057 */
1058 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1059 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1060
0b99836f 1061 intel_dp->aux.name = name;
9d1a1031
JN
1062 intel_dp->aux.dev = dev->dev;
1063 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1064
0b99836f
JN
1065 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1066 connector->base.kdev->kobj.name);
8316f337 1067
4f71d0cb 1068 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1069 if (ret < 0) {
4f71d0cb 1070 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1071 name, ret);
1072 return;
ab2c0672 1073 }
8a5e6aeb 1074
0b99836f
JN
1075 ret = sysfs_create_link(&connector->base.kdev->kobj,
1076 &intel_dp->aux.ddc.dev.kobj,
1077 intel_dp->aux.ddc.dev.kobj.name);
1078 if (ret < 0) {
1079 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1080 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1081 }
a4fc5ed6
KP
1082}
1083
80f65de3
ID
1084static void
1085intel_dp_connector_unregister(struct intel_connector *intel_connector)
1086{
1087 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1088
0e32b39c
DA
1089 if (!intel_connector->mst_port)
1090 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1091 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1092 intel_connector_unregister(intel_connector);
1093}
1094
5416d871 1095static void
c3346ef6 1096skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1097{
1098 u32 ctrl1;
1099
dd3cd74a
ACO
1100 memset(&pipe_config->dpll_hw_state, 0,
1101 sizeof(pipe_config->dpll_hw_state));
1102
5416d871
DL
1103 pipe_config->ddi_pll_sel = SKL_DPLL0;
1104 pipe_config->dpll_hw_state.cfgcr1 = 0;
1105 pipe_config->dpll_hw_state.cfgcr2 = 0;
1106
1107 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1108 switch (link_clock / 2) {
1109 case 81000:
71cd8423 1110 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1111 SKL_DPLL0);
1112 break;
c3346ef6 1113 case 135000:
71cd8423 1114 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1115 SKL_DPLL0);
1116 break;
c3346ef6 1117 case 270000:
71cd8423 1118 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1119 SKL_DPLL0);
1120 break;
c3346ef6 1121 case 162000:
71cd8423 1122 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1123 SKL_DPLL0);
1124 break;
1125 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1126 results in CDCLK change. Need to handle the change of CDCLK by
1127 disabling pipes and re-enabling them */
1128 case 108000:
71cd8423 1129 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1130 SKL_DPLL0);
1131 break;
1132 case 216000:
71cd8423 1133 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1134 SKL_DPLL0);
1135 break;
1136
5416d871
DL
1137 }
1138 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1139}
1140
0e50338c 1141static void
5cec258b 1142hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1143{
1144 switch (link_bw) {
1145 case DP_LINK_BW_1_62:
1146 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1147 break;
1148 case DP_LINK_BW_2_7:
1149 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1150 break;
1151 case DP_LINK_BW_5_4:
1152 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1153 break;
1154 }
1155}
1156
fc0f8e25 1157static int
12f6a2e2 1158intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1159{
94ca719e
VS
1160 if (intel_dp->num_sink_rates) {
1161 *sink_rates = intel_dp->sink_rates;
1162 return intel_dp->num_sink_rates;
fc0f8e25 1163 }
12f6a2e2
VS
1164
1165 *sink_rates = default_rates;
1166
1167 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1168}
1169
a8f3ef61 1170static int
1db10e28 1171intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1172{
637a9c63
SJ
1173 if (IS_SKYLAKE(dev)) {
1174 *source_rates = skl_rates;
1175 return ARRAY_SIZE(skl_rates);
fe51bfb9
VS
1176 } else if (IS_CHERRYVIEW(dev)) {
1177 *source_rates = chv_rates;
1178 return ARRAY_SIZE(chv_rates);
a8f3ef61 1179 }
636280ba
VS
1180
1181 *source_rates = default_rates;
1182
1db10e28
VS
1183 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1184 /* WaDisableHBR2:skl */
1185 return (DP_LINK_BW_2_7 >> 3) + 1;
1186 else if (INTEL_INFO(dev)->gen >= 8 ||
1187 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1188 return (DP_LINK_BW_5_4 >> 3) + 1;
1189 else
1190 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1191}
1192
c6bb3538
DV
1193static void
1194intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1195 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1196{
1197 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1198 const struct dp_link_dpll *divisor = NULL;
1199 int i, count = 0;
c6bb3538
DV
1200
1201 if (IS_G4X(dev)) {
9dd4ffdf
CML
1202 divisor = gen4_dpll;
1203 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1204 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1205 divisor = pch_dpll;
1206 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1207 } else if (IS_CHERRYVIEW(dev)) {
1208 divisor = chv_dpll;
1209 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1210 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1211 divisor = vlv_dpll;
1212 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1213 }
9dd4ffdf
CML
1214
1215 if (divisor && count) {
1216 for (i = 0; i < count; i++) {
1217 if (link_bw == divisor[i].link_bw) {
1218 pipe_config->dpll = divisor[i].dpll;
1219 pipe_config->clock_set = true;
1220 break;
1221 }
1222 }
c6bb3538
DV
1223 }
1224}
1225
2ecae76a
VS
1226static int intersect_rates(const int *source_rates, int source_len,
1227 const int *sink_rates, int sink_len,
94ca719e 1228 int *common_rates)
a8f3ef61
SJ
1229{
1230 int i = 0, j = 0, k = 0;
1231
a8f3ef61
SJ
1232 while (i < source_len && j < sink_len) {
1233 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1234 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1235 return k;
94ca719e 1236 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1237 ++k;
1238 ++i;
1239 ++j;
1240 } else if (source_rates[i] < sink_rates[j]) {
1241 ++i;
1242 } else {
1243 ++j;
1244 }
1245 }
1246 return k;
1247}
1248
94ca719e
VS
1249static int intel_dp_common_rates(struct intel_dp *intel_dp,
1250 int *common_rates)
2ecae76a
VS
1251{
1252 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1253 const int *source_rates, *sink_rates;
1254 int source_len, sink_len;
1255
1256 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1257 source_len = intel_dp_source_rates(dev, &source_rates);
1258
1259 return intersect_rates(source_rates, source_len,
1260 sink_rates, sink_len,
94ca719e 1261 common_rates);
2ecae76a
VS
1262}
1263
0336400e
VS
1264static void snprintf_int_array(char *str, size_t len,
1265 const int *array, int nelem)
1266{
1267 int i;
1268
1269 str[0] = '\0';
1270
1271 for (i = 0; i < nelem; i++) {
b2f505be 1272 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1273 if (r >= len)
1274 return;
1275 str += r;
1276 len -= r;
1277 }
1278}
1279
1280static void intel_dp_print_rates(struct intel_dp *intel_dp)
1281{
1282 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1283 const int *source_rates, *sink_rates;
94ca719e
VS
1284 int source_len, sink_len, common_len;
1285 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1286 char str[128]; /* FIXME: too big for stack? */
1287
1288 if ((drm_debug & DRM_UT_KMS) == 0)
1289 return;
1290
1291 source_len = intel_dp_source_rates(dev, &source_rates);
1292 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1293 DRM_DEBUG_KMS("source rates: %s\n", str);
1294
1295 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1296 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1297 DRM_DEBUG_KMS("sink rates: %s\n", str);
1298
94ca719e
VS
1299 common_len = intel_dp_common_rates(intel_dp, common_rates);
1300 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1301 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1302}
1303
f4896f15 1304static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1305{
1306 int i = 0;
1307
1308 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1309 if (find == rates[i])
1310 break;
1311
1312 return i;
1313}
1314
50fec21a
VS
1315int
1316intel_dp_max_link_rate(struct intel_dp *intel_dp)
1317{
1318 int rates[DP_MAX_SUPPORTED_RATES] = {};
1319 int len;
1320
94ca719e 1321 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1322 if (WARN_ON(len <= 0))
1323 return 162000;
1324
1325 return rates[rate_to_index(0, rates) - 1];
1326}
1327
ed4e9c1d
VS
1328int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1329{
94ca719e 1330 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1331}
1332
00c09d70 1333bool
5bfe2ac0 1334intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1335 struct intel_crtc_state *pipe_config)
a4fc5ed6 1336{
5bfe2ac0 1337 struct drm_device *dev = encoder->base.dev;
36008365 1338 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1339 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1341 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1342 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1343 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1344 int lane_count, clock;
56071a20 1345 int min_lane_count = 1;
eeb6324d 1346 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1347 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1348 int min_clock = 0;
a8f3ef61 1349 int max_clock;
083f9560 1350 int bpp, mode_rate;
ff9a6750 1351 int link_avail, link_clock;
94ca719e
VS
1352 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1353 int common_len;
a8f3ef61 1354
94ca719e 1355 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1356
1357 /* No common link rates between source and sink */
94ca719e 1358 WARN_ON(common_len <= 0);
a8f3ef61 1359
94ca719e 1360 max_clock = common_len - 1;
a4fc5ed6 1361
bc7d38a4 1362 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1363 pipe_config->has_pch_encoder = true;
1364
03afc4a2 1365 pipe_config->has_dp_encoder = true;
f769cd24 1366 pipe_config->has_drrs = false;
9fcb1704 1367 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1368
dd06f90e
JN
1369 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1370 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1371 adjusted_mode);
a1b2278e
CK
1372
1373 if (INTEL_INFO(dev)->gen >= 9) {
1374 int ret;
1375 ret = skl_update_scaler_users(intel_crtc, pipe_config, NULL, NULL, 0);
1376 if (ret)
1377 return ret;
1378 }
1379
2dd24552
JB
1380 if (!HAS_PCH_SPLIT(dev))
1381 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1382 intel_connector->panel.fitting_mode);
1383 else
b074cec8
JB
1384 intel_pch_panel_fitting(intel_crtc, pipe_config,
1385 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1386 }
1387
cb1793ce 1388 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1389 return false;
1390
083f9560 1391 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1392 "max bw %d pixel clock %iKHz\n",
94ca719e 1393 max_lane_count, common_rates[max_clock],
241bfc38 1394 adjusted_mode->crtc_clock);
083f9560 1395
36008365
DV
1396 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1397 * bpc in between. */
3e7ca985 1398 bpp = pipe_config->pipe_bpp;
56071a20
JN
1399 if (is_edp(intel_dp)) {
1400 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1401 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1402 dev_priv->vbt.edp_bpp);
1403 bpp = dev_priv->vbt.edp_bpp;
1404 }
1405
344c5bbc
JN
1406 /*
1407 * Use the maximum clock and number of lanes the eDP panel
1408 * advertizes being capable of. The panels are generally
1409 * designed to support only a single clock and lane
1410 * configuration, and typically these values correspond to the
1411 * native resolution of the panel.
1412 */
1413 min_lane_count = max_lane_count;
1414 min_clock = max_clock;
7984211e 1415 }
657445fe 1416
36008365 1417 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1418 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1419 bpp);
36008365 1420
c6930992 1421 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1422 for (lane_count = min_lane_count;
1423 lane_count <= max_lane_count;
1424 lane_count <<= 1) {
1425
94ca719e 1426 link_clock = common_rates[clock];
36008365
DV
1427 link_avail = intel_dp_max_data_rate(link_clock,
1428 lane_count);
1429
1430 if (mode_rate <= link_avail) {
1431 goto found;
1432 }
1433 }
1434 }
1435 }
c4867936 1436
36008365 1437 return false;
3685a8f3 1438
36008365 1439found:
55bc60db
VS
1440 if (intel_dp->color_range_auto) {
1441 /*
1442 * See:
1443 * CEA-861-E - 5.1 Default Encoding Parameters
1444 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1445 */
18316c8c 1446 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1447 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1448 else
1449 intel_dp->color_range = 0;
1450 }
1451
3685a8f3 1452 if (intel_dp->color_range)
50f3b016 1453 pipe_config->limited_color_range = true;
a4fc5ed6 1454
36008365 1455 intel_dp->lane_count = lane_count;
a8f3ef61 1456
94ca719e 1457 if (intel_dp->num_sink_rates) {
bc27b7d3 1458 intel_dp->link_bw = 0;
a8f3ef61 1459 intel_dp->rate_select =
94ca719e 1460 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1461 } else {
1462 intel_dp->link_bw =
94ca719e 1463 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1464 intel_dp->rate_select = 0;
a8f3ef61
SJ
1465 }
1466
657445fe 1467 pipe_config->pipe_bpp = bpp;
94ca719e 1468 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1469
36008365
DV
1470 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1471 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1472 pipe_config->port_clock, bpp);
36008365
DV
1473 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1474 mode_rate, link_avail);
a4fc5ed6 1475
03afc4a2 1476 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1477 adjusted_mode->crtc_clock,
1478 pipe_config->port_clock,
03afc4a2 1479 &pipe_config->dp_m_n);
9d1a455b 1480
439d7ac0 1481 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1482 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1483 pipe_config->has_drrs = true;
439d7ac0
PB
1484 intel_link_compute_m_n(bpp, lane_count,
1485 intel_connector->panel.downclock_mode->clock,
1486 pipe_config->port_clock,
1487 &pipe_config->dp_m2_n2);
1488 }
1489
5416d871 1490 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1491 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
977bb38d
S
1492 else if (IS_BROXTON(dev))
1493 /* handled in ddi */;
5416d871 1494 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1495 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1496 else
1497 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1498
03afc4a2 1499 return true;
a4fc5ed6
KP
1500}
1501
7c62a164 1502static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1503{
7c62a164
DV
1504 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1505 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1506 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1507 struct drm_i915_private *dev_priv = dev->dev_private;
1508 u32 dpa_ctl;
1509
6e3c9717
ACO
1510 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1511 crtc->config->port_clock);
ea9b6006
DV
1512 dpa_ctl = I915_READ(DP_A);
1513 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1514
6e3c9717 1515 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1516 /* For a long time we've carried around a ILK-DevA w/a for the
1517 * 160MHz clock. If we're really unlucky, it's still required.
1518 */
1519 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1520 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1521 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1522 } else {
1523 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1524 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1525 }
1ce17038 1526
ea9b6006
DV
1527 I915_WRITE(DP_A, dpa_ctl);
1528
1529 POSTING_READ(DP_A);
1530 udelay(500);
1531}
1532
8ac33ed3 1533static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1534{
b934223d 1535 struct drm_device *dev = encoder->base.dev;
417e822d 1536 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1537 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1538 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1540 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1541
417e822d 1542 /*
1a2eb460 1543 * There are four kinds of DP registers:
417e822d
KP
1544 *
1545 * IBX PCH
1a2eb460
KP
1546 * SNB CPU
1547 * IVB CPU
417e822d
KP
1548 * CPT PCH
1549 *
1550 * IBX PCH and CPU are the same for almost everything,
1551 * except that the CPU DP PLL is configured in this
1552 * register
1553 *
1554 * CPT PCH is quite different, having many bits moved
1555 * to the TRANS_DP_CTL register instead. That
1556 * configuration happens (oddly) in ironlake_pch_enable
1557 */
9c9e7927 1558
417e822d
KP
1559 /* Preserve the BIOS-computed detected bit. This is
1560 * supposed to be read-only.
1561 */
1562 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1563
417e822d 1564 /* Handle DP bits in common between all three register formats */
417e822d 1565 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1566 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1567
6e3c9717 1568 if (crtc->config->has_audio)
ea5b213a 1569 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1570
417e822d 1571 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1572
39e5fa88 1573 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1574 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1575 intel_dp->DP |= DP_SYNC_HS_HIGH;
1576 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1577 intel_dp->DP |= DP_SYNC_VS_HIGH;
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1579
6aba5b6c 1580 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1581 intel_dp->DP |= DP_ENHANCED_FRAMING;
1582
7c62a164 1583 intel_dp->DP |= crtc->pipe << 29;
39e5fa88
VS
1584 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1585 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1586 } else {
b2634017 1587 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1588 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1589
1590 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1591 intel_dp->DP |= DP_SYNC_HS_HIGH;
1592 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1593 intel_dp->DP |= DP_SYNC_VS_HIGH;
1594 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1595
6aba5b6c 1596 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1597 intel_dp->DP |= DP_ENHANCED_FRAMING;
1598
39e5fa88 1599 if (IS_CHERRYVIEW(dev))
44f37d1f 1600 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1601 else if (crtc->pipe == PIPE_B)
1602 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1603 }
a4fc5ed6
KP
1604}
1605
ffd6749d
PZ
1606#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1607#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1608
1a5ef5b7
PZ
1609#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1610#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1611
ffd6749d
PZ
1612#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1613#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1614
4be73780 1615static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1616 u32 mask,
1617 u32 value)
bd943159 1618{
30add22d 1619 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1620 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1621 u32 pp_stat_reg, pp_ctrl_reg;
1622
e39b999a
VS
1623 lockdep_assert_held(&dev_priv->pps_mutex);
1624
bf13e81b
JN
1625 pp_stat_reg = _pp_stat_reg(intel_dp);
1626 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1627
99ea7127 1628 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1629 mask, value,
1630 I915_READ(pp_stat_reg),
1631 I915_READ(pp_ctrl_reg));
32ce697c 1632
453c5420 1633 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1634 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1635 I915_READ(pp_stat_reg),
1636 I915_READ(pp_ctrl_reg));
32ce697c 1637 }
54c136d4
CW
1638
1639 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1640}
32ce697c 1641
4be73780 1642static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1643{
1644 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1645 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1646}
1647
4be73780 1648static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1649{
1650 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1651 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1652}
1653
4be73780 1654static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1655{
1656 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1657
1658 /* When we disable the VDD override bit last we have to do the manual
1659 * wait. */
1660 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1661 intel_dp->panel_power_cycle_delay);
1662
4be73780 1663 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1664}
1665
4be73780 1666static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1667{
1668 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1669 intel_dp->backlight_on_delay);
1670}
1671
4be73780 1672static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1673{
1674 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1675 intel_dp->backlight_off_delay);
1676}
99ea7127 1677
832dd3c1
KP
1678/* Read the current pp_control value, unlocking the register if it
1679 * is locked
1680 */
1681
453c5420 1682static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1683{
453c5420
JB
1684 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1685 struct drm_i915_private *dev_priv = dev->dev_private;
1686 u32 control;
832dd3c1 1687
e39b999a
VS
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
bf13e81b 1690 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1691 control &= ~PANEL_UNLOCK_MASK;
1692 control |= PANEL_UNLOCK_REGS;
1693 return control;
bd943159
KP
1694}
1695
951468f3
VS
1696/*
1697 * Must be paired with edp_panel_vdd_off().
1698 * Must hold pps_mutex around the whole on/off sequence.
1699 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1700 */
1e0560e0 1701static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1702{
30add22d 1703 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1704 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1705 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1706 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1707 enum intel_display_power_domain power_domain;
5d613501 1708 u32 pp;
453c5420 1709 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1710 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1711
e39b999a
VS
1712 lockdep_assert_held(&dev_priv->pps_mutex);
1713
97af61f5 1714 if (!is_edp(intel_dp))
adddaaf4 1715 return false;
bd943159 1716
2c623c11 1717 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1718 intel_dp->want_panel_vdd = true;
99ea7127 1719
4be73780 1720 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1721 return need_to_disable;
b0665d57 1722
4e6e1a54
ID
1723 power_domain = intel_display_port_power_domain(intel_encoder);
1724 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1725
3936fcf4
VS
1726 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1727 port_name(intel_dig_port->port));
bd943159 1728
4be73780
DV
1729 if (!edp_have_panel_power(intel_dp))
1730 wait_panel_power_cycle(intel_dp);
99ea7127 1731
453c5420 1732 pp = ironlake_get_pp_control(intel_dp);
5d613501 1733 pp |= EDP_FORCE_VDD;
ebf33b18 1734
bf13e81b
JN
1735 pp_stat_reg = _pp_stat_reg(intel_dp);
1736 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1737
1738 I915_WRITE(pp_ctrl_reg, pp);
1739 POSTING_READ(pp_ctrl_reg);
1740 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1741 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1742 /*
1743 * If the panel wasn't on, delay before accessing aux channel
1744 */
4be73780 1745 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1746 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1747 port_name(intel_dig_port->port));
f01eca2e 1748 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1749 }
adddaaf4
JN
1750
1751 return need_to_disable;
1752}
1753
951468f3
VS
1754/*
1755 * Must be paired with intel_edp_panel_vdd_off() or
1756 * intel_edp_panel_off().
1757 * Nested calls to these functions are not allowed since
1758 * we drop the lock. Caller must use some higher level
1759 * locking to prevent nested calls from other threads.
1760 */
b80d6c78 1761void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1762{
c695b6b6 1763 bool vdd;
adddaaf4 1764
c695b6b6
VS
1765 if (!is_edp(intel_dp))
1766 return;
1767
773538e8 1768 pps_lock(intel_dp);
c695b6b6 1769 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1770 pps_unlock(intel_dp);
c695b6b6 1771
e2c719b7 1772 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1773 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1774}
1775
4be73780 1776static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1777{
30add22d 1778 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1779 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1780 struct intel_digital_port *intel_dig_port =
1781 dp_to_dig_port(intel_dp);
1782 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1783 enum intel_display_power_domain power_domain;
5d613501 1784 u32 pp;
453c5420 1785 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1786
e39b999a 1787 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1788
15e899a0 1789 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1790
15e899a0 1791 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1792 return;
b0665d57 1793
3936fcf4
VS
1794 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1795 port_name(intel_dig_port->port));
bd943159 1796
be2c9196
VS
1797 pp = ironlake_get_pp_control(intel_dp);
1798 pp &= ~EDP_FORCE_VDD;
453c5420 1799
be2c9196
VS
1800 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1801 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1802
be2c9196
VS
1803 I915_WRITE(pp_ctrl_reg, pp);
1804 POSTING_READ(pp_ctrl_reg);
90791a5c 1805
be2c9196
VS
1806 /* Make sure sequencer is idle before allowing subsequent activity */
1807 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1808 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1809
be2c9196
VS
1810 if ((pp & POWER_TARGET_ON) == 0)
1811 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1812
be2c9196
VS
1813 power_domain = intel_display_port_power_domain(intel_encoder);
1814 intel_display_power_put(dev_priv, power_domain);
bd943159 1815}
5d613501 1816
4be73780 1817static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1818{
1819 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1820 struct intel_dp, panel_vdd_work);
bd943159 1821
773538e8 1822 pps_lock(intel_dp);
15e899a0
VS
1823 if (!intel_dp->want_panel_vdd)
1824 edp_panel_vdd_off_sync(intel_dp);
773538e8 1825 pps_unlock(intel_dp);
bd943159
KP
1826}
1827
aba86890
ID
1828static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1829{
1830 unsigned long delay;
1831
1832 /*
1833 * Queue the timer to fire a long time from now (relative to the power
1834 * down delay) to keep the panel power up across a sequence of
1835 * operations.
1836 */
1837 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1838 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1839}
1840
951468f3
VS
1841/*
1842 * Must be paired with edp_panel_vdd_on().
1843 * Must hold pps_mutex around the whole on/off sequence.
1844 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1845 */
4be73780 1846static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1847{
e39b999a
VS
1848 struct drm_i915_private *dev_priv =
1849 intel_dp_to_dev(intel_dp)->dev_private;
1850
1851 lockdep_assert_held(&dev_priv->pps_mutex);
1852
97af61f5
KP
1853 if (!is_edp(intel_dp))
1854 return;
5d613501 1855
e2c719b7 1856 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1857 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1858
bd943159
KP
1859 intel_dp->want_panel_vdd = false;
1860
aba86890 1861 if (sync)
4be73780 1862 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1863 else
1864 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1865}
1866
9f0fb5be 1867static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1868{
30add22d 1869 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1870 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1871 u32 pp;
453c5420 1872 u32 pp_ctrl_reg;
9934c132 1873
9f0fb5be
VS
1874 lockdep_assert_held(&dev_priv->pps_mutex);
1875
97af61f5 1876 if (!is_edp(intel_dp))
bd943159 1877 return;
99ea7127 1878
3936fcf4
VS
1879 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1880 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1881
e7a89ace
VS
1882 if (WARN(edp_have_panel_power(intel_dp),
1883 "eDP port %c panel power already on\n",
1884 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1885 return;
9934c132 1886
4be73780 1887 wait_panel_power_cycle(intel_dp);
37c6c9b0 1888
bf13e81b 1889 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1890 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1891 if (IS_GEN5(dev)) {
1892 /* ILK workaround: disable reset around power sequence */
1893 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1894 I915_WRITE(pp_ctrl_reg, pp);
1895 POSTING_READ(pp_ctrl_reg);
05ce1a49 1896 }
37c6c9b0 1897
1c0ae80a 1898 pp |= POWER_TARGET_ON;
99ea7127
KP
1899 if (!IS_GEN5(dev))
1900 pp |= PANEL_POWER_RESET;
1901
453c5420
JB
1902 I915_WRITE(pp_ctrl_reg, pp);
1903 POSTING_READ(pp_ctrl_reg);
9934c132 1904
4be73780 1905 wait_panel_on(intel_dp);
dce56b3c 1906 intel_dp->last_power_on = jiffies;
9934c132 1907
05ce1a49
KP
1908 if (IS_GEN5(dev)) {
1909 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1910 I915_WRITE(pp_ctrl_reg, pp);
1911 POSTING_READ(pp_ctrl_reg);
05ce1a49 1912 }
9f0fb5be 1913}
e39b999a 1914
9f0fb5be
VS
1915void intel_edp_panel_on(struct intel_dp *intel_dp)
1916{
1917 if (!is_edp(intel_dp))
1918 return;
1919
1920 pps_lock(intel_dp);
1921 edp_panel_on(intel_dp);
773538e8 1922 pps_unlock(intel_dp);
9934c132
JB
1923}
1924
9f0fb5be
VS
1925
1926static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1927{
4e6e1a54
ID
1928 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1929 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1930 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1931 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1932 enum intel_display_power_domain power_domain;
99ea7127 1933 u32 pp;
453c5420 1934 u32 pp_ctrl_reg;
9934c132 1935
9f0fb5be
VS
1936 lockdep_assert_held(&dev_priv->pps_mutex);
1937
97af61f5
KP
1938 if (!is_edp(intel_dp))
1939 return;
37c6c9b0 1940
3936fcf4
VS
1941 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1942 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1943
3936fcf4
VS
1944 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1945 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1946
453c5420 1947 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1948 /* We need to switch off panel power _and_ force vdd, for otherwise some
1949 * panels get very unhappy and cease to work. */
b3064154
PJ
1950 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1951 EDP_BLC_ENABLE);
453c5420 1952
bf13e81b 1953 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1954
849e39f5
PZ
1955 intel_dp->want_panel_vdd = false;
1956
453c5420
JB
1957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
9934c132 1959
dce56b3c 1960 intel_dp->last_power_cycle = jiffies;
4be73780 1961 wait_panel_off(intel_dp);
849e39f5
PZ
1962
1963 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1964 power_domain = intel_display_port_power_domain(intel_encoder);
1965 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1966}
e39b999a 1967
9f0fb5be
VS
1968void intel_edp_panel_off(struct intel_dp *intel_dp)
1969{
1970 if (!is_edp(intel_dp))
1971 return;
e39b999a 1972
9f0fb5be
VS
1973 pps_lock(intel_dp);
1974 edp_panel_off(intel_dp);
773538e8 1975 pps_unlock(intel_dp);
9934c132
JB
1976}
1977
1250d107
JN
1978/* Enable backlight in the panel power control. */
1979static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1980{
da63a9f2
PZ
1981 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1982 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1983 struct drm_i915_private *dev_priv = dev->dev_private;
1984 u32 pp;
453c5420 1985 u32 pp_ctrl_reg;
32f9d658 1986
01cb9ea6
JB
1987 /*
1988 * If we enable the backlight right away following a panel power
1989 * on, we may see slight flicker as the panel syncs with the eDP
1990 * link. So delay a bit to make sure the image is solid before
1991 * allowing it to appear.
1992 */
4be73780 1993 wait_backlight_on(intel_dp);
e39b999a 1994
773538e8 1995 pps_lock(intel_dp);
e39b999a 1996
453c5420 1997 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1998 pp |= EDP_BLC_ENABLE;
453c5420 1999
bf13e81b 2000 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2001
2002 I915_WRITE(pp_ctrl_reg, pp);
2003 POSTING_READ(pp_ctrl_reg);
e39b999a 2004
773538e8 2005 pps_unlock(intel_dp);
32f9d658
ZW
2006}
2007
1250d107
JN
2008/* Enable backlight PWM and backlight PP control. */
2009void intel_edp_backlight_on(struct intel_dp *intel_dp)
2010{
2011 if (!is_edp(intel_dp))
2012 return;
2013
2014 DRM_DEBUG_KMS("\n");
2015
2016 intel_panel_enable_backlight(intel_dp->attached_connector);
2017 _intel_edp_backlight_on(intel_dp);
2018}
2019
2020/* Disable backlight in the panel power control. */
2021static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2022{
30add22d 2023 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2024 struct drm_i915_private *dev_priv = dev->dev_private;
2025 u32 pp;
453c5420 2026 u32 pp_ctrl_reg;
32f9d658 2027
f01eca2e
KP
2028 if (!is_edp(intel_dp))
2029 return;
2030
773538e8 2031 pps_lock(intel_dp);
e39b999a 2032
453c5420 2033 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2034 pp &= ~EDP_BLC_ENABLE;
453c5420 2035
bf13e81b 2036 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2037
2038 I915_WRITE(pp_ctrl_reg, pp);
2039 POSTING_READ(pp_ctrl_reg);
f7d2323c 2040
773538e8 2041 pps_unlock(intel_dp);
e39b999a
VS
2042
2043 intel_dp->last_backlight_off = jiffies;
f7d2323c 2044 edp_wait_backlight_off(intel_dp);
1250d107 2045}
f7d2323c 2046
1250d107
JN
2047/* Disable backlight PP control and backlight PWM. */
2048void intel_edp_backlight_off(struct intel_dp *intel_dp)
2049{
2050 if (!is_edp(intel_dp))
2051 return;
2052
2053 DRM_DEBUG_KMS("\n");
f7d2323c 2054
1250d107 2055 _intel_edp_backlight_off(intel_dp);
f7d2323c 2056 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2057}
a4fc5ed6 2058
73580fb7
JN
2059/*
2060 * Hook for controlling the panel power control backlight through the bl_power
2061 * sysfs attribute. Take care to handle multiple calls.
2062 */
2063static void intel_edp_backlight_power(struct intel_connector *connector,
2064 bool enable)
2065{
2066 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2067 bool is_enabled;
2068
773538e8 2069 pps_lock(intel_dp);
e39b999a 2070 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2071 pps_unlock(intel_dp);
73580fb7
JN
2072
2073 if (is_enabled == enable)
2074 return;
2075
23ba9373
JN
2076 DRM_DEBUG_KMS("panel power control backlight %s\n",
2077 enable ? "enable" : "disable");
73580fb7
JN
2078
2079 if (enable)
2080 _intel_edp_backlight_on(intel_dp);
2081 else
2082 _intel_edp_backlight_off(intel_dp);
2083}
2084
2bd2ad64 2085static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2086{
da63a9f2
PZ
2087 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2088 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2089 struct drm_device *dev = crtc->dev;
d240f20f
JB
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2091 u32 dpa_ctl;
2092
2bd2ad64
DV
2093 assert_pipe_disabled(dev_priv,
2094 to_intel_crtc(crtc)->pipe);
2095
d240f20f
JB
2096 DRM_DEBUG_KMS("\n");
2097 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2098 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2099 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2100
2101 /* We don't adjust intel_dp->DP while tearing down the link, to
2102 * facilitate link retraining (e.g. after hotplug). Hence clear all
2103 * enable bits here to ensure that we don't enable too much. */
2104 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2105 intel_dp->DP |= DP_PLL_ENABLE;
2106 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2107 POSTING_READ(DP_A);
2108 udelay(200);
d240f20f
JB
2109}
2110
2bd2ad64 2111static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2112{
da63a9f2
PZ
2113 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2114 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2115 struct drm_device *dev = crtc->dev;
d240f20f
JB
2116 struct drm_i915_private *dev_priv = dev->dev_private;
2117 u32 dpa_ctl;
2118
2bd2ad64
DV
2119 assert_pipe_disabled(dev_priv,
2120 to_intel_crtc(crtc)->pipe);
2121
d240f20f 2122 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2123 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2124 "dp pll off, should be on\n");
2125 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2126
2127 /* We can't rely on the value tracked for the DP register in
2128 * intel_dp->DP because link_down must not change that (otherwise link
2129 * re-training will fail. */
298b0b39 2130 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2131 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2132 POSTING_READ(DP_A);
d240f20f
JB
2133 udelay(200);
2134}
2135
c7ad3810 2136/* If the sink supports it, try to set the power state appropriately */
c19b0669 2137void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2138{
2139 int ret, i;
2140
2141 /* Should have a valid DPCD by this point */
2142 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2143 return;
2144
2145 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2146 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2147 DP_SET_POWER_D3);
c7ad3810
JB
2148 } else {
2149 /*
2150 * When turning on, we need to retry for 1ms to give the sink
2151 * time to wake up.
2152 */
2153 for (i = 0; i < 3; i++) {
9d1a1031
JN
2154 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2155 DP_SET_POWER_D0);
c7ad3810
JB
2156 if (ret == 1)
2157 break;
2158 msleep(1);
2159 }
2160 }
f9cac721
JN
2161
2162 if (ret != 1)
2163 DRM_DEBUG_KMS("failed to %s sink power state\n",
2164 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2165}
2166
19d8fe15
DV
2167static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2168 enum pipe *pipe)
d240f20f 2169{
19d8fe15 2170 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2171 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2172 struct drm_device *dev = encoder->base.dev;
2173 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2174 enum intel_display_power_domain power_domain;
2175 u32 tmp;
2176
2177 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2178 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2179 return false;
2180
2181 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2182
2183 if (!(tmp & DP_PORT_EN))
2184 return false;
2185
39e5fa88 2186 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2187 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2188 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2189 enum pipe p;
19d8fe15 2190
adc289d7
VS
2191 for_each_pipe(dev_priv, p) {
2192 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2193 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2194 *pipe = p;
19d8fe15
DV
2195 return true;
2196 }
2197 }
19d8fe15 2198
4a0833ec
DV
2199 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2200 intel_dp->output_reg);
39e5fa88
VS
2201 } else if (IS_CHERRYVIEW(dev)) {
2202 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2203 } else {
2204 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2205 }
d240f20f 2206
19d8fe15
DV
2207 return true;
2208}
d240f20f 2209
045ac3b5 2210static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2211 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2212{
2213 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2214 u32 tmp, flags = 0;
63000ef6
XZ
2215 struct drm_device *dev = encoder->base.dev;
2216 struct drm_i915_private *dev_priv = dev->dev_private;
2217 enum port port = dp_to_dig_port(intel_dp)->port;
2218 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2219 int dotclock;
045ac3b5 2220
9ed109a7 2221 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2222
2223 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2224
39e5fa88
VS
2225 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2226 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2227 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2228 flags |= DRM_MODE_FLAG_PHSYNC;
2229 else
2230 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2231
39e5fa88 2232 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2233 flags |= DRM_MODE_FLAG_PVSYNC;
2234 else
2235 flags |= DRM_MODE_FLAG_NVSYNC;
2236 } else {
39e5fa88 2237 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2238 flags |= DRM_MODE_FLAG_PHSYNC;
2239 else
2240 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2241
39e5fa88 2242 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2243 flags |= DRM_MODE_FLAG_PVSYNC;
2244 else
2245 flags |= DRM_MODE_FLAG_NVSYNC;
2246 }
045ac3b5 2247
2d112de7 2248 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2249
8c875fca
VS
2250 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2251 tmp & DP_COLOR_RANGE_16_235)
2252 pipe_config->limited_color_range = true;
2253
eb14cb74
VS
2254 pipe_config->has_dp_encoder = true;
2255
2256 intel_dp_get_m_n(crtc, pipe_config);
2257
18442d08 2258 if (port == PORT_A) {
f1f644dc
JB
2259 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2260 pipe_config->port_clock = 162000;
2261 else
2262 pipe_config->port_clock = 270000;
2263 }
18442d08
VS
2264
2265 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2266 &pipe_config->dp_m_n);
2267
2268 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2269 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2270
2d112de7 2271 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2272
c6cd2ee2
JN
2273 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2274 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2275 /*
2276 * This is a big fat ugly hack.
2277 *
2278 * Some machines in UEFI boot mode provide us a VBT that has 18
2279 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2280 * unknown we fail to light up. Yet the same BIOS boots up with
2281 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2282 * max, not what it tells us to use.
2283 *
2284 * Note: This will still be broken if the eDP panel is not lit
2285 * up by the BIOS, and thus we can't get the mode at module
2286 * load.
2287 */
2288 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2289 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2290 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2291 }
045ac3b5
JB
2292}
2293
e8cb4558 2294static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2295{
e8cb4558 2296 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2297 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2298 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2299
6e3c9717 2300 if (crtc->config->has_audio)
495a5bb8 2301 intel_audio_codec_disable(encoder);
6cb49835 2302
b32c6f48
RV
2303 if (HAS_PSR(dev) && !HAS_DDI(dev))
2304 intel_psr_disable(intel_dp);
2305
6cb49835
DV
2306 /* Make sure the panel is off before trying to change the mode. But also
2307 * ensure that we have vdd while we switch off the panel. */
24f3e092 2308 intel_edp_panel_vdd_on(intel_dp);
4be73780 2309 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2310 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2311 intel_edp_panel_off(intel_dp);
3739850b 2312
08aff3fe
VS
2313 /* disable the port before the pipe on g4x */
2314 if (INTEL_INFO(dev)->gen < 5)
3739850b 2315 intel_dp_link_down(intel_dp);
d240f20f
JB
2316}
2317
08aff3fe 2318static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2319{
2bd2ad64 2320 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2321 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2322
49277c31 2323 intel_dp_link_down(intel_dp);
08aff3fe
VS
2324 if (port == PORT_A)
2325 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2326}
2327
2328static void vlv_post_disable_dp(struct intel_encoder *encoder)
2329{
2330 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2331
2332 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2333}
2334
580d3811
VS
2335static void chv_post_disable_dp(struct intel_encoder *encoder)
2336{
2337 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2338 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2339 struct drm_device *dev = encoder->base.dev;
2340 struct drm_i915_private *dev_priv = dev->dev_private;
2341 struct intel_crtc *intel_crtc =
2342 to_intel_crtc(encoder->base.crtc);
2343 enum dpio_channel ch = vlv_dport_to_channel(dport);
2344 enum pipe pipe = intel_crtc->pipe;
2345 u32 val;
2346
2347 intel_dp_link_down(intel_dp);
2348
2349 mutex_lock(&dev_priv->dpio_lock);
2350
2351 /* Propagate soft reset to data lane reset */
97fd4d5c 2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2353 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2355
97fd4d5c
VS
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2357 val |= CHV_PCS_REQ_SOFTRESET_EN;
2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2359
2360 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2361 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2362 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2363
2364 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2365 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2366 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2367
2368 mutex_unlock(&dev_priv->dpio_lock);
2369}
2370
7b13b58a
VS
2371static void
2372_intel_dp_set_link_train(struct intel_dp *intel_dp,
2373 uint32_t *DP,
2374 uint8_t dp_train_pat)
2375{
2376 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2377 struct drm_device *dev = intel_dig_port->base.base.dev;
2378 struct drm_i915_private *dev_priv = dev->dev_private;
2379 enum port port = intel_dig_port->port;
2380
2381 if (HAS_DDI(dev)) {
2382 uint32_t temp = I915_READ(DP_TP_CTL(port));
2383
2384 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2385 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2386 else
2387 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2388
2389 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2390 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2391 case DP_TRAINING_PATTERN_DISABLE:
2392 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2393
2394 break;
2395 case DP_TRAINING_PATTERN_1:
2396 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2397 break;
2398 case DP_TRAINING_PATTERN_2:
2399 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2400 break;
2401 case DP_TRAINING_PATTERN_3:
2402 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2403 break;
2404 }
2405 I915_WRITE(DP_TP_CTL(port), temp);
2406
39e5fa88
VS
2407 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2408 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2409 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2410
2411 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2412 case DP_TRAINING_PATTERN_DISABLE:
2413 *DP |= DP_LINK_TRAIN_OFF_CPT;
2414 break;
2415 case DP_TRAINING_PATTERN_1:
2416 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2417 break;
2418 case DP_TRAINING_PATTERN_2:
2419 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2420 break;
2421 case DP_TRAINING_PATTERN_3:
2422 DRM_ERROR("DP training pattern 3 not supported\n");
2423 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2424 break;
2425 }
2426
2427 } else {
2428 if (IS_CHERRYVIEW(dev))
2429 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2430 else
2431 *DP &= ~DP_LINK_TRAIN_MASK;
2432
2433 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2434 case DP_TRAINING_PATTERN_DISABLE:
2435 *DP |= DP_LINK_TRAIN_OFF;
2436 break;
2437 case DP_TRAINING_PATTERN_1:
2438 *DP |= DP_LINK_TRAIN_PAT_1;
2439 break;
2440 case DP_TRAINING_PATTERN_2:
2441 *DP |= DP_LINK_TRAIN_PAT_2;
2442 break;
2443 case DP_TRAINING_PATTERN_3:
2444 if (IS_CHERRYVIEW(dev)) {
2445 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2446 } else {
2447 DRM_ERROR("DP training pattern 3 not supported\n");
2448 *DP |= DP_LINK_TRAIN_PAT_2;
2449 }
2450 break;
2451 }
2452 }
2453}
2454
2455static void intel_dp_enable_port(struct intel_dp *intel_dp)
2456{
2457 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2458 struct drm_i915_private *dev_priv = dev->dev_private;
2459
7b13b58a
VS
2460 /* enable with pattern 1 (as per spec) */
2461 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2462 DP_TRAINING_PATTERN_1);
2463
2464 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2465 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2466
2467 /*
2468 * Magic for VLV/CHV. We _must_ first set up the register
2469 * without actually enabling the port, and then do another
2470 * write to enable the port. Otherwise link training will
2471 * fail when the power sequencer is freshly used for this port.
2472 */
2473 intel_dp->DP |= DP_PORT_EN;
2474
2475 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2476 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2477}
2478
e8cb4558 2479static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2480{
e8cb4558
DV
2481 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2482 struct drm_device *dev = encoder->base.dev;
2483 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2484 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2485 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
9b6de0a1 2486 unsigned int lane_mask = 0x0;
5d613501 2487
0c33d8d7
DV
2488 if (WARN_ON(dp_reg & DP_PORT_EN))
2489 return;
5d613501 2490
093e3f13
VS
2491 pps_lock(intel_dp);
2492
2493 if (IS_VALLEYVIEW(dev))
2494 vlv_init_panel_power_sequencer(intel_dp);
2495
7b13b58a 2496 intel_dp_enable_port(intel_dp);
093e3f13
VS
2497
2498 edp_panel_vdd_on(intel_dp);
2499 edp_panel_on(intel_dp);
2500 edp_panel_vdd_off(intel_dp, true);
2501
2502 pps_unlock(intel_dp);
2503
61234fa5 2504 if (IS_VALLEYVIEW(dev))
9b6de0a1
VS
2505 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2506 lane_mask);
61234fa5 2507
f01eca2e 2508 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2509 intel_dp_start_link_train(intel_dp);
33a34e4e 2510 intel_dp_complete_link_train(intel_dp);
3ab9c637 2511 intel_dp_stop_link_train(intel_dp);
c1dec79a 2512
6e3c9717 2513 if (crtc->config->has_audio) {
c1dec79a
JN
2514 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2515 pipe_name(crtc->pipe));
2516 intel_audio_codec_enable(encoder);
2517 }
ab1f90f9 2518}
89b667f8 2519
ecff4f3b
JN
2520static void g4x_enable_dp(struct intel_encoder *encoder)
2521{
828f5c6e
JN
2522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2523
ecff4f3b 2524 intel_enable_dp(encoder);
4be73780 2525 intel_edp_backlight_on(intel_dp);
ab1f90f9 2526}
89b667f8 2527
ab1f90f9
JN
2528static void vlv_enable_dp(struct intel_encoder *encoder)
2529{
828f5c6e
JN
2530 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2531
4be73780 2532 intel_edp_backlight_on(intel_dp);
b32c6f48 2533 intel_psr_enable(intel_dp);
d240f20f
JB
2534}
2535
ecff4f3b 2536static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2537{
2538 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2539 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2540
8ac33ed3
DV
2541 intel_dp_prepare(encoder);
2542
d41f1efb
DV
2543 /* Only ilk+ has port A */
2544 if (dport->port == PORT_A) {
2545 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2546 ironlake_edp_pll_on(intel_dp);
d41f1efb 2547 }
ab1f90f9
JN
2548}
2549
83b84597
VS
2550static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2551{
2552 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2553 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2554 enum pipe pipe = intel_dp->pps_pipe;
2555 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2556
2557 edp_panel_vdd_off_sync(intel_dp);
2558
2559 /*
2560 * VLV seems to get confused when multiple power seqeuencers
2561 * have the same port selected (even if only one has power/vdd
2562 * enabled). The failure manifests as vlv_wait_port_ready() failing
2563 * CHV on the other hand doesn't seem to mind having the same port
2564 * selected in multiple power seqeuencers, but let's clear the
2565 * port select always when logically disconnecting a power sequencer
2566 * from a port.
2567 */
2568 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2569 pipe_name(pipe), port_name(intel_dig_port->port));
2570 I915_WRITE(pp_on_reg, 0);
2571 POSTING_READ(pp_on_reg);
2572
2573 intel_dp->pps_pipe = INVALID_PIPE;
2574}
2575
a4a5d2f8
VS
2576static void vlv_steal_power_sequencer(struct drm_device *dev,
2577 enum pipe pipe)
2578{
2579 struct drm_i915_private *dev_priv = dev->dev_private;
2580 struct intel_encoder *encoder;
2581
2582 lockdep_assert_held(&dev_priv->pps_mutex);
2583
ac3c12e4
VS
2584 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2585 return;
2586
a4a5d2f8
VS
2587 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2588 base.head) {
2589 struct intel_dp *intel_dp;
773538e8 2590 enum port port;
a4a5d2f8
VS
2591
2592 if (encoder->type != INTEL_OUTPUT_EDP)
2593 continue;
2594
2595 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2596 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2597
2598 if (intel_dp->pps_pipe != pipe)
2599 continue;
2600
2601 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2602 pipe_name(pipe), port_name(port));
a4a5d2f8 2603
034e43c6
VS
2604 WARN(encoder->connectors_active,
2605 "stealing pipe %c power sequencer from active eDP port %c\n",
2606 pipe_name(pipe), port_name(port));
a4a5d2f8 2607
a4a5d2f8 2608 /* make sure vdd is off before we steal it */
83b84597 2609 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2610 }
2611}
2612
2613static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2614{
2615 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2616 struct intel_encoder *encoder = &intel_dig_port->base;
2617 struct drm_device *dev = encoder->base.dev;
2618 struct drm_i915_private *dev_priv = dev->dev_private;
2619 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2620
2621 lockdep_assert_held(&dev_priv->pps_mutex);
2622
093e3f13
VS
2623 if (!is_edp(intel_dp))
2624 return;
2625
a4a5d2f8
VS
2626 if (intel_dp->pps_pipe == crtc->pipe)
2627 return;
2628
2629 /*
2630 * If another power sequencer was being used on this
2631 * port previously make sure to turn off vdd there while
2632 * we still have control of it.
2633 */
2634 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2635 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2636
2637 /*
2638 * We may be stealing the power
2639 * sequencer from another port.
2640 */
2641 vlv_steal_power_sequencer(dev, crtc->pipe);
2642
2643 /* now it's all ours */
2644 intel_dp->pps_pipe = crtc->pipe;
2645
2646 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2647 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2648
2649 /* init power sequencer on this pipe and port */
36b5f425
VS
2650 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2651 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2652}
2653
ab1f90f9 2654static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2655{
2bd2ad64 2656 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2657 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2658 struct drm_device *dev = encoder->base.dev;
89b667f8 2659 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2660 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2661 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2662 int pipe = intel_crtc->pipe;
2663 u32 val;
a4fc5ed6 2664
ab1f90f9 2665 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2666
ab3c759a 2667 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2668 val = 0;
2669 if (pipe)
2670 val |= (1<<21);
2671 else
2672 val &= ~(1<<21);
2673 val |= 0x001000c4;
ab3c759a
CML
2674 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2675 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2676 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2677
ab1f90f9
JN
2678 mutex_unlock(&dev_priv->dpio_lock);
2679
2680 intel_enable_dp(encoder);
89b667f8
JB
2681}
2682
ecff4f3b 2683static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2684{
2685 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2686 struct drm_device *dev = encoder->base.dev;
2687 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2688 struct intel_crtc *intel_crtc =
2689 to_intel_crtc(encoder->base.crtc);
e4607fcf 2690 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2691 int pipe = intel_crtc->pipe;
89b667f8 2692
8ac33ed3
DV
2693 intel_dp_prepare(encoder);
2694
89b667f8 2695 /* Program Tx lane resets to default */
0980a60f 2696 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2697 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2698 DPIO_PCS_TX_LANE2_RESET |
2699 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2700 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2701 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2702 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2703 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2704 DPIO_PCS_CLK_SOFT_RESET);
2705
2706 /* Fix up inter-pair skew failure */
ab3c759a
CML
2707 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2708 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2709 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2710 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2711}
2712
e4a1d846
CML
2713static void chv_pre_enable_dp(struct intel_encoder *encoder)
2714{
2715 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2716 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2717 struct drm_device *dev = encoder->base.dev;
2718 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2719 struct intel_crtc *intel_crtc =
2720 to_intel_crtc(encoder->base.crtc);
2721 enum dpio_channel ch = vlv_dport_to_channel(dport);
2722 int pipe = intel_crtc->pipe;
2e523e98 2723 int data, i, stagger;
949c1d43 2724 u32 val;
e4a1d846 2725
e4a1d846 2726 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2727
570e2a74
VS
2728 /* allow hardware to manage TX FIFO reset source */
2729 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2730 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2731 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2732
2733 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2734 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2735 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2736
949c1d43 2737 /* Deassert soft data lane reset*/
97fd4d5c 2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2739 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2740 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2741
2742 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2743 val |= CHV_PCS_REQ_SOFTRESET_EN;
2744 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2745
2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2747 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2748 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2749
97fd4d5c 2750 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2751 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2752 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2753
2754 /* Program Tx lane latency optimal setting*/
e4a1d846 2755 for (i = 0; i < 4; i++) {
e4a1d846
CML
2756 /* Set the upar bit */
2757 data = (i == 1) ? 0x0 : 0x1;
2758 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2759 data << DPIO_UPAR_SHIFT);
2760 }
2761
2762 /* Data lane stagger programming */
2e523e98
VS
2763 if (intel_crtc->config->port_clock > 270000)
2764 stagger = 0x18;
2765 else if (intel_crtc->config->port_clock > 135000)
2766 stagger = 0xd;
2767 else if (intel_crtc->config->port_clock > 67500)
2768 stagger = 0x7;
2769 else if (intel_crtc->config->port_clock > 33750)
2770 stagger = 0x4;
2771 else
2772 stagger = 0x2;
2773
2774 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2775 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2776 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2777
2778 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2779 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2780 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2781
2782 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2783 DPIO_LANESTAGGER_STRAP(stagger) |
2784 DPIO_LANESTAGGER_STRAP_OVRD |
2785 DPIO_TX1_STAGGER_MASK(0x1f) |
2786 DPIO_TX1_STAGGER_MULT(6) |
2787 DPIO_TX2_STAGGER_MULT(0));
2788
2789 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2790 DPIO_LANESTAGGER_STRAP(stagger) |
2791 DPIO_LANESTAGGER_STRAP_OVRD |
2792 DPIO_TX1_STAGGER_MASK(0x1f) |
2793 DPIO_TX1_STAGGER_MULT(7) |
2794 DPIO_TX2_STAGGER_MULT(5));
e4a1d846
CML
2795
2796 mutex_unlock(&dev_priv->dpio_lock);
2797
e4a1d846 2798 intel_enable_dp(encoder);
e4a1d846
CML
2799}
2800
9197c88b
VS
2801static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2802{
2803 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2804 struct drm_device *dev = encoder->base.dev;
2805 struct drm_i915_private *dev_priv = dev->dev_private;
2806 struct intel_crtc *intel_crtc =
2807 to_intel_crtc(encoder->base.crtc);
2808 enum dpio_channel ch = vlv_dport_to_channel(dport);
2809 enum pipe pipe = intel_crtc->pipe;
2810 u32 val;
2811
625695f8
VS
2812 intel_dp_prepare(encoder);
2813
9197c88b
VS
2814 mutex_lock(&dev_priv->dpio_lock);
2815
b9e5ac3c
VS
2816 /* program left/right clock distribution */
2817 if (pipe != PIPE_B) {
2818 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2819 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2820 if (ch == DPIO_CH0)
2821 val |= CHV_BUFLEFTENA1_FORCE;
2822 if (ch == DPIO_CH1)
2823 val |= CHV_BUFRIGHTENA1_FORCE;
2824 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2825 } else {
2826 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2827 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2828 if (ch == DPIO_CH0)
2829 val |= CHV_BUFLEFTENA2_FORCE;
2830 if (ch == DPIO_CH1)
2831 val |= CHV_BUFRIGHTENA2_FORCE;
2832 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2833 }
2834
9197c88b
VS
2835 /* program clock channel usage */
2836 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2837 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2838 if (pipe != PIPE_B)
2839 val &= ~CHV_PCS_USEDCLKCHANNEL;
2840 else
2841 val |= CHV_PCS_USEDCLKCHANNEL;
2842 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2843
2844 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2845 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2846 if (pipe != PIPE_B)
2847 val &= ~CHV_PCS_USEDCLKCHANNEL;
2848 else
2849 val |= CHV_PCS_USEDCLKCHANNEL;
2850 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2851
2852 /*
2853 * This a a bit weird since generally CL
2854 * matches the pipe, but here we need to
2855 * pick the CL based on the port.
2856 */
2857 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2858 if (pipe != PIPE_B)
2859 val &= ~CHV_CMN_USEDCLKCHANNEL;
2860 else
2861 val |= CHV_CMN_USEDCLKCHANNEL;
2862 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2863
2864 mutex_unlock(&dev_priv->dpio_lock);
2865}
2866
a4fc5ed6 2867/*
df0c237d
JB
2868 * Native read with retry for link status and receiver capability reads for
2869 * cases where the sink may still be asleep.
9d1a1031
JN
2870 *
2871 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2872 * supposed to retry 3 times per the spec.
a4fc5ed6 2873 */
9d1a1031
JN
2874static ssize_t
2875intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2876 void *buffer, size_t size)
a4fc5ed6 2877{
9d1a1031
JN
2878 ssize_t ret;
2879 int i;
61da5fab 2880
f6a19066
VS
2881 /*
2882 * Sometime we just get the same incorrect byte repeated
2883 * over the entire buffer. Doing just one throw away read
2884 * initially seems to "solve" it.
2885 */
2886 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2887
61da5fab 2888 for (i = 0; i < 3; i++) {
9d1a1031
JN
2889 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2890 if (ret == size)
2891 return ret;
61da5fab
JB
2892 msleep(1);
2893 }
a4fc5ed6 2894
9d1a1031 2895 return ret;
a4fc5ed6
KP
2896}
2897
2898/*
2899 * Fetch AUX CH registers 0x202 - 0x207 which contain
2900 * link status information
2901 */
2902static bool
93f62dad 2903intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2904{
9d1a1031
JN
2905 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2906 DP_LANE0_1_STATUS,
2907 link_status,
2908 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2909}
2910
1100244e 2911/* These are source-specific values. */
a4fc5ed6 2912static uint8_t
1a2eb460 2913intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2914{
30add22d 2915 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2916 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2917 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2918
9314726b
VK
2919 if (IS_BROXTON(dev))
2920 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2921 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 2922 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 2923 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2924 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2925 } else if (IS_VALLEYVIEW(dev))
bd60018a 2926 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2927 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2928 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2929 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2930 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2931 else
bd60018a 2932 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2933}
2934
2935static uint8_t
2936intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2937{
30add22d 2938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2939 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2940
5a9d1f1a
DL
2941 if (INTEL_INFO(dev)->gen >= 9) {
2942 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2944 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2945 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2946 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2948 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2951 default:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2953 }
2954 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2955 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2956 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2957 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2959 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2960 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2961 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2963 default:
bd60018a 2964 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2965 }
e2fa6fba
P
2966 } else if (IS_VALLEYVIEW(dev)) {
2967 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2968 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2969 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2970 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2971 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2972 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2973 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2974 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2975 default:
bd60018a 2976 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2977 }
bc7d38a4 2978 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2979 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2980 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2981 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2982 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2984 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2985 default:
bd60018a 2986 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2987 }
2988 } else {
2989 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2990 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2991 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2992 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2993 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2994 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2995 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2996 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2997 default:
bd60018a 2998 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2999 }
a4fc5ed6
KP
3000 }
3001}
3002
5829975c 3003static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3004{
3005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3006 struct drm_i915_private *dev_priv = dev->dev_private;
3007 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3008 struct intel_crtc *intel_crtc =
3009 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3010 unsigned long demph_reg_value, preemph_reg_value,
3011 uniqtranscale_reg_value;
3012 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3013 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3014 int pipe = intel_crtc->pipe;
e2fa6fba
P
3015
3016 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3017 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3018 preemph_reg_value = 0x0004000;
3019 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3020 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3021 demph_reg_value = 0x2B405555;
3022 uniqtranscale_reg_value = 0x552AB83A;
3023 break;
bd60018a 3024 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3025 demph_reg_value = 0x2B404040;
3026 uniqtranscale_reg_value = 0x5548B83A;
3027 break;
bd60018a 3028 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3029 demph_reg_value = 0x2B245555;
3030 uniqtranscale_reg_value = 0x5560B83A;
3031 break;
bd60018a 3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3033 demph_reg_value = 0x2B405555;
3034 uniqtranscale_reg_value = 0x5598DA3A;
3035 break;
3036 default:
3037 return 0;
3038 }
3039 break;
bd60018a 3040 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3041 preemph_reg_value = 0x0002000;
3042 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3043 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3044 demph_reg_value = 0x2B404040;
3045 uniqtranscale_reg_value = 0x5552B83A;
3046 break;
bd60018a 3047 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3048 demph_reg_value = 0x2B404848;
3049 uniqtranscale_reg_value = 0x5580B83A;
3050 break;
bd60018a 3051 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3052 demph_reg_value = 0x2B404040;
3053 uniqtranscale_reg_value = 0x55ADDA3A;
3054 break;
3055 default:
3056 return 0;
3057 }
3058 break;
bd60018a 3059 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3060 preemph_reg_value = 0x0000000;
3061 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3062 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3063 demph_reg_value = 0x2B305555;
3064 uniqtranscale_reg_value = 0x5570B83A;
3065 break;
bd60018a 3066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3067 demph_reg_value = 0x2B2B4040;
3068 uniqtranscale_reg_value = 0x55ADDA3A;
3069 break;
3070 default:
3071 return 0;
3072 }
3073 break;
bd60018a 3074 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3075 preemph_reg_value = 0x0006000;
3076 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3078 demph_reg_value = 0x1B405555;
3079 uniqtranscale_reg_value = 0x55ADDA3A;
3080 break;
3081 default:
3082 return 0;
3083 }
3084 break;
3085 default:
3086 return 0;
3087 }
3088
0980a60f 3089 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3090 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3091 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3092 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3093 uniqtranscale_reg_value);
ab3c759a
CML
3094 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3095 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3096 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3097 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3098 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3099
3100 return 0;
3101}
3102
5829975c 3103static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3104{
3105 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3106 struct drm_i915_private *dev_priv = dev->dev_private;
3107 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3108 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3109 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3110 uint8_t train_set = intel_dp->train_set[0];
3111 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3112 enum pipe pipe = intel_crtc->pipe;
3113 int i;
e4a1d846
CML
3114
3115 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3116 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3117 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3119 deemph_reg_value = 128;
3120 margin_reg_value = 52;
3121 break;
bd60018a 3122 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3123 deemph_reg_value = 128;
3124 margin_reg_value = 77;
3125 break;
bd60018a 3126 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3127 deemph_reg_value = 128;
3128 margin_reg_value = 102;
3129 break;
bd60018a 3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3131 deemph_reg_value = 128;
3132 margin_reg_value = 154;
3133 /* FIXME extra to set for 1200 */
3134 break;
3135 default:
3136 return 0;
3137 }
3138 break;
bd60018a 3139 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3140 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3142 deemph_reg_value = 85;
3143 margin_reg_value = 78;
3144 break;
bd60018a 3145 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3146 deemph_reg_value = 85;
3147 margin_reg_value = 116;
3148 break;
bd60018a 3149 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3150 deemph_reg_value = 85;
3151 margin_reg_value = 154;
3152 break;
3153 default:
3154 return 0;
3155 }
3156 break;
bd60018a 3157 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3158 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3159 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3160 deemph_reg_value = 64;
3161 margin_reg_value = 104;
3162 break;
bd60018a 3163 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3164 deemph_reg_value = 64;
3165 margin_reg_value = 154;
3166 break;
3167 default:
3168 return 0;
3169 }
3170 break;
bd60018a 3171 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3172 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3173 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3174 deemph_reg_value = 43;
3175 margin_reg_value = 154;
3176 break;
3177 default:
3178 return 0;
3179 }
3180 break;
3181 default:
3182 return 0;
3183 }
3184
3185 mutex_lock(&dev_priv->dpio_lock);
3186
3187 /* Clear calc init */
1966e59e
VS
3188 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3189 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3190 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3191 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3192 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3193
3194 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3195 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3196 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3197 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3198 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3199
a02ef3c7
VS
3200 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3201 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3202 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3203 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3204
3205 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3206 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3207 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3208 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3209
e4a1d846 3210 /* Program swing deemph */
f72df8db
VS
3211 for (i = 0; i < 4; i++) {
3212 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3213 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3214 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3215 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3216 }
e4a1d846
CML
3217
3218 /* Program swing margin */
f72df8db
VS
3219 for (i = 0; i < 4; i++) {
3220 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3221 val &= ~DPIO_SWING_MARGIN000_MASK;
3222 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3223 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3224 }
e4a1d846
CML
3225
3226 /* Disable unique transition scale */
f72df8db
VS
3227 for (i = 0; i < 4; i++) {
3228 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3229 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3230 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3231 }
e4a1d846
CML
3232
3233 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3234 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3235 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3236 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3237
3238 /*
3239 * The document said it needs to set bit 27 for ch0 and bit 26
3240 * for ch1. Might be a typo in the doc.
3241 * For now, for this unique transition scale selection, set bit
3242 * 27 for ch0 and ch1.
3243 */
f72df8db
VS
3244 for (i = 0; i < 4; i++) {
3245 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3246 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3247 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3248 }
e4a1d846 3249
f72df8db
VS
3250 for (i = 0; i < 4; i++) {
3251 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3252 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3253 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3254 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3255 }
e4a1d846
CML
3256 }
3257
3258 /* Start swing calculation */
1966e59e
VS
3259 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3260 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3261 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3262
3263 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3264 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3265 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3266
3267 /* LRC Bypass */
3268 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3269 val |= DPIO_LRC_BYPASS;
3270 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3271
3272 mutex_unlock(&dev_priv->dpio_lock);
3273
3274 return 0;
3275}
3276
a4fc5ed6 3277static void
0301b3ac
JN
3278intel_get_adjust_train(struct intel_dp *intel_dp,
3279 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3280{
3281 uint8_t v = 0;
3282 uint8_t p = 0;
3283 int lane;
1a2eb460
KP
3284 uint8_t voltage_max;
3285 uint8_t preemph_max;
a4fc5ed6 3286
33a34e4e 3287 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3288 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3289 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3290
3291 if (this_v > v)
3292 v = this_v;
3293 if (this_p > p)
3294 p = this_p;
3295 }
3296
1a2eb460 3297 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3298 if (v >= voltage_max)
3299 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3300
1a2eb460
KP
3301 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3302 if (p >= preemph_max)
3303 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3304
3305 for (lane = 0; lane < 4; lane++)
33a34e4e 3306 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3307}
3308
3309static uint32_t
5829975c 3310gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3311{
3cf2efb1 3312 uint32_t signal_levels = 0;
a4fc5ed6 3313
3cf2efb1 3314 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3316 default:
3317 signal_levels |= DP_VOLTAGE_0_4;
3318 break;
bd60018a 3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3320 signal_levels |= DP_VOLTAGE_0_6;
3321 break;
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3323 signal_levels |= DP_VOLTAGE_0_8;
3324 break;
bd60018a 3325 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3326 signal_levels |= DP_VOLTAGE_1_2;
3327 break;
3328 }
3cf2efb1 3329 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3330 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3331 default:
3332 signal_levels |= DP_PRE_EMPHASIS_0;
3333 break;
bd60018a 3334 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3335 signal_levels |= DP_PRE_EMPHASIS_3_5;
3336 break;
bd60018a 3337 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3338 signal_levels |= DP_PRE_EMPHASIS_6;
3339 break;
bd60018a 3340 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3341 signal_levels |= DP_PRE_EMPHASIS_9_5;
3342 break;
3343 }
3344 return signal_levels;
3345}
3346
e3421a18
ZW
3347/* Gen6's DP voltage swing and pre-emphasis control */
3348static uint32_t
5829975c 3349gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3350{
3c5a62b5
YL
3351 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3352 DP_TRAIN_PRE_EMPHASIS_MASK);
3353 switch (signal_levels) {
bd60018a
SJ
3354 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3356 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3358 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3359 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3361 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3364 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3367 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3368 default:
3c5a62b5
YL
3369 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3370 "0x%x\n", signal_levels);
3371 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3372 }
3373}
3374
1a2eb460
KP
3375/* Gen7's DP voltage swing and pre-emphasis control */
3376static uint32_t
5829975c 3377gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3378{
3379 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3380 DP_TRAIN_PRE_EMPHASIS_MASK);
3381 switch (signal_levels) {
bd60018a 3382 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3383 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3385 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3387 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3388
bd60018a 3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3390 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3391 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3392 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3393
bd60018a 3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3395 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3397 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3398
3399 default:
3400 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3401 "0x%x\n", signal_levels);
3402 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3403 }
3404}
3405
d6c0d722
PZ
3406/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3407static uint32_t
5829975c 3408hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3409{
d6c0d722
PZ
3410 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3411 DP_TRAIN_PRE_EMPHASIS_MASK);
3412 switch (signal_levels) {
bd60018a 3413 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3414 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3415 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3416 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3417 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3418 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3419 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3420 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3421
bd60018a 3422 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3423 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3424 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3425 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3426 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3427 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3428
bd60018a 3429 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3430 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3431 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3432 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3433
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3435 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3436 default:
3437 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3438 "0x%x\n", signal_levels);
c5fe6a06 3439 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3440 }
a4fc5ed6
KP
3441}
3442
5829975c 3443static void bxt_signal_levels(struct intel_dp *intel_dp)
96fb9f9b
VK
3444{
3445 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3446 enum port port = dport->port;
3447 struct drm_device *dev = dport->base.base.dev;
3448 struct intel_encoder *encoder = &dport->base;
3449 uint8_t train_set = intel_dp->train_set[0];
3450 uint32_t level = 0;
3451
3452 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3453 DP_TRAIN_PRE_EMPHASIS_MASK);
3454 switch (signal_levels) {
3455 default:
3456 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emph level\n");
3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3458 level = 0;
3459 break;
3460 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3461 level = 1;
3462 break;
3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3464 level = 2;
3465 break;
3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
3467 level = 3;
3468 break;
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3470 level = 4;
3471 break;
3472 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3473 level = 5;
3474 break;
3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3476 level = 6;
3477 break;
3478 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3479 level = 7;
3480 break;
3481 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3482 level = 8;
3483 break;
3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3485 level = 9;
3486 break;
3487 }
3488
3489 bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
3490}
3491
f0a3424e
PZ
3492/* Properly updates "DP" with the correct signal levels. */
3493static void
3494intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3495{
3496 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3497 enum port port = intel_dig_port->port;
f0a3424e
PZ
3498 struct drm_device *dev = intel_dig_port->base.base.dev;
3499 uint32_t signal_levels, mask;
3500 uint8_t train_set = intel_dp->train_set[0];
3501
96fb9f9b
VK
3502 if (IS_BROXTON(dev)) {
3503 signal_levels = 0;
5829975c 3504 bxt_signal_levels(intel_dp);
96fb9f9b
VK
3505 mask = 0;
3506 } else if (HAS_DDI(dev)) {
5829975c 3507 signal_levels = hsw_signal_levels(train_set);
f0a3424e 3508 mask = DDI_BUF_EMP_MASK;
e4a1d846 3509 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3510 signal_levels = chv_signal_levels(intel_dp);
e4a1d846 3511 mask = 0;
e2fa6fba 3512 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3513 signal_levels = vlv_signal_levels(intel_dp);
e2fa6fba 3514 mask = 0;
bc7d38a4 3515 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3516 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3517 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3518 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3519 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3520 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3521 } else {
5829975c 3522 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3523 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3524 }
3525
96fb9f9b
VK
3526 if (mask)
3527 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3528
3529 DRM_DEBUG_KMS("Using vswing level %d\n",
3530 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3531 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3532 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3533 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3534
3535 *DP = (*DP & ~mask) | signal_levels;
3536}
3537
a4fc5ed6 3538static bool
ea5b213a 3539intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3540 uint32_t *DP,
58e10eb9 3541 uint8_t dp_train_pat)
a4fc5ed6 3542{
174edf1f
PZ
3543 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3544 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3545 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3546 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3547 int ret, len;
a4fc5ed6 3548
7b13b58a 3549 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3550
70aff66c 3551 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3552 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3553
2cdfe6c8
JN
3554 buf[0] = dp_train_pat;
3555 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3556 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3557 /* don't write DP_TRAINING_LANEx_SET on disable */
3558 len = 1;
3559 } else {
3560 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3561 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3562 len = intel_dp->lane_count + 1;
47ea7542 3563 }
a4fc5ed6 3564
9d1a1031
JN
3565 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3566 buf, len);
2cdfe6c8
JN
3567
3568 return ret == len;
a4fc5ed6
KP
3569}
3570
70aff66c
JN
3571static bool
3572intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3573 uint8_t dp_train_pat)
3574{
4e96c977
MK
3575 if (!intel_dp->train_set_valid)
3576 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3577 intel_dp_set_signal_levels(intel_dp, DP);
3578 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3579}
3580
3581static bool
3582intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3583 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3584{
3585 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3586 struct drm_device *dev = intel_dig_port->base.base.dev;
3587 struct drm_i915_private *dev_priv = dev->dev_private;
3588 int ret;
3589
3590 intel_get_adjust_train(intel_dp, link_status);
3591 intel_dp_set_signal_levels(intel_dp, DP);
3592
3593 I915_WRITE(intel_dp->output_reg, *DP);
3594 POSTING_READ(intel_dp->output_reg);
3595
9d1a1031
JN
3596 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3597 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3598
3599 return ret == intel_dp->lane_count;
3600}
3601
3ab9c637
ID
3602static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3603{
3604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3605 struct drm_device *dev = intel_dig_port->base.base.dev;
3606 struct drm_i915_private *dev_priv = dev->dev_private;
3607 enum port port = intel_dig_port->port;
3608 uint32_t val;
3609
3610 if (!HAS_DDI(dev))
3611 return;
3612
3613 val = I915_READ(DP_TP_CTL(port));
3614 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3615 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3616 I915_WRITE(DP_TP_CTL(port), val);
3617
3618 /*
3619 * On PORT_A we can have only eDP in SST mode. There the only reason
3620 * we need to set idle transmission mode is to work around a HW issue
3621 * where we enable the pipe while not in idle link-training mode.
3622 * In this case there is requirement to wait for a minimum number of
3623 * idle patterns to be sent.
3624 */
3625 if (port == PORT_A)
3626 return;
3627
3628 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3629 1))
3630 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3631}
3632
33a34e4e 3633/* Enable corresponding port and start training pattern 1 */
c19b0669 3634void
33a34e4e 3635intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3636{
da63a9f2 3637 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3638 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3639 int i;
3640 uint8_t voltage;
cdb0e95b 3641 int voltage_tries, loop_tries;
ea5b213a 3642 uint32_t DP = intel_dp->DP;
6aba5b6c 3643 uint8_t link_config[2];
a4fc5ed6 3644
affa9354 3645 if (HAS_DDI(dev))
c19b0669
PZ
3646 intel_ddi_prepare_link_retrain(encoder);
3647
3cf2efb1 3648 /* Write the link configuration data */
6aba5b6c
JN
3649 link_config[0] = intel_dp->link_bw;
3650 link_config[1] = intel_dp->lane_count;
3651 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3652 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3653 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3654 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3655 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3656 &intel_dp->rate_select, 1);
6aba5b6c
JN
3657
3658 link_config[0] = 0;
3659 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3660 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3661
3662 DP |= DP_PORT_EN;
1a2eb460 3663
70aff66c
JN
3664 /* clock recovery */
3665 if (!intel_dp_reset_link_train(intel_dp, &DP,
3666 DP_TRAINING_PATTERN_1 |
3667 DP_LINK_SCRAMBLING_DISABLE)) {
3668 DRM_ERROR("failed to enable link training\n");
3669 return;
3670 }
3671
a4fc5ed6 3672 voltage = 0xff;
cdb0e95b
KP
3673 voltage_tries = 0;
3674 loop_tries = 0;
a4fc5ed6 3675 for (;;) {
70aff66c 3676 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3677
a7c9655f 3678 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3679 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3680 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3681 break;
93f62dad 3682 }
a4fc5ed6 3683
01916270 3684 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3685 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3686 break;
3687 }
3688
4e96c977
MK
3689 /*
3690 * if we used previously trained voltage and pre-emphasis values
3691 * and we don't get clock recovery, reset link training values
3692 */
3693 if (intel_dp->train_set_valid) {
3694 DRM_DEBUG_KMS("clock recovery not ok, reset");
3695 /* clear the flag as we are not reusing train set */
3696 intel_dp->train_set_valid = false;
3697 if (!intel_dp_reset_link_train(intel_dp, &DP,
3698 DP_TRAINING_PATTERN_1 |
3699 DP_LINK_SCRAMBLING_DISABLE)) {
3700 DRM_ERROR("failed to enable link training\n");
3701 return;
3702 }
3703 continue;
3704 }
3705
3cf2efb1
CW
3706 /* Check to see if we've tried the max voltage */
3707 for (i = 0; i < intel_dp->lane_count; i++)
3708 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3709 break;
3b4f819d 3710 if (i == intel_dp->lane_count) {
b06fbda3
DV
3711 ++loop_tries;
3712 if (loop_tries == 5) {
3def84b3 3713 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3714 break;
3715 }
70aff66c
JN
3716 intel_dp_reset_link_train(intel_dp, &DP,
3717 DP_TRAINING_PATTERN_1 |
3718 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3719 voltage_tries = 0;
3720 continue;
3721 }
a4fc5ed6 3722
3cf2efb1 3723 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3724 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3725 ++voltage_tries;
b06fbda3 3726 if (voltage_tries == 5) {
3def84b3 3727 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3728 break;
3729 }
3730 } else
3731 voltage_tries = 0;
3732 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3733
70aff66c
JN
3734 /* Update training set as requested by target */
3735 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3736 DRM_ERROR("failed to update link training\n");
3737 break;
3738 }
a4fc5ed6
KP
3739 }
3740
33a34e4e
JB
3741 intel_dp->DP = DP;
3742}
3743
c19b0669 3744void
33a34e4e
JB
3745intel_dp_complete_link_train(struct intel_dp *intel_dp)
3746{
33a34e4e 3747 bool channel_eq = false;
37f80975 3748 int tries, cr_tries;
33a34e4e 3749 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3750 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3751
3752 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3753 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3754 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3755
a4fc5ed6 3756 /* channel equalization */
70aff66c 3757 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3758 training_pattern |
70aff66c
JN
3759 DP_LINK_SCRAMBLING_DISABLE)) {
3760 DRM_ERROR("failed to start channel equalization\n");
3761 return;
3762 }
3763
a4fc5ed6 3764 tries = 0;
37f80975 3765 cr_tries = 0;
a4fc5ed6
KP
3766 channel_eq = false;
3767 for (;;) {
70aff66c 3768 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3769
37f80975
JB
3770 if (cr_tries > 5) {
3771 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3772 break;
3773 }
3774
a7c9655f 3775 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3776 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3777 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3778 break;
70aff66c 3779 }
a4fc5ed6 3780
37f80975 3781 /* Make sure clock is still ok */
01916270 3782 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
4e96c977 3783 intel_dp->train_set_valid = false;
37f80975 3784 intel_dp_start_link_train(intel_dp);
70aff66c 3785 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3786 training_pattern |
70aff66c 3787 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3788 cr_tries++;
3789 continue;
3790 }
3791
1ffdff13 3792 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3793 channel_eq = true;
3794 break;
3795 }
a4fc5ed6 3796
37f80975
JB
3797 /* Try 5 times, then try clock recovery if that fails */
3798 if (tries > 5) {
4e96c977 3799 intel_dp->train_set_valid = false;
37f80975 3800 intel_dp_start_link_train(intel_dp);
70aff66c 3801 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3802 training_pattern |
70aff66c 3803 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3804 tries = 0;
3805 cr_tries++;
3806 continue;
3807 }
a4fc5ed6 3808
70aff66c
JN
3809 /* Update training set as requested by target */
3810 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3811 DRM_ERROR("failed to update link training\n");
3812 break;
3813 }
3cf2efb1 3814 ++tries;
869184a6 3815 }
3cf2efb1 3816
3ab9c637
ID
3817 intel_dp_set_idle_link_train(intel_dp);
3818
3819 intel_dp->DP = DP;
3820
4e96c977 3821 if (channel_eq) {
5fa836a9 3822 intel_dp->train_set_valid = true;
07f42258 3823 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3824 }
3ab9c637
ID
3825}
3826
3827void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3828{
70aff66c 3829 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3830 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3831}
3832
3833static void
ea5b213a 3834intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3835{
da63a9f2 3836 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3837 enum port port = intel_dig_port->port;
da63a9f2 3838 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3839 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3840 uint32_t DP = intel_dp->DP;
a4fc5ed6 3841
bc76e320 3842 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3843 return;
3844
0c33d8d7 3845 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3846 return;
3847
28c97730 3848 DRM_DEBUG_KMS("\n");
32f9d658 3849
39e5fa88
VS
3850 if ((IS_GEN7(dev) && port == PORT_A) ||
3851 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3852 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3853 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3854 } else {
aad3d14d
VS
3855 if (IS_CHERRYVIEW(dev))
3856 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3857 else
3858 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3859 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3860 }
fe255d00 3861 POSTING_READ(intel_dp->output_reg);
5eb08b69 3862
493a7081 3863 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3864 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3865 /* Hardware workaround: leaving our transcoder select
3866 * set to transcoder B while it's off will prevent the
3867 * corresponding HDMI output on transcoder A.
3868 *
3869 * Combine this with another hardware workaround:
3870 * transcoder select bit can only be cleared while the
3871 * port is enabled.
3872 */
3873 DP &= ~DP_PIPEB_SELECT;
3874 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3875 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3876 }
3877
832afda6 3878 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3879 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3880 POSTING_READ(intel_dp->output_reg);
f01eca2e 3881 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3882}
3883
26d61aad
KP
3884static bool
3885intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3886{
a031d709
RV
3887 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3888 struct drm_device *dev = dig_port->base.base.dev;
3889 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3890 uint8_t rev;
a031d709 3891
9d1a1031
JN
3892 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3893 sizeof(intel_dp->dpcd)) < 0)
edb39244 3894 return false; /* aux transfer failed */
92fd8fd1 3895
a8e98153 3896 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3897
edb39244
AJ
3898 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3899 return false; /* DPCD not present */
3900
2293bb5c
SK
3901 /* Check if the panel supports PSR */
3902 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3903 if (is_edp(intel_dp)) {
9d1a1031
JN
3904 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3905 intel_dp->psr_dpcd,
3906 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3907 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3908 dev_priv->psr.sink_support = true;
50003939 3909 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3910 }
474d1ec4
SJ
3911
3912 if (INTEL_INFO(dev)->gen >= 9 &&
3913 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3914 uint8_t frame_sync_cap;
3915
3916 dev_priv->psr.sink_support = true;
3917 intel_dp_dpcd_read_wake(&intel_dp->aux,
3918 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3919 &frame_sync_cap, 1);
3920 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3921 /* PSR2 needs frame sync as well */
3922 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3923 DRM_DEBUG_KMS("PSR2 %s on sink",
3924 dev_priv->psr.psr2_support ? "supported" : "not supported");
3925 }
50003939
JN
3926 }
3927
7809a611 3928 /* Training Pattern 3 support, both source and sink */
06ea66b6 3929 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3930 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3931 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3932 intel_dp->use_tps3 = true;
f8d8a672 3933 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3934 } else
3935 intel_dp->use_tps3 = false;
3936
fc0f8e25
SJ
3937 /* Intermediate frequency support */
3938 if (is_edp(intel_dp) &&
3939 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3940 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3941 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3942 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3943 int i;
3944
fc0f8e25
SJ
3945 intel_dp_dpcd_read_wake(&intel_dp->aux,
3946 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3947 sink_rates,
3948 sizeof(sink_rates));
ea2d8a42 3949
94ca719e
VS
3950 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3951 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3952
3953 if (val == 0)
3954 break;
3955
af77b974
SJ
3956 /* Value read is in kHz while drm clock is saved in deca-kHz */
3957 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3958 }
94ca719e 3959 intel_dp->num_sink_rates = i;
fc0f8e25 3960 }
0336400e
VS
3961
3962 intel_dp_print_rates(intel_dp);
3963
edb39244
AJ
3964 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3965 DP_DWN_STRM_PORT_PRESENT))
3966 return true; /* native DP sink */
3967
3968 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3969 return true; /* no per-port downstream info */
3970
9d1a1031
JN
3971 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3972 intel_dp->downstream_ports,
3973 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3974 return false; /* downstream port status fetch failed */
3975
3976 return true;
92fd8fd1
KP
3977}
3978
0d198328
AJ
3979static void
3980intel_dp_probe_oui(struct intel_dp *intel_dp)
3981{
3982 u8 buf[3];
3983
3984 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3985 return;
3986
9d1a1031 3987 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3988 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3989 buf[0], buf[1], buf[2]);
3990
9d1a1031 3991 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3992 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3993 buf[0], buf[1], buf[2]);
3994}
3995
0e32b39c
DA
3996static bool
3997intel_dp_probe_mst(struct intel_dp *intel_dp)
3998{
3999 u8 buf[1];
4000
4001 if (!intel_dp->can_mst)
4002 return false;
4003
4004 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4005 return false;
4006
0e32b39c
DA
4007 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4008 if (buf[0] & DP_MST_CAP) {
4009 DRM_DEBUG_KMS("Sink is MST capable\n");
4010 intel_dp->is_mst = true;
4011 } else {
4012 DRM_DEBUG_KMS("Sink is not MST capable\n");
4013 intel_dp->is_mst = false;
4014 }
4015 }
0e32b39c
DA
4016
4017 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4018 return intel_dp->is_mst;
4019}
4020
d2e216d0
RV
4021int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4022{
4023 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4024 struct drm_device *dev = intel_dig_port->base.base.dev;
4025 struct intel_crtc *intel_crtc =
4026 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
4027 u8 buf;
4028 int test_crc_count;
4029 int attempts = 6;
d2e216d0 4030
ad9dc91b 4031 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 4032 return -EIO;
d2e216d0 4033
ad9dc91b 4034 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
4035 return -ENOTTY;
4036
1dda5f93
RV
4037 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4038 return -EIO;
4039
9d1a1031 4040 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 4041 buf | DP_TEST_SINK_START) < 0)
bda0381e 4042 return -EIO;
d2e216d0 4043
1dda5f93 4044 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 4045 return -EIO;
ad9dc91b 4046 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 4047
ad9dc91b 4048 do {
1dda5f93
RV
4049 if (drm_dp_dpcd_readb(&intel_dp->aux,
4050 DP_TEST_SINK_MISC, &buf) < 0)
4051 return -EIO;
ad9dc91b
RV
4052 intel_wait_for_vblank(dev, intel_crtc->pipe);
4053 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
4054
4055 if (attempts == 0) {
90bd1f46
DV
4056 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
4057 return -ETIMEDOUT;
ad9dc91b 4058 }
d2e216d0 4059
9d1a1031 4060 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 4061 return -EIO;
d2e216d0 4062
1dda5f93
RV
4063 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4064 return -EIO;
4065 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4066 buf & ~DP_TEST_SINK_START) < 0)
4067 return -EIO;
ce31d9f4 4068
d2e216d0
RV
4069 return 0;
4070}
4071
a60f0e38
JB
4072static bool
4073intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4074{
9d1a1031
JN
4075 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4076 DP_DEVICE_SERVICE_IRQ_VECTOR,
4077 sink_irq_vector, 1) == 1;
a60f0e38
JB
4078}
4079
0e32b39c
DA
4080static bool
4081intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4082{
4083 int ret;
4084
4085 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4086 DP_SINK_COUNT_ESI,
4087 sink_irq_vector, 14);
4088 if (ret != 14)
4089 return false;
4090
4091 return true;
4092}
4093
c5d5ab7a
TP
4094static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4095{
4096 uint8_t test_result = DP_TEST_ACK;
4097 return test_result;
4098}
4099
4100static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4101{
4102 uint8_t test_result = DP_TEST_NAK;
4103 return test_result;
4104}
4105
4106static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4107{
c5d5ab7a 4108 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4109 struct intel_connector *intel_connector = intel_dp->attached_connector;
4110 struct drm_connector *connector = &intel_connector->base;
4111
4112 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4113 connector->edid_corrupt ||
559be30c
TP
4114 intel_dp->aux.i2c_defer_count > 6) {
4115 /* Check EDID read for NACKs, DEFERs and corruption
4116 * (DP CTS 1.2 Core r1.1)
4117 * 4.2.2.4 : Failed EDID read, I2C_NAK
4118 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4119 * 4.2.2.6 : EDID corruption detected
4120 * Use failsafe mode for all cases
4121 */
4122 if (intel_dp->aux.i2c_nack_count > 0 ||
4123 intel_dp->aux.i2c_defer_count > 0)
4124 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4125 intel_dp->aux.i2c_nack_count,
4126 intel_dp->aux.i2c_defer_count);
4127 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4128 } else {
4129 if (!drm_dp_dpcd_write(&intel_dp->aux,
4130 DP_TEST_EDID_CHECKSUM,
4131 &intel_connector->detect_edid->checksum,
5a1cc655 4132 1))
559be30c
TP
4133 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4134
4135 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4136 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4137 }
4138
4139 /* Set test active flag here so userspace doesn't interrupt things */
4140 intel_dp->compliance_test_active = 1;
4141
c5d5ab7a
TP
4142 return test_result;
4143}
4144
4145static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4146{
c5d5ab7a
TP
4147 uint8_t test_result = DP_TEST_NAK;
4148 return test_result;
4149}
4150
4151static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4152{
4153 uint8_t response = DP_TEST_NAK;
4154 uint8_t rxdata = 0;
4155 int status = 0;
4156
559be30c 4157 intel_dp->compliance_test_active = 0;
c5d5ab7a 4158 intel_dp->compliance_test_type = 0;
559be30c
TP
4159 intel_dp->compliance_test_data = 0;
4160
c5d5ab7a
TP
4161 intel_dp->aux.i2c_nack_count = 0;
4162 intel_dp->aux.i2c_defer_count = 0;
4163
4164 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4165 if (status <= 0) {
4166 DRM_DEBUG_KMS("Could not read test request from sink\n");
4167 goto update_status;
4168 }
4169
4170 switch (rxdata) {
4171 case DP_TEST_LINK_TRAINING:
4172 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4173 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4174 response = intel_dp_autotest_link_training(intel_dp);
4175 break;
4176 case DP_TEST_LINK_VIDEO_PATTERN:
4177 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4178 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4179 response = intel_dp_autotest_video_pattern(intel_dp);
4180 break;
4181 case DP_TEST_LINK_EDID_READ:
4182 DRM_DEBUG_KMS("EDID test requested\n");
4183 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4184 response = intel_dp_autotest_edid(intel_dp);
4185 break;
4186 case DP_TEST_LINK_PHY_TEST_PATTERN:
4187 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4188 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4189 response = intel_dp_autotest_phy_pattern(intel_dp);
4190 break;
4191 default:
4192 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4193 break;
4194 }
4195
4196update_status:
4197 status = drm_dp_dpcd_write(&intel_dp->aux,
4198 DP_TEST_RESPONSE,
4199 &response, 1);
4200 if (status <= 0)
4201 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4202}
4203
0e32b39c
DA
4204static int
4205intel_dp_check_mst_status(struct intel_dp *intel_dp)
4206{
4207 bool bret;
4208
4209 if (intel_dp->is_mst) {
4210 u8 esi[16] = { 0 };
4211 int ret = 0;
4212 int retry;
4213 bool handled;
4214 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4215go_again:
4216 if (bret == true) {
4217
4218 /* check link status - esi[10] = 0x200c */
4219 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4220 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4221 intel_dp_start_link_train(intel_dp);
4222 intel_dp_complete_link_train(intel_dp);
4223 intel_dp_stop_link_train(intel_dp);
4224 }
4225
6f34cc39 4226 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4227 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4228
4229 if (handled) {
4230 for (retry = 0; retry < 3; retry++) {
4231 int wret;
4232 wret = drm_dp_dpcd_write(&intel_dp->aux,
4233 DP_SINK_COUNT_ESI+1,
4234 &esi[1], 3);
4235 if (wret == 3) {
4236 break;
4237 }
4238 }
4239
4240 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4241 if (bret == true) {
6f34cc39 4242 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4243 goto go_again;
4244 }
4245 } else
4246 ret = 0;
4247
4248 return ret;
4249 } else {
4250 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4251 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4252 intel_dp->is_mst = false;
4253 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4254 /* send a hotplug event */
4255 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4256 }
4257 }
4258 return -EINVAL;
4259}
4260
a4fc5ed6
KP
4261/*
4262 * According to DP spec
4263 * 5.1.2:
4264 * 1. Read DPCD
4265 * 2. Configure link according to Receiver Capabilities
4266 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4267 * 4. Check link status on receipt of hot-plug interrupt
4268 */
a5146200 4269static void
ea5b213a 4270intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4271{
5b215bcf 4272 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4273 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4274 u8 sink_irq_vector;
93f62dad 4275 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4276
5b215bcf
DA
4277 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4278
da63a9f2 4279 if (!intel_encoder->connectors_active)
d2b996ac 4280 return;
59cd09e1 4281
da63a9f2 4282 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4283 return;
4284
1a125d8a
ID
4285 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4286 return;
4287
92fd8fd1 4288 /* Try to read receiver status if the link appears to be up */
93f62dad 4289 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4290 return;
4291 }
4292
92fd8fd1 4293 /* Now read the DPCD to see if it's actually running */
26d61aad 4294 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4295 return;
4296 }
4297
a60f0e38
JB
4298 /* Try to read the source of the interrupt */
4299 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4300 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4301 /* Clear interrupt source */
9d1a1031
JN
4302 drm_dp_dpcd_writeb(&intel_dp->aux,
4303 DP_DEVICE_SERVICE_IRQ_VECTOR,
4304 sink_irq_vector);
a60f0e38
JB
4305
4306 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4307 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4308 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4309 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4310 }
4311
1ffdff13 4312 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4313 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4314 intel_encoder->base.name);
33a34e4e
JB
4315 intel_dp_start_link_train(intel_dp);
4316 intel_dp_complete_link_train(intel_dp);
3ab9c637 4317 intel_dp_stop_link_train(intel_dp);
33a34e4e 4318 }
a4fc5ed6 4319}
a4fc5ed6 4320
caf9ab24 4321/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4322static enum drm_connector_status
26d61aad 4323intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4324{
caf9ab24 4325 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4326 uint8_t type;
4327
4328 if (!intel_dp_get_dpcd(intel_dp))
4329 return connector_status_disconnected;
4330
4331 /* if there's no downstream port, we're done */
4332 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4333 return connector_status_connected;
caf9ab24
AJ
4334
4335 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4336 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4337 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4338 uint8_t reg;
9d1a1031
JN
4339
4340 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4341 &reg, 1) < 0)
caf9ab24 4342 return connector_status_unknown;
9d1a1031 4343
23235177
AJ
4344 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4345 : connector_status_disconnected;
caf9ab24
AJ
4346 }
4347
4348 /* If no HPD, poke DDC gently */
0b99836f 4349 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4350 return connector_status_connected;
caf9ab24
AJ
4351
4352 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4353 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4354 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4355 if (type == DP_DS_PORT_TYPE_VGA ||
4356 type == DP_DS_PORT_TYPE_NON_EDID)
4357 return connector_status_unknown;
4358 } else {
4359 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4360 DP_DWN_STRM_PORT_TYPE_MASK;
4361 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4362 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4363 return connector_status_unknown;
4364 }
caf9ab24
AJ
4365
4366 /* Anything else is out of spec, warn and ignore */
4367 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4368 return connector_status_disconnected;
71ba9000
AJ
4369}
4370
d410b56d
CW
4371static enum drm_connector_status
4372edp_detect(struct intel_dp *intel_dp)
4373{
4374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4375 enum drm_connector_status status;
4376
4377 status = intel_panel_detect(dev);
4378 if (status == connector_status_unknown)
4379 status = connector_status_connected;
4380
4381 return status;
4382}
4383
5eb08b69 4384static enum drm_connector_status
a9756bb5 4385ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4386{
30add22d 4387 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4388 struct drm_i915_private *dev_priv = dev->dev_private;
4389 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4390
1b469639
DL
4391 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4392 return connector_status_disconnected;
4393
26d61aad 4394 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4395}
4396
2a592bec
DA
4397static int g4x_digital_port_connected(struct drm_device *dev,
4398 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4399{
a4fc5ed6 4400 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4401 uint32_t bit;
5eb08b69 4402
232a6ee9
TP
4403 if (IS_VALLEYVIEW(dev)) {
4404 switch (intel_dig_port->port) {
4405 case PORT_B:
4406 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4407 break;
4408 case PORT_C:
4409 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4410 break;
4411 case PORT_D:
4412 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4413 break;
4414 default:
2a592bec 4415 return -EINVAL;
232a6ee9
TP
4416 }
4417 } else {
4418 switch (intel_dig_port->port) {
4419 case PORT_B:
4420 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4421 break;
4422 case PORT_C:
4423 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4424 break;
4425 case PORT_D:
4426 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4427 break;
4428 default:
2a592bec 4429 return -EINVAL;
232a6ee9 4430 }
a4fc5ed6
KP
4431 }
4432
10f76a38 4433 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4434 return 0;
4435 return 1;
4436}
4437
4438static enum drm_connector_status
4439g4x_dp_detect(struct intel_dp *intel_dp)
4440{
4441 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4442 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4443 int ret;
4444
4445 /* Can't disconnect eDP, but you can close the lid... */
4446 if (is_edp(intel_dp)) {
4447 enum drm_connector_status status;
4448
4449 status = intel_panel_detect(dev);
4450 if (status == connector_status_unknown)
4451 status = connector_status_connected;
4452 return status;
4453 }
4454
4455 ret = g4x_digital_port_connected(dev, intel_dig_port);
4456 if (ret == -EINVAL)
4457 return connector_status_unknown;
4458 else if (ret == 0)
a4fc5ed6
KP
4459 return connector_status_disconnected;
4460
26d61aad 4461 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4462}
4463
8c241fef 4464static struct edid *
beb60608 4465intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4466{
beb60608 4467 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4468
9cd300e0
JN
4469 /* use cached edid if we have one */
4470 if (intel_connector->edid) {
9cd300e0
JN
4471 /* invalid edid */
4472 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4473 return NULL;
4474
55e9edeb 4475 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4476 } else
4477 return drm_get_edid(&intel_connector->base,
4478 &intel_dp->aux.ddc);
4479}
8c241fef 4480
beb60608
CW
4481static void
4482intel_dp_set_edid(struct intel_dp *intel_dp)
4483{
4484 struct intel_connector *intel_connector = intel_dp->attached_connector;
4485 struct edid *edid;
8c241fef 4486
beb60608
CW
4487 edid = intel_dp_get_edid(intel_dp);
4488 intel_connector->detect_edid = edid;
4489
4490 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4491 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4492 else
4493 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4494}
4495
beb60608
CW
4496static void
4497intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4498{
beb60608 4499 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4500
beb60608
CW
4501 kfree(intel_connector->detect_edid);
4502 intel_connector->detect_edid = NULL;
9cd300e0 4503
beb60608
CW
4504 intel_dp->has_audio = false;
4505}
d6f24d0f 4506
beb60608
CW
4507static enum intel_display_power_domain
4508intel_dp_power_get(struct intel_dp *dp)
4509{
4510 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4511 enum intel_display_power_domain power_domain;
4512
4513 power_domain = intel_display_port_power_domain(encoder);
4514 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4515
4516 return power_domain;
4517}
d6f24d0f 4518
beb60608
CW
4519static void
4520intel_dp_power_put(struct intel_dp *dp,
4521 enum intel_display_power_domain power_domain)
4522{
4523 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4524 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4525}
4526
a9756bb5
ZW
4527static enum drm_connector_status
4528intel_dp_detect(struct drm_connector *connector, bool force)
4529{
4530 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4531 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4532 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4533 struct drm_device *dev = connector->dev;
a9756bb5 4534 enum drm_connector_status status;
671dedd2 4535 enum intel_display_power_domain power_domain;
0e32b39c 4536 bool ret;
09b1eb13 4537 u8 sink_irq_vector;
a9756bb5 4538
164c8598 4539 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4540 connector->base.id, connector->name);
beb60608 4541 intel_dp_unset_edid(intel_dp);
164c8598 4542
0e32b39c
DA
4543 if (intel_dp->is_mst) {
4544 /* MST devices are disconnected from a monitor POV */
4545 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4546 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4547 return connector_status_disconnected;
0e32b39c
DA
4548 }
4549
beb60608 4550 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4551
d410b56d
CW
4552 /* Can't disconnect eDP, but you can close the lid... */
4553 if (is_edp(intel_dp))
4554 status = edp_detect(intel_dp);
4555 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4556 status = ironlake_dp_detect(intel_dp);
4557 else
4558 status = g4x_dp_detect(intel_dp);
4559 if (status != connector_status_connected)
c8c8fb33 4560 goto out;
a9756bb5 4561
0d198328
AJ
4562 intel_dp_probe_oui(intel_dp);
4563
0e32b39c
DA
4564 ret = intel_dp_probe_mst(intel_dp);
4565 if (ret) {
4566 /* if we are in MST mode then this connector
4567 won't appear connected or have anything with EDID on it */
4568 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4569 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4570 status = connector_status_disconnected;
4571 goto out;
4572 }
4573
beb60608 4574 intel_dp_set_edid(intel_dp);
a9756bb5 4575
d63885da
PZ
4576 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4577 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4578 status = connector_status_connected;
4579
09b1eb13
TP
4580 /* Try to read the source of the interrupt */
4581 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4582 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4583 /* Clear interrupt source */
4584 drm_dp_dpcd_writeb(&intel_dp->aux,
4585 DP_DEVICE_SERVICE_IRQ_VECTOR,
4586 sink_irq_vector);
4587
4588 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4589 intel_dp_handle_test_request(intel_dp);
4590 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4591 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4592 }
4593
c8c8fb33 4594out:
beb60608 4595 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4596 return status;
a4fc5ed6
KP
4597}
4598
beb60608
CW
4599static void
4600intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4601{
df0e9248 4602 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4603 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4604 enum intel_display_power_domain power_domain;
a4fc5ed6 4605
beb60608
CW
4606 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4607 connector->base.id, connector->name);
4608 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4609
beb60608
CW
4610 if (connector->status != connector_status_connected)
4611 return;
671dedd2 4612
beb60608
CW
4613 power_domain = intel_dp_power_get(intel_dp);
4614
4615 intel_dp_set_edid(intel_dp);
4616
4617 intel_dp_power_put(intel_dp, power_domain);
4618
4619 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4620 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4621}
4622
4623static int intel_dp_get_modes(struct drm_connector *connector)
4624{
4625 struct intel_connector *intel_connector = to_intel_connector(connector);
4626 struct edid *edid;
4627
4628 edid = intel_connector->detect_edid;
4629 if (edid) {
4630 int ret = intel_connector_update_modes(connector, edid);
4631 if (ret)
4632 return ret;
4633 }
32f9d658 4634
f8779fda 4635 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4636 if (is_edp(intel_attached_dp(connector)) &&
4637 intel_connector->panel.fixed_mode) {
f8779fda 4638 struct drm_display_mode *mode;
beb60608
CW
4639
4640 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4641 intel_connector->panel.fixed_mode);
f8779fda 4642 if (mode) {
32f9d658
ZW
4643 drm_mode_probed_add(connector, mode);
4644 return 1;
4645 }
4646 }
beb60608 4647
32f9d658 4648 return 0;
a4fc5ed6
KP
4649}
4650
1aad7ac0
CW
4651static bool
4652intel_dp_detect_audio(struct drm_connector *connector)
4653{
1aad7ac0 4654 bool has_audio = false;
beb60608 4655 struct edid *edid;
1aad7ac0 4656
beb60608
CW
4657 edid = to_intel_connector(connector)->detect_edid;
4658 if (edid)
1aad7ac0 4659 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4660
1aad7ac0
CW
4661 return has_audio;
4662}
4663
f684960e
CW
4664static int
4665intel_dp_set_property(struct drm_connector *connector,
4666 struct drm_property *property,
4667 uint64_t val)
4668{
e953fd7b 4669 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4670 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4671 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4672 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4673 int ret;
4674
662595df 4675 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4676 if (ret)
4677 return ret;
4678
3f43c48d 4679 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4680 int i = val;
4681 bool has_audio;
4682
4683 if (i == intel_dp->force_audio)
f684960e
CW
4684 return 0;
4685
1aad7ac0 4686 intel_dp->force_audio = i;
f684960e 4687
c3e5f67b 4688 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4689 has_audio = intel_dp_detect_audio(connector);
4690 else
c3e5f67b 4691 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4692
4693 if (has_audio == intel_dp->has_audio)
f684960e
CW
4694 return 0;
4695
1aad7ac0 4696 intel_dp->has_audio = has_audio;
f684960e
CW
4697 goto done;
4698 }
4699
e953fd7b 4700 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4701 bool old_auto = intel_dp->color_range_auto;
4702 uint32_t old_range = intel_dp->color_range;
4703
55bc60db
VS
4704 switch (val) {
4705 case INTEL_BROADCAST_RGB_AUTO:
4706 intel_dp->color_range_auto = true;
4707 break;
4708 case INTEL_BROADCAST_RGB_FULL:
4709 intel_dp->color_range_auto = false;
4710 intel_dp->color_range = 0;
4711 break;
4712 case INTEL_BROADCAST_RGB_LIMITED:
4713 intel_dp->color_range_auto = false;
4714 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4715 break;
4716 default:
4717 return -EINVAL;
4718 }
ae4edb80
DV
4719
4720 if (old_auto == intel_dp->color_range_auto &&
4721 old_range == intel_dp->color_range)
4722 return 0;
4723
e953fd7b
CW
4724 goto done;
4725 }
4726
53b41837
YN
4727 if (is_edp(intel_dp) &&
4728 property == connector->dev->mode_config.scaling_mode_property) {
4729 if (val == DRM_MODE_SCALE_NONE) {
4730 DRM_DEBUG_KMS("no scaling not supported\n");
4731 return -EINVAL;
4732 }
4733
4734 if (intel_connector->panel.fitting_mode == val) {
4735 /* the eDP scaling property is not changed */
4736 return 0;
4737 }
4738 intel_connector->panel.fitting_mode = val;
4739
4740 goto done;
4741 }
4742
f684960e
CW
4743 return -EINVAL;
4744
4745done:
c0c36b94
CW
4746 if (intel_encoder->base.crtc)
4747 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4748
4749 return 0;
4750}
4751
a4fc5ed6 4752static void
73845adf 4753intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4754{
1d508706 4755 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4756
10e972d3 4757 kfree(intel_connector->detect_edid);
beb60608 4758
9cd300e0
JN
4759 if (!IS_ERR_OR_NULL(intel_connector->edid))
4760 kfree(intel_connector->edid);
4761
acd8db10
PZ
4762 /* Can't call is_edp() since the encoder may have been destroyed
4763 * already. */
4764 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4765 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4766
a4fc5ed6 4767 drm_connector_cleanup(connector);
55f78c43 4768 kfree(connector);
a4fc5ed6
KP
4769}
4770
00c09d70 4771void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4772{
da63a9f2
PZ
4773 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4774 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4775
4f71d0cb 4776 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4777 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4778 if (is_edp(intel_dp)) {
4779 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4780 /*
4781 * vdd might still be enabled do to the delayed vdd off.
4782 * Make sure vdd is actually turned off here.
4783 */
773538e8 4784 pps_lock(intel_dp);
4be73780 4785 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4786 pps_unlock(intel_dp);
4787
01527b31
CT
4788 if (intel_dp->edp_notifier.notifier_call) {
4789 unregister_reboot_notifier(&intel_dp->edp_notifier);
4790 intel_dp->edp_notifier.notifier_call = NULL;
4791 }
bd943159 4792 }
c8bd0e49 4793 drm_encoder_cleanup(encoder);
da63a9f2 4794 kfree(intel_dig_port);
24d05927
DV
4795}
4796
07f9cd0b
ID
4797static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4798{
4799 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4800
4801 if (!is_edp(intel_dp))
4802 return;
4803
951468f3
VS
4804 /*
4805 * vdd might still be enabled do to the delayed vdd off.
4806 * Make sure vdd is actually turned off here.
4807 */
afa4e53a 4808 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4809 pps_lock(intel_dp);
07f9cd0b 4810 edp_panel_vdd_off_sync(intel_dp);
773538e8 4811 pps_unlock(intel_dp);
07f9cd0b
ID
4812}
4813
49e6bc51
VS
4814static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4815{
4816 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4817 struct drm_device *dev = intel_dig_port->base.base.dev;
4818 struct drm_i915_private *dev_priv = dev->dev_private;
4819 enum intel_display_power_domain power_domain;
4820
4821 lockdep_assert_held(&dev_priv->pps_mutex);
4822
4823 if (!edp_have_panel_vdd(intel_dp))
4824 return;
4825
4826 /*
4827 * The VDD bit needs a power domain reference, so if the bit is
4828 * already enabled when we boot or resume, grab this reference and
4829 * schedule a vdd off, so we don't hold on to the reference
4830 * indefinitely.
4831 */
4832 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4833 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4834 intel_display_power_get(dev_priv, power_domain);
4835
4836 edp_panel_vdd_schedule_off(intel_dp);
4837}
4838
6d93c0c4
ID
4839static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4840{
49e6bc51
VS
4841 struct intel_dp *intel_dp;
4842
4843 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4844 return;
4845
4846 intel_dp = enc_to_intel_dp(encoder);
4847
4848 pps_lock(intel_dp);
4849
4850 /*
4851 * Read out the current power sequencer assignment,
4852 * in case the BIOS did something with it.
4853 */
4854 if (IS_VALLEYVIEW(encoder->dev))
4855 vlv_initial_power_sequencer_setup(intel_dp);
4856
4857 intel_edp_panel_vdd_sanitize(intel_dp);
4858
4859 pps_unlock(intel_dp);
6d93c0c4
ID
4860}
4861
a4fc5ed6 4862static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4863 .dpms = intel_connector_dpms,
a4fc5ed6 4864 .detect = intel_dp_detect,
beb60608 4865 .force = intel_dp_force,
a4fc5ed6 4866 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4867 .set_property = intel_dp_set_property,
2545e4a6 4868 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4869 .destroy = intel_dp_connector_destroy,
c6f95f27 4870 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4871 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4872};
4873
4874static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4875 .get_modes = intel_dp_get_modes,
4876 .mode_valid = intel_dp_mode_valid,
df0e9248 4877 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4878};
4879
a4fc5ed6 4880static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4881 .reset = intel_dp_encoder_reset,
24d05927 4882 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4883};
4884
0e32b39c 4885void
21d40d37 4886intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4887{
0e32b39c 4888 return;
c8110e52 4889}
6207937d 4890
b2c5c181 4891enum irqreturn
13cf5504
DA
4892intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4893{
4894 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4895 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4896 struct drm_device *dev = intel_dig_port->base.base.dev;
4897 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4898 enum intel_display_power_domain power_domain;
b2c5c181 4899 enum irqreturn ret = IRQ_NONE;
1c767b33 4900
0e32b39c
DA
4901 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4902 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4903
7a7f84cc
VS
4904 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4905 /*
4906 * vdd off can generate a long pulse on eDP which
4907 * would require vdd on to handle it, and thus we
4908 * would end up in an endless cycle of
4909 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4910 */
4911 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4912 port_name(intel_dig_port->port));
a8b3d52f 4913 return IRQ_HANDLED;
7a7f84cc
VS
4914 }
4915
26fbb774
VS
4916 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4917 port_name(intel_dig_port->port),
0e32b39c 4918 long_hpd ? "long" : "short");
13cf5504 4919
1c767b33
ID
4920 power_domain = intel_display_port_power_domain(intel_encoder);
4921 intel_display_power_get(dev_priv, power_domain);
4922
0e32b39c 4923 if (long_hpd) {
5fa836a9
MK
4924 /* indicate that we need to restart link training */
4925 intel_dp->train_set_valid = false;
2a592bec
DA
4926
4927 if (HAS_PCH_SPLIT(dev)) {
4928 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4929 goto mst_fail;
4930 } else {
4931 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4932 goto mst_fail;
4933 }
0e32b39c
DA
4934
4935 if (!intel_dp_get_dpcd(intel_dp)) {
4936 goto mst_fail;
4937 }
4938
4939 intel_dp_probe_oui(intel_dp);
4940
4941 if (!intel_dp_probe_mst(intel_dp))
4942 goto mst_fail;
4943
4944 } else {
4945 if (intel_dp->is_mst) {
1c767b33 4946 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4947 goto mst_fail;
4948 }
4949
4950 if (!intel_dp->is_mst) {
4951 /*
4952 * we'll check the link status via the normal hot plug path later -
4953 * but for short hpds we should check it now
4954 */
5b215bcf 4955 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4956 intel_dp_check_link_status(intel_dp);
5b215bcf 4957 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4958 }
4959 }
b2c5c181
DV
4960
4961 ret = IRQ_HANDLED;
4962
1c767b33 4963 goto put_power;
0e32b39c
DA
4964mst_fail:
4965 /* if we were in MST mode, and device is not there get out of MST mode */
4966 if (intel_dp->is_mst) {
4967 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4968 intel_dp->is_mst = false;
4969 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4970 }
1c767b33
ID
4971put_power:
4972 intel_display_power_put(dev_priv, power_domain);
4973
4974 return ret;
13cf5504
DA
4975}
4976
e3421a18
ZW
4977/* Return which DP Port should be selected for Transcoder DP control */
4978int
0206e353 4979intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4980{
4981 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4982 struct intel_encoder *intel_encoder;
4983 struct intel_dp *intel_dp;
e3421a18 4984
fa90ecef
PZ
4985 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4986 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4987
fa90ecef
PZ
4988 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4989 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4990 return intel_dp->output_reg;
e3421a18 4991 }
ea5b213a 4992
e3421a18
ZW
4993 return -1;
4994}
4995
36e83a18 4996/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4997bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4998{
4999 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5000 union child_device_config *p_child;
36e83a18 5001 int i;
5d8a7752
VS
5002 static const short port_mapping[] = {
5003 [PORT_B] = PORT_IDPB,
5004 [PORT_C] = PORT_IDPC,
5005 [PORT_D] = PORT_IDPD,
5006 };
36e83a18 5007
3b32a35b
VS
5008 if (port == PORT_A)
5009 return true;
5010
41aa3448 5011 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5012 return false;
5013
41aa3448
RV
5014 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5015 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5016
5d8a7752 5017 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5018 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5019 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5020 return true;
5021 }
5022 return false;
5023}
5024
0e32b39c 5025void
f684960e
CW
5026intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5027{
53b41837
YN
5028 struct intel_connector *intel_connector = to_intel_connector(connector);
5029
3f43c48d 5030 intel_attach_force_audio_property(connector);
e953fd7b 5031 intel_attach_broadcast_rgb_property(connector);
55bc60db 5032 intel_dp->color_range_auto = true;
53b41837
YN
5033
5034 if (is_edp(intel_dp)) {
5035 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5036 drm_object_attach_property(
5037 &connector->base,
53b41837 5038 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5039 DRM_MODE_SCALE_ASPECT);
5040 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5041 }
f684960e
CW
5042}
5043
dada1a9f
ID
5044static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5045{
5046 intel_dp->last_power_cycle = jiffies;
5047 intel_dp->last_power_on = jiffies;
5048 intel_dp->last_backlight_off = jiffies;
5049}
5050
67a54566
DV
5051static void
5052intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5053 struct intel_dp *intel_dp)
67a54566
DV
5054{
5055 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5056 struct edp_power_seq cur, vbt, spec,
5057 *final = &intel_dp->pps_delays;
67a54566 5058 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 5059 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5060
e39b999a
VS
5061 lockdep_assert_held(&dev_priv->pps_mutex);
5062
81ddbc69
VS
5063 /* already initialized? */
5064 if (final->t11_t12 != 0)
5065 return;
5066
453c5420 5067 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5068 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5069 pp_on_reg = PCH_PP_ON_DELAYS;
5070 pp_off_reg = PCH_PP_OFF_DELAYS;
5071 pp_div_reg = PCH_PP_DIVISOR;
5072 } else {
bf13e81b
JN
5073 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5074
5075 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5076 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5077 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5078 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5079 }
67a54566
DV
5080
5081 /* Workaround: Need to write PP_CONTROL with the unlock key as
5082 * the very first thing. */
453c5420 5083 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 5084 I915_WRITE(pp_ctrl_reg, pp);
67a54566 5085
453c5420
JB
5086 pp_on = I915_READ(pp_on_reg);
5087 pp_off = I915_READ(pp_off_reg);
5088 pp_div = I915_READ(pp_div_reg);
67a54566
DV
5089
5090 /* Pull timing values out of registers */
5091 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5092 PANEL_POWER_UP_DELAY_SHIFT;
5093
5094 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5095 PANEL_LIGHT_ON_DELAY_SHIFT;
5096
5097 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5098 PANEL_LIGHT_OFF_DELAY_SHIFT;
5099
5100 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5101 PANEL_POWER_DOWN_DELAY_SHIFT;
5102
5103 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5104 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5105
5106 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5107 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5108
41aa3448 5109 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5110
5111 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5112 * our hw here, which are all in 100usec. */
5113 spec.t1_t3 = 210 * 10;
5114 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5115 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5116 spec.t10 = 500 * 10;
5117 /* This one is special and actually in units of 100ms, but zero
5118 * based in the hw (so we need to add 100 ms). But the sw vbt
5119 * table multiplies it with 1000 to make it in units of 100usec,
5120 * too. */
5121 spec.t11_t12 = (510 + 100) * 10;
5122
5123 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5124 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5125
5126 /* Use the max of the register settings and vbt. If both are
5127 * unset, fall back to the spec limits. */
36b5f425 5128#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5129 spec.field : \
5130 max(cur.field, vbt.field))
5131 assign_final(t1_t3);
5132 assign_final(t8);
5133 assign_final(t9);
5134 assign_final(t10);
5135 assign_final(t11_t12);
5136#undef assign_final
5137
36b5f425 5138#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5139 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5140 intel_dp->backlight_on_delay = get_delay(t8);
5141 intel_dp->backlight_off_delay = get_delay(t9);
5142 intel_dp->panel_power_down_delay = get_delay(t10);
5143 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5144#undef get_delay
5145
f30d26e4
JN
5146 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5147 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5148 intel_dp->panel_power_cycle_delay);
5149
5150 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5151 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5152}
5153
5154static void
5155intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5156 struct intel_dp *intel_dp)
f30d26e4
JN
5157{
5158 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5159 u32 pp_on, pp_off, pp_div, port_sel = 0;
5160 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5161 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 5162 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5163 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5164
e39b999a 5165 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
5166
5167 if (HAS_PCH_SPLIT(dev)) {
5168 pp_on_reg = PCH_PP_ON_DELAYS;
5169 pp_off_reg = PCH_PP_OFF_DELAYS;
5170 pp_div_reg = PCH_PP_DIVISOR;
5171 } else {
bf13e81b
JN
5172 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5173
5174 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5175 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5176 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5177 }
5178
b2f19d1a
PZ
5179 /*
5180 * And finally store the new values in the power sequencer. The
5181 * backlight delays are set to 1 because we do manual waits on them. For
5182 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5183 * we'll end up waiting for the backlight off delay twice: once when we
5184 * do the manual sleep, and once when we disable the panel and wait for
5185 * the PP_STATUS bit to become zero.
5186 */
f30d26e4 5187 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5188 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5189 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5190 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5191 /* Compute the divisor for the pp clock, simply match the Bspec
5192 * formula. */
453c5420 5193 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 5194 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
5195 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5196
5197 /* Haswell doesn't have any port selection bits for the panel
5198 * power sequencer any more. */
bc7d38a4 5199 if (IS_VALLEYVIEW(dev)) {
ad933b56 5200 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5201 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5202 if (port == PORT_A)
a24c144c 5203 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5204 else
a24c144c 5205 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5206 }
5207
453c5420
JB
5208 pp_on |= port_sel;
5209
5210 I915_WRITE(pp_on_reg, pp_on);
5211 I915_WRITE(pp_off_reg, pp_off);
5212 I915_WRITE(pp_div_reg, pp_div);
67a54566 5213
67a54566 5214 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5215 I915_READ(pp_on_reg),
5216 I915_READ(pp_off_reg),
5217 I915_READ(pp_div_reg));
f684960e
CW
5218}
5219
b33a2815
VK
5220/**
5221 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5222 * @dev: DRM device
5223 * @refresh_rate: RR to be programmed
5224 *
5225 * This function gets called when refresh rate (RR) has to be changed from
5226 * one frequency to another. Switches can be between high and low RR
5227 * supported by the panel or to any other RR based on media playback (in
5228 * this case, RR value needs to be passed from user space).
5229 *
5230 * The caller of this function needs to take a lock on dev_priv->drrs.
5231 */
96178eeb 5232static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5233{
5234 struct drm_i915_private *dev_priv = dev->dev_private;
5235 struct intel_encoder *encoder;
96178eeb
VK
5236 struct intel_digital_port *dig_port = NULL;
5237 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5238 struct intel_crtc_state *config = NULL;
439d7ac0 5239 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5240 u32 reg, val;
96178eeb 5241 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5242
5243 if (refresh_rate <= 0) {
5244 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5245 return;
5246 }
5247
96178eeb
VK
5248 if (intel_dp == NULL) {
5249 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5250 return;
5251 }
5252
1fcc9d1c 5253 /*
e4d59f6b
RV
5254 * FIXME: This needs proper synchronization with psr state for some
5255 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5256 */
439d7ac0 5257
96178eeb
VK
5258 dig_port = dp_to_dig_port(intel_dp);
5259 encoder = &dig_port->base;
723f9aab 5260 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5261
5262 if (!intel_crtc) {
5263 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5264 return;
5265 }
5266
6e3c9717 5267 config = intel_crtc->config;
439d7ac0 5268
96178eeb 5269 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5270 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5271 return;
5272 }
5273
96178eeb
VK
5274 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5275 refresh_rate)
439d7ac0
PB
5276 index = DRRS_LOW_RR;
5277
96178eeb 5278 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5279 DRM_DEBUG_KMS(
5280 "DRRS requested for previously set RR...ignoring\n");
5281 return;
5282 }
5283
5284 if (!intel_crtc->active) {
5285 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5286 return;
5287 }
5288
44395bfe 5289 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5290 switch (index) {
5291 case DRRS_HIGH_RR:
5292 intel_dp_set_m_n(intel_crtc, M1_N1);
5293 break;
5294 case DRRS_LOW_RR:
5295 intel_dp_set_m_n(intel_crtc, M2_N2);
5296 break;
5297 case DRRS_MAX_RR:
5298 default:
5299 DRM_ERROR("Unsupported refreshrate type\n");
5300 }
5301 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5302 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5303 val = I915_READ(reg);
a4c30b1d 5304
439d7ac0 5305 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5306 if (IS_VALLEYVIEW(dev))
5307 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5308 else
5309 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5310 } else {
6fa7aec1
VK
5311 if (IS_VALLEYVIEW(dev))
5312 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5313 else
5314 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5315 }
5316 I915_WRITE(reg, val);
5317 }
5318
4e9ac947
VK
5319 dev_priv->drrs.refresh_rate_type = index;
5320
5321 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5322}
5323
b33a2815
VK
5324/**
5325 * intel_edp_drrs_enable - init drrs struct if supported
5326 * @intel_dp: DP struct
5327 *
5328 * Initializes frontbuffer_bits and drrs.dp
5329 */
c395578e
VK
5330void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5331{
5332 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5333 struct drm_i915_private *dev_priv = dev->dev_private;
5334 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5335 struct drm_crtc *crtc = dig_port->base.base.crtc;
5336 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5337
5338 if (!intel_crtc->config->has_drrs) {
5339 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5340 return;
5341 }
5342
5343 mutex_lock(&dev_priv->drrs.mutex);
5344 if (WARN_ON(dev_priv->drrs.dp)) {
5345 DRM_ERROR("DRRS already enabled\n");
5346 goto unlock;
5347 }
5348
5349 dev_priv->drrs.busy_frontbuffer_bits = 0;
5350
5351 dev_priv->drrs.dp = intel_dp;
5352
5353unlock:
5354 mutex_unlock(&dev_priv->drrs.mutex);
5355}
5356
b33a2815
VK
5357/**
5358 * intel_edp_drrs_disable - Disable DRRS
5359 * @intel_dp: DP struct
5360 *
5361 */
c395578e
VK
5362void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5363{
5364 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5365 struct drm_i915_private *dev_priv = dev->dev_private;
5366 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5367 struct drm_crtc *crtc = dig_port->base.base.crtc;
5368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5369
5370 if (!intel_crtc->config->has_drrs)
5371 return;
5372
5373 mutex_lock(&dev_priv->drrs.mutex);
5374 if (!dev_priv->drrs.dp) {
5375 mutex_unlock(&dev_priv->drrs.mutex);
5376 return;
5377 }
5378
5379 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5380 intel_dp_set_drrs_state(dev_priv->dev,
5381 intel_dp->attached_connector->panel.
5382 fixed_mode->vrefresh);
5383
5384 dev_priv->drrs.dp = NULL;
5385 mutex_unlock(&dev_priv->drrs.mutex);
5386
5387 cancel_delayed_work_sync(&dev_priv->drrs.work);
5388}
5389
4e9ac947
VK
5390static void intel_edp_drrs_downclock_work(struct work_struct *work)
5391{
5392 struct drm_i915_private *dev_priv =
5393 container_of(work, typeof(*dev_priv), drrs.work.work);
5394 struct intel_dp *intel_dp;
5395
5396 mutex_lock(&dev_priv->drrs.mutex);
5397
5398 intel_dp = dev_priv->drrs.dp;
5399
5400 if (!intel_dp)
5401 goto unlock;
5402
439d7ac0 5403 /*
4e9ac947
VK
5404 * The delayed work can race with an invalidate hence we need to
5405 * recheck.
439d7ac0
PB
5406 */
5407
4e9ac947
VK
5408 if (dev_priv->drrs.busy_frontbuffer_bits)
5409 goto unlock;
439d7ac0 5410
4e9ac947
VK
5411 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5412 intel_dp_set_drrs_state(dev_priv->dev,
5413 intel_dp->attached_connector->panel.
5414 downclock_mode->vrefresh);
439d7ac0 5415
4e9ac947 5416unlock:
4e9ac947 5417 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5418}
5419
b33a2815
VK
5420/**
5421 * intel_edp_drrs_invalidate - Invalidate DRRS
5422 * @dev: DRM device
5423 * @frontbuffer_bits: frontbuffer plane tracking bits
5424 *
5425 * When there is a disturbance on screen (due to cursor movement/time
5426 * update etc), DRRS needs to be invalidated, i.e. need to switch to
5427 * high RR.
5428 *
5429 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5430 */
a93fad0f
VK
5431void intel_edp_drrs_invalidate(struct drm_device *dev,
5432 unsigned frontbuffer_bits)
5433{
5434 struct drm_i915_private *dev_priv = dev->dev_private;
5435 struct drm_crtc *crtc;
5436 enum pipe pipe;
5437
9da7d693 5438 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5439 return;
5440
88f933a8 5441 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5442
a93fad0f 5443 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5444 if (!dev_priv->drrs.dp) {
5445 mutex_unlock(&dev_priv->drrs.mutex);
5446 return;
5447 }
5448
a93fad0f
VK
5449 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5450 pipe = to_intel_crtc(crtc)->pipe;
5451
5452 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5453 intel_dp_set_drrs_state(dev_priv->dev,
5454 dev_priv->drrs.dp->attached_connector->panel.
5455 fixed_mode->vrefresh);
5456 }
5457
5458 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5459
5460 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5461 mutex_unlock(&dev_priv->drrs.mutex);
5462}
5463
b33a2815
VK
5464/**
5465 * intel_edp_drrs_flush - Flush DRRS
5466 * @dev: DRM device
5467 * @frontbuffer_bits: frontbuffer plane tracking bits
5468 *
5469 * When there is no movement on screen, DRRS work can be scheduled.
5470 * This DRRS work is responsible for setting relevant registers after a
5471 * timeout of 1 second.
5472 *
5473 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5474 */
a93fad0f
VK
5475void intel_edp_drrs_flush(struct drm_device *dev,
5476 unsigned frontbuffer_bits)
5477{
5478 struct drm_i915_private *dev_priv = dev->dev_private;
5479 struct drm_crtc *crtc;
5480 enum pipe pipe;
5481
9da7d693 5482 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5483 return;
5484
88f933a8 5485 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5486
a93fad0f 5487 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5488 if (!dev_priv->drrs.dp) {
5489 mutex_unlock(&dev_priv->drrs.mutex);
5490 return;
5491 }
5492
a93fad0f
VK
5493 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5494 pipe = to_intel_crtc(crtc)->pipe;
5495 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5496
a93fad0f
VK
5497 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5498 !dev_priv->drrs.busy_frontbuffer_bits)
5499 schedule_delayed_work(&dev_priv->drrs.work,
5500 msecs_to_jiffies(1000));
5501 mutex_unlock(&dev_priv->drrs.mutex);
5502}
5503
b33a2815
VK
5504/**
5505 * DOC: Display Refresh Rate Switching (DRRS)
5506 *
5507 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5508 * which enables swtching between low and high refresh rates,
5509 * dynamically, based on the usage scenario. This feature is applicable
5510 * for internal panels.
5511 *
5512 * Indication that the panel supports DRRS is given by the panel EDID, which
5513 * would list multiple refresh rates for one resolution.
5514 *
5515 * DRRS is of 2 types - static and seamless.
5516 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5517 * (may appear as a blink on screen) and is used in dock-undock scenario.
5518 * Seamless DRRS involves changing RR without any visual effect to the user
5519 * and can be used during normal system usage. This is done by programming
5520 * certain registers.
5521 *
5522 * Support for static/seamless DRRS may be indicated in the VBT based on
5523 * inputs from the panel spec.
5524 *
5525 * DRRS saves power by switching to low RR based on usage scenarios.
5526 *
5527 * eDP DRRS:-
5528 * The implementation is based on frontbuffer tracking implementation.
5529 * When there is a disturbance on the screen triggered by user activity or a
5530 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5531 * When there is no movement on screen, after a timeout of 1 second, a switch
5532 * to low RR is made.
5533 * For integration with frontbuffer tracking code,
5534 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5535 *
5536 * DRRS can be further extended to support other internal panels and also
5537 * the scenario of video playback wherein RR is set based on the rate
5538 * requested by userspace.
5539 */
5540
5541/**
5542 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5543 * @intel_connector: eDP connector
5544 * @fixed_mode: preferred mode of panel
5545 *
5546 * This function is called only once at driver load to initialize basic
5547 * DRRS stuff.
5548 *
5549 * Returns:
5550 * Downclock mode if panel supports it, else return NULL.
5551 * DRRS support is determined by the presence of downclock mode (apart
5552 * from VBT setting).
5553 */
4f9db5b5 5554static struct drm_display_mode *
96178eeb
VK
5555intel_dp_drrs_init(struct intel_connector *intel_connector,
5556 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5557{
5558 struct drm_connector *connector = &intel_connector->base;
96178eeb 5559 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5560 struct drm_i915_private *dev_priv = dev->dev_private;
5561 struct drm_display_mode *downclock_mode = NULL;
5562
9da7d693
DV
5563 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5564 mutex_init(&dev_priv->drrs.mutex);
5565
4f9db5b5
PB
5566 if (INTEL_INFO(dev)->gen <= 6) {
5567 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5568 return NULL;
5569 }
5570
5571 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5572 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5573 return NULL;
5574 }
5575
5576 downclock_mode = intel_find_panel_downclock
5577 (dev, fixed_mode, connector);
5578
5579 if (!downclock_mode) {
a1d26342 5580 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5581 return NULL;
5582 }
5583
96178eeb 5584 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5585
96178eeb 5586 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5587 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5588 return downclock_mode;
5589}
5590
ed92f0b2 5591static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5592 struct intel_connector *intel_connector)
ed92f0b2
PZ
5593{
5594 struct drm_connector *connector = &intel_connector->base;
5595 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5596 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5597 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5598 struct drm_i915_private *dev_priv = dev->dev_private;
5599 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5600 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5601 bool has_dpcd;
5602 struct drm_display_mode *scan;
5603 struct edid *edid;
6517d273 5604 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5605
5606 if (!is_edp(intel_dp))
5607 return true;
5608
49e6bc51
VS
5609 pps_lock(intel_dp);
5610 intel_edp_panel_vdd_sanitize(intel_dp);
5611 pps_unlock(intel_dp);
63635217 5612
ed92f0b2 5613 /* Cache DPCD and EDID for edp. */
ed92f0b2 5614 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5615
5616 if (has_dpcd) {
5617 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5618 dev_priv->no_aux_handshake =
5619 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5620 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5621 } else {
5622 /* if this fails, presume the device is a ghost */
5623 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5624 return false;
5625 }
5626
5627 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5628 pps_lock(intel_dp);
36b5f425 5629 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5630 pps_unlock(intel_dp);
ed92f0b2 5631
060c8778 5632 mutex_lock(&dev->mode_config.mutex);
0b99836f 5633 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5634 if (edid) {
5635 if (drm_add_edid_modes(connector, edid)) {
5636 drm_mode_connector_update_edid_property(connector,
5637 edid);
5638 drm_edid_to_eld(connector, edid);
5639 } else {
5640 kfree(edid);
5641 edid = ERR_PTR(-EINVAL);
5642 }
5643 } else {
5644 edid = ERR_PTR(-ENOENT);
5645 }
5646 intel_connector->edid = edid;
5647
5648 /* prefer fixed mode from EDID if available */
5649 list_for_each_entry(scan, &connector->probed_modes, head) {
5650 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5651 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5652 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5653 intel_connector, fixed_mode);
ed92f0b2
PZ
5654 break;
5655 }
5656 }
5657
5658 /* fallback to VBT if available for eDP */
5659 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5660 fixed_mode = drm_mode_duplicate(dev,
5661 dev_priv->vbt.lfp_lvds_vbt_mode);
5662 if (fixed_mode)
5663 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5664 }
060c8778 5665 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5666
01527b31
CT
5667 if (IS_VALLEYVIEW(dev)) {
5668 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5669 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5670
5671 /*
5672 * Figure out the current pipe for the initial backlight setup.
5673 * If the current pipe isn't valid, try the PPS pipe, and if that
5674 * fails just assume pipe A.
5675 */
5676 if (IS_CHERRYVIEW(dev))
5677 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5678 else
5679 pipe = PORT_TO_PIPE(intel_dp->DP);
5680
5681 if (pipe != PIPE_A && pipe != PIPE_B)
5682 pipe = intel_dp->pps_pipe;
5683
5684 if (pipe != PIPE_A && pipe != PIPE_B)
5685 pipe = PIPE_A;
5686
5687 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5688 pipe_name(pipe));
01527b31
CT
5689 }
5690
4f9db5b5 5691 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5692 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5693 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5694
5695 return true;
5696}
5697
16c25533 5698bool
f0fec3f2
PZ
5699intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5700 struct intel_connector *intel_connector)
a4fc5ed6 5701{
f0fec3f2
PZ
5702 struct drm_connector *connector = &intel_connector->base;
5703 struct intel_dp *intel_dp = &intel_dig_port->dp;
5704 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5705 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5706 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5707 enum port port = intel_dig_port->port;
0b99836f 5708 int type;
a4fc5ed6 5709
a4a5d2f8
VS
5710 intel_dp->pps_pipe = INVALID_PIPE;
5711
ec5b01dd 5712 /* intel_dp vfuncs */
b6b5e383
DL
5713 if (INTEL_INFO(dev)->gen >= 9)
5714 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5715 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5716 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5717 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5718 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5719 else if (HAS_PCH_SPLIT(dev))
5720 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5721 else
5722 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5723
b9ca5fad
DL
5724 if (INTEL_INFO(dev)->gen >= 9)
5725 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5726 else
5727 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5728
0767935e
DV
5729 /* Preserve the current hw state. */
5730 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5731 intel_dp->attached_connector = intel_connector;
3d3dc149 5732
3b32a35b 5733 if (intel_dp_is_edp(dev, port))
b329530c 5734 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5735 else
5736 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5737
f7d24902
ID
5738 /*
5739 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5740 * for DP the encoder type can be set by the caller to
5741 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5742 */
5743 if (type == DRM_MODE_CONNECTOR_eDP)
5744 intel_encoder->type = INTEL_OUTPUT_EDP;
5745
c17ed5b5
VS
5746 /* eDP only on port B and/or C on vlv/chv */
5747 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5748 port != PORT_B && port != PORT_C))
5749 return false;
5750
e7281eab
ID
5751 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5752 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5753 port_name(port));
5754
b329530c 5755 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5756 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5757
a4fc5ed6
KP
5758 connector->interlace_allowed = true;
5759 connector->doublescan_allowed = 0;
5760
f0fec3f2 5761 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5762 edp_panel_vdd_work);
a4fc5ed6 5763
df0e9248 5764 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5765 drm_connector_register(connector);
a4fc5ed6 5766
affa9354 5767 if (HAS_DDI(dev))
bcbc889b
PZ
5768 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5769 else
5770 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5771 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5772
0b99836f 5773 /* Set up the hotplug pin. */
ab9d7c30
PZ
5774 switch (port) {
5775 case PORT_A:
1d843f9d 5776 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5777 break;
5778 case PORT_B:
1d843f9d 5779 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5780 break;
5781 case PORT_C:
1d843f9d 5782 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5783 break;
5784 case PORT_D:
1d843f9d 5785 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5786 break;
5787 default:
ad1c0b19 5788 BUG();
5eb08b69
ZW
5789 }
5790
dada1a9f 5791 if (is_edp(intel_dp)) {
773538e8 5792 pps_lock(intel_dp);
1e74a324
VS
5793 intel_dp_init_panel_power_timestamps(intel_dp);
5794 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5795 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5796 else
36b5f425 5797 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5798 pps_unlock(intel_dp);
dada1a9f 5799 }
0095e6dc 5800
9d1a1031 5801 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5802
0e32b39c 5803 /* init MST on ports that can support it */
0c9b3715
JN
5804 if (HAS_DP_MST(dev) &&
5805 (port == PORT_B || port == PORT_C || port == PORT_D))
5806 intel_dp_mst_encoder_init(intel_dig_port,
5807 intel_connector->base.base.id);
0e32b39c 5808
36b5f425 5809 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5810 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5811 if (is_edp(intel_dp)) {
5812 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5813 /*
5814 * vdd might still be enabled do to the delayed vdd off.
5815 * Make sure vdd is actually turned off here.
5816 */
773538e8 5817 pps_lock(intel_dp);
4be73780 5818 edp_panel_vdd_off_sync(intel_dp);
773538e8 5819 pps_unlock(intel_dp);
15b1d171 5820 }
34ea3d38 5821 drm_connector_unregister(connector);
b2f246a8 5822 drm_connector_cleanup(connector);
16c25533 5823 return false;
b2f246a8 5824 }
32f9d658 5825
f684960e
CW
5826 intel_dp_add_properties(intel_dp, connector);
5827
a4fc5ed6
KP
5828 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5829 * 0xd. Failure to do so will result in spurious interrupts being
5830 * generated on the port when a cable is not attached.
5831 */
5832 if (IS_G4X(dev) && !IS_GM45(dev)) {
5833 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5834 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5835 }
16c25533 5836
aa7471d2
JN
5837 i915_debugfs_connector_add(connector);
5838
16c25533 5839 return true;
a4fc5ed6 5840}
f0fec3f2
PZ
5841
5842void
5843intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5844{
13cf5504 5845 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5846 struct intel_digital_port *intel_dig_port;
5847 struct intel_encoder *intel_encoder;
5848 struct drm_encoder *encoder;
5849 struct intel_connector *intel_connector;
5850
b14c5679 5851 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5852 if (!intel_dig_port)
5853 return;
5854
08d9bc92 5855 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
5856 if (!intel_connector) {
5857 kfree(intel_dig_port);
5858 return;
5859 }
5860
5861 intel_encoder = &intel_dig_port->base;
5862 encoder = &intel_encoder->base;
5863
5864 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5865 DRM_MODE_ENCODER_TMDS);
5866
5bfe2ac0 5867 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5868 intel_encoder->disable = intel_disable_dp;
00c09d70 5869 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5870 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5871 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5872 if (IS_CHERRYVIEW(dev)) {
9197c88b 5873 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5874 intel_encoder->pre_enable = chv_pre_enable_dp;
5875 intel_encoder->enable = vlv_enable_dp;
580d3811 5876 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5877 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5878 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5879 intel_encoder->pre_enable = vlv_pre_enable_dp;
5880 intel_encoder->enable = vlv_enable_dp;
49277c31 5881 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5882 } else {
ecff4f3b
JN
5883 intel_encoder->pre_enable = g4x_pre_enable_dp;
5884 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5885 if (INTEL_INFO(dev)->gen >= 5)
5886 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5887 }
f0fec3f2 5888
174edf1f 5889 intel_dig_port->port = port;
f0fec3f2
PZ
5890 intel_dig_port->dp.output_reg = output_reg;
5891
00c09d70 5892 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5893 if (IS_CHERRYVIEW(dev)) {
5894 if (port == PORT_D)
5895 intel_encoder->crtc_mask = 1 << 2;
5896 else
5897 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5898 } else {
5899 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5900 }
bc079e8b 5901 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5902 intel_encoder->hot_plug = intel_dp_hot_plug;
5903
13cf5504
DA
5904 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5905 dev_priv->hpd_irq_port[port] = intel_dig_port;
5906
15b1d171
PZ
5907 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5908 drm_encoder_cleanup(encoder);
5909 kfree(intel_dig_port);
b2f246a8 5910 kfree(intel_connector);
15b1d171 5911 }
f0fec3f2 5912}
0e32b39c
DA
5913
5914void intel_dp_mst_suspend(struct drm_device *dev)
5915{
5916 struct drm_i915_private *dev_priv = dev->dev_private;
5917 int i;
5918
5919 /* disable MST */
5920 for (i = 0; i < I915_MAX_PORTS; i++) {
5921 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5922 if (!intel_dig_port)
5923 continue;
5924
5925 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5926 if (!intel_dig_port->dp.can_mst)
5927 continue;
5928 if (intel_dig_port->dp.is_mst)
5929 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5930 }
5931 }
5932}
5933
5934void intel_dp_mst_resume(struct drm_device *dev)
5935{
5936 struct drm_i915_private *dev_priv = dev->dev_private;
5937 int i;
5938
5939 for (i = 0; i < I915_MAX_PORTS; i++) {
5940 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5941 if (!intel_dig_port)
5942 continue;
5943 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5944 int ret;
5945
5946 if (!intel_dig_port->dp.can_mst)
5947 continue;
5948
5949 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5950 if (ret != 0) {
5951 intel_dp_check_mst_status(&intel_dig_port->dp);
5952 }
5953 }
5954 }
5955}
This page took 1.244778 seconds and 5 git commands to generate.