drm/i915: Unconfuse DP link rate array names
drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
 71 * CHV supports eDP 1.4, which has more link rates.
 72 * Below we provide only the fixed rates and exclude the variable rates.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
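/*
 * Illustrative check of the fixed-point encoding described above (not part
 * of the driver): for the 1.62GHz entry, (32 << 22) | 1677722 == 0x819999a,
 * and for the 2.7/5.4GHz entries, (27 << 22) | 0 == 0x6c00000, matching the
 * .m2 values programmed in chv_dpll[].
 */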
a8f3ef61 87/* Skylake supports the following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
fe51bfb9
VS
90static const int chv_rates[] = { 162000, 202500, 210000, 216000,
91 243000, 270000, 324000, 405000,
92 420000, 432000, 540000 };
f4896f15 93static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 94
cfcb0fc9
JB
95/**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
101 */
102static bool is_edp(struct intel_dp *intel_dp)
103{
da63a9f2
PZ
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
107}
108
68b4d824 109static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 110{
68b4d824
ID
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
114}
115
df0e9248
CW
116static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117{
fa90ecef 118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
119}
120
ea5b213a 121static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 122static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 123static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 124static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
125static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
a4fc5ed6 127
ed4e9c1d
VS
128static int
129intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 130{
7183dc29 131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
1db10e28 136 case DP_LINK_BW_5_4:
d4eead50 137 break;
a4fc5ed6 138 default:
d4eead50
ID
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
a4fc5ed6
KP
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145}
146
eeb6324d
PZ
147static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148{
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161}
162
cd9dde44
AJ
163/*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
a4fc5ed6 180static int
c898261c 181intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 182{
cd9dde44 183 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
184}
185
fe27d53e
DA
186static int
187intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188{
189 return (max_link_clock * max_lanes * 8) / 10;
190}
191
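/*
 * Worked example tying the two helpers above together (illustrative only):
 * the 1680x1050R mode mentioned earlier has ->clock == 119000, so at 18bpp
 * intel_dp_link_required() returns (119000 * 18 + 9) / 10 == 214200, which
 * fits within the 216000 that intel_dp_max_data_rate() reports for a single
 * 2.7GHz lane.
 */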
c19de8eb 192static enum drm_mode_status
a4fc5ed6
KP
193intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
df0e9248 196 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 201
dd06f90e
JN
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
204 return MODE_PANEL;
205
dd06f90e 206 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 207 return MODE_PANEL;
03afc4a2
DV
208
209 target_clock = fixed_mode->clock;
7de56f43
ZY
210 }
211
50fec21a 212 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 213 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
c4867936 219 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
0af78a2b
DV
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
a4fc5ed6
KP
227 return MODE_OK;
228}
229
a4f1289e 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
231{
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240}
241
c2af70e2 242static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
243{
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249}
250
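/*
 * Example of the big-endian packing used above (illustrative only):
 * intel_dp_pack_aux() turns the two bytes { 0x12, 0x34 } into 0x12340000,
 * and intel_dp_unpack_aux(0x12340000, dst, 2) recovers the same two bytes.
 */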
fb0f8fbf
KP
251/* hrawclock is 1/4 the FSB frequency */
252static int
253intel_hrawclk(struct drm_device *dev)
254{
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
9473c8f4
VP
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
fb0f8fbf
KP
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283}
284
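/*
 * Sanity check of the table above (illustrative): every case returns the FSB
 * frequency divided by four, e.g. CLKCFG_FSB_800 -> 200MHz, consistent with
 * the "hrawclock is 1/4 the FSB frequency" note.
 */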
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b 291
773538e8
VS
292static void pps_lock(struct intel_dp *intel_dp)
293{
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
300 /*
 301 * See vlv_power_sequencer_reset() for why we need
302 * a power domain reference here.
303 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308}
309
310static void pps_unlock(struct intel_dp *intel_dp)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322}
323
961a0db0
VS
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 331 bool pll_enabled;
961a0db0
VS
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
d288f65f
VS
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
 359 * So enable it temporarily if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
961a0db0
VS
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
 368 * to make this power sequencer lock onto the port.
 369 * Otherwise even the VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
961a0db0
VS
382}
383
bf13e81b
JN
384static enum pipe
385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386{
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 392 enum pipe pipe;
bf13e81b 393
e39b999a 394 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 395
a8c3344e
VS
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
a4a5d2f8
VS
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
402 /*
 403 * We don't have a power sequencer currently.
404 * Pick one that's not used by other ports.
405 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
a8c3344e
VS
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
a4a5d2f8 427
a8c3344e
VS
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
36b5f425
VS
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 438
961a0db0
VS
439 /*
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
444
445 return intel_dp->pps_pipe;
446}
447
6491ab27
VS
448typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453{
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455}
456
457static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461}
462
463static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return true;
467}
bf13e81b 468
a4a5d2f8 469static enum pipe
6491ab27
VS
470vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
a4a5d2f8
VS
473{
474 enum pipe pipe;
bf13e81b 475
bf13e81b
JN
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
6491ab27
VS
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
a4a5d2f8 486 return pipe;
bf13e81b
JN
487 }
488
a4a5d2f8
VS
489 return INVALID_PIPE;
490}
491
492static void
493vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
6491ab27
VS
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
a4a5d2f8
VS
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
bf13e81b
JN
520 }
521
a4a5d2f8
VS
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
36b5f425
VS
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
527}
528
773538e8
VS
529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530{
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
543 * pps_{lock,unlock}() do these steps in the correct order, so one
544 * should use them always.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
bf13e81b
JN
556}
557
558static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566}
567
568static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569{
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576}
577
01527b31
CT
 578/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 579 This function is only applicable when the panel PM state is not to be tracked */
580static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582{
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
773538e8 593 pps_lock(intel_dp);
e39b999a 594
01527b31 595 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
01527b31
CT
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
773538e8 609 pps_unlock(intel_dp);
e39b999a 610
01527b31
CT
611 return 0;
612}
613
4be73780 614static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
9a42356b
VS
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
bf13e81b 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
626}
627
4be73780 628static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 629{
30add22d 630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
e39b999a
VS
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
9a42356b
VS
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
773538e8 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
640}
641
9b984dae
KP
642static void
643intel_dp_check_edp(struct intel_dp *intel_dp)
644{
30add22d 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 646 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 647
9b984dae
KP
648 if (!is_edp(intel_dp))
649 return;
453c5420 650
4be73780 651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
656 }
657}
658
9ee32fea
DV
659static uint32_t
660intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661{
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
666 uint32_t status;
667 bool done;
668
ef04f00d 669#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 670 if (has_aux_irq)
b18ac466 671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 672 msecs_to_jiffies_timeout(10));
9ee32fea
DV
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678#undef C
679
680 return status;
681}
682
ec5b01dd 683static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 684{
174edf1f
PZ
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 687
ec5b01dd
DL
688 /*
 689 * The clock divider is based on the hrawclk, and we want it to run at
 690 * 2MHz. So, take the hrawclk value, divide it by 2 and use that.
a4fc5ed6 691 */
ec5b01dd
DL
692 return index ? 0 : intel_hrawclk(dev) / 2;
693}
694
695static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
699
700 if (index)
701 return 0;
702
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 706 else
b84a1cf8 707 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
708 } else {
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
710 }
711}
712
713static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
714{
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
718
719 if (intel_dig_port->port == PORT_A) {
720 if (index)
721 return 0;
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
bc86625a
CW
725 switch (index) {
726 case 0: return 63;
727 case 1: return 72;
728 default: return 0;
729 }
ec5b01dd 730 } else {
bc86625a 731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 732 }
b84a1cf8
RV
733}
734
ec5b01dd
DL
735static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736{
737 return index ? 0 : 100;
738}
739
b6b5e383
DL
740static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741{
742 /*
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
745 * get_aux_clock_divider vfunc to plug-in into the existing code.
746 */
747 return index ? 0 : 1;
748}
749
5ed12a19
DL
750static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
751 bool has_aux_irq,
752 int send_bytes,
753 uint32_t aux_clock_divider)
754{
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
758
759 if (IS_GEN6(dev))
760 precharge = 3;
761 else
762 precharge = 5;
763
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
766 else
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
768
769 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 770 DP_AUX_CH_CTL_DONE |
5ed12a19 771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 773 timeout |
788d4433 774 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
778}
779
b9ca5fad
DL
780static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
781 bool has_aux_irq,
782 int send_bytes,
783 uint32_t unused)
784{
785 return DP_AUX_CH_CTL_SEND_BUSY |
786 DP_AUX_CH_CTL_DONE |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
793}
794
b84a1cf8
RV
795static int
796intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 797 const uint8_t *send, int send_bytes,
b84a1cf8
RV
798 uint8_t *recv, int recv_size)
799{
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
bc86625a 805 uint32_t aux_clock_divider;
b84a1cf8
RV
806 int i, ret, recv_bytes;
807 uint32_t status;
5ed12a19 808 int try, clock = 0;
4e6b788c 809 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
810 bool vdd;
811
773538e8 812 pps_lock(intel_dp);
e39b999a 813
72c3500a
VS
814 /*
815 * We will be called with VDD already enabled for dpcd/edid/oui reads.
816 * In such cases we want to leave VDD enabled and it's up to upper layers
817 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
818 * ourselves.
819 */
1e0560e0 820 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
821
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
824 * deep sleep states.
825 */
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
827
828 intel_dp_check_edp(intel_dp);
5eb08b69 829
c67a470b
PZ
830 intel_aux_display_runtime_get(dev_priv);
831
11bee43e
JB
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
ef04f00d 834 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
836 break;
837 msleep(1);
838 }
839
840 if (try == 3) {
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
842 I915_READ(ch_ctl));
9ee32fea
DV
843 ret = -EBUSY;
844 goto out;
4f7f7b7e
CW
845 }
846
46a5ae9f
PZ
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
849 ret = -E2BIG;
850 goto out;
851 }
852
ec5b01dd 853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
855 has_aux_irq,
856 send_bytes,
857 aux_clock_divider);
5ed12a19 858
bc86625a
CW
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
a4f1289e
RV
864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
bc86625a
CW
866
867 /* Send the command and wait for it to complete */
5ed12a19 868 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
869
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
871
872 /* Clear done status and any errors */
873 I915_WRITE(ch_ctl,
874 status |
875 DP_AUX_CH_CTL_DONE |
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
878
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue;
882 if (status & DP_AUX_CH_CTL_DONE)
883 break;
884 }
4f7f7b7e 885 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
886 break;
887 }
888
a4fc5ed6 889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
891 ret = -EBUSY;
892 goto out;
a4fc5ed6
KP
893 }
894
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
897 */
a5b3da54 898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
900 ret = -EIO;
901 goto out;
a5b3da54 902 }
1ae8c0a5
KP
903
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
a5b3da54 906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
908 ret = -ETIMEDOUT;
909 goto out;
a4fc5ed6
KP
910 }
911
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
0206e353 917
4f7f7b7e 918 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
a4fc5ed6 921
9ee32fea
DV
922 ret = recv_bytes;
923out:
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 925 intel_aux_display_runtime_put(dev_priv);
9ee32fea 926
884f19e9
JN
927 if (vdd)
928 edp_panel_vdd_off(intel_dp, false);
929
773538e8 930 pps_unlock(intel_dp);
e39b999a 931
9ee32fea 932 return ret;
a4fc5ed6
KP
933}
934
a6c8aff0
JN
935#define BARE_ADDRESS_SIZE 3
936#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
937static ssize_t
938intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 939{
9d1a1031
JN
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
a4fc5ed6 943 int ret;
a4fc5ed6 944
9d1a1031
JN
945 txbuf[0] = msg->request << 4;
946 txbuf[1] = msg->address >> 8;
947 txbuf[2] = msg->address & 0xff;
948 txbuf[3] = msg->size - 1;
46a5ae9f 949
9d1a1031
JN
950 switch (msg->request & ~DP_AUX_I2C_MOT) {
951 case DP_AUX_NATIVE_WRITE:
952 case DP_AUX_I2C_WRITE:
a6c8aff0 953 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 954 rxsize = 1;
f51a44b9 955
9d1a1031
JN
956 if (WARN_ON(txsize > 20))
957 return -E2BIG;
a4fc5ed6 958
9d1a1031 959 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 960
9d1a1031
JN
961 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
962 if (ret > 0) {
963 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 964
9d1a1031
JN
965 /* Return payload size. */
966 ret = msg->size;
967 }
968 break;
46a5ae9f 969
9d1a1031
JN
970 case DP_AUX_NATIVE_READ:
971 case DP_AUX_I2C_READ:
a6c8aff0 972 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 973 rxsize = msg->size + 1;
a4fc5ed6 974
9d1a1031
JN
975 if (WARN_ON(rxsize > 20))
976 return -E2BIG;
a4fc5ed6 977
9d1a1031
JN
978 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
979 if (ret > 0) {
980 msg->reply = rxbuf[0] >> 4;
981 /*
982 * Assume happy day, and copy the data. The caller is
983 * expected to check msg->reply before touching it.
984 *
985 * Return payload size.
986 */
987 ret--;
988 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 989 }
9d1a1031
JN
990 break;
991
992 default:
993 ret = -EINVAL;
994 break;
a4fc5ed6 995 }
f51a44b9 996
9d1a1031 997 return ret;
a4fc5ed6
KP
998}
999
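/*
 * Illustrative walk-through of the message framing above (not part of the
 * driver): a native AUX read of a single DPCD byte sends the 4-byte header
 * built in txbuf[] (request << 4, address high, address low, size - 1) and
 * asks for rxsize = msg->size + 1 bytes back, i.e. the reply byte in
 * rxbuf[0] followed by the one data byte that is copied into msg->buffer.
 */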
9d1a1031
JN
1000static void
1001intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1002{
1003 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1004 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1005 enum port port = intel_dig_port->port;
0b99836f 1006 const char *name = NULL;
ab2c0672
DA
1007 int ret;
1008
33ad6626
JN
1009 switch (port) {
1010 case PORT_A:
1011 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1012 name = "DPDDC-A";
ab2c0672 1013 break;
33ad6626
JN
1014 case PORT_B:
1015 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1016 name = "DPDDC-B";
ab2c0672 1017 break;
33ad6626
JN
1018 case PORT_C:
1019 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1020 name = "DPDDC-C";
ab2c0672 1021 break;
33ad6626
JN
1022 case PORT_D:
1023 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1024 name = "DPDDC-D";
33ad6626
JN
1025 break;
1026 default:
1027 BUG();
ab2c0672
DA
1028 }
1029
1b1aad75
DL
1030 /*
1031 * The AUX_CTL register is usually DP_CTL + 0x10.
1032 *
1033 * On Haswell and Broadwell though:
1034 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1035 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1036 *
1037 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1038 */
1039 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1040 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1041
0b99836f 1042 intel_dp->aux.name = name;
9d1a1031
JN
1043 intel_dp->aux.dev = dev->dev;
1044 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1045
0b99836f
JN
1046 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1047 connector->base.kdev->kobj.name);
8316f337 1048
4f71d0cb 1049 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1050 if (ret < 0) {
4f71d0cb 1051 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1052 name, ret);
1053 return;
ab2c0672 1054 }
8a5e6aeb 1055
0b99836f
JN
1056 ret = sysfs_create_link(&connector->base.kdev->kobj,
1057 &intel_dp->aux.ddc.dev.kobj,
1058 intel_dp->aux.ddc.dev.kobj.name);
1059 if (ret < 0) {
1060 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1061 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1062 }
a4fc5ed6
KP
1063}
1064
80f65de3
ID
1065static void
1066intel_dp_connector_unregister(struct intel_connector *intel_connector)
1067{
1068 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1069
0e32b39c
DA
1070 if (!intel_connector->mst_port)
1071 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1072 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1073 intel_connector_unregister(intel_connector);
1074}
1075
5416d871 1076static void
c3346ef6 1077skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1078{
1079 u32 ctrl1;
1080
1081 pipe_config->ddi_pll_sel = SKL_DPLL0;
1082 pipe_config->dpll_hw_state.cfgcr1 = 0;
1083 pipe_config->dpll_hw_state.cfgcr2 = 0;
1084
1085 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1086 switch (link_clock / 2) {
1087 case 81000:
5416d871
DL
1088 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1089 SKL_DPLL0);
1090 break;
c3346ef6 1091 case 135000:
5416d871
DL
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1093 SKL_DPLL0);
1094 break;
c3346ef6 1095 case 270000:
5416d871
DL
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1097 SKL_DPLL0);
1098 break;
c3346ef6
SJ
1099 case 162000:
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1101 SKL_DPLL0);
1102 break;
1103 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1104 results in CDCLK change. Need to handle the change of CDCLK by
1105 disabling pipes and re-enabling them */
1106 case 108000:
1107 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1108 SKL_DPLL0);
1109 break;
1110 case 216000:
1111 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1112 SKL_DPLL0);
1113 break;
1114
5416d871
DL
1115 }
1116 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1117}
1118
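/*
 * Illustrative mapping for the switch above: the DPLL_CTRL1 link rate fields
 * encode half the link clock, so e.g. a 270000 (HBR) link takes the
 * "case 135000" branch and a 540000 (HBR2) link takes the "case 270000"
 * branch.
 */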
0e50338c 1119static void
5cec258b 1120hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1121{
1122 switch (link_bw) {
1123 case DP_LINK_BW_1_62:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1125 break;
1126 case DP_LINK_BW_2_7:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1128 break;
1129 case DP_LINK_BW_5_4:
1130 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1131 break;
1132 }
1133}
1134
fc0f8e25 1135static int
12f6a2e2 1136intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1137{
94ca719e
VS
1138 if (intel_dp->num_sink_rates) {
1139 *sink_rates = intel_dp->sink_rates;
1140 return intel_dp->num_sink_rates;
fc0f8e25 1141 }
12f6a2e2
VS
1142
1143 *sink_rates = default_rates;
1144
1145 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1146}
1147
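/*
 * Illustrative note on the legacy fallback above (assuming the standard
 * DP_LINK_BW_* codes 0x06/0x0a/0x14): max_link_bw >> 3 yields 0, 1 or 2, so
 * the "+ 1" turns it into the number of applicable default_rates[] entries,
 * e.g. a DP_LINK_BW_2_7 sink gets the two rates { 162000, 270000 }.
 */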
a8f3ef61 1148static int
1db10e28 1149intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1150{
636280ba
VS
1151 if (INTEL_INFO(dev)->gen >= 9) {
1152 *source_rates = gen9_rates;
1153 return ARRAY_SIZE(gen9_rates);
fe51bfb9
VS
1154 } else if (IS_CHERRYVIEW(dev)) {
1155 *source_rates = chv_rates;
1156 return ARRAY_SIZE(chv_rates);
a8f3ef61 1157 }
636280ba
VS
1158
1159 *source_rates = default_rates;
1160
1db10e28
VS
1161 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1162 /* WaDisableHBR2:skl */
1163 return (DP_LINK_BW_2_7 >> 3) + 1;
1164 else if (INTEL_INFO(dev)->gen >= 8 ||
1165 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1166 return (DP_LINK_BW_5_4 >> 3) + 1;
1167 else
1168 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1169}
1170
c6bb3538
DV
1171static void
1172intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1173 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1174{
1175 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1176 const struct dp_link_dpll *divisor = NULL;
1177 int i, count = 0;
c6bb3538
DV
1178
1179 if (IS_G4X(dev)) {
9dd4ffdf
CML
1180 divisor = gen4_dpll;
1181 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1182 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1183 divisor = pch_dpll;
1184 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1185 } else if (IS_CHERRYVIEW(dev)) {
1186 divisor = chv_dpll;
1187 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1188 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1189 divisor = vlv_dpll;
1190 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1191 }
9dd4ffdf
CML
1192
1193 if (divisor && count) {
1194 for (i = 0; i < count; i++) {
1195 if (link_bw == divisor[i].link_bw) {
1196 pipe_config->dpll = divisor[i].dpll;
1197 pipe_config->clock_set = true;
1198 break;
1199 }
1200 }
c6bb3538
DV
1201 }
1202}
1203
2ecae76a
VS
1204static int intersect_rates(const int *source_rates, int source_len,
1205 const int *sink_rates, int sink_len,
94ca719e 1206 int *common_rates)
a8f3ef61
SJ
1207{
1208 int i = 0, j = 0, k = 0;
1209
a8f3ef61
SJ
1210 while (i < source_len && j < sink_len) {
1211 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1212 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1213 return k;
94ca719e 1214 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1215 ++k;
1216 ++i;
1217 ++j;
1218 } else if (source_rates[i] < sink_rates[j]) {
1219 ++i;
1220 } else {
1221 ++j;
1222 }
1223 }
1224 return k;
1225}
1226
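/*
 * Example of the sorted intersection above (illustrative only): with
 * source_rates = { 162000, 270000, 540000 } and sink_rates = { 162000,
 * 270000 }, common_rates ends up as { 162000, 270000 } and the function
 * returns 2. Both inputs must be sorted in ascending order for the
 * two-pointer walk to work.
 */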
94ca719e
VS
1227static int intel_dp_common_rates(struct intel_dp *intel_dp,
1228 int *common_rates)
2ecae76a
VS
1229{
1230 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1231 const int *source_rates, *sink_rates;
1232 int source_len, sink_len;
1233
1234 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1235 source_len = intel_dp_source_rates(dev, &source_rates);
1236
1237 return intersect_rates(source_rates, source_len,
1238 sink_rates, sink_len,
94ca719e 1239 common_rates);
2ecae76a
VS
1240}
1241
0336400e
VS
1242static void snprintf_int_array(char *str, size_t len,
1243 const int *array, int nelem)
1244{
1245 int i;
1246
1247 str[0] = '\0';
1248
1249 for (i = 0; i < nelem; i++) {
1250 int r = snprintf(str, len, "%d,", array[i]);
1251 if (r >= len)
1252 return;
1253 str += r;
1254 len -= r;
1255 }
1256}
1257
1258static void intel_dp_print_rates(struct intel_dp *intel_dp)
1259{
1260 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1261 const int *source_rates, *sink_rates;
94ca719e
VS
1262 int source_len, sink_len, common_len;
1263 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1264 char str[128]; /* FIXME: too big for stack? */
1265
1266 if ((drm_debug & DRM_UT_KMS) == 0)
1267 return;
1268
1269 source_len = intel_dp_source_rates(dev, &source_rates);
1270 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1271 DRM_DEBUG_KMS("source rates: %s\n", str);
1272
1273 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1274 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1275 DRM_DEBUG_KMS("sink rates: %s\n", str);
1276
94ca719e
VS
1277 common_len = intel_dp_common_rates(intel_dp, common_rates);
1278 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1279 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1280}
1281
f4896f15 1282static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1283{
1284 int i = 0;
1285
1286 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1287 if (find == rates[i])
1288 break;
1289
1290 return i;
1291}
1292
50fec21a
VS
1293int
1294intel_dp_max_link_rate(struct intel_dp *intel_dp)
1295{
1296 int rates[DP_MAX_SUPPORTED_RATES] = {};
1297 int len;
1298
94ca719e 1299 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1300 if (WARN_ON(len <= 0))
1301 return 162000;
1302
1303 return rates[rate_to_index(0, rates) - 1];
1304}
1305
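/*
 * Note on the rate_to_index(0, rates) trick above (illustrative): the local
 * rates[] array is zero-initialized and filled with the common rates in
 * ascending order, so the index of the first zero entry equals the number of
 * valid rates, and rates[that - 1] is the highest common link rate.
 */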
ed4e9c1d
VS
1306int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1307{
94ca719e 1308 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1309}
1310
00c09d70 1311bool
5bfe2ac0 1312intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1313 struct intel_crtc_state *pipe_config)
a4fc5ed6 1314{
5bfe2ac0 1315 struct drm_device *dev = encoder->base.dev;
36008365 1316 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1317 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1318 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1319 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1320 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1321 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1322 int lane_count, clock;
56071a20 1323 int min_lane_count = 1;
eeb6324d 1324 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1325 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1326 int min_clock = 0;
a8f3ef61 1327 int max_clock;
083f9560 1328 int bpp, mode_rate;
ff9a6750 1329 int link_avail, link_clock;
94ca719e
VS
1330 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1331 int common_len;
a8f3ef61 1332
94ca719e 1333 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1334
1335 /* No common link rates between source and sink */
94ca719e 1336 WARN_ON(common_len <= 0);
a8f3ef61 1337
94ca719e 1338 max_clock = common_len - 1;
a4fc5ed6 1339
bc7d38a4 1340 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1341 pipe_config->has_pch_encoder = true;
1342
03afc4a2 1343 pipe_config->has_dp_encoder = true;
f769cd24 1344 pipe_config->has_drrs = false;
9ed109a7 1345 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1346
dd06f90e
JN
1347 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1348 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1349 adjusted_mode);
2dd24552
JB
1350 if (!HAS_PCH_SPLIT(dev))
1351 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1352 intel_connector->panel.fitting_mode);
1353 else
b074cec8
JB
1354 intel_pch_panel_fitting(intel_crtc, pipe_config,
1355 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1356 }
1357
cb1793ce 1358 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1359 return false;
1360
083f9560 1361 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1362 "max bw %d pixel clock %iKHz\n",
94ca719e 1363 max_lane_count, common_rates[max_clock],
241bfc38 1364 adjusted_mode->crtc_clock);
083f9560 1365
36008365
DV
1366 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1367 * bpc in between. */
3e7ca985 1368 bpp = pipe_config->pipe_bpp;
56071a20
JN
1369 if (is_edp(intel_dp)) {
1370 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1371 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1372 dev_priv->vbt.edp_bpp);
1373 bpp = dev_priv->vbt.edp_bpp;
1374 }
1375
344c5bbc
JN
1376 /*
1377 * Use the maximum clock and number of lanes the eDP panel
 1378 * advertises being capable of. The panels are generally
1379 * designed to support only a single clock and lane
1380 * configuration, and typically these values correspond to the
1381 * native resolution of the panel.
1382 */
1383 min_lane_count = max_lane_count;
1384 min_clock = max_clock;
7984211e 1385 }
657445fe 1386
36008365 1387 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1388 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1389 bpp);
36008365 1390
c6930992 1391 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1392 for (lane_count = min_lane_count;
1393 lane_count <= max_lane_count;
1394 lane_count <<= 1) {
1395
94ca719e 1396 link_clock = common_rates[clock];
36008365
DV
1397 link_avail = intel_dp_max_data_rate(link_clock,
1398 lane_count);
1399
1400 if (mode_rate <= link_avail) {
1401 goto found;
1402 }
1403 }
1404 }
1405 }
c4867936 1406
36008365 1407 return false;
3685a8f3 1408
36008365 1409found:
55bc60db
VS
1410 if (intel_dp->color_range_auto) {
1411 /*
1412 * See:
1413 * CEA-861-E - 5.1 Default Encoding Parameters
1414 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1415 */
18316c8c 1416 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1417 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1418 else
1419 intel_dp->color_range = 0;
1420 }
1421
3685a8f3 1422 if (intel_dp->color_range)
50f3b016 1423 pipe_config->limited_color_range = true;
a4fc5ed6 1424
36008365 1425 intel_dp->lane_count = lane_count;
a8f3ef61 1426
94ca719e 1427 if (intel_dp->num_sink_rates) {
bc27b7d3 1428 intel_dp->link_bw = 0;
a8f3ef61 1429 intel_dp->rate_select =
94ca719e 1430 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1431 } else {
1432 intel_dp->link_bw =
94ca719e 1433 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1434 intel_dp->rate_select = 0;
a8f3ef61
SJ
1435 }
1436
657445fe 1437 pipe_config->pipe_bpp = bpp;
94ca719e 1438 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1439
36008365
DV
1440 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1441 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1442 pipe_config->port_clock, bpp);
36008365
DV
1443 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1444 mode_rate, link_avail);
a4fc5ed6 1445
03afc4a2 1446 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1447 adjusted_mode->crtc_clock,
1448 pipe_config->port_clock,
03afc4a2 1449 &pipe_config->dp_m_n);
9d1a455b 1450
439d7ac0 1451 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1452 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1453 pipe_config->has_drrs = true;
439d7ac0
PB
1454 intel_link_compute_m_n(bpp, lane_count,
1455 intel_connector->panel.downclock_mode->clock,
1456 pipe_config->port_clock,
1457 &pipe_config->dp_m2_n2);
1458 }
1459
5416d871 1460 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1461 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
5416d871 1462 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1463 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1464 else
1465 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1466
03afc4a2 1467 return true;
a4fc5ed6
KP
1468}
1469
7c62a164 1470static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1471{
7c62a164
DV
1472 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1473 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1474 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1475 struct drm_i915_private *dev_priv = dev->dev_private;
1476 u32 dpa_ctl;
1477
6e3c9717
ACO
1478 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1479 crtc->config->port_clock);
ea9b6006
DV
1480 dpa_ctl = I915_READ(DP_A);
1481 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1482
6e3c9717 1483 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1484 /* For a long time we've carried around a ILK-DevA w/a for the
1485 * 160MHz clock. If we're really unlucky, it's still required.
1486 */
1487 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1488 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1489 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1490 } else {
1491 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1492 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1493 }
1ce17038 1494
ea9b6006
DV
1495 I915_WRITE(DP_A, dpa_ctl);
1496
1497 POSTING_READ(DP_A);
1498 udelay(500);
1499}
1500
8ac33ed3 1501static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1502{
b934223d 1503 struct drm_device *dev = encoder->base.dev;
417e822d 1504 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1505 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1506 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1507 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1508 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1509
417e822d 1510 /*
1a2eb460 1511 * There are four kinds of DP registers:
417e822d
KP
1512 *
1513 * IBX PCH
1a2eb460
KP
1514 * SNB CPU
1515 * IVB CPU
417e822d
KP
1516 * CPT PCH
1517 *
1518 * IBX PCH and CPU are the same for almost everything,
1519 * except that the CPU DP PLL is configured in this
1520 * register
1521 *
1522 * CPT PCH is quite different, having many bits moved
1523 * to the TRANS_DP_CTL register instead. That
1524 * configuration happens (oddly) in ironlake_pch_enable
1525 */
9c9e7927 1526
417e822d
KP
1527 /* Preserve the BIOS-computed detected bit. This is
1528 * supposed to be read-only.
1529 */
1530 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1531
417e822d 1532 /* Handle DP bits in common between all of these register formats */
417e822d 1533 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1534 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1535
6e3c9717 1536 if (crtc->config->has_audio)
ea5b213a 1537 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1538
417e822d 1539 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1540
bc7d38a4 1541 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1542 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1543 intel_dp->DP |= DP_SYNC_HS_HIGH;
1544 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1545 intel_dp->DP |= DP_SYNC_VS_HIGH;
1546 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1547
6aba5b6c 1548 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1549 intel_dp->DP |= DP_ENHANCED_FRAMING;
1550
7c62a164 1551 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1552 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1553 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1554 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1555
1556 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1557 intel_dp->DP |= DP_SYNC_HS_HIGH;
1558 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1559 intel_dp->DP |= DP_SYNC_VS_HIGH;
1560 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1561
6aba5b6c 1562 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1563 intel_dp->DP |= DP_ENHANCED_FRAMING;
1564
44f37d1f
CML
1565 if (!IS_CHERRYVIEW(dev)) {
1566 if (crtc->pipe == 1)
1567 intel_dp->DP |= DP_PIPEB_SELECT;
1568 } else {
1569 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1570 }
417e822d
KP
1571 } else {
1572 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1573 }
a4fc5ed6
KP
1574}
1575
ffd6749d
PZ
1576#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1577#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1578
1a5ef5b7
PZ
1579#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1580#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1581
ffd6749d
PZ
1582#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1583#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1584
4be73780 1585static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1586 u32 mask,
1587 u32 value)
bd943159 1588{
30add22d 1589 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1590 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1591 u32 pp_stat_reg, pp_ctrl_reg;
1592
e39b999a
VS
1593 lockdep_assert_held(&dev_priv->pps_mutex);
1594
bf13e81b
JN
1595 pp_stat_reg = _pp_stat_reg(intel_dp);
1596 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1597
99ea7127 1598 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1599 mask, value,
1600 I915_READ(pp_stat_reg),
1601 I915_READ(pp_ctrl_reg));
32ce697c 1602
453c5420 1603 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1604 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1605 I915_READ(pp_stat_reg),
1606 I915_READ(pp_ctrl_reg));
32ce697c 1607 }
54c136d4
CW
1608
1609 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1610}
32ce697c 1611
4be73780 1612static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1613{
1614 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1615 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1616}
1617
4be73780 1618static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1619{
1620 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1621 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1622}
1623
4be73780 1624static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1625{
1626 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1627
1628 /* When we disable the VDD override bit last we have to do the manual
1629 * wait. */
1630 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1631 intel_dp->panel_power_cycle_delay);
1632
4be73780 1633 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1634}
1635
4be73780 1636static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1637{
1638 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1639 intel_dp->backlight_on_delay);
1640}
1641
4be73780 1642static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1643{
1644 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1645 intel_dp->backlight_off_delay);
1646}
99ea7127 1647
832dd3c1
KP
1648/* Read the current pp_control value, unlocking the register if it
1649 * is locked
1650 */
1651
453c5420 1652static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1653{
453c5420
JB
1654 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1655 struct drm_i915_private *dev_priv = dev->dev_private;
1656 u32 control;
832dd3c1 1657
e39b999a
VS
1658 lockdep_assert_held(&dev_priv->pps_mutex);
1659
bf13e81b 1660 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1661 control &= ~PANEL_UNLOCK_MASK;
1662 control |= PANEL_UNLOCK_REGS;
1663 return control;
bd943159
KP
1664}
1665
951468f3
VS
1666/*
1667 * Must be paired with edp_panel_vdd_off().
1668 * Must hold pps_mutex around the whole on/off sequence.
1669 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1670 */
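/*
 * A minimal usage sketch (illustrative; mirrors intel_enable_dp() later in
 * this file):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	... AUX transfers / panel power register writes ...
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 *
 * The return value only says whether this call was the one that newly
 * requested VDD (it may already have been on), which callers such as
 * intel_edp_panel_vdd_on() use for sanity checks.
 */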
1e0560e0 1671static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1672{
30add22d 1673 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1674 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1675 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1676 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1677 enum intel_display_power_domain power_domain;
5d613501 1678 u32 pp;
453c5420 1679 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1680 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1681
e39b999a
VS
1682 lockdep_assert_held(&dev_priv->pps_mutex);
1683
97af61f5 1684 if (!is_edp(intel_dp))
adddaaf4 1685 return false;
bd943159 1686
2c623c11 1687 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1688 intel_dp->want_panel_vdd = true;
99ea7127 1689
4be73780 1690 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1691 return need_to_disable;
b0665d57 1692
4e6e1a54
ID
1693 power_domain = intel_display_port_power_domain(intel_encoder);
1694 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1695
3936fcf4
VS
1696 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1697 port_name(intel_dig_port->port));
bd943159 1698
4be73780
DV
1699 if (!edp_have_panel_power(intel_dp))
1700 wait_panel_power_cycle(intel_dp);
99ea7127 1701
453c5420 1702 pp = ironlake_get_pp_control(intel_dp);
5d613501 1703 pp |= EDP_FORCE_VDD;
ebf33b18 1704
bf13e81b
JN
1705 pp_stat_reg = _pp_stat_reg(intel_dp);
1706 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1707
1708 I915_WRITE(pp_ctrl_reg, pp);
1709 POSTING_READ(pp_ctrl_reg);
1710 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1711 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1712 /*
1713 * If the panel wasn't on, delay before accessing aux channel
1714 */
4be73780 1715 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1716 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1717 port_name(intel_dig_port->port));
f01eca2e 1718 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1719 }
adddaaf4
JN
1720
1721 return need_to_disable;
1722}
1723
951468f3
VS
1724/*
1725 * Must be paired with intel_edp_panel_vdd_off() or
1726 * intel_edp_panel_off().
1727 * Nested calls to these functions are not allowed since
1728 * we drop the lock. Caller must use some higher level
1729 * locking to prevent nested calls from other threads.
1730 */
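/*
 * Sketch of the intended call pattern (illustrative): this variant takes
 * pps_mutex itself, so it is called from contexts that do not already hold
 * the lock. intel_disable_dp() later in this file does roughly
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	intel_edp_backlight_off(intel_dp);
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 *	intel_edp_panel_off(intel_dp);
 */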
b80d6c78 1731void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1732{
c695b6b6 1733 bool vdd;
adddaaf4 1734
c695b6b6
VS
1735 if (!is_edp(intel_dp))
1736 return;
1737
773538e8 1738 pps_lock(intel_dp);
c695b6b6 1739 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1740 pps_unlock(intel_dp);
c695b6b6 1741
e2c719b7 1742 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1743 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1744}
1745
4be73780 1746static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1747{
30add22d 1748 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1749 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1750 struct intel_digital_port *intel_dig_port =
1751 dp_to_dig_port(intel_dp);
1752 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1753 enum intel_display_power_domain power_domain;
5d613501 1754 u32 pp;
453c5420 1755 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1756
e39b999a 1757 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1758
15e899a0 1759 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1760
15e899a0 1761 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1762 return;
b0665d57 1763
3936fcf4
VS
1764 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1765 port_name(intel_dig_port->port));
bd943159 1766
be2c9196
VS
1767 pp = ironlake_get_pp_control(intel_dp);
1768 pp &= ~EDP_FORCE_VDD;
453c5420 1769
be2c9196
VS
1770 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1771 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1772
be2c9196
VS
1773 I915_WRITE(pp_ctrl_reg, pp);
1774 POSTING_READ(pp_ctrl_reg);
90791a5c 1775
be2c9196
VS
1776 /* Make sure sequencer is idle before allowing subsequent activity */
1777 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1778 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1779
be2c9196
VS
1780 if ((pp & POWER_TARGET_ON) == 0)
1781 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1782
be2c9196
VS
1783 power_domain = intel_display_port_power_domain(intel_encoder);
1784 intel_display_power_put(dev_priv, power_domain);
bd943159 1785}
5d613501 1786
4be73780 1787static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1788{
1789 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1790 struct intel_dp, panel_vdd_work);
bd943159 1791
773538e8 1792 pps_lock(intel_dp);
15e899a0
VS
1793 if (!intel_dp->want_panel_vdd)
1794 edp_panel_vdd_off_sync(intel_dp);
773538e8 1795 pps_unlock(intel_dp);
bd943159
KP
1796}
1797
aba86890
ID
1798static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1799{
1800 unsigned long delay;
1801
1802 /*
1803 * Queue the timer to fire a long time from now (relative to the power
1804 * down delay) to keep the panel power up across a sequence of
1805 * operations.
1806 */
1807 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
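 /*
  * For example (illustrative numbers only): with a 500 ms
  * panel_power_cycle_delay the work is scheduled roughly 2.5 seconds out,
  * comfortably after any single AUX or panel power operation.
  */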
1808 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1809}
1810
951468f3
VS
1811/*
1812 * Must be paired with edp_panel_vdd_on().
1813 * Must hold pps_mutex around the whole on/off sequence.
1814 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1815 */
4be73780 1816static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1817{
e39b999a
VS
1818 struct drm_i915_private *dev_priv =
1819 intel_dp_to_dev(intel_dp)->dev_private;
1820
1821 lockdep_assert_held(&dev_priv->pps_mutex);
1822
97af61f5
KP
1823 if (!is_edp(intel_dp))
1824 return;
5d613501 1825
e2c719b7 1826 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1827 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1828
bd943159
KP
1829 intel_dp->want_panel_vdd = false;
1830
aba86890 1831 if (sync)
4be73780 1832 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1833 else
1834 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1835}
1836
9f0fb5be 1837static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1838{
30add22d 1839 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1840 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1841 u32 pp;
453c5420 1842 u32 pp_ctrl_reg;
9934c132 1843
9f0fb5be
VS
1844 lockdep_assert_held(&dev_priv->pps_mutex);
1845
97af61f5 1846 if (!is_edp(intel_dp))
bd943159 1847 return;
99ea7127 1848
3936fcf4
VS
1849 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1850 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1851
e7a89ace
VS
1852 if (WARN(edp_have_panel_power(intel_dp),
1853 "eDP port %c panel power already on\n",
1854 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1855 return;
9934c132 1856
4be73780 1857 wait_panel_power_cycle(intel_dp);
37c6c9b0 1858
bf13e81b 1859 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1860 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1861 if (IS_GEN5(dev)) {
1862 /* ILK workaround: disable reset around power sequence */
1863 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1864 I915_WRITE(pp_ctrl_reg, pp);
1865 POSTING_READ(pp_ctrl_reg);
05ce1a49 1866 }
37c6c9b0 1867
1c0ae80a 1868 pp |= POWER_TARGET_ON;
99ea7127
KP
1869 if (!IS_GEN5(dev))
1870 pp |= PANEL_POWER_RESET;
1871
453c5420
JB
1872 I915_WRITE(pp_ctrl_reg, pp);
1873 POSTING_READ(pp_ctrl_reg);
9934c132 1874
4be73780 1875 wait_panel_on(intel_dp);
dce56b3c 1876 intel_dp->last_power_on = jiffies;
9934c132 1877
05ce1a49
KP
1878 if (IS_GEN5(dev)) {
1879 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1880 I915_WRITE(pp_ctrl_reg, pp);
1881 POSTING_READ(pp_ctrl_reg);
05ce1a49 1882 }
9f0fb5be 1883}
e39b999a 1884
9f0fb5be
VS
1885void intel_edp_panel_on(struct intel_dp *intel_dp)
1886{
1887 if (!is_edp(intel_dp))
1888 return;
1889
1890 pps_lock(intel_dp);
1891 edp_panel_on(intel_dp);
773538e8 1892 pps_unlock(intel_dp);
9934c132
JB
1893}
1894
9f0fb5be
VS
1895
1896static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1897{
4e6e1a54
ID
1898 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1899 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1900 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1901 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1902 enum intel_display_power_domain power_domain;
99ea7127 1903 u32 pp;
453c5420 1904 u32 pp_ctrl_reg;
9934c132 1905
9f0fb5be
VS
1906 lockdep_assert_held(&dev_priv->pps_mutex);
1907
97af61f5
KP
1908 if (!is_edp(intel_dp))
1909 return;
37c6c9b0 1910
3936fcf4
VS
1911 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1912 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1913
3936fcf4
VS
1914 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1915 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1916
453c5420 1917 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1918 /* We need to switch off panel power _and_ force vdd, because otherwise some
1919 * panels get very unhappy and cease to work. */
b3064154
PJ
1920 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1921 EDP_BLC_ENABLE);
453c5420 1922
bf13e81b 1923 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1924
849e39f5
PZ
1925 intel_dp->want_panel_vdd = false;
1926
453c5420
JB
1927 I915_WRITE(pp_ctrl_reg, pp);
1928 POSTING_READ(pp_ctrl_reg);
9934c132 1929
dce56b3c 1930 intel_dp->last_power_cycle = jiffies;
4be73780 1931 wait_panel_off(intel_dp);
849e39f5
PZ
1932
1933 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1934 power_domain = intel_display_port_power_domain(intel_encoder);
1935 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1936}
e39b999a 1937
9f0fb5be
VS
1938void intel_edp_panel_off(struct intel_dp *intel_dp)
1939{
1940 if (!is_edp(intel_dp))
1941 return;
e39b999a 1942
9f0fb5be
VS
1943 pps_lock(intel_dp);
1944 edp_panel_off(intel_dp);
773538e8 1945 pps_unlock(intel_dp);
9934c132
JB
1946}
1947
1250d107
JN
1948/* Enable backlight in the panel power control. */
1949static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1950{
da63a9f2
PZ
1951 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1952 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1953 struct drm_i915_private *dev_priv = dev->dev_private;
1954 u32 pp;
453c5420 1955 u32 pp_ctrl_reg;
32f9d658 1956
01cb9ea6
JB
1957 /*
1958 * If we enable the backlight right away following a panel power
1959 * on, we may see slight flicker as the panel syncs with the eDP
1960 * link. So delay a bit to make sure the image is solid before
1961 * allowing it to appear.
1962 */
4be73780 1963 wait_backlight_on(intel_dp);
e39b999a 1964
773538e8 1965 pps_lock(intel_dp);
e39b999a 1966
453c5420 1967 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1968 pp |= EDP_BLC_ENABLE;
453c5420 1969
bf13e81b 1970 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1971
1972 I915_WRITE(pp_ctrl_reg, pp);
1973 POSTING_READ(pp_ctrl_reg);
e39b999a 1974
773538e8 1975 pps_unlock(intel_dp);
32f9d658
ZW
1976}
1977
1250d107
JN
1978/* Enable backlight PWM and backlight PP control. */
1979void intel_edp_backlight_on(struct intel_dp *intel_dp)
1980{
1981 if (!is_edp(intel_dp))
1982 return;
1983
1984 DRM_DEBUG_KMS("\n");
1985
1986 intel_panel_enable_backlight(intel_dp->attached_connector);
1987 _intel_edp_backlight_on(intel_dp);
1988}
1989
1990/* Disable backlight in the panel power control. */
1991static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1992{
30add22d 1993 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995 u32 pp;
453c5420 1996 u32 pp_ctrl_reg;
32f9d658 1997
f01eca2e
KP
1998 if (!is_edp(intel_dp))
1999 return;
2000
773538e8 2001 pps_lock(intel_dp);
e39b999a 2002
453c5420 2003 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2004 pp &= ~EDP_BLC_ENABLE;
453c5420 2005
bf13e81b 2006 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2007
2008 I915_WRITE(pp_ctrl_reg, pp);
2009 POSTING_READ(pp_ctrl_reg);
f7d2323c 2010
773538e8 2011 pps_unlock(intel_dp);
e39b999a
VS
2012
2013 intel_dp->last_backlight_off = jiffies;
f7d2323c 2014 edp_wait_backlight_off(intel_dp);
1250d107 2015}
f7d2323c 2016
1250d107
JN
2017/* Disable backlight PP control and backlight PWM. */
2018void intel_edp_backlight_off(struct intel_dp *intel_dp)
2019{
2020 if (!is_edp(intel_dp))
2021 return;
2022
2023 DRM_DEBUG_KMS("\n");
f7d2323c 2024
1250d107 2025 _intel_edp_backlight_off(intel_dp);
f7d2323c 2026 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2027}
a4fc5ed6 2028
73580fb7
JN
2029/*
2030 * Hook for controlling the panel power control backlight through the bl_power
2031 * sysfs attribute. Take care to handle multiple calls.
2032 */
2033static void intel_edp_backlight_power(struct intel_connector *connector,
2034 bool enable)
2035{
2036 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2037 bool is_enabled;
2038
773538e8 2039 pps_lock(intel_dp);
e39b999a 2040 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2041 pps_unlock(intel_dp);
73580fb7
JN
2042
2043 if (is_enabled == enable)
2044 return;
2045
23ba9373
JN
2046 DRM_DEBUG_KMS("panel power control backlight %s\n",
2047 enable ? "enable" : "disable");
73580fb7
JN
2048
2049 if (enable)
2050 _intel_edp_backlight_on(intel_dp);
2051 else
2052 _intel_edp_backlight_off(intel_dp);
2053}
2054
2bd2ad64 2055static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2056{
da63a9f2
PZ
2057 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2058 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2059 struct drm_device *dev = crtc->dev;
d240f20f
JB
2060 struct drm_i915_private *dev_priv = dev->dev_private;
2061 u32 dpa_ctl;
2062
2bd2ad64
DV
2063 assert_pipe_disabled(dev_priv,
2064 to_intel_crtc(crtc)->pipe);
2065
d240f20f
JB
2066 DRM_DEBUG_KMS("\n");
2067 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2068 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2069 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2070
2071 /* We don't adjust intel_dp->DP while tearing down the link, to
2072 * facilitate link retraining (e.g. after hotplug). Hence clear all
2073 * enable bits here to ensure that we don't enable too much. */
2074 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2075 intel_dp->DP |= DP_PLL_ENABLE;
2076 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2077 POSTING_READ(DP_A);
2078 udelay(200);
d240f20f
JB
2079}
2080
2bd2ad64 2081static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2082{
da63a9f2
PZ
2083 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2084 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2085 struct drm_device *dev = crtc->dev;
d240f20f
JB
2086 struct drm_i915_private *dev_priv = dev->dev_private;
2087 u32 dpa_ctl;
2088
2bd2ad64
DV
2089 assert_pipe_disabled(dev_priv,
2090 to_intel_crtc(crtc)->pipe);
2091
d240f20f 2092 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2093 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2094 "dp pll off, should be on\n");
2095 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2096
2097 /* We can't rely on the value tracked for the DP register in
2098 * intel_dp->DP because link_down must not change that (otherwise link
2099 * re-training will fail). */
298b0b39 2100 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2101 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2102 POSTING_READ(DP_A);
d240f20f
JB
2103 udelay(200);
2104}
2105
c7ad3810 2106/* If the sink supports it, try to set the power state appropriately */
c19b0669 2107void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2108{
2109 int ret, i;
2110
2111 /* Should have a valid DPCD by this point */
2112 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2113 return;
2114
2115 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2116 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2117 DP_SET_POWER_D3);
c7ad3810
JB
2118 } else {
2119 /*
2120 * When turning on, we need to retry for 1ms to give the sink
2121 * time to wake up.
2122 */
2123 for (i = 0; i < 3; i++) {
9d1a1031
JN
2124 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2125 DP_SET_POWER_D0);
c7ad3810
JB
2126 if (ret == 1)
2127 break;
2128 msleep(1);
2129 }
2130 }
f9cac721
JN
2131
2132 if (ret != 1)
2133 DRM_DEBUG_KMS("failed to %s sink power state\n",
2134 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2135}
2136
19d8fe15
DV
2137static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2138 enum pipe *pipe)
d240f20f 2139{
19d8fe15 2140 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2141 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2142 struct drm_device *dev = encoder->base.dev;
2143 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2144 enum intel_display_power_domain power_domain;
2145 u32 tmp;
2146
2147 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2148 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2149 return false;
2150
2151 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2152
2153 if (!(tmp & DP_PORT_EN))
2154 return false;
2155
bc7d38a4 2156 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2157 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2158 } else if (IS_CHERRYVIEW(dev)) {
2159 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2160 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2161 *pipe = PORT_TO_PIPE(tmp);
2162 } else {
2163 u32 trans_sel;
2164 u32 trans_dp;
2165 int i;
2166
2167 switch (intel_dp->output_reg) {
2168 case PCH_DP_B:
2169 trans_sel = TRANS_DP_PORT_SEL_B;
2170 break;
2171 case PCH_DP_C:
2172 trans_sel = TRANS_DP_PORT_SEL_C;
2173 break;
2174 case PCH_DP_D:
2175 trans_sel = TRANS_DP_PORT_SEL_D;
2176 break;
2177 default:
2178 return true;
2179 }
2180
055e393f 2181 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2182 trans_dp = I915_READ(TRANS_DP_CTL(i));
2183 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2184 *pipe = i;
2185 return true;
2186 }
2187 }
19d8fe15 2188
4a0833ec
DV
2189 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2190 intel_dp->output_reg);
2191 }
d240f20f 2192
19d8fe15
DV
2193 return true;
2194}
d240f20f 2195
045ac3b5 2196static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2197 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2198{
2199 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2200 u32 tmp, flags = 0;
63000ef6
XZ
2201 struct drm_device *dev = encoder->base.dev;
2202 struct drm_i915_private *dev_priv = dev->dev_private;
2203 enum port port = dp_to_dig_port(intel_dp)->port;
2204 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2205 int dotclock;
045ac3b5 2206
9ed109a7
DV
2207 tmp = I915_READ(intel_dp->output_reg);
2208 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2209 pipe_config->has_audio = true;
2210
63000ef6 2211 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2212 if (tmp & DP_SYNC_HS_HIGH)
2213 flags |= DRM_MODE_FLAG_PHSYNC;
2214 else
2215 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2216
63000ef6
XZ
2217 if (tmp & DP_SYNC_VS_HIGH)
2218 flags |= DRM_MODE_FLAG_PVSYNC;
2219 else
2220 flags |= DRM_MODE_FLAG_NVSYNC;
2221 } else {
2222 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2223 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2224 flags |= DRM_MODE_FLAG_PHSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2227
63000ef6
XZ
2228 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2229 flags |= DRM_MODE_FLAG_PVSYNC;
2230 else
2231 flags |= DRM_MODE_FLAG_NVSYNC;
2232 }
045ac3b5 2233
2d112de7 2234 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2235
8c875fca
VS
2236 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2237 tmp & DP_COLOR_RANGE_16_235)
2238 pipe_config->limited_color_range = true;
2239
eb14cb74
VS
2240 pipe_config->has_dp_encoder = true;
2241
2242 intel_dp_get_m_n(crtc, pipe_config);
2243
18442d08 2244 if (port == PORT_A) {
f1f644dc
JB
2245 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2246 pipe_config->port_clock = 162000;
2247 else
2248 pipe_config->port_clock = 270000;
2249 }
18442d08
VS
2250
2251 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2252 &pipe_config->dp_m_n);
2253
2254 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2255 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2256
2d112de7 2257 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2258
c6cd2ee2
JN
2259 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2260 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2261 /*
2262 * This is a big fat ugly hack.
2263 *
2264 * Some machines in UEFI boot mode provide us a VBT that has 18
2265 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2266 * unknown we fail to light up. Yet the same BIOS boots up with
2267 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2268 * max, not what it tells us to use.
2269 *
2270 * Note: This will still be broken if the eDP panel is not lit
2271 * up by the BIOS, and thus we can't get the mode at module
2272 * load.
2273 */
2274 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2275 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2276 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2277 }
045ac3b5
JB
2278}
2279
e8cb4558 2280static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2281{
e8cb4558 2282 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2283 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2284 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2285
6e3c9717 2286 if (crtc->config->has_audio)
495a5bb8 2287 intel_audio_codec_disable(encoder);
6cb49835 2288
b32c6f48
RV
2289 if (HAS_PSR(dev) && !HAS_DDI(dev))
2290 intel_psr_disable(intel_dp);
2291
6cb49835
DV
2292 /* Make sure the panel is off before trying to change the mode. But also
2293 * ensure that we have vdd while we switch off the panel. */
24f3e092 2294 intel_edp_panel_vdd_on(intel_dp);
4be73780 2295 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2296 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2297 intel_edp_panel_off(intel_dp);
3739850b 2298
08aff3fe
VS
2299 /* disable the port before the pipe on g4x */
2300 if (INTEL_INFO(dev)->gen < 5)
3739850b 2301 intel_dp_link_down(intel_dp);
d240f20f
JB
2302}
2303
08aff3fe 2304static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2305{
2bd2ad64 2306 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2307 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2308
49277c31 2309 intel_dp_link_down(intel_dp);
08aff3fe
VS
2310 if (port == PORT_A)
2311 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2312}
2313
2314static void vlv_post_disable_dp(struct intel_encoder *encoder)
2315{
2316 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2317
2318 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2319}
2320
580d3811
VS
2321static void chv_post_disable_dp(struct intel_encoder *encoder)
2322{
2323 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2324 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2325 struct drm_device *dev = encoder->base.dev;
2326 struct drm_i915_private *dev_priv = dev->dev_private;
2327 struct intel_crtc *intel_crtc =
2328 to_intel_crtc(encoder->base.crtc);
2329 enum dpio_channel ch = vlv_dport_to_channel(dport);
2330 enum pipe pipe = intel_crtc->pipe;
2331 u32 val;
2332
2333 intel_dp_link_down(intel_dp);
2334
2335 mutex_lock(&dev_priv->dpio_lock);
2336
2337 /* Propagate soft reset to data lane reset */
97fd4d5c 2338 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2339 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2340 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2341
97fd4d5c
VS
2342 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2343 val |= CHV_PCS_REQ_SOFTRESET_EN;
2344 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2345
2346 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2347 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2348 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2349
2350 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2351 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2352 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2353
2354 mutex_unlock(&dev_priv->dpio_lock);
2355}
2356
7b13b58a
VS
2357static void
2358_intel_dp_set_link_train(struct intel_dp *intel_dp,
2359 uint32_t *DP,
2360 uint8_t dp_train_pat)
2361{
2362 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2363 struct drm_device *dev = intel_dig_port->base.base.dev;
2364 struct drm_i915_private *dev_priv = dev->dev_private;
2365 enum port port = intel_dig_port->port;
2366
2367 if (HAS_DDI(dev)) {
2368 uint32_t temp = I915_READ(DP_TP_CTL(port));
2369
2370 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2371 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2372 else
2373 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2374
2375 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2376 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2377 case DP_TRAINING_PATTERN_DISABLE:
2378 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2379
2380 break;
2381 case DP_TRAINING_PATTERN_1:
2382 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2383 break;
2384 case DP_TRAINING_PATTERN_2:
2385 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2386 break;
2387 case DP_TRAINING_PATTERN_3:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2389 break;
2390 }
2391 I915_WRITE(DP_TP_CTL(port), temp);
2392
2393 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2394 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2395
2396 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2397 case DP_TRAINING_PATTERN_DISABLE:
2398 *DP |= DP_LINK_TRAIN_OFF_CPT;
2399 break;
2400 case DP_TRAINING_PATTERN_1:
2401 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2402 break;
2403 case DP_TRAINING_PATTERN_2:
2404 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_3:
2407 DRM_ERROR("DP training pattern 3 not supported\n");
2408 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2409 break;
2410 }
2411
2412 } else {
2413 if (IS_CHERRYVIEW(dev))
2414 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2415 else
2416 *DP &= ~DP_LINK_TRAIN_MASK;
2417
2418 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2419 case DP_TRAINING_PATTERN_DISABLE:
2420 *DP |= DP_LINK_TRAIN_OFF;
2421 break;
2422 case DP_TRAINING_PATTERN_1:
2423 *DP |= DP_LINK_TRAIN_PAT_1;
2424 break;
2425 case DP_TRAINING_PATTERN_2:
2426 *DP |= DP_LINK_TRAIN_PAT_2;
2427 break;
2428 case DP_TRAINING_PATTERN_3:
2429 if (IS_CHERRYVIEW(dev)) {
2430 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2431 } else {
2432 DRM_ERROR("DP training pattern 3 not supported\n");
2433 *DP |= DP_LINK_TRAIN_PAT_2;
2434 }
2435 break;
2436 }
2437 }
2438}
2439
2440static void intel_dp_enable_port(struct intel_dp *intel_dp)
2441{
2442 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2443 struct drm_i915_private *dev_priv = dev->dev_private;
2444
7b13b58a
VS
2445 /* enable with pattern 1 (as per spec) */
2446 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2447 DP_TRAINING_PATTERN_1);
2448
2449 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2450 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2451
2452 /*
2453 * Magic for VLV/CHV. We _must_ first set up the register
2454 * without actually enabling the port, and then do another
2455 * write to enable the port. Otherwise link training will
2456 * fail when the power sequencer is freshly used for this port.
2457 */
2458 intel_dp->DP |= DP_PORT_EN;
2459
2460 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2461 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2462}
2463
e8cb4558 2464static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2465{
e8cb4558
DV
2466 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2467 struct drm_device *dev = encoder->base.dev;
2468 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2469 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2470 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2471
0c33d8d7
DV
2472 if (WARN_ON(dp_reg & DP_PORT_EN))
2473 return;
5d613501 2474
093e3f13
VS
2475 pps_lock(intel_dp);
2476
2477 if (IS_VALLEYVIEW(dev))
2478 vlv_init_panel_power_sequencer(intel_dp);
2479
7b13b58a 2480 intel_dp_enable_port(intel_dp);
093e3f13
VS
2481
2482 edp_panel_vdd_on(intel_dp);
2483 edp_panel_on(intel_dp);
2484 edp_panel_vdd_off(intel_dp, true);
2485
2486 pps_unlock(intel_dp);
2487
61234fa5
VS
2488 if (IS_VALLEYVIEW(dev))
2489 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2490
f01eca2e 2491 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2492 intel_dp_start_link_train(intel_dp);
33a34e4e 2493 intel_dp_complete_link_train(intel_dp);
3ab9c637 2494 intel_dp_stop_link_train(intel_dp);
c1dec79a 2495
6e3c9717 2496 if (crtc->config->has_audio) {
c1dec79a
JN
2497 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2498 pipe_name(crtc->pipe));
2499 intel_audio_codec_enable(encoder);
2500 }
ab1f90f9 2501}
89b667f8 2502
ecff4f3b
JN
2503static void g4x_enable_dp(struct intel_encoder *encoder)
2504{
828f5c6e
JN
2505 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2506
ecff4f3b 2507 intel_enable_dp(encoder);
4be73780 2508 intel_edp_backlight_on(intel_dp);
ab1f90f9 2509}
89b667f8 2510
ab1f90f9
JN
2511static void vlv_enable_dp(struct intel_encoder *encoder)
2512{
828f5c6e
JN
2513 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2514
4be73780 2515 intel_edp_backlight_on(intel_dp);
b32c6f48 2516 intel_psr_enable(intel_dp);
d240f20f
JB
2517}
2518
ecff4f3b 2519static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2520{
2521 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2522 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2523
8ac33ed3
DV
2524 intel_dp_prepare(encoder);
2525
d41f1efb
DV
2526 /* Only ilk+ has port A */
2527 if (dport->port == PORT_A) {
2528 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2529 ironlake_edp_pll_on(intel_dp);
d41f1efb 2530 }
ab1f90f9
JN
2531}
2532
83b84597
VS
2533static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2534{
2535 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2536 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2537 enum pipe pipe = intel_dp->pps_pipe;
2538 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2539
2540 edp_panel_vdd_off_sync(intel_dp);
2541
2542 /*
2543 * VLV seems to get confused when multiple power sequencers
2544 * have the same port selected (even if only one has power/vdd
2545 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2546 * CHV, on the other hand, doesn't seem to mind having the same port
2547 * selected in multiple power sequencers, but let's always clear the
2548 * port select when logically disconnecting a power sequencer
2549 * from a port.
2550 */
2551 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2552 pipe_name(pipe), port_name(intel_dig_port->port));
2553 I915_WRITE(pp_on_reg, 0);
2554 POSTING_READ(pp_on_reg);
2555
2556 intel_dp->pps_pipe = INVALID_PIPE;
2557}
2558
a4a5d2f8
VS
2559static void vlv_steal_power_sequencer(struct drm_device *dev,
2560 enum pipe pipe)
2561{
2562 struct drm_i915_private *dev_priv = dev->dev_private;
2563 struct intel_encoder *encoder;
2564
2565 lockdep_assert_held(&dev_priv->pps_mutex);
2566
ac3c12e4
VS
2567 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2568 return;
2569
a4a5d2f8
VS
2570 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2571 base.head) {
2572 struct intel_dp *intel_dp;
773538e8 2573 enum port port;
a4a5d2f8
VS
2574
2575 if (encoder->type != INTEL_OUTPUT_EDP)
2576 continue;
2577
2578 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2579 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2580
2581 if (intel_dp->pps_pipe != pipe)
2582 continue;
2583
2584 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2585 pipe_name(pipe), port_name(port));
a4a5d2f8 2586
034e43c6
VS
2587 WARN(encoder->connectors_active,
2588 "stealing pipe %c power sequencer from active eDP port %c\n",
2589 pipe_name(pipe), port_name(port));
a4a5d2f8 2590
a4a5d2f8 2591 /* make sure vdd is off before we steal it */
83b84597 2592 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2593 }
2594}
2595
2596static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2597{
2598 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2599 struct intel_encoder *encoder = &intel_dig_port->base;
2600 struct drm_device *dev = encoder->base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2603
2604 lockdep_assert_held(&dev_priv->pps_mutex);
2605
093e3f13
VS
2606 if (!is_edp(intel_dp))
2607 return;
2608
a4a5d2f8
VS
2609 if (intel_dp->pps_pipe == crtc->pipe)
2610 return;
2611
2612 /*
2613 * If another power sequencer was being used on this
2614 * port previously, make sure to turn off vdd there while
2615 * we still have control of it.
2616 */
2617 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2618 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2619
2620 /*
2621 * We may be stealing the power
2622 * sequencer from another port.
2623 */
2624 vlv_steal_power_sequencer(dev, crtc->pipe);
2625
2626 /* now it's all ours */
2627 intel_dp->pps_pipe = crtc->pipe;
2628
2629 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2630 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2631
2632 /* init power sequencer on this pipe and port */
36b5f425
VS
2633 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2634 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2635}
2636
ab1f90f9 2637static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2638{
2bd2ad64 2639 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2640 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2641 struct drm_device *dev = encoder->base.dev;
89b667f8 2642 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2643 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2644 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2645 int pipe = intel_crtc->pipe;
2646 u32 val;
a4fc5ed6 2647
ab1f90f9 2648 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2649
ab3c759a 2650 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2651 val = 0;
2652 if (pipe)
2653 val |= (1<<21);
2654 else
2655 val &= ~(1<<21);
2656 val |= 0x001000c4;
ab3c759a
CML
2657 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2658 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2659 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2660
ab1f90f9
JN
2661 mutex_unlock(&dev_priv->dpio_lock);
2662
2663 intel_enable_dp(encoder);
89b667f8
JB
2664}
2665
ecff4f3b 2666static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2667{
2668 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2669 struct drm_device *dev = encoder->base.dev;
2670 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2671 struct intel_crtc *intel_crtc =
2672 to_intel_crtc(encoder->base.crtc);
e4607fcf 2673 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2674 int pipe = intel_crtc->pipe;
89b667f8 2675
8ac33ed3
DV
2676 intel_dp_prepare(encoder);
2677
89b667f8 2678 /* Program Tx lane resets to default */
0980a60f 2679 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2680 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2681 DPIO_PCS_TX_LANE2_RESET |
2682 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2684 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2685 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2686 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2687 DPIO_PCS_CLK_SOFT_RESET);
2688
2689 /* Fix up inter-pair skew failure */
ab3c759a
CML
2690 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2691 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2692 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2693 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2694}
2695
e4a1d846
CML
2696static void chv_pre_enable_dp(struct intel_encoder *encoder)
2697{
2698 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2699 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2702 struct intel_crtc *intel_crtc =
2703 to_intel_crtc(encoder->base.crtc);
2704 enum dpio_channel ch = vlv_dport_to_channel(dport);
2705 int pipe = intel_crtc->pipe;
2706 int data, i;
949c1d43 2707 u32 val;
e4a1d846 2708
e4a1d846 2709 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2710
570e2a74
VS
2711 /* allow hardware to manage TX FIFO reset source */
2712 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2713 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2714 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2715
2716 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2717 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2718 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2719
949c1d43 2720 /* Deassert soft data lane reset */
97fd4d5c 2721 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2722 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2723 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2724
2725 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2726 val |= CHV_PCS_REQ_SOFTRESET_EN;
2727 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2728
2729 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2730 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2731 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2732
97fd4d5c 2733 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2734 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2735 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2736
2737 /* Program Tx lane latency optimal setting */
e4a1d846
CML
2738 for (i = 0; i < 4; i++) {
2739 /* Set the latency optimal bit */
2740 data = (i == 1) ? 0x0 : 0x6;
2741 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2742 data << DPIO_FRC_LATENCY_SHFIT);
2743
2744 /* Set the upar bit */
2745 data = (i == 1) ? 0x0 : 0x1;
2746 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2747 data << DPIO_UPAR_SHIFT);
2748 }
2749
2750 /* Data lane stagger programming */
2751 /* FIXME: Fix up value only after power analysis */
2752
2753 mutex_unlock(&dev_priv->dpio_lock);
2754
e4a1d846 2755 intel_enable_dp(encoder);
e4a1d846
CML
2756}
2757
9197c88b
VS
2758static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2759{
2760 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2761 struct drm_device *dev = encoder->base.dev;
2762 struct drm_i915_private *dev_priv = dev->dev_private;
2763 struct intel_crtc *intel_crtc =
2764 to_intel_crtc(encoder->base.crtc);
2765 enum dpio_channel ch = vlv_dport_to_channel(dport);
2766 enum pipe pipe = intel_crtc->pipe;
2767 u32 val;
2768
625695f8
VS
2769 intel_dp_prepare(encoder);
2770
9197c88b
VS
2771 mutex_lock(&dev_priv->dpio_lock);
2772
b9e5ac3c
VS
2773 /* program left/right clock distribution */
2774 if (pipe != PIPE_B) {
2775 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2776 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2777 if (ch == DPIO_CH0)
2778 val |= CHV_BUFLEFTENA1_FORCE;
2779 if (ch == DPIO_CH1)
2780 val |= CHV_BUFRIGHTENA1_FORCE;
2781 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2782 } else {
2783 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2784 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2785 if (ch == DPIO_CH0)
2786 val |= CHV_BUFLEFTENA2_FORCE;
2787 if (ch == DPIO_CH1)
2788 val |= CHV_BUFRIGHTENA2_FORCE;
2789 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2790 }
2791
9197c88b
VS
2792 /* program clock channel usage */
2793 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2794 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2795 if (pipe != PIPE_B)
2796 val &= ~CHV_PCS_USEDCLKCHANNEL;
2797 else
2798 val |= CHV_PCS_USEDCLKCHANNEL;
2799 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2800
2801 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2802 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2803 if (pipe != PIPE_B)
2804 val &= ~CHV_PCS_USEDCLKCHANNEL;
2805 else
2806 val |= CHV_PCS_USEDCLKCHANNEL;
2807 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2808
2809 /*
2810 * This is a bit weird since generally CL
2811 * matches the pipe, but here we need to
2812 * pick the CL based on the port.
2813 */
2814 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2815 if (pipe != PIPE_B)
2816 val &= ~CHV_CMN_USEDCLKCHANNEL;
2817 else
2818 val |= CHV_CMN_USEDCLKCHANNEL;
2819 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2820
2821 mutex_unlock(&dev_priv->dpio_lock);
2822}
2823
a4fc5ed6 2824/*
df0c237d
JB
2825 * Native read with retry for link status and receiver capability reads for
2826 * cases where the sink may still be asleep.
9d1a1031
JN
2827 *
2828 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2829 * supposed to retry 3 times per the spec.
a4fc5ed6 2830 */
9d1a1031
JN
2831static ssize_t
2832intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2833 void *buffer, size_t size)
a4fc5ed6 2834{
9d1a1031
JN
2835 ssize_t ret;
2836 int i;
61da5fab 2837
f6a19066
VS
2838 /*
2839 * Sometimes we just get the same incorrect byte repeated
2840 * over the entire buffer. Doing just one throw-away read
2841 * initially seems to "solve" it.
2842 */
2843 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2844
61da5fab 2845 for (i = 0; i < 3; i++) {
9d1a1031
JN
2846 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2847 if (ret == size)
2848 return ret;
61da5fab
JB
2849 msleep(1);
2850 }
a4fc5ed6 2851
9d1a1031 2852 return ret;
a4fc5ed6
KP
2853}
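/*
 * Illustrative caller sketch (hypothetical; the real callers appear
 * elsewhere in this file): any DPCD read that may race with a sleeping
 * sink goes through the wake/retry helper above, e.g.
 *
 *	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DPCD_REV,
 *				    intel_dp->dpcd,
 *				    sizeof(intel_dp->dpcd)) != sizeof(intel_dp->dpcd))
 *		return false;
 */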
2854
2855/*
2856 * Fetch AUX CH registers 0x202 - 0x207 which contain
2857 * link status information
2858 */
2859static bool
93f62dad 2860intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2861{
9d1a1031
JN
2862 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2863 DP_LANE0_1_STATUS,
2864 link_status,
2865 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2866}
2867
1100244e 2868/* These are source-specific values. */
a4fc5ed6 2869static uint8_t
1a2eb460 2870intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2871{
30add22d 2872 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2873 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2874 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2875
7ad14a29
SJ
2876 if (INTEL_INFO(dev)->gen >= 9) {
2877 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2878 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2879 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2880 } else if (IS_VALLEYVIEW(dev))
bd60018a 2881 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2882 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2883 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2884 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2886 else
bd60018a 2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2888}
2889
2890static uint8_t
2891intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2892{
30add22d 2893 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2894 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2895
5a9d1f1a
DL
2896 if (INTEL_INFO(dev)->gen >= 9) {
2897 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2898 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2899 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2900 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2901 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2902 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2903 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2906 default:
2907 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2908 }
2909 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2910 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2911 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2912 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2913 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2914 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2915 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2916 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2918 default:
bd60018a 2919 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2920 }
e2fa6fba
P
2921 } else if (IS_VALLEYVIEW(dev)) {
2922 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2924 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2925 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2926 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2928 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2930 default:
bd60018a 2931 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2932 }
bc7d38a4 2933 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2934 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2936 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2937 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2938 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2939 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2940 default:
bd60018a 2941 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2942 }
2943 } else {
2944 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2945 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2946 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2948 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2949 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2950 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2952 default:
bd60018a 2953 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2954 }
a4fc5ed6
KP
2955 }
2956}
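/*
 * Worked example (illustrative): in the final fallback branch above
 * (e.g. older platforms and CPT PCH ports), a requested voltage swing of
 * level 3 is allowed at most pre-emphasis level 0, while level 0 or 1
 * swings may go up to pre-emphasis level 2. intel_get_adjust_train()
 * further below clamps the sink's adjust requests to these source limits
 * and sets the DP_TRAIN_MAX_*_REACHED flags when the cap is hit.
 */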
2957
e2fa6fba
P
2958static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2959{
2960 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2961 struct drm_i915_private *dev_priv = dev->dev_private;
2962 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2963 struct intel_crtc *intel_crtc =
2964 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2965 unsigned long demph_reg_value, preemph_reg_value,
2966 uniqtranscale_reg_value;
2967 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2968 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2969 int pipe = intel_crtc->pipe;
e2fa6fba
P
2970
2971 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2972 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2973 preemph_reg_value = 0x0004000;
2974 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2975 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2976 demph_reg_value = 0x2B405555;
2977 uniqtranscale_reg_value = 0x552AB83A;
2978 break;
bd60018a 2979 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2980 demph_reg_value = 0x2B404040;
2981 uniqtranscale_reg_value = 0x5548B83A;
2982 break;
bd60018a 2983 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2984 demph_reg_value = 0x2B245555;
2985 uniqtranscale_reg_value = 0x5560B83A;
2986 break;
bd60018a 2987 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2988 demph_reg_value = 0x2B405555;
2989 uniqtranscale_reg_value = 0x5598DA3A;
2990 break;
2991 default:
2992 return 0;
2993 }
2994 break;
bd60018a 2995 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2996 preemph_reg_value = 0x0002000;
2997 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2998 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2999 demph_reg_value = 0x2B404040;
3000 uniqtranscale_reg_value = 0x5552B83A;
3001 break;
bd60018a 3002 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3003 demph_reg_value = 0x2B404848;
3004 uniqtranscale_reg_value = 0x5580B83A;
3005 break;
bd60018a 3006 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3007 demph_reg_value = 0x2B404040;
3008 uniqtranscale_reg_value = 0x55ADDA3A;
3009 break;
3010 default:
3011 return 0;
3012 }
3013 break;
bd60018a 3014 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3015 preemph_reg_value = 0x0000000;
3016 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3017 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3018 demph_reg_value = 0x2B305555;
3019 uniqtranscale_reg_value = 0x5570B83A;
3020 break;
bd60018a 3021 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3022 demph_reg_value = 0x2B2B4040;
3023 uniqtranscale_reg_value = 0x55ADDA3A;
3024 break;
3025 default:
3026 return 0;
3027 }
3028 break;
bd60018a 3029 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3030 preemph_reg_value = 0x0006000;
3031 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3032 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3033 demph_reg_value = 0x1B405555;
3034 uniqtranscale_reg_value = 0x55ADDA3A;
3035 break;
3036 default:
3037 return 0;
3038 }
3039 break;
3040 default:
3041 return 0;
3042 }
3043
0980a60f 3044 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3045 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3046 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3047 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3048 uniqtranscale_reg_value);
ab3c759a
CML
3049 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3050 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3052 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3053 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3054
3055 return 0;
3056}
3057
e4a1d846
CML
3058static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3059{
3060 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3061 struct drm_i915_private *dev_priv = dev->dev_private;
3062 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3063 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3064 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3065 uint8_t train_set = intel_dp->train_set[0];
3066 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3067 enum pipe pipe = intel_crtc->pipe;
3068 int i;
e4a1d846
CML
3069
3070 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3071 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3072 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3074 deemph_reg_value = 128;
3075 margin_reg_value = 52;
3076 break;
bd60018a 3077 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3078 deemph_reg_value = 128;
3079 margin_reg_value = 77;
3080 break;
bd60018a 3081 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3082 deemph_reg_value = 128;
3083 margin_reg_value = 102;
3084 break;
bd60018a 3085 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3086 deemph_reg_value = 128;
3087 margin_reg_value = 154;
3088 /* FIXME extra to set for 1200 */
3089 break;
3090 default:
3091 return 0;
3092 }
3093 break;
bd60018a 3094 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3095 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3097 deemph_reg_value = 85;
3098 margin_reg_value = 78;
3099 break;
bd60018a 3100 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3101 deemph_reg_value = 85;
3102 margin_reg_value = 116;
3103 break;
bd60018a 3104 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3105 deemph_reg_value = 85;
3106 margin_reg_value = 154;
3107 break;
3108 default:
3109 return 0;
3110 }
3111 break;
bd60018a 3112 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3113 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3114 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3115 deemph_reg_value = 64;
3116 margin_reg_value = 104;
3117 break;
bd60018a 3118 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3119 deemph_reg_value = 64;
3120 margin_reg_value = 154;
3121 break;
3122 default:
3123 return 0;
3124 }
3125 break;
bd60018a 3126 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3127 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3128 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3129 deemph_reg_value = 43;
3130 margin_reg_value = 154;
3131 break;
3132 default:
3133 return 0;
3134 }
3135 break;
3136 default:
3137 return 0;
3138 }
3139
3140 mutex_lock(&dev_priv->dpio_lock);
3141
3142 /* Clear calc init */
1966e59e
VS
3143 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3144 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3145 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3146 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3147 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3148
3149 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3150 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3151 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3152 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3153 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3154
a02ef3c7
VS
3155 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3156 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3157 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3158 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3159
3160 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3161 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3162 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3164
e4a1d846 3165 /* Program swing deemph */
f72df8db
VS
3166 for (i = 0; i < 4; i++) {
3167 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3168 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3169 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3170 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3171 }
e4a1d846
CML
3172
3173 /* Program swing margin */
f72df8db
VS
3174 for (i = 0; i < 4; i++) {
3175 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3176 val &= ~DPIO_SWING_MARGIN000_MASK;
3177 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3178 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3179 }
e4a1d846
CML
3180
3181 /* Disable unique transition scale */
f72df8db
VS
3182 for (i = 0; i < 4; i++) {
3183 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3184 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3185 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3186 }
e4a1d846
CML
3187
3188 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3189 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3190 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3191 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3192
3193 /*
3194 * The document said it needs to set bit 27 for ch0 and bit 26
3195 * for ch1. Might be a typo in the doc.
3196 * For now, for this unique transition scale selection, set bit
3197 * 27 for ch0 and ch1.
3198 */
f72df8db
VS
3199 for (i = 0; i < 4; i++) {
3200 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3201 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3202 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3203 }
e4a1d846 3204
f72df8db
VS
3205 for (i = 0; i < 4; i++) {
3206 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3207 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3208 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3209 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3210 }
e4a1d846
CML
3211 }
3212
3213 /* Start swing calculation */
1966e59e
VS
3214 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3215 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3216 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3217
3218 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3219 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3220 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3221
3222 /* LRC Bypass */
3223 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3224 val |= DPIO_LRC_BYPASS;
3225 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3226
3227 mutex_unlock(&dev_priv->dpio_lock);
3228
3229 return 0;
3230}
3231
a4fc5ed6 3232static void
0301b3ac
JN
3233intel_get_adjust_train(struct intel_dp *intel_dp,
3234 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3235{
3236 uint8_t v = 0;
3237 uint8_t p = 0;
3238 int lane;
1a2eb460
KP
3239 uint8_t voltage_max;
3240 uint8_t preemph_max;
a4fc5ed6 3241
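	/* Pick the highest voltage swing and pre-emphasis requested by any lane */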
33a34e4e 3242 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3243 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3244 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3245
3246 if (this_v > v)
3247 v = this_v;
3248 if (this_p > p)
3249 p = this_p;
3250 }
3251
1a2eb460 3252 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3253 if (v >= voltage_max)
3254 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3255
1a2eb460
KP
3256 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3257 if (p >= preemph_max)
3258 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3259
3260 for (lane = 0; lane < 4; lane++)
33a34e4e 3261 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3262}
3263
3264static uint32_t
f0a3424e 3265intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3266{
3cf2efb1 3267 uint32_t signal_levels = 0;
a4fc5ed6 3268
3cf2efb1 3269 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3271 default:
3272 signal_levels |= DP_VOLTAGE_0_4;
3273 break;
bd60018a 3274 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3275 signal_levels |= DP_VOLTAGE_0_6;
3276 break;
bd60018a 3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3278 signal_levels |= DP_VOLTAGE_0_8;
3279 break;
bd60018a 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3281 signal_levels |= DP_VOLTAGE_1_2;
3282 break;
3283 }
3cf2efb1 3284 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3285 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3286 default:
3287 signal_levels |= DP_PRE_EMPHASIS_0;
3288 break;
bd60018a 3289 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3290 signal_levels |= DP_PRE_EMPHASIS_3_5;
3291 break;
bd60018a 3292 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3293 signal_levels |= DP_PRE_EMPHASIS_6;
3294 break;
bd60018a 3295 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3296 signal_levels |= DP_PRE_EMPHASIS_9_5;
3297 break;
3298 }
3299 return signal_levels;
3300}
3301
e3421a18
ZW
3302/* Gen6's DP voltage swing and pre-emphasis control */
3303static uint32_t
3304intel_gen6_edp_signal_levels(uint8_t train_set)
3305{
3c5a62b5
YL
3306 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3307 DP_TRAIN_PRE_EMPHASIS_MASK);
3308 switch (signal_levels) {
bd60018a
SJ
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3311 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3313 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3316 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3319 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3322 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3323 default:
3c5a62b5
YL
3324 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3325 "0x%x\n", signal_levels);
3326 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3327 }
3328}
3329
1a2eb460
KP
3330/* Gen7's DP voltage swing and pre-emphasis control */
3331static uint32_t
3332intel_gen7_edp_signal_levels(uint8_t train_set)
3333{
3334 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3335 DP_TRAIN_PRE_EMPHASIS_MASK);
3336 switch (signal_levels) {
bd60018a 3337 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3338 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3340 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3342 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3343
bd60018a 3344 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3345 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3347 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3348
bd60018a 3349 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3350 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3352 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3353
3354 default:
3355 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3356 "0x%x\n", signal_levels);
3357 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3358 }
3359}
3360
d6c0d722
PZ
3361/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3362static uint32_t
f0a3424e 3363intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3364{
d6c0d722
PZ
3365 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3366 DP_TRAIN_PRE_EMPHASIS_MASK);
3367 switch (signal_levels) {
bd60018a 3368 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3369 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3371 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3373 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3375 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3376
bd60018a 3377 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3378 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3379 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3380 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3382 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3383
bd60018a 3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3385 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3386 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3387 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3388
3389 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3390 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3391 default:
3392 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3393 "0x%x\n", signal_levels);
c5fe6a06 3394 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3395 }
a4fc5ed6
KP
3396}
3397
f0a3424e
PZ
3398/* Properly updates "DP" with the correct signal levels. */
3399static void
3400intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3401{
3402 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3403 enum port port = intel_dig_port->port;
f0a3424e
PZ
3404 struct drm_device *dev = intel_dig_port->base.base.dev;
3405 uint32_t signal_levels, mask;
3406 uint8_t train_set = intel_dp->train_set[0];
3407
5a9d1f1a 3408 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3409 signal_levels = intel_hsw_signal_levels(train_set);
3410 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3411 } else if (IS_CHERRYVIEW(dev)) {
3412 signal_levels = intel_chv_signal_levels(intel_dp);
3413 mask = 0;
e2fa6fba
P
3414 } else if (IS_VALLEYVIEW(dev)) {
3415 signal_levels = intel_vlv_signal_levels(intel_dp);
3416 mask = 0;
bc7d38a4 3417 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3418 signal_levels = intel_gen7_edp_signal_levels(train_set);
3419 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3420 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3421 signal_levels = intel_gen6_edp_signal_levels(train_set);
3422 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3423 } else {
3424 signal_levels = intel_gen4_signal_levels(train_set);
3425 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3426 }
3427
3428 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3429
3430 *DP = (*DP & ~mask) | signal_levels;
3431}
3432
a4fc5ed6 3433static bool
ea5b213a 3434intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3435 uint32_t *DP,
58e10eb9 3436 uint8_t dp_train_pat)
a4fc5ed6 3437{
174edf1f
PZ
3438 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3439 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3440 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3441 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3442 int ret, len;
a4fc5ed6 3443
7b13b58a 3444 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3445
70aff66c 3446 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3447 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3448
2cdfe6c8
JN
3449 buf[0] = dp_train_pat;
3450 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3451 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3452 /* don't write DP_TRAINING_LANEx_SET on disable */
3453 len = 1;
3454 } else {
3455 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3456 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3457 len = intel_dp->lane_count + 1;
47ea7542 3458 }
a4fc5ed6 3459
9d1a1031
JN
3460 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3461 buf, len);
2cdfe6c8
JN
3462
3463 return ret == len;
a4fc5ed6
KP
3464}
3465
70aff66c
JN
3466static bool
3467intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3468 uint8_t dp_train_pat)
3469{
953d22e8 3470 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3471 intel_dp_set_signal_levels(intel_dp, DP);
3472 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3473}
3474
3475static bool
3476intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3477 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3478{
3479 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3480 struct drm_device *dev = intel_dig_port->base.base.dev;
3481 struct drm_i915_private *dev_priv = dev->dev_private;
3482 int ret;
3483
3484 intel_get_adjust_train(intel_dp, link_status);
3485 intel_dp_set_signal_levels(intel_dp, DP);
3486
3487 I915_WRITE(intel_dp->output_reg, *DP);
3488 POSTING_READ(intel_dp->output_reg);
3489
9d1a1031
JN
3490 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3491 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3492
3493 return ret == intel_dp->lane_count;
3494}
3495
3ab9c637
ID
3496static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3497{
3498 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3499 struct drm_device *dev = intel_dig_port->base.base.dev;
3500 struct drm_i915_private *dev_priv = dev->dev_private;
3501 enum port port = intel_dig_port->port;
3502 uint32_t val;
3503
3504 if (!HAS_DDI(dev))
3505 return;
3506
3507 val = I915_READ(DP_TP_CTL(port));
3508 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3509 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3510 I915_WRITE(DP_TP_CTL(port), val);
3511
3512 /*
3513 * On PORT_A we can have only eDP in SST mode. There the only reason
3514 * we need to set idle transmission mode is to work around a HW issue
3515 * where we enable the pipe while not in idle link-training mode.
3516 * In this case there is a requirement to wait for a minimum number of
3517 * idle patterns to be sent.
3518 */
3519 if (port == PORT_A)
3520 return;
3521
3522 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3523 1))
3524 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3525}
3526
33a34e4e 3527/* Enable corresponding port and start training pattern 1 */
c19b0669 3528void
33a34e4e 3529intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3530{
da63a9f2 3531 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3532 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3533 int i;
3534 uint8_t voltage;
cdb0e95b 3535 int voltage_tries, loop_tries;
ea5b213a 3536 uint32_t DP = intel_dp->DP;
6aba5b6c 3537 uint8_t link_config[2];
a4fc5ed6 3538
affa9354 3539 if (HAS_DDI(dev))
c19b0669
PZ
3540 intel_ddi_prepare_link_retrain(encoder);
3541
3cf2efb1 3542 /* Write the link configuration data */
6aba5b6c
JN
3543 link_config[0] = intel_dp->link_bw;
3544 link_config[1] = intel_dp->lane_count;
3545 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3546 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3547 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3548 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3549 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3550 &intel_dp->rate_select, 1);
6aba5b6c
JN
3551
3552 link_config[0] = 0;
3553 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3554 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3555
3556 DP |= DP_PORT_EN;
1a2eb460 3557
70aff66c
JN
3558 /* clock recovery */
3559 if (!intel_dp_reset_link_train(intel_dp, &DP,
3560 DP_TRAINING_PATTERN_1 |
3561 DP_LINK_SCRAMBLING_DISABLE)) {
3562 DRM_ERROR("failed to enable link training\n");
3563 return;
3564 }
3565
a4fc5ed6 3566 voltage = 0xff;
cdb0e95b
KP
3567 voltage_tries = 0;
3568 loop_tries = 0;
a4fc5ed6 3569 for (;;) {
70aff66c 3570 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3571
a7c9655f 3572 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3573 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3574 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3575 break;
93f62dad 3576 }
a4fc5ed6 3577
01916270 3578 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3579 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3580 break;
3581 }
3582
3583 /* Check to see if we've tried the max voltage */
3584 for (i = 0; i < intel_dp->lane_count; i++)
3585 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3586 break;
3b4f819d 3587 if (i == intel_dp->lane_count) {
b06fbda3
DV
3588 ++loop_tries;
3589 if (loop_tries == 5) {
3def84b3 3590 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3591 break;
3592 }
70aff66c
JN
3593 intel_dp_reset_link_train(intel_dp, &DP,
3594 DP_TRAINING_PATTERN_1 |
3595 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3596 voltage_tries = 0;
3597 continue;
3598 }
a4fc5ed6 3599
3cf2efb1 3600 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3601 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3602 ++voltage_tries;
b06fbda3 3603 if (voltage_tries == 5) {
3def84b3 3604 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3605 break;
3606 }
3607 } else
3608 voltage_tries = 0;
3609 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3610
70aff66c
JN
3611 /* Update training set as requested by target */
3612 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3613 DRM_ERROR("failed to update link training\n");
3614 break;
3615 }
a4fc5ed6
KP
3616 }
3617
33a34e4e
JB
3618 intel_dp->DP = DP;
3619}
3620
c19b0669 3621void
33a34e4e
JB
3622intel_dp_complete_link_train(struct intel_dp *intel_dp)
3623{
33a34e4e 3624 bool channel_eq = false;
37f80975 3625 int tries, cr_tries;
33a34e4e 3626 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3627 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3628
3629 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3630 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3631 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3632
a4fc5ed6 3633 /* channel equalization */
70aff66c 3634 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3635 training_pattern |
70aff66c
JN
3636 DP_LINK_SCRAMBLING_DISABLE)) {
3637 DRM_ERROR("failed to start channel equalization\n");
3638 return;
3639 }
3640
a4fc5ed6 3641 tries = 0;
37f80975 3642 cr_tries = 0;
a4fc5ed6
KP
3643 channel_eq = false;
3644 for (;;) {
70aff66c 3645 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3646
37f80975
JB
3647 if (cr_tries > 5) {
3648 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3649 break;
3650 }
3651
a7c9655f 3652 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3653 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3654 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3655 break;
70aff66c 3656 }
a4fc5ed6 3657
37f80975 3658 /* Make sure clock is still ok */
01916270 3659 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3660 intel_dp_start_link_train(intel_dp);
70aff66c 3661 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3662 training_pattern |
70aff66c 3663 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3664 cr_tries++;
3665 continue;
3666 }
3667
1ffdff13 3668 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3669 channel_eq = true;
3670 break;
3671 }
a4fc5ed6 3672
37f80975
JB
3673 /* Try 5 times, then try clock recovery if that fails */
3674 if (tries > 5) {
37f80975 3675 intel_dp_start_link_train(intel_dp);
70aff66c 3676 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3677 training_pattern |
70aff66c 3678 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3679 tries = 0;
3680 cr_tries++;
3681 continue;
3682 }
a4fc5ed6 3683
70aff66c
JN
3684 /* Update training set as requested by target */
3685 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3686 DRM_ERROR("failed to update link training\n");
3687 break;
3688 }
3cf2efb1 3689 ++tries;
869184a6 3690 }
3cf2efb1 3691
3ab9c637
ID
3692 intel_dp_set_idle_link_train(intel_dp);
3693
3694 intel_dp->DP = DP;
3695
d6c0d722 3696 if (channel_eq)
07f42258 3697 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3698
3ab9c637
ID
3699}
3700
3701void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3702{
70aff66c 3703 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3704 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3705}
3706
3707static void
ea5b213a 3708intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3709{
da63a9f2 3710 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3711 enum port port = intel_dig_port->port;
da63a9f2 3712 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3713 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3714 uint32_t DP = intel_dp->DP;
a4fc5ed6 3715
bc76e320 3716 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3717 return;
3718
0c33d8d7 3719 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3720 return;
3721
28c97730 3722 DRM_DEBUG_KMS("\n");
32f9d658 3723
bc7d38a4 3724 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3725 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3726 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3727 } else {
aad3d14d
VS
3728 if (IS_CHERRYVIEW(dev))
3729 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3730 else
3731 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3732 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3733 }
fe255d00 3734 POSTING_READ(intel_dp->output_reg);
5eb08b69 3735
493a7081 3736 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3737 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3738 /* Hardware workaround: leaving our transcoder select
3739 * set to transcoder B while it's off will prevent the
3740 * corresponding HDMI output on transcoder A.
3741 *
3742 * Combine this with another hardware workaround:
3743 * transcoder select bit can only be cleared while the
3744 * port is enabled.
3745 */
3746 DP &= ~DP_PIPEB_SELECT;
3747 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3748 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3749 }
3750
832afda6 3751 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3752 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3753 POSTING_READ(intel_dp->output_reg);
f01eca2e 3754 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3755}
3756
26d61aad
KP
3757static bool
3758intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3759{
a031d709
RV
3760 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3761 struct drm_device *dev = dig_port->base.base.dev;
3762 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3763 uint8_t rev;
a031d709 3764
9d1a1031
JN
3765 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3766 sizeof(intel_dp->dpcd)) < 0)
edb39244 3767 return false; /* aux transfer failed */
92fd8fd1 3768
a8e98153 3769 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3770
edb39244
AJ
3771 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3772 return false; /* DPCD not present */
3773
2293bb5c
SK
3774 /* Check if the panel supports PSR */
3775 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3776 if (is_edp(intel_dp)) {
9d1a1031
JN
3777 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3778 intel_dp->psr_dpcd,
3779 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3780 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3781 dev_priv->psr.sink_support = true;
50003939 3782 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3783 }
50003939
JN
3784 }
3785
7809a611 3786 /* Training Pattern 3 support, both source and sink */
06ea66b6 3787 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3788 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3789 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3790 intel_dp->use_tps3 = true;
f8d8a672 3791 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3792 } else
3793 intel_dp->use_tps3 = false;
3794
fc0f8e25
SJ
3795 /* Intermediate frequency support */
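	/*
	 * Each DP_SUPPORTED_LINK_RATES entry is a little-endian 16-bit value
	 * in units of 200 kHz, so e.g. a raw value of 810 (0x32a) means
	 * 810 * 200 = 162000 kHz. The loop below stops at the first zero
	 * entry.
	 */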
3796 if (is_edp(intel_dp) &&
3797 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3798 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3799 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3800 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3801 int i;
3802
fc0f8e25
SJ
3803 intel_dp_dpcd_read_wake(&intel_dp->aux,
3804 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3805 sink_rates,
3806 sizeof(sink_rates));
ea2d8a42 3807
94ca719e
VS
3808 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3809 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3810
3811 if (val == 0)
3812 break;
3813
94ca719e 3814 intel_dp->sink_rates[i] = val * 200;
ea2d8a42 3815 }
94ca719e 3816 intel_dp->num_sink_rates = i;
fc0f8e25 3817 }
0336400e
VS
3818
3819 intel_dp_print_rates(intel_dp);
3820
edb39244
AJ
3821 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3822 DP_DWN_STRM_PORT_PRESENT))
3823 return true; /* native DP sink */
3824
3825 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3826 return true; /* no per-port downstream info */
3827
9d1a1031
JN
3828 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3829 intel_dp->downstream_ports,
3830 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3831 return false; /* downstream port status fetch failed */
3832
3833 return true;
92fd8fd1
KP
3834}
3835
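/* Read and log the sink and branch device IEEE OUIs (3 bytes each), purely for debugging. */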
0d198328
AJ
3836static void
3837intel_dp_probe_oui(struct intel_dp *intel_dp)
3838{
3839 u8 buf[3];
3840
3841 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3842 return;
3843
9d1a1031 3844 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3845 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3846 buf[0], buf[1], buf[2]);
3847
9d1a1031 3848 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3849 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3850 buf[0], buf[1], buf[2]);
3851}
3852
0e32b39c
DA
3853static bool
3854intel_dp_probe_mst(struct intel_dp *intel_dp)
3855{
3856 u8 buf[1];
3857
3858 if (!intel_dp->can_mst)
3859 return false;
3860
3861 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3862 return false;
3863
0e32b39c
DA
3864 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3865 if (buf[0] & DP_MST_CAP) {
3866 DRM_DEBUG_KMS("Sink is MST capable\n");
3867 intel_dp->is_mst = true;
3868 } else {
3869 DRM_DEBUG_KMS("Sink is not MST capable\n");
3870 intel_dp->is_mst = false;
3871 }
3872 }
0e32b39c
DA
3873
3874 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3875 return intel_dp->is_mst;
3876}
3877
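/*
 * Sink CRC test sequence: verify the sink advertises CRC support in
 * DP_TEST_SINK_MISC, start the test via DP_TEST_SINK, wait up to 6 vblanks
 * for the CRC count to change, read the 6 CRC bytes starting at
 * DP_TEST_CRC_R_CR, and finally stop the test again.
 */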
d2e216d0
RV
3878int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3879{
3880 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3881 struct drm_device *dev = intel_dig_port->base.base.dev;
3882 struct intel_crtc *intel_crtc =
3883 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3884 u8 buf;
3885 int test_crc_count;
3886 int attempts = 6;
d2e216d0 3887
ad9dc91b 3888 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3889 return -EIO;
d2e216d0 3890
ad9dc91b 3891 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3892 return -ENOTTY;
3893
1dda5f93
RV
3894 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3895 return -EIO;
3896
9d1a1031 3897 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3898 buf | DP_TEST_SINK_START) < 0)
bda0381e 3899 return -EIO;
d2e216d0 3900
1dda5f93 3901 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3902 return -EIO;
ad9dc91b 3903 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3904
ad9dc91b 3905 do {
1dda5f93
RV
3906 if (drm_dp_dpcd_readb(&intel_dp->aux,
3907 DP_TEST_SINK_MISC, &buf) < 0)
3908 return -EIO;
ad9dc91b
RV
3909 intel_wait_for_vblank(dev, intel_crtc->pipe);
3910 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3911
3912 if (attempts == 0) {
90bd1f46
DV
3913 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3914 return -ETIMEDOUT;
ad9dc91b 3915 }
d2e216d0 3916
9d1a1031 3917 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3918 return -EIO;
d2e216d0 3919
1dda5f93
RV
3920 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3921 return -EIO;
3922 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3923 buf & ~DP_TEST_SINK_START) < 0)
3924 return -EIO;
ce31d9f4 3925
d2e216d0
RV
3926 return 0;
3927}
3928
a60f0e38
JB
3929static bool
3930intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3931{
9d1a1031
JN
3932 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3933 DP_DEVICE_SERVICE_IRQ_VECTOR,
3934 sink_irq_vector, 1) == 1;
a60f0e38
JB
3935}
3936
0e32b39c
DA
3937static bool
3938intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3939{
3940 int ret;
3941
3942 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3943 DP_SINK_COUNT_ESI,
3944 sink_irq_vector, 14);
3945 if (ret != 14)
3946 return false;
3947
3948 return true;
3949}
3950
a60f0e38
JB
3951static void
3952intel_dp_handle_test_request(struct intel_dp *intel_dp)
3953{
3954 /* NAK by default */
9d1a1031 3955 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3956}
3957
0e32b39c
DA
3958static int
3959intel_dp_check_mst_status(struct intel_dp *intel_dp)
3960{
3961 bool bret;
3962
3963 if (intel_dp->is_mst) {
3964 u8 esi[16] = { 0 };
3965 int ret = 0;
3966 int retry;
3967 bool handled;
3968 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3969go_again:
3970 if (bret == true) {
3971
3972 /* check link status - esi[10] = 0x200c */
3973 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3974 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3975 intel_dp_start_link_train(intel_dp);
3976 intel_dp_complete_link_train(intel_dp);
3977 intel_dp_stop_link_train(intel_dp);
3978 }
3979
6f34cc39 3980 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3981 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3982
3983 if (handled) {
3984 for (retry = 0; retry < 3; retry++) {
3985 int wret;
3986 wret = drm_dp_dpcd_write(&intel_dp->aux,
3987 DP_SINK_COUNT_ESI+1,
3988 &esi[1], 3);
3989 if (wret == 3) {
3990 break;
3991 }
3992 }
3993
3994 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3995 if (bret == true) {
6f34cc39 3996 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3997 goto go_again;
3998 }
3999 } else
4000 ret = 0;
4001
4002 return ret;
4003 } else {
4004 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4005 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4006 intel_dp->is_mst = false;
4007 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4008 /* send a hotplug event */
4009 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4010 }
4011 }
4012 return -EINVAL;
4013}
4014
a4fc5ed6
KP
4015/*
4016 * According to DP spec
4017 * 5.1.2:
4018 * 1. Read DPCD
4019 * 2. Configure link according to Receiver Capabilities
4020 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4021 * 4. Check link status on receipt of hot-plug interrupt
4022 */
a5146200 4023static void
ea5b213a 4024intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4025{
5b215bcf 4026 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4027 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4028 u8 sink_irq_vector;
93f62dad 4029 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4030
5b215bcf
DA
4031 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4032
da63a9f2 4033 if (!intel_encoder->connectors_active)
d2b996ac 4034 return;
59cd09e1 4035
da63a9f2 4036 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4037 return;
4038
1a125d8a
ID
4039 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4040 return;
4041
92fd8fd1 4042 /* Try to read receiver status if the link appears to be up */
93f62dad 4043 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4044 return;
4045 }
4046
92fd8fd1 4047 /* Now read the DPCD to see if it's actually running */
26d61aad 4048 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4049 return;
4050 }
4051
a60f0e38
JB
4052 /* Try to read the source of the interrupt */
4053 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4054 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4055 /* Clear interrupt source */
9d1a1031
JN
4056 drm_dp_dpcd_writeb(&intel_dp->aux,
4057 DP_DEVICE_SERVICE_IRQ_VECTOR,
4058 sink_irq_vector);
a60f0e38
JB
4059
4060 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4061 intel_dp_handle_test_request(intel_dp);
4062 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4063 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4064 }
4065
1ffdff13 4066 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4067 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4068 intel_encoder->base.name);
33a34e4e
JB
4069 intel_dp_start_link_train(intel_dp);
4070 intel_dp_complete_link_train(intel_dp);
3ab9c637 4071 intel_dp_stop_link_train(intel_dp);
33a34e4e 4072 }
a4fc5ed6 4073}
a4fc5ed6 4074
caf9ab24 4075/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4076static enum drm_connector_status
26d61aad 4077intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4078{
caf9ab24 4079 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4080 uint8_t type;
4081
4082 if (!intel_dp_get_dpcd(intel_dp))
4083 return connector_status_disconnected;
4084
4085 /* if there's no downstream port, we're done */
4086 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4087 return connector_status_connected;
caf9ab24
AJ
4088
4089 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4090 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4091 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4092 uint8_t reg;
9d1a1031
JN
4093
4094 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4095 &reg, 1) < 0)
caf9ab24 4096 return connector_status_unknown;
9d1a1031 4097
23235177
AJ
4098 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4099 : connector_status_disconnected;
caf9ab24
AJ
4100 }
4101
4102 /* If no HPD, poke DDC gently */
0b99836f 4103 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4104 return connector_status_connected;
caf9ab24
AJ
4105
4106 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4107 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4108 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4109 if (type == DP_DS_PORT_TYPE_VGA ||
4110 type == DP_DS_PORT_TYPE_NON_EDID)
4111 return connector_status_unknown;
4112 } else {
4113 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4114 DP_DWN_STRM_PORT_TYPE_MASK;
4115 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4116 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4117 return connector_status_unknown;
4118 }
caf9ab24
AJ
4119
4120 /* Anything else is out of spec, warn and ignore */
4121 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4122 return connector_status_disconnected;
71ba9000
AJ
4123}
4124
d410b56d
CW
4125static enum drm_connector_status
4126edp_detect(struct intel_dp *intel_dp)
4127{
4128 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4129 enum drm_connector_status status;
4130
4131 status = intel_panel_detect(dev);
4132 if (status == connector_status_unknown)
4133 status = connector_status_connected;
4134
4135 return status;
4136}
4137
5eb08b69 4138static enum drm_connector_status
a9756bb5 4139ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4140{
30add22d 4141 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4142 struct drm_i915_private *dev_priv = dev->dev_private;
4143 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4144
1b469639
DL
4145 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4146 return connector_status_disconnected;
4147
26d61aad 4148 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4149}
4150
2a592bec
DA
4151static int g4x_digital_port_connected(struct drm_device *dev,
4152 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4153{
a4fc5ed6 4154 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4155 uint32_t bit;
5eb08b69 4156
232a6ee9
TP
4157 if (IS_VALLEYVIEW(dev)) {
4158 switch (intel_dig_port->port) {
4159 case PORT_B:
4160 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4161 break;
4162 case PORT_C:
4163 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4164 break;
4165 case PORT_D:
4166 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4167 break;
4168 default:
2a592bec 4169 return -EINVAL;
232a6ee9
TP
4170 }
4171 } else {
4172 switch (intel_dig_port->port) {
4173 case PORT_B:
4174 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4175 break;
4176 case PORT_C:
4177 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4178 break;
4179 case PORT_D:
4180 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4181 break;
4182 default:
2a592bec 4183 return -EINVAL;
232a6ee9 4184 }
a4fc5ed6
KP
4185 }
4186
10f76a38 4187 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4188 return 0;
4189 return 1;
4190}
4191
4192static enum drm_connector_status
4193g4x_dp_detect(struct intel_dp *intel_dp)
4194{
4195 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4196 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4197 int ret;
4198
4199 /* Can't disconnect eDP, but you can close the lid... */
4200 if (is_edp(intel_dp)) {
4201 enum drm_connector_status status;
4202
4203 status = intel_panel_detect(dev);
4204 if (status == connector_status_unknown)
4205 status = connector_status_connected;
4206 return status;
4207 }
4208
4209 ret = g4x_digital_port_connected(dev, intel_dig_port);
4210 if (ret == -EINVAL)
4211 return connector_status_unknown;
4212 else if (ret == 0)
a4fc5ed6
KP
4213 return connector_status_disconnected;
4214
26d61aad 4215 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4216}
4217
8c241fef 4218static struct edid *
beb60608 4219intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4220{
beb60608 4221 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4222
9cd300e0
JN
4223 /* use cached edid if we have one */
4224 if (intel_connector->edid) {
9cd300e0
JN
4225 /* invalid edid */
4226 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4227 return NULL;
4228
55e9edeb 4229 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4230 } else
4231 return drm_get_edid(&intel_connector->base,
4232 &intel_dp->aux.ddc);
4233}
8c241fef 4234
beb60608
CW
4235static void
4236intel_dp_set_edid(struct intel_dp *intel_dp)
4237{
4238 struct intel_connector *intel_connector = intel_dp->attached_connector;
4239 struct edid *edid;
8c241fef 4240
beb60608
CW
4241 edid = intel_dp_get_edid(intel_dp);
4242 intel_connector->detect_edid = edid;
4243
4244 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4245 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4246 else
4247 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4248}
4249
beb60608
CW
4250static void
4251intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4252{
beb60608 4253 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4254
beb60608
CW
4255 kfree(intel_connector->detect_edid);
4256 intel_connector->detect_edid = NULL;
9cd300e0 4257
beb60608
CW
4258 intel_dp->has_audio = false;
4259}
d6f24d0f 4260
beb60608
CW
4261static enum intel_display_power_domain
4262intel_dp_power_get(struct intel_dp *dp)
4263{
4264 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4265 enum intel_display_power_domain power_domain;
4266
4267 power_domain = intel_display_port_power_domain(encoder);
4268 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4269
4270 return power_domain;
4271}
d6f24d0f 4272
beb60608
CW
4273static void
4274intel_dp_power_put(struct intel_dp *dp,
4275 enum intel_display_power_domain power_domain)
4276{
4277 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4278 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4279}
4280
a9756bb5
ZW
4281static enum drm_connector_status
4282intel_dp_detect(struct drm_connector *connector, bool force)
4283{
4284 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4285 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4286 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4287 struct drm_device *dev = connector->dev;
a9756bb5 4288 enum drm_connector_status status;
671dedd2 4289 enum intel_display_power_domain power_domain;
0e32b39c 4290 bool ret;
a9756bb5 4291
164c8598 4292 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4293 connector->base.id, connector->name);
beb60608 4294 intel_dp_unset_edid(intel_dp);
164c8598 4295
0e32b39c
DA
4296 if (intel_dp->is_mst) {
4297 /* MST devices are disconnected from a monitor POV */
4298 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4299 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4300 return connector_status_disconnected;
0e32b39c
DA
4301 }
4302
beb60608 4303 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4304
d410b56d
CW
4305 /* Can't disconnect eDP, but you can close the lid... */
4306 if (is_edp(intel_dp))
4307 status = edp_detect(intel_dp);
4308 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4309 status = ironlake_dp_detect(intel_dp);
4310 else
4311 status = g4x_dp_detect(intel_dp);
4312 if (status != connector_status_connected)
c8c8fb33 4313 goto out;
a9756bb5 4314
0d198328
AJ
4315 intel_dp_probe_oui(intel_dp);
4316
0e32b39c
DA
4317 ret = intel_dp_probe_mst(intel_dp);
4318 if (ret) {
4319 /* if we are in MST mode then this connector
4320 * won't appear connected or have anything with EDID on it */
4321 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4322 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4323 status = connector_status_disconnected;
4324 goto out;
4325 }
4326
beb60608 4327 intel_dp_set_edid(intel_dp);
a9756bb5 4328
d63885da
PZ
4329 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4330 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4331 status = connector_status_connected;
4332
4333out:
beb60608 4334 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4335 return status;
a4fc5ed6
KP
4336}
4337
beb60608
CW
4338static void
4339intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4340{
df0e9248 4341 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4342 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4343 enum intel_display_power_domain power_domain;
a4fc5ed6 4344
beb60608
CW
4345 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4346 connector->base.id, connector->name);
4347 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4348
beb60608
CW
4349 if (connector->status != connector_status_connected)
4350 return;
671dedd2 4351
beb60608
CW
4352 power_domain = intel_dp_power_get(intel_dp);
4353
4354 intel_dp_set_edid(intel_dp);
4355
4356 intel_dp_power_put(intel_dp, power_domain);
4357
4358 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4359 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4360}
4361
4362static int intel_dp_get_modes(struct drm_connector *connector)
4363{
4364 struct intel_connector *intel_connector = to_intel_connector(connector);
4365 struct edid *edid;
4366
4367 edid = intel_connector->detect_edid;
4368 if (edid) {
4369 int ret = intel_connector_update_modes(connector, edid);
4370 if (ret)
4371 return ret;
4372 }
32f9d658 4373
f8779fda 4374 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4375 if (is_edp(intel_attached_dp(connector)) &&
4376 intel_connector->panel.fixed_mode) {
f8779fda 4377 struct drm_display_mode *mode;
beb60608
CW
4378
4379 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4380 intel_connector->panel.fixed_mode);
f8779fda 4381 if (mode) {
32f9d658
ZW
4382 drm_mode_probed_add(connector, mode);
4383 return 1;
4384 }
4385 }
beb60608 4386
32f9d658 4387 return 0;
a4fc5ed6
KP
4388}
4389
1aad7ac0
CW
4390static bool
4391intel_dp_detect_audio(struct drm_connector *connector)
4392{
1aad7ac0 4393 bool has_audio = false;
beb60608 4394 struct edid *edid;
1aad7ac0 4395
beb60608
CW
4396 edid = to_intel_connector(connector)->detect_edid;
4397 if (edid)
1aad7ac0 4398 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4399
1aad7ac0
CW
4400 return has_audio;
4401}
4402
f684960e
CW
4403static int
4404intel_dp_set_property(struct drm_connector *connector,
4405 struct drm_property *property,
4406 uint64_t val)
4407{
e953fd7b 4408 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4409 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4410 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4411 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4412 int ret;
4413
662595df 4414 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4415 if (ret)
4416 return ret;
4417
3f43c48d 4418 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4419 int i = val;
4420 bool has_audio;
4421
4422 if (i == intel_dp->force_audio)
f684960e
CW
4423 return 0;
4424
1aad7ac0 4425 intel_dp->force_audio = i;
f684960e 4426
c3e5f67b 4427 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4428 has_audio = intel_dp_detect_audio(connector);
4429 else
c3e5f67b 4430 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4431
4432 if (has_audio == intel_dp->has_audio)
f684960e
CW
4433 return 0;
4434
1aad7ac0 4435 intel_dp->has_audio = has_audio;
f684960e
CW
4436 goto done;
4437 }
4438
e953fd7b 4439 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4440 bool old_auto = intel_dp->color_range_auto;
4441 uint32_t old_range = intel_dp->color_range;
4442
55bc60db
VS
4443 switch (val) {
4444 case INTEL_BROADCAST_RGB_AUTO:
4445 intel_dp->color_range_auto = true;
4446 break;
4447 case INTEL_BROADCAST_RGB_FULL:
4448 intel_dp->color_range_auto = false;
4449 intel_dp->color_range = 0;
4450 break;
4451 case INTEL_BROADCAST_RGB_LIMITED:
4452 intel_dp->color_range_auto = false;
4453 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4454 break;
4455 default:
4456 return -EINVAL;
4457 }
ae4edb80
DV
4458
4459 if (old_auto == intel_dp->color_range_auto &&
4460 old_range == intel_dp->color_range)
4461 return 0;
4462
e953fd7b
CW
4463 goto done;
4464 }
4465
53b41837
YN
4466 if (is_edp(intel_dp) &&
4467 property == connector->dev->mode_config.scaling_mode_property) {
4468 if (val == DRM_MODE_SCALE_NONE) {
4469 DRM_DEBUG_KMS("no scaling not supported\n");
4470 return -EINVAL;
4471 }
4472
4473 if (intel_connector->panel.fitting_mode == val) {
4474 /* the eDP scaling property is not changed */
4475 return 0;
4476 }
4477 intel_connector->panel.fitting_mode = val;
4478
4479 goto done;
4480 }
4481
f684960e
CW
4482 return -EINVAL;
4483
4484done:
c0c36b94
CW
4485 if (intel_encoder->base.crtc)
4486 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4487
4488 return 0;
4489}
4490
a4fc5ed6 4491static void
73845adf 4492intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4493{
1d508706 4494 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4495
10e972d3 4496 kfree(intel_connector->detect_edid);
beb60608 4497
9cd300e0
JN
4498 if (!IS_ERR_OR_NULL(intel_connector->edid))
4499 kfree(intel_connector->edid);
4500
acd8db10
PZ
4501 /* Can't call is_edp() since the encoder may have been destroyed
4502 * already. */
4503 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4504 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4505
a4fc5ed6 4506 drm_connector_cleanup(connector);
55f78c43 4507 kfree(connector);
a4fc5ed6
KP
4508}
4509
00c09d70 4510void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4511{
da63a9f2
PZ
4512 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4513 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4514
4f71d0cb 4515 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4516 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4517 if (is_edp(intel_dp)) {
4518 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4519 /*
4520 * vdd might still be enabled due to the delayed vdd off.
4521 * Make sure vdd is actually turned off here.
4522 */
773538e8 4523 pps_lock(intel_dp);
4be73780 4524 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4525 pps_unlock(intel_dp);
4526
01527b31
CT
4527 if (intel_dp->edp_notifier.notifier_call) {
4528 unregister_reboot_notifier(&intel_dp->edp_notifier);
4529 intel_dp->edp_notifier.notifier_call = NULL;
4530 }
bd943159 4531 }
c8bd0e49 4532 drm_encoder_cleanup(encoder);
da63a9f2 4533 kfree(intel_dig_port);
24d05927
DV
4534}
4535
07f9cd0b
ID
4536static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4537{
4538 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4539
4540 if (!is_edp(intel_dp))
4541 return;
4542
951468f3
VS
4543 /*
4544 * vdd might still be enabled due to the delayed vdd off.
4545 * Make sure vdd is actually turned off here.
4546 */
afa4e53a 4547 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4548 pps_lock(intel_dp);
07f9cd0b 4549 edp_panel_vdd_off_sync(intel_dp);
773538e8 4550 pps_unlock(intel_dp);
07f9cd0b
ID
4551}
4552
49e6bc51
VS
4553static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4554{
4555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4556 struct drm_device *dev = intel_dig_port->base.base.dev;
4557 struct drm_i915_private *dev_priv = dev->dev_private;
4558 enum intel_display_power_domain power_domain;
4559
4560 lockdep_assert_held(&dev_priv->pps_mutex);
4561
4562 if (!edp_have_panel_vdd(intel_dp))
4563 return;
4564
4565 /*
4566 * The VDD bit needs a power domain reference, so if the bit is
4567 * already enabled when we boot or resume, grab this reference and
4568 * schedule a vdd off, so we don't hold on to the reference
4569 * indefinitely.
4570 */
4571 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4572 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4573 intel_display_power_get(dev_priv, power_domain);
4574
4575 edp_panel_vdd_schedule_off(intel_dp);
4576}
4577
6d93c0c4
ID
4578static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4579{
49e6bc51
VS
4580 struct intel_dp *intel_dp;
4581
4582 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4583 return;
4584
4585 intel_dp = enc_to_intel_dp(encoder);
4586
4587 pps_lock(intel_dp);
4588
4589 /*
4590 * Read out the current power sequencer assignment,
4591 * in case the BIOS did something with it.
4592 */
4593 if (IS_VALLEYVIEW(encoder->dev))
4594 vlv_initial_power_sequencer_setup(intel_dp);
4595
4596 intel_edp_panel_vdd_sanitize(intel_dp);
4597
4598 pps_unlock(intel_dp);
6d93c0c4
ID
4599}
4600
a4fc5ed6 4601static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4602 .dpms = intel_connector_dpms,
a4fc5ed6 4603 .detect = intel_dp_detect,
beb60608 4604 .force = intel_dp_force,
a4fc5ed6 4605 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4606 .set_property = intel_dp_set_property,
2545e4a6 4607 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4608 .destroy = intel_dp_connector_destroy,
c6f95f27 4609 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4610};
4611
4612static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4613 .get_modes = intel_dp_get_modes,
4614 .mode_valid = intel_dp_mode_valid,
df0e9248 4615 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4616};
4617
a4fc5ed6 4618static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4619 .reset = intel_dp_encoder_reset,
24d05927 4620 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4621};
4622
0e32b39c 4623void
21d40d37 4624intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4625{
0e32b39c 4626 return;
c8110e52 4627}
6207937d 4628
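/*
 * HPD pulse handling: a long pulse means a possible connect/disconnect, so
 * re-read the DPCD and re-probe MST support; a short pulse is a sink IRQ,
 * serviced either through the MST ESI path or by rechecking link status.
 */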
b2c5c181 4629enum irqreturn
13cf5504
DA
4630intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4631{
4632 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4633 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4634 struct drm_device *dev = intel_dig_port->base.base.dev;
4635 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4636 enum intel_display_power_domain power_domain;
b2c5c181 4637 enum irqreturn ret = IRQ_NONE;
1c767b33 4638
0e32b39c
DA
4639 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4640 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4641
7a7f84cc
VS
4642 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4643 /*
4644 * vdd off can generate a long pulse on eDP which
4645 * would require vdd on to handle it, and thus we
4646 * would end up in an endless cycle of
4647 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4648 */
4649 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4650 port_name(intel_dig_port->port));
a8b3d52f 4651 return IRQ_HANDLED;
7a7f84cc
VS
4652 }
4653
26fbb774
VS
4654 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4655 port_name(intel_dig_port->port),
0e32b39c 4656 long_hpd ? "long" : "short");
13cf5504 4657
1c767b33
ID
4658 power_domain = intel_display_port_power_domain(intel_encoder);
4659 intel_display_power_get(dev_priv, power_domain);
4660
0e32b39c 4661 if (long_hpd) {
2a592bec
DA
4662
4663 if (HAS_PCH_SPLIT(dev)) {
4664 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4665 goto mst_fail;
4666 } else {
4667 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4668 goto mst_fail;
4669 }
0e32b39c
DA
4670
4671 if (!intel_dp_get_dpcd(intel_dp)) {
4672 goto mst_fail;
4673 }
4674
4675 intel_dp_probe_oui(intel_dp);
4676
4677 if (!intel_dp_probe_mst(intel_dp))
4678 goto mst_fail;
4679
4680 } else {
4681 if (intel_dp->is_mst) {
1c767b33 4682 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4683 goto mst_fail;
4684 }
4685
4686 if (!intel_dp->is_mst) {
4687 /*
4688 * we'll check the link status via the normal hot plug path later -
4689 * but for short hpds we should check it now
4690 */
5b215bcf 4691 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4692 intel_dp_check_link_status(intel_dp);
5b215bcf 4693 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4694 }
4695 }
b2c5c181
DV
4696
4697 ret = IRQ_HANDLED;
4698
1c767b33 4699 goto put_power;
0e32b39c
DA
4700mst_fail:
4701 /* if we were in MST mode, and device is not there get out of MST mode */
4702 if (intel_dp->is_mst) {
4703 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4704 intel_dp->is_mst = false;
4705 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4706 }
1c767b33
ID
4707put_power:
4708 intel_display_power_put(dev_priv, power_domain);
4709
4710 return ret;
13cf5504
DA
4711}
4712
e3421a18
ZW
4713/* Return which DP Port should be selected for Transcoder DP control */
4714int
0206e353 4715intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4716{
4717 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4718 struct intel_encoder *intel_encoder;
4719 struct intel_dp *intel_dp;
e3421a18 4720
fa90ecef
PZ
4721 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4722 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4723
fa90ecef
PZ
4724 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4725 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4726 return intel_dp->output_reg;
e3421a18 4727 }
ea5b213a 4728
e3421a18
ZW
4729 return -1;
4730}
4731
36e83a18 4732/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4733bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4734{
4735 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4736 union child_device_config *p_child;
36e83a18 4737 int i;
5d8a7752
VS
4738 static const short port_mapping[] = {
4739 [PORT_B] = PORT_IDPB,
4740 [PORT_C] = PORT_IDPC,
4741 [PORT_D] = PORT_IDPD,
4742 };
36e83a18 4743
3b32a35b
VS
4744 if (port == PORT_A)
4745 return true;
4746
41aa3448 4747 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4748 return false;
4749
41aa3448
RV
4750 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4751 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4752
5d8a7752 4753 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4754 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4755 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4756 return true;
4757 }
4758 return false;
4759}
4760
0e32b39c 4761void
f684960e
CW
4762intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4763{
53b41837
YN
4764 struct intel_connector *intel_connector = to_intel_connector(connector);
4765
3f43c48d 4766 intel_attach_force_audio_property(connector);
e953fd7b 4767 intel_attach_broadcast_rgb_property(connector);
55bc60db 4768 intel_dp->color_range_auto = true;
53b41837
YN
4769
4770 if (is_edp(intel_dp)) {
4771 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4772 drm_object_attach_property(
4773 &connector->base,
53b41837 4774 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4775 DRM_MODE_SCALE_ASPECT);
4776 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4777 }
f684960e
CW
4778}
4779
dada1a9f
ID
4780static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4781{
4782 intel_dp->last_power_cycle = jiffies;
4783 intel_dp->last_power_on = jiffies;
4784 intel_dp->last_backlight_off = jiffies;
4785}
4786
67a54566
DV
4787static void
4788intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4789 struct intel_dp *intel_dp)
67a54566
DV
4790{
4791 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4792 struct edp_power_seq cur, vbt, spec,
4793 *final = &intel_dp->pps_delays;
67a54566 4794 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4795 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4796
e39b999a
VS
4797 lockdep_assert_held(&dev_priv->pps_mutex);
4798
81ddbc69
VS
4799 /* already initialized? */
4800 if (final->t11_t12 != 0)
4801 return;
4802
453c5420 4803 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4804 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4805 pp_on_reg = PCH_PP_ON_DELAYS;
4806 pp_off_reg = PCH_PP_OFF_DELAYS;
4807 pp_div_reg = PCH_PP_DIVISOR;
4808 } else {
bf13e81b
JN
4809 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4810
4811 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4812 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4813 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4814 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4815 }
67a54566
DV
4816
4817 /* Workaround: Need to write PP_CONTROL with the unlock key as
4818 * the very first thing. */
453c5420 4819 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4820 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4821
453c5420
JB
4822 pp_on = I915_READ(pp_on_reg);
4823 pp_off = I915_READ(pp_off_reg);
4824 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4825
4826 /* Pull timing values out of registers */
4827 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4828 PANEL_POWER_UP_DELAY_SHIFT;
4829
4830 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4831 PANEL_LIGHT_ON_DELAY_SHIFT;
4832
4833 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4834 PANEL_LIGHT_OFF_DELAY_SHIFT;
4835
4836 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4837 PANEL_POWER_DOWN_DELAY_SHIFT;
4838
4839 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4840 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4841
4842 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4843 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4844
41aa3448 4845 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4846
4847 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4848 * our hw here, which are all in 100usec. */
4849 spec.t1_t3 = 210 * 10;
4850 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4851 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4852 spec.t10 = 500 * 10;
 4853 /* This one is special and actually in units of 100ms, but zero
 4854 * based in the hw (so we need to add 100 ms). But the sw vbt
 4855 * table multiplies it by 1000 to make it in units of 100usec,
 4856 * too. */
4857 spec.t11_t12 = (510 + 100) * 10;
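	/*
	 * Worked arithmetic for the line above: (510 + 100) * 10 = 6100 in
	 * the hw's 100usec units, i.e. an upper bound of 610 ms for t11_t12.
	 */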
4858
4859 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4860 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4861
4862 /* Use the max of the register settings and vbt. If both are
4863 * unset, fall back to the spec limits. */
36b5f425 4864#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4865 spec.field : \
4866 max(cur.field, vbt.field))
4867 assign_final(t1_t3);
4868 assign_final(t8);
4869 assign_final(t9);
4870 assign_final(t10);
4871 assign_final(t11_t12);
4872#undef assign_final
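	/*
	 * Example of the fallback above: if both the current register value
	 * and the VBT report 0 for a field, the eDP spec limit is used,
	 * e.g. final->t1_t3 = spec.t1_t3 = 2100 (210 ms in 100usec units).
	 */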
4873
36b5f425 4874#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4875 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4876 intel_dp->backlight_on_delay = get_delay(t8);
4877 intel_dp->backlight_off_delay = get_delay(t9);
4878 intel_dp->panel_power_down_delay = get_delay(t10);
4879 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4880#undef get_delay
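	/*
	 * get_delay() converts from 100usec units to ms, e.g. a final->t1_t3
	 * of 2100 yields DIV_ROUND_UP(2100, 10) = 210 ms of panel power up
	 * delay.
	 */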
4881
f30d26e4
JN
4882 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4883 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4884 intel_dp->panel_power_cycle_delay);
4885
4886 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4887 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4888}
4889
4890static void
4891intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4892 struct intel_dp *intel_dp)
f30d26e4
JN
4893{
4894 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4895 u32 pp_on, pp_off, pp_div, port_sel = 0;
4896 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4897 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4898 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4899 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4900
e39b999a 4901 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4902
4903 if (HAS_PCH_SPLIT(dev)) {
4904 pp_on_reg = PCH_PP_ON_DELAYS;
4905 pp_off_reg = PCH_PP_OFF_DELAYS;
4906 pp_div_reg = PCH_PP_DIVISOR;
4907 } else {
bf13e81b
JN
4908 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4909
4910 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4911 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4912 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4913 }
4914
b2f19d1a
PZ
4915 /*
4916 * And finally store the new values in the power sequencer. The
4917 * backlight delays are set to 1 because we do manual waits on them. For
4918 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4919 * we'll end up waiting for the backlight off delay twice: once when we
4920 * do the manual sleep, and once when we disable the panel and wait for
4921 * the PP_STATUS bit to become zero.
4922 */
f30d26e4 4923 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4924 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4925 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4926 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4927 /* Compute the divisor for the pp clock, simply match the Bspec
4928 * formula. */
453c5420 4929 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4930 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4931 << PANEL_POWER_CYCLE_DELAY_SHIFT);
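	/*
	 * Illustrative arithmetic only (assuming a 125 MHz raw clock): the
	 * reference divider field becomes (100 * 125) / 2 - 1 = 6249, and a
	 * seq->t11_t12 of 6100 (100usec units) is programmed as
	 * DIV_ROUND_UP(6100, 1000) = 7 in the power cycle delay field.
	 */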
4932
4933 /* Haswell doesn't have any port selection bits for the panel
4934 * power sequencer any more. */
bc7d38a4 4935 if (IS_VALLEYVIEW(dev)) {
ad933b56 4936 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4937 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4938 if (port == PORT_A)
a24c144c 4939 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4940 else
a24c144c 4941 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4942 }
4943
453c5420
JB
4944 pp_on |= port_sel;
4945
4946 I915_WRITE(pp_on_reg, pp_on);
4947 I915_WRITE(pp_off_reg, pp_off);
4948 I915_WRITE(pp_div_reg, pp_div);
67a54566 4949
67a54566 4950 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4951 I915_READ(pp_on_reg),
4952 I915_READ(pp_off_reg),
4953 I915_READ(pp_div_reg));
f684960e
CW
4954}
4955
b33a2815
VK
4956/**
4957 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4958 * @dev: DRM device
4959 * @refresh_rate: RR to be programmed
4960 *
4961 * This function gets called when refresh rate (RR) has to be changed from
4962 * one frequency to another. Switches can be between high and low RR
4963 * supported by the panel or to any other RR based on media playback (in
4964 * this case, RR value needs to be passed from user space).
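 *
 * In short (see the code below): on gen8+ (except CHV) the switch is done
 * by selecting between the precomputed M1/N1 and M2/N2 link values, while
 * otherwise on gen7+ (including CHV) it is done by toggling the PIPECONF
 * EDP_RR_MODE_SWITCH bit (VLV has a separate bit definition).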
4965 *
4966 * The caller of this function needs to take a lock on dev_priv->drrs.
4967 */
96178eeb 4968static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4969{
4970 struct drm_i915_private *dev_priv = dev->dev_private;
4971 struct intel_encoder *encoder;
96178eeb
VK
4972 struct intel_digital_port *dig_port = NULL;
4973 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4974 struct intel_crtc_state *config = NULL;
439d7ac0 4975 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4976 u32 reg, val;
96178eeb 4977 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4978
4979 if (refresh_rate <= 0) {
4980 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4981 return;
4982 }
4983
96178eeb
VK
4984 if (intel_dp == NULL) {
4985 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4986 return;
4987 }
4988
1fcc9d1c 4989 /*
e4d59f6b
RV
4990 * FIXME: This needs proper synchronization with psr state for some
4991 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4992 */
439d7ac0 4993
96178eeb
VK
4994 dig_port = dp_to_dig_port(intel_dp);
4995 encoder = &dig_port->base;
439d7ac0
PB
4996 intel_crtc = encoder->new_crtc;
4997
4998 if (!intel_crtc) {
4999 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5000 return;
5001 }
5002
6e3c9717 5003 config = intel_crtc->config;
439d7ac0 5004
96178eeb 5005 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5006 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5007 return;
5008 }
5009
96178eeb
VK
5010 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5011 refresh_rate)
439d7ac0
PB
5012 index = DRRS_LOW_RR;
5013
96178eeb 5014 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5015 DRM_DEBUG_KMS(
5016 "DRRS requested for previously set RR...ignoring\n");
5017 return;
5018 }
5019
5020 if (!intel_crtc->active) {
5021 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5022 return;
5023 }
5024
44395bfe 5025 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5026 switch (index) {
5027 case DRRS_HIGH_RR:
5028 intel_dp_set_m_n(intel_crtc, M1_N1);
5029 break;
5030 case DRRS_LOW_RR:
5031 intel_dp_set_m_n(intel_crtc, M2_N2);
5032 break;
5033 case DRRS_MAX_RR:
5034 default:
 5035 DRM_ERROR("Unsupported refresh rate type\n");
5036 }
5037 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5038 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5039 val = I915_READ(reg);
a4c30b1d 5040
439d7ac0 5041 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5042 if (IS_VALLEYVIEW(dev))
5043 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5044 else
5045 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5046 } else {
6fa7aec1
VK
5047 if (IS_VALLEYVIEW(dev))
5048 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5049 else
5050 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5051 }
5052 I915_WRITE(reg, val);
5053 }
5054
4e9ac947
VK
5055 dev_priv->drrs.refresh_rate_type = index;
5056
5057 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5058}
5059
b33a2815
VK
5060/**
5061 * intel_edp_drrs_enable - init drrs struct if supported
5062 * @intel_dp: DP struct
5063 *
5064 * Initializes frontbuffer_bits and drrs.dp
5065 */
c395578e
VK
5066void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5067{
5068 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5069 struct drm_i915_private *dev_priv = dev->dev_private;
5070 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5071 struct drm_crtc *crtc = dig_port->base.base.crtc;
5072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5073
5074 if (!intel_crtc->config->has_drrs) {
5075 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5076 return;
5077 }
5078
5079 mutex_lock(&dev_priv->drrs.mutex);
5080 if (WARN_ON(dev_priv->drrs.dp)) {
5081 DRM_ERROR("DRRS already enabled\n");
5082 goto unlock;
5083 }
5084
5085 dev_priv->drrs.busy_frontbuffer_bits = 0;
5086
5087 dev_priv->drrs.dp = intel_dp;
5088
5089unlock:
5090 mutex_unlock(&dev_priv->drrs.mutex);
5091}
5092
b33a2815
VK
5093/**
5094 * intel_edp_drrs_disable - Disable DRRS
5095 * @intel_dp: DP struct
5096 *
5097 */
c395578e
VK
5098void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5099{
5100 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5101 struct drm_i915_private *dev_priv = dev->dev_private;
5102 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5103 struct drm_crtc *crtc = dig_port->base.base.crtc;
5104 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5105
5106 if (!intel_crtc->config->has_drrs)
5107 return;
5108
5109 mutex_lock(&dev_priv->drrs.mutex);
5110 if (!dev_priv->drrs.dp) {
5111 mutex_unlock(&dev_priv->drrs.mutex);
5112 return;
5113 }
5114
5115 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5116 intel_dp_set_drrs_state(dev_priv->dev,
5117 intel_dp->attached_connector->panel.
5118 fixed_mode->vrefresh);
5119
5120 dev_priv->drrs.dp = NULL;
5121 mutex_unlock(&dev_priv->drrs.mutex);
5122
5123 cancel_delayed_work_sync(&dev_priv->drrs.work);
5124}
5125
4e9ac947
VK
5126static void intel_edp_drrs_downclock_work(struct work_struct *work)
5127{
5128 struct drm_i915_private *dev_priv =
5129 container_of(work, typeof(*dev_priv), drrs.work.work);
5130 struct intel_dp *intel_dp;
5131
5132 mutex_lock(&dev_priv->drrs.mutex);
5133
5134 intel_dp = dev_priv->drrs.dp;
5135
5136 if (!intel_dp)
5137 goto unlock;
5138
439d7ac0 5139 /*
4e9ac947
VK
 5140 * The delayed work can race with an invalidate, hence we need to
5141 * recheck.
439d7ac0
PB
5142 */
5143
4e9ac947
VK
5144 if (dev_priv->drrs.busy_frontbuffer_bits)
5145 goto unlock;
439d7ac0 5146
4e9ac947
VK
5147 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5148 intel_dp_set_drrs_state(dev_priv->dev,
5149 intel_dp->attached_connector->panel.
5150 downclock_mode->vrefresh);
439d7ac0 5151
4e9ac947 5152unlock:
439d7ac0 5153
4e9ac947 5154 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5155}
5156
b33a2815
VK
5157/**
5158 * intel_edp_drrs_invalidate - Invalidate DRRS
5159 * @dev: DRM device
5160 * @frontbuffer_bits: frontbuffer plane tracking bits
5161 *
5162 * When there is a disturbance on screen (due to cursor movement/time
 5163 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5164 * high RR.
5165 *
5166 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5167 */
a93fad0f
VK
5168void intel_edp_drrs_invalidate(struct drm_device *dev,
5169 unsigned frontbuffer_bits)
5170{
5171 struct drm_i915_private *dev_priv = dev->dev_private;
5172 struct drm_crtc *crtc;
5173 enum pipe pipe;
5174
5175 if (!dev_priv->drrs.dp)
5176 return;
5177
3954e733
R
5178 cancel_delayed_work_sync(&dev_priv->drrs.work);
5179
a93fad0f
VK
5180 mutex_lock(&dev_priv->drrs.mutex);
5181 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5182 pipe = to_intel_crtc(crtc)->pipe;
5183
5184 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5185 intel_dp_set_drrs_state(dev_priv->dev,
5186 dev_priv->drrs.dp->attached_connector->panel.
5187 fixed_mode->vrefresh);
5188 }
5189
5190 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5191
5192 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5193 mutex_unlock(&dev_priv->drrs.mutex);
5194}
5195
b33a2815
VK
5196/**
5197 * intel_edp_drrs_flush - Flush DRRS
5198 * @dev: DRM device
5199 * @frontbuffer_bits: frontbuffer plane tracking bits
5200 *
5201 * When there is no movement on screen, DRRS work can be scheduled.
5202 * This DRRS work is responsible for setting relevant registers after a
5203 * timeout of 1 second.
5204 *
5205 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5206 */
a93fad0f
VK
5207void intel_edp_drrs_flush(struct drm_device *dev,
5208 unsigned frontbuffer_bits)
5209{
5210 struct drm_i915_private *dev_priv = dev->dev_private;
5211 struct drm_crtc *crtc;
5212 enum pipe pipe;
5213
5214 if (!dev_priv->drrs.dp)
5215 return;
5216
3954e733
R
5217 cancel_delayed_work_sync(&dev_priv->drrs.work);
5218
a93fad0f
VK
5219 mutex_lock(&dev_priv->drrs.mutex);
5220 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5221 pipe = to_intel_crtc(crtc)->pipe;
5222 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5223
a93fad0f
VK
5224 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5225 !dev_priv->drrs.busy_frontbuffer_bits)
5226 schedule_delayed_work(&dev_priv->drrs.work,
5227 msecs_to_jiffies(1000));
5228 mutex_unlock(&dev_priv->drrs.mutex);
5229}
5230
b33a2815
VK
5231/**
5232 * DOC: Display Refresh Rate Switching (DRRS)
5233 *
5234 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5235 * which enables switching between low and high refresh rates,
5236 * dynamically, based on the usage scenario. This feature is applicable
5237 * for internal panels.
5238 *
5239 * Indication that the panel supports DRRS is given by the panel EDID, which
5240 * would list multiple refresh rates for one resolution.
5241 *
5242 * DRRS is of 2 types - static and seamless.
5243 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5244 * (may appear as a blink on screen) and is used in dock-undock scenario.
5245 * Seamless DRRS involves changing RR without any visual effect to the user
5246 * and can be used during normal system usage. This is done by programming
5247 * certain registers.
5248 *
5249 * Support for static/seamless DRRS may be indicated in the VBT based on
5250 * inputs from the panel spec.
5251 *
5252 * DRRS saves power by switching to low RR based on usage scenarios.
5253 *
5254 * eDP DRRS:-
5255 * The implementation is based on frontbuffer tracking implementation.
5256 * When there is a disturbance on the screen triggered by user activity or a
5257 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5258 * When there is no movement on screen, after a timeout of 1 second, a switch
5259 * to low RR is made.
5260 * For integration with frontbuffer tracking code,
5261 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5262 *
5263 * DRRS can be further extended to support other internal panels and also
5264 * the scenario of video playback wherein RR is set based on the rate
5265 * requested by userspace.
5266 */
5267
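/*
 * Rough usage sketch (not the exact i915 call sites): frontbuffer tracking
 * is expected to bracket screen updates roughly as
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	... render or flip ...
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 *
 * so activity forces high RR immediately, while the delayed work scheduled
 * from intel_edp_drrs_flush() drops back to low RR after ~1 second of
 * idleness.
 */
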
5268/**
5269 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5270 * @intel_connector: eDP connector
5271 * @fixed_mode: preferred mode of panel
5272 *
5273 * This function is called only once at driver load to initialize basic
5274 * DRRS stuff.
5275 *
5276 * Returns:
5277 * Downclock mode if panel supports it, else return NULL.
5278 * DRRS support is determined by the presence of downclock mode (apart
5279 * from VBT setting).
5280 */
4f9db5b5 5281static struct drm_display_mode *
96178eeb
VK
5282intel_dp_drrs_init(struct intel_connector *intel_connector,
5283 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5284{
5285 struct drm_connector *connector = &intel_connector->base;
96178eeb 5286 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5287 struct drm_i915_private *dev_priv = dev->dev_private;
5288 struct drm_display_mode *downclock_mode = NULL;
5289
5290 if (INTEL_INFO(dev)->gen <= 6) {
5291 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5292 return NULL;
5293 }
5294
5295 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5296 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5297 return NULL;
5298 }
5299
5300 downclock_mode = intel_find_panel_downclock
5301 (dev, fixed_mode, connector);
5302
5303 if (!downclock_mode) {
a1d26342 5304 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5305 return NULL;
5306 }
5307
4e9ac947
VK
5308 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5309
96178eeb 5310 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5311
96178eeb 5312 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5313
96178eeb 5314 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5315 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5316 return downclock_mode;
5317}
5318
ed92f0b2 5319static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5320 struct intel_connector *intel_connector)
ed92f0b2
PZ
5321{
5322 struct drm_connector *connector = &intel_connector->base;
5323 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5324 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5325 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5326 struct drm_i915_private *dev_priv = dev->dev_private;
5327 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5328 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5329 bool has_dpcd;
5330 struct drm_display_mode *scan;
5331 struct edid *edid;
6517d273 5332 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5333
96178eeb 5334 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5335
ed92f0b2
PZ
5336 if (!is_edp(intel_dp))
5337 return true;
5338
49e6bc51
VS
5339 pps_lock(intel_dp);
5340 intel_edp_panel_vdd_sanitize(intel_dp);
5341 pps_unlock(intel_dp);
63635217 5342
ed92f0b2 5343 /* Cache DPCD and EDID for edp. */
ed92f0b2 5344 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5345
5346 if (has_dpcd) {
5347 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5348 dev_priv->no_aux_handshake =
5349 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5350 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5351 } else {
5352 /* if this fails, presume the device is a ghost */
5353 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5354 return false;
5355 }
5356
5357 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5358 pps_lock(intel_dp);
36b5f425 5359 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5360 pps_unlock(intel_dp);
ed92f0b2 5361
060c8778 5362 mutex_lock(&dev->mode_config.mutex);
0b99836f 5363 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5364 if (edid) {
5365 if (drm_add_edid_modes(connector, edid)) {
5366 drm_mode_connector_update_edid_property(connector,
5367 edid);
5368 drm_edid_to_eld(connector, edid);
5369 } else {
5370 kfree(edid);
5371 edid = ERR_PTR(-EINVAL);
5372 }
5373 } else {
5374 edid = ERR_PTR(-ENOENT);
5375 }
5376 intel_connector->edid = edid;
5377
5378 /* prefer fixed mode from EDID if available */
5379 list_for_each_entry(scan, &connector->probed_modes, head) {
5380 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5381 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5382 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5383 intel_connector, fixed_mode);
ed92f0b2
PZ
5384 break;
5385 }
5386 }
5387
5388 /* fallback to VBT if available for eDP */
5389 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5390 fixed_mode = drm_mode_duplicate(dev,
5391 dev_priv->vbt.lfp_lvds_vbt_mode);
5392 if (fixed_mode)
5393 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5394 }
060c8778 5395 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5396
01527b31
CT
5397 if (IS_VALLEYVIEW(dev)) {
5398 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5399 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5400
5401 /*
5402 * Figure out the current pipe for the initial backlight setup.
5403 * If the current pipe isn't valid, try the PPS pipe, and if that
5404 * fails just assume pipe A.
5405 */
5406 if (IS_CHERRYVIEW(dev))
5407 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5408 else
5409 pipe = PORT_TO_PIPE(intel_dp->DP);
5410
5411 if (pipe != PIPE_A && pipe != PIPE_B)
5412 pipe = intel_dp->pps_pipe;
5413
5414 if (pipe != PIPE_A && pipe != PIPE_B)
5415 pipe = PIPE_A;
5416
5417 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5418 pipe_name(pipe));
01527b31
CT
5419 }
5420
4f9db5b5 5421 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5422 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5423 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5424
5425 return true;
5426}
5427
16c25533 5428bool
f0fec3f2
PZ
5429intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5430 struct intel_connector *intel_connector)
a4fc5ed6 5431{
f0fec3f2
PZ
5432 struct drm_connector *connector = &intel_connector->base;
5433 struct intel_dp *intel_dp = &intel_dig_port->dp;
5434 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5435 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5436 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5437 enum port port = intel_dig_port->port;
0b99836f 5438 int type;
a4fc5ed6 5439
a4a5d2f8
VS
5440 intel_dp->pps_pipe = INVALID_PIPE;
5441
ec5b01dd 5442 /* intel_dp vfuncs */
b6b5e383
DL
5443 if (INTEL_INFO(dev)->gen >= 9)
5444 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5445 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5446 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5447 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5448 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5449 else if (HAS_PCH_SPLIT(dev))
5450 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5451 else
5452 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5453
b9ca5fad
DL
5454 if (INTEL_INFO(dev)->gen >= 9)
5455 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5456 else
5457 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5458
0767935e
DV
5459 /* Preserve the current hw state. */
5460 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5461 intel_dp->attached_connector = intel_connector;
3d3dc149 5462
3b32a35b 5463 if (intel_dp_is_edp(dev, port))
b329530c 5464 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5465 else
5466 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5467
f7d24902
ID
5468 /*
5469 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5470 * for DP the encoder type can be set by the caller to
5471 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5472 */
5473 if (type == DRM_MODE_CONNECTOR_eDP)
5474 intel_encoder->type = INTEL_OUTPUT_EDP;
5475
c17ed5b5
VS
5476 /* eDP only on port B and/or C on vlv/chv */
5477 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5478 port != PORT_B && port != PORT_C))
5479 return false;
5480
e7281eab
ID
5481 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5482 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5483 port_name(port));
5484
b329530c 5485 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5486 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5487
a4fc5ed6
KP
5488 connector->interlace_allowed = true;
5489 connector->doublescan_allowed = 0;
5490
f0fec3f2 5491 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5492 edp_panel_vdd_work);
a4fc5ed6 5493
df0e9248 5494 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5495 drm_connector_register(connector);
a4fc5ed6 5496
affa9354 5497 if (HAS_DDI(dev))
bcbc889b
PZ
5498 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5499 else
5500 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5501 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5502
0b99836f 5503 /* Set up the hotplug pin. */
ab9d7c30
PZ
5504 switch (port) {
5505 case PORT_A:
1d843f9d 5506 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5507 break;
5508 case PORT_B:
1d843f9d 5509 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5510 break;
5511 case PORT_C:
1d843f9d 5512 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5513 break;
5514 case PORT_D:
1d843f9d 5515 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5516 break;
5517 default:
ad1c0b19 5518 BUG();
5eb08b69
ZW
5519 }
5520
dada1a9f 5521 if (is_edp(intel_dp)) {
773538e8 5522 pps_lock(intel_dp);
1e74a324
VS
5523 intel_dp_init_panel_power_timestamps(intel_dp);
5524 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5525 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5526 else
36b5f425 5527 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5528 pps_unlock(intel_dp);
dada1a9f 5529 }
0095e6dc 5530
9d1a1031 5531 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5532
0e32b39c 5533 /* init MST on ports that can support it */
c86ea3d0 5534 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5535 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5536 intel_dp_mst_encoder_init(intel_dig_port,
5537 intel_connector->base.base.id);
0e32b39c
DA
5538 }
5539 }
5540
36b5f425 5541 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5542 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5543 if (is_edp(intel_dp)) {
5544 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5545 /*
 5546 * vdd might still be enabled due to the delayed vdd off.
5547 * Make sure vdd is actually turned off here.
5548 */
773538e8 5549 pps_lock(intel_dp);
4be73780 5550 edp_panel_vdd_off_sync(intel_dp);
773538e8 5551 pps_unlock(intel_dp);
15b1d171 5552 }
34ea3d38 5553 drm_connector_unregister(connector);
b2f246a8 5554 drm_connector_cleanup(connector);
16c25533 5555 return false;
b2f246a8 5556 }
32f9d658 5557
f684960e
CW
5558 intel_dp_add_properties(intel_dp, connector);
5559
a4fc5ed6
KP
5560 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5561 * 0xd. Failure to do so will result in spurious interrupts being
5562 * generated on the port when a cable is not attached.
5563 */
5564 if (IS_G4X(dev) && !IS_GM45(dev)) {
5565 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5566 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5567 }
16c25533
PZ
5568
5569 return true;
a4fc5ed6 5570}
f0fec3f2
PZ
5571
5572void
5573intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5574{
13cf5504 5575 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5576 struct intel_digital_port *intel_dig_port;
5577 struct intel_encoder *intel_encoder;
5578 struct drm_encoder *encoder;
5579 struct intel_connector *intel_connector;
5580
b14c5679 5581 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5582 if (!intel_dig_port)
5583 return;
5584
b14c5679 5585 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5586 if (!intel_connector) {
5587 kfree(intel_dig_port);
5588 return;
5589 }
5590
5591 intel_encoder = &intel_dig_port->base;
5592 encoder = &intel_encoder->base;
5593
5594 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5595 DRM_MODE_ENCODER_TMDS);
5596
5bfe2ac0 5597 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5598 intel_encoder->disable = intel_disable_dp;
00c09d70 5599 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5600 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5601 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5602 if (IS_CHERRYVIEW(dev)) {
9197c88b 5603 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5604 intel_encoder->pre_enable = chv_pre_enable_dp;
5605 intel_encoder->enable = vlv_enable_dp;
580d3811 5606 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5607 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5608 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5609 intel_encoder->pre_enable = vlv_pre_enable_dp;
5610 intel_encoder->enable = vlv_enable_dp;
49277c31 5611 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5612 } else {
ecff4f3b
JN
5613 intel_encoder->pre_enable = g4x_pre_enable_dp;
5614 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5615 if (INTEL_INFO(dev)->gen >= 5)
5616 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5617 }
f0fec3f2 5618
174edf1f 5619 intel_dig_port->port = port;
f0fec3f2
PZ
5620 intel_dig_port->dp.output_reg = output_reg;
5621
00c09d70 5622 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5623 if (IS_CHERRYVIEW(dev)) {
5624 if (port == PORT_D)
5625 intel_encoder->crtc_mask = 1 << 2;
5626 else
5627 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5628 } else {
5629 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5630 }
bc079e8b 5631 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5632 intel_encoder->hot_plug = intel_dp_hot_plug;
5633
13cf5504
DA
5634 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5635 dev_priv->hpd_irq_port[port] = intel_dig_port;
5636
15b1d171
PZ
5637 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5638 drm_encoder_cleanup(encoder);
5639 kfree(intel_dig_port);
b2f246a8 5640 kfree(intel_connector);
15b1d171 5641 }
f0fec3f2 5642}
0e32b39c
DA
5643
5644void intel_dp_mst_suspend(struct drm_device *dev)
5645{
5646 struct drm_i915_private *dev_priv = dev->dev_private;
5647 int i;
5648
5649 /* disable MST */
5650 for (i = 0; i < I915_MAX_PORTS; i++) {
5651 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5652 if (!intel_dig_port)
5653 continue;
5654
5655 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5656 if (!intel_dig_port->dp.can_mst)
5657 continue;
5658 if (intel_dig_port->dp.is_mst)
5659 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5660 }
5661 }
5662}
5663
5664void intel_dp_mst_resume(struct drm_device *dev)
5665{
5666 struct drm_i915_private *dev_priv = dev->dev_private;
5667 int i;
5668
5669 for (i = 0; i < I915_MAX_PORTS; i++) {
5670 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5671 if (!intel_dig_port)
5672 continue;
5673 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5674 int ret;
5675
5676 if (!intel_dig_port->dp.can_mst)
5677 continue;
5678
5679 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5680 if (ret != 0) {
5681 intel_dp_check_mst_status(&intel_dig_port->dp);
5682 }
5683 }
5684 }
5685}