drm/i915: Always program m2 fractional value on CHV
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairing of a DP link clock (kHz) with the DPLL dividers that produce it. */
struct dp_link_dpll {
	int clock;		/* link clock in kHz */
	struct dpll dpll;	/* divider values for that clock */
};

/* Gen4 (i965-class) DPLL settings for the two DP 1.1 link rates. */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH (Ironlake+) DPLL settings. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Per-platform tables of supported link rates, in kHz, ascending. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
/* Fallback: the three standard DP link rates. */
static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 102
cfcb0fc9
JB
103/**
104 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
105 * @intel_dp: DP struct
106 *
107 * If a CPU or PCH DP output is attached to an eDP panel, this function
108 * will return true, and false otherwise.
109 */
110static bool is_edp(struct intel_dp *intel_dp)
111{
da63a9f2
PZ
112 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
113
114 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
115}
116
68b4d824 117static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 118{
68b4d824
ID
119 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
120
121 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
122}
123
df0e9248
CW
124static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
125{
fa90ecef 126 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
127}
128
ea5b213a 129static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 130static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 131static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 132static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
133static void vlv_steal_power_sequencer(struct drm_device *dev,
134 enum pipe pipe);
a4fc5ed6 135
ed4e9c1d
VS
136static int
137intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 138{
7183dc29 139 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
140
141 switch (max_link_bw) {
142 case DP_LINK_BW_1_62:
143 case DP_LINK_BW_2_7:
1db10e28 144 case DP_LINK_BW_5_4:
d4eead50 145 break;
a4fc5ed6 146 default:
d4eead50
ID
147 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
148 max_link_bw);
a4fc5ed6
KP
149 max_link_bw = DP_LINK_BW_1_62;
150 break;
151 }
152 return max_link_bw;
153}
154
/*
 * Maximum usable lane count: the lesser of what the source port can drive
 * and what the sink reports in its DPCD.
 */
static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	/*
	 * Source side can normally drive 4 lanes, except DDI port A when
	 * the DDI_A_4_LANES strap/bit is not set — then only 2 lanes.
	 */
	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	/* Sink-side limit comes from the cached DPCD. */
	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}
170
cd9dde44
AJ
171/*
172 * The units on the numbers in the next two are... bizarre. Examples will
173 * make it clearer; this one parallels an example in the eDP spec.
174 *
175 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
176 *
177 * 270000 * 1 * 8 / 10 == 216000
178 *
179 * The actual data capacity of that configuration is 2.16Gbit/s, so the
180 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
181 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
182 * 119000. At 18bpp that's 2142000 kilobits per second.
183 *
184 * Thus the strange-looking division by 10 in intel_dp_link_required, to
185 * get the result in decakilobits instead of kilobits.
186 */
187
/*
 * Required bandwidth for a mode, in decakilobits/s (see the units
 * discussion above). Rounds up: the +9 before the /10 performs a
 * ceiling division by 10.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
193
/*
 * Maximum data rate of a link, in decakilobits/s. The 8/10 factor
 * accounts for 8b/10b channel coding overhead.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int total = max_link_clock * max_lanes;

	return total * 8 / 10;
}
199
/*
 * Validate a display mode against link bandwidth and (for eDP) the
 * fixed panel mode. Returns a drm_mode_status code.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	/*
	 * eDP panels can't exceed their fixed native mode; anything larger
	 * is rejected and the panel's own clock is used for the rate check.
	 */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* 18 bpp here — presumably the lowest bpp fallback; confirm. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	/* Reject pixel clocks below 10 MHz. */
	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	/* Double-clocked modes are not supported on DP. */
	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
237
/*
 * Pack up to 4 bytes into a 32-bit AUX data register value, first byte
 * in the most significant position. Extra input bytes are ignored.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t value = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;

	for (i = 0; i < src_bytes; i++)
		value |= (uint32_t)src[i] << (24 - i * 8);

	return value;
}
249
/*
 * Unpack a 32-bit AUX data register value into up to 4 bytes, most
 * significant byte first. At most 4 destination bytes are written.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;

	for (i = 0; i < dst_bytes; i++)
		dst[i] = (uint8_t)(src >> (24 - i * 8));
}
258
/* hrawclock is 1/4 the FSB frequency */
/*
 * Return the raw hardware clock in MHz, derived from the FSB speed
 * encoded in CLKCFG. Unknown encodings fall back to 133 MHz.
 */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}
292
bf13e81b
JN
293static void
294intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 295 struct intel_dp *intel_dp);
bf13e81b
JN
296static void
297intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 298 struct intel_dp *intel_dp);
bf13e81b 299
/*
 * Take the panel power sequencer lock. The power domain reference must
 * be acquired BEFORE pps_mutex (see comment below); pps_unlock() releases
 * in the opposite order.
 */
static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}
317
/*
 * Release the panel power sequencer lock taken by pps_lock(): drop the
 * mutex first, then the power domain reference.
 */
static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}
331
/*
 * Force the power sequencer on intel_dp->pps_pipe to lock onto this port
 * by briefly enabling and disabling the port. The port must currently be
 * disabled; the pipe's DPLL is temporarily forced on if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled;
	uint32_t DP;

	/* Refuse to touch an active port; this trick only works when it's off. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Route the port to the pipe whose power sequencer we're claiming. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled)
		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Restore the DPLL to its prior state if we forced it on. */
	if (!pll_enabled)
		vlv_force_pll_off(dev, pipe);
}
391
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * one (and kicking it into locking onto the port) if none is assigned yet.
 * Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/* Fast path: already assigned. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		/* Mask off sequencers already claimed by other eDP ports. */
		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
455
/* Predicate used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* True if the pipe's panel power sequencer reports panel power on. */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

/* True if the pipe's panel power sequencer has VDD force enabled. */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Catch-all predicate: accept any pipe. */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
bf13e81b 476
a4a5d2f8 477static enum pipe
6491ab27
VS
478vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
479 enum port port,
480 vlv_pipe_check pipe_check)
a4a5d2f8
VS
481{
482 enum pipe pipe;
bf13e81b 483
bf13e81b
JN
484 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
485 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
486 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
487
488 if (port_sel != PANEL_PORT_SELECT_VLV(port))
489 continue;
490
6491ab27
VS
491 if (!pipe_check(dev_priv, pipe))
492 continue;
493
a4a5d2f8 494 return pipe;
bf13e81b
JN
495 }
496
a4a5d2f8
VS
497 return INVALID_PIPE;
498}
499
/*
 * At init time, adopt whichever power sequencer the BIOS left driving this
 * port, preferring (in order) one with panel power on, one with VDD on,
 * then any with the right port selected. Caller must hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	/* Sync our bookkeeping and hardware with the adopted sequencer. */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
536
/*
 * Forget all power sequencer assignments (VLV/CHV only), e.g. after the
 * display power well was down and the PPS registers lost their state.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
565
566static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
567{
568 struct drm_device *dev = intel_dp_to_dev(intel_dp);
569
b0a08bec
VK
570 if (IS_BROXTON(dev))
571 return BXT_PP_CONTROL(0);
572 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
573 return PCH_PP_CONTROL;
574 else
575 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
576}
577
578static u32 _pp_stat_reg(struct intel_dp *intel_dp)
579{
580 struct drm_device *dev = intel_dp_to_dev(intel_dp);
581
b0a08bec
VK
582 if (IS_BROXTON(dev))
583 return BXT_PP_STATUS(0);
584 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
585 return PCH_PP_STATUS;
586 else
587 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
588}
589
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	/* Only act for eDP on an actual restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Block long enough to satisfy the panel's power-cycle delay. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
625
/*
 * Check the PP status register for panel power. Caller must hold
 * pps_mutex. Returns false on VLV/CHV when no sequencer is assigned.
 */
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No sequencer assigned yet, so nothing can be powered. */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
639
/*
 * Check the PP control register for the VDD force bit. Caller must hold
 * pps_mutex. Returns false on VLV/CHV when no sequencer is assigned.
 */
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* No sequencer assigned yet, so VDD cannot be forced on. */
	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
653
/*
 * Sanity check before AUX traffic on eDP: warn if the panel has neither
 * power nor forced VDD, since the transaction would then fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Regular DP ports don't need panel power. */
	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}
670
/*
 * Wait (up to 10 ms) for the AUX channel to go idle, either via the AUX
 * interrupt or by polling. Returns the final AUX_CH_CTL status value,
 * logging an error if the hardware never signalled completion.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

/* C re-reads the status register and tests the SEND_BUSY bit each time
 * the wait condition is evaluated. */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
694
/*
 * AUX clock divider for gen4-class hardware. Only one divider is
 * available, so any index > 0 terminates the caller's retry loop.
 */
static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz. So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}
706
/*
 * AUX clock divider for ILK+: port A derives from cdclk, other ports
 * from the PCH raw clock. A single divider only (index 0).
 */
static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		/* cdclk_freq is in kHz; divide by 2000 for a ~2 MHz AUX clock. */
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
723
/*
 * AUX clock divider for HSW/BDW. On LPT (non-ULT) PCH two divider
 * candidates are tried in sequence (index 0 then 1) as a workaround;
 * other configurations expose just one divider.
 */
static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}
745
/* VLV uses a fixed AUX clock divider of 100; only index 0 is valid. */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
750
/*
 * SKL doesn't need us to program the AUX clock divider (Hardware will
 * derive the clock from CDCLK automatically). We still implement the
 * get_aux_clock_divider vfunc to plug-in into the existing code.
 */
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 1;
}
760
/*
 * Compose the AUX_CH_CTL value that starts a transaction on pre-SKL
 * hardware: busy/done/error bits, timeout, message size, precharge
 * time and the 2x bit-clock divider.
 */
static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	/* Gen6 uses a shorter precharge than later generations. */
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* BDW eDP (port A) AUX channel gets a longer timeout. */
	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}
790
/*
 * Compose the AUX_CH_CTL value for SKL+: no clock divider or precharge
 * fields (hardware derives timing itself); uses a fixed sync pulse
 * count and a 1600us timeout. The divider argument is unused.
 */
static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}
805
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * then read up to @recv_size bytes into @recv. Returns the number of
 * bytes received, or a negative errno (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 *
 * Handles VDD, PM QoS, runtime PM and the retry policy (up to 5 attempts
 * per clock divider candidate) internally. Called with pps_lock taken
 * and released inside.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	/* Data registers follow the control register. */
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Only warn once per distinct stuck status value. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Try each clock divider candidate until the platform returns 0. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo in reverse order of acquisition; runs on every exit path. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
960
a6c8aff0
JN
961#define BARE_ADDRESS_SIZE 3
962#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
/*
 * drm_dp_aux .transfer hook: encode @msg into the raw AUX wire format
 * (4-byte header + optional payload), run it through intel_dp_aux_ch(),
 * and decode the hardware's reply back into @msg.
 *
 * Returns the payload size transferred on success, or a negative errno.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* AUX header: 4-bit request, 20-bit address, length - 1 */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		/* A zero-sized write is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			/* First reply byte carries the reply code in bits 7:4 */
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* reply byte + payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1031
9d1a1031
JN
/*
 * Set up the AUX channel for this port: pick the AUX_CH_CTL register and
 * DDC bus name, register the channel with the DRM DP AUX core, and expose
 * a sysfs link from the connector to the i2c-over-AUX adapter.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			/* Fall back to AUX A for anything unrecognized */
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	/* Expose the ddc adapter next to the connector in sysfs. */
	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		/* Link failure is non-fatal for the AUX channel itself,
		 * but unregister to keep state consistent. */
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}
1123
80f65de3
ID
1124static void
1125intel_dp_connector_unregister(struct intel_connector *intel_connector)
1126{
1127 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1128
0e32b39c
DA
1129 if (!intel_connector->mst_port)
1130 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1131 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1132 intel_connector_unregister(intel_connector);
1133}
1134
/*
 * Prepare the shared-DPLL state for SKL eDP: everything runs off DPLL0,
 * whose link rate is selected via DPLL_CTRL1. cfgcr1/2 stay zero since
 * DPLL0 does not use the WRPLL configuration registers here.
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	/* The cases below are half the link rate (port_clock) in kHz. */
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	/* NOTE(review): an unlisted port_clock leaves the link-rate field
	 * zero (only the OVERRIDE bit set) -- confirm callers can only pass
	 * rates from skl_rates. */
	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1180
0e50338c 1181static void
840b32b7 1182hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1183{
ee46f3c7
ACO
1184 memset(&pipe_config->dpll_hw_state, 0,
1185 sizeof(pipe_config->dpll_hw_state));
1186
840b32b7
VS
1187 switch (pipe_config->port_clock / 2) {
1188 case 81000:
0e50338c
DV
1189 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1190 break;
840b32b7 1191 case 135000:
0e50338c
DV
1192 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1193 break;
840b32b7 1194 case 270000:
0e50338c
DV
1195 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1196 break;
1197 }
1198}
1199
fc0f8e25 1200static int
12f6a2e2 1201intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1202{
94ca719e
VS
1203 if (intel_dp->num_sink_rates) {
1204 *sink_rates = intel_dp->sink_rates;
1205 return intel_dp->num_sink_rates;
fc0f8e25 1206 }
12f6a2e2
VS
1207
1208 *sink_rates = default_rates;
1209
1210 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1211}
1212
/*
 * Report the source's (platform's) supported link rates. BXT/SKL/CHV have
 * explicit rate tables; other platforms use default_rates with the usable
 * length expressed as "max link BW code index + 1".
 */
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		return ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		return ARRAY_SIZE(skl_rates);
	} else if (IS_CHERRYVIEW(dev)) {
		*source_rates = chv_rates;
		return ARRAY_SIZE(chv_rates);
	}

	*source_rates = default_rates;

	/*
	 * NOTE(review): this branch looks unreachable -- any SKL device
	 * already returned skl_rates above, so WaDisableHBR2 is never
	 * applied here. Confirm whether the workaround needs to cap the
	 * skl_rates path instead.
	 */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		/* WaDisableHBR2:skl */
		return (DP_LINK_BW_2_7 >> 3) + 1;
	else if (INTEL_INFO(dev)->gen >= 8 ||
	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
		return (DP_LINK_BW_5_4 >> 3) + 1;
	else
		return (DP_LINK_BW_2_7 >> 3) + 1;
}
1238
c6bb3538
DV
1239static void
1240intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1241 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1242{
1243 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1244 const struct dp_link_dpll *divisor = NULL;
1245 int i, count = 0;
c6bb3538
DV
1246
1247 if (IS_G4X(dev)) {
9dd4ffdf
CML
1248 divisor = gen4_dpll;
1249 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1250 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1251 divisor = pch_dpll;
1252 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1253 } else if (IS_CHERRYVIEW(dev)) {
1254 divisor = chv_dpll;
1255 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1256 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1257 divisor = vlv_dpll;
1258 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1259 }
9dd4ffdf
CML
1260
1261 if (divisor && count) {
1262 for (i = 0; i < count; i++) {
840b32b7 1263 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1264 pipe_config->dpll = divisor[i].dpll;
1265 pipe_config->clock_set = true;
1266 break;
1267 }
1268 }
c6bb3538
DV
1269 }
1270}
1271
2ecae76a
VS
1272static int intersect_rates(const int *source_rates, int source_len,
1273 const int *sink_rates, int sink_len,
94ca719e 1274 int *common_rates)
a8f3ef61
SJ
1275{
1276 int i = 0, j = 0, k = 0;
1277
a8f3ef61
SJ
1278 while (i < source_len && j < sink_len) {
1279 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1280 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1281 return k;
94ca719e 1282 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1283 ++k;
1284 ++i;
1285 ++j;
1286 } else if (source_rates[i] < sink_rates[j]) {
1287 ++i;
1288 } else {
1289 ++j;
1290 }
1291 }
1292 return k;
1293}
1294
/*
 * Fill common_rates with the link rates both the source and the sink
 * support, in ascending order. Returns the number of entries.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *snk_rates, *src_rates;
	int n_snk, n_src;

	n_snk = intel_dp_sink_rates(intel_dp, &snk_rates);
	n_src = intel_dp_source_rates(dev, &src_rates);

	return intersect_rates(src_rates, n_src,
			       snk_rates, n_snk,
			       common_rates);
}
1309
/*
 * Format nelem integers from array into str as a ", "-separated list.
 * Output is always NUL-terminated; on truncation the partial text that
 * fit is left in place and formatting stops.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	size_t pos = 0;
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int written = snprintf(str + pos, len - pos, "%s%d",
				       i ? ", " : "", array[i]);
		/* Stop on snprintf error or once the buffer is full. */
		if (written < 0 || (size_t)written >= len - pos)
			return;
		pos += written;
	}
}
1325
/* Dump the source, sink and common link rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Skip all the formatting work unless KMS debugging is enabled. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1349
f4896f15 1350static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1351{
1352 int i = 0;
1353
1354 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1355 if (find == rates[i])
1356 break;
1357
1358 return i;
1359}
1360
50fec21a
VS
1361int
1362intel_dp_max_link_rate(struct intel_dp *intel_dp)
1363{
1364 int rates[DP_MAX_SUPPORTED_RATES] = {};
1365 int len;
1366
94ca719e 1367 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1368 if (WARN_ON(len <= 0))
1369 return 162000;
1370
1371 return rates[rate_to_index(0, rates) - 1];
1372}
1373
/*
 * Map a link rate to its index in the sink's advertised rate table, for
 * programming the DPCD LINK_RATE_SET register on eDP 1.4 sinks.
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1378
04a60f9f
VS
1379static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1380 uint8_t *link_bw, uint8_t *rate_select)
1381{
1382 if (intel_dp->num_sink_rates) {
1383 *link_bw = 0;
1384 *rate_select =
1385 intel_dp_rate_select(intel_dp, port_clock);
1386 } else {
1387 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1388 *rate_select = 0;
1389 }
1390}
1391
00c09d70 1392bool
5bfe2ac0 1393intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1394 struct intel_crtc_state *pipe_config)
a4fc5ed6 1395{
5bfe2ac0 1396 struct drm_device *dev = encoder->base.dev;
36008365 1397 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1398 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1399 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1400 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1401 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1402 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1403 int lane_count, clock;
56071a20 1404 int min_lane_count = 1;
eeb6324d 1405 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1406 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1407 int min_clock = 0;
a8f3ef61 1408 int max_clock;
083f9560 1409 int bpp, mode_rate;
ff9a6750 1410 int link_avail, link_clock;
94ca719e
VS
1411 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1412 int common_len;
04a60f9f 1413 uint8_t link_bw, rate_select;
a8f3ef61 1414
94ca719e 1415 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1416
1417 /* No common link rates between source and sink */
94ca719e 1418 WARN_ON(common_len <= 0);
a8f3ef61 1419
94ca719e 1420 max_clock = common_len - 1;
a4fc5ed6 1421
bc7d38a4 1422 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1423 pipe_config->has_pch_encoder = true;
1424
03afc4a2 1425 pipe_config->has_dp_encoder = true;
f769cd24 1426 pipe_config->has_drrs = false;
9fcb1704 1427 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1428
dd06f90e
JN
1429 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1430 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1431 adjusted_mode);
a1b2278e
CK
1432
1433 if (INTEL_INFO(dev)->gen >= 9) {
1434 int ret;
e435d6e5 1435 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1436 if (ret)
1437 return ret;
1438 }
1439
2dd24552
JB
1440 if (!HAS_PCH_SPLIT(dev))
1441 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1442 intel_connector->panel.fitting_mode);
1443 else
b074cec8
JB
1444 intel_pch_panel_fitting(intel_crtc, pipe_config,
1445 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1446 }
1447
cb1793ce 1448 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1449 return false;
1450
083f9560 1451 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1452 "max bw %d pixel clock %iKHz\n",
94ca719e 1453 max_lane_count, common_rates[max_clock],
241bfc38 1454 adjusted_mode->crtc_clock);
083f9560 1455
36008365
DV
1456 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1457 * bpc in between. */
3e7ca985 1458 bpp = pipe_config->pipe_bpp;
56071a20 1459 if (is_edp(intel_dp)) {
22ce5628
TS
1460
1461 /* Get bpp from vbt only for panels that dont have bpp in edid */
1462 if (intel_connector->base.display_info.bpc == 0 &&
1463 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1464 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1465 dev_priv->vbt.edp_bpp);
1466 bpp = dev_priv->vbt.edp_bpp;
1467 }
1468
344c5bbc
JN
1469 /*
1470 * Use the maximum clock and number of lanes the eDP panel
1471 * advertizes being capable of. The panels are generally
1472 * designed to support only a single clock and lane
1473 * configuration, and typically these values correspond to the
1474 * native resolution of the panel.
1475 */
1476 min_lane_count = max_lane_count;
1477 min_clock = max_clock;
7984211e 1478 }
657445fe 1479
36008365 1480 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1481 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1482 bpp);
36008365 1483
c6930992 1484 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1485 for (lane_count = min_lane_count;
1486 lane_count <= max_lane_count;
1487 lane_count <<= 1) {
1488
94ca719e 1489 link_clock = common_rates[clock];
36008365
DV
1490 link_avail = intel_dp_max_data_rate(link_clock,
1491 lane_count);
1492
1493 if (mode_rate <= link_avail) {
1494 goto found;
1495 }
1496 }
1497 }
1498 }
c4867936 1499
36008365 1500 return false;
3685a8f3 1501
36008365 1502found:
55bc60db
VS
1503 if (intel_dp->color_range_auto) {
1504 /*
1505 * See:
1506 * CEA-861-E - 5.1 Default Encoding Parameters
1507 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1508 */
0f2a2a75
VS
1509 pipe_config->limited_color_range =
1510 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1511 } else {
1512 pipe_config->limited_color_range =
1513 intel_dp->limited_color_range;
55bc60db
VS
1514 }
1515
90a6b7b0 1516 pipe_config->lane_count = lane_count;
a8f3ef61 1517
657445fe 1518 pipe_config->pipe_bpp = bpp;
94ca719e 1519 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1520
04a60f9f
VS
1521 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1522 &link_bw, &rate_select);
1523
1524 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1525 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1526 pipe_config->port_clock, bpp);
36008365
DV
1527 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1528 mode_rate, link_avail);
a4fc5ed6 1529
03afc4a2 1530 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1531 adjusted_mode->crtc_clock,
1532 pipe_config->port_clock,
03afc4a2 1533 &pipe_config->dp_m_n);
9d1a455b 1534
439d7ac0 1535 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1536 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1537 pipe_config->has_drrs = true;
439d7ac0
PB
1538 intel_link_compute_m_n(bpp, lane_count,
1539 intel_connector->panel.downclock_mode->clock,
1540 pipe_config->port_clock,
1541 &pipe_config->dp_m2_n2);
1542 }
1543
5416d871 1544 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
840b32b7 1545 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1546 else if (IS_BROXTON(dev))
1547 /* handled in ddi */;
5416d871 1548 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1549 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1550 else
840b32b7 1551 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1552
03afc4a2 1553 return true;
a4fc5ed6
KP
1554}
1555
/*
 * Program the CPU eDP PLL frequency select in DP_A for ILK-style CPU eDP,
 * mirroring the chosen bits into intel_dp->DP for the later port enable.
 */
static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		/* Anything other than RBR runs the PLL at 270 MHz. */
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}
1586
/* Cache the negotiated link rate and lane count for link (re)training. */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}
1593
/*
 * Compute the DP port register value (intel_dp->DP) for the current crtc
 * config, handling the per-generation register layout differences. Only
 * TRANS_DP_CTL is actually written here (CPT path); DP itself is written
 * later in the enable sequence.
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: sync polarity and enhanced framing live in DP */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT: enhanced framing is in TRANS_DP_CTL, not DP */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU layout */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1678
ffd6749d
PZ
1679#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1680#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1681
1a5ef5b7
PZ
1682#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1683#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1684
ffd6749d
PZ
1685#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1686#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1687
/*
 * Poll the panel power sequencer status register until (status & mask) ==
 * value, with a 5 second timeout. Requires pps_mutex to be held; a timeout
 * is logged but not propagated to the caller.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	/* 5000 ms total, polling every 10 ms */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 1714
/* Wait until the power sequencer reports the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1720
/* Wait until the power sequencer reports the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1726
/*
 * Wait out the panel power cycle delay before the panel may be powered up
 * again: first the software-tracked remainder since the last power-off,
 * then the sequencer's own off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
				       intel_dp->panel_power_cycle_delay);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1738
/* Honor the panel's power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1744
/* Honor the backlight-off -> panel power-down delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 1750
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/* NOTE(review): BXT skips the unlock-key handling -- presumably the
	 * register lock doesn't exist there; confirm against BXT PPS docs. */
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1770
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on (for AUX/DPCD access before full panel power) and
 * returns true when this call actually turned VDD on -- i.e. the caller
 * owes a matching "off". Returns false for non-eDP or when VDD was
 * already requested.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Keep the delayed-off worker from racing with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already up in hardware: nothing more to program. */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1828
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested, since that indicates an unbalanced caller.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
	     port_name(dp_to_dig_port(intel_dp)->port));
}
1850
/*
 * Synchronously turn off the eDP VDD force and drop the power domain
 * reference that was taken when VDD was enabled.
 * Caller must hold pps_mutex; want_panel_vdd must already be false.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Nobody should still be claiming VDD at this point. */
	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Panel power was already off: this write starts a power cycle. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->last_power_cycle = jiffies;

	/* Release the reference taken when VDD was forced on. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
5d613501 1891
/*
 * Delayed-work callback: drop the VDD force unless someone has
 * re-claimed it (want_panel_vdd) since the work was scheduled.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
1902
/* Schedule the delayed VDD-off work instead of disabling VDD immediately. */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
1915
/*
 * Release the VDD force claim; either synchronously (@sync) or via the
 * delayed work.
 *
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* Unbalanced off: nobody asked for VDD in the first place. */
	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
	     port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
1941
/*
 * Turn the eDP panel power on via the PPS control register and wait for
 * the panel to report ready. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	/* Double-enable indicates a sequencing bug somewhere above us. */
	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	/* Honour the panel's required off->on power cycle delay. */
	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 1989
/* Public wrapper: turn eDP panel power on under pps_mutex. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
1999
/*
 * Turn the eDP panel power off and drop the power domain reference held
 * for VDD. Caller must hold pps_mutex and must have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	u32 pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	/* VDD is being dropped as part of this write; clear the claim. */
	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->last_power_cycle = jiffies;
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
e39b999a 2042
/* Public wrapper: turn eDP panel power off under pps_mutex. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2052
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2082
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the PP control bit. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2094
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the off time so later enables honour the required delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2121
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PP control bit first, then the PWM — reverse of enable order. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
a4fc5ed6 2133
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	/* No-op when hardware already matches the requested state. */
	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
2159
/*
 * Enable the CPU eDP PLL (DP_A). The attached pipe must be disabled;
 * the 200us delay lets the PLL lock before the port is enabled.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2185
/*
 * Disable the CPU eDP PLL (DP_A). The attached pipe must already be
 * disabled and the port off.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail. */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}
2210
c7ad3810 2211/* If the sink supports it, try to set the power state appropriately */
c19b0669 2212void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2213{
2214 int ret, i;
2215
2216 /* Should have a valid DPCD by this point */
2217 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2218 return;
2219
2220 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2221 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2222 DP_SET_POWER_D3);
c7ad3810
JB
2223 } else {
2224 /*
2225 * When turning on, we need to retry for 1ms to give the sink
2226 * time to wake up.
2227 */
2228 for (i = 0; i < 3; i++) {
9d1a1031
JN
2229 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2230 DP_SET_POWER_D0);
c7ad3810
JB
2231 if (ret == 1)
2232 break;
2233 msleep(1);
2234 }
2235 }
f9cac721
JN
2236
2237 if (ret != 1)
2238 DRM_DEBUG_KMS("failed to %s sink power state\n",
2239 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2240}
2241
/*
 * Read back whether this DP encoder is enabled in hardware and, if so,
 * which pipe it is driving. Returns false when the power domain or the
 * port itself is off.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe mapping lives in the transcoder registers. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		/*
		 * NOTE(review): if no transcoder matches we fall through and
		 * still return true without writing *pipe — confirm callers
		 * tolerate this.
		 */
		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
d240f20f 2284
/*
 * Read the current hardware state of this DP encoder back into
 * @pipe_config (sync polarity, audio, lane count, link/dot clock, bpp).
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* On CPT, sync polarity is in the transcoder register. */
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A link rate comes from the DP_A PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2372
/*
 * Encoder disable hook: audio off, PSR off (non-DDI), then the eDP
 * backlight/sink/panel power-down sequence in the required order.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2396
/* ILK+ post-disable: take the link down, and on port A also stop the eDP PLL. */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2406
/* VLV post-disable: only the link needs taking down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2413
/*
 * CHV post-disable: take the link down, then reset the PHY data lanes
 * for both PCS groups via sideband (DPIO) writes.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Propagate soft reset to data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2449
/*
 * Program the requested link training pattern into either DP_TP_CTL
 * (DDI platforms) or the per-platform bits of the DP port register
 * value *DP (caller writes the register). @dp_train_pat carries both
 * the pattern select and the scrambling-disable flag.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		/* CPT-style link-train bits; pattern 3 unsupported here. */
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Only CHV supports pattern 3 on this path. */
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2533
/*
 * Enable the DP port with training pattern 1, using the two-step write
 * required by VLV/CHV.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2557
/*
 * Common enable path: bring up the port, run the eDP panel power
 * sequence under pps_mutex, wake the sink, train the link, and enable
 * audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	unsigned int lane_mask = 0x0;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	/* VLV needs the power sequencer bound to this pipe before enabling. */
	if (IS_VALLEYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	intel_dp_enable_port(intel_dp);

	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev))
		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_complete_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_audio_codec_enable(encoder);
	}
}
89b667f8 2598
/* g4x enable hook: common enable, then backlight. */
static void g4x_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_enable_dp(encoder);
	intel_edp_backlight_on(intel_dp);
}
89b667f8 2606
/* VLV enable hook: port was enabled in pre_enable; just backlight and PSR. */
static void vlv_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_edp_backlight_on(intel_dp);
	intel_psr_enable(intel_dp);
}
2614
/* g4x/ILK pre-enable: program the port, and on port A start the eDP PLL. */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);

	intel_dp_prepare(encoder);

	/* Only ilk+ has port A */
	if (dport->port == PORT_A) {
		ironlake_set_pll_cpu_edp(intel_dp);
		ironlake_edp_pll_on(intel_dp);
	}
}
2628
/*
 * Logically disconnect this port from its current power sequencer:
 * drop VDD, clear the PPS port select, and mark pps_pipe invalid.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power seqeuencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power seqeuencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2654
/*
 * Detach @pipe's power sequencer from whichever eDP port currently owns
 * it, so the caller can claim it. Caller must hold pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Only pipes A and B have power sequencers on VLV/CHV. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *intel_dp;
		enum port port;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* Stealing from an active port indicates a sequencing bug. */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2691
2692static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2693{
2694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2695 struct intel_encoder *encoder = &intel_dig_port->base;
2696 struct drm_device *dev = encoder->base.dev;
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2699
2700 lockdep_assert_held(&dev_priv->pps_mutex);
2701
093e3f13
VS
2702 if (!is_edp(intel_dp))
2703 return;
2704
a4a5d2f8
VS
2705 if (intel_dp->pps_pipe == crtc->pipe)
2706 return;
2707
2708 /*
2709 * If another power sequencer was being used on this
2710 * port previously make sure to turn off vdd there while
2711 * we still have control of it.
2712 */
2713 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2714 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2715
2716 /*
2717 * We may be stealing the power
2718 * sequencer from another port.
2719 */
2720 vlv_steal_power_sequencer(dev, crtc->pipe);
2721
2722 /* now it's all ours */
2723 intel_dp->pps_pipe = crtc->pipe;
2724
2725 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2726 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2727
2728 /* init power sequencer on this pipe and port */
36b5f425
VS
2729 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2730 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2731}
2732
/*
 * VLV pre-enable: program PHY PCS registers via sideband for this
 * pipe/channel, then run the common enable path.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the value read here is immediately overwritten by
	 * "val = 0" below (and the "val &= ~(1<<21)" branch is then a no-op).
	 * This mirrors the code as written — confirm whether the read is
	 * needed only for its side effect, if any.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2761
/*
 * VLV pre-PLL-enable hook for DP: prepare the port registers, then reset
 * the PHY TX lanes to their default state and apply the inter-pair skew
 * workaround, all under the sideband lock.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
			 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2791
/*
 * CHV pre-enable hook for DP: bring the PHY data lanes out of reset,
 * program per-lane latency and stagger settings, then enable the port.
 *
 * The whole DPIO sequence runs under the sideband lock; the order of the
 * register writes follows the hardware bring-up sequence and should not
 * be rearranged.
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	/* Deassert soft data lane reset */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
	val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < 4; i++) {
		/* Set the upar bit (cleared only for lane 1) */
		data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming: scaled by the link rate */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(7) |
		       DPIO_TX2_STAGGER_MULT(5));

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2879
/*
 * CHV pre-PLL-enable hook for DP: prepare the port, then program the PHY
 * common-lane clock distribution and clock channel usage for this
 * port/pipe combination, under the sideband lock.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	u32 val;

	intel_dp_prepare(encoder);

	mutex_lock(&dev_priv->sb_lock);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
2945
/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 *
 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
 * supposed to retry 3 times per the spec.
 *
 * Returns the byte count on success (== size), otherwise the last error
 * (or short-read count) from drm_dp_dpcd_read().
 */
static ssize_t
intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
			void *buffer, size_t size)
{
	ssize_t ret;
	int i;

	/*
	 * Sometime we just get the same incorrect byte repeated
	 * over the entire buffer. Doing just one throw away read
	 * initially seems to "solve" it.
	 */
	drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);

	for (i = 0; i < 3; i++) {
		ret = drm_dp_dpcd_read(aux, offset, buffer, size);
		if (ret == size)
			return ret;
		msleep(1);
	}

	return ret;
}
2976
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 *
 * Returns true only if the full DP_LINK_STATUS_SIZE bytes were read.
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_dpcd_read_wake(&intel_dp->aux,
				       DP_LANE0_1_STATUS,
				       link_status,
				       DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
2989
/* These are source-specific values. */
/*
 * Maximum voltage swing level this source can drive, per platform.
 * Note the ordering matters: Broxton is checked before the generic
 * gen >= 9 branch, and CHV is covered by the IS_VALLEYVIEW() check.
 */
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* low-vswing eDP panels (port A only) may use level 3 */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3013
/*
 * Maximum pre-emphasis level the source supports for the given voltage
 * swing, per platform. Higher swing generally permits less pre-emphasis.
 */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3081
/*
 * Translate the first lane's training-set byte into VLV PHY register
 * values (de-emphasis, pre-emphasis and unique-transition-scale tables)
 * and program them via DPIO under the sideband lock.
 *
 * Returns 0 always: on VLV the signal levels live in the PHY, so no bits
 * need to be merged into the DP port register by the caller. Unsupported
 * swing/pre-emphasis combinations also return 0 without touching the PHY.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	/* opaque PHY tuning constants per swing/pre-emphasis combination */
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3181
/*
 * Translate the first lane's training-set byte into CHV PHY de-emphasis
 * and margin values, then run the PHY swing calibration sequence via DPIO
 * under the sideband lock.
 *
 * Returns 0 always (signal levels live in the PHY on CHV), including for
 * unsupported swing/pre-emphasis combinations, which leave the PHY
 * untouched. The write ordering (clear calc init -> program values ->
 * start calc -> LRC bypass) follows the hardware sequence; do not reorder.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);

	/* Program swing deemph */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/* Disable unique transition scale */
	for (i = 0; i < 4; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* max swing with no pre-emphasis needs the unique transition scale */
	if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
		== DP_TRAIN_PRE_EMPH_LEVEL_0) &&
		((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
		== DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {

		/*
		 * The document said it needs to set bit 27 for ch0 and bit 26
		 * for ch1. Might be a typo in the doc.
		 * For now, for this unique transition scale selection, set bit
		 * 27 for ch0 and ch1.
		 */
		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
		}

		for (i = 0; i < 4; i++) {
			val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
			val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
			vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
		}
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);

	/* LRC Bypass */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
	val |= DPIO_LRC_BYPASS;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3355
a4fc5ed6 3356static void
0301b3ac
JN
3357intel_get_adjust_train(struct intel_dp *intel_dp,
3358 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3359{
3360 uint8_t v = 0;
3361 uint8_t p = 0;
3362 int lane;
1a2eb460
KP
3363 uint8_t voltage_max;
3364 uint8_t preemph_max;
a4fc5ed6 3365
901c2daf 3366 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3367 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3368 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3369
3370 if (this_v > v)
3371 v = this_v;
3372 if (this_p > p)
3373 p = this_p;
3374 }
3375
1a2eb460 3376 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3377 if (v >= voltage_max)
3378 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3379
1a2eb460
KP
3380 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3381 if (p >= preemph_max)
3382 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3383
3384 for (lane = 0; lane < 4; lane++)
33a34e4e 3385 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3386}
3387
3388static uint32_t
5829975c 3389gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3390{
3cf2efb1 3391 uint32_t signal_levels = 0;
a4fc5ed6 3392
3cf2efb1 3393 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3394 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3395 default:
3396 signal_levels |= DP_VOLTAGE_0_4;
3397 break;
bd60018a 3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3399 signal_levels |= DP_VOLTAGE_0_6;
3400 break;
bd60018a 3401 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3402 signal_levels |= DP_VOLTAGE_0_8;
3403 break;
bd60018a 3404 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3405 signal_levels |= DP_VOLTAGE_1_2;
3406 break;
3407 }
3cf2efb1 3408 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3409 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3410 default:
3411 signal_levels |= DP_PRE_EMPHASIS_0;
3412 break;
bd60018a 3413 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3414 signal_levels |= DP_PRE_EMPHASIS_3_5;
3415 break;
bd60018a 3416 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3417 signal_levels |= DP_PRE_EMPHASIS_6;
3418 break;
bd60018a 3419 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3420 signal_levels |= DP_PRE_EMPHASIS_9_5;
3421 break;
3422 }
3423 return signal_levels;
3424}
3425
e3421a18
ZW
3426/* Gen6's DP voltage swing and pre-emphasis control */
3427static uint32_t
5829975c 3428gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3429{
3c5a62b5
YL
3430 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3431 DP_TRAIN_PRE_EMPHASIS_MASK);
3432 switch (signal_levels) {
bd60018a
SJ
3433 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3435 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3436 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3437 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3439 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3440 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3441 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3443 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3444 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3445 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3446 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3447 default:
3c5a62b5
YL
3448 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3449 "0x%x\n", signal_levels);
3450 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3451 }
3452}
3453
1a2eb460
KP
3454/* Gen7's DP voltage swing and pre-emphasis control */
3455static uint32_t
5829975c 3456gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3457{
3458 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3459 DP_TRAIN_PRE_EMPHASIS_MASK);
3460 switch (signal_levels) {
bd60018a 3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3462 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3463 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3464 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3466 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3467
bd60018a 3468 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3469 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3471 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3472
bd60018a 3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3474 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3476 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3477
3478 default:
3479 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3480 "0x%x\n", signal_levels);
3481 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3482 }
3483}
3484
/* Properly updates "DP" with the correct signal levels. */
/*
 * Dispatch to the platform-specific signal-level translation and merge the
 * resulting bits into the DP port register value *DP (masked by the
 * platform's swing/emphasis field). On VLV/CHV the levels are programmed
 * directly into the PHY and mask stays 0, so *DP is left unchanged.
 */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t signal_levels, mask = 0;
	uint8_t train_set = intel_dp->train_set[0];

	if (HAS_DDI(dev)) {
		signal_levels = ddi_signal_levels(intel_dp);

		/* Broxton carries no level bits in the port register */
		if (IS_BROXTON(dev))
			signal_levels = 0;
		else
			mask = DDI_BUF_EMP_MASK;
	} else if (IS_CHERRYVIEW(dev)) {
		signal_levels = chv_signal_levels(intel_dp);
	} else if (IS_VALLEYVIEW(dev)) {
		signal_levels = vlv_signal_levels(intel_dp);
	} else if (IS_GEN7(dev) && port == PORT_A) {
		signal_levels = gen7_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
	} else if (IS_GEN6(dev) && port == PORT_A) {
		signal_levels = gen6_edp_signal_levels(train_set);
		mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
	} else {
		signal_levels = gen4_signal_levels(train_set);
		mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
	}

	if (mask)
		DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);

	DRM_DEBUG_KMS("Using vswing level %d\n",
		      train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
	DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
		      (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			DP_TRAIN_PRE_EMPHASIS_SHIFT);

	*DP = (*DP & ~mask) | signal_levels;
}
3528
/*
 * Program the requested training pattern on the source (port register)
 * and tell the sink via DPCD (DP_TRAINING_PATTERN_SET plus, unless the
 * pattern is being disabled, the per-lane DP_TRAINING_LANEx_SET bytes in
 * one burst write).
 *
 * Returns true if the full DPCD write went through.
 */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t *DP,
			uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	uint8_t buf[sizeof(intel_dp->train_set) + 1];
	int ret, len;

	_intel_dp_set_link_train(intel_dp, DP, dp_train_pat);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	buf[0] = dp_train_pat;
	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
	    DP_TRAINING_PATTERN_DISABLE) {
		/* don't write DP_TRAINING_LANEx_SET on disable */
		len = 1;
	} else {
		/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
		memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
		len = intel_dp->lane_count + 1;
	}

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
				buf, len);

	return ret == len;
}
3561
/*
 * (Re)start link training from a clean state: zero the cached training
 * set (unless a previously validated set is being reused), program the
 * corresponding signal levels, and set the training pattern.
 */
static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			uint8_t dp_train_pat)
{
	if (!intel_dp->train_set_valid)
		memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
	intel_dp_set_signal_levels(intel_dp, DP);
	return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
3571
/*
 * Mid-training update: derive new per-lane drive settings from the sink's
 * requested adjustments in @link_status, program the matching source
 * signal levels, then push the new DP_TRAINING_LANEx_SET values to the
 * sink. Returns true iff all lane_count DPCD bytes were written.
 */
static bool
intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
			   const uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
		to_i915(intel_dig_port->base.base.dev);
	int ret;

	/* Fill intel_dp->train_set from the sink's adjust requests. */
	intel_get_adjust_train(intel_dp, link_status);
	intel_dp_set_signal_levels(intel_dp, DP);

	I915_WRITE(intel_dp->output_reg, *DP);
	POSTING_READ(intel_dp->output_reg);

	ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
				intel_dp->train_set, intel_dp->lane_count);

	return ret == intel_dp->lane_count;
}
3592
/*
 * On DDI platforms, switch the port to idle-pattern transmission after
 * training and, except on PORT_A, wait for the hardware to confirm the
 * minimum number of idle patterns has been sent. No-op on non-DDI.
 */
static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	uint32_t val;

	if (!HAS_DDI(dev))
		return;

	/* Request idle-pattern transmission in DP_TP_CTL. */
	val = I915_READ(DP_TP_CTL(port));
	val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
	val |= DP_TP_CTL_LINK_TRAIN_IDLE;
	I915_WRITE(DP_TP_CTL(port), val);

	/*
	 * On PORT_A we can have only eDP in SST mode. There the only reason
	 * we need to set idle transmission mode is to work around a HW issue
	 * where we enable the pipe while not in idle link-training mode.
	 * In this case there is requirement to wait for a minimum number of
	 * idle patterns to be sent.
	 */
	if (port == PORT_A)
		return;

	if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
		     1))
		DRM_ERROR("Timed out waiting for DP idle patterns\n");
}
3623
/* Enable corresponding port and start training pattern 1 */
/*
 * Clock-recovery phase of DP link training (DP spec 3.5.1.3): write the
 * link configuration to the sink, enable the port, then loop adjusting
 * vswing/pre-emphasis until the sink reports clock recovery on all lanes
 * or the retry budgets (5 same-voltage tries, 5 full restarts) run out.
 * The final port register value is cached in intel_dp->DP.
 */
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
	struct drm_device *dev = encoder->dev;
	int i;
	uint8_t voltage;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;
	uint8_t link_config[2];
	uint8_t link_bw, rate_select;

	if (HAS_DDI(dev))
		intel_ddi_prepare_link_retrain(encoder);

	/* Map the configured link rate to link_bw / rate_select encodings. */
	intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
			      &link_bw, &rate_select);

	/* Write the link configuration data */
	link_config[0] = link_bw;
	link_config[1] = intel_dp->lane_count;
	if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
	/* eDP 1.4 sinks with a rate table select the rate by index instead. */
	if (intel_dp->num_sink_rates)
		drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
				  &rate_select, 1);

	link_config[0] = 0;
	link_config[1] = DP_SET_ANSI_8B10B;
	drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);

	DP |= DP_PORT_EN;

	/* clock recovery */
	if (!intel_dp_reset_link_train(intel_dp, &DP,
				       DP_TRAINING_PATTERN_1 |
				       DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to enable link training\n");
		return;
	}

	voltage = 0xff;	/* impossible value, forces first-iteration mismatch */
	voltage_tries = 0;
	loop_tries = 0;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			break;
		}

		/*
		 * if we used previously trained voltage and pre-emphasis values
		 * and we don't get clock recovery, reset link training values
		 */
		if (intel_dp->train_set_valid) {
			DRM_DEBUG_KMS("clock recovery not ok, reset");
			/* clear the flag as we are not reusing train set */
			intel_dp->train_set_valid = false;
			if (!intel_dp_reset_link_train(intel_dp, &DP,
						       DP_TRAINING_PATTERN_1 |
						       DP_LINK_SCRAMBLING_DISABLE)) {
				DRM_ERROR("failed to enable link training\n");
				return;
			}
			continue;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count) {
			/* All lanes maxed out: restart training from scratch. */
			++loop_tries;
			if (loop_tries == 5) {
				DRM_ERROR("too many full retries, give up\n");
				break;
			}
			intel_dp_reset_link_train(intel_dp, &DP,
						  DP_TRAINING_PATTERN_1 |
						  DP_LINK_SCRAMBLING_DISABLE);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_ERROR("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
	}

	intel_dp->DP = DP;
}
3738
/*
 * Channel-equalization phase of DP link training: transmit pattern 2 (or
 * pattern 3 for HBR2 / TPS3-capable sinks) and iterate on the sink's
 * adjust requests until all lanes report symbol lock and inter-lane
 * alignment. Falls back to a full clock-recovery restart if CR is lost
 * or 5 EQ attempts fail; gives up entirely after 5 CR restarts. On
 * success, marks train_set_valid so the settings can be reused later.
 */
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;
	uint32_t training_pattern = DP_TRAINING_PATTERN_2;

	/* Training Pattern 3 for HBR2 or 1.2 devices that support it*/
	if (intel_dp->link_rate == 540000 || intel_dp->use_tps3)
		training_pattern = DP_TRAINING_PATTERN_3;

	/* channel equalization */
	if (!intel_dp_set_link_train(intel_dp, &DP,
				     training_pattern |
				     DP_LINK_SCRAMBLING_DISABLE)) {
		DRM_ERROR("failed to start channel equalization\n");
		return;
	}

	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			break;
		}

		drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		/* Make sure clock is still ok */
		if (!drm_dp_clock_recovery_ok(link_status,
					      intel_dp->lane_count)) {
			/* CR lost: redo the full training sequence. */
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			cr_tries++;
			continue;
		}

		if (drm_dp_channel_eq_ok(link_status,
					 intel_dp->lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp->train_set_valid = false;
			intel_dp_start_link_train(intel_dp);
			intel_dp_set_link_train(intel_dp, &DP,
						training_pattern |
						DP_LINK_SCRAMBLING_DISABLE);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Update training set as requested by target */
		if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
			DRM_ERROR("failed to update link training\n");
			break;
		}
		++tries;
	}

	intel_dp_set_idle_link_train(intel_dp);

	intel_dp->DP = DP;

	if (channel_eq) {
		intel_dp->train_set_valid = true;
		DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
	}
}
3823
/* Leave link-training mode: disable the training pattern on both the
 * source register and the sink (DP_TRAINING_PATTERN_DISABLE skips the
 * per-lane writes, see intel_dp_set_link_train()). */
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
{
	intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				DP_TRAINING_PATTERN_DISABLE);
}
3829
/*
 * Shut the DP port down on non-DDI platforms: first drop the link into
 * the idle training pattern, then clear the port-enable and audio bits.
 * The exact write order and the IBX transcoder-A dance below are
 * hardware requirements - do not reorder.
 */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	enum port port = intel_dig_port->port;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	/* DDI ports are torn down elsewhere; getting here is a bug. */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
		return;

	DRM_DEBUG_KMS("\n");

	/* Select the idle pattern; the mask/bit layout differs per platform. */
	if ((IS_GEN7(dev) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev) && port != PORT_A)) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
	} else {
		if (IS_CHERRYVIEW(dev))
			DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			DP &= ~DP_LINK_TRAIN_MASK;
		DP |= DP_LINK_TRAIN_PAT_IDLE;
	}
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * HW workaround for IBX, we need to move the port
	 * to transcoder A after disabling it to allow the
	 * matching HDMI port to be enabled on transcoder A.
	 */
	if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
		/* always enable with pattern 1 (as per spec) */
		DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
		DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);

		DP &= ~DP_PORT_EN;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
	}

	msleep(intel_dp->panel_power_down_delay);
}
3885
/*
 * Read and cache the sink's DPCD receiver-capability block, then derive
 * driver state from it: PSR/PSR2 support (eDP), TPS3 usability, the eDP
 * 1.4 supported-link-rate table, and the downstream-port info for branch
 * devices. Returns false if the AUX reads fail or no DPCD is present.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	/* Training Pattern 3 support, both source and sink */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
		intel_dp->use_tps3 = true;
		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
	} else
		intel_dp->use_tps3 = false;

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* Table is zero-terminated; stop at the first empty entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3980
0d198328
AJ
3981static void
3982intel_dp_probe_oui(struct intel_dp *intel_dp)
3983{
3984 u8 buf[3];
3985
3986 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3987 return;
3988
9d1a1031 3989 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3990 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3991 buf[0], buf[1], buf[2]);
3992
9d1a1031 3993 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3994 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3995 buf[0], buf[1], buf[2]);
3996}
3997
0e32b39c
DA
3998static bool
3999intel_dp_probe_mst(struct intel_dp *intel_dp)
4000{
4001 u8 buf[1];
4002
4003 if (!intel_dp->can_mst)
4004 return false;
4005
4006 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4007 return false;
4008
0e32b39c
DA
4009 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4010 if (buf[0] & DP_MST_CAP) {
4011 DRM_DEBUG_KMS("Sink is MST capable\n");
4012 intel_dp->is_mst = true;
4013 } else {
4014 DRM_DEBUG_KMS("Sink is not MST capable\n");
4015 intel_dp->is_mst = false;
4016 }
4017 }
0e32b39c
DA
4018
4019 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4020 return intel_dp->is_mst;
4021}
4022
/*
 * Stop sink CRC calculation by clearing DP_TEST_SINK_START in the sink's
 * DP_TEST_SINK register, and re-enable IPS (disabled for the CRC run by
 * intel_dp_sink_crc_start()) regardless of success. Returns 0 or -EIO.
 */
static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret = 0;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	/* Read-modify-write: clear only the start bit. */
	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf & ~DP_TEST_SINK_START) < 0) {
		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
		ret = -EIO;
		goto out;
	}

	intel_dp->sink_crc.started = false;
 out:
	/* IPS was disabled while CRCs were running; always restore it. */
	hsw_enable_ips(intel_crtc);
	return ret;
}
4048
/*
 * Start sink CRC calculation: verify the sink supports CRCs
 * (DP_TEST_CRC_SUPPORTED), record the current test count, disable IPS
 * (it interferes with the CRC run) and set DP_TEST_SINK_START. Stops a
 * still-running session first. Returns 0, -EIO or -ENOTTY.
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (intel_dp->sink_crc.started) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	/* Remember where the sink's CRC counter currently stands. */
	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		/* Failed to start: undo the IPS disable. */
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	intel_dp->sink_crc.started = true;
	return 0;
}
4084
/*
 * Fetch a fresh 6-byte CRC from the sink into @crc. Waits up to 6
 * vblanks for the sink's test counter to advance and for the CRC to
 * differ from the previously returned one, to avoid handing back a
 * stale value. Returns 0 on success, -EIO on AUX failure, -ETIMEDOUT
 * if the sink never produced a CRC.
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;
	bool old_equal_new;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

		/*
		 * Count might be reset during the loop. In this case
		 * last known count needs to be reset as well.
		 */
		if (count == 0)
			intel_dp->sink_crc.last_count = 0;

		if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
			ret = -EIO;
			goto stop;
		}

		/* Same count AND same CRC bytes => sink hasn't produced
		 * anything new yet; keep waiting. */
		old_equal_new = (count == intel_dp->sink_crc.last_count &&
				 !memcmp(intel_dp->sink_crc.last_crc, crc,
					 6 * sizeof(u8)));

	} while (--attempts && (count == 0 || old_equal_new));

	intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
	memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));

	if (attempts == 0) {
		if (old_equal_new) {
			/* Sink repeated the old CRC; return it but warn. */
			DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
		} else {
			DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
			ret = -ETIMEDOUT;
			goto stop;
		}
	}

stop:
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4144
a60f0e38
JB
4145static bool
4146intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4147{
9d1a1031
JN
4148 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4149 DP_DEVICE_SERVICE_IRQ_VECTOR,
4150 sink_irq_vector, 1) == 1;
a60f0e38
JB
4151}
4152
0e32b39c
DA
4153static bool
4154intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4155{
4156 int ret;
4157
4158 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4159 DP_SINK_COUNT_ESI,
4160 sink_irq_vector, 14);
4161 if (ret != 14)
4162 return false;
4163
4164 return true;
4165}
4166
c5d5ab7a
TP
4167static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4168{
4169 uint8_t test_result = DP_TEST_ACK;
4170 return test_result;
4171}
4172
4173static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4174{
4175 uint8_t test_result = DP_TEST_NAK;
4176 return test_result;
4177}
4178
4179static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4180{
c5d5ab7a 4181 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4182 struct intel_connector *intel_connector = intel_dp->attached_connector;
4183 struct drm_connector *connector = &intel_connector->base;
4184
4185 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4186 connector->edid_corrupt ||
559be30c
TP
4187 intel_dp->aux.i2c_defer_count > 6) {
4188 /* Check EDID read for NACKs, DEFERs and corruption
4189 * (DP CTS 1.2 Core r1.1)
4190 * 4.2.2.4 : Failed EDID read, I2C_NAK
4191 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4192 * 4.2.2.6 : EDID corruption detected
4193 * Use failsafe mode for all cases
4194 */
4195 if (intel_dp->aux.i2c_nack_count > 0 ||
4196 intel_dp->aux.i2c_defer_count > 0)
4197 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4198 intel_dp->aux.i2c_nack_count,
4199 intel_dp->aux.i2c_defer_count);
4200 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4201 } else {
f79b468e
TS
4202 struct edid *block = intel_connector->detect_edid;
4203
4204 /* We have to write the checksum
4205 * of the last block read
4206 */
4207 block += intel_connector->detect_edid->extensions;
4208
559be30c
TP
4209 if (!drm_dp_dpcd_write(&intel_dp->aux,
4210 DP_TEST_EDID_CHECKSUM,
f79b468e 4211 &block->checksum,
5a1cc655 4212 1))
559be30c
TP
4213 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4214
4215 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4216 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4217 }
4218
4219 /* Set test active flag here so userspace doesn't interrupt things */
4220 intel_dp->compliance_test_active = 1;
4221
c5d5ab7a
TP
4222 return test_result;
4223}
4224
4225static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4226{
c5d5ab7a
TP
4227 uint8_t test_result = DP_TEST_NAK;
4228 return test_result;
4229}
4230
4231static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4232{
4233 uint8_t response = DP_TEST_NAK;
4234 uint8_t rxdata = 0;
4235 int status = 0;
4236
559be30c 4237 intel_dp->compliance_test_active = 0;
c5d5ab7a 4238 intel_dp->compliance_test_type = 0;
559be30c
TP
4239 intel_dp->compliance_test_data = 0;
4240
c5d5ab7a
TP
4241 intel_dp->aux.i2c_nack_count = 0;
4242 intel_dp->aux.i2c_defer_count = 0;
4243
4244 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4245 if (status <= 0) {
4246 DRM_DEBUG_KMS("Could not read test request from sink\n");
4247 goto update_status;
4248 }
4249
4250 switch (rxdata) {
4251 case DP_TEST_LINK_TRAINING:
4252 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4253 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4254 response = intel_dp_autotest_link_training(intel_dp);
4255 break;
4256 case DP_TEST_LINK_VIDEO_PATTERN:
4257 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4258 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4259 response = intel_dp_autotest_video_pattern(intel_dp);
4260 break;
4261 case DP_TEST_LINK_EDID_READ:
4262 DRM_DEBUG_KMS("EDID test requested\n");
4263 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4264 response = intel_dp_autotest_edid(intel_dp);
4265 break;
4266 case DP_TEST_LINK_PHY_TEST_PATTERN:
4267 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4268 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4269 response = intel_dp_autotest_phy_pattern(intel_dp);
4270 break;
4271 default:
4272 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4273 break;
4274 }
4275
4276update_status:
4277 status = drm_dp_dpcd_write(&intel_dp->aux,
4278 DP_TEST_RESPONSE,
4279 &response, 1);
4280 if (status <= 0)
4281 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4282}
4283
/*
 * MST interrupt servicing: read the sink's ESI block, retrain if channel
 * equalization was lost, hand the event to the MST manager, acknowledge
 * the handled bits and loop while new events arrive. If the ESI read
 * fails, MST is torn down and a hotplug event is sent so the topology
 * gets re-probed. Returns the MST manager's result, or -EINVAL when not
 * in MST mode / after teardown.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_complete_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced event bits; retry the
				 * AUX write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4341
/*
 * According to DP spec
 * 5.1.2:
 * 1. Read DPCD
 * 2. Configure link according to Receiver Capabilities
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 *
 * Called on a short HPD pulse with the connection mutex held: clear any
 * pending sink IRQ and retrain the link if channel EQ has been lost.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/* Nothing to retrain if the encoder isn't driving an active pipe. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
a4fc5ed6 4398
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Decide connector status from the DPCD: a sink with no downstream port
 * is connected outright; for branch devices, use SINK_COUNT when HPD is
 * propagated, otherwise poke the DDC bus, and fall back to "unknown"
 * for port types that can't be probed reliably.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse downstream-port type is known. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4448
d410b56d
CW
4449static enum drm_connector_status
4450edp_detect(struct intel_dp *intel_dp)
4451{
4452 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4453 enum drm_connector_status status;
4454
4455 status = intel_panel_detect(dev);
4456 if (status == connector_status_unknown)
4457 status = connector_status_connected;
4458
4459 return status;
4460}
4461
/*
 * PCH platform detection: check the live hot-plug status first, then
 * confirm by reading the DPCD.
 */
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	/* No live HPD status -> nothing attached. */
	if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}
4474
2a592bec
DA
4475static int g4x_digital_port_connected(struct drm_device *dev,
4476 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4477{
a4fc5ed6 4478 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4479 uint32_t bit;
5eb08b69 4480
232a6ee9
TP
4481 if (IS_VALLEYVIEW(dev)) {
4482 switch (intel_dig_port->port) {
4483 case PORT_B:
4484 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4485 break;
4486 case PORT_C:
4487 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4488 break;
4489 case PORT_D:
4490 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4491 break;
4492 default:
2a592bec 4493 return -EINVAL;
232a6ee9
TP
4494 }
4495 } else {
4496 switch (intel_dig_port->port) {
4497 case PORT_B:
4498 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4499 break;
4500 case PORT_C:
4501 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4502 break;
4503 case PORT_D:
4504 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4505 break;
4506 default:
2a592bec 4507 return -EINVAL;
232a6ee9 4508 }
a4fc5ed6
KP
4509 }
4510
10f76a38 4511 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4512 return 0;
4513 return 1;
4514}
4515
4516static enum drm_connector_status
4517g4x_dp_detect(struct intel_dp *intel_dp)
4518{
4519 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4520 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4521 int ret;
4522
4523 /* Can't disconnect eDP, but you can close the lid... */
4524 if (is_edp(intel_dp)) {
4525 enum drm_connector_status status;
4526
4527 status = intel_panel_detect(dev);
4528 if (status == connector_status_unknown)
4529 status = connector_status_connected;
4530 return status;
4531 }
4532
4533 ret = g4x_digital_port_connected(dev, intel_dig_port);
4534 if (ret == -EINVAL)
4535 return connector_status_unknown;
4536 else if (ret == 0)
a4fc5ed6
KP
4537 return connector_status_disconnected;
4538
26d61aad 4539 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4540}
4541
8c241fef 4542static struct edid *
beb60608 4543intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4544{
beb60608 4545 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4546
9cd300e0
JN
4547 /* use cached edid if we have one */
4548 if (intel_connector->edid) {
9cd300e0
JN
4549 /* invalid edid */
4550 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4551 return NULL;
4552
55e9edeb 4553 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4554 } else
4555 return drm_get_edid(&intel_connector->base,
4556 &intel_dp->aux.ddc);
4557}
8c241fef 4558
beb60608
CW
4559static void
4560intel_dp_set_edid(struct intel_dp *intel_dp)
4561{
4562 struct intel_connector *intel_connector = intel_dp->attached_connector;
4563 struct edid *edid;
8c241fef 4564
beb60608
CW
4565 edid = intel_dp_get_edid(intel_dp);
4566 intel_connector->detect_edid = edid;
4567
4568 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4569 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4570 else
4571 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4572}
4573
beb60608
CW
4574static void
4575intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4576{
beb60608 4577 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4578
beb60608
CW
4579 kfree(intel_connector->detect_edid);
4580 intel_connector->detect_edid = NULL;
9cd300e0 4581
beb60608
CW
4582 intel_dp->has_audio = false;
4583}
d6f24d0f 4584
beb60608
CW
4585static enum intel_display_power_domain
4586intel_dp_power_get(struct intel_dp *dp)
4587{
4588 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4589 enum intel_display_power_domain power_domain;
4590
4591 power_domain = intel_display_port_power_domain(encoder);
4592 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4593
4594 return power_domain;
4595}
d6f24d0f 4596
beb60608
CW
4597static void
4598intel_dp_power_put(struct intel_dp *dp,
4599 enum intel_display_power_domain power_domain)
4600{
4601 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4602 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4603}
4604
a9756bb5
ZW
/*
 * .detect hook for DP/eDP connectors.
 *
 * Probes the port with the platform-appropriate helper, caches the sink's
 * EDID for later .get_modes calls, and services any pending sink IRQs.
 * Ports currently driven in MST mode report disconnected here (see the
 * inline comment below).
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);

	/* Throw away any EDID cached by a previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	power_domain = intel_dp_power_get(intel_dp);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);
	if (status != connector_status_connected)
		goto out;

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/* Cache the EDID and derive has_audio for this sink. */
	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	/* Always balance the power reference taken above. */
	intel_dp_power_put(intel_dp, power_domain);
	return status;
}
4676
beb60608
CW
4677static void
4678intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4679{
df0e9248 4680 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4681 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4682 enum intel_display_power_domain power_domain;
a4fc5ed6 4683
beb60608
CW
4684 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4685 connector->base.id, connector->name);
4686 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4687
beb60608
CW
4688 if (connector->status != connector_status_connected)
4689 return;
671dedd2 4690
beb60608
CW
4691 power_domain = intel_dp_power_get(intel_dp);
4692
4693 intel_dp_set_edid(intel_dp);
4694
4695 intel_dp_power_put(intel_dp, power_domain);
4696
4697 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4698 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4699}
4700
4701static int intel_dp_get_modes(struct drm_connector *connector)
4702{
4703 struct intel_connector *intel_connector = to_intel_connector(connector);
4704 struct edid *edid;
4705
4706 edid = intel_connector->detect_edid;
4707 if (edid) {
4708 int ret = intel_connector_update_modes(connector, edid);
4709 if (ret)
4710 return ret;
4711 }
32f9d658 4712
f8779fda 4713 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4714 if (is_edp(intel_attached_dp(connector)) &&
4715 intel_connector->panel.fixed_mode) {
f8779fda 4716 struct drm_display_mode *mode;
beb60608
CW
4717
4718 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4719 intel_connector->panel.fixed_mode);
f8779fda 4720 if (mode) {
32f9d658
ZW
4721 drm_mode_probed_add(connector, mode);
4722 return 1;
4723 }
4724 }
beb60608 4725
32f9d658 4726 return 0;
a4fc5ed6
KP
4727}
4728
1aad7ac0
CW
4729static bool
4730intel_dp_detect_audio(struct drm_connector *connector)
4731{
1aad7ac0 4732 bool has_audio = false;
beb60608 4733 struct edid *edid;
1aad7ac0 4734
beb60608
CW
4735 edid = to_intel_connector(connector)->detect_edid;
4736 if (edid)
1aad7ac0 4737 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4738
1aad7ac0
CW
4739 return has_audio;
4740}
4741
f684960e
CW
/*
 * .set_property hook: handles the mutable DP connector properties —
 * force_audio, broadcast RGB range, and (eDP only) panel scaling mode.
 * A change that affects the active output falls through to "done" and
 * restores the mode on the encoder's CRTC so it takes effect.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Store the raw value first; failure aborts before any driver state
	 * is touched. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		/* AUTO re-derives audio support from the cached EDID. */
		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change — skip the modeset restore. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change — skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	/* Unknown property for this connector. */
	return -EINVAL;

done:
	/* Re-apply the current mode so the property change takes effect. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4829
a4fc5ed6 4830static void
73845adf 4831intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4832{
1d508706 4833 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4834
10e972d3 4835 kfree(intel_connector->detect_edid);
beb60608 4836
9cd300e0
JN
4837 if (!IS_ERR_OR_NULL(intel_connector->edid))
4838 kfree(intel_connector->edid);
4839
acd8db10
PZ
4840 /* Can't call is_edp() since the encoder may have been destroyed
4841 * already. */
4842 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4843 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4844
a4fc5ed6 4845 drm_connector_cleanup(connector);
55f78c43 4846 kfree(connector);
a4fc5ed6
KP
4847}
4848
/*
 * .destroy hook for the DP encoder: unregister the AUX channel and MST
 * state, force panel VDD off for eDP, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	drm_dp_aux_unregister(&intel_dp->aux);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		/* Stop the delayed VDD-off work before syncing VDD state. */
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4874
07f9cd0b
ID
/* Suspend hook: make sure eDP panel VDD is really off before suspending. */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4891
49e6bc51
VS
/*
 * Re-sync software VDD tracking with hardware state at boot/resume:
 * if the BIOS left VDD enabled, take the matching power domain reference
 * and schedule the normal delayed VDD off. Caller holds pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4916
6d93c0c4
ID
/*
 * drm_encoder_funcs.reset: re-sync eDP power sequencer software state with
 * whatever the BIOS programmed. No-op for non-eDP encoders.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4939
/* Connector ops for DP/eDP: detection, properties, and atomic state glue. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4951
/* Probe helpers: mode listing, mode validation, and encoder selection. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4957
/* Encoder ops: reset re-syncs PPS state, destroy tears the port down. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4962
/*
 * Hotplug pulse handler for DP ports.
 *
 * Long pulses re-probe the port (live status, DPCD, OUI, MST); short
 * pulses service MST sink IRQs or re-check link status. Returns
 * IRQ_HANDLED on the handled paths; the mst_fail path tears down MST
 * mode and returns IRQ_NONE.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		/* Port not physically connected any more? */
		if (HAS_PCH_SPLIT(dev)) {
			if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
				goto mst_fail;
		} else {
			if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
				goto mst_fail;
		}

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		if (!intel_dp_probe_mst(intel_dp))
			goto mst_fail;

	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			/*
			 * we'll check the link status via the normal hot plug path later -
			 * but for short hpds we should check it now
			 */
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5048
e3421a18
ZW
5049/* Return which DP Port should be selected for Transcoder DP control */
5050int
0206e353 5051intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5052{
5053 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5054 struct intel_encoder *intel_encoder;
5055 struct intel_dp *intel_dp;
e3421a18 5056
fa90ecef
PZ
5057 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5058 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5059
fa90ecef
PZ
5060 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5061 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5062 return intel_dp->output_reg;
e3421a18 5063 }
ea5b213a 5064
e3421a18
ZW
5065 return -1;
5066}
5067
36e83a18 5068/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 5069bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5070{
5071 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5072 union child_device_config *p_child;
36e83a18 5073 int i;
5d8a7752
VS
5074 static const short port_mapping[] = {
5075 [PORT_B] = PORT_IDPB,
5076 [PORT_C] = PORT_IDPC,
5077 [PORT_D] = PORT_IDPD,
5078 };
36e83a18 5079
3b32a35b
VS
5080 if (port == PORT_A)
5081 return true;
5082
41aa3448 5083 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5084 return false;
5085
41aa3448
RV
5086 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5087 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5088
5d8a7752 5089 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5090 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5091 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5092 return true;
5093 }
5094 return false;
5095}
5096
0e32b39c 5097void
f684960e
CW
5098intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5099{
53b41837
YN
5100 struct intel_connector *intel_connector = to_intel_connector(connector);
5101
3f43c48d 5102 intel_attach_force_audio_property(connector);
e953fd7b 5103 intel_attach_broadcast_rgb_property(connector);
55bc60db 5104 intel_dp->color_range_auto = true;
53b41837
YN
5105
5106 if (is_edp(intel_dp)) {
5107 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5108 drm_object_attach_property(
5109 &connector->base,
53b41837 5110 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5111 DRM_MODE_SCALE_ASPECT);
5112 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5113 }
f684960e
CW
5114}
5115
dada1a9f
ID
/* Record the current time in the panel power sequencing timestamps. */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->last_power_cycle = jiffies;
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
5122
67a54566
DV
/*
 * Read back the panel power sequencer delays from the hardware registers
 * and the VBT, take the max of each (falling back to eDP 1.3 spec limits
 * when both are unset), and cache the result in intel_dp->pps_delays and
 * the per-delay intel_dp fields. No-op if already initialized. Caller
 * must hold pps_mutex (lockdep-asserted below).
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	/* Select the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	/* BXT keeps the power cycle delay in PP_CONTROL, zero-based. */
	if (IS_BROXTON(dev)) {
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert 100usec units to the ms values the wait helpers use. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5245
/*
 * Program the previously computed pps_delays into the panel power
 * sequencer registers, including the pp clock divisor and — on VLV and
 * IBX/CPT — the panel port select bits. Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* Select the per-platform PPS register set. */
	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the power cycle delay in PP_CONTROL instead. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5333
b33a2815
VK
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	u32 reg, val;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	/* NOTE(review): config is assigned but never read in this function —
	 * candidate for removal. */
	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* The requested rate matching the panel's downclock mode selects
	 * the low refresh rate. */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	/* Gen8+ (except CHV) switches RR by reprogramming link M/N values;
	 * older gens toggle the RR-mode-switch bit in PIPECONF. */
	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		val = I915_READ(reg);

		if (index > DRRS_HIGH_RR) {
			if (IS_VALLEYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			if (IS_VALLEYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5437
b33a2815
VK
5438/**
5439 * intel_edp_drrs_enable - init drrs struct if supported
5440 * @intel_dp: DP struct
5441 *
5442 * Initializes frontbuffer_bits and drrs.dp
5443 */
c395578e
VK
5444void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5445{
5446 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5447 struct drm_i915_private *dev_priv = dev->dev_private;
5448 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5449 struct drm_crtc *crtc = dig_port->base.base.crtc;
5450 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5451
5452 if (!intel_crtc->config->has_drrs) {
5453 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5454 return;
5455 }
5456
5457 mutex_lock(&dev_priv->drrs.mutex);
5458 if (WARN_ON(dev_priv->drrs.dp)) {
5459 DRM_ERROR("DRRS already enabled\n");
5460 goto unlock;
5461 }
5462
5463 dev_priv->drrs.busy_frontbuffer_bits = 0;
5464
5465 dev_priv->drrs.dp = intel_dp;
5466
5467unlock:
5468 mutex_unlock(&dev_priv->drrs.mutex);
5469}
5470
b33a2815
VK
5471/**
5472 * intel_edp_drrs_disable - Disable DRRS
5473 * @intel_dp: DP struct
5474 *
5475 */
c395578e
VK
5476void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5477{
5478 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5479 struct drm_i915_private *dev_priv = dev->dev_private;
5480 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5481 struct drm_crtc *crtc = dig_port->base.base.crtc;
5482 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5483
5484 if (!intel_crtc->config->has_drrs)
5485 return;
5486
5487 mutex_lock(&dev_priv->drrs.mutex);
5488 if (!dev_priv->drrs.dp) {
5489 mutex_unlock(&dev_priv->drrs.mutex);
5490 return;
5491 }
5492
5493 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5494 intel_dp_set_drrs_state(dev_priv->dev,
5495 intel_dp->attached_connector->panel.
5496 fixed_mode->vrefresh);
5497
5498 dev_priv->drrs.dp = NULL;
5499 mutex_unlock(&dev_priv->drrs.mutex);
5500
5501 cancel_delayed_work_sync(&dev_priv->drrs.work);
5502}
5503
/*
 * Delayed worker that drops to the panel's downclock (low) refresh rate
 * after a period of frontbuffer idleness.  Scheduled with a 1 second
 * delay by intel_edp_drrs_flush().
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	/* DRRS may have been disabled since this work was scheduled. */
	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
5533
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Async cancel: the downclock worker rechecks busy bits anyway. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
5576
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev: DRM device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_device *dev,
		unsigned frontbuffer_bits)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Async cancel; the work is rescheduled below if everything is idle. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only bits belonging to the DRRS pipe are relevant. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv->dev,
				dev_priv->drrs.dp->attached_connector->panel.
				fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
5628
b33a2815
VK
5629/**
5630 * DOC: Display Refresh Rate Switching (DRRS)
5631 *
5632 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5633 * which enables switching between low and high refresh rates,
5634 * dynamically, based on the usage scenario. This feature is applicable
5635 * for internal panels.
5636 *
5637 * Indication that the panel supports DRRS is given by the panel EDID, which
5638 * would list multiple refresh rates for one resolution.
5639 *
5640 * DRRS is of 2 types - static and seamless.
5641 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5642 * (may appear as a blink on screen) and is used in dock-undock scenario.
5643 * Seamless DRRS involves changing RR without any visual effect to the user
5644 * and can be used during normal system usage. This is done by programming
5645 * certain registers.
5646 *
5647 * Support for static/seamless DRRS may be indicated in the VBT based on
5648 * inputs from the panel spec.
5649 *
5650 * DRRS saves power by switching to low RR based on usage scenarios.
5651 *
5652 * eDP DRRS:-
5653 * The implementation is based on frontbuffer tracking implementation.
5654 * When there is a disturbance on the screen triggered by user activity or a
5655 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5656 * When there is no movement on screen, after a timeout of 1 second, a switch
5657 * to low RR is made.
5658 * For integration with frontbuffer tracking code,
5659 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5660 *
5661 * DRRS can be further extended to support other internal panels and also
5662 * the scenario of video playback wherein RR is set based on the rate
5663 * requested by userspace.
5664 */
5665
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @intel_connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *intel_connector,
		struct drm_display_mode *fixed_mode)
{
	struct drm_connector *connector = &intel_connector->base;
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *downclock_mode = NULL;

	/* Work and mutex are set up even if DRRS turns out unsupported. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_INFO(dev)->gen <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_find_panel_downclock
					(dev, fixed_mode, connector);

	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
5715
/*
 * eDP-specific part of connector init: sanitize VDD state, cache DPCD and
 * EDID, program the panel power sequencer registers, pick fixed/downclock
 * modes and set up the backlight.
 *
 * Returns false if the DPCD read fails (panel presumed to be a "ghost"),
 * true otherwise.  Non-eDP ports return true immediately — nothing to do.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* Unparseable EDID: keep an error sentinel instead. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight_power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5822
/*
 * Initialize a DP/eDP connector on the given digital port: select the AUX
 * vfuncs per platform, register the connector, wire up the hotplug pin,
 * set up the panel power sequencer (eDP), AUX channel, MST, and finally
 * the eDP panel itself.
 *
 * Returns false — after unwinding the connector — if eDP panel init fails
 * or eDP is requested on an invalid VLV/CHV port.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* BXT A-stepping workaround: port B hotplug uses pin A. */
		if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	intel_dp_aux_init(intel_dp, intel_connector);

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		drm_dp_aux_unregister(&intel_dp->aux);
		if (is_edp(intel_dp)) {
			cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
			/*
			 * vdd might still be enabled due to the delayed vdd off.
			 * Make sure vdd is actually turned off here.
			 */
			pps_lock(intel_dp);
			edp_panel_vdd_off_sync(intel_dp);
			pps_unlock(intel_dp);
		}
		drm_connector_unregister(connector);
		drm_connector_cleanup(connector);
		return false;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;
}
/*
 * Allocate and register a DP digital port for @output_reg/@port: create
 * the encoder, wire up the per-platform enable/disable hooks, and hand
 * off to intel_dp_init_connector().  On connector-init failure the
 * encoder and both allocations are cleaned up.
 */
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector) {
		kfree(intel_dig_port);
		return;
	}

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			 DRM_MODE_ENCODER_TMDS);

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Per-platform modeset hooks: CHV, VLV, then everything else. */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	intel_dig_port->dp.output_reg = output_reg;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* On CHV, port D is tied to pipe C; other ports use pipes A/B. */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
		drm_encoder_cleanup(encoder);
		kfree(intel_dig_port);
		kfree(intel_connector);
	}
}
0e32b39c
DA
6039
6040void intel_dp_mst_suspend(struct drm_device *dev)
6041{
6042 struct drm_i915_private *dev_priv = dev->dev_private;
6043 int i;
6044
6045 /* disable MST */
6046 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6047 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6048 if (!intel_dig_port)
6049 continue;
6050
6051 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6052 if (!intel_dig_port->dp.can_mst)
6053 continue;
6054 if (intel_dig_port->dp.is_mst)
6055 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6056 }
6057 }
6058}
6059
6060void intel_dp_mst_resume(struct drm_device *dev)
6061{
6062 struct drm_i915_private *dev_priv = dev->dev_private;
6063 int i;
6064
6065 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6066 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6067 if (!intel_dig_port)
6068 continue;
6069 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6070 int ret;
6071
6072 if (!intel_dig_port->dp.can_mst)
6073 continue;
6074
6075 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6076 if (ret != 0) {
6077 intel_dp_check_mst_status(&intel_dig_port->dp);
6078 }
6079 }
6080 }
6081}
This page took 1.274192 seconds and 5 git commands to generate.