drm/i915: Don't copy the DP source rates arrays
drivers/gpu/drm/i915/intel_dp.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which can use additional link rates.
 * Below we list only the fixed rates and exclude the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional divider for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

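/*
 * Note (explanatory aside, not from the original file): these values are
 * link symbol clocks in kHz, so 162000, 270000 and 540000 correspond to the
 * standard RBR (1.62 Gbps), HBR (2.7 Gbps) and HBR2 (5.4 Gbps) rates, while
 * the additional gen9 entries map to the intermediate link rates that
 * eDP 1.4 allows.
 */
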
/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
	struct drm_device *dev = intel_dp->attached_connector->base.dev;

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */
		if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
			/* WaDisableHBR2:skl */
			max_link_bw = DP_LINK_BW_2_7;
		else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) ||
			  INTEL_INFO(dev)->gen >= 8) &&
			 intel_dp->dpcd[DP_DPCD_REV] >= 0x12)
			max_link_bw = DP_LINK_BW_5_4;
		else
			max_link_bw = DP_LINK_BW_2_7;
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

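/*
 * For example (illustrative numbers, not from the original file): a 4-lane
 * 2.7GHz configuration gives intel_dp_max_data_rate(270000, 4) == 864000,
 * while a 1920x1080@60 mode (pixel clock ~148500 kHz) at 18bpp needs
 * intel_dp_link_required(148500, 18) == 267300, so it fits comfortably.
 */
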
c19de8eb 200static enum drm_mode_status
a4fc5ed6
KP
201intel_dp_mode_valid(struct drm_connector *connector,
202 struct drm_display_mode *mode)
203{
df0e9248 204 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
205 struct intel_connector *intel_connector = to_intel_connector(connector);
206 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
207 int target_clock = mode->clock;
208 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 209
dd06f90e
JN
210 if (is_edp(intel_dp) && fixed_mode) {
211 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
212 return MODE_PANEL;
213
dd06f90e 214 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 215 return MODE_PANEL;
03afc4a2
DV
216
217 target_clock = fixed_mode->clock;
7de56f43
ZY
218 }
219
36008365 220 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
eeb6324d 221 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
222
223 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
224 mode_rate = intel_dp_link_required(target_clock, 18);
225
226 if (mode_rate > max_rate)
c4867936 227 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
228
229 if (mode->clock < 10000)
230 return MODE_CLOCK_LOW;
231
0af78a2b
DV
232 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
233 return MODE_H_ILLEGAL;
234
a4fc5ed6
KP
235 return MODE_OK;
236}
237
a4f1289e 238uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
239{
240 int i;
241 uint32_t v = 0;
242
243 if (src_bytes > 4)
244 src_bytes = 4;
245 for (i = 0; i < src_bytes; i++)
246 v |= ((uint32_t) src[i]) << ((3-i) * 8);
247 return v;
248}
249
c2af70e2 250static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
251{
252 int i;
253 if (dst_bytes > 4)
254 dst_bytes = 4;
255 for (i = 0; i < dst_bytes; i++)
256 dst[i] = src >> ((3-i) * 8);
257}
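/*
 * Explanatory aside (added, not in the original file): the two helpers above
 * are inverses, packing up to four bytes MSB-first into the AUX data register
 * format. E.g. packing { 0x12, 0x34 } yields 0x12340000, and unpacking
 * 0x12340000 into two bytes restores { 0x12, 0x34 }.
 */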
258
fb0f8fbf
KP
259/* hrawclock is 1/4 the FSB frequency */
260static int
261intel_hrawclk(struct drm_device *dev)
262{
263 struct drm_i915_private *dev_priv = dev->dev_private;
264 uint32_t clkcfg;
265
9473c8f4
VP
266 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
267 if (IS_VALLEYVIEW(dev))
268 return 200;
269
fb0f8fbf
KP
270 clkcfg = I915_READ(CLKCFG);
271 switch (clkcfg & CLKCFG_FSB_MASK) {
272 case CLKCFG_FSB_400:
273 return 100;
274 case CLKCFG_FSB_533:
275 return 133;
276 case CLKCFG_FSB_667:
277 return 166;
278 case CLKCFG_FSB_800:
279 return 200;
280 case CLKCFG_FSB_1067:
281 return 266;
282 case CLKCFG_FSB_1333:
283 return 333;
284 /* these two are just a guess; one of them might be right */
285 case CLKCFG_FSB_1600:
286 case CLKCFG_FSB_1600_ALT:
287 return 400;
288 default:
289 return 133;
290 }
291}
292
bf13e81b
JN
293static void
294intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 295 struct intel_dp *intel_dp);
bf13e81b
JN
296static void
297intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 298 struct intel_dp *intel_dp);
bf13e81b 299
773538e8
VS
300static void pps_lock(struct intel_dp *intel_dp)
301{
302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
303 struct intel_encoder *encoder = &intel_dig_port->base;
304 struct drm_device *dev = encoder->base.dev;
305 struct drm_i915_private *dev_priv = dev->dev_private;
306 enum intel_display_power_domain power_domain;
307
308 /*
309 * See vlv_power_sequencer_reset() why we need
310 * a power domain reference here.
311 */
312 power_domain = intel_display_port_power_domain(encoder);
313 intel_display_power_get(dev_priv, power_domain);
314
315 mutex_lock(&dev_priv->pps_mutex);
316}
317
318static void pps_unlock(struct intel_dp *intel_dp)
319{
320 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
321 struct intel_encoder *encoder = &intel_dig_port->base;
322 struct drm_device *dev = encoder->base.dev;
323 struct drm_i915_private *dev_priv = dev->dev_private;
324 enum intel_display_power_domain power_domain;
325
326 mutex_unlock(&dev_priv->pps_mutex);
327
328 power_domain = intel_display_port_power_domain(encoder);
329 intel_display_power_put(dev_priv, power_domain);
330}
331
961a0db0
VS
332static void
333vlv_power_sequencer_kick(struct intel_dp *intel_dp)
334{
335 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
336 struct drm_device *dev = intel_dig_port->base.base.dev;
337 struct drm_i915_private *dev_priv = dev->dev_private;
338 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 339 bool pll_enabled;
961a0db0
VS
340 uint32_t DP;
341
342 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
343 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
344 pipe_name(pipe), port_name(intel_dig_port->port)))
345 return;
346
347 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
348 pipe_name(pipe), port_name(intel_dig_port->port));
349
350 /* Preserve the BIOS-computed detected bit. This is
351 * supposed to be read-only.
352 */
353 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
354 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
355 DP |= DP_PORT_WIDTH(1);
356 DP |= DP_LINK_TRAIN_PAT_1;
357
358 if (IS_CHERRYVIEW(dev))
359 DP |= DP_PIPE_SELECT_CHV(pipe);
360 else if (pipe == PIPE_B)
361 DP |= DP_PIPEB_SELECT;
362
d288f65f
VS
363 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
364
365 /*
366 * The DPLL for the pipe must be enabled for this to work.
367 * So enable temporarily it if it's not already enabled.
368 */
369 if (!pll_enabled)
370 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
371 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
372
961a0db0
VS
373 /*
374 * Similar magic as in intel_dp_enable_port().
375 * We _must_ do this port enable + disable trick
 376 * to make this power sequencer lock onto the port.
377 * Otherwise even VDD force bit won't work.
378 */
379 I915_WRITE(intel_dp->output_reg, DP);
380 POSTING_READ(intel_dp->output_reg);
381
382 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
383 POSTING_READ(intel_dp->output_reg);
384
385 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
386 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
387
388 if (!pll_enabled)
389 vlv_force_pll_off(dev, pipe);
961a0db0
VS
390}
391
bf13e81b
JN
392static enum pipe
393vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
394{
395 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
396 struct drm_device *dev = intel_dig_port->base.base.dev;
397 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
398 struct intel_encoder *encoder;
399 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 400 enum pipe pipe;
bf13e81b 401
e39b999a 402 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 403
a8c3344e
VS
404 /* We should never land here with regular DP ports */
405 WARN_ON(!is_edp(intel_dp));
406
a4a5d2f8
VS
407 if (intel_dp->pps_pipe != INVALID_PIPE)
408 return intel_dp->pps_pipe;
409
410 /*
411 * We don't have power sequencer currently.
412 * Pick one that's not used by other ports.
413 */
414 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
415 base.head) {
416 struct intel_dp *tmp;
417
418 if (encoder->type != INTEL_OUTPUT_EDP)
419 continue;
420
421 tmp = enc_to_intel_dp(&encoder->base);
422
423 if (tmp->pps_pipe != INVALID_PIPE)
424 pipes &= ~(1 << tmp->pps_pipe);
425 }
426
427 /*
428 * Didn't find one. This should not happen since there
429 * are two power sequencers and up to two eDP ports.
430 */
431 if (WARN_ON(pipes == 0))
a8c3344e
VS
432 pipe = PIPE_A;
433 else
434 pipe = ffs(pipes) - 1;
a4a5d2f8 435
a8c3344e
VS
436 vlv_steal_power_sequencer(dev, pipe);
437 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
438
439 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
440 pipe_name(intel_dp->pps_pipe),
441 port_name(intel_dig_port->port));
442
443 /* init power sequencer on this pipe and port */
36b5f425
VS
444 intel_dp_init_panel_power_sequencer(dev, intel_dp);
445 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 446
961a0db0
VS
447 /*
448 * Even vdd force doesn't work until we've made
449 * the power sequencer lock in on the port.
450 */
451 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
452
453 return intel_dp->pps_pipe;
454}
455
6491ab27
VS
456typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
457 enum pipe pipe);
458
459static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
460 enum pipe pipe)
461{
462 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
463}
464
465static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
466 enum pipe pipe)
467{
468 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
469}
470
471static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
472 enum pipe pipe)
473{
474 return true;
475}
bf13e81b 476
a4a5d2f8 477static enum pipe
6491ab27
VS
478vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
479 enum port port,
480 vlv_pipe_check pipe_check)
a4a5d2f8
VS
481{
482 enum pipe pipe;
bf13e81b 483
bf13e81b
JN
484 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
485 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
486 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
487
488 if (port_sel != PANEL_PORT_SELECT_VLV(port))
489 continue;
490
6491ab27
VS
491 if (!pipe_check(dev_priv, pipe))
492 continue;
493
a4a5d2f8 494 return pipe;
bf13e81b
JN
495 }
496
a4a5d2f8
VS
497 return INVALID_PIPE;
498}
499
500static void
501vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
502{
503 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
504 struct drm_device *dev = intel_dig_port->base.base.dev;
505 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
506 enum port port = intel_dig_port->port;
507
508 lockdep_assert_held(&dev_priv->pps_mutex);
509
510 /* try to find a pipe with this port selected */
6491ab27
VS
511 /* first pick one where the panel is on */
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_has_pp_on);
514 /* didn't find one? pick one where vdd is on */
515 if (intel_dp->pps_pipe == INVALID_PIPE)
516 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
517 vlv_pipe_has_vdd_on);
518 /* didn't find one? pick one with just the correct port */
519 if (intel_dp->pps_pipe == INVALID_PIPE)
520 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
521 vlv_pipe_any);
a4a5d2f8
VS
522
523 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
524 if (intel_dp->pps_pipe == INVALID_PIPE) {
525 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
526 port_name(port));
527 return;
bf13e81b
JN
528 }
529
a4a5d2f8
VS
530 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
531 port_name(port), pipe_name(intel_dp->pps_pipe));
532
36b5f425
VS
533 intel_dp_init_panel_power_sequencer(dev, intel_dp);
534 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
535}
536
773538e8
VS
537void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
538{
539 struct drm_device *dev = dev_priv->dev;
540 struct intel_encoder *encoder;
541
542 if (WARN_ON(!IS_VALLEYVIEW(dev)))
543 return;
544
545 /*
546 * We can't grab pps_mutex here due to deadlock with power_domain
547 * mutex when power_domain functions are called while holding pps_mutex.
548 * That also means that in order to use pps_pipe the code needs to
549 * hold both a power domain reference and pps_mutex, and the power domain
550 * reference get/put must be done while _not_ holding pps_mutex.
551 * pps_{lock,unlock}() do these steps in the correct order, so one
552 * should use them always.
553 */
554
555 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
556 struct intel_dp *intel_dp;
557
558 if (encoder->type != INTEL_OUTPUT_EDP)
559 continue;
560
561 intel_dp = enc_to_intel_dp(&encoder->base);
562 intel_dp->pps_pipe = INVALID_PIPE;
563 }
bf13e81b
JN
564}
565
566static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
567{
568 struct drm_device *dev = intel_dp_to_dev(intel_dp);
569
570 if (HAS_PCH_SPLIT(dev))
571 return PCH_PP_CONTROL;
572 else
573 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
574}
575
576static u32 _pp_stat_reg(struct intel_dp *intel_dp)
577{
578 struct drm_device *dev = intel_dp_to_dev(intel_dp);
579
580 if (HAS_PCH_SPLIT(dev))
581 return PCH_PP_STATUS;
582 else
583 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
584}
585
01527b31
CT
 586/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
 587 This function is only applicable when the panel PM state is not tracked. */
588static int edp_notify_handler(struct notifier_block *this, unsigned long code,
589 void *unused)
590{
591 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
592 edp_notifier);
593 struct drm_device *dev = intel_dp_to_dev(intel_dp);
594 struct drm_i915_private *dev_priv = dev->dev_private;
595 u32 pp_div;
596 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
597
598 if (!is_edp(intel_dp) || code != SYS_RESTART)
599 return 0;
600
773538e8 601 pps_lock(intel_dp);
e39b999a 602
01527b31 603 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
604 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
605
01527b31
CT
606 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
607 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
608 pp_div = I915_READ(pp_div_reg);
609 pp_div &= PP_REFERENCE_DIVIDER_MASK;
610
611 /* 0x1F write to PP_DIV_REG sets max cycle delay */
612 I915_WRITE(pp_div_reg, pp_div | 0x1F);
613 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
614 msleep(intel_dp->panel_power_cycle_delay);
615 }
616
773538e8 617 pps_unlock(intel_dp);
e39b999a 618
01527b31
CT
619 return 0;
620}
621
4be73780 622static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 623{
30add22d 624 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
625 struct drm_i915_private *dev_priv = dev->dev_private;
626
e39b999a
VS
627 lockdep_assert_held(&dev_priv->pps_mutex);
628
9a42356b
VS
629 if (IS_VALLEYVIEW(dev) &&
630 intel_dp->pps_pipe == INVALID_PIPE)
631 return false;
632
bf13e81b 633 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
634}
635
4be73780 636static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 637{
30add22d 638 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
639 struct drm_i915_private *dev_priv = dev->dev_private;
640
e39b999a
VS
641 lockdep_assert_held(&dev_priv->pps_mutex);
642
9a42356b
VS
643 if (IS_VALLEYVIEW(dev) &&
644 intel_dp->pps_pipe == INVALID_PIPE)
645 return false;
646
773538e8 647 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
648}
649
9b984dae
KP
650static void
651intel_dp_check_edp(struct intel_dp *intel_dp)
652{
30add22d 653 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 654 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 655
9b984dae
KP
656 if (!is_edp(intel_dp))
657 return;
453c5420 658
4be73780 659 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
660 WARN(1, "eDP powered off while attempting aux channel communication.\n");
661 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
662 I915_READ(_pp_stat_reg(intel_dp)),
663 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
664 }
665}
666
9ee32fea
DV
667static uint32_t
668intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
669{
670 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
671 struct drm_device *dev = intel_dig_port->base.base.dev;
672 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 673 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
674 uint32_t status;
675 bool done;
676
ef04f00d 677#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 678 if (has_aux_irq)
b18ac466 679 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 680 msecs_to_jiffies_timeout(10));
9ee32fea
DV
681 else
682 done = wait_for_atomic(C, 10) == 0;
683 if (!done)
684 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
685 has_aux_irq);
686#undef C
687
688 return status;
689}
690
ec5b01dd 691static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 692{
174edf1f
PZ
693 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
694 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 695
ec5b01dd
DL
696 /*
697 * The clock divider is based off the hrawclk, and would like to run at
698 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 699 */
ec5b01dd
DL
700 return index ? 0 : intel_hrawclk(dev) / 2;
701}
702
703static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704{
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707
708 if (index)
709 return 0;
710
711 if (intel_dig_port->port == PORT_A) {
712 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 713 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 714 else
b84a1cf8 715 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
716 } else {
717 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
718 }
719}
720
721static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
722{
723 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
724 struct drm_device *dev = intel_dig_port->base.base.dev;
725 struct drm_i915_private *dev_priv = dev->dev_private;
726
727 if (intel_dig_port->port == PORT_A) {
728 if (index)
729 return 0;
730 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
731 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
732 /* Workaround for non-ULT HSW */
bc86625a
CW
733 switch (index) {
734 case 0: return 63;
735 case 1: return 72;
736 default: return 0;
737 }
ec5b01dd 738 } else {
bc86625a 739 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 740 }
b84a1cf8
RV
741}
742
ec5b01dd
DL
743static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
744{
745 return index ? 0 : 100;
746}
747
b6b5e383
DL
748static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
749{
750 /*
751 * SKL doesn't need us to program the AUX clock divider (Hardware will
752 * derive the clock from CDCLK automatically). We still implement the
753 * get_aux_clock_divider vfunc to plug-in into the existing code.
754 */
755 return index ? 0 : 1;
756}
757
5ed12a19
DL
758static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
759 bool has_aux_irq,
760 int send_bytes,
761 uint32_t aux_clock_divider)
762{
763 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
764 struct drm_device *dev = intel_dig_port->base.base.dev;
765 uint32_t precharge, timeout;
766
767 if (IS_GEN6(dev))
768 precharge = 3;
769 else
770 precharge = 5;
771
772 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
773 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
774 else
775 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
776
777 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 778 DP_AUX_CH_CTL_DONE |
5ed12a19 779 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 780 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 781 timeout |
788d4433 782 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
783 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
784 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 785 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
786}
787
b9ca5fad
DL
788static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
789 bool has_aux_irq,
790 int send_bytes,
791 uint32_t unused)
792{
793 return DP_AUX_CH_CTL_SEND_BUSY |
794 DP_AUX_CH_CTL_DONE |
795 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
796 DP_AUX_CH_CTL_TIME_OUT_ERROR |
797 DP_AUX_CH_CTL_TIME_OUT_1600us |
798 DP_AUX_CH_CTL_RECEIVE_ERROR |
799 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
800 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
801}
802
b84a1cf8
RV
803static int
804intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 805 const uint8_t *send, int send_bytes,
b84a1cf8
RV
806 uint8_t *recv, int recv_size)
807{
808 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
809 struct drm_device *dev = intel_dig_port->base.base.dev;
810 struct drm_i915_private *dev_priv = dev->dev_private;
811 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
812 uint32_t ch_data = ch_ctl + 4;
bc86625a 813 uint32_t aux_clock_divider;
b84a1cf8
RV
814 int i, ret, recv_bytes;
815 uint32_t status;
5ed12a19 816 int try, clock = 0;
4e6b788c 817 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
818 bool vdd;
819
773538e8 820 pps_lock(intel_dp);
e39b999a 821
72c3500a
VS
822 /*
823 * We will be called with VDD already enabled for dpcd/edid/oui reads.
824 * In such cases we want to leave VDD enabled and it's up to upper layers
825 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
826 * ourselves.
827 */
1e0560e0 828 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
829
830 /* dp aux is extremely sensitive to irq latency, hence request the
831 * lowest possible wakeup latency and so prevent the cpu from going into
832 * deep sleep states.
833 */
834 pm_qos_update_request(&dev_priv->pm_qos, 0);
835
836 intel_dp_check_edp(intel_dp);
5eb08b69 837
c67a470b
PZ
838 intel_aux_display_runtime_get(dev_priv);
839
11bee43e
JB
840 /* Try to wait for any previous AUX channel activity */
841 for (try = 0; try < 3; try++) {
ef04f00d 842 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
843 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
844 break;
845 msleep(1);
846 }
847
848 if (try == 3) {
849 WARN(1, "dp_aux_ch not started status 0x%08x\n",
850 I915_READ(ch_ctl));
9ee32fea
DV
851 ret = -EBUSY;
852 goto out;
4f7f7b7e
CW
853 }
854
46a5ae9f
PZ
855 /* Only 5 data registers! */
856 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
857 ret = -E2BIG;
858 goto out;
859 }
860
ec5b01dd 861 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
862 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
863 has_aux_irq,
864 send_bytes,
865 aux_clock_divider);
5ed12a19 866
bc86625a
CW
867 /* Must try at least 3 times according to DP spec */
868 for (try = 0; try < 5; try++) {
869 /* Load the send data into the aux channel data registers */
870 for (i = 0; i < send_bytes; i += 4)
871 I915_WRITE(ch_data + i,
a4f1289e
RV
872 intel_dp_pack_aux(send + i,
873 send_bytes - i));
bc86625a
CW
874
875 /* Send the command and wait for it to complete */
5ed12a19 876 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
877
878 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
879
880 /* Clear done status and any errors */
881 I915_WRITE(ch_ctl,
882 status |
883 DP_AUX_CH_CTL_DONE |
884 DP_AUX_CH_CTL_TIME_OUT_ERROR |
885 DP_AUX_CH_CTL_RECEIVE_ERROR);
886
887 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
888 DP_AUX_CH_CTL_RECEIVE_ERROR))
889 continue;
890 if (status & DP_AUX_CH_CTL_DONE)
891 break;
892 }
4f7f7b7e 893 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
894 break;
895 }
896
a4fc5ed6 897 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 898 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
899 ret = -EBUSY;
900 goto out;
a4fc5ed6
KP
901 }
902
903 /* Check for timeout or receive error.
904 * Timeouts occur when the sink is not connected
905 */
a5b3da54 906 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 907 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
908 ret = -EIO;
909 goto out;
a5b3da54 910 }
1ae8c0a5
KP
911
912 /* Timeouts occur when the device isn't connected, so they're
913 * "normal" -- don't fill the kernel log with these */
a5b3da54 914 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 915 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
916 ret = -ETIMEDOUT;
917 goto out;
a4fc5ed6
KP
918 }
919
920 /* Unload any bytes sent back from the other side */
921 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
922 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
923 if (recv_bytes > recv_size)
924 recv_bytes = recv_size;
0206e353 925
4f7f7b7e 926 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
927 intel_dp_unpack_aux(I915_READ(ch_data + i),
928 recv + i, recv_bytes - i);
a4fc5ed6 929
9ee32fea
DV
930 ret = recv_bytes;
931out:
932 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 933 intel_aux_display_runtime_put(dev_priv);
9ee32fea 934
884f19e9
JN
935 if (vdd)
936 edp_panel_vdd_off(intel_dp, false);
937
773538e8 938 pps_unlock(intel_dp);
e39b999a 939
9ee32fea 940 return ret;
a4fc5ed6
KP
941}
942
a6c8aff0
JN
943#define BARE_ADDRESS_SIZE 3
944#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
945static ssize_t
946intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 947{
9d1a1031
JN
948 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
949 uint8_t txbuf[20], rxbuf[20];
950 size_t txsize, rxsize;
a4fc5ed6 951 int ret;
a4fc5ed6 952
9d1a1031
JN
953 txbuf[0] = msg->request << 4;
954 txbuf[1] = msg->address >> 8;
955 txbuf[2] = msg->address & 0xff;
956 txbuf[3] = msg->size - 1;
46a5ae9f 957
9d1a1031
JN
958 switch (msg->request & ~DP_AUX_I2C_MOT) {
959 case DP_AUX_NATIVE_WRITE:
960 case DP_AUX_I2C_WRITE:
a6c8aff0 961 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 962 rxsize = 1;
f51a44b9 963
9d1a1031
JN
964 if (WARN_ON(txsize > 20))
965 return -E2BIG;
a4fc5ed6 966
9d1a1031 967 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 968
9d1a1031
JN
969 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
970 if (ret > 0) {
971 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 972
9d1a1031
JN
973 /* Return payload size. */
974 ret = msg->size;
975 }
976 break;
46a5ae9f 977
9d1a1031
JN
978 case DP_AUX_NATIVE_READ:
979 case DP_AUX_I2C_READ:
a6c8aff0 980 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 981 rxsize = msg->size + 1;
a4fc5ed6 982
9d1a1031
JN
983 if (WARN_ON(rxsize > 20))
984 return -E2BIG;
a4fc5ed6 985
9d1a1031
JN
986 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
987 if (ret > 0) {
988 msg->reply = rxbuf[0] >> 4;
989 /*
990 * Assume happy day, and copy the data. The caller is
991 * expected to check msg->reply before touching it.
992 *
993 * Return payload size.
994 */
995 ret--;
996 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 997 }
9d1a1031
JN
998 break;
999
1000 default:
1001 ret = -EINVAL;
1002 break;
a4fc5ed6 1003 }
f51a44b9 1004
9d1a1031 1005 return ret;
a4fc5ed6
KP
1006}
1007
9d1a1031
JN
1008static void
1009intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1010{
1011 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1012 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1013 enum port port = intel_dig_port->port;
0b99836f 1014 const char *name = NULL;
ab2c0672
DA
1015 int ret;
1016
33ad6626
JN
1017 switch (port) {
1018 case PORT_A:
1019 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1020 name = "DPDDC-A";
ab2c0672 1021 break;
33ad6626
JN
1022 case PORT_B:
1023 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1024 name = "DPDDC-B";
ab2c0672 1025 break;
33ad6626
JN
1026 case PORT_C:
1027 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1028 name = "DPDDC-C";
ab2c0672 1029 break;
33ad6626
JN
1030 case PORT_D:
1031 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1032 name = "DPDDC-D";
33ad6626
JN
1033 break;
1034 default:
1035 BUG();
ab2c0672
DA
1036 }
1037
1b1aad75
DL
1038 /*
1039 * The AUX_CTL register is usually DP_CTL + 0x10.
1040 *
1041 * On Haswell and Broadwell though:
1042 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1043 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1044 *
1045 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1046 */
1047 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1048 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1049
0b99836f 1050 intel_dp->aux.name = name;
9d1a1031
JN
1051 intel_dp->aux.dev = dev->dev;
1052 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1053
0b99836f
JN
1054 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1055 connector->base.kdev->kobj.name);
8316f337 1056
4f71d0cb 1057 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1058 if (ret < 0) {
4f71d0cb 1059 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1060 name, ret);
1061 return;
ab2c0672 1062 }
8a5e6aeb 1063
0b99836f
JN
1064 ret = sysfs_create_link(&connector->base.kdev->kobj,
1065 &intel_dp->aux.ddc.dev.kobj,
1066 intel_dp->aux.ddc.dev.kobj.name);
1067 if (ret < 0) {
1068 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1069 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1070 }
a4fc5ed6
KP
1071}
1072
80f65de3
ID
1073static void
1074intel_dp_connector_unregister(struct intel_connector *intel_connector)
1075{
1076 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1077
0e32b39c
DA
1078 if (!intel_connector->mst_port)
1079 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1080 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1081 intel_connector_unregister(intel_connector);
1082}
1083
5416d871 1084static void
c3346ef6 1085skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1086{
1087 u32 ctrl1;
1088
1089 pipe_config->ddi_pll_sel = SKL_DPLL0;
1090 pipe_config->dpll_hw_state.cfgcr1 = 0;
1091 pipe_config->dpll_hw_state.cfgcr2 = 0;
1092
1093 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1094 switch (link_clock / 2) {
1095 case 81000:
5416d871
DL
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1097 SKL_DPLL0);
1098 break;
c3346ef6 1099 case 135000:
5416d871
DL
1100 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1101 SKL_DPLL0);
1102 break;
c3346ef6 1103 case 270000:
5416d871
DL
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1105 SKL_DPLL0);
1106 break;
c3346ef6
SJ
1107 case 162000:
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1109 SKL_DPLL0);
1110 break;
1111 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1112 results in CDCLK change. Need to handle the change of CDCLK by
1113 disabling pipes and re-enabling them */
1114 case 108000:
1115 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1116 SKL_DPLL0);
1117 break;
1118 case 216000:
1119 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1120 SKL_DPLL0);
1121 break;
1122
5416d871
DL
1123 }
1124 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1125}
1126
0e50338c 1127static void
5cec258b 1128hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1129{
1130 switch (link_bw) {
1131 case DP_LINK_BW_1_62:
1132 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1133 break;
1134 case DP_LINK_BW_2_7:
1135 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1136 break;
1137 case DP_LINK_BW_5_4:
1138 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1139 break;
1140 }
1141}
1142
static int
intel_read_sink_rates(struct intel_dp *intel_dp, int *sink_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		/*
		 * Receiver supports only main-link rate selection by
		 * link rate table method, so read link rates from
		 * supported_link_rates
		 */
		memcpy(sink_rates, intel_dp->supported_rates,
		       sizeof(intel_dp->supported_rates));

		return intel_dp->num_supported_rates;
	}
	return 0;
}

static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (INTEL_INFO(dev)->gen >= 9) {
		*source_rates = gen9_rates;
		return ARRAY_SIZE(gen9_rates);
	}

	*source_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

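/*
 * Explanatory aside (added): for pre-gen9 sources the
 * (intel_dp_max_link_bw() >> 3) + 1 expression above maps the standard
 * bandwidth codes (DP_LINK_BW_1_62 = 0x06, DP_LINK_BW_2_7 = 0x0a,
 * DP_LINK_BW_5_4 = 0x14) to the first 1, 2 or 3 entries of default_rates[],
 * so e.g. a link limited to HBR ends up with { 162000, 270000 }.
 */
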
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (link_bw == divisor[i].link_bw) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intel_supported_rates(const int *source_rates, int source_len,
				 const int *sink_rates, int sink_len,
				 int *supported_rates)
{
	int i = 0, j = 0, k = 0;

	/* For panels with eDP version less than 1.4 */
	if (sink_len == 0) {
		for (i = 0; i < source_len; ++i)
			supported_rates[i] = source_rates[i];
		return source_len;
	}

	/* For eDP 1.4 panels, find the common rates between source and sink */
	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			supported_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

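/*
 * Worked example (added for illustration): with source_rates =
 * default_rates = { 162000, 270000, 540000 } and an eDP 1.4 sink reporting
 * { 162000, 216000, 270000, 324000 }, the merge above (which assumes both
 * lists are sorted ascending) yields supported_rates = { 162000, 270000 };
 * rate_to_index(270000, sink_rates) then returns 2, which
 * intel_dp_compute_config() below stores in intel_dp->rate_select.
 */
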
bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int sink_rates[8];
	int supported_rates[8] = {0};
	const int *source_rates;
	int source_len, sink_len, supported_len;

	sink_len = intel_read_sink_rates(intel_dp, sink_rates);

	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	supported_len = intel_supported_rates(source_rates, source_len,
					      sink_rates, sink_len,
					      supported_rates);

	/* No common link rates between source and sink */
	WARN_ON(supported_len <= 0);

	max_clock = supported_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, supported_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {
		if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = supported_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->lane_count = lane_count;

	intel_dp->link_bw =
		drm_dp_link_rate_to_bw_code(supported_rates[clock]);

	if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) {
		intel_dp->rate_select =
			rate_to_index(supported_rates[clock], sink_rates);
		intel_dp->link_bw = 0;
	}

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = supported_rates[clock];

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(bpp, lane_count,
				intel_connector->panel.downclock_mode->clock,
				pipe_config->port_clock,
				&pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
	else
		intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

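/*
 * Rough example of the search order above (illustrative, not from the
 * original file): a 2560x1440@60 mode (pixel clock ~241500 kHz) at 24bpp
 * needs intel_dp_link_required(241500, 24) == 579600, which exceeds 2 lanes
 * of HBR (432000) but fits 4 lanes of HBR (864000); because the clock loop
 * is outer and the lane loop inner, the lower link rate with more lanes is
 * chosen before HBR2 is tried.
 */
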
7c62a164 1415static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1416{
7c62a164
DV
1417 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1418 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1419 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1420 struct drm_i915_private *dev_priv = dev->dev_private;
1421 u32 dpa_ctl;
1422
6e3c9717
ACO
1423 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1424 crtc->config->port_clock);
ea9b6006
DV
1425 dpa_ctl = I915_READ(DP_A);
1426 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1427
6e3c9717 1428 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1429 /* For a long time we've carried around a ILK-DevA w/a for the
1430 * 160MHz clock. If we're really unlucky, it's still required.
1431 */
1432 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1433 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1434 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1435 } else {
1436 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1437 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1438 }
1ce17038 1439
ea9b6006
DV
1440 I915_WRITE(DP_A, dpa_ctl);
1441
1442 POSTING_READ(DP_A);
1443 udelay(500);
1444}
1445
8ac33ed3 1446static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1447{
b934223d 1448 struct drm_device *dev = encoder->base.dev;
417e822d 1449 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1450 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1451 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1452 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1453 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1454
417e822d 1455 /*
1a2eb460 1456 * There are four kinds of DP registers:
417e822d
KP
1457 *
1458 * IBX PCH
1a2eb460
KP
1459 * SNB CPU
1460 * IVB CPU
417e822d
KP
1461 * CPT PCH
1462 *
1463 * IBX PCH and CPU are the same for almost everything,
1464 * except that the CPU DP PLL is configured in this
1465 * register
1466 *
1467 * CPT PCH is quite different, having many bits moved
1468 * to the TRANS_DP_CTL register instead. That
1469 * configuration happens (oddly) in ironlake_pch_enable
1470 */
9c9e7927 1471
417e822d
KP
1472 /* Preserve the BIOS-computed detected bit. This is
1473 * supposed to be read-only.
1474 */
1475 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1476
417e822d 1477 /* Handle DP bits in common between all three register formats */
417e822d 1478 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1479 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1480
6e3c9717 1481 if (crtc->config->has_audio)
ea5b213a 1482 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1483
417e822d 1484 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1485
bc7d38a4 1486 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1487 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1488 intel_dp->DP |= DP_SYNC_HS_HIGH;
1489 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1490 intel_dp->DP |= DP_SYNC_VS_HIGH;
1491 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1492
6aba5b6c 1493 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1494 intel_dp->DP |= DP_ENHANCED_FRAMING;
1495
7c62a164 1496 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1497 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1498 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1499 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1500
1501 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1502 intel_dp->DP |= DP_SYNC_HS_HIGH;
1503 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1504 intel_dp->DP |= DP_SYNC_VS_HIGH;
1505 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1506
6aba5b6c 1507 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1508 intel_dp->DP |= DP_ENHANCED_FRAMING;
1509
44f37d1f
CML
1510 if (!IS_CHERRYVIEW(dev)) {
1511 if (crtc->pipe == 1)
1512 intel_dp->DP |= DP_PIPEB_SELECT;
1513 } else {
1514 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1515 }
417e822d
KP
1516 } else {
1517 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1518 }
a4fc5ed6
KP
1519}
1520
ffd6749d
PZ
1521#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1522#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1523
1a5ef5b7
PZ
1524#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1525#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1526
ffd6749d
PZ
1527#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1528#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1529
4be73780 1530static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1531 u32 mask,
1532 u32 value)
bd943159 1533{
30add22d 1534 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1535 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1536 u32 pp_stat_reg, pp_ctrl_reg;
1537
e39b999a
VS
1538 lockdep_assert_held(&dev_priv->pps_mutex);
1539
bf13e81b
JN
1540 pp_stat_reg = _pp_stat_reg(intel_dp);
1541 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1542
99ea7127 1543 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1544 mask, value,
1545 I915_READ(pp_stat_reg),
1546 I915_READ(pp_ctrl_reg));
32ce697c 1547
453c5420 1548 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1549 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1550 I915_READ(pp_stat_reg),
1551 I915_READ(pp_ctrl_reg));
32ce697c 1552 }
54c136d4
CW
1553
1554 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1555}
32ce697c 1556
4be73780 1557static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1558{
1559 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1560 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1561}
1562
4be73780 1563static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1564{
1565 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1566 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1567}
1568
4be73780 1569static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1570{
1571 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1572
1573 /* When we disable the VDD override bit last we have to do the manual
1574 * wait. */
1575 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1576 intel_dp->panel_power_cycle_delay);
1577
4be73780 1578 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1579}
1580
4be73780 1581static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1582{
1583 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1584 intel_dp->backlight_on_delay);
1585}
1586
4be73780 1587static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1588{
1589 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1590 intel_dp->backlight_off_delay);
1591}
99ea7127 1592
832dd3c1
KP
1593/* Read the current pp_control value, unlocking the register if it
1594 * is locked
1595 */
1596
453c5420 1597static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1598{
453c5420
JB
1599 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1600 struct drm_i915_private *dev_priv = dev->dev_private;
1601 u32 control;
832dd3c1 1602
e39b999a
VS
1603 lockdep_assert_held(&dev_priv->pps_mutex);
1604
bf13e81b 1605 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1606 control &= ~PANEL_UNLOCK_MASK;
1607 control |= PANEL_UNLOCK_REGS;
1608 return control;
bd943159
KP
1609}
1610
951468f3
VS
1611/*
1612 * Must be paired with edp_panel_vdd_off().
1613 * Must hold pps_mutex around the whole on/off sequence.
1614 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1615 */
1e0560e0 1616static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1617{
30add22d 1618 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1619 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1620 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1621 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1622 enum intel_display_power_domain power_domain;
5d613501 1623 u32 pp;
453c5420 1624 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1625 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1626
e39b999a
VS
1627 lockdep_assert_held(&dev_priv->pps_mutex);
1628
97af61f5 1629 if (!is_edp(intel_dp))
adddaaf4 1630 return false;
bd943159 1631
2c623c11 1632 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1633 intel_dp->want_panel_vdd = true;
99ea7127 1634
4be73780 1635 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1636 return need_to_disable;
b0665d57 1637
4e6e1a54
ID
1638 power_domain = intel_display_port_power_domain(intel_encoder);
1639 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1640
3936fcf4
VS
1641 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1642 port_name(intel_dig_port->port));
bd943159 1643
4be73780
DV
1644 if (!edp_have_panel_power(intel_dp))
1645 wait_panel_power_cycle(intel_dp);
99ea7127 1646
453c5420 1647 pp = ironlake_get_pp_control(intel_dp);
5d613501 1648 pp |= EDP_FORCE_VDD;
ebf33b18 1649
bf13e81b
JN
1650 pp_stat_reg = _pp_stat_reg(intel_dp);
1651 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1652
1653 I915_WRITE(pp_ctrl_reg, pp);
1654 POSTING_READ(pp_ctrl_reg);
1655 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1656 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1657 /*
1658 * If the panel wasn't on, delay before accessing aux channel
1659 */
4be73780 1660 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1661 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1662 port_name(intel_dig_port->port));
f01eca2e 1663 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1664 }
adddaaf4
JN
1665
1666 return need_to_disable;
1667}
1668
951468f3
VS
1669/*
1670 * Must be paired with intel_edp_panel_vdd_off() or
1671 * intel_edp_panel_off().
1672 * Nested calls to these functions are not allowed since
1673 * we drop the lock. Caller must use some higher level
1674 * locking to prevent nested calls from other threads.
1675 */
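/*
 * A typical usage sketch (see intel_disable_dp() below for a real caller):
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	... AUX transfers / panel register accesses ...
 *	intel_edp_panel_off(intel_dp);
 */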
b80d6c78 1676void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1677{
c695b6b6 1678 bool vdd;
adddaaf4 1679
c695b6b6
VS
1680 if (!is_edp(intel_dp))
1681 return;
1682
773538e8 1683 pps_lock(intel_dp);
c695b6b6 1684 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1685 pps_unlock(intel_dp);
c695b6b6 1686
e2c719b7 1687 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1688 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1689}
1690
4be73780 1691static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1692{
30add22d 1693 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1694 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1695 struct intel_digital_port *intel_dig_port =
1696 dp_to_dig_port(intel_dp);
1697 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1698 enum intel_display_power_domain power_domain;
5d613501 1699 u32 pp;
453c5420 1700 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1701
e39b999a 1702 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1703
15e899a0 1704 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1705
15e899a0 1706 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1707 return;
b0665d57 1708
3936fcf4
VS
1709 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1710 port_name(intel_dig_port->port));
bd943159 1711
be2c9196
VS
1712 pp = ironlake_get_pp_control(intel_dp);
1713 pp &= ~EDP_FORCE_VDD;
453c5420 1714
be2c9196
VS
1715 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1716 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1717
be2c9196
VS
1718 I915_WRITE(pp_ctrl_reg, pp);
1719 POSTING_READ(pp_ctrl_reg);
90791a5c 1720
be2c9196
VS
1721 /* Make sure sequencer is idle before allowing subsequent activity */
1722 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1723 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1724
be2c9196
VS
1725 if ((pp & POWER_TARGET_ON) == 0)
1726 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1727
be2c9196
VS
1728 power_domain = intel_display_port_power_domain(intel_encoder);
1729 intel_display_power_put(dev_priv, power_domain);
bd943159 1730}
5d613501 1731
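/*
 * Delayed-work callback scheduled by edp_panel_vdd_schedule_off(): drop
 * the forced VDD unless someone has requested it again in the meantime.
 */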
4be73780 1732static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1733{
1734 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1735 struct intel_dp, panel_vdd_work);
bd943159 1736
773538e8 1737 pps_lock(intel_dp);
15e899a0
VS
1738 if (!intel_dp->want_panel_vdd)
1739 edp_panel_vdd_off_sync(intel_dp);
773538e8 1740 pps_unlock(intel_dp);
bd943159
KP
1741}
1742
aba86890
ID
1743static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1744{
1745 unsigned long delay;
1746
1747 /*
1748 * Queue the timer to fire a long time from now (relative to the power
1749 * down delay) to keep the panel power up across a sequence of
1750 * operations.
1751 */
1752 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1753 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1754}
1755
951468f3
VS
1756/*
1757 * Must be paired with edp_panel_vdd_on().
1758 * Must hold pps_mutex around the whole on/off sequence.
1759 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1760 */
4be73780 1761static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1762{
e39b999a
VS
1763 struct drm_i915_private *dev_priv =
1764 intel_dp_to_dev(intel_dp)->dev_private;
1765
1766 lockdep_assert_held(&dev_priv->pps_mutex);
1767
97af61f5
KP
1768 if (!is_edp(intel_dp))
1769 return;
5d613501 1770
e2c719b7 1771 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1772 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1773
bd943159
KP
1774 intel_dp->want_panel_vdd = false;
1775
aba86890 1776 if (sync)
4be73780 1777 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1778 else
1779 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1780}
1781
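/*
 * Run the eDP panel power-on sequence: wait out the power-cycle delay,
 * apply the ILK reset workaround, set POWER_TARGET_ON and wait for the
 * sequencer to report the panel as on. Caller must hold pps_mutex.
 */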
9f0fb5be 1782static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1783{
30add22d 1784 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1785 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1786 u32 pp;
453c5420 1787 u32 pp_ctrl_reg;
9934c132 1788
9f0fb5be
VS
1789 lockdep_assert_held(&dev_priv->pps_mutex);
1790
97af61f5 1791 if (!is_edp(intel_dp))
bd943159 1792 return;
99ea7127 1793
3936fcf4
VS
1794 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1795 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1796
e7a89ace
VS
1797 if (WARN(edp_have_panel_power(intel_dp),
1798 "eDP port %c panel power already on\n",
1799 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1800 return;
9934c132 1801
4be73780 1802 wait_panel_power_cycle(intel_dp);
37c6c9b0 1803
bf13e81b 1804 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1805 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1806 if (IS_GEN5(dev)) {
1807 /* ILK workaround: disable reset around power sequence */
1808 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1809 I915_WRITE(pp_ctrl_reg, pp);
1810 POSTING_READ(pp_ctrl_reg);
05ce1a49 1811 }
37c6c9b0 1812
1c0ae80a 1813 pp |= POWER_TARGET_ON;
99ea7127
KP
1814 if (!IS_GEN5(dev))
1815 pp |= PANEL_POWER_RESET;
1816
453c5420
JB
1817 I915_WRITE(pp_ctrl_reg, pp);
1818 POSTING_READ(pp_ctrl_reg);
9934c132 1819
4be73780 1820 wait_panel_on(intel_dp);
dce56b3c 1821 intel_dp->last_power_on = jiffies;
9934c132 1822
05ce1a49
KP
1823 if (IS_GEN5(dev)) {
1824 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1825 I915_WRITE(pp_ctrl_reg, pp);
1826 POSTING_READ(pp_ctrl_reg);
05ce1a49 1827 }
9f0fb5be 1828}
e39b999a 1829
9f0fb5be
VS
1830void intel_edp_panel_on(struct intel_dp *intel_dp)
1831{
1832 if (!is_edp(intel_dp))
1833 return;
1834
1835 pps_lock(intel_dp);
1836 edp_panel_on(intel_dp);
773538e8 1837 pps_unlock(intel_dp);
9934c132
JB
1838}
1839
9f0fb5be
VS
1840
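/*
 * Run the eDP panel power-off sequence: drop panel power and the VDD
 * force bit together (some panels misbehave otherwise) and wait for the
 * sequencer to report the panel as off. Caller must hold pps_mutex.
 */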
1841static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1842{
4e6e1a54
ID
1843 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1844 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1846 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1847 enum intel_display_power_domain power_domain;
99ea7127 1848 u32 pp;
453c5420 1849 u32 pp_ctrl_reg;
9934c132 1850
9f0fb5be
VS
1851 lockdep_assert_held(&dev_priv->pps_mutex);
1852
97af61f5
KP
1853 if (!is_edp(intel_dp))
1854 return;
37c6c9b0 1855
3936fcf4
VS
1856 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1857 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1858
3936fcf4
VS
1859 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1860 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1861
453c5420 1862 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1863 /* We need to switch off panel power _and_ force vdd, as otherwise some
1864 * panels get very unhappy and cease to work. */
b3064154
PJ
1865 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1866 EDP_BLC_ENABLE);
453c5420 1867
bf13e81b 1868 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1869
849e39f5
PZ
1870 intel_dp->want_panel_vdd = false;
1871
453c5420
JB
1872 I915_WRITE(pp_ctrl_reg, pp);
1873 POSTING_READ(pp_ctrl_reg);
9934c132 1874
dce56b3c 1875 intel_dp->last_power_cycle = jiffies;
4be73780 1876 wait_panel_off(intel_dp);
849e39f5
PZ
1877
1878 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1879 power_domain = intel_display_port_power_domain(intel_encoder);
1880 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1881}
e39b999a 1882
9f0fb5be
VS
1883void intel_edp_panel_off(struct intel_dp *intel_dp)
1884{
1885 if (!is_edp(intel_dp))
1886 return;
e39b999a 1887
9f0fb5be
VS
1888 pps_lock(intel_dp);
1889 edp_panel_off(intel_dp);
773538e8 1890 pps_unlock(intel_dp);
9934c132
JB
1891}
1892
1250d107
JN
1893/* Enable backlight in the panel power control. */
1894static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1895{
da63a9f2
PZ
1896 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1897 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1898 struct drm_i915_private *dev_priv = dev->dev_private;
1899 u32 pp;
453c5420 1900 u32 pp_ctrl_reg;
32f9d658 1901
01cb9ea6
JB
1902 /*
1903 * If we enable the backlight right away following a panel power
1904 * on, we may see slight flicker as the panel syncs with the eDP
1905 * link. So delay a bit to make sure the image is solid before
1906 * allowing it to appear.
1907 */
4be73780 1908 wait_backlight_on(intel_dp);
e39b999a 1909
773538e8 1910 pps_lock(intel_dp);
e39b999a 1911
453c5420 1912 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1913 pp |= EDP_BLC_ENABLE;
453c5420 1914
bf13e81b 1915 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1916
1917 I915_WRITE(pp_ctrl_reg, pp);
1918 POSTING_READ(pp_ctrl_reg);
e39b999a 1919
773538e8 1920 pps_unlock(intel_dp);
32f9d658
ZW
1921}
1922
1250d107
JN
1923/* Enable backlight PWM and backlight PP control. */
1924void intel_edp_backlight_on(struct intel_dp *intel_dp)
1925{
1926 if (!is_edp(intel_dp))
1927 return;
1928
1929 DRM_DEBUG_KMS("\n");
1930
1931 intel_panel_enable_backlight(intel_dp->attached_connector);
1932 _intel_edp_backlight_on(intel_dp);
1933}
1934
1935/* Disable backlight in the panel power control. */
1936static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1937{
30add22d 1938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1939 struct drm_i915_private *dev_priv = dev->dev_private;
1940 u32 pp;
453c5420 1941 u32 pp_ctrl_reg;
32f9d658 1942
f01eca2e
KP
1943 if (!is_edp(intel_dp))
1944 return;
1945
773538e8 1946 pps_lock(intel_dp);
e39b999a 1947
453c5420 1948 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1949 pp &= ~EDP_BLC_ENABLE;
453c5420 1950
bf13e81b 1951 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1952
1953 I915_WRITE(pp_ctrl_reg, pp);
1954 POSTING_READ(pp_ctrl_reg);
f7d2323c 1955
773538e8 1956 pps_unlock(intel_dp);
e39b999a
VS
1957
1958 intel_dp->last_backlight_off = jiffies;
f7d2323c 1959 edp_wait_backlight_off(intel_dp);
1250d107 1960}
f7d2323c 1961
1250d107
JN
1962/* Disable backlight PP control and backlight PWM. */
1963void intel_edp_backlight_off(struct intel_dp *intel_dp)
1964{
1965 if (!is_edp(intel_dp))
1966 return;
1967
1968 DRM_DEBUG_KMS("\n");
f7d2323c 1969
1250d107 1970 _intel_edp_backlight_off(intel_dp);
f7d2323c 1971 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1972}
a4fc5ed6 1973
73580fb7
JN
1974/*
1975 * Hook for controlling the panel power control backlight through the bl_power
1976 * sysfs attribute. Take care to handle multiple calls.
1977 */
1978static void intel_edp_backlight_power(struct intel_connector *connector,
1979 bool enable)
1980{
1981 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1982 bool is_enabled;
1983
773538e8 1984 pps_lock(intel_dp);
e39b999a 1985 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1986 pps_unlock(intel_dp);
73580fb7
JN
1987
1988 if (is_enabled == enable)
1989 return;
1990
23ba9373
JN
1991 DRM_DEBUG_KMS("panel power control backlight %s\n",
1992 enable ? "enable" : "disable");
73580fb7
JN
1993
1994 if (enable)
1995 _intel_edp_backlight_on(intel_dp);
1996 else
1997 _intel_edp_backlight_off(intel_dp);
1998}
1999
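/*
 * Enable the eDP PLL in DP_A. This is used only for the CPU eDP port
 * (port A, see g4x_pre_enable_dp()) and must be done while the pipe is
 * still disabled.
 */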
2bd2ad64 2000static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2001{
da63a9f2
PZ
2002 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2003 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2004 struct drm_device *dev = crtc->dev;
d240f20f
JB
2005 struct drm_i915_private *dev_priv = dev->dev_private;
2006 u32 dpa_ctl;
2007
2bd2ad64
DV
2008 assert_pipe_disabled(dev_priv,
2009 to_intel_crtc(crtc)->pipe);
2010
d240f20f
JB
2011 DRM_DEBUG_KMS("\n");
2012 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2013 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2014 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2015
2016 /* We don't adjust intel_dp->DP while tearing down the link, to
2017 * facilitate link retraining (e.g. after hotplug). Hence clear all
2018 * enable bits here to ensure that we don't enable too much. */
2019 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2020 intel_dp->DP |= DP_PLL_ENABLE;
2021 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2022 POSTING_READ(DP_A);
2023 udelay(200);
d240f20f
JB
2024}
2025
2bd2ad64 2026static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2027{
da63a9f2
PZ
2028 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2029 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2030 struct drm_device *dev = crtc->dev;
d240f20f
JB
2031 struct drm_i915_private *dev_priv = dev->dev_private;
2032 u32 dpa_ctl;
2033
2bd2ad64
DV
2034 assert_pipe_disabled(dev_priv,
2035 to_intel_crtc(crtc)->pipe);
2036
d240f20f 2037 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2038 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2039 "dp pll off, should be on\n");
2040 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2041
2042 /* We can't rely on the value tracked for the DP register in
2043 * intel_dp->DP because link_down must not change that (otherwise link
2044 * re-training will fail). */
298b0b39 2045 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2046 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2047 POSTING_READ(DP_A);
d240f20f
JB
2048 udelay(200);
2049}
2050
c7ad3810 2051/* If the sink supports it, try to set the power state appropriately */
c19b0669 2052void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2053{
2054 int ret, i;
2055
2056 /* Should have a valid DPCD by this point */
2057 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2058 return;
2059
2060 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2061 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2062 DP_SET_POWER_D3);
c7ad3810
JB
2063 } else {
2064 /*
2065 * When turning on, we need to retry for 1ms to give the sink
2066 * time to wake up.
2067 */
2068 for (i = 0; i < 3; i++) {
9d1a1031
JN
2069 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2070 DP_SET_POWER_D0);
c7ad3810
JB
2071 if (ret == 1)
2072 break;
2073 msleep(1);
2074 }
2075 }
f9cac721
JN
2076
2077 if (ret != 1)
2078 DRM_DEBUG_KMS("failed to %s sink power state\n",
2079 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2080}
2081
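/*
 * Hardware state readout: report whether the DP port is enabled and, if
 * so, which pipe it is attached to. The pipe is encoded differently per
 * platform (port register bits vs. TRANS_DP_CTL port select on CPT).
 */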
19d8fe15
DV
2082static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2083 enum pipe *pipe)
d240f20f 2084{
19d8fe15 2085 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2086 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2087 struct drm_device *dev = encoder->base.dev;
2088 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2089 enum intel_display_power_domain power_domain;
2090 u32 tmp;
2091
2092 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2093 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2094 return false;
2095
2096 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2097
2098 if (!(tmp & DP_PORT_EN))
2099 return false;
2100
bc7d38a4 2101 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2102 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2103 } else if (IS_CHERRYVIEW(dev)) {
2104 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2105 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2106 *pipe = PORT_TO_PIPE(tmp);
2107 } else {
2108 u32 trans_sel;
2109 u32 trans_dp;
2110 int i;
2111
2112 switch (intel_dp->output_reg) {
2113 case PCH_DP_B:
2114 trans_sel = TRANS_DP_PORT_SEL_B;
2115 break;
2116 case PCH_DP_C:
2117 trans_sel = TRANS_DP_PORT_SEL_C;
2118 break;
2119 case PCH_DP_D:
2120 trans_sel = TRANS_DP_PORT_SEL_D;
2121 break;
2122 default:
2123 return true;
2124 }
2125
055e393f 2126 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2127 trans_dp = I915_READ(TRANS_DP_CTL(i));
2128 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2129 *pipe = i;
2130 return true;
2131 }
2132 }
19d8fe15 2133
4a0833ec
DV
2134 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2135 intel_dp->output_reg);
2136 }
d240f20f 2137
19d8fe15
DV
2138 return true;
2139}
d240f20f 2140
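/*
 * State readout hook: reconstruct the encoder configuration (sync
 * polarities, audio, limited color range, port clock and M/N values)
 * from the hardware registers into pipe_config.
 */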
045ac3b5 2141static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2142 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2143{
2144 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2145 u32 tmp, flags = 0;
63000ef6
XZ
2146 struct drm_device *dev = encoder->base.dev;
2147 struct drm_i915_private *dev_priv = dev->dev_private;
2148 enum port port = dp_to_dig_port(intel_dp)->port;
2149 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2150 int dotclock;
045ac3b5 2151
9ed109a7
DV
2152 tmp = I915_READ(intel_dp->output_reg);
2153 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2154 pipe_config->has_audio = true;
2155
63000ef6 2156 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2157 if (tmp & DP_SYNC_HS_HIGH)
2158 flags |= DRM_MODE_FLAG_PHSYNC;
2159 else
2160 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2161
63000ef6
XZ
2162 if (tmp & DP_SYNC_VS_HIGH)
2163 flags |= DRM_MODE_FLAG_PVSYNC;
2164 else
2165 flags |= DRM_MODE_FLAG_NVSYNC;
2166 } else {
2167 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2168 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2169 flags |= DRM_MODE_FLAG_PHSYNC;
2170 else
2171 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2172
63000ef6
XZ
2173 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2174 flags |= DRM_MODE_FLAG_PVSYNC;
2175 else
2176 flags |= DRM_MODE_FLAG_NVSYNC;
2177 }
045ac3b5 2178
2d112de7 2179 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2180
8c875fca
VS
2181 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2182 tmp & DP_COLOR_RANGE_16_235)
2183 pipe_config->limited_color_range = true;
2184
eb14cb74
VS
2185 pipe_config->has_dp_encoder = true;
2186
2187 intel_dp_get_m_n(crtc, pipe_config);
2188
18442d08 2189 if (port == PORT_A) {
f1f644dc
JB
2190 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2191 pipe_config->port_clock = 162000;
2192 else
2193 pipe_config->port_clock = 270000;
2194 }
18442d08
VS
2195
2196 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2197 &pipe_config->dp_m_n);
2198
2199 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2200 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2201
2d112de7 2202 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2203
c6cd2ee2
JN
2204 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2205 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2206 /*
2207 * This is a big fat ugly hack.
2208 *
2209 * Some machines in UEFI boot mode provide us a VBT that has 18
2210 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2211 * unknown we fail to light up. Yet the same BIOS boots up with
2212 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2213 * max, not what it tells us to use.
2214 *
2215 * Note: This will still be broken if the eDP panel is not lit
2216 * up by the BIOS, and thus we can't get the mode at module
2217 * load.
2218 */
2219 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2220 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2221 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2222 }
045ac3b5
JB
2223}
2224
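/*
 * Disable path: tear down audio and PSR, then run the eDP power-down
 * sequence (backlight off, sink to D3, panel power off) while keeping
 * VDD forced on across the register accesses. On g4x the port itself is
 * disabled here, before the pipe.
 */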
e8cb4558 2225static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2226{
e8cb4558 2227 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2228 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2229 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2230
6e3c9717 2231 if (crtc->config->has_audio)
495a5bb8 2232 intel_audio_codec_disable(encoder);
6cb49835 2233
b32c6f48
RV
2234 if (HAS_PSR(dev) && !HAS_DDI(dev))
2235 intel_psr_disable(intel_dp);
2236
6cb49835
DV
2237 /* Make sure the panel is off before trying to change the mode. But also
2238 * ensure that we have vdd while we switch off the panel. */
24f3e092 2239 intel_edp_panel_vdd_on(intel_dp);
4be73780 2240 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2241 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2242 intel_edp_panel_off(intel_dp);
3739850b 2243
08aff3fe
VS
2244 /* disable the port before the pipe on g4x */
2245 if (INTEL_INFO(dev)->gen < 5)
3739850b 2246 intel_dp_link_down(intel_dp);
d240f20f
JB
2247}
2248
08aff3fe 2249static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2250{
2bd2ad64 2251 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2252 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2253
49277c31 2254 intel_dp_link_down(intel_dp);
08aff3fe
VS
2255 if (port == PORT_A)
2256 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2257}
2258
2259static void vlv_post_disable_dp(struct intel_encoder *encoder)
2260{
2261 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2262
2263 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2264}
2265
580d3811
VS
2266static void chv_post_disable_dp(struct intel_encoder *encoder)
2267{
2268 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2269 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2270 struct drm_device *dev = encoder->base.dev;
2271 struct drm_i915_private *dev_priv = dev->dev_private;
2272 struct intel_crtc *intel_crtc =
2273 to_intel_crtc(encoder->base.crtc);
2274 enum dpio_channel ch = vlv_dport_to_channel(dport);
2275 enum pipe pipe = intel_crtc->pipe;
2276 u32 val;
2277
2278 intel_dp_link_down(intel_dp);
2279
2280 mutex_lock(&dev_priv->dpio_lock);
2281
2282 /* Propagate soft reset to data lane reset */
97fd4d5c 2283 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2284 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2285 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2286
97fd4d5c
VS
2287 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2288 val |= CHV_PCS_REQ_SOFTRESET_EN;
2289 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2290
2291 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2292 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2293 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2294
2295 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2296 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2297 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2298
2299 mutex_unlock(&dev_priv->dpio_lock);
2300}
2301
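/*
 * Encode the requested training pattern into the port configuration: on
 * DDI platforms the DP_TP_CTL write happens here, on CPT PCH ports the
 * CPT-specific pattern bits are set in *DP, and otherwise the legacy DP
 * register bits are used; in the non-DDI cases the caller writes *DP out.
 */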
7b13b58a
VS
2302static void
2303_intel_dp_set_link_train(struct intel_dp *intel_dp,
2304 uint32_t *DP,
2305 uint8_t dp_train_pat)
2306{
2307 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2308 struct drm_device *dev = intel_dig_port->base.base.dev;
2309 struct drm_i915_private *dev_priv = dev->dev_private;
2310 enum port port = intel_dig_port->port;
2311
2312 if (HAS_DDI(dev)) {
2313 uint32_t temp = I915_READ(DP_TP_CTL(port));
2314
2315 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2316 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2317 else
2318 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2319
2320 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2321 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2322 case DP_TRAINING_PATTERN_DISABLE:
2323 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2324
2325 break;
2326 case DP_TRAINING_PATTERN_1:
2327 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2328 break;
2329 case DP_TRAINING_PATTERN_2:
2330 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2331 break;
2332 case DP_TRAINING_PATTERN_3:
2333 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2334 break;
2335 }
2336 I915_WRITE(DP_TP_CTL(port), temp);
2337
2338 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2339 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2340
2341 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2342 case DP_TRAINING_PATTERN_DISABLE:
2343 *DP |= DP_LINK_TRAIN_OFF_CPT;
2344 break;
2345 case DP_TRAINING_PATTERN_1:
2346 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2347 break;
2348 case DP_TRAINING_PATTERN_2:
2349 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2350 break;
2351 case DP_TRAINING_PATTERN_3:
2352 DRM_ERROR("DP training pattern 3 not supported\n");
2353 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2354 break;
2355 }
2356
2357 } else {
2358 if (IS_CHERRYVIEW(dev))
2359 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2360 else
2361 *DP &= ~DP_LINK_TRAIN_MASK;
2362
2363 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2364 case DP_TRAINING_PATTERN_DISABLE:
2365 *DP |= DP_LINK_TRAIN_OFF;
2366 break;
2367 case DP_TRAINING_PATTERN_1:
2368 *DP |= DP_LINK_TRAIN_PAT_1;
2369 break;
2370 case DP_TRAINING_PATTERN_2:
2371 *DP |= DP_LINK_TRAIN_PAT_2;
2372 break;
2373 case DP_TRAINING_PATTERN_3:
2374 if (IS_CHERRYVIEW(dev)) {
2375 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2376 } else {
2377 DRM_ERROR("DP training pattern 3 not supported\n");
2378 *DP |= DP_LINK_TRAIN_PAT_2;
2379 }
2380 break;
2381 }
2382 }
2383}
2384
2385static void intel_dp_enable_port(struct intel_dp *intel_dp)
2386{
2387 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2388 struct drm_i915_private *dev_priv = dev->dev_private;
2389
7b13b58a
VS
2390 /* enable with pattern 1 (as per spec) */
2391 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2392 DP_TRAINING_PATTERN_1);
2393
2394 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2395 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2396
2397 /*
2398 * Magic for VLV/CHV. We _must_ first set up the register
2399 * without actually enabling the port, and then do another
2400 * write to enable the port. Otherwise link training will
2401 * fail when the power sequencer is freshly used for this port.
2402 */
2403 intel_dp->DP |= DP_PORT_EN;
2404
2405 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2406 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2407}
2408
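/*
 * Common DP enable path: bind the pipe's power sequencer to the port
 * first where applicable (vlv_init_panel_power_sequencer()), enable the
 * port with training pattern 1, run the eDP power-on sequence under
 * pps_mutex, then train the link and enable audio if configured.
 */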
e8cb4558 2409static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2410{
e8cb4558
DV
2411 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2412 struct drm_device *dev = encoder->base.dev;
2413 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2414 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2415 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2416
0c33d8d7
DV
2417 if (WARN_ON(dp_reg & DP_PORT_EN))
2418 return;
5d613501 2419
093e3f13
VS
2420 pps_lock(intel_dp);
2421
2422 if (IS_VALLEYVIEW(dev))
2423 vlv_init_panel_power_sequencer(intel_dp);
2424
7b13b58a 2425 intel_dp_enable_port(intel_dp);
093e3f13
VS
2426
2427 edp_panel_vdd_on(intel_dp);
2428 edp_panel_on(intel_dp);
2429 edp_panel_vdd_off(intel_dp, true);
2430
2431 pps_unlock(intel_dp);
2432
61234fa5
VS
2433 if (IS_VALLEYVIEW(dev))
2434 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2435
f01eca2e 2436 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2437 intel_dp_start_link_train(intel_dp);
33a34e4e 2438 intel_dp_complete_link_train(intel_dp);
3ab9c637 2439 intel_dp_stop_link_train(intel_dp);
c1dec79a 2440
6e3c9717 2441 if (crtc->config->has_audio) {
c1dec79a
JN
2442 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2443 pipe_name(crtc->pipe));
2444 intel_audio_codec_enable(encoder);
2445 }
ab1f90f9 2446}
89b667f8 2447
ecff4f3b
JN
2448static void g4x_enable_dp(struct intel_encoder *encoder)
2449{
828f5c6e
JN
2450 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2451
ecff4f3b 2452 intel_enable_dp(encoder);
4be73780 2453 intel_edp_backlight_on(intel_dp);
ab1f90f9 2454}
89b667f8 2455
ab1f90f9
JN
2456static void vlv_enable_dp(struct intel_encoder *encoder)
2457{
828f5c6e
JN
2458 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2459
4be73780 2460 intel_edp_backlight_on(intel_dp);
b32c6f48 2461 intel_psr_enable(intel_dp);
d240f20f
JB
2462}
2463
ecff4f3b 2464static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2465{
2466 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2467 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2468
8ac33ed3
DV
2469 intel_dp_prepare(encoder);
2470
d41f1efb
DV
2471 /* Only ilk+ has port A */
2472 if (dport->port == PORT_A) {
2473 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2474 ironlake_edp_pll_on(intel_dp);
d41f1efb 2475 }
ab1f90f9
JN
2476}
2477
83b84597
VS
2478static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2479{
2480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2481 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2482 enum pipe pipe = intel_dp->pps_pipe;
2483 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2484
2485 edp_panel_vdd_off_sync(intel_dp);
2486
2487 /*
2488 * VLV seems to get confused when multiple power sequencers
2489 * have the same port selected (even if only one has power/vdd
2490 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2491 * CHV, on the other hand, doesn't seem to mind having the same port
2492 * selected in multiple power sequencers, but let's always clear the
2493 * port select when logically disconnecting a power sequencer
2494 * from a port.
2495 */
2496 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2497 pipe_name(pipe), port_name(intel_dig_port->port));
2498 I915_WRITE(pp_on_reg, 0);
2499 POSTING_READ(pp_on_reg);
2500
2501 intel_dp->pps_pipe = INVALID_PIPE;
2502}
2503
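/*
 * If some other eDP port currently owns the power sequencer of the given
 * pipe, detach it (turning its VDD off first) so the sequencer can be
 * taken over by the new port.
 */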
a4a5d2f8
VS
2504static void vlv_steal_power_sequencer(struct drm_device *dev,
2505 enum pipe pipe)
2506{
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_encoder *encoder;
2509
2510 lockdep_assert_held(&dev_priv->pps_mutex);
2511
ac3c12e4
VS
2512 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2513 return;
2514
a4a5d2f8
VS
2515 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2516 base.head) {
2517 struct intel_dp *intel_dp;
773538e8 2518 enum port port;
a4a5d2f8
VS
2519
2520 if (encoder->type != INTEL_OUTPUT_EDP)
2521 continue;
2522
2523 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2524 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2525
2526 if (intel_dp->pps_pipe != pipe)
2527 continue;
2528
2529 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2530 pipe_name(pipe), port_name(port));
a4a5d2f8 2531
034e43c6
VS
2532 WARN(encoder->connectors_active,
2533 "stealing pipe %c power sequencer from active eDP port %c\n",
2534 pipe_name(pipe), port_name(port));
a4a5d2f8 2535
a4a5d2f8 2536 /* make sure vdd is off before we steal it */
83b84597 2537 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2538 }
2539}
2540
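/*
 * Bind the power sequencer of this CRTC's pipe to the eDP port: release
 * any sequencer the port was using before, steal the new pipe's
 * sequencer from another port if needed, and reprogram its registers
 * for this port.
 */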
2541static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2542{
2543 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2544 struct intel_encoder *encoder = &intel_dig_port->base;
2545 struct drm_device *dev = encoder->base.dev;
2546 struct drm_i915_private *dev_priv = dev->dev_private;
2547 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2548
2549 lockdep_assert_held(&dev_priv->pps_mutex);
2550
093e3f13
VS
2551 if (!is_edp(intel_dp))
2552 return;
2553
a4a5d2f8
VS
2554 if (intel_dp->pps_pipe == crtc->pipe)
2555 return;
2556
2557 /*
2558 * If another power sequencer was being used on this
2559 * port previously, make sure to turn off vdd there while
2560 * we still have control of it.
2561 */
2562 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2563 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2564
2565 /*
2566 * We may be stealing the power
2567 * sequencer from another port.
2568 */
2569 vlv_steal_power_sequencer(dev, crtc->pipe);
2570
2571 /* now it's all ours */
2572 intel_dp->pps_pipe = crtc->pipe;
2573
2574 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2575 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2576
2577 /* init power sequencer on this pipe and port */
36b5f425
VS
2578 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2579 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2580}
2581
ab1f90f9 2582static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2583{
2bd2ad64 2584 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2585 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2586 struct drm_device *dev = encoder->base.dev;
89b667f8 2587 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2588 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2589 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2590 int pipe = intel_crtc->pipe;
2591 u32 val;
a4fc5ed6 2592
ab1f90f9 2593 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2594
ab3c759a 2595 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2596 val = 0;
2597 if (pipe)
2598 val |= (1<<21);
2599 else
2600 val &= ~(1<<21);
2601 val |= 0x001000c4;
ab3c759a
CML
2602 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2603 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2604 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2605
ab1f90f9
JN
2606 mutex_unlock(&dev_priv->dpio_lock);
2607
2608 intel_enable_dp(encoder);
89b667f8
JB
2609}
2610
ecff4f3b 2611static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2612{
2613 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2614 struct drm_device *dev = encoder->base.dev;
2615 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2616 struct intel_crtc *intel_crtc =
2617 to_intel_crtc(encoder->base.crtc);
e4607fcf 2618 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2619 int pipe = intel_crtc->pipe;
89b667f8 2620
8ac33ed3
DV
2621 intel_dp_prepare(encoder);
2622
89b667f8 2623 /* Program Tx lane resets to default */
0980a60f 2624 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2625 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2626 DPIO_PCS_TX_LANE2_RESET |
2627 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2628 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2629 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2630 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2631 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2632 DPIO_PCS_CLK_SOFT_RESET);
2633
2634 /* Fix up inter-pair skew failure */
ab3c759a
CML
2635 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2636 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2637 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2638 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2639}
2640
e4a1d846
CML
2641static void chv_pre_enable_dp(struct intel_encoder *encoder)
2642{
2643 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2644 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2645 struct drm_device *dev = encoder->base.dev;
2646 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2647 struct intel_crtc *intel_crtc =
2648 to_intel_crtc(encoder->base.crtc);
2649 enum dpio_channel ch = vlv_dport_to_channel(dport);
2650 int pipe = intel_crtc->pipe;
2651 int data, i;
949c1d43 2652 u32 val;
e4a1d846 2653
e4a1d846 2654 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2655
570e2a74
VS
2656 /* allow hardware to manage TX FIFO reset source */
2657 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2658 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2659 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2660
2661 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2662 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2664
949c1d43 2665 /* Deassert soft data lane reset*/
97fd4d5c 2666 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2667 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2668 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2669
2670 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2671 val |= CHV_PCS_REQ_SOFTRESET_EN;
2672 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2673
2674 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2675 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2676 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2677
97fd4d5c 2678 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2679 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2680 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2681
2682 /* Program Tx lane latency optimal setting*/
e4a1d846
CML
2683 for (i = 0; i < 4; i++) {
2684 /* Set the latency optimal bit */
2685 data = (i == 1) ? 0x0 : 0x6;
2686 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2687 data << DPIO_FRC_LATENCY_SHFIT);
2688
2689 /* Set the upar bit */
2690 data = (i == 1) ? 0x0 : 0x1;
2691 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2692 data << DPIO_UPAR_SHIFT);
2693 }
2694
2695 /* Data lane stagger programming */
2696 /* FIXME: Fix up value only after power analysis */
2697
2698 mutex_unlock(&dev_priv->dpio_lock);
2699
e4a1d846 2700 intel_enable_dp(encoder);
e4a1d846
CML
2701}
2702
9197c88b
VS
2703static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2704{
2705 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 enum pipe pipe = intel_crtc->pipe;
2712 u32 val;
2713
625695f8
VS
2714 intel_dp_prepare(encoder);
2715
9197c88b
VS
2716 mutex_lock(&dev_priv->dpio_lock);
2717
b9e5ac3c
VS
2718 /* program left/right clock distribution */
2719 if (pipe != PIPE_B) {
2720 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2721 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2722 if (ch == DPIO_CH0)
2723 val |= CHV_BUFLEFTENA1_FORCE;
2724 if (ch == DPIO_CH1)
2725 val |= CHV_BUFRIGHTENA1_FORCE;
2726 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2727 } else {
2728 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2729 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2730 if (ch == DPIO_CH0)
2731 val |= CHV_BUFLEFTENA2_FORCE;
2732 if (ch == DPIO_CH1)
2733 val |= CHV_BUFRIGHTENA2_FORCE;
2734 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2735 }
2736
9197c88b
VS
2737 /* program clock channel usage */
2738 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2739 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2740 if (pipe != PIPE_B)
2741 val &= ~CHV_PCS_USEDCLKCHANNEL;
2742 else
2743 val |= CHV_PCS_USEDCLKCHANNEL;
2744 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2745
2746 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2747 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2748 if (pipe != PIPE_B)
2749 val &= ~CHV_PCS_USEDCLKCHANNEL;
2750 else
2751 val |= CHV_PCS_USEDCLKCHANNEL;
2752 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2753
2754 /*
2755 * This is a bit weird since generally CL
2756 * matches the pipe, but here we need to
2757 * pick the CL based on the port.
2758 */
2759 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2760 if (pipe != PIPE_B)
2761 val &= ~CHV_CMN_USEDCLKCHANNEL;
2762 else
2763 val |= CHV_CMN_USEDCLKCHANNEL;
2764 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2765
2766 mutex_unlock(&dev_priv->dpio_lock);
2767}
2768
a4fc5ed6 2769/*
df0c237d
JB
2770 * Native read with retry for link status and receiver capability reads for
2771 * cases where the sink may still be asleep.
9d1a1031
JN
2772 *
2773 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2774 * supposed to retry 3 times per the spec.
a4fc5ed6 2775 */
9d1a1031
JN
2776static ssize_t
2777intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2778 void *buffer, size_t size)
a4fc5ed6 2779{
9d1a1031
JN
2780 ssize_t ret;
2781 int i;
61da5fab 2782
f6a19066
VS
2783 /*
2784 * Sometimes we just get the same incorrect byte repeated
2785 * over the entire buffer. Doing just one throw-away read
2786 * initially seems to "solve" it.
2787 */
2788 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2789
61da5fab 2790 for (i = 0; i < 3; i++) {
9d1a1031
JN
2791 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2792 if (ret == size)
2793 return ret;
61da5fab
JB
2794 msleep(1);
2795 }
a4fc5ed6 2796
9d1a1031 2797 return ret;
a4fc5ed6
KP
2798}
2799
2800/*
2801 * Fetch AUX CH registers 0x202 - 0x207 which contain
2802 * link status information
2803 */
2804static bool
93f62dad 2805intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2806{
9d1a1031
JN
2807 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2808 DP_LANE0_1_STATUS,
2809 link_status,
2810 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2811}
2812
1100244e 2813/* These are source-specific values. */
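/*
 * intel_dp_voltage_max() reports the highest voltage swing the source can
 * drive for this platform/port, and intel_dp_pre_emphasis_max() the
 * highest pre-emphasis level usable with a given swing; both are encoded
 * as DP_TRAIN_* register values.
 */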
a4fc5ed6 2814static uint8_t
1a2eb460 2815intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2816{
30add22d 2817 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2818 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2819 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2820
7ad14a29
SJ
2821 if (INTEL_INFO(dev)->gen >= 9) {
2822 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2823 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2824 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2825 } else if (IS_VALLEYVIEW(dev))
bd60018a 2826 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2827 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2828 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2829 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2830 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2831 else
bd60018a 2832 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2833}
2834
2835static uint8_t
2836intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2837{
30add22d 2838 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2839 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2840
5a9d1f1a
DL
2841 if (INTEL_INFO(dev)->gen >= 9) {
2842 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2843 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2844 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2846 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2847 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2848 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2849 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2850 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2851 default:
2852 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2853 }
2854 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2855 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2857 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2858 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2860 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2861 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2862 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2863 default:
bd60018a 2864 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2865 }
e2fa6fba
P
2866 } else if (IS_VALLEYVIEW(dev)) {
2867 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2868 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2869 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2870 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2871 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2872 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2873 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2874 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2875 default:
bd60018a 2876 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2877 }
bc7d38a4 2878 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2879 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2880 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2881 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2882 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2883 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2884 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2885 default:
bd60018a 2886 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2887 }
2888 } else {
2889 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2890 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2891 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2892 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2893 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2894 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2895 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2896 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2897 default:
bd60018a 2898 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2899 }
a4fc5ed6
KP
2900 }
2901}
2902
e2fa6fba
P
2903static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2904{
2905 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2906 struct drm_i915_private *dev_priv = dev->dev_private;
2907 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2908 struct intel_crtc *intel_crtc =
2909 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2910 unsigned long demph_reg_value, preemph_reg_value,
2911 uniqtranscale_reg_value;
2912 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2913 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2914 int pipe = intel_crtc->pipe;
e2fa6fba
P
2915
2916 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2917 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2918 preemph_reg_value = 0x0004000;
2919 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2920 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2921 demph_reg_value = 0x2B405555;
2922 uniqtranscale_reg_value = 0x552AB83A;
2923 break;
bd60018a 2924 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2925 demph_reg_value = 0x2B404040;
2926 uniqtranscale_reg_value = 0x5548B83A;
2927 break;
bd60018a 2928 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2929 demph_reg_value = 0x2B245555;
2930 uniqtranscale_reg_value = 0x5560B83A;
2931 break;
bd60018a 2932 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2933 demph_reg_value = 0x2B405555;
2934 uniqtranscale_reg_value = 0x5598DA3A;
2935 break;
2936 default:
2937 return 0;
2938 }
2939 break;
bd60018a 2940 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2941 preemph_reg_value = 0x0002000;
2942 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2944 demph_reg_value = 0x2B404040;
2945 uniqtranscale_reg_value = 0x5552B83A;
2946 break;
bd60018a 2947 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2948 demph_reg_value = 0x2B404848;
2949 uniqtranscale_reg_value = 0x5580B83A;
2950 break;
bd60018a 2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2952 demph_reg_value = 0x2B404040;
2953 uniqtranscale_reg_value = 0x55ADDA3A;
2954 break;
2955 default:
2956 return 0;
2957 }
2958 break;
bd60018a 2959 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2960 preemph_reg_value = 0x0000000;
2961 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2962 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2963 demph_reg_value = 0x2B305555;
2964 uniqtranscale_reg_value = 0x5570B83A;
2965 break;
bd60018a 2966 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2967 demph_reg_value = 0x2B2B4040;
2968 uniqtranscale_reg_value = 0x55ADDA3A;
2969 break;
2970 default:
2971 return 0;
2972 }
2973 break;
bd60018a 2974 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2975 preemph_reg_value = 0x0006000;
2976 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2977 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2978 demph_reg_value = 0x1B405555;
2979 uniqtranscale_reg_value = 0x55ADDA3A;
2980 break;
2981 default:
2982 return 0;
2983 }
2984 break;
2985 default:
2986 return 0;
2987 }
2988
0980a60f 2989 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2990 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2991 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2992 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 2993 uniqtranscale_reg_value);
ab3c759a
CML
2994 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
2995 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
2996 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
2997 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 2998 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
2999
3000 return 0;
3001}
3002
e4a1d846
CML
3003static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3004{
3005 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3006 struct drm_i915_private *dev_priv = dev->dev_private;
3007 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3008 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3009 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3010 uint8_t train_set = intel_dp->train_set[0];
3011 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3012 enum pipe pipe = intel_crtc->pipe;
3013 int i;
e4a1d846
CML
3014
3015 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3016 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3017 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3018 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3019 deemph_reg_value = 128;
3020 margin_reg_value = 52;
3021 break;
bd60018a 3022 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3023 deemph_reg_value = 128;
3024 margin_reg_value = 77;
3025 break;
bd60018a 3026 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3027 deemph_reg_value = 128;
3028 margin_reg_value = 102;
3029 break;
bd60018a 3030 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3031 deemph_reg_value = 128;
3032 margin_reg_value = 154;
3033 /* FIXME extra to set for 1200 */
3034 break;
3035 default:
3036 return 0;
3037 }
3038 break;
bd60018a 3039 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3040 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3041 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3042 deemph_reg_value = 85;
3043 margin_reg_value = 78;
3044 break;
bd60018a 3045 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3046 deemph_reg_value = 85;
3047 margin_reg_value = 116;
3048 break;
bd60018a 3049 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3050 deemph_reg_value = 85;
3051 margin_reg_value = 154;
3052 break;
3053 default:
3054 return 0;
3055 }
3056 break;
bd60018a 3057 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3058 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3059 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3060 deemph_reg_value = 64;
3061 margin_reg_value = 104;
3062 break;
bd60018a 3063 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3064 deemph_reg_value = 64;
3065 margin_reg_value = 154;
3066 break;
3067 default:
3068 return 0;
3069 }
3070 break;
bd60018a 3071 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3072 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3073 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3074 deemph_reg_value = 43;
3075 margin_reg_value = 154;
3076 break;
3077 default:
3078 return 0;
3079 }
3080 break;
3081 default:
3082 return 0;
3083 }
3084
3085 mutex_lock(&dev_priv->dpio_lock);
3086
3087 /* Clear calc init */
1966e59e
VS
3088 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3089 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3090 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3091 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3092 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3093
3094 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3095 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3096 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3097 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3098 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3099
a02ef3c7
VS
3100 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3101 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3102 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3103 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3104
3105 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3106 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3107 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3108 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3109
e4a1d846 3110 /* Program swing deemph */
f72df8db
VS
3111 for (i = 0; i < 4; i++) {
3112 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3113 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3114 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3115 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3116 }
e4a1d846
CML
3117
3118 /* Program swing margin */
f72df8db
VS
3119 for (i = 0; i < 4; i++) {
3120 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3121 val &= ~DPIO_SWING_MARGIN000_MASK;
3122 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3123 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3124 }
e4a1d846
CML
3125
3126 /* Disable unique transition scale */
f72df8db
VS
3127 for (i = 0; i < 4; i++) {
3128 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3129 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3130 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3131 }
e4a1d846
CML
3132
3133 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3134 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3135 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3136 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3137
3138 /*
3139 * The document said it needs to set bit 27 for ch0 and bit 26
3140 * for ch1. Might be a typo in the doc.
3141 * For now, for this unique transition scale selection, set bit
3142 * 27 for ch0 and ch1.
3143 */
f72df8db
VS
3144 for (i = 0; i < 4; i++) {
3145 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3146 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3147 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3148 }
e4a1d846 3149
f72df8db
VS
3150 for (i = 0; i < 4; i++) {
3151 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3152 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3153 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3154 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3155 }
e4a1d846
CML
3156 }
3157
3158 /* Start swing calculation */
1966e59e
VS
3159 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3160 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3161 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3162
3163 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3164 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3165 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3166
3167 /* LRC Bypass */
3168 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3169 val |= DPIO_LRC_BYPASS;
3170 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3171
3172 mutex_unlock(&dev_priv->dpio_lock);
3173
3174 return 0;
3175}
3176
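/*
 * Pick the next voltage swing / pre-emphasis values: take the maximum
 * adjust request across all lanes from the link status, clamp to the
 * source's limits, and mark the levels as "max reached" when the limit
 * is hit.
 */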
a4fc5ed6 3177static void
0301b3ac
JN
3178intel_get_adjust_train(struct intel_dp *intel_dp,
3179 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3180{
3181 uint8_t v = 0;
3182 uint8_t p = 0;
3183 int lane;
1a2eb460
KP
3184 uint8_t voltage_max;
3185 uint8_t preemph_max;
a4fc5ed6 3186
33a34e4e 3187 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3188 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3189 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3190
3191 if (this_v > v)
3192 v = this_v;
3193 if (this_p > p)
3194 p = this_p;
3195 }
3196
1a2eb460 3197 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3198 if (v >= voltage_max)
3199 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3200
1a2eb460
KP
3201 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3202 if (p >= preemph_max)
3203 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3204
3205 for (lane = 0; lane < 4; lane++)
33a34e4e 3206 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3207}
3208
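/* Gen4's DP voltage swing and pre-emphasis control */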
3209static uint32_t
f0a3424e 3210intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3211{
3cf2efb1 3212 uint32_t signal_levels = 0;
a4fc5ed6 3213
3cf2efb1 3214 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3215 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3216 default:
3217 signal_levels |= DP_VOLTAGE_0_4;
3218 break;
bd60018a 3219 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3220 signal_levels |= DP_VOLTAGE_0_6;
3221 break;
bd60018a 3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3223 signal_levels |= DP_VOLTAGE_0_8;
3224 break;
bd60018a 3225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3226 signal_levels |= DP_VOLTAGE_1_2;
3227 break;
3228 }
3cf2efb1 3229 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3230 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3231 default:
3232 signal_levels |= DP_PRE_EMPHASIS_0;
3233 break;
bd60018a 3234 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3235 signal_levels |= DP_PRE_EMPHASIS_3_5;
3236 break;
bd60018a 3237 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3238 signal_levels |= DP_PRE_EMPHASIS_6;
3239 break;
bd60018a 3240 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3241 signal_levels |= DP_PRE_EMPHASIS_9_5;
3242 break;
3243 }
3244 return signal_levels;
3245}
3246
e3421a18
ZW
3247/* Gen6's DP voltage swing and pre-emphasis control */
3248static uint32_t
3249intel_gen6_edp_signal_levels(uint8_t train_set)
3250{
3c5a62b5
YL
3251 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3252 DP_TRAIN_PRE_EMPHASIS_MASK);
3253 switch (signal_levels) {
bd60018a
SJ
3254 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3256 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3258 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3260 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3261 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3263 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3264 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3267 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3268 default:
3c5a62b5
YL
3269 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3270 "0x%x\n", signal_levels);
3271 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3272 }
3273}
3274
1a2eb460
KP
3275/* Gen7's DP voltage swing and pre-emphasis control */
3276static uint32_t
3277intel_gen7_edp_signal_levels(uint8_t train_set)
3278{
3279 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3280 DP_TRAIN_PRE_EMPHASIS_MASK);
3281 switch (signal_levels) {
bd60018a 3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3283 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3285 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3287 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3288
bd60018a 3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3290 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3292 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3293
bd60018a 3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3295 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3297 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3298
3299 default:
3300 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3301 "0x%x\n", signal_levels);
3302 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3303 }
3304}
3305
d6c0d722
PZ
3306/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3307static uint32_t
f0a3424e 3308intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3309{
d6c0d722
PZ
3310 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3311 DP_TRAIN_PRE_EMPHASIS_MASK);
3312 switch (signal_levels) {
bd60018a 3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3314 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3316 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3317 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3318 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3319 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3320 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3321
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3323 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3325 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3327 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3328
bd60018a 3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3330 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3332 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3333
3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3335 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3336 default:
3337 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3338 "0x%x\n", signal_levels);
c5fe6a06 3339 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3340 }
a4fc5ed6
KP
3341}
3342
f0a3424e
PZ
3343/* Properly updates "DP" with the correct signal levels. */
3344static void
3345intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3346{
3347 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3348 enum port port = intel_dig_port->port;
f0a3424e
PZ
3349 struct drm_device *dev = intel_dig_port->base.base.dev;
3350 uint32_t signal_levels, mask;
3351 uint8_t train_set = intel_dp->train_set[0];
3352
5a9d1f1a 3353 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3354 signal_levels = intel_hsw_signal_levels(train_set);
3355 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3356 } else if (IS_CHERRYVIEW(dev)) {
3357 signal_levels = intel_chv_signal_levels(intel_dp);
3358 mask = 0;
e2fa6fba
P
3359 } else if (IS_VALLEYVIEW(dev)) {
3360 signal_levels = intel_vlv_signal_levels(intel_dp);
3361 mask = 0;
bc7d38a4 3362 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3363 signal_levels = intel_gen7_edp_signal_levels(train_set);
3364 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3365 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3366 signal_levels = intel_gen6_edp_signal_levels(train_set);
3367 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3368 } else {
3369 signal_levels = intel_gen4_signal_levels(train_set);
3370 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3371 }
3372
3373 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3374
3375 *DP = (*DP & ~mask) | signal_levels;
3376}
3377
a4fc5ed6 3378static bool
ea5b213a 3379intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3380 uint32_t *DP,
58e10eb9 3381 uint8_t dp_train_pat)
a4fc5ed6 3382{
174edf1f
PZ
3383 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3384 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3385 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3386 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3387 int ret, len;
a4fc5ed6 3388
7b13b58a 3389 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3390
70aff66c 3391 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3392 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3393
2cdfe6c8
JN
3394 buf[0] = dp_train_pat;
3395 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3396 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3397 /* don't write DP_TRAINING_LANEx_SET on disable */
3398 len = 1;
3399 } else {
3400 /* DP_TRAINING_LANEx_SET registers follow DP_TRAINING_PATTERN_SET */
3401 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3402 len = intel_dp->lane_count + 1;
47ea7542 3403 }
a4fc5ed6 3404
9d1a1031
JN
3405 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3406 buf, len);
2cdfe6c8
JN
3407
3408 return ret == len;
a4fc5ed6
KP
3409}
3410
70aff66c
JN
3411static bool
3412intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3413 uint8_t dp_train_pat)
3414{
953d22e8 3415 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3416 intel_dp_set_signal_levels(intel_dp, DP);
3417 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3418}
3419
3420static bool
3421intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3422 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3423{
3424 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3425 struct drm_device *dev = intel_dig_port->base.base.dev;
3426 struct drm_i915_private *dev_priv = dev->dev_private;
3427 int ret;
3428
3429 intel_get_adjust_train(intel_dp, link_status);
3430 intel_dp_set_signal_levels(intel_dp, DP);
3431
3432 I915_WRITE(intel_dp->output_reg, *DP);
3433 POSTING_READ(intel_dp->output_reg);
3434
9d1a1031
JN
3435 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3436 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3437
3438 return ret == intel_dp->lane_count;
3439}
3440
3ab9c637
ID
3441static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3442{
3443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3444 struct drm_device *dev = intel_dig_port->base.base.dev;
3445 struct drm_i915_private *dev_priv = dev->dev_private;
3446 enum port port = intel_dig_port->port;
3447 uint32_t val;
3448
3449 if (!HAS_DDI(dev))
3450 return;
3451
3452 val = I915_READ(DP_TP_CTL(port));
3453 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3454 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3455 I915_WRITE(DP_TP_CTL(port), val);
3456
3457 /*
3458 * On PORT_A we can have only eDP in SST mode. There, the only reason
3459 * we need to set idle transmission mode is to work around a HW issue
3460 * where we enable the pipe while not in idle link-training mode.
3461 * In this case there is a requirement to wait for a minimum number of
3462 * idle patterns to be sent.
3463 */
3464 if (port == PORT_A)
3465 return;
3466
3467 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3468 1))
3469 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3470}
3471
33a34e4e 3472/* Enable the corresponding port and start training pattern 1 */
c19b0669 3473void
33a34e4e 3474intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3475{
da63a9f2 3476 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3477 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3478 int i;
3479 uint8_t voltage;
cdb0e95b 3480 int voltage_tries, loop_tries;
ea5b213a 3481 uint32_t DP = intel_dp->DP;
6aba5b6c 3482 uint8_t link_config[2];
a4fc5ed6 3483
affa9354 3484 if (HAS_DDI(dev))
c19b0669
PZ
3485 intel_ddi_prepare_link_retrain(encoder);
3486
3cf2efb1 3487 /* Write the link configuration data */
6aba5b6c
JN
3488 link_config[0] = intel_dp->link_bw;
3489 link_config[1] = intel_dp->lane_count;
3490 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3491 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3492 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
a8f3ef61
SJ
3493 if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0])
3494 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3495 &intel_dp->rate_select, 1);
6aba5b6c
JN
3496
3497 link_config[0] = 0;
3498 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3499 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3500
3501 DP |= DP_PORT_EN;
1a2eb460 3502
70aff66c
JN
3503 /* clock recovery */
3504 if (!intel_dp_reset_link_train(intel_dp, &DP,
3505 DP_TRAINING_PATTERN_1 |
3506 DP_LINK_SCRAMBLING_DISABLE)) {
3507 DRM_ERROR("failed to enable link training\n");
3508 return;
3509 }
3510
a4fc5ed6 3511 voltage = 0xff;
cdb0e95b
KP
3512 voltage_tries = 0;
3513 loop_tries = 0;
a4fc5ed6 3514 for (;;) {
70aff66c 3515 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3516
a7c9655f 3517 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3518 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3519 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3520 break;
93f62dad 3521 }
a4fc5ed6 3522
01916270 3523 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3524 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3525 break;
3526 }
3527
3528 /* Check to see if we've tried the max voltage */
3529 for (i = 0; i < intel_dp->lane_count; i++)
3530 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3531 break;
3b4f819d 3532 if (i == intel_dp->lane_count) {
b06fbda3
DV
3533 ++loop_tries;
3534 if (loop_tries == 5) {
3def84b3 3535 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3536 break;
3537 }
70aff66c
JN
3538 intel_dp_reset_link_train(intel_dp, &DP,
3539 DP_TRAINING_PATTERN_1 |
3540 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3541 voltage_tries = 0;
3542 continue;
3543 }
a4fc5ed6 3544
3cf2efb1 3545 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3546 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3547 ++voltage_tries;
b06fbda3 3548 if (voltage_tries == 5) {
3def84b3 3549 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3550 break;
3551 }
3552 } else
3553 voltage_tries = 0;
3554 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3555
70aff66c
JN
3556 /* Update training set as requested by target */
3557 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3558 DRM_ERROR("failed to update link training\n");
3559 break;
3560 }
a4fc5ed6
KP
3561 }
3562
33a34e4e
JB
3563 intel_dp->DP = DP;
3564}
3565
c19b0669 3566void
33a34e4e
JB
3567intel_dp_complete_link_train(struct intel_dp *intel_dp)
3568{
33a34e4e 3569 bool channel_eq = false;
37f80975 3570 int tries, cr_tries;
33a34e4e 3571 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3572 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3573
3574 /* Training Pattern 3 for HBR2 or 1.2 devices that support it */
3575 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3576 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3577
a4fc5ed6 3578 /* channel equalization */
70aff66c 3579 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3580 training_pattern |
70aff66c
JN
3581 DP_LINK_SCRAMBLING_DISABLE)) {
3582 DRM_ERROR("failed to start channel equalization\n");
3583 return;
3584 }
3585
a4fc5ed6 3586 tries = 0;
37f80975 3587 cr_tries = 0;
a4fc5ed6
KP
3588 channel_eq = false;
3589 for (;;) {
70aff66c 3590 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3591
37f80975
JB
3592 if (cr_tries > 5) {
3593 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3594 break;
3595 }
3596
a7c9655f 3597 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3598 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3599 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3600 break;
70aff66c 3601 }
a4fc5ed6 3602
37f80975 3603 /* Make sure clock is still ok */
01916270 3604 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3605 intel_dp_start_link_train(intel_dp);
70aff66c 3606 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3607 training_pattern |
70aff66c 3608 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3609 cr_tries++;
3610 continue;
3611 }
3612
1ffdff13 3613 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3614 channel_eq = true;
3615 break;
3616 }
a4fc5ed6 3617
37f80975
JB
3618 /* Try 5 times, then try clock recovery if that fails */
3619 if (tries > 5) {
37f80975 3620 intel_dp_start_link_train(intel_dp);
70aff66c 3621 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3622 training_pattern |
70aff66c 3623 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3624 tries = 0;
3625 cr_tries++;
3626 continue;
3627 }
a4fc5ed6 3628
70aff66c
JN
3629 /* Update training set as requested by target */
3630 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3631 DRM_ERROR("failed to update link training\n");
3632 break;
3633 }
3cf2efb1 3634 ++tries;
869184a6 3635 }
3cf2efb1 3636
3ab9c637
ID
3637 intel_dp_set_idle_link_train(intel_dp);
3638
3639 intel_dp->DP = DP;
3640
d6c0d722 3641 if (channel_eq)
07f42258 3642 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3643
3ab9c637
ID
3644}
3645
3646void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3647{
70aff66c 3648 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3649 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3650}
3651
3652static void
ea5b213a 3653intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3654{
da63a9f2 3655 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3656 enum port port = intel_dig_port->port;
da63a9f2 3657 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3658 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3659 uint32_t DP = intel_dp->DP;
a4fc5ed6 3660
bc76e320 3661 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3662 return;
3663
0c33d8d7 3664 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3665 return;
3666
28c97730 3667 DRM_DEBUG_KMS("\n");
32f9d658 3668
bc7d38a4 3669 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3670 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3671 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3672 } else {
aad3d14d
VS
3673 if (IS_CHERRYVIEW(dev))
3674 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3675 else
3676 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3677 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3678 }
fe255d00 3679 POSTING_READ(intel_dp->output_reg);
5eb08b69 3680
493a7081 3681 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3682 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3683 /* Hardware workaround: leaving our transcoder select
3684 * set to transcoder B while it's off will prevent the
3685 * corresponding HDMI output on transcoder A.
3686 *
3687 * Combine this with another hardware workaround:
3688 * transcoder select bit can only be cleared while the
3689 * port is enabled.
3690 */
3691 DP &= ~DP_PIPEB_SELECT;
3692 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3693 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3694 }
3695
832afda6 3696 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3697 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3698 POSTING_READ(intel_dp->output_reg);
f01eca2e 3699 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3700}
3701
26d61aad
KP
3702static bool
3703intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3704{
a031d709
RV
3705 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3706 struct drm_device *dev = dig_port->base.base.dev;
3707 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3708 uint8_t rev;
a031d709 3709
9d1a1031
JN
3710 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3711 sizeof(intel_dp->dpcd)) < 0)
edb39244 3712 return false; /* aux transfer failed */
92fd8fd1 3713
a8e98153 3714 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3715
edb39244
AJ
3716 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3717 return false; /* DPCD not present */
3718
2293bb5c
SK
3719 /* Check if the panel supports PSR */
3720 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3721 if (is_edp(intel_dp)) {
9d1a1031
JN
3722 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3723 intel_dp->psr_dpcd,
3724 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3725 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3726 dev_priv->psr.sink_support = true;
50003939 3727 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3728 }
50003939
JN
3729 }
3730
7809a611 3731 /* Training Pattern 3 support, both source and sink */
06ea66b6 3732 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3733 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3734 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3735 intel_dp->use_tps3 = true;
f8d8a672 3736 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3737 } else
3738 intel_dp->use_tps3 = false;
3739
fc0f8e25
SJ
3740 /* Intermediate frequency support */
3741 if (is_edp(intel_dp) &&
3742 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3743 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3744 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3745 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3746 int i;
3747
fc0f8e25
SJ
3748 intel_dp_dpcd_read_wake(&intel_dp->aux,
3749 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3750 supported_rates,
3751 sizeof(supported_rates));
3752
3753 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3754 int val = le16_to_cpu(supported_rates[i]);
3755
3756 if (val == 0)
3757 break;
3758
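		/* DPCD rate table entries are in 200 kHz units; store them in kHz */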
3759 intel_dp->supported_rates[i] = val * 200;
3760 }
3761 intel_dp->num_supported_rates = i;
fc0f8e25 3762 }
edb39244
AJ
3763 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3764 DP_DWN_STRM_PORT_PRESENT))
3765 return true; /* native DP sink */
3766
3767 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3768 return true; /* no per-port downstream info */
3769
9d1a1031
JN
3770 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3771 intel_dp->downstream_ports,
3772 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3773 return false; /* downstream port status fetch failed */
3774
3775 return true;
92fd8fd1
KP
3776}
3777
0d198328
AJ
3778static void
3779intel_dp_probe_oui(struct intel_dp *intel_dp)
3780{
3781 u8 buf[3];
3782
3783 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3784 return;
3785
9d1a1031 3786 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3787 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3788 buf[0], buf[1], buf[2]);
3789
9d1a1031 3790 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3791 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3792 buf[0], buf[1], buf[2]);
3793}
3794
0e32b39c
DA
3795static bool
3796intel_dp_probe_mst(struct intel_dp *intel_dp)
3797{
3798 u8 buf[1];
3799
3800 if (!intel_dp->can_mst)
3801 return false;
3802
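	/* MST requires a DPCD 1.2 or newer sink */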
3803 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3804 return false;
3805
0e32b39c
DA
3806 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3807 if (buf[0] & DP_MST_CAP) {
3808 DRM_DEBUG_KMS("Sink is MST capable\n");
3809 intel_dp->is_mst = true;
3810 } else {
3811 DRM_DEBUG_KMS("Sink is not MST capable\n");
3812 intel_dp->is_mst = false;
3813 }
3814 }
0e32b39c
DA
3815
3816 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3817 return intel_dp->is_mst;
3818}
3819
d2e216d0
RV
3820int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3821{
3822 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3823 struct drm_device *dev = intel_dig_port->base.base.dev;
3824 struct intel_crtc *intel_crtc =
3825 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3826 u8 buf;
3827 int test_crc_count;
3828 int attempts = 6;
d2e216d0 3829
ad9dc91b 3830 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3831 return -EIO;
d2e216d0 3832
ad9dc91b 3833 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3834 return -ENOTTY;
3835
1dda5f93
RV
3836 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3837 return -EIO;
3838
9d1a1031 3839 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3840 buf | DP_TEST_SINK_START) < 0)
bda0381e 3841 return -EIO;
d2e216d0 3842
1dda5f93 3843 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3844 return -EIO;
ad9dc91b 3845 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3846
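	/* Wait up to 6 vblanks for the sink to update its CRC count */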
ad9dc91b 3847 do {
1dda5f93
RV
3848 if (drm_dp_dpcd_readb(&intel_dp->aux,
3849 DP_TEST_SINK_MISC, &buf) < 0)
3850 return -EIO;
ad9dc91b
RV
3851 intel_wait_for_vblank(dev, intel_crtc->pipe);
3852 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3853
3854 if (attempts == 0) {
90bd1f46
DV
3855 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3856 return -ETIMEDOUT;
ad9dc91b 3857 }
d2e216d0 3858
9d1a1031 3859 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3860 return -EIO;
d2e216d0 3861
1dda5f93
RV
3862 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3863 return -EIO;
3864 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3865 buf & ~DP_TEST_SINK_START) < 0)
3866 return -EIO;
ce31d9f4 3867
d2e216d0
RV
3868 return 0;
3869}
3870
a60f0e38
JB
3871static bool
3872intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3873{
9d1a1031
JN
3874 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3875 DP_DEVICE_SERVICE_IRQ_VECTOR,
3876 sink_irq_vector, 1) == 1;
a60f0e38
JB
3877}
3878
0e32b39c
DA
3879static bool
3880intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3881{
3882 int ret;
3883
3884 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3885 DP_SINK_COUNT_ESI,
3886 sink_irq_vector, 14);
3887 if (ret != 14)
3888 return false;
3889
3890 return true;
3891}
3892
a60f0e38
JB
3893static void
3894intel_dp_handle_test_request(struct intel_dp *intel_dp)
3895{
3896 /* NAK by default */
9d1a1031 3897 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3898}
3899
0e32b39c
DA
3900static int
3901intel_dp_check_mst_status(struct intel_dp *intel_dp)
3902{
3903 bool bret;
3904
3905 if (intel_dp->is_mst) {
3906 u8 esi[16] = { 0 };
3907 int ret = 0;
3908 int retry;
3909 bool handled;
3910 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3911go_again:
3912 if (bret == true) {
3913
3914 /* check link status - esi[10] = 0x200c */
3915 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3916 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3917 intel_dp_start_link_train(intel_dp);
3918 intel_dp_complete_link_train(intel_dp);
3919 intel_dp_stop_link_train(intel_dp);
3920 }
3921
6f34cc39 3922 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3923 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3924
3925 if (handled) {
3926 for (retry = 0; retry < 3; retry++) {
3927 int wret;
3928 wret = drm_dp_dpcd_write(&intel_dp->aux,
3929 DP_SINK_COUNT_ESI+1,
3930 &esi[1], 3);
3931 if (wret == 3) {
3932 break;
3933 }
3934 }
3935
3936 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3937 if (bret == true) {
6f34cc39 3938 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3939 goto go_again;
3940 }
3941 } else
3942 ret = 0;
3943
3944 return ret;
3945 } else {
3946 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3947 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3948 intel_dp->is_mst = false;
3949 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3950 /* send a hotplug event */
3951 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3952 }
3953 }
3954 return -EINVAL;
3955}
3956
a4fc5ed6
KP
3957/*
3958 * According to DP spec
3959 * 5.1.2:
3960 * 1. Read DPCD
3961 * 2. Configure link according to Receiver Capabilities
3962 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3963 * 4. Check link status on receipt of hot-plug interrupt
3964 */
a5146200 3965static void
ea5b213a 3966intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3967{
5b215bcf 3968 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3969 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3970 u8 sink_irq_vector;
93f62dad 3971 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3972
5b215bcf
DA
3973 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3974
da63a9f2 3975 if (!intel_encoder->connectors_active)
d2b996ac 3976 return;
59cd09e1 3977
da63a9f2 3978 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3979 return;
3980
1a125d8a
ID
3981 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3982 return;
3983
92fd8fd1 3984 /* Try to read receiver status if the link appears to be up */
93f62dad 3985 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3986 return;
3987 }
3988
92fd8fd1 3989 /* Now read the DPCD to see if it's actually running */
26d61aad 3990 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3991 return;
3992 }
3993
a60f0e38
JB
3994 /* Try to read the source of the interrupt */
3995 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
3996 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
3997 /* Clear interrupt source */
9d1a1031
JN
3998 drm_dp_dpcd_writeb(&intel_dp->aux,
3999 DP_DEVICE_SERVICE_IRQ_VECTOR,
4000 sink_irq_vector);
a60f0e38
JB
4001
4002 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4003 intel_dp_handle_test_request(intel_dp);
4004 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4005 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4006 }
4007
1ffdff13 4008 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4009 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4010 intel_encoder->base.name);
33a34e4e
JB
4011 intel_dp_start_link_train(intel_dp);
4012 intel_dp_complete_link_train(intel_dp);
3ab9c637 4013 intel_dp_stop_link_train(intel_dp);
33a34e4e 4014 }
a4fc5ed6 4015}
a4fc5ed6 4016
caf9ab24 4017/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4018static enum drm_connector_status
26d61aad 4019intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4020{
caf9ab24 4021 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4022 uint8_t type;
4023
4024 if (!intel_dp_get_dpcd(intel_dp))
4025 return connector_status_disconnected;
4026
4027 /* if there's no downstream port, we're done */
4028 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4029 return connector_status_connected;
caf9ab24
AJ
4030
4031 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4032 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4033 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4034 uint8_t reg;
9d1a1031
JN
4035
4036 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4037 &reg, 1) < 0)
caf9ab24 4038 return connector_status_unknown;
9d1a1031 4039
23235177
AJ
4040 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4041 : connector_status_disconnected;
caf9ab24
AJ
4042 }
4043
4044 /* If no HPD, poke DDC gently */
0b99836f 4045 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4046 return connector_status_connected;
caf9ab24
AJ
4047
4048 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4049 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4050 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4051 if (type == DP_DS_PORT_TYPE_VGA ||
4052 type == DP_DS_PORT_TYPE_NON_EDID)
4053 return connector_status_unknown;
4054 } else {
4055 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4056 DP_DWN_STRM_PORT_TYPE_MASK;
4057 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4058 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4059 return connector_status_unknown;
4060 }
caf9ab24
AJ
4061
4062 /* Anything else is out of spec, warn and ignore */
4063 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4064 return connector_status_disconnected;
71ba9000
AJ
4065}
4066
d410b56d
CW
4067static enum drm_connector_status
4068edp_detect(struct intel_dp *intel_dp)
4069{
4070 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4071 enum drm_connector_status status;
4072
4073 status = intel_panel_detect(dev);
4074 if (status == connector_status_unknown)
4075 status = connector_status_connected;
4076
4077 return status;
4078}
4079
5eb08b69 4080static enum drm_connector_status
a9756bb5 4081ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4082{
30add22d 4083 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4084 struct drm_i915_private *dev_priv = dev->dev_private;
4085 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4086
1b469639
DL
4087 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4088 return connector_status_disconnected;
4089
26d61aad 4090 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4091}
4092
2a592bec
DA
4093static int g4x_digital_port_connected(struct drm_device *dev,
4094 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4095{
a4fc5ed6 4096 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4097 uint32_t bit;
5eb08b69 4098
232a6ee9
TP
4099 if (IS_VALLEYVIEW(dev)) {
4100 switch (intel_dig_port->port) {
4101 case PORT_B:
4102 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4103 break;
4104 case PORT_C:
4105 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4106 break;
4107 case PORT_D:
4108 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4109 break;
4110 default:
2a592bec 4111 return -EINVAL;
232a6ee9
TP
4112 }
4113 } else {
4114 switch (intel_dig_port->port) {
4115 case PORT_B:
4116 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4117 break;
4118 case PORT_C:
4119 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4120 break;
4121 case PORT_D:
4122 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4123 break;
4124 default:
2a592bec 4125 return -EINVAL;
232a6ee9 4126 }
a4fc5ed6
KP
4127 }
4128
10f76a38 4129 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4130 return 0;
4131 return 1;
4132}
4133
4134static enum drm_connector_status
4135g4x_dp_detect(struct intel_dp *intel_dp)
4136{
4137 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4138 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4139 int ret;
4140
4141 /* Can't disconnect eDP, but you can close the lid... */
4142 if (is_edp(intel_dp)) {
4143 enum drm_connector_status status;
4144
4145 status = intel_panel_detect(dev);
4146 if (status == connector_status_unknown)
4147 status = connector_status_connected;
4148 return status;
4149 }
4150
4151 ret = g4x_digital_port_connected(dev, intel_dig_port);
4152 if (ret == -EINVAL)
4153 return connector_status_unknown;
4154 else if (ret == 0)
a4fc5ed6
KP
4155 return connector_status_disconnected;
4156
26d61aad 4157 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4158}
4159
8c241fef 4160static struct edid *
beb60608 4161intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4162{
beb60608 4163 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4164
9cd300e0
JN
4165 /* use cached edid if we have one */
4166 if (intel_connector->edid) {
9cd300e0
JN
4167 /* invalid edid */
4168 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4169 return NULL;
4170
55e9edeb 4171 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4172 } else
4173 return drm_get_edid(&intel_connector->base,
4174 &intel_dp->aux.ddc);
4175}
8c241fef 4176
beb60608
CW
4177static void
4178intel_dp_set_edid(struct intel_dp *intel_dp)
4179{
4180 struct intel_connector *intel_connector = intel_dp->attached_connector;
4181 struct edid *edid;
8c241fef 4182
beb60608
CW
4183 edid = intel_dp_get_edid(intel_dp);
4184 intel_connector->detect_edid = edid;
4185
4186 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4187 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4188 else
4189 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4190}
4191
beb60608
CW
4192static void
4193intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4194{
beb60608 4195 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4196
beb60608
CW
4197 kfree(intel_connector->detect_edid);
4198 intel_connector->detect_edid = NULL;
9cd300e0 4199
beb60608
CW
4200 intel_dp->has_audio = false;
4201}
d6f24d0f 4202
beb60608
CW
4203static enum intel_display_power_domain
4204intel_dp_power_get(struct intel_dp *dp)
4205{
4206 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4207 enum intel_display_power_domain power_domain;
4208
4209 power_domain = intel_display_port_power_domain(encoder);
4210 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4211
4212 return power_domain;
4213}
d6f24d0f 4214
beb60608
CW
4215static void
4216intel_dp_power_put(struct intel_dp *dp,
4217 enum intel_display_power_domain power_domain)
4218{
4219 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4220 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4221}
4222
a9756bb5
ZW
4223static enum drm_connector_status
4224intel_dp_detect(struct drm_connector *connector, bool force)
4225{
4226 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4227 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4228 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4229 struct drm_device *dev = connector->dev;
a9756bb5 4230 enum drm_connector_status status;
671dedd2 4231 enum intel_display_power_domain power_domain;
0e32b39c 4232 bool ret;
a9756bb5 4233
164c8598 4234 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4235 connector->base.id, connector->name);
beb60608 4236 intel_dp_unset_edid(intel_dp);
164c8598 4237
0e32b39c
DA
4238 if (intel_dp->is_mst) {
4239 /* MST devices are disconnected from a monitor POV */
4240 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4241 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4242 return connector_status_disconnected;
0e32b39c
DA
4243 }
4244
beb60608 4245 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4246
d410b56d
CW
4247 /* Can't disconnect eDP, but you can close the lid... */
4248 if (is_edp(intel_dp))
4249 status = edp_detect(intel_dp);
4250 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4251 status = ironlake_dp_detect(intel_dp);
4252 else
4253 status = g4x_dp_detect(intel_dp);
4254 if (status != connector_status_connected)
c8c8fb33 4255 goto out;
a9756bb5 4256
0d198328
AJ
4257 intel_dp_probe_oui(intel_dp);
4258
0e32b39c
DA
4259 ret = intel_dp_probe_mst(intel_dp);
4260 if (ret) {
4261 /* if we are in MST mode then this connector
4262 won't appear connected or have anything with EDID on it */
4263 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4264 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4265 status = connector_status_disconnected;
4266 goto out;
4267 }
4268
beb60608 4269 intel_dp_set_edid(intel_dp);
a9756bb5 4270
d63885da
PZ
4271 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4272 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4273 status = connector_status_connected;
4274
4275out:
beb60608 4276 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4277 return status;
a4fc5ed6
KP
4278}
4279
beb60608
CW
4280static void
4281intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4282{
df0e9248 4283 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4284 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4285 enum intel_display_power_domain power_domain;
a4fc5ed6 4286
beb60608
CW
4287 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4288 connector->base.id, connector->name);
4289 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4290
beb60608
CW
4291 if (connector->status != connector_status_connected)
4292 return;
671dedd2 4293
beb60608
CW
4294 power_domain = intel_dp_power_get(intel_dp);
4295
4296 intel_dp_set_edid(intel_dp);
4297
4298 intel_dp_power_put(intel_dp, power_domain);
4299
4300 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4301 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4302}
4303
4304static int intel_dp_get_modes(struct drm_connector *connector)
4305{
4306 struct intel_connector *intel_connector = to_intel_connector(connector);
4307 struct edid *edid;
4308
4309 edid = intel_connector->detect_edid;
4310 if (edid) {
4311 int ret = intel_connector_update_modes(connector, edid);
4312 if (ret)
4313 return ret;
4314 }
32f9d658 4315
f8779fda 4316 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4317 if (is_edp(intel_attached_dp(connector)) &&
4318 intel_connector->panel.fixed_mode) {
f8779fda 4319 struct drm_display_mode *mode;
beb60608
CW
4320
4321 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4322 intel_connector->panel.fixed_mode);
f8779fda 4323 if (mode) {
32f9d658
ZW
4324 drm_mode_probed_add(connector, mode);
4325 return 1;
4326 }
4327 }
beb60608 4328
32f9d658 4329 return 0;
a4fc5ed6
KP
4330}
4331
1aad7ac0
CW
4332static bool
4333intel_dp_detect_audio(struct drm_connector *connector)
4334{
1aad7ac0 4335 bool has_audio = false;
beb60608 4336 struct edid *edid;
1aad7ac0 4337
beb60608
CW
4338 edid = to_intel_connector(connector)->detect_edid;
4339 if (edid)
1aad7ac0 4340 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4341
1aad7ac0
CW
4342 return has_audio;
4343}
4344
f684960e
CW
4345static int
4346intel_dp_set_property(struct drm_connector *connector,
4347 struct drm_property *property,
4348 uint64_t val)
4349{
e953fd7b 4350 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4351 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4352 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4353 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4354 int ret;
4355
662595df 4356 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4357 if (ret)
4358 return ret;
4359
3f43c48d 4360 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4361 int i = val;
4362 bool has_audio;
4363
4364 if (i == intel_dp->force_audio)
f684960e
CW
4365 return 0;
4366
1aad7ac0 4367 intel_dp->force_audio = i;
f684960e 4368
c3e5f67b 4369 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4370 has_audio = intel_dp_detect_audio(connector);
4371 else
c3e5f67b 4372 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4373
4374 if (has_audio == intel_dp->has_audio)
f684960e
CW
4375 return 0;
4376
1aad7ac0 4377 intel_dp->has_audio = has_audio;
f684960e
CW
4378 goto done;
4379 }
4380
e953fd7b 4381 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4382 bool old_auto = intel_dp->color_range_auto;
4383 uint32_t old_range = intel_dp->color_range;
4384
55bc60db
VS
4385 switch (val) {
4386 case INTEL_BROADCAST_RGB_AUTO:
4387 intel_dp->color_range_auto = true;
4388 break;
4389 case INTEL_BROADCAST_RGB_FULL:
4390 intel_dp->color_range_auto = false;
4391 intel_dp->color_range = 0;
4392 break;
4393 case INTEL_BROADCAST_RGB_LIMITED:
4394 intel_dp->color_range_auto = false;
4395 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4396 break;
4397 default:
4398 return -EINVAL;
4399 }
ae4edb80
DV
4400
4401 if (old_auto == intel_dp->color_range_auto &&
4402 old_range == intel_dp->color_range)
4403 return 0;
4404
e953fd7b
CW
4405 goto done;
4406 }
4407
53b41837
YN
4408 if (is_edp(intel_dp) &&
4409 property == connector->dev->mode_config.scaling_mode_property) {
4410 if (val == DRM_MODE_SCALE_NONE) {
4411 DRM_DEBUG_KMS("no scaling not supported\n");
4412 return -EINVAL;
4413 }
4414
4415 if (intel_connector->panel.fitting_mode == val) {
4416 /* the eDP scaling property is not changed */
4417 return 0;
4418 }
4419 intel_connector->panel.fitting_mode = val;
4420
4421 goto done;
4422 }
4423
f684960e
CW
4424 return -EINVAL;
4425
4426done:
c0c36b94
CW
4427 if (intel_encoder->base.crtc)
4428 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4429
4430 return 0;
4431}
4432
a4fc5ed6 4433static void
73845adf 4434intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4435{
1d508706 4436 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4437
10e972d3 4438 kfree(intel_connector->detect_edid);
beb60608 4439
9cd300e0
JN
4440 if (!IS_ERR_OR_NULL(intel_connector->edid))
4441 kfree(intel_connector->edid);
4442
acd8db10
PZ
4443 /* Can't call is_edp() since the encoder may have been destroyed
4444 * already. */
4445 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4446 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4447
a4fc5ed6 4448 drm_connector_cleanup(connector);
55f78c43 4449 kfree(connector);
a4fc5ed6
KP
4450}
4451
00c09d70 4452void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4453{
da63a9f2
PZ
4454 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4455 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4456
4f71d0cb 4457 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4458 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4459 if (is_edp(intel_dp)) {
4460 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4461 /*
4462 * vdd might still be enabled due to the delayed vdd off.
4463 * Make sure vdd is actually turned off here.
4464 */
773538e8 4465 pps_lock(intel_dp);
4be73780 4466 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4467 pps_unlock(intel_dp);
4468
01527b31
CT
4469 if (intel_dp->edp_notifier.notifier_call) {
4470 unregister_reboot_notifier(&intel_dp->edp_notifier);
4471 intel_dp->edp_notifier.notifier_call = NULL;
4472 }
bd943159 4473 }
c8bd0e49 4474 drm_encoder_cleanup(encoder);
da63a9f2 4475 kfree(intel_dig_port);
24d05927
DV
4476}
4477
07f9cd0b
ID
4478static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4479{
4480 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4481
4482 if (!is_edp(intel_dp))
4483 return;
4484
951468f3
VS
4485 /*
4486 * vdd might still be enabled due to the delayed vdd off.
4487 * Make sure vdd is actually turned off here.
4488 */
afa4e53a 4489 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4490 pps_lock(intel_dp);
07f9cd0b 4491 edp_panel_vdd_off_sync(intel_dp);
773538e8 4492 pps_unlock(intel_dp);
07f9cd0b
ID
4493}
4494
49e6bc51
VS
4495static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4496{
4497 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4498 struct drm_device *dev = intel_dig_port->base.base.dev;
4499 struct drm_i915_private *dev_priv = dev->dev_private;
4500 enum intel_display_power_domain power_domain;
4501
4502 lockdep_assert_held(&dev_priv->pps_mutex);
4503
4504 if (!edp_have_panel_vdd(intel_dp))
4505 return;
4506
4507 /*
4508 * The VDD bit needs a power domain reference, so if the bit is
4509 * already enabled when we boot or resume, grab this reference and
4510 * schedule a vdd off, so we don't hold on to the reference
4511 * indefinitely.
4512 */
4513 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4514 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4515 intel_display_power_get(dev_priv, power_domain);
4516
4517 edp_panel_vdd_schedule_off(intel_dp);
4518}
4519
6d93c0c4
ID
4520static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4521{
49e6bc51
VS
4522 struct intel_dp *intel_dp;
4523
4524 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4525 return;
4526
4527 intel_dp = enc_to_intel_dp(encoder);
4528
4529 pps_lock(intel_dp);
4530
4531 /*
4532 * Read out the current power sequencer assignment,
4533 * in case the BIOS did something with it.
4534 */
4535 if (IS_VALLEYVIEW(encoder->dev))
4536 vlv_initial_power_sequencer_setup(intel_dp);
4537
4538 intel_edp_panel_vdd_sanitize(intel_dp);
4539
4540 pps_unlock(intel_dp);
6d93c0c4
ID
4541}
4542
a4fc5ed6 4543static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4544 .dpms = intel_connector_dpms,
a4fc5ed6 4545 .detect = intel_dp_detect,
beb60608 4546 .force = intel_dp_force,
a4fc5ed6 4547 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4548 .set_property = intel_dp_set_property,
2545e4a6 4549 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4550 .destroy = intel_dp_connector_destroy,
c6f95f27 4551 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4552};
4553
4554static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4555 .get_modes = intel_dp_get_modes,
4556 .mode_valid = intel_dp_mode_valid,
df0e9248 4557 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4558};
4559
a4fc5ed6 4560static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4561 .reset = intel_dp_encoder_reset,
24d05927 4562 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4563};
4564
0e32b39c 4565void
21d40d37 4566intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4567{
0e32b39c 4568 return;
c8110e52 4569}
6207937d 4570
b2c5c181 4571enum irqreturn
13cf5504
DA
4572intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4573{
4574 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4575 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4576 struct drm_device *dev = intel_dig_port->base.base.dev;
4577 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4578 enum intel_display_power_domain power_domain;
b2c5c181 4579 enum irqreturn ret = IRQ_NONE;
1c767b33 4580
0e32b39c
DA
4581 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4582 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4583
7a7f84cc
VS
4584 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4585 /*
4586 * vdd off can generate a long pulse on eDP which
4587 * would require vdd on to handle it, and thus we
4588 * would end up in an endless cycle of
4589 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4590 */
4591 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4592 port_name(intel_dig_port->port));
a8b3d52f 4593 return IRQ_HANDLED;
7a7f84cc
VS
4594 }
4595
26fbb774
VS
4596 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4597 port_name(intel_dig_port->port),
0e32b39c 4598 long_hpd ? "long" : "short");
13cf5504 4599
1c767b33
ID
4600 power_domain = intel_display_port_power_domain(intel_encoder);
4601 intel_display_power_get(dev_priv, power_domain);
4602
0e32b39c 4603 if (long_hpd) {
2a592bec
DA
4604
4605 if (HAS_PCH_SPLIT(dev)) {
4606 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4607 goto mst_fail;
4608 } else {
4609 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4610 goto mst_fail;
4611 }
0e32b39c
DA
4612
4613 if (!intel_dp_get_dpcd(intel_dp)) {
4614 goto mst_fail;
4615 }
4616
4617 intel_dp_probe_oui(intel_dp);
4618
4619 if (!intel_dp_probe_mst(intel_dp))
4620 goto mst_fail;
4621
4622 } else {
4623 if (intel_dp->is_mst) {
1c767b33 4624 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4625 goto mst_fail;
4626 }
4627
4628 if (!intel_dp->is_mst) {
4629 /*
4630 * we'll check the link status via the normal hot plug path later -
4631 * but for short hpds we should check it now
4632 */
5b215bcf 4633 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4634 intel_dp_check_link_status(intel_dp);
5b215bcf 4635 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4636 }
4637 }
b2c5c181
DV
4638
4639 ret = IRQ_HANDLED;
4640
1c767b33 4641 goto put_power;
0e32b39c
DA
4642mst_fail:
4643 /* if we were in MST mode and the device is not there, get out of MST mode */
4644 if (intel_dp->is_mst) {
4645 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4646 intel_dp->is_mst = false;
4647 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4648 }
1c767b33
ID
4649put_power:
4650 intel_display_power_put(dev_priv, power_domain);
4651
4652 return ret;
13cf5504
DA
4653}
4654
e3421a18
ZW
4655/* Return which DP Port should be selected for Transcoder DP control */
4656int
0206e353 4657intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4658{
4659 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4660 struct intel_encoder *intel_encoder;
4661 struct intel_dp *intel_dp;
e3421a18 4662
fa90ecef
PZ
4663 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4664 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4665
fa90ecef
PZ
4666 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4667 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4668 return intel_dp->output_reg;
e3421a18 4669 }
ea5b213a 4670
e3421a18
ZW
4671 return -1;
4672}
4673
36e83a18 4674/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4675bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4676{
4677 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4678 union child_device_config *p_child;
36e83a18 4679 int i;
5d8a7752
VS
4680 static const short port_mapping[] = {
4681 [PORT_B] = PORT_IDPB,
4682 [PORT_C] = PORT_IDPC,
4683 [PORT_D] = PORT_IDPD,
4684 };
36e83a18 4685
3b32a35b
VS
4686 if (port == PORT_A)
4687 return true;
4688
41aa3448 4689 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4690 return false;
4691
41aa3448
RV
4692 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4693 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4694
5d8a7752 4695 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4696 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4697 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4698 return true;
4699 }
4700 return false;
4701}
4702
0e32b39c 4703void
f684960e
CW
4704intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4705{
53b41837
YN
4706 struct intel_connector *intel_connector = to_intel_connector(connector);
4707
3f43c48d 4708 intel_attach_force_audio_property(connector);
e953fd7b 4709 intel_attach_broadcast_rgb_property(connector);
55bc60db 4710 intel_dp->color_range_auto = true;
53b41837
YN
4711
4712 if (is_edp(intel_dp)) {
4713 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4714 drm_object_attach_property(
4715 &connector->base,
53b41837 4716 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4717 DRM_MODE_SCALE_ASPECT);
4718 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4719 }
f684960e
CW
4720}
4721
dada1a9f
ID
4722static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4723{
4724 intel_dp->last_power_cycle = jiffies;
4725 intel_dp->last_power_on = jiffies;
4726 intel_dp->last_backlight_off = jiffies;
4727}
4728
67a54566
DV
4729static void
4730intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4731 struct intel_dp *intel_dp)
67a54566
DV
4732{
4733 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4734 struct edp_power_seq cur, vbt, spec,
4735 *final = &intel_dp->pps_delays;
67a54566 4736 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4737 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4738
e39b999a
VS
4739 lockdep_assert_held(&dev_priv->pps_mutex);
4740
81ddbc69
VS
4741 /* already initialized? */
4742 if (final->t11_t12 != 0)
4743 return;
4744
453c5420 4745 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4746 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4747 pp_on_reg = PCH_PP_ON_DELAYS;
4748 pp_off_reg = PCH_PP_OFF_DELAYS;
4749 pp_div_reg = PCH_PP_DIVISOR;
4750 } else {
bf13e81b
JN
4751 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4752
4753 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4754 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4755 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4756 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4757 }
67a54566
DV
4758
4759 /* Workaround: Need to write PP_CONTROL with the unlock key as
4760 * the very first thing. */
453c5420 4761 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4762 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4763
453c5420
JB
4764 pp_on = I915_READ(pp_on_reg);
4765 pp_off = I915_READ(pp_off_reg);
4766 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4767
4768 /* Pull timing values out of registers */
4769 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4770 PANEL_POWER_UP_DELAY_SHIFT;
4771
4772 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4773 PANEL_LIGHT_ON_DELAY_SHIFT;
4774
4775 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4776 PANEL_LIGHT_OFF_DELAY_SHIFT;
4777
4778 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4779 PANEL_POWER_DOWN_DELAY_SHIFT;
4780
4781 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4782 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4783
4784 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4785 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4786
41aa3448 4787 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4788
4789 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4790 * our hw here, which are all in 100usec. */
4791 spec.t1_t3 = 210 * 10;
4792 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4793 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4794 spec.t10 = 500 * 10;
4795 /* This one is special and actually in units of 100ms, but zero
4796 * based in the hw (so we need to add 100 ms). But the sw vbt
 4797 * table multiplies it by 1000 to make it in units of 100usec,
4798 * too. */
4799 spec.t11_t12 = (510 + 100) * 10;
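	/* Worked example (illustrative only): (510 + 100) * 10 = 6100 units
	 * of 100us, i.e. a 610 ms power cycle upper limit. */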
4800
4801 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4802 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4803
4804 /* Use the max of the register settings and vbt. If both are
4805 * unset, fall back to the spec limits. */
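	/* For illustration: if both cur.t1_t3 and vbt.t1_t3 are 0, the spec
	 * limit of 2100 (210 ms) is used; otherwise the larger value wins. */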
36b5f425 4806#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4807 spec.field : \
4808 max(cur.field, vbt.field))
4809 assign_final(t1_t3);
4810 assign_final(t8);
4811 assign_final(t9);
4812 assign_final(t10);
4813 assign_final(t11_t12);
4814#undef assign_final
4815
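	/* The final delays are in 100us units; DIV_ROUND_UP(x, 10) converts
	 * them to ms, e.g. a final t1_t3 of 2100 yields a 210 ms
	 * panel_power_up_delay (worked example only). */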
36b5f425 4816#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4817 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4818 intel_dp->backlight_on_delay = get_delay(t8);
4819 intel_dp->backlight_off_delay = get_delay(t9);
4820 intel_dp->panel_power_down_delay = get_delay(t10);
4821 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4822#undef get_delay
4823
f30d26e4
JN
4824 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4825 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4826 intel_dp->panel_power_cycle_delay);
4827
4828 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4829 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4830}
4831
4832static void
4833intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4834 struct intel_dp *intel_dp)
f30d26e4
JN
4835{
4836 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4837 u32 pp_on, pp_off, pp_div, port_sel = 0;
4838 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4839 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4840 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4841 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4842
e39b999a 4843 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4844
4845 if (HAS_PCH_SPLIT(dev)) {
4846 pp_on_reg = PCH_PP_ON_DELAYS;
4847 pp_off_reg = PCH_PP_OFF_DELAYS;
4848 pp_div_reg = PCH_PP_DIVISOR;
4849 } else {
bf13e81b
JN
4850 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4851
4852 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4853 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4854 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4855 }
4856
b2f19d1a
PZ
4857 /*
4858 * And finally store the new values in the power sequencer. The
4859 * backlight delays are set to 1 because we do manual waits on them. For
4860 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4861 * we'll end up waiting for the backlight off delay twice: once when we
4862 * do the manual sleep, and once when we disable the panel and wait for
4863 * the PP_STATUS bit to become zero.
4864 */
f30d26e4 4865 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4866 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4867 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4868 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4869 /* Compute the divisor for the pp clock, simply match the Bspec
4870 * formula. */
453c5420 4871 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4872 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4873 << PANEL_POWER_CYCLE_DELAY_SHIFT);
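	/* Worked example with hypothetical numbers: for a 125 MHz raw clock
	 * (div == 125) the reference divider field is (100 * 125) / 2 - 1 =
	 * 6249, and a t11_t12 of 6100 (100us units) becomes
	 * DIV_ROUND_UP(6100, 1000) = 7 power cycle delay units. */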
4874
4875 /* Haswell doesn't have any port selection bits for the panel
4876 * power sequencer any more. */
bc7d38a4 4877 if (IS_VALLEYVIEW(dev)) {
ad933b56 4878 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4879 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4880 if (port == PORT_A)
a24c144c 4881 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4882 else
a24c144c 4883 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4884 }
4885
453c5420
JB
4886 pp_on |= port_sel;
4887
4888 I915_WRITE(pp_on_reg, pp_on);
4889 I915_WRITE(pp_off_reg, pp_off);
4890 I915_WRITE(pp_div_reg, pp_div);
67a54566 4891
67a54566 4892 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4893 I915_READ(pp_on_reg),
4894 I915_READ(pp_off_reg),
4895 I915_READ(pp_div_reg));
f684960e
CW
4896}
4897
b33a2815
VK
4898/**
4899 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4900 * @dev: DRM device
4901 * @refresh_rate: RR to be programmed
4902 *
4903 * This function gets called when refresh rate (RR) has to be changed from
4904 * one frequency to another. Switches can be between high and low RR
4905 * supported by the panel or to any other RR based on media playback (in
4906 * this case, RR value needs to be passed from user space).
4907 *
4908 * The caller of this function needs to take a lock on dev_priv->drrs.
4909 */
96178eeb 4910static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4911{
4912 struct drm_i915_private *dev_priv = dev->dev_private;
4913 struct intel_encoder *encoder;
96178eeb
VK
4914 struct intel_digital_port *dig_port = NULL;
4915 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4916 struct intel_crtc_state *config = NULL;
439d7ac0 4917 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4918 u32 reg, val;
96178eeb 4919 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4920
4921 if (refresh_rate <= 0) {
4922 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4923 return;
4924 }
4925
96178eeb
VK
4926 if (intel_dp == NULL) {
4927 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4928 return;
4929 }
4930
1fcc9d1c 4931 /*
e4d59f6b
RV
4932 * FIXME: This needs proper synchronization with psr state for some
4933 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4934 */
439d7ac0 4935
96178eeb
VK
4936 dig_port = dp_to_dig_port(intel_dp);
4937 encoder = &dig_port->base;
439d7ac0
PB
4938 intel_crtc = encoder->new_crtc;
4939
4940 if (!intel_crtc) {
4941 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4942 return;
4943 }
4944
6e3c9717 4945 config = intel_crtc->config;
439d7ac0 4946
96178eeb 4947 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4948 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4949 return;
4950 }
4951
96178eeb
VK
4952 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4953 refresh_rate)
439d7ac0
PB
4954 index = DRRS_LOW_RR;
4955
96178eeb 4956 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4957 DRM_DEBUG_KMS(
4958 "DRRS requested for previously set RR...ignoring\n");
4959 return;
4960 }
4961
4962 if (!intel_crtc->active) {
4963 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4964 return;
4965 }
4966
44395bfe 4967 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4968 switch (index) {
4969 case DRRS_HIGH_RR:
4970 intel_dp_set_m_n(intel_crtc, M1_N1);
4971 break;
4972 case DRRS_LOW_RR:
4973 intel_dp_set_m_n(intel_crtc, M2_N2);
4974 break;
4975 case DRRS_MAX_RR:
4976 default:
4977 DRM_ERROR("Unsupported refreshrate type\n");
4978 }
4979 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4980 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4981 val = I915_READ(reg);
a4c30b1d 4982
439d7ac0 4983 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4984 if (IS_VALLEYVIEW(dev))
4985 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4986 else
4987 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4988 } else {
6fa7aec1
VK
4989 if (IS_VALLEYVIEW(dev))
4990 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4991 else
4992 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
4993 }
4994 I915_WRITE(reg, val);
4995 }
4996
4e9ac947
VK
4997 dev_priv->drrs.refresh_rate_type = index;
4998
4999 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5000}
5001
b33a2815
VK
5002/**
5003 * intel_edp_drrs_enable - init drrs struct if supported
5004 * @intel_dp: DP struct
5005 *
5006 * Initializes frontbuffer_bits and drrs.dp
5007 */
c395578e
VK
5008void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5009{
5010 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5011 struct drm_i915_private *dev_priv = dev->dev_private;
5012 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5013 struct drm_crtc *crtc = dig_port->base.base.crtc;
5014 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5015
5016 if (!intel_crtc->config->has_drrs) {
5017 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5018 return;
5019 }
5020
5021 mutex_lock(&dev_priv->drrs.mutex);
5022 if (WARN_ON(dev_priv->drrs.dp)) {
5023 DRM_ERROR("DRRS already enabled\n");
5024 goto unlock;
5025 }
5026
5027 dev_priv->drrs.busy_frontbuffer_bits = 0;
5028
5029 dev_priv->drrs.dp = intel_dp;
5030
5031unlock:
5032 mutex_unlock(&dev_priv->drrs.mutex);
5033}
5034
b33a2815
VK
5035/**
5036 * intel_edp_drrs_disable - Disable DRRS
5037 * @intel_dp: DP struct
5038 *
5039 */
c395578e
VK
5040void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5041{
5042 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5043 struct drm_i915_private *dev_priv = dev->dev_private;
5044 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5045 struct drm_crtc *crtc = dig_port->base.base.crtc;
5046 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5047
5048 if (!intel_crtc->config->has_drrs)
5049 return;
5050
5051 mutex_lock(&dev_priv->drrs.mutex);
5052 if (!dev_priv->drrs.dp) {
5053 mutex_unlock(&dev_priv->drrs.mutex);
5054 return;
5055 }
5056
5057 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5058 intel_dp_set_drrs_state(dev_priv->dev,
5059 intel_dp->attached_connector->panel.
5060 fixed_mode->vrefresh);
5061
5062 dev_priv->drrs.dp = NULL;
5063 mutex_unlock(&dev_priv->drrs.mutex);
5064
5065 cancel_delayed_work_sync(&dev_priv->drrs.work);
5066}
5067
4e9ac947
VK
5068static void intel_edp_drrs_downclock_work(struct work_struct *work)
5069{
5070 struct drm_i915_private *dev_priv =
5071 container_of(work, typeof(*dev_priv), drrs.work.work);
5072 struct intel_dp *intel_dp;
5073
5074 mutex_lock(&dev_priv->drrs.mutex);
5075
5076 intel_dp = dev_priv->drrs.dp;
5077
5078 if (!intel_dp)
5079 goto unlock;
5080
439d7ac0 5081 /*
4e9ac947
VK
5082 * The delayed work can race with an invalidate hence we need to
5083 * recheck.
439d7ac0
PB
5084 */
5085
4e9ac947
VK
5086 if (dev_priv->drrs.busy_frontbuffer_bits)
5087 goto unlock;
439d7ac0 5088
4e9ac947
VK
5089 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5090 intel_dp_set_drrs_state(dev_priv->dev,
5091 intel_dp->attached_connector->panel.
5092 downclock_mode->vrefresh);
439d7ac0 5093
4e9ac947 5094unlock:
439d7ac0 5095
4e9ac947 5096 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5097}
5098
b33a2815
VK
5099/**
5100 * intel_edp_drrs_invalidate - Invalidate DRRS
5101 * @dev: DRM device
5102 * @frontbuffer_bits: frontbuffer plane tracking bits
5103 *
5104 * When there is a disturbance on screen (due to cursor movement/time
 5105 * update etc.), DRRS needs to be invalidated, i.e. we need to switch to
 5106 * the high RR.
5107 *
5108 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5109 */
a93fad0f
VK
5110void intel_edp_drrs_invalidate(struct drm_device *dev,
5111 unsigned frontbuffer_bits)
5112{
5113 struct drm_i915_private *dev_priv = dev->dev_private;
5114 struct drm_crtc *crtc;
5115 enum pipe pipe;
5116
5117 if (!dev_priv->drrs.dp)
5118 return;
5119
3954e733
R
5120 cancel_delayed_work_sync(&dev_priv->drrs.work);
5121
a93fad0f
VK
5122 mutex_lock(&dev_priv->drrs.mutex);
5123 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5124 pipe = to_intel_crtc(crtc)->pipe;
5125
5126 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5127 intel_dp_set_drrs_state(dev_priv->dev,
5128 dev_priv->drrs.dp->attached_connector->panel.
5129 fixed_mode->vrefresh);
5130 }
5131
5132 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5133
5134 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5135 mutex_unlock(&dev_priv->drrs.mutex);
5136}
5137
b33a2815
VK
5138/**
5139 * intel_edp_drrs_flush - Flush DRRS
5140 * @dev: DRM device
5141 * @frontbuffer_bits: frontbuffer plane tracking bits
5142 *
5143 * When there is no movement on screen, DRRS work can be scheduled.
5144 * This DRRS work is responsible for setting relevant registers after a
5145 * timeout of 1 second.
5146 *
5147 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5148 */
a93fad0f
VK
5149void intel_edp_drrs_flush(struct drm_device *dev,
5150 unsigned frontbuffer_bits)
5151{
5152 struct drm_i915_private *dev_priv = dev->dev_private;
5153 struct drm_crtc *crtc;
5154 enum pipe pipe;
5155
5156 if (!dev_priv->drrs.dp)
5157 return;
5158
3954e733
R
5159 cancel_delayed_work_sync(&dev_priv->drrs.work);
5160
a93fad0f
VK
5161 mutex_lock(&dev_priv->drrs.mutex);
5162 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5163 pipe = to_intel_crtc(crtc)->pipe;
5164 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5165
a93fad0f
VK
5166 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5167 !dev_priv->drrs.busy_frontbuffer_bits)
5168 schedule_delayed_work(&dev_priv->drrs.work,
5169 msecs_to_jiffies(1000));
5170 mutex_unlock(&dev_priv->drrs.mutex);
5171}
5172
b33a2815
VK
5173/**
5174 * DOC: Display Refresh Rate Switching (DRRS)
5175 *
5176 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5177 * which enables switching between low and high refresh rates,
5178 * dynamically, based on the usage scenario. This feature is applicable
5179 * for internal panels.
5180 *
5181 * Indication that the panel supports DRRS is given by the panel EDID, which
5182 * would list multiple refresh rates for one resolution.
5183 *
5184 * DRRS is of 2 types - static and seamless.
5185 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5186 * (may appear as a blink on screen) and is used in dock-undock scenario.
 5187 * Seamless DRRS involves changing RR without any visible effect to the user
5188 * and can be used during normal system usage. This is done by programming
5189 * certain registers.
5190 *
5191 * Support for static/seamless DRRS may be indicated in the VBT based on
5192 * inputs from the panel spec.
5193 *
5194 * DRRS saves power by switching to low RR based on usage scenarios.
5195 *
5196 * eDP DRRS:-
5197 * The implementation is based on frontbuffer tracking implementation.
5198 * When there is a disturbance on the screen triggered by user activity or a
5199 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5200 * When there is no movement on screen, after a timeout of 1 second, a switch
5201 * to low RR is made.
5202 * For integration with frontbuffer tracking code,
5203 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5204 *
5205 * DRRS can be further extended to support other internal panels and also
5206 * the scenario of video playback wherein RR is set based on the rate
5207 * requested by userspace.
5208 */
5209
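/*
 * Illustrative sketch only (not part of the driver): how frontbuffer
 * tracking code could drive the DRRS helpers above. The two example_*
 * functions and the obj_bits mask are hypothetical; only
 * intel_edp_drrs_invalidate()/intel_edp_drrs_flush() are real.
 */
#if 0
static void example_frontbuffer_dirty(struct drm_device *dev, unsigned obj_bits)
{
	/* screen activity: force the panel back to the high refresh rate */
	intel_edp_drrs_invalidate(dev, obj_bits);
}

static void example_frontbuffer_flushed(struct drm_device *dev, unsigned obj_bits)
{
	/* no pending updates: arm the 1 second downclock work */
	intel_edp_drrs_flush(dev, obj_bits);
}
#endif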
5210/**
5211 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5212 * @intel_connector: eDP connector
5213 * @fixed_mode: preferred mode of panel
5214 *
5215 * This function is called only once at driver load to initialize basic
5216 * DRRS stuff.
5217 *
5218 * Returns:
5219 * Downclock mode if panel supports it, else return NULL.
5220 * DRRS support is determined by the presence of downclock mode (apart
5221 * from VBT setting).
5222 */
4f9db5b5 5223static struct drm_display_mode *
96178eeb
VK
5224intel_dp_drrs_init(struct intel_connector *intel_connector,
5225 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5226{
5227 struct drm_connector *connector = &intel_connector->base;
96178eeb 5228 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5229 struct drm_i915_private *dev_priv = dev->dev_private;
5230 struct drm_display_mode *downclock_mode = NULL;
5231
5232 if (INTEL_INFO(dev)->gen <= 6) {
5233 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5234 return NULL;
5235 }
5236
5237 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5238 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5239 return NULL;
5240 }
5241
5242 downclock_mode = intel_find_panel_downclock
5243 (dev, fixed_mode, connector);
5244
5245 if (!downclock_mode) {
a1d26342 5246 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
4f9db5b5
PB
5247 return NULL;
5248 }
5249
4e9ac947
VK
5250 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5251
96178eeb 5252 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5253
96178eeb 5254 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5255
96178eeb 5256 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5257 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5258 return downclock_mode;
5259}
5260
ed92f0b2 5261static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5262 struct intel_connector *intel_connector)
ed92f0b2
PZ
5263{
5264 struct drm_connector *connector = &intel_connector->base;
5265 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5266 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5267 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5268 struct drm_i915_private *dev_priv = dev->dev_private;
5269 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5270 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5271 bool has_dpcd;
5272 struct drm_display_mode *scan;
5273 struct edid *edid;
6517d273 5274 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5275
96178eeb 5276 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5277
ed92f0b2
PZ
5278 if (!is_edp(intel_dp))
5279 return true;
5280
49e6bc51
VS
5281 pps_lock(intel_dp);
5282 intel_edp_panel_vdd_sanitize(intel_dp);
5283 pps_unlock(intel_dp);
63635217 5284
ed92f0b2 5285 /* Cache DPCD and EDID for edp. */
ed92f0b2 5286 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5287
5288 if (has_dpcd) {
5289 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5290 dev_priv->no_aux_handshake =
5291 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5292 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5293 } else {
5294 /* if this fails, presume the device is a ghost */
5295 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5296 return false;
5297 }
5298
5299 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5300 pps_lock(intel_dp);
36b5f425 5301 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5302 pps_unlock(intel_dp);
ed92f0b2 5303
060c8778 5304 mutex_lock(&dev->mode_config.mutex);
0b99836f 5305 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5306 if (edid) {
5307 if (drm_add_edid_modes(connector, edid)) {
5308 drm_mode_connector_update_edid_property(connector,
5309 edid);
5310 drm_edid_to_eld(connector, edid);
5311 } else {
5312 kfree(edid);
5313 edid = ERR_PTR(-EINVAL);
5314 }
5315 } else {
5316 edid = ERR_PTR(-ENOENT);
5317 }
5318 intel_connector->edid = edid;
5319
5320 /* prefer fixed mode from EDID if available */
5321 list_for_each_entry(scan, &connector->probed_modes, head) {
5322 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5323 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5324 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5325 intel_connector, fixed_mode);
ed92f0b2
PZ
5326 break;
5327 }
5328 }
5329
5330 /* fallback to VBT if available for eDP */
5331 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5332 fixed_mode = drm_mode_duplicate(dev,
5333 dev_priv->vbt.lfp_lvds_vbt_mode);
5334 if (fixed_mode)
5335 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5336 }
060c8778 5337 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5338
01527b31
CT
5339 if (IS_VALLEYVIEW(dev)) {
5340 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5341 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5342
5343 /*
5344 * Figure out the current pipe for the initial backlight setup.
5345 * If the current pipe isn't valid, try the PPS pipe, and if that
5346 * fails just assume pipe A.
5347 */
5348 if (IS_CHERRYVIEW(dev))
5349 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5350 else
5351 pipe = PORT_TO_PIPE(intel_dp->DP);
5352
5353 if (pipe != PIPE_A && pipe != PIPE_B)
5354 pipe = intel_dp->pps_pipe;
5355
5356 if (pipe != PIPE_A && pipe != PIPE_B)
5357 pipe = PIPE_A;
5358
5359 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5360 pipe_name(pipe));
01527b31
CT
5361 }
5362
4f9db5b5 5363 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5364 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5365 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5366
5367 return true;
5368}
5369
16c25533 5370bool
f0fec3f2
PZ
5371intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5372 struct intel_connector *intel_connector)
a4fc5ed6 5373{
f0fec3f2
PZ
5374 struct drm_connector *connector = &intel_connector->base;
5375 struct intel_dp *intel_dp = &intel_dig_port->dp;
5376 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5377 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5378 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5379 enum port port = intel_dig_port->port;
0b99836f 5380 int type;
a4fc5ed6 5381
a4a5d2f8
VS
5382 intel_dp->pps_pipe = INVALID_PIPE;
5383
ec5b01dd 5384 /* intel_dp vfuncs */
b6b5e383
DL
5385 if (INTEL_INFO(dev)->gen >= 9)
5386 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5387 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5388 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5389 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5390 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5391 else if (HAS_PCH_SPLIT(dev))
5392 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5393 else
5394 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5395
b9ca5fad
DL
5396 if (INTEL_INFO(dev)->gen >= 9)
5397 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5398 else
5399 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5400
0767935e
DV
5401 /* Preserve the current hw state. */
5402 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5403 intel_dp->attached_connector = intel_connector;
3d3dc149 5404
3b32a35b 5405 if (intel_dp_is_edp(dev, port))
b329530c 5406 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5407 else
5408 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5409
f7d24902
ID
5410 /*
5411 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5412 * for DP the encoder type can be set by the caller to
5413 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5414 */
5415 if (type == DRM_MODE_CONNECTOR_eDP)
5416 intel_encoder->type = INTEL_OUTPUT_EDP;
5417
c17ed5b5
VS
5418 /* eDP only on port B and/or C on vlv/chv */
5419 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5420 port != PORT_B && port != PORT_C))
5421 return false;
5422
e7281eab
ID
5423 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5424 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5425 port_name(port));
5426
b329530c 5427 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5428 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5429
a4fc5ed6
KP
5430 connector->interlace_allowed = true;
5431 connector->doublescan_allowed = 0;
5432
f0fec3f2 5433 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5434 edp_panel_vdd_work);
a4fc5ed6 5435
df0e9248 5436 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5437 drm_connector_register(connector);
a4fc5ed6 5438
affa9354 5439 if (HAS_DDI(dev))
bcbc889b
PZ
5440 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5441 else
5442 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5443 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5444
0b99836f 5445 /* Set up the hotplug pin. */
ab9d7c30
PZ
5446 switch (port) {
5447 case PORT_A:
1d843f9d 5448 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5449 break;
5450 case PORT_B:
1d843f9d 5451 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5452 break;
5453 case PORT_C:
1d843f9d 5454 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5455 break;
5456 case PORT_D:
1d843f9d 5457 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5458 break;
5459 default:
ad1c0b19 5460 BUG();
5eb08b69
ZW
5461 }
5462
dada1a9f 5463 if (is_edp(intel_dp)) {
773538e8 5464 pps_lock(intel_dp);
1e74a324
VS
5465 intel_dp_init_panel_power_timestamps(intel_dp);
5466 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5467 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5468 else
36b5f425 5469 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5470 pps_unlock(intel_dp);
dada1a9f 5471 }
0095e6dc 5472
9d1a1031 5473 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5474
0e32b39c 5475 /* init MST on ports that can support it */
c86ea3d0 5476 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5477 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5478 intel_dp_mst_encoder_init(intel_dig_port,
5479 intel_connector->base.base.id);
0e32b39c
DA
5480 }
5481 }
5482
36b5f425 5483 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5484 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5485 if (is_edp(intel_dp)) {
5486 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5487 /*
 5488 * vdd might still be enabled due to the delayed vdd off.
5489 * Make sure vdd is actually turned off here.
5490 */
773538e8 5491 pps_lock(intel_dp);
4be73780 5492 edp_panel_vdd_off_sync(intel_dp);
773538e8 5493 pps_unlock(intel_dp);
15b1d171 5494 }
34ea3d38 5495 drm_connector_unregister(connector);
b2f246a8 5496 drm_connector_cleanup(connector);
16c25533 5497 return false;
b2f246a8 5498 }
32f9d658 5499
f684960e
CW
5500 intel_dp_add_properties(intel_dp, connector);
5501
a4fc5ed6
KP
5502 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5503 * 0xd. Failure to do so will result in spurious interrupts being
5504 * generated on the port when a cable is not attached.
5505 */
5506 if (IS_G4X(dev) && !IS_GM45(dev)) {
5507 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5508 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5509 }
16c25533
PZ
5510
5511 return true;
a4fc5ed6 5512}
f0fec3f2
PZ
5513
5514void
5515intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5516{
13cf5504 5517 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5518 struct intel_digital_port *intel_dig_port;
5519 struct intel_encoder *intel_encoder;
5520 struct drm_encoder *encoder;
5521 struct intel_connector *intel_connector;
5522
b14c5679 5523 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5524 if (!intel_dig_port)
5525 return;
5526
b14c5679 5527 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5528 if (!intel_connector) {
5529 kfree(intel_dig_port);
5530 return;
5531 }
5532
5533 intel_encoder = &intel_dig_port->base;
5534 encoder = &intel_encoder->base;
5535
5536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5537 DRM_MODE_ENCODER_TMDS);
5538
5bfe2ac0 5539 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5540 intel_encoder->disable = intel_disable_dp;
00c09d70 5541 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5542 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5543 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5544 if (IS_CHERRYVIEW(dev)) {
9197c88b 5545 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5546 intel_encoder->pre_enable = chv_pre_enable_dp;
5547 intel_encoder->enable = vlv_enable_dp;
580d3811 5548 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5549 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5550 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5551 intel_encoder->pre_enable = vlv_pre_enable_dp;
5552 intel_encoder->enable = vlv_enable_dp;
49277c31 5553 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5554 } else {
ecff4f3b
JN
5555 intel_encoder->pre_enable = g4x_pre_enable_dp;
5556 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5557 if (INTEL_INFO(dev)->gen >= 5)
5558 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5559 }
f0fec3f2 5560
174edf1f 5561 intel_dig_port->port = port;
f0fec3f2
PZ
5562 intel_dig_port->dp.output_reg = output_reg;
5563
00c09d70 5564 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5565 if (IS_CHERRYVIEW(dev)) {
5566 if (port == PORT_D)
5567 intel_encoder->crtc_mask = 1 << 2;
5568 else
5569 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5570 } else {
5571 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5572 }
bc079e8b 5573 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5574 intel_encoder->hot_plug = intel_dp_hot_plug;
5575
13cf5504
DA
5576 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5577 dev_priv->hpd_irq_port[port] = intel_dig_port;
5578
15b1d171
PZ
5579 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5580 drm_encoder_cleanup(encoder);
5581 kfree(intel_dig_port);
b2f246a8 5582 kfree(intel_connector);
15b1d171 5583 }
f0fec3f2 5584}
0e32b39c
DA
5585
5586void intel_dp_mst_suspend(struct drm_device *dev)
5587{
5588 struct drm_i915_private *dev_priv = dev->dev_private;
5589 int i;
5590
5591 /* disable MST */
5592 for (i = 0; i < I915_MAX_PORTS; i++) {
5593 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5594 if (!intel_dig_port)
5595 continue;
5596
5597 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5598 if (!intel_dig_port->dp.can_mst)
5599 continue;
5600 if (intel_dig_port->dp.is_mst)
5601 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5602 }
5603 }
5604}
5605
5606void intel_dp_mst_resume(struct drm_device *dev)
5607{
5608 struct drm_i915_private *dev_priv = dev->dev_private;
5609 int i;
5610
5611 for (i = 0; i < I915_MAX_PORTS; i++) {
5612 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5613 if (!intel_dig_port)
5614 continue;
5615 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5616 int ret;
5617
5618 if (!intel_dig_port->dp.can_mst)
5619 continue;
5620
5621 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5622 if (ret != 0) {
5623 intel_dp_check_mst_status(&intel_dig_port->dp);
5624 }
5625 }
5626 }
5627}