drm/i915: Fix MST link rate handling
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
9dd4ffdf
CML
44struct dp_link_dpll {
45 int link_bw;
46 struct dpll dpll;
47};
48
49static const struct dp_link_dpll gen4_dpll[] = {
50 { DP_LINK_BW_1_62,
51 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
52 { DP_LINK_BW_2_7,
53 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
54};
55
56static const struct dp_link_dpll pch_dpll[] = {
57 { DP_LINK_BW_1_62,
58 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
59 { DP_LINK_BW_2_7,
60 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
61};
62
65ce4bf5
CML
63static const struct dp_link_dpll vlv_dpll[] = {
64 { DP_LINK_BW_1_62,
58f6e632 65 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
65ce4bf5
CML
66 { DP_LINK_BW_2_7,
67 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
68};
69
ef9348c8
CML
70/*
71 * CHV supports eDP 1.4 that have more link rates.
72 * Below only provides the fixed rate but exclude variable rate.
73 */
74static const struct dp_link_dpll chv_dpll[] = {
75 /*
76 * CHV requires to program fractional division for m2.
77 * m2 is stored in fixed point format using formula below
78 * (m2_int << 22) | m2_fraction
79 */
80 { DP_LINK_BW_1_62, /* m2_int = 32, m2_fraction = 1677722 */
81 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
82 { DP_LINK_BW_2_7, /* m2_int = 27, m2_fraction = 0 */
83 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
84 { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
85 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
86};
a8f3ef61 87/* Skylake supports following rates */
f4896f15
VS
88static const int gen9_rates[] = { 162000, 216000, 270000,
89 324000, 432000, 540000 };
90static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 91
cfcb0fc9
JB
92/**
93 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
94 * @intel_dp: DP struct
95 *
96 * If a CPU or PCH DP output is attached to an eDP panel, this function
97 * will return true, and false otherwise.
98 */
99static bool is_edp(struct intel_dp *intel_dp)
100{
da63a9f2
PZ
101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
102
103 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
104}
105
68b4d824 106static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 107{
68b4d824
ID
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109
110 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
111}
112
df0e9248
CW
113static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
114{
fa90ecef 115 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
116}
117
ea5b213a 118static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 119static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 120static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 121static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
122static void vlv_steal_power_sequencer(struct drm_device *dev,
123 enum pipe pipe);
a4fc5ed6 124
ed4e9c1d
VS
125static int
126intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 127{
7183dc29 128 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
129
130 switch (max_link_bw) {
131 case DP_LINK_BW_1_62:
132 case DP_LINK_BW_2_7:
1db10e28 133 case DP_LINK_BW_5_4:
d4eead50 134 break;
a4fc5ed6 135 default:
d4eead50
ID
136 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
137 max_link_bw);
a4fc5ed6
KP
138 max_link_bw = DP_LINK_BW_1_62;
139 break;
140 }
141 return max_link_bw;
142}
143
eeb6324d
PZ
144static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
145{
146 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
147 struct drm_device *dev = intel_dig_port->base.base.dev;
148 u8 source_max, sink_max;
149
150 source_max = 4;
151 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
152 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
153 source_max = 2;
154
155 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
156
157 return min(source_max, sink_max);
158}
159
cd9dde44
AJ
160/*
161 * The units on the numbers in the next two are... bizarre. Examples will
162 * make it clearer; this one parallels an example in the eDP spec.
163 *
164 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
165 *
166 * 270000 * 1 * 8 / 10 == 216000
167 *
168 * The actual data capacity of that configuration is 2.16Gbit/s, so the
169 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
170 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
171 * 119000. At 18bpp that's 2142000 kilobits per second.
172 *
173 * Thus the strange-looking division by 10 in intel_dp_link_required, to
174 * get the result in decakilobits instead of kilobits.
175 */
176
a4fc5ed6 177static int
c898261c 178intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 179{
cd9dde44 180 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
181}
182
fe27d53e
DA
183static int
184intel_dp_max_data_rate(int max_link_clock, int max_lanes)
185{
186 return (max_link_clock * max_lanes * 8) / 10;
187}
188
c19de8eb 189static enum drm_mode_status
a4fc5ed6
KP
190intel_dp_mode_valid(struct drm_connector *connector,
191 struct drm_display_mode *mode)
192{
df0e9248 193 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
194 struct intel_connector *intel_connector = to_intel_connector(connector);
195 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
196 int target_clock = mode->clock;
197 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 198
dd06f90e
JN
199 if (is_edp(intel_dp) && fixed_mode) {
200 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
201 return MODE_PANEL;
202
dd06f90e 203 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 204 return MODE_PANEL;
03afc4a2
DV
205
206 target_clock = fixed_mode->clock;
7de56f43
ZY
207 }
208
50fec21a 209 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 210 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
211
212 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
213 mode_rate = intel_dp_link_required(target_clock, 18);
214
215 if (mode_rate > max_rate)
c4867936 216 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
217
218 if (mode->clock < 10000)
219 return MODE_CLOCK_LOW;
220
0af78a2b
DV
221 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
222 return MODE_H_ILLEGAL;
223
a4fc5ed6
KP
224 return MODE_OK;
225}
226
a4f1289e 227uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
228{
229 int i;
230 uint32_t v = 0;
231
232 if (src_bytes > 4)
233 src_bytes = 4;
234 for (i = 0; i < src_bytes; i++)
235 v |= ((uint32_t) src[i]) << ((3-i) * 8);
236 return v;
237}
238
c2af70e2 239static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
240{
241 int i;
242 if (dst_bytes > 4)
243 dst_bytes = 4;
244 for (i = 0; i < dst_bytes; i++)
245 dst[i] = src >> ((3-i) * 8);
246}
247
fb0f8fbf
KP
248/* hrawclock is 1/4 the FSB frequency */
249static int
250intel_hrawclk(struct drm_device *dev)
251{
252 struct drm_i915_private *dev_priv = dev->dev_private;
253 uint32_t clkcfg;
254
9473c8f4
VP
255 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
256 if (IS_VALLEYVIEW(dev))
257 return 200;
258
fb0f8fbf
KP
259 clkcfg = I915_READ(CLKCFG);
260 switch (clkcfg & CLKCFG_FSB_MASK) {
261 case CLKCFG_FSB_400:
262 return 100;
263 case CLKCFG_FSB_533:
264 return 133;
265 case CLKCFG_FSB_667:
266 return 166;
267 case CLKCFG_FSB_800:
268 return 200;
269 case CLKCFG_FSB_1067:
270 return 266;
271 case CLKCFG_FSB_1333:
272 return 333;
273 /* these two are just a guess; one of them might be right */
274 case CLKCFG_FSB_1600:
275 case CLKCFG_FSB_1600_ALT:
276 return 400;
277 default:
278 return 133;
279 }
280}
281
bf13e81b
JN
282static void
283intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 284 struct intel_dp *intel_dp);
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b 288
773538e8
VS
289static void pps_lock(struct intel_dp *intel_dp)
290{
291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
292 struct intel_encoder *encoder = &intel_dig_port->base;
293 struct drm_device *dev = encoder->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 enum intel_display_power_domain power_domain;
296
297 /*
298 * See vlv_power_sequencer_reset() why we need
299 * a power domain reference here.
300 */
301 power_domain = intel_display_port_power_domain(encoder);
302 intel_display_power_get(dev_priv, power_domain);
303
304 mutex_lock(&dev_priv->pps_mutex);
305}
306
307static void pps_unlock(struct intel_dp *intel_dp)
308{
309 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
310 struct intel_encoder *encoder = &intel_dig_port->base;
311 struct drm_device *dev = encoder->base.dev;
312 struct drm_i915_private *dev_priv = dev->dev_private;
313 enum intel_display_power_domain power_domain;
314
315 mutex_unlock(&dev_priv->pps_mutex);
316
317 power_domain = intel_display_port_power_domain(encoder);
318 intel_display_power_put(dev_priv, power_domain);
319}
320
961a0db0
VS
321static void
322vlv_power_sequencer_kick(struct intel_dp *intel_dp)
323{
324 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
325 struct drm_device *dev = intel_dig_port->base.base.dev;
326 struct drm_i915_private *dev_priv = dev->dev_private;
327 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 328 bool pll_enabled;
961a0db0
VS
329 uint32_t DP;
330
331 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
332 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
333 pipe_name(pipe), port_name(intel_dig_port->port)))
334 return;
335
336 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
337 pipe_name(pipe), port_name(intel_dig_port->port));
338
339 /* Preserve the BIOS-computed detected bit. This is
340 * supposed to be read-only.
341 */
342 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
343 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
344 DP |= DP_PORT_WIDTH(1);
345 DP |= DP_LINK_TRAIN_PAT_1;
346
347 if (IS_CHERRYVIEW(dev))
348 DP |= DP_PIPE_SELECT_CHV(pipe);
349 else if (pipe == PIPE_B)
350 DP |= DP_PIPEB_SELECT;
351
d288f65f
VS
352 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
353
354 /*
355 * The DPLL for the pipe must be enabled for this to work.
356 * So enable temporarily it if it's not already enabled.
357 */
358 if (!pll_enabled)
359 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
360 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
361
961a0db0
VS
362 /*
363 * Similar magic as in intel_dp_enable_port().
364 * We _must_ do this port enable + disable trick
365 * to make this power seqeuencer lock onto the port.
366 * Otherwise even VDD force bit won't work.
367 */
368 I915_WRITE(intel_dp->output_reg, DP);
369 POSTING_READ(intel_dp->output_reg);
370
371 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
376
377 if (!pll_enabled)
378 vlv_force_pll_off(dev, pipe);
961a0db0
VS
379}
380
bf13e81b
JN
381static enum pipe
382vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
383{
384 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
385 struct drm_device *dev = intel_dig_port->base.base.dev;
386 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
387 struct intel_encoder *encoder;
388 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 389 enum pipe pipe;
bf13e81b 390
e39b999a 391 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 392
a8c3344e
VS
393 /* We should never land here with regular DP ports */
394 WARN_ON(!is_edp(intel_dp));
395
a4a5d2f8
VS
396 if (intel_dp->pps_pipe != INVALID_PIPE)
397 return intel_dp->pps_pipe;
398
399 /*
400 * We don't have power sequencer currently.
401 * Pick one that's not used by other ports.
402 */
403 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
404 base.head) {
405 struct intel_dp *tmp;
406
407 if (encoder->type != INTEL_OUTPUT_EDP)
408 continue;
409
410 tmp = enc_to_intel_dp(&encoder->base);
411
412 if (tmp->pps_pipe != INVALID_PIPE)
413 pipes &= ~(1 << tmp->pps_pipe);
414 }
415
416 /*
417 * Didn't find one. This should not happen since there
418 * are two power sequencers and up to two eDP ports.
419 */
420 if (WARN_ON(pipes == 0))
a8c3344e
VS
421 pipe = PIPE_A;
422 else
423 pipe = ffs(pipes) - 1;
a4a5d2f8 424
a8c3344e
VS
425 vlv_steal_power_sequencer(dev, pipe);
426 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
427
428 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
429 pipe_name(intel_dp->pps_pipe),
430 port_name(intel_dig_port->port));
431
432 /* init power sequencer on this pipe and port */
36b5f425
VS
433 intel_dp_init_panel_power_sequencer(dev, intel_dp);
434 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 435
961a0db0
VS
436 /*
437 * Even vdd force doesn't work until we've made
438 * the power sequencer lock in on the port.
439 */
440 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
441
442 return intel_dp->pps_pipe;
443}
444
6491ab27
VS
445typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
446 enum pipe pipe);
447
448static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
452}
453
454static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
455 enum pipe pipe)
456{
457 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
458}
459
460static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
461 enum pipe pipe)
462{
463 return true;
464}
bf13e81b 465
a4a5d2f8 466static enum pipe
6491ab27
VS
467vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
468 enum port port,
469 vlv_pipe_check pipe_check)
a4a5d2f8
VS
470{
471 enum pipe pipe;
bf13e81b 472
bf13e81b
JN
473 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
474 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
475 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
476
477 if (port_sel != PANEL_PORT_SELECT_VLV(port))
478 continue;
479
6491ab27
VS
480 if (!pipe_check(dev_priv, pipe))
481 continue;
482
a4a5d2f8 483 return pipe;
bf13e81b
JN
484 }
485
a4a5d2f8
VS
486 return INVALID_PIPE;
487}
488
489static void
490vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
491{
492 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
493 struct drm_device *dev = intel_dig_port->base.base.dev;
494 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
495 enum port port = intel_dig_port->port;
496
497 lockdep_assert_held(&dev_priv->pps_mutex);
498
499 /* try to find a pipe with this port selected */
6491ab27
VS
500 /* first pick one where the panel is on */
501 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
502 vlv_pipe_has_pp_on);
503 /* didn't find one? pick one where vdd is on */
504 if (intel_dp->pps_pipe == INVALID_PIPE)
505 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
506 vlv_pipe_has_vdd_on);
507 /* didn't find one? pick one with just the correct port */
508 if (intel_dp->pps_pipe == INVALID_PIPE)
509 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
510 vlv_pipe_any);
a4a5d2f8
VS
511
512 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
513 if (intel_dp->pps_pipe == INVALID_PIPE) {
514 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
515 port_name(port));
516 return;
bf13e81b
JN
517 }
518
a4a5d2f8
VS
519 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
520 port_name(port), pipe_name(intel_dp->pps_pipe));
521
36b5f425
VS
522 intel_dp_init_panel_power_sequencer(dev, intel_dp);
523 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
524}
525
773538e8
VS
526void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
527{
528 struct drm_device *dev = dev_priv->dev;
529 struct intel_encoder *encoder;
530
531 if (WARN_ON(!IS_VALLEYVIEW(dev)))
532 return;
533
534 /*
535 * We can't grab pps_mutex here due to deadlock with power_domain
536 * mutex when power_domain functions are called while holding pps_mutex.
537 * That also means that in order to use pps_pipe the code needs to
538 * hold both a power domain reference and pps_mutex, and the power domain
539 * reference get/put must be done while _not_ holding pps_mutex.
540 * pps_{lock,unlock}() do these steps in the correct order, so one
541 * should use them always.
542 */
543
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
545 struct intel_dp *intel_dp;
546
547 if (encoder->type != INTEL_OUTPUT_EDP)
548 continue;
549
550 intel_dp = enc_to_intel_dp(&encoder->base);
551 intel_dp->pps_pipe = INVALID_PIPE;
552 }
bf13e81b
JN
553}
554
555static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
556{
557 struct drm_device *dev = intel_dp_to_dev(intel_dp);
558
559 if (HAS_PCH_SPLIT(dev))
560 return PCH_PP_CONTROL;
561 else
562 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
563}
564
565static u32 _pp_stat_reg(struct intel_dp *intel_dp)
566{
567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
568
569 if (HAS_PCH_SPLIT(dev))
570 return PCH_PP_STATUS;
571 else
572 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
573}
574
01527b31
CT
575/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
576 This function only applicable when panel PM state is not to be tracked */
577static int edp_notify_handler(struct notifier_block *this, unsigned long code,
578 void *unused)
579{
580 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
581 edp_notifier);
582 struct drm_device *dev = intel_dp_to_dev(intel_dp);
583 struct drm_i915_private *dev_priv = dev->dev_private;
584 u32 pp_div;
585 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
586
587 if (!is_edp(intel_dp) || code != SYS_RESTART)
588 return 0;
589
773538e8 590 pps_lock(intel_dp);
e39b999a 591
01527b31 592 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
593 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
594
01527b31
CT
595 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
596 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
597 pp_div = I915_READ(pp_div_reg);
598 pp_div &= PP_REFERENCE_DIVIDER_MASK;
599
600 /* 0x1F write to PP_DIV_REG sets max cycle delay */
601 I915_WRITE(pp_div_reg, pp_div | 0x1F);
602 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
603 msleep(intel_dp->panel_power_cycle_delay);
604 }
605
773538e8 606 pps_unlock(intel_dp);
e39b999a 607
01527b31
CT
608 return 0;
609}
610
4be73780 611static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 612{
30add22d 613 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
614 struct drm_i915_private *dev_priv = dev->dev_private;
615
e39b999a
VS
616 lockdep_assert_held(&dev_priv->pps_mutex);
617
9a42356b
VS
618 if (IS_VALLEYVIEW(dev) &&
619 intel_dp->pps_pipe == INVALID_PIPE)
620 return false;
621
bf13e81b 622 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
623}
624
4be73780 625static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 626{
30add22d 627 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
628 struct drm_i915_private *dev_priv = dev->dev_private;
629
e39b999a
VS
630 lockdep_assert_held(&dev_priv->pps_mutex);
631
9a42356b
VS
632 if (IS_VALLEYVIEW(dev) &&
633 intel_dp->pps_pipe == INVALID_PIPE)
634 return false;
635
773538e8 636 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
637}
638
9b984dae
KP
639static void
640intel_dp_check_edp(struct intel_dp *intel_dp)
641{
30add22d 642 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 643 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 644
9b984dae
KP
645 if (!is_edp(intel_dp))
646 return;
453c5420 647
4be73780 648 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
649 WARN(1, "eDP powered off while attempting aux channel communication.\n");
650 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
651 I915_READ(_pp_stat_reg(intel_dp)),
652 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
653 }
654}
655
9ee32fea
DV
656static uint32_t
657intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
658{
659 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
660 struct drm_device *dev = intel_dig_port->base.base.dev;
661 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 662 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
663 uint32_t status;
664 bool done;
665
ef04f00d 666#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 667 if (has_aux_irq)
b18ac466 668 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 669 msecs_to_jiffies_timeout(10));
9ee32fea
DV
670 else
671 done = wait_for_atomic(C, 10) == 0;
672 if (!done)
673 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
674 has_aux_irq);
675#undef C
676
677 return status;
678}
679
ec5b01dd 680static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 681{
174edf1f
PZ
682 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
683 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 684
ec5b01dd
DL
685 /*
686 * The clock divider is based off the hrawclk, and would like to run at
687 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 688 */
ec5b01dd
DL
689 return index ? 0 : intel_hrawclk(dev) / 2;
690}
691
692static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
693{
694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
695 struct drm_device *dev = intel_dig_port->base.base.dev;
696
697 if (index)
698 return 0;
699
700 if (intel_dig_port->port == PORT_A) {
701 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 702 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 703 else
b84a1cf8 704 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
705 } else {
706 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
707 }
708}
709
710static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
711{
712 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
713 struct drm_device *dev = intel_dig_port->base.base.dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715
716 if (intel_dig_port->port == PORT_A) {
717 if (index)
718 return 0;
719 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
720 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
721 /* Workaround for non-ULT HSW */
bc86625a
CW
722 switch (index) {
723 case 0: return 63;
724 case 1: return 72;
725 default: return 0;
726 }
ec5b01dd 727 } else {
bc86625a 728 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 729 }
b84a1cf8
RV
730}
731
ec5b01dd
DL
732static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
733{
734 return index ? 0 : 100;
735}
736
b6b5e383
DL
737static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
738{
739 /*
740 * SKL doesn't need us to program the AUX clock divider (Hardware will
741 * derive the clock from CDCLK automatically). We still implement the
742 * get_aux_clock_divider vfunc to plug-in into the existing code.
743 */
744 return index ? 0 : 1;
745}
746
5ed12a19
DL
747static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
748 bool has_aux_irq,
749 int send_bytes,
750 uint32_t aux_clock_divider)
751{
752 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
753 struct drm_device *dev = intel_dig_port->base.base.dev;
754 uint32_t precharge, timeout;
755
756 if (IS_GEN6(dev))
757 precharge = 3;
758 else
759 precharge = 5;
760
761 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
762 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
763 else
764 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
765
766 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 767 DP_AUX_CH_CTL_DONE |
5ed12a19 768 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 769 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 770 timeout |
788d4433 771 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
772 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
773 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 774 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
775}
776
b9ca5fad
DL
777static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
778 bool has_aux_irq,
779 int send_bytes,
780 uint32_t unused)
781{
782 return DP_AUX_CH_CTL_SEND_BUSY |
783 DP_AUX_CH_CTL_DONE |
784 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
785 DP_AUX_CH_CTL_TIME_OUT_ERROR |
786 DP_AUX_CH_CTL_TIME_OUT_1600us |
787 DP_AUX_CH_CTL_RECEIVE_ERROR |
788 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
789 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
790}
791
b84a1cf8
RV
792static int
793intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 794 const uint8_t *send, int send_bytes,
b84a1cf8
RV
795 uint8_t *recv, int recv_size)
796{
797 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
798 struct drm_device *dev = intel_dig_port->base.base.dev;
799 struct drm_i915_private *dev_priv = dev->dev_private;
800 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
801 uint32_t ch_data = ch_ctl + 4;
bc86625a 802 uint32_t aux_clock_divider;
b84a1cf8
RV
803 int i, ret, recv_bytes;
804 uint32_t status;
5ed12a19 805 int try, clock = 0;
4e6b788c 806 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
807 bool vdd;
808
773538e8 809 pps_lock(intel_dp);
e39b999a 810
72c3500a
VS
811 /*
812 * We will be called with VDD already enabled for dpcd/edid/oui reads.
813 * In such cases we want to leave VDD enabled and it's up to upper layers
814 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
815 * ourselves.
816 */
1e0560e0 817 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
818
819 /* dp aux is extremely sensitive to irq latency, hence request the
820 * lowest possible wakeup latency and so prevent the cpu from going into
821 * deep sleep states.
822 */
823 pm_qos_update_request(&dev_priv->pm_qos, 0);
824
825 intel_dp_check_edp(intel_dp);
5eb08b69 826
c67a470b
PZ
827 intel_aux_display_runtime_get(dev_priv);
828
11bee43e
JB
829 /* Try to wait for any previous AUX channel activity */
830 for (try = 0; try < 3; try++) {
ef04f00d 831 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
832 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
833 break;
834 msleep(1);
835 }
836
837 if (try == 3) {
838 WARN(1, "dp_aux_ch not started status 0x%08x\n",
839 I915_READ(ch_ctl));
9ee32fea
DV
840 ret = -EBUSY;
841 goto out;
4f7f7b7e
CW
842 }
843
46a5ae9f
PZ
844 /* Only 5 data registers! */
845 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
846 ret = -E2BIG;
847 goto out;
848 }
849
ec5b01dd 850 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
851 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
852 has_aux_irq,
853 send_bytes,
854 aux_clock_divider);
5ed12a19 855
bc86625a
CW
856 /* Must try at least 3 times according to DP spec */
857 for (try = 0; try < 5; try++) {
858 /* Load the send data into the aux channel data registers */
859 for (i = 0; i < send_bytes; i += 4)
860 I915_WRITE(ch_data + i,
a4f1289e
RV
861 intel_dp_pack_aux(send + i,
862 send_bytes - i));
bc86625a
CW
863
864 /* Send the command and wait for it to complete */
5ed12a19 865 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
866
867 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
868
869 /* Clear done status and any errors */
870 I915_WRITE(ch_ctl,
871 status |
872 DP_AUX_CH_CTL_DONE |
873 DP_AUX_CH_CTL_TIME_OUT_ERROR |
874 DP_AUX_CH_CTL_RECEIVE_ERROR);
875
876 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR))
878 continue;
879 if (status & DP_AUX_CH_CTL_DONE)
880 break;
881 }
4f7f7b7e 882 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
883 break;
884 }
885
a4fc5ed6 886 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 887 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
888 ret = -EBUSY;
889 goto out;
a4fc5ed6
KP
890 }
891
892 /* Check for timeout or receive error.
893 * Timeouts occur when the sink is not connected
894 */
a5b3da54 895 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 896 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
897 ret = -EIO;
898 goto out;
a5b3da54 899 }
1ae8c0a5
KP
900
901 /* Timeouts occur when the device isn't connected, so they're
902 * "normal" -- don't fill the kernel log with these */
a5b3da54 903 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 904 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
905 ret = -ETIMEDOUT;
906 goto out;
a4fc5ed6
KP
907 }
908
909 /* Unload any bytes sent back from the other side */
910 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
911 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
912 if (recv_bytes > recv_size)
913 recv_bytes = recv_size;
0206e353 914
4f7f7b7e 915 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
916 intel_dp_unpack_aux(I915_READ(ch_data + i),
917 recv + i, recv_bytes - i);
a4fc5ed6 918
9ee32fea
DV
919 ret = recv_bytes;
920out:
921 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 922 intel_aux_display_runtime_put(dev_priv);
9ee32fea 923
884f19e9
JN
924 if (vdd)
925 edp_panel_vdd_off(intel_dp, false);
926
773538e8 927 pps_unlock(intel_dp);
e39b999a 928
9ee32fea 929 return ret;
a4fc5ed6
KP
930}
931
a6c8aff0
JN
932#define BARE_ADDRESS_SIZE 3
933#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
934static ssize_t
935intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 936{
9d1a1031
JN
937 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
938 uint8_t txbuf[20], rxbuf[20];
939 size_t txsize, rxsize;
a4fc5ed6 940 int ret;
a4fc5ed6 941
9d1a1031
JN
942 txbuf[0] = msg->request << 4;
943 txbuf[1] = msg->address >> 8;
944 txbuf[2] = msg->address & 0xff;
945 txbuf[3] = msg->size - 1;
46a5ae9f 946
9d1a1031
JN
947 switch (msg->request & ~DP_AUX_I2C_MOT) {
948 case DP_AUX_NATIVE_WRITE:
949 case DP_AUX_I2C_WRITE:
a6c8aff0 950 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
9d1a1031 951 rxsize = 1;
f51a44b9 952
9d1a1031
JN
953 if (WARN_ON(txsize > 20))
954 return -E2BIG;
a4fc5ed6 955
9d1a1031 956 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 957
9d1a1031
JN
958 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
959 if (ret > 0) {
960 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 961
9d1a1031
JN
962 /* Return payload size. */
963 ret = msg->size;
964 }
965 break;
46a5ae9f 966
9d1a1031
JN
967 case DP_AUX_NATIVE_READ:
968 case DP_AUX_I2C_READ:
a6c8aff0 969 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 970 rxsize = msg->size + 1;
a4fc5ed6 971
9d1a1031
JN
972 if (WARN_ON(rxsize > 20))
973 return -E2BIG;
a4fc5ed6 974
9d1a1031
JN
975 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
976 if (ret > 0) {
977 msg->reply = rxbuf[0] >> 4;
978 /*
979 * Assume happy day, and copy the data. The caller is
980 * expected to check msg->reply before touching it.
981 *
982 * Return payload size.
983 */
984 ret--;
985 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 986 }
9d1a1031
JN
987 break;
988
989 default:
990 ret = -EINVAL;
991 break;
a4fc5ed6 992 }
f51a44b9 993
9d1a1031 994 return ret;
a4fc5ed6
KP
995}
996
9d1a1031
JN
997static void
998intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
999{
1000 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1001 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1002 enum port port = intel_dig_port->port;
0b99836f 1003 const char *name = NULL;
ab2c0672
DA
1004 int ret;
1005
33ad6626
JN
1006 switch (port) {
1007 case PORT_A:
1008 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1009 name = "DPDDC-A";
ab2c0672 1010 break;
33ad6626
JN
1011 case PORT_B:
1012 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1013 name = "DPDDC-B";
ab2c0672 1014 break;
33ad6626
JN
1015 case PORT_C:
1016 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1017 name = "DPDDC-C";
ab2c0672 1018 break;
33ad6626
JN
1019 case PORT_D:
1020 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1021 name = "DPDDC-D";
33ad6626
JN
1022 break;
1023 default:
1024 BUG();
ab2c0672
DA
1025 }
1026
1b1aad75
DL
1027 /*
1028 * The AUX_CTL register is usually DP_CTL + 0x10.
1029 *
1030 * On Haswell and Broadwell though:
1031 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1032 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1033 *
1034 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1035 */
1036 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1037 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1038
0b99836f 1039 intel_dp->aux.name = name;
9d1a1031
JN
1040 intel_dp->aux.dev = dev->dev;
1041 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1042
0b99836f
JN
1043 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1044 connector->base.kdev->kobj.name);
8316f337 1045
4f71d0cb 1046 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1047 if (ret < 0) {
4f71d0cb 1048 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1049 name, ret);
1050 return;
ab2c0672 1051 }
8a5e6aeb 1052
0b99836f
JN
1053 ret = sysfs_create_link(&connector->base.kdev->kobj,
1054 &intel_dp->aux.ddc.dev.kobj,
1055 intel_dp->aux.ddc.dev.kobj.name);
1056 if (ret < 0) {
1057 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1058 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1059 }
a4fc5ed6
KP
1060}
1061
80f65de3
ID
1062static void
1063intel_dp_connector_unregister(struct intel_connector *intel_connector)
1064{
1065 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1066
0e32b39c
DA
1067 if (!intel_connector->mst_port)
1068 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1069 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1070 intel_connector_unregister(intel_connector);
1071}
1072
5416d871 1073static void
c3346ef6 1074skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1075{
1076 u32 ctrl1;
1077
1078 pipe_config->ddi_pll_sel = SKL_DPLL0;
1079 pipe_config->dpll_hw_state.cfgcr1 = 0;
1080 pipe_config->dpll_hw_state.cfgcr2 = 0;
1081
1082 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1083 switch (link_clock / 2) {
1084 case 81000:
5416d871
DL
1085 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1086 SKL_DPLL0);
1087 break;
c3346ef6 1088 case 135000:
5416d871
DL
1089 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1090 SKL_DPLL0);
1091 break;
c3346ef6 1092 case 270000:
5416d871
DL
1093 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1094 SKL_DPLL0);
1095 break;
c3346ef6
SJ
1096 case 162000:
1097 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1098 SKL_DPLL0);
1099 break;
1100 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1101 results in CDCLK change. Need to handle the change of CDCLK by
1102 disabling pipes and re-enabling them */
1103 case 108000:
1104 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1105 SKL_DPLL0);
1106 break;
1107 case 216000:
1108 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1109 SKL_DPLL0);
1110 break;
1111
5416d871
DL
1112 }
1113 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1114}
1115
0e50338c 1116static void
5cec258b 1117hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1118{
1119 switch (link_bw) {
1120 case DP_LINK_BW_1_62:
1121 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1122 break;
1123 case DP_LINK_BW_2_7:
1124 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1125 break;
1126 case DP_LINK_BW_5_4:
1127 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1128 break;
1129 }
1130}
1131
fc0f8e25 1132static int
12f6a2e2 1133intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1134{
12f6a2e2
VS
1135 if (intel_dp->num_supported_rates) {
1136 *sink_rates = intel_dp->supported_rates;
ea2d8a42 1137 return intel_dp->num_supported_rates;
fc0f8e25 1138 }
12f6a2e2
VS
1139
1140 *sink_rates = default_rates;
1141
1142 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1143}
1144
a8f3ef61 1145static int
1db10e28 1146intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1147{
636280ba
VS
1148 if (INTEL_INFO(dev)->gen >= 9) {
1149 *source_rates = gen9_rates;
1150 return ARRAY_SIZE(gen9_rates);
a8f3ef61 1151 }
636280ba
VS
1152
1153 *source_rates = default_rates;
1154
1db10e28
VS
1155 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1156 /* WaDisableHBR2:skl */
1157 return (DP_LINK_BW_2_7 >> 3) + 1;
1158 else if (INTEL_INFO(dev)->gen >= 8 ||
1159 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1160 return (DP_LINK_BW_5_4 >> 3) + 1;
1161 else
1162 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1163}
1164
c6bb3538
DV
1165static void
1166intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1167 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1168{
1169 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1170 const struct dp_link_dpll *divisor = NULL;
1171 int i, count = 0;
c6bb3538
DV
1172
1173 if (IS_G4X(dev)) {
9dd4ffdf
CML
1174 divisor = gen4_dpll;
1175 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1176 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1177 divisor = pch_dpll;
1178 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1179 } else if (IS_CHERRYVIEW(dev)) {
1180 divisor = chv_dpll;
1181 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1182 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1183 divisor = vlv_dpll;
1184 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1185 }
9dd4ffdf
CML
1186
1187 if (divisor && count) {
1188 for (i = 0; i < count; i++) {
1189 if (link_bw == divisor[i].link_bw) {
1190 pipe_config->dpll = divisor[i].dpll;
1191 pipe_config->clock_set = true;
1192 break;
1193 }
1194 }
c6bb3538
DV
1195 }
1196}
1197
2ecae76a
VS
1198static int intersect_rates(const int *source_rates, int source_len,
1199 const int *sink_rates, int sink_len,
1200 int *supported_rates)
a8f3ef61
SJ
1201{
1202 int i = 0, j = 0, k = 0;
1203
a8f3ef61
SJ
1204 while (i < source_len && j < sink_len) {
1205 if (source_rates[i] == sink_rates[j]) {
1206 supported_rates[k] = source_rates[i];
1207 ++k;
1208 ++i;
1209 ++j;
1210 } else if (source_rates[i] < sink_rates[j]) {
1211 ++i;
1212 } else {
1213 ++j;
1214 }
1215 }
1216 return k;
1217}
1218
2ecae76a
VS
1219static int intel_supported_rates(struct intel_dp *intel_dp,
1220 int *supported_rates)
1221{
1222 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1223 const int *source_rates, *sink_rates;
1224 int source_len, sink_len;
1225
1226 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1227 source_len = intel_dp_source_rates(dev, &source_rates);
1228
1229 return intersect_rates(source_rates, source_len,
1230 sink_rates, sink_len,
1231 supported_rates);
1232}
1233
f4896f15 1234static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1235{
1236 int i = 0;
1237
1238 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1239 if (find == rates[i])
1240 break;
1241
1242 return i;
1243}
1244
50fec21a
VS
1245int
1246intel_dp_max_link_rate(struct intel_dp *intel_dp)
1247{
1248 int rates[DP_MAX_SUPPORTED_RATES] = {};
1249 int len;
1250
1251 len = intel_supported_rates(intel_dp, rates);
1252 if (WARN_ON(len <= 0))
1253 return 162000;
1254
1255 return rates[rate_to_index(0, rates) - 1];
1256}
1257
ed4e9c1d
VS
1258int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1259{
1260 return rate_to_index(rate, intel_dp->supported_rates);
1261}
1262
00c09d70 1263bool
5bfe2ac0 1264intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1265 struct intel_crtc_state *pipe_config)
a4fc5ed6 1266{
5bfe2ac0 1267 struct drm_device *dev = encoder->base.dev;
36008365 1268 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1269 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1270 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1271 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 1272 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 1273 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1274 int lane_count, clock;
56071a20 1275 int min_lane_count = 1;
eeb6324d 1276 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1277 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1278 int min_clock = 0;
a8f3ef61 1279 int max_clock;
083f9560 1280 int bpp, mode_rate;
ff9a6750 1281 int link_avail, link_clock;
2ecae76a
VS
1282 int supported_rates[DP_MAX_SUPPORTED_RATES] = {};
1283 int supported_len;
a8f3ef61 1284
2ecae76a 1285 supported_len = intel_supported_rates(intel_dp, supported_rates);
a8f3ef61
SJ
1286
1287 /* No common link rates between source and sink */
1288 WARN_ON(supported_len <= 0);
1289
1290 max_clock = supported_len - 1;
a4fc5ed6 1291
bc7d38a4 1292 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1293 pipe_config->has_pch_encoder = true;
1294
03afc4a2 1295 pipe_config->has_dp_encoder = true;
f769cd24 1296 pipe_config->has_drrs = false;
9ed109a7 1297 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1298
dd06f90e
JN
1299 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1300 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1301 adjusted_mode);
2dd24552
JB
1302 if (!HAS_PCH_SPLIT(dev))
1303 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1304 intel_connector->panel.fitting_mode);
1305 else
b074cec8
JB
1306 intel_pch_panel_fitting(intel_crtc, pipe_config,
1307 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1308 }
1309
cb1793ce 1310 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1311 return false;
1312
083f9560 1313 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61
SJ
1314 "max bw %d pixel clock %iKHz\n",
1315 max_lane_count, supported_rates[max_clock],
241bfc38 1316 adjusted_mode->crtc_clock);
083f9560 1317
36008365
DV
1318 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1319 * bpc in between. */
3e7ca985 1320 bpp = pipe_config->pipe_bpp;
56071a20
JN
1321 if (is_edp(intel_dp)) {
1322 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1323 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1324 dev_priv->vbt.edp_bpp);
1325 bpp = dev_priv->vbt.edp_bpp;
1326 }
1327
344c5bbc
JN
1328 /*
1329 * Use the maximum clock and number of lanes the eDP panel
1330 * advertizes being capable of. The panels are generally
1331 * designed to support only a single clock and lane
1332 * configuration, and typically these values correspond to the
1333 * native resolution of the panel.
1334 */
1335 min_lane_count = max_lane_count;
1336 min_clock = max_clock;
7984211e 1337 }
657445fe 1338
36008365 1339 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1340 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1341 bpp);
36008365 1342
c6930992 1343 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1344 for (lane_count = min_lane_count;
1345 lane_count <= max_lane_count;
1346 lane_count <<= 1) {
1347
1348 link_clock = supported_rates[clock];
36008365
DV
1349 link_avail = intel_dp_max_data_rate(link_clock,
1350 lane_count);
1351
1352 if (mode_rate <= link_avail) {
1353 goto found;
1354 }
1355 }
1356 }
1357 }
c4867936 1358
36008365 1359 return false;
3685a8f3 1360
36008365 1361found:
55bc60db
VS
1362 if (intel_dp->color_range_auto) {
1363 /*
1364 * See:
1365 * CEA-861-E - 5.1 Default Encoding Parameters
1366 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1367 */
18316c8c 1368 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1369 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1370 else
1371 intel_dp->color_range = 0;
1372 }
1373
3685a8f3 1374 if (intel_dp->color_range)
50f3b016 1375 pipe_config->limited_color_range = true;
a4fc5ed6 1376
36008365 1377 intel_dp->lane_count = lane_count;
a8f3ef61 1378
bc27b7d3
VS
1379 if (intel_dp->num_supported_rates) {
1380 intel_dp->link_bw = 0;
a8f3ef61 1381 intel_dp->rate_select =
ed4e9c1d 1382 intel_dp_rate_select(intel_dp, supported_rates[clock]);
bc27b7d3
VS
1383 } else {
1384 intel_dp->link_bw =
1385 drm_dp_link_rate_to_bw_code(supported_rates[clock]);
1386 intel_dp->rate_select = 0;
a8f3ef61
SJ
1387 }
1388
657445fe 1389 pipe_config->pipe_bpp = bpp;
a8f3ef61 1390 pipe_config->port_clock = supported_rates[clock];
a4fc5ed6 1391
36008365
DV
1392 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1393 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1394 pipe_config->port_clock, bpp);
36008365
DV
1395 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1396 mode_rate, link_avail);
a4fc5ed6 1397
03afc4a2 1398 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1399 adjusted_mode->crtc_clock,
1400 pipe_config->port_clock,
03afc4a2 1401 &pipe_config->dp_m_n);
9d1a455b 1402
439d7ac0 1403 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1404 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1405 pipe_config->has_drrs = true;
439d7ac0
PB
1406 intel_link_compute_m_n(bpp, lane_count,
1407 intel_connector->panel.downclock_mode->clock,
1408 pipe_config->port_clock,
1409 &pipe_config->dp_m2_n2);
1410 }
1411
5416d871 1412 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
c3346ef6 1413 skl_edp_set_pll_config(pipe_config, supported_rates[clock]);
5416d871 1414 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1415 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1416 else
1417 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1418
03afc4a2 1419 return true;
a4fc5ed6
KP
1420}
1421
7c62a164 1422static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1423{
7c62a164
DV
1424 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1425 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1426 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1427 struct drm_i915_private *dev_priv = dev->dev_private;
1428 u32 dpa_ctl;
1429
6e3c9717
ACO
1430 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1431 crtc->config->port_clock);
ea9b6006
DV
1432 dpa_ctl = I915_READ(DP_A);
1433 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1434
6e3c9717 1435 if (crtc->config->port_clock == 162000) {
1ce17038
DV
1436 /* For a long time we've carried around a ILK-DevA w/a for the
1437 * 160MHz clock. If we're really unlucky, it's still required.
1438 */
1439 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1440 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1441 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1442 } else {
1443 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1444 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1445 }
1ce17038 1446
ea9b6006
DV
1447 I915_WRITE(DP_A, dpa_ctl);
1448
1449 POSTING_READ(DP_A);
1450 udelay(500);
1451}
1452
8ac33ed3 1453static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1454{
b934223d 1455 struct drm_device *dev = encoder->base.dev;
417e822d 1456 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1457 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1458 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1459 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1460 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1461
417e822d 1462 /*
1a2eb460 1463 * There are four kinds of DP registers:
417e822d
KP
1464 *
1465 * IBX PCH
1a2eb460
KP
1466 * SNB CPU
1467 * IVB CPU
417e822d
KP
1468 * CPT PCH
1469 *
1470 * IBX PCH and CPU are the same for almost everything,
1471 * except that the CPU DP PLL is configured in this
1472 * register
1473 *
1474 * CPT PCH is quite different, having many bits moved
1475 * to the TRANS_DP_CTL register instead. That
1476 * configuration happens (oddly) in ironlake_pch_enable
1477 */
9c9e7927 1478
417e822d
KP
1479 /* Preserve the BIOS-computed detected bit. This is
1480 * supposed to be read-only.
1481 */
1482 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1483
417e822d 1484 /* Handle DP bits in common between all three register formats */
417e822d 1485 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1486 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1487
6e3c9717 1488 if (crtc->config->has_audio)
ea5b213a 1489 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1490
417e822d 1491 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1492
bc7d38a4 1493 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1494 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1495 intel_dp->DP |= DP_SYNC_HS_HIGH;
1496 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1497 intel_dp->DP |= DP_SYNC_VS_HIGH;
1498 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1499
6aba5b6c 1500 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1501 intel_dp->DP |= DP_ENHANCED_FRAMING;
1502
7c62a164 1503 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1504 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1505 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1506 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1507
1508 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1509 intel_dp->DP |= DP_SYNC_HS_HIGH;
1510 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1511 intel_dp->DP |= DP_SYNC_VS_HIGH;
1512 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1513
6aba5b6c 1514 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1515 intel_dp->DP |= DP_ENHANCED_FRAMING;
1516
44f37d1f
CML
1517 if (!IS_CHERRYVIEW(dev)) {
1518 if (crtc->pipe == 1)
1519 intel_dp->DP |= DP_PIPEB_SELECT;
1520 } else {
1521 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1522 }
417e822d
KP
1523 } else {
1524 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1525 }
a4fc5ed6
KP
1526}
1527
ffd6749d
PZ
1528#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1529#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1530
1a5ef5b7
PZ
1531#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1532#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1533
ffd6749d
PZ
1534#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1535#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1536
4be73780 1537static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1538 u32 mask,
1539 u32 value)
bd943159 1540{
30add22d 1541 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1542 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1543 u32 pp_stat_reg, pp_ctrl_reg;
1544
e39b999a
VS
1545 lockdep_assert_held(&dev_priv->pps_mutex);
1546
bf13e81b
JN
1547 pp_stat_reg = _pp_stat_reg(intel_dp);
1548 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1549
99ea7127 1550 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1551 mask, value,
1552 I915_READ(pp_stat_reg),
1553 I915_READ(pp_ctrl_reg));
32ce697c 1554
453c5420 1555 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1556 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1557 I915_READ(pp_stat_reg),
1558 I915_READ(pp_ctrl_reg));
32ce697c 1559 }
54c136d4
CW
1560
1561 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1562}
32ce697c 1563
4be73780 1564static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1565{
1566 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1567 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1568}
1569
4be73780 1570static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1571{
1572 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1573 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1574}
1575
4be73780 1576static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1577{
1578 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1579
1580 /* When we disable the VDD override bit last we have to do the manual
1581 * wait. */
1582 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1583 intel_dp->panel_power_cycle_delay);
1584
4be73780 1585 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1586}
1587
4be73780 1588static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1589{
1590 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1591 intel_dp->backlight_on_delay);
1592}
1593
4be73780 1594static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1595{
1596 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1597 intel_dp->backlight_off_delay);
1598}
99ea7127 1599
832dd3c1
KP
1600/* Read the current pp_control value, unlocking the register if it
1601 * is locked
1602 */
1603
453c5420 1604static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1605{
453c5420
JB
1606 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1607 struct drm_i915_private *dev_priv = dev->dev_private;
1608 u32 control;
832dd3c1 1609
e39b999a
VS
1610 lockdep_assert_held(&dev_priv->pps_mutex);
1611
bf13e81b 1612 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1613 control &= ~PANEL_UNLOCK_MASK;
1614 control |= PANEL_UNLOCK_REGS;
1615 return control;
bd943159
KP
1616}
1617
951468f3
VS
1618/*
1619 * Must be paired with edp_panel_vdd_off().
1620 * Must hold pps_mutex around the whole on/off sequence.
1621 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1622 */
1e0560e0 1623static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1624{
30add22d 1625 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1626 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1627 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1628 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1629 enum intel_display_power_domain power_domain;
5d613501 1630 u32 pp;
453c5420 1631 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1632 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1633
e39b999a
VS
1634 lockdep_assert_held(&dev_priv->pps_mutex);
1635
97af61f5 1636 if (!is_edp(intel_dp))
adddaaf4 1637 return false;
bd943159 1638
2c623c11 1639 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1640 intel_dp->want_panel_vdd = true;
99ea7127 1641
4be73780 1642 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1643 return need_to_disable;
b0665d57 1644
4e6e1a54
ID
1645 power_domain = intel_display_port_power_domain(intel_encoder);
1646 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1647
3936fcf4
VS
1648 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1649 port_name(intel_dig_port->port));
bd943159 1650
4be73780
DV
1651 if (!edp_have_panel_power(intel_dp))
1652 wait_panel_power_cycle(intel_dp);
99ea7127 1653
453c5420 1654 pp = ironlake_get_pp_control(intel_dp);
5d613501 1655 pp |= EDP_FORCE_VDD;
ebf33b18 1656
bf13e81b
JN
1657 pp_stat_reg = _pp_stat_reg(intel_dp);
1658 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1659
1660 I915_WRITE(pp_ctrl_reg, pp);
1661 POSTING_READ(pp_ctrl_reg);
1662 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1663 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1664 /*
1665 * If the panel wasn't on, delay before accessing aux channel
1666 */
4be73780 1667 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1668 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1669 port_name(intel_dig_port->port));
f01eca2e 1670 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1671 }
adddaaf4
JN
1672
1673 return need_to_disable;
1674}
1675
951468f3
VS
1676/*
1677 * Must be paired with intel_edp_panel_vdd_off() or
1678 * intel_edp_panel_off().
1679 * Nested calls to these functions are not allowed since
1680 * we drop the lock. Caller must use some higher level
1681 * locking to prevent nested calls from other threads.
1682 */
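/*
 * Sketch of the outer-level pairing, as used on the shutdown path in
 * intel_disable_dp() below; pps_lock is taken and dropped inside each
 * call, so the caller must not hold it:
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 *	intel_edp_panel_off(intel_dp);
 */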
b80d6c78 1683void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1684{
c695b6b6 1685 bool vdd;
adddaaf4 1686
c695b6b6
VS
1687 if (!is_edp(intel_dp))
1688 return;
1689
773538e8 1690 pps_lock(intel_dp);
c695b6b6 1691 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1692 pps_unlock(intel_dp);
c695b6b6 1693
e2c719b7 1694 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1695 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1696}
1697
4be73780 1698static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1699{
30add22d 1700 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1701 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1702 struct intel_digital_port *intel_dig_port =
1703 dp_to_dig_port(intel_dp);
1704 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1705 enum intel_display_power_domain power_domain;
5d613501 1706 u32 pp;
453c5420 1707 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1708
e39b999a 1709 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1710
15e899a0 1711 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1712
15e899a0 1713 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1714 return;
b0665d57 1715
3936fcf4
VS
1716 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1717 port_name(intel_dig_port->port));
bd943159 1718
be2c9196
VS
1719 pp = ironlake_get_pp_control(intel_dp);
1720 pp &= ~EDP_FORCE_VDD;
453c5420 1721
be2c9196
VS
1722 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1723 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1724
be2c9196
VS
1725 I915_WRITE(pp_ctrl_reg, pp);
1726 POSTING_READ(pp_ctrl_reg);
90791a5c 1727
be2c9196
VS
1728 /* Make sure sequencer is idle before allowing subsequent activity */
1729 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1730 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1731
be2c9196
VS
1732 if ((pp & POWER_TARGET_ON) == 0)
1733 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1734
be2c9196
VS
1735 power_domain = intel_display_port_power_domain(intel_encoder);
1736 intel_display_power_put(dev_priv, power_domain);
bd943159 1737}
5d613501 1738
4be73780 1739static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1740{
1741 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1742 struct intel_dp, panel_vdd_work);
bd943159 1743
773538e8 1744 pps_lock(intel_dp);
15e899a0
VS
1745 if (!intel_dp->want_panel_vdd)
1746 edp_panel_vdd_off_sync(intel_dp);
773538e8 1747 pps_unlock(intel_dp);
bd943159
KP
1748}
1749
aba86890
ID
1750static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1751{
1752 unsigned long delay;
1753
1754 /*
1755 * Queue the timer to fire a long time from now (relative to the power
1756 * down delay) to keep the panel power up across a sequence of
1757 * operations.
1758 */
1759 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1760 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1761}
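/*
 * Worked example of the delay chosen in edp_panel_vdd_schedule_off()
 * above (the numbers are purely illustrative): with a
 * panel_power_cycle_delay of 500 ms the VDD-off work is queued about
 * 5 * 500 ms = 2.5 s out, so a burst of back-to-back AUX accesses within
 * that window never sees VDD toggle in between.
 */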
1762
951468f3
VS
1763/*
1764 * Must be paired with edp_panel_vdd_on().
1765 * Must hold pps_mutex around the whole on/off sequence.
1766 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1767 */
4be73780 1768static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1769{
e39b999a
VS
1770 struct drm_i915_private *dev_priv =
1771 intel_dp_to_dev(intel_dp)->dev_private;
1772
1773 lockdep_assert_held(&dev_priv->pps_mutex);
1774
97af61f5
KP
1775 if (!is_edp(intel_dp))
1776 return;
5d613501 1777
e2c719b7 1778 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1779 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1780
bd943159
KP
1781 intel_dp->want_panel_vdd = false;
1782
aba86890 1783 if (sync)
4be73780 1784 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1785 else
1786 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1787}
1788
9f0fb5be 1789static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1790{
30add22d 1791 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1792 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1793 u32 pp;
453c5420 1794 u32 pp_ctrl_reg;
9934c132 1795
9f0fb5be
VS
1796 lockdep_assert_held(&dev_priv->pps_mutex);
1797
97af61f5 1798 if (!is_edp(intel_dp))
bd943159 1799 return;
99ea7127 1800
3936fcf4
VS
1801 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1802 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1803
e7a89ace
VS
1804 if (WARN(edp_have_panel_power(intel_dp),
1805 "eDP port %c panel power already on\n",
1806 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1807 return;
9934c132 1808
4be73780 1809 wait_panel_power_cycle(intel_dp);
37c6c9b0 1810
bf13e81b 1811 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1812 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1813 if (IS_GEN5(dev)) {
1814 /* ILK workaround: disable reset around power sequence */
1815 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1816 I915_WRITE(pp_ctrl_reg, pp);
1817 POSTING_READ(pp_ctrl_reg);
05ce1a49 1818 }
37c6c9b0 1819
1c0ae80a 1820 pp |= POWER_TARGET_ON;
99ea7127
KP
1821 if (!IS_GEN5(dev))
1822 pp |= PANEL_POWER_RESET;
1823
453c5420
JB
1824 I915_WRITE(pp_ctrl_reg, pp);
1825 POSTING_READ(pp_ctrl_reg);
9934c132 1826
4be73780 1827 wait_panel_on(intel_dp);
dce56b3c 1828 intel_dp->last_power_on = jiffies;
9934c132 1829
05ce1a49
KP
1830 if (IS_GEN5(dev)) {
1831 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1832 I915_WRITE(pp_ctrl_reg, pp);
1833 POSTING_READ(pp_ctrl_reg);
05ce1a49 1834 }
9f0fb5be 1835}
e39b999a 1836
9f0fb5be
VS
1837void intel_edp_panel_on(struct intel_dp *intel_dp)
1838{
1839 if (!is_edp(intel_dp))
1840 return;
1841
1842 pps_lock(intel_dp);
1843 edp_panel_on(intel_dp);
773538e8 1844 pps_unlock(intel_dp);
9934c132
JB
1845}
1846
9f0fb5be
VS
1847
1848static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1849{
4e6e1a54
ID
1850 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1851 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1852 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1853 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1854 enum intel_display_power_domain power_domain;
99ea7127 1855 u32 pp;
453c5420 1856 u32 pp_ctrl_reg;
9934c132 1857
9f0fb5be
VS
1858 lockdep_assert_held(&dev_priv->pps_mutex);
1859
97af61f5
KP
1860 if (!is_edp(intel_dp))
1861 return;
37c6c9b0 1862
3936fcf4
VS
1863 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1864 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1865
3936fcf4
VS
1866 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1867 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1868
453c5420 1869 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1870 /* We need to switch off panel power _and_ force vdd, for otherwise some
1871 * panels get very unhappy and cease to work. */
b3064154
PJ
1872 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1873 EDP_BLC_ENABLE);
453c5420 1874
bf13e81b 1875 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1876
849e39f5
PZ
1877 intel_dp->want_panel_vdd = false;
1878
453c5420
JB
1879 I915_WRITE(pp_ctrl_reg, pp);
1880 POSTING_READ(pp_ctrl_reg);
9934c132 1881
dce56b3c 1882 intel_dp->last_power_cycle = jiffies;
4be73780 1883 wait_panel_off(intel_dp);
849e39f5
PZ
1884
1885 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1886 power_domain = intel_display_port_power_domain(intel_encoder);
1887 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1888}
e39b999a 1889
9f0fb5be
VS
1890void intel_edp_panel_off(struct intel_dp *intel_dp)
1891{
1892 if (!is_edp(intel_dp))
1893 return;
e39b999a 1894
9f0fb5be
VS
1895 pps_lock(intel_dp);
1896 edp_panel_off(intel_dp);
773538e8 1897 pps_unlock(intel_dp);
9934c132
JB
1898}
1899
1250d107
JN
1900/* Enable backlight in the panel power control. */
1901static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1902{
da63a9f2
PZ
1903 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1904 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1905 struct drm_i915_private *dev_priv = dev->dev_private;
1906 u32 pp;
453c5420 1907 u32 pp_ctrl_reg;
32f9d658 1908
01cb9ea6
JB
1909 /*
1910 * If we enable the backlight right away following a panel power
1911 * on, we may see slight flicker as the panel syncs with the eDP
1912 * link. So delay a bit to make sure the image is solid before
1913 * allowing it to appear.
1914 */
4be73780 1915 wait_backlight_on(intel_dp);
e39b999a 1916
773538e8 1917 pps_lock(intel_dp);
e39b999a 1918
453c5420 1919 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1920 pp |= EDP_BLC_ENABLE;
453c5420 1921
bf13e81b 1922 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1923
1924 I915_WRITE(pp_ctrl_reg, pp);
1925 POSTING_READ(pp_ctrl_reg);
e39b999a 1926
773538e8 1927 pps_unlock(intel_dp);
32f9d658
ZW
1928}
1929
1250d107
JN
1930/* Enable backlight PWM and backlight PP control. */
1931void intel_edp_backlight_on(struct intel_dp *intel_dp)
1932{
1933 if (!is_edp(intel_dp))
1934 return;
1935
1936 DRM_DEBUG_KMS("\n");
1937
1938 intel_panel_enable_backlight(intel_dp->attached_connector);
1939 _intel_edp_backlight_on(intel_dp);
1940}
1941
1942/* Disable backlight in the panel power control. */
1943static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1944{
30add22d 1945 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1946 struct drm_i915_private *dev_priv = dev->dev_private;
1947 u32 pp;
453c5420 1948 u32 pp_ctrl_reg;
32f9d658 1949
f01eca2e
KP
1950 if (!is_edp(intel_dp))
1951 return;
1952
773538e8 1953 pps_lock(intel_dp);
e39b999a 1954
453c5420 1955 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1956 pp &= ~EDP_BLC_ENABLE;
453c5420 1957
bf13e81b 1958 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1959
1960 I915_WRITE(pp_ctrl_reg, pp);
1961 POSTING_READ(pp_ctrl_reg);
f7d2323c 1962
773538e8 1963 pps_unlock(intel_dp);
e39b999a
VS
1964
1965 intel_dp->last_backlight_off = jiffies;
f7d2323c 1966 edp_wait_backlight_off(intel_dp);
1250d107 1967}
f7d2323c 1968
1250d107
JN
1969/* Disable backlight PP control and backlight PWM. */
1970void intel_edp_backlight_off(struct intel_dp *intel_dp)
1971{
1972 if (!is_edp(intel_dp))
1973 return;
1974
1975 DRM_DEBUG_KMS("\n");
f7d2323c 1976
1250d107 1977 _intel_edp_backlight_off(intel_dp);
f7d2323c 1978 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 1979}
a4fc5ed6 1980
73580fb7
JN
1981/*
1982 * Hook for controlling the panel power control backlight through the bl_power
1983 * sysfs attribute. Take care to handle multiple calls.
1984 */
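/*
 * Hypothetical call sequence illustrating the "multiple calls" handling:
 * the current state is re-read from the panel power control register on
 * every call, so repeating a request is a no-op.
 *
 *	intel_edp_backlight_power(connector, true);
 *	intel_edp_backlight_power(connector, true);	// already enabled, returns early
 */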
1985static void intel_edp_backlight_power(struct intel_connector *connector,
1986 bool enable)
1987{
1988 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
1989 bool is_enabled;
1990
773538e8 1991 pps_lock(intel_dp);
e39b999a 1992 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 1993 pps_unlock(intel_dp);
73580fb7
JN
1994
1995 if (is_enabled == enable)
1996 return;
1997
23ba9373
JN
1998 DRM_DEBUG_KMS("panel power control backlight %s\n",
1999 enable ? "enable" : "disable");
73580fb7
JN
2000
2001 if (enable)
2002 _intel_edp_backlight_on(intel_dp);
2003 else
2004 _intel_edp_backlight_off(intel_dp);
2005}
2006
2bd2ad64 2007static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2008{
da63a9f2
PZ
2009 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2010 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2011 struct drm_device *dev = crtc->dev;
d240f20f
JB
2012 struct drm_i915_private *dev_priv = dev->dev_private;
2013 u32 dpa_ctl;
2014
2bd2ad64
DV
2015 assert_pipe_disabled(dev_priv,
2016 to_intel_crtc(crtc)->pipe);
2017
d240f20f
JB
2018 DRM_DEBUG_KMS("\n");
2019 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2020 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2021 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2022
2023 /* We don't adjust intel_dp->DP while tearing down the link, to
2024 * facilitate link retraining (e.g. after hotplug). Hence clear all
2025 * enable bits here to ensure that we don't enable too much. */
2026 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2027 intel_dp->DP |= DP_PLL_ENABLE;
2028 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2029 POSTING_READ(DP_A);
2030 udelay(200);
d240f20f
JB
2031}
2032
2bd2ad64 2033static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2034{
da63a9f2
PZ
2035 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2036 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2037 struct drm_device *dev = crtc->dev;
d240f20f
JB
2038 struct drm_i915_private *dev_priv = dev->dev_private;
2039 u32 dpa_ctl;
2040
2bd2ad64
DV
2041 assert_pipe_disabled(dev_priv,
2042 to_intel_crtc(crtc)->pipe);
2043
d240f20f 2044 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2045 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2046 "dp pll off, should be on\n");
2047 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2048
2049 /* We can't rely on the value tracked for the DP register in
2050 * intel_dp->DP because link_down must not change that (otherwise link
 2051	 * re-training will fail). */
298b0b39 2052 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2053 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2054 POSTING_READ(DP_A);
d240f20f
JB
2055 udelay(200);
2056}
2057
c7ad3810 2058/* If the sink supports it, try to set the power state appropriately */
c19b0669 2059void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2060{
2061 int ret, i;
2062
2063 /* Should have a valid DPCD by this point */
2064 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2065 return;
2066
2067 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2068 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2069 DP_SET_POWER_D3);
c7ad3810
JB
2070 } else {
2071 /*
2072 * When turning on, we need to retry for 1ms to give the sink
2073 * time to wake up.
2074 */
2075 for (i = 0; i < 3; i++) {
9d1a1031
JN
2076 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2077 DP_SET_POWER_D0);
c7ad3810
JB
2078 if (ret == 1)
2079 break;
2080 msleep(1);
2081 }
2082 }
f9cac721
JN
2083
2084 if (ret != 1)
2085 DRM_DEBUG_KMS("failed to %s sink power state\n",
2086 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2087}
2088
19d8fe15
DV
2089static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2090 enum pipe *pipe)
d240f20f 2091{
19d8fe15 2092 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2093 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2094 struct drm_device *dev = encoder->base.dev;
2095 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2096 enum intel_display_power_domain power_domain;
2097 u32 tmp;
2098
2099 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2100 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2101 return false;
2102
2103 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2104
2105 if (!(tmp & DP_PORT_EN))
2106 return false;
2107
bc7d38a4 2108 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2109 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2110 } else if (IS_CHERRYVIEW(dev)) {
2111 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2112 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2113 *pipe = PORT_TO_PIPE(tmp);
2114 } else {
2115 u32 trans_sel;
2116 u32 trans_dp;
2117 int i;
2118
2119 switch (intel_dp->output_reg) {
2120 case PCH_DP_B:
2121 trans_sel = TRANS_DP_PORT_SEL_B;
2122 break;
2123 case PCH_DP_C:
2124 trans_sel = TRANS_DP_PORT_SEL_C;
2125 break;
2126 case PCH_DP_D:
2127 trans_sel = TRANS_DP_PORT_SEL_D;
2128 break;
2129 default:
2130 return true;
2131 }
2132
055e393f 2133 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2134 trans_dp = I915_READ(TRANS_DP_CTL(i));
2135 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2136 *pipe = i;
2137 return true;
2138 }
2139 }
19d8fe15 2140
4a0833ec
DV
2141 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2142 intel_dp->output_reg);
2143 }
d240f20f 2144
19d8fe15
DV
2145 return true;
2146}
d240f20f 2147
045ac3b5 2148static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2149 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2150{
2151 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2152 u32 tmp, flags = 0;
63000ef6
XZ
2153 struct drm_device *dev = encoder->base.dev;
2154 struct drm_i915_private *dev_priv = dev->dev_private;
2155 enum port port = dp_to_dig_port(intel_dp)->port;
2156 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2157 int dotclock;
045ac3b5 2158
9ed109a7
DV
2159 tmp = I915_READ(intel_dp->output_reg);
2160 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2161 pipe_config->has_audio = true;
2162
63000ef6 2163 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2164 if (tmp & DP_SYNC_HS_HIGH)
2165 flags |= DRM_MODE_FLAG_PHSYNC;
2166 else
2167 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2168
63000ef6
XZ
2169 if (tmp & DP_SYNC_VS_HIGH)
2170 flags |= DRM_MODE_FLAG_PVSYNC;
2171 else
2172 flags |= DRM_MODE_FLAG_NVSYNC;
2173 } else {
2174 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2175 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2176 flags |= DRM_MODE_FLAG_PHSYNC;
2177 else
2178 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2179
63000ef6
XZ
2180 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2181 flags |= DRM_MODE_FLAG_PVSYNC;
2182 else
2183 flags |= DRM_MODE_FLAG_NVSYNC;
2184 }
045ac3b5 2185
2d112de7 2186 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2187
8c875fca
VS
2188 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2189 tmp & DP_COLOR_RANGE_16_235)
2190 pipe_config->limited_color_range = true;
2191
eb14cb74
VS
2192 pipe_config->has_dp_encoder = true;
2193
2194 intel_dp_get_m_n(crtc, pipe_config);
2195
18442d08 2196 if (port == PORT_A) {
f1f644dc
JB
2197 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2198 pipe_config->port_clock = 162000;
2199 else
2200 pipe_config->port_clock = 270000;
2201 }
18442d08
VS
2202
2203 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2204 &pipe_config->dp_m_n);
2205
2206 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2207 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2208
2d112de7 2209 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2210
c6cd2ee2
JN
2211 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2212 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2213 /*
2214 * This is a big fat ugly hack.
2215 *
2216 * Some machines in UEFI boot mode provide us a VBT that has 18
2217 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2218 * unknown we fail to light up. Yet the same BIOS boots up with
2219 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2220 * max, not what it tells us to use.
2221 *
2222 * Note: This will still be broken if the eDP panel is not lit
2223 * up by the BIOS, and thus we can't get the mode at module
2224 * load.
2225 */
2226 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2227 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2228 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2229 }
045ac3b5
JB
2230}
2231
e8cb4558 2232static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2233{
e8cb4558 2234 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2235 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2236 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2237
6e3c9717 2238 if (crtc->config->has_audio)
495a5bb8 2239 intel_audio_codec_disable(encoder);
6cb49835 2240
b32c6f48
RV
2241 if (HAS_PSR(dev) && !HAS_DDI(dev))
2242 intel_psr_disable(intel_dp);
2243
6cb49835
DV
2244 /* Make sure the panel is off before trying to change the mode. But also
2245 * ensure that we have vdd while we switch off the panel. */
24f3e092 2246 intel_edp_panel_vdd_on(intel_dp);
4be73780 2247 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2248 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2249 intel_edp_panel_off(intel_dp);
3739850b 2250
08aff3fe
VS
2251 /* disable the port before the pipe on g4x */
2252 if (INTEL_INFO(dev)->gen < 5)
3739850b 2253 intel_dp_link_down(intel_dp);
d240f20f
JB
2254}
2255
08aff3fe 2256static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2257{
2bd2ad64 2258 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2259 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2260
49277c31 2261 intel_dp_link_down(intel_dp);
08aff3fe
VS
2262 if (port == PORT_A)
2263 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2264}
2265
2266static void vlv_post_disable_dp(struct intel_encoder *encoder)
2267{
2268 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2269
2270 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2271}
2272
580d3811
VS
2273static void chv_post_disable_dp(struct intel_encoder *encoder)
2274{
2275 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2276 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2277 struct drm_device *dev = encoder->base.dev;
2278 struct drm_i915_private *dev_priv = dev->dev_private;
2279 struct intel_crtc *intel_crtc =
2280 to_intel_crtc(encoder->base.crtc);
2281 enum dpio_channel ch = vlv_dport_to_channel(dport);
2282 enum pipe pipe = intel_crtc->pipe;
2283 u32 val;
2284
2285 intel_dp_link_down(intel_dp);
2286
2287 mutex_lock(&dev_priv->dpio_lock);
2288
2289 /* Propagate soft reset to data lane reset */
97fd4d5c 2290 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2291 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2292 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2293
97fd4d5c
VS
2294 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2295 val |= CHV_PCS_REQ_SOFTRESET_EN;
2296 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2297
2298 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2299 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2300 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2301
2302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2303 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2305
2306 mutex_unlock(&dev_priv->dpio_lock);
2307}
2308
7b13b58a
VS
2309static void
2310_intel_dp_set_link_train(struct intel_dp *intel_dp,
2311 uint32_t *DP,
2312 uint8_t dp_train_pat)
2313{
2314 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2315 struct drm_device *dev = intel_dig_port->base.base.dev;
2316 struct drm_i915_private *dev_priv = dev->dev_private;
2317 enum port port = intel_dig_port->port;
2318
2319 if (HAS_DDI(dev)) {
2320 uint32_t temp = I915_READ(DP_TP_CTL(port));
2321
2322 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2323 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2324 else
2325 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2326
2327 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2328 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2329 case DP_TRAINING_PATTERN_DISABLE:
2330 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2331
2332 break;
2333 case DP_TRAINING_PATTERN_1:
2334 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2335 break;
2336 case DP_TRAINING_PATTERN_2:
2337 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2338 break;
2339 case DP_TRAINING_PATTERN_3:
2340 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2341 break;
2342 }
2343 I915_WRITE(DP_TP_CTL(port), temp);
2344
2345 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2346 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2347
2348 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2349 case DP_TRAINING_PATTERN_DISABLE:
2350 *DP |= DP_LINK_TRAIN_OFF_CPT;
2351 break;
2352 case DP_TRAINING_PATTERN_1:
2353 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2354 break;
2355 case DP_TRAINING_PATTERN_2:
2356 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2357 break;
2358 case DP_TRAINING_PATTERN_3:
2359 DRM_ERROR("DP training pattern 3 not supported\n");
2360 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2361 break;
2362 }
2363
2364 } else {
2365 if (IS_CHERRYVIEW(dev))
2366 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2367 else
2368 *DP &= ~DP_LINK_TRAIN_MASK;
2369
2370 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2371 case DP_TRAINING_PATTERN_DISABLE:
2372 *DP |= DP_LINK_TRAIN_OFF;
2373 break;
2374 case DP_TRAINING_PATTERN_1:
2375 *DP |= DP_LINK_TRAIN_PAT_1;
2376 break;
2377 case DP_TRAINING_PATTERN_2:
2378 *DP |= DP_LINK_TRAIN_PAT_2;
2379 break;
2380 case DP_TRAINING_PATTERN_3:
2381 if (IS_CHERRYVIEW(dev)) {
2382 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2383 } else {
2384 DRM_ERROR("DP training pattern 3 not supported\n");
2385 *DP |= DP_LINK_TRAIN_PAT_2;
2386 }
2387 break;
2388 }
2389 }
2390}
2391
2392static void intel_dp_enable_port(struct intel_dp *intel_dp)
2393{
2394 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2395 struct drm_i915_private *dev_priv = dev->dev_private;
2396
7b13b58a
VS
2397 /* enable with pattern 1 (as per spec) */
2398 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2399 DP_TRAINING_PATTERN_1);
2400
2401 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2402 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2403
2404 /*
2405 * Magic for VLV/CHV. We _must_ first set up the register
2406 * without actually enabling the port, and then do another
2407 * write to enable the port. Otherwise link training will
2408 * fail when the power sequencer is freshly used for this port.
2409 */
2410 intel_dp->DP |= DP_PORT_EN;
2411
2412 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2413 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2414}
2415
e8cb4558 2416static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2417{
e8cb4558
DV
2418 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2419 struct drm_device *dev = encoder->base.dev;
2420 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2421 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2422 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2423
0c33d8d7
DV
2424 if (WARN_ON(dp_reg & DP_PORT_EN))
2425 return;
5d613501 2426
093e3f13
VS
2427 pps_lock(intel_dp);
2428
2429 if (IS_VALLEYVIEW(dev))
2430 vlv_init_panel_power_sequencer(intel_dp);
2431
7b13b58a 2432 intel_dp_enable_port(intel_dp);
093e3f13
VS
2433
2434 edp_panel_vdd_on(intel_dp);
2435 edp_panel_on(intel_dp);
2436 edp_panel_vdd_off(intel_dp, true);
2437
2438 pps_unlock(intel_dp);
2439
61234fa5
VS
2440 if (IS_VALLEYVIEW(dev))
2441 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2442
f01eca2e 2443 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2444 intel_dp_start_link_train(intel_dp);
33a34e4e 2445 intel_dp_complete_link_train(intel_dp);
3ab9c637 2446 intel_dp_stop_link_train(intel_dp);
c1dec79a 2447
6e3c9717 2448 if (crtc->config->has_audio) {
c1dec79a
JN
2449 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2450 pipe_name(crtc->pipe));
2451 intel_audio_codec_enable(encoder);
2452 }
ab1f90f9 2453}
89b667f8 2454
ecff4f3b
JN
2455static void g4x_enable_dp(struct intel_encoder *encoder)
2456{
828f5c6e
JN
2457 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2458
ecff4f3b 2459 intel_enable_dp(encoder);
4be73780 2460 intel_edp_backlight_on(intel_dp);
ab1f90f9 2461}
89b667f8 2462
ab1f90f9
JN
2463static void vlv_enable_dp(struct intel_encoder *encoder)
2464{
828f5c6e
JN
2465 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2466
4be73780 2467 intel_edp_backlight_on(intel_dp);
b32c6f48 2468 intel_psr_enable(intel_dp);
d240f20f
JB
2469}
2470
ecff4f3b 2471static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2472{
2473 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2474 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2475
8ac33ed3
DV
2476 intel_dp_prepare(encoder);
2477
d41f1efb
DV
2478 /* Only ilk+ has port A */
2479 if (dport->port == PORT_A) {
2480 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2481 ironlake_edp_pll_on(intel_dp);
d41f1efb 2482 }
ab1f90f9
JN
2483}
2484
83b84597
VS
2485static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2486{
2487 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2488 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2489 enum pipe pipe = intel_dp->pps_pipe;
2490 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2491
2492 edp_panel_vdd_off_sync(intel_dp);
2493
2494 /*
 2495	 * VLV seems to get confused when multiple power sequencers
 2496	 * have the same port selected (even if only one has power/vdd
 2497	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2498	 * CHV on the other hand doesn't seem to mind having the same port
 2499	 * selected in multiple power sequencers, but let's clear the
2500 * port select always when logically disconnecting a power sequencer
2501 * from a port.
2502 */
2503 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2504 pipe_name(pipe), port_name(intel_dig_port->port));
2505 I915_WRITE(pp_on_reg, 0);
2506 POSTING_READ(pp_on_reg);
2507
2508 intel_dp->pps_pipe = INVALID_PIPE;
2509}
2510
a4a5d2f8
VS
2511static void vlv_steal_power_sequencer(struct drm_device *dev,
2512 enum pipe pipe)
2513{
2514 struct drm_i915_private *dev_priv = dev->dev_private;
2515 struct intel_encoder *encoder;
2516
2517 lockdep_assert_held(&dev_priv->pps_mutex);
2518
ac3c12e4
VS
2519 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2520 return;
2521
a4a5d2f8
VS
2522 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2523 base.head) {
2524 struct intel_dp *intel_dp;
773538e8 2525 enum port port;
a4a5d2f8
VS
2526
2527 if (encoder->type != INTEL_OUTPUT_EDP)
2528 continue;
2529
2530 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2531 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2532
2533 if (intel_dp->pps_pipe != pipe)
2534 continue;
2535
2536 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2537 pipe_name(pipe), port_name(port));
a4a5d2f8 2538
034e43c6
VS
2539 WARN(encoder->connectors_active,
2540 "stealing pipe %c power sequencer from active eDP port %c\n",
2541 pipe_name(pipe), port_name(port));
a4a5d2f8 2542
a4a5d2f8 2543 /* make sure vdd is off before we steal it */
83b84597 2544 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2545 }
2546}
2547
2548static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2549{
2550 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2551 struct intel_encoder *encoder = &intel_dig_port->base;
2552 struct drm_device *dev = encoder->base.dev;
2553 struct drm_i915_private *dev_priv = dev->dev_private;
2554 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2555
2556 lockdep_assert_held(&dev_priv->pps_mutex);
2557
093e3f13
VS
2558 if (!is_edp(intel_dp))
2559 return;
2560
a4a5d2f8
VS
2561 if (intel_dp->pps_pipe == crtc->pipe)
2562 return;
2563
2564 /*
2565 * If another power sequencer was being used on this
 2566	 * port previously, make sure to turn off vdd there while
2567 * we still have control of it.
2568 */
2569 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2570 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2571
2572 /*
2573 * We may be stealing the power
2574 * sequencer from another port.
2575 */
2576 vlv_steal_power_sequencer(dev, crtc->pipe);
2577
2578 /* now it's all ours */
2579 intel_dp->pps_pipe = crtc->pipe;
2580
2581 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2582 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2583
2584 /* init power sequencer on this pipe and port */
36b5f425
VS
2585 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2586 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2587}
2588
ab1f90f9 2589static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2590{
2bd2ad64 2591 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2592 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2593 struct drm_device *dev = encoder->base.dev;
89b667f8 2594 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2595 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2596 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2597 int pipe = intel_crtc->pipe;
2598 u32 val;
a4fc5ed6 2599
ab1f90f9 2600 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2601
ab3c759a 2602 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2603 val = 0;
2604 if (pipe)
2605 val |= (1<<21);
2606 else
2607 val &= ~(1<<21);
2608 val |= 0x001000c4;
ab3c759a
CML
2609 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2610 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2611 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2612
ab1f90f9
JN
2613 mutex_unlock(&dev_priv->dpio_lock);
2614
2615 intel_enable_dp(encoder);
89b667f8
JB
2616}
2617
ecff4f3b 2618static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2619{
2620 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2621 struct drm_device *dev = encoder->base.dev;
2622 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2623 struct intel_crtc *intel_crtc =
2624 to_intel_crtc(encoder->base.crtc);
e4607fcf 2625 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2626 int pipe = intel_crtc->pipe;
89b667f8 2627
8ac33ed3
DV
2628 intel_dp_prepare(encoder);
2629
89b667f8 2630 /* Program Tx lane resets to default */
0980a60f 2631 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2632 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2633 DPIO_PCS_TX_LANE2_RESET |
2634 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2635 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2636 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2637 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2638 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2639 DPIO_PCS_CLK_SOFT_RESET);
2640
2641 /* Fix up inter-pair skew failure */
ab3c759a
CML
2642 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2643 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2644 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2645 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2646}
2647
e4a1d846
CML
2648static void chv_pre_enable_dp(struct intel_encoder *encoder)
2649{
2650 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2651 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2652 struct drm_device *dev = encoder->base.dev;
2653 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2654 struct intel_crtc *intel_crtc =
2655 to_intel_crtc(encoder->base.crtc);
2656 enum dpio_channel ch = vlv_dport_to_channel(dport);
2657 int pipe = intel_crtc->pipe;
2658 int data, i;
949c1d43 2659 u32 val;
e4a1d846 2660
e4a1d846 2661 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2662
570e2a74
VS
2663 /* allow hardware to manage TX FIFO reset source */
2664 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2665 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2666 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2667
2668 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2669 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2670 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2671
949c1d43 2672	/* Deassert soft data lane reset */
97fd4d5c 2673 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2674 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2675 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2676
2677 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2678 val |= CHV_PCS_REQ_SOFTRESET_EN;
2679 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2680
2681 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2682 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2683 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2684
97fd4d5c 2685 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2686 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2687 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2688
 2689	/* Program Tx lane latency optimal setting */
e4a1d846
CML
2690 for (i = 0; i < 4; i++) {
2691 /* Set the latency optimal bit */
2692 data = (i == 1) ? 0x0 : 0x6;
2693 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2694 data << DPIO_FRC_LATENCY_SHFIT);
2695
2696 /* Set the upar bit */
2697 data = (i == 1) ? 0x0 : 0x1;
2698 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2699 data << DPIO_UPAR_SHIFT);
2700 }
2701
2702 /* Data lane stagger programming */
2703 /* FIXME: Fix up value only after power analysis */
2704
2705 mutex_unlock(&dev_priv->dpio_lock);
2706
e4a1d846 2707 intel_enable_dp(encoder);
e4a1d846
CML
2708}
2709
9197c88b
VS
2710static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2711{
2712 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2713 struct drm_device *dev = encoder->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 struct intel_crtc *intel_crtc =
2716 to_intel_crtc(encoder->base.crtc);
2717 enum dpio_channel ch = vlv_dport_to_channel(dport);
2718 enum pipe pipe = intel_crtc->pipe;
2719 u32 val;
2720
625695f8
VS
2721 intel_dp_prepare(encoder);
2722
9197c88b
VS
2723 mutex_lock(&dev_priv->dpio_lock);
2724
b9e5ac3c
VS
2725 /* program left/right clock distribution */
2726 if (pipe != PIPE_B) {
2727 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2728 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2729 if (ch == DPIO_CH0)
2730 val |= CHV_BUFLEFTENA1_FORCE;
2731 if (ch == DPIO_CH1)
2732 val |= CHV_BUFRIGHTENA1_FORCE;
2733 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2734 } else {
2735 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2736 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2737 if (ch == DPIO_CH0)
2738 val |= CHV_BUFLEFTENA2_FORCE;
2739 if (ch == DPIO_CH1)
2740 val |= CHV_BUFRIGHTENA2_FORCE;
2741 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2742 }
2743
9197c88b
VS
2744 /* program clock channel usage */
2745 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2746 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2747 if (pipe != PIPE_B)
2748 val &= ~CHV_PCS_USEDCLKCHANNEL;
2749 else
2750 val |= CHV_PCS_USEDCLKCHANNEL;
2751 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2752
2753 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2754 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2755 if (pipe != PIPE_B)
2756 val &= ~CHV_PCS_USEDCLKCHANNEL;
2757 else
2758 val |= CHV_PCS_USEDCLKCHANNEL;
2759 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2760
2761 /*
 2762	 * This is a bit weird since generally CL
2763 * matches the pipe, but here we need to
2764 * pick the CL based on the port.
2765 */
2766 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2767 if (pipe != PIPE_B)
2768 val &= ~CHV_CMN_USEDCLKCHANNEL;
2769 else
2770 val |= CHV_CMN_USEDCLKCHANNEL;
2771 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2772
2773 mutex_unlock(&dev_priv->dpio_lock);
2774}
2775
a4fc5ed6 2776/*
df0c237d
JB
2777 * Native read with retry for link status and receiver capability reads for
2778 * cases where the sink may still be asleep.
9d1a1031
JN
2779 *
2780 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2781 * supposed to retry 3 times per the spec.
a4fc5ed6 2782 */
9d1a1031
JN
2783static ssize_t
2784intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2785 void *buffer, size_t size)
a4fc5ed6 2786{
9d1a1031
JN
2787 ssize_t ret;
2788 int i;
61da5fab 2789
f6a19066
VS
2790 /*
 2791	 * Sometimes we just get the same incorrect byte repeated
 2792	 * over the entire buffer. Doing just one throw-away read
2793 * initially seems to "solve" it.
2794 */
2795 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2796
61da5fab 2797 for (i = 0; i < 3; i++) {
9d1a1031
JN
2798 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2799 if (ret == size)
2800 return ret;
61da5fab
JB
2801 msleep(1);
2802 }
a4fc5ed6 2803
9d1a1031 2804 return ret;
a4fc5ed6
KP
2805}
2806
2807/*
2808 * Fetch AUX CH registers 0x202 - 0x207 which contain
2809 * link status information
2810 */
2811static bool
93f62dad 2812intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2813{
9d1a1031
JN
2814 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2815 DP_LANE0_1_STATUS,
2816 link_status,
2817 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2818}
2819
1100244e 2820/* These are source-specific values. */
a4fc5ed6 2821static uint8_t
1a2eb460 2822intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2823{
30add22d 2824 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2825 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2826 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2827
7ad14a29
SJ
2828 if (INTEL_INFO(dev)->gen >= 9) {
2829 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2830 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2831 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2832 } else if (IS_VALLEYVIEW(dev))
bd60018a 2833 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2834 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2835 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2836 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2837 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2838 else
bd60018a 2839 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2840}
2841
2842static uint8_t
2843intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2844{
30add22d 2845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2846 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2847
5a9d1f1a
DL
2848 if (INTEL_INFO(dev)->gen >= 9) {
2849 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2851 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2852 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2853 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2854 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2855 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2856 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2857 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2858 default:
2859 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2860 }
2861 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2862 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2863 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2864 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2865 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2866 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2867 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2868 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2869 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2870 default:
bd60018a 2871 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2872 }
e2fa6fba
P
2873 } else if (IS_VALLEYVIEW(dev)) {
2874 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2875 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2876 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2877 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2878 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2879 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2880 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2881 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2882 default:
bd60018a 2883 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2884 }
bc7d38a4 2885 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2886 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2887 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2888 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2889 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2890 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2891 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2892 default:
bd60018a 2893 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2894 }
2895 } else {
2896 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2897 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2898 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2899 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2900 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2901 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2902 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2903 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2904 default:
bd60018a 2905 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2906 }
a4fc5ed6
KP
2907 }
2908}
2909
e2fa6fba
P
2910static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2911{
2912 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2913 struct drm_i915_private *dev_priv = dev->dev_private;
2914 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2915 struct intel_crtc *intel_crtc =
2916 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2917 unsigned long demph_reg_value, preemph_reg_value,
2918 uniqtranscale_reg_value;
2919 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2920 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2921 int pipe = intel_crtc->pipe;
e2fa6fba
P
2922
2923 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2924 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2925 preemph_reg_value = 0x0004000;
2926 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2927 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2928 demph_reg_value = 0x2B405555;
2929 uniqtranscale_reg_value = 0x552AB83A;
2930 break;
bd60018a 2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2932 demph_reg_value = 0x2B404040;
2933 uniqtranscale_reg_value = 0x5548B83A;
2934 break;
bd60018a 2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2936 demph_reg_value = 0x2B245555;
2937 uniqtranscale_reg_value = 0x5560B83A;
2938 break;
bd60018a 2939 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2940 demph_reg_value = 0x2B405555;
2941 uniqtranscale_reg_value = 0x5598DA3A;
2942 break;
2943 default:
2944 return 0;
2945 }
2946 break;
bd60018a 2947 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
2948 preemph_reg_value = 0x0002000;
2949 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2950 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2951 demph_reg_value = 0x2B404040;
2952 uniqtranscale_reg_value = 0x5552B83A;
2953 break;
bd60018a 2954 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2955 demph_reg_value = 0x2B404848;
2956 uniqtranscale_reg_value = 0x5580B83A;
2957 break;
bd60018a 2958 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2959 demph_reg_value = 0x2B404040;
2960 uniqtranscale_reg_value = 0x55ADDA3A;
2961 break;
2962 default:
2963 return 0;
2964 }
2965 break;
bd60018a 2966 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
2967 preemph_reg_value = 0x0000000;
2968 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2969 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2970 demph_reg_value = 0x2B305555;
2971 uniqtranscale_reg_value = 0x5570B83A;
2972 break;
bd60018a 2973 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2974 demph_reg_value = 0x2B2B4040;
2975 uniqtranscale_reg_value = 0x55ADDA3A;
2976 break;
2977 default:
2978 return 0;
2979 }
2980 break;
bd60018a 2981 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
2982 preemph_reg_value = 0x0006000;
2983 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2984 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2985 demph_reg_value = 0x1B405555;
2986 uniqtranscale_reg_value = 0x55ADDA3A;
2987 break;
2988 default:
2989 return 0;
2990 }
2991 break;
2992 default:
2993 return 0;
2994 }
2995
0980a60f 2996 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
2997 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
2998 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
2999 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3000 uniqtranscale_reg_value);
ab3c759a
CML
3001 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3002 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3003 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3004 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3005 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3006
3007 return 0;
3008}
3009
e4a1d846
CML
3010static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3011{
3012 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3013 struct drm_i915_private *dev_priv = dev->dev_private;
3014 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3015 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3016 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3017 uint8_t train_set = intel_dp->train_set[0];
3018 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3019 enum pipe pipe = intel_crtc->pipe;
3020 int i;
e4a1d846
CML
3021
3022 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3023 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3024 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3025 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3026 deemph_reg_value = 128;
3027 margin_reg_value = 52;
3028 break;
bd60018a 3029 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3030 deemph_reg_value = 128;
3031 margin_reg_value = 77;
3032 break;
bd60018a 3033 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3034 deemph_reg_value = 128;
3035 margin_reg_value = 102;
3036 break;
bd60018a 3037 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3038 deemph_reg_value = 128;
3039 margin_reg_value = 154;
3040 /* FIXME extra to set for 1200 */
3041 break;
3042 default:
3043 return 0;
3044 }
3045 break;
bd60018a 3046 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3047 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3048 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3049 deemph_reg_value = 85;
3050 margin_reg_value = 78;
3051 break;
bd60018a 3052 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3053 deemph_reg_value = 85;
3054 margin_reg_value = 116;
3055 break;
bd60018a 3056 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3057 deemph_reg_value = 85;
3058 margin_reg_value = 154;
3059 break;
3060 default:
3061 return 0;
3062 }
3063 break;
bd60018a 3064 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3065 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3066 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3067 deemph_reg_value = 64;
3068 margin_reg_value = 104;
3069 break;
bd60018a 3070 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3071 deemph_reg_value = 64;
3072 margin_reg_value = 154;
3073 break;
3074 default:
3075 return 0;
3076 }
3077 break;
bd60018a 3078 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3079 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3080 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3081 deemph_reg_value = 43;
3082 margin_reg_value = 154;
3083 break;
3084 default:
3085 return 0;
3086 }
3087 break;
3088 default:
3089 return 0;
3090 }
3091
3092 mutex_lock(&dev_priv->dpio_lock);
3093
3094 /* Clear calc init */
1966e59e
VS
3095 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3096 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3097 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3098 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3099 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3100
3101 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3102 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3103 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3104 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3105 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3106
a02ef3c7
VS
3107 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3108 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3109 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3110 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3111
3112 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3113 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3114 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3115 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3116
e4a1d846 3117 /* Program swing deemph */
f72df8db
VS
3118 for (i = 0; i < 4; i++) {
3119 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3120 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3121 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3122 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3123 }
e4a1d846
CML
3124
3125 /* Program swing margin */
f72df8db
VS
3126 for (i = 0; i < 4; i++) {
3127 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3128 val &= ~DPIO_SWING_MARGIN000_MASK;
3129 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3130 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3131 }
e4a1d846
CML
3132
3133 /* Disable unique transition scale */
f72df8db
VS
3134 for (i = 0; i < 4; i++) {
3135 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3136 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3137 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3138 }
e4a1d846
CML
3139
3140 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3141 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3142 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3143 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3144
3145 /*
3146 * The document said it needs to set bit 27 for ch0 and bit 26
3147 * for ch1. Might be a typo in the doc.
3148 * For now, for this unique transition scale selection, set bit
3149 * 27 for ch0 and ch1.
3150 */
f72df8db
VS
3151 for (i = 0; i < 4; i++) {
3152 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3153 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3154 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3155 }
e4a1d846 3156
f72df8db
VS
3157 for (i = 0; i < 4; i++) {
3158 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3159 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3160 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3161 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3162 }
e4a1d846
CML
3163 }
3164
3165 /* Start swing calculation */
1966e59e
VS
3166 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3167 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3168 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3169
3170 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3171 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3172 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3173
3174 /* LRC Bypass */
3175 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3176 val |= DPIO_LRC_BYPASS;
3177 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3178
3179 mutex_unlock(&dev_priv->dpio_lock);
3180
3181 return 0;
3182}
3183
a4fc5ed6 3184static void
0301b3ac
JN
3185intel_get_adjust_train(struct intel_dp *intel_dp,
3186 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3187{
3188 uint8_t v = 0;
3189 uint8_t p = 0;
3190 int lane;
1a2eb460
KP
3191 uint8_t voltage_max;
3192 uint8_t preemph_max;
a4fc5ed6 3193
33a34e4e 3194 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3195 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3196 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3197
3198 if (this_v > v)
3199 v = this_v;
3200 if (this_p > p)
3201 p = this_p;
3202 }
3203
1a2eb460 3204 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3205 if (v >= voltage_max)
3206 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3207
1a2eb460
KP
3208 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3209 if (p >= preemph_max)
3210 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3211
3212 for (lane = 0; lane < 4; lane++)
33a34e4e 3213 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3214}
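
The train_set[] bytes built above use the DPCD TRAINING_LANEx_SET layout: voltage swing in bits 1:0, "max swing reached" in bit 2, pre-emphasis level in bits 4:3, "max pre-emphasis reached" in bit 5. A minimal standalone sketch, not part of the driver, that unpacks one such byte (the numeric masks mirror DP_TRAIN_VOLTAGE_SWING_MASK and friends):

#include <stdint.h>
#include <stdio.h>

static void dump_train_set_byte(uint8_t ts)
{
        unsigned int vswing  = ts & 0x3;         /* DP_TRAIN_VOLTAGE_SWING_MASK */
        unsigned int preemph = (ts >> 3) & 0x3;  /* DP_TRAIN_PRE_EMPHASIS_MASK >> 3 */

        printf("vswing level %u%s, pre-emphasis level %u%s\n",
               vswing,  (ts & 0x04) ? " (max)" : "",
               preemph, (ts & 0x20) ? " (max)" : "");
}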
3215
3216static uint32_t
f0a3424e 3217intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3218{
3cf2efb1 3219 uint32_t signal_levels = 0;
a4fc5ed6 3220
3cf2efb1 3221 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3222 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3223 default:
3224 signal_levels |= DP_VOLTAGE_0_4;
3225 break;
bd60018a 3226 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3227 signal_levels |= DP_VOLTAGE_0_6;
3228 break;
bd60018a 3229 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3230 signal_levels |= DP_VOLTAGE_0_8;
3231 break;
bd60018a 3232 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3233 signal_levels |= DP_VOLTAGE_1_2;
3234 break;
3235 }
3cf2efb1 3236 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3237 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3238 default:
3239 signal_levels |= DP_PRE_EMPHASIS_0;
3240 break;
bd60018a 3241 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3242 signal_levels |= DP_PRE_EMPHASIS_3_5;
3243 break;
bd60018a 3244 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3245 signal_levels |= DP_PRE_EMPHASIS_6;
3246 break;
bd60018a 3247 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3248 signal_levels |= DP_PRE_EMPHASIS_9_5;
3249 break;
3250 }
3251 return signal_levels;
3252}
3253
e3421a18
ZW
3254/* Gen6's DP voltage swing and pre-emphasis control */
3255static uint32_t
3256intel_gen6_edp_signal_levels(uint8_t train_set)
3257{
3c5a62b5
YL
3258 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3259 DP_TRAIN_PRE_EMPHASIS_MASK);
3260 switch (signal_levels) {
bd60018a
SJ
3261 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3263 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3265 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3268 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3271 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3273 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3274 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3275 default:
3c5a62b5
YL
3276 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3277 "0x%x\n", signal_levels);
3278 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3279 }
3280}
3281
1a2eb460
KP
3282/* Gen7's DP voltage swing and pre-emphasis control */
3283static uint32_t
3284intel_gen7_edp_signal_levels(uint8_t train_set)
3285{
3286 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3287 DP_TRAIN_PRE_EMPHASIS_MASK);
3288 switch (signal_levels) {
bd60018a 3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3290 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3292 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3294 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3295
bd60018a 3296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3297 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3299 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3300
bd60018a 3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3302 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3303 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3304 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3305
3306 default:
3307 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3308 "0x%x\n", signal_levels);
3309 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3310 }
3311}
3312
d6c0d722
PZ
3313/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3314static uint32_t
f0a3424e 3315intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3316{
d6c0d722
PZ
3317 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3318 DP_TRAIN_PRE_EMPHASIS_MASK);
3319 switch (signal_levels) {
bd60018a 3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3321 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3323 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3325 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3327 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3328
bd60018a 3329 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3330 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3331 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3332 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3333 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3334 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3335
bd60018a 3336 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3337 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3339 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3340
3341 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3342 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3343 default:
3344 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3345 "0x%x\n", signal_levels);
c5fe6a06 3346 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3347 }
a4fc5ed6
KP
3348}
3349
f0a3424e
PZ
3350/* Properly updates "DP" with the correct signal levels. */
3351static void
3352intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3353{
3354 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3355 enum port port = intel_dig_port->port;
f0a3424e
PZ
3356 struct drm_device *dev = intel_dig_port->base.base.dev;
3357 uint32_t signal_levels, mask;
3358 uint8_t train_set = intel_dp->train_set[0];
3359
5a9d1f1a 3360 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3361 signal_levels = intel_hsw_signal_levels(train_set);
3362 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3363 } else if (IS_CHERRYVIEW(dev)) {
3364 signal_levels = intel_chv_signal_levels(intel_dp);
3365 mask = 0;
e2fa6fba
P
3366 } else if (IS_VALLEYVIEW(dev)) {
3367 signal_levels = intel_vlv_signal_levels(intel_dp);
3368 mask = 0;
bc7d38a4 3369 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3370 signal_levels = intel_gen7_edp_signal_levels(train_set);
3371 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3372 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3373 signal_levels = intel_gen6_edp_signal_levels(train_set);
3374 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3375 } else {
3376 signal_levels = intel_gen4_signal_levels(train_set);
3377 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3378 }
3379
3380 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3381
3382 *DP = (*DP & ~mask) | signal_levels;
3383}
3384
a4fc5ed6 3385static bool
ea5b213a 3386intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3387 uint32_t *DP,
58e10eb9 3388 uint8_t dp_train_pat)
a4fc5ed6 3389{
174edf1f
PZ
3390 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3391 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3392 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3393 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3394 int ret, len;
a4fc5ed6 3395
7b13b58a 3396 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3397
70aff66c 3398 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3399 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3400
2cdfe6c8
JN
3401 buf[0] = dp_train_pat;
3402 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3403 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3404 /* don't write DP_TRAINING_LANEx_SET on disable */
3405 len = 1;
3406 } else {
3407 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
3408 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3409 len = intel_dp->lane_count + 1;
47ea7542 3410 }
a4fc5ed6 3411
9d1a1031
JN
3412 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3413 buf, len);
2cdfe6c8
JN
3414
3415 return ret == len;
a4fc5ed6
KP
3416}
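
The single drm_dp_dpcd_write() above works because DP_TRAINING_PATTERN_SET (DPCD 0x102) is immediately followed by DP_TRAINING_LANE0_SET..DP_TRAINING_LANE3_SET (0x103-0x106), so the pattern byte and the per-lane drive settings go out in one AUX transaction. A standalone sketch of how such a buffer is assembled (illustrative only; the constants restate the DPCD addresses and are not driver macros):

#include <stdint.h>
#include <string.h>

#define TRAINING_PATTERN_SET 0x102   /* pattern register */
#define TRAINING_LANE0_SET   0x103   /* per-lane registers follow directly */

/* One pattern byte, then lane_count drive-setting bytes; the lane bytes are
 * skipped entirely when the pattern is "disable", as in the function above. */
static int build_training_burst(uint8_t *buf, uint8_t pattern,
                                const uint8_t *train_set, int lane_count,
                                int disable)
{
        buf[0] = pattern;
        if (disable)
                return 1;
        memcpy(buf + 1, train_set, lane_count);
        return 1 + lane_count;
}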
3417
70aff66c
JN
3418static bool
3419intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3420 uint8_t dp_train_pat)
3421{
953d22e8 3422 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3423 intel_dp_set_signal_levels(intel_dp, DP);
3424 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3425}
3426
3427static bool
3428intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3429 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3430{
3431 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3432 struct drm_device *dev = intel_dig_port->base.base.dev;
3433 struct drm_i915_private *dev_priv = dev->dev_private;
3434 int ret;
3435
3436 intel_get_adjust_train(intel_dp, link_status);
3437 intel_dp_set_signal_levels(intel_dp, DP);
3438
3439 I915_WRITE(intel_dp->output_reg, *DP);
3440 POSTING_READ(intel_dp->output_reg);
3441
9d1a1031
JN
3442 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3443 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3444
3445 return ret == intel_dp->lane_count;
3446}
3447
3ab9c637
ID
3448static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3449{
3450 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3451 struct drm_device *dev = intel_dig_port->base.base.dev;
3452 struct drm_i915_private *dev_priv = dev->dev_private;
3453 enum port port = intel_dig_port->port;
3454 uint32_t val;
3455
3456 if (!HAS_DDI(dev))
3457 return;
3458
3459 val = I915_READ(DP_TP_CTL(port));
3460 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3461 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3462 I915_WRITE(DP_TP_CTL(port), val);
3463
3464 /*
3465 * On PORT_A we can have only eDP in SST mode. There, the only reason
3466 * we need to set idle transmission mode is to work around a HW issue
3467 * where we enable the pipe while not in idle link-training mode.
3468 * In this case there is a requirement to wait for a minimum number of
3469 * idle patterns to be sent.
3470 */
3471 if (port == PORT_A)
3472 return;
3473
3474 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3475 1))
3476 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3477}
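
wait_for() above is an i915 polling macro; the underlying idea is simply "poll a status bit until it sets or a timeout expires". A generic user-space flavoured sketch of the same pattern, with a hypothetical pred() callback standing in for the DP_TP_STATUS read (busy-waits for brevity):

#include <stdbool.h>
#include <time.h>

static bool poll_until(bool (*pred)(void *cookie), void *cookie,
                       unsigned int timeout_ms)
{
        struct timespec start, now;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                if (pred(cookie))
                        return true;
                clock_gettime(CLOCK_MONOTONIC, &now);
                if ((now.tv_sec - start.tv_sec) * 1000 +
                    (now.tv_nsec - start.tv_nsec) / 1000000 >= (long)timeout_ms)
                        return false;
        }
}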
3478
33a34e4e 3479/* Enable corresponding port and start training pattern 1 */
c19b0669 3480void
33a34e4e 3481intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3482{
da63a9f2 3483 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3484 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3485 int i;
3486 uint8_t voltage;
cdb0e95b 3487 int voltage_tries, loop_tries;
ea5b213a 3488 uint32_t DP = intel_dp->DP;
6aba5b6c 3489 uint8_t link_config[2];
a4fc5ed6 3490
affa9354 3491 if (HAS_DDI(dev))
c19b0669
PZ
3492 intel_ddi_prepare_link_retrain(encoder);
3493
3cf2efb1 3494 /* Write the link configuration data */
6aba5b6c
JN
3495 link_config[0] = intel_dp->link_bw;
3496 link_config[1] = intel_dp->lane_count;
3497 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3498 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3499 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
bc27b7d3 3500 if (intel_dp->num_supported_rates)
a8f3ef61
SJ
3501 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3502 &intel_dp->rate_select, 1);
6aba5b6c
JN
3503
3504 link_config[0] = 0;
3505 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3506 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3507
3508 DP |= DP_PORT_EN;
1a2eb460 3509
70aff66c
JN
3510 /* clock recovery */
3511 if (!intel_dp_reset_link_train(intel_dp, &DP,
3512 DP_TRAINING_PATTERN_1 |
3513 DP_LINK_SCRAMBLING_DISABLE)) {
3514 DRM_ERROR("failed to enable link training\n");
3515 return;
3516 }
3517
a4fc5ed6 3518 voltage = 0xff;
cdb0e95b
KP
3519 voltage_tries = 0;
3520 loop_tries = 0;
a4fc5ed6 3521 for (;;) {
70aff66c 3522 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3523
a7c9655f 3524 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3525 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3526 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3527 break;
93f62dad 3528 }
a4fc5ed6 3529
01916270 3530 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3531 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3532 break;
3533 }
3534
3535 /* Check to see if we've tried the max voltage */
3536 for (i = 0; i < intel_dp->lane_count; i++)
3537 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3538 break;
3b4f819d 3539 if (i == intel_dp->lane_count) {
b06fbda3
DV
3540 ++loop_tries;
3541 if (loop_tries == 5) {
3def84b3 3542 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3543 break;
3544 }
70aff66c
JN
3545 intel_dp_reset_link_train(intel_dp, &DP,
3546 DP_TRAINING_PATTERN_1 |
3547 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3548 voltage_tries = 0;
3549 continue;
3550 }
a4fc5ed6 3551
3cf2efb1 3552 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3553 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3554 ++voltage_tries;
b06fbda3 3555 if (voltage_tries == 5) {
3def84b3 3556 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3557 break;
3558 }
3559 } else
3560 voltage_tries = 0;
3561 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3562
70aff66c
JN
3563 /* Update training set as requested by target */
3564 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3565 DRM_ERROR("failed to update link training\n");
3566 break;
3567 }
a4fc5ed6
KP
3568 }
3569
33a34e4e
JB
3570 intel_dp->DP = DP;
3571}
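
The loop above encodes the clock-recovery retry policy: at most 5 attempts at the same voltage swing, and at most 5 full restarts once every lane reports "max swing reached". A condensed standalone sketch of just that bookkeeping (illustration only, not the driver's code):

#include <stdbool.h>
#include <stdint.h>

struct cr_state { int voltage_tries; int loop_tries; uint8_t last_voltage; };

/* Returns true when clock recovery should be abandoned after one
 * adjust/retry step; mirrors voltage_tries/loop_tries above. */
static bool cr_should_give_up(struct cr_state *s, uint8_t current_voltage,
                              bool all_lanes_at_max_swing)
{
        if (all_lanes_at_max_swing) {
                if (++s->loop_tries == 5)
                        return true;            /* too many full restarts */
                s->voltage_tries = 0;           /* restart from scratch */
                return false;
        }
        if (current_voltage == s->last_voltage) {
                if (++s->voltage_tries == 5)
                        return true;            /* stuck at one swing level */
        } else {
                s->voltage_tries = 0;
        }
        s->last_voltage = current_voltage;
        return false;
}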
3572
c19b0669 3573void
33a34e4e
JB
3574intel_dp_complete_link_train(struct intel_dp *intel_dp)
3575{
33a34e4e 3576 bool channel_eq = false;
37f80975 3577 int tries, cr_tries;
33a34e4e 3578 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3579 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3580
3581 /* Training Pattern 3 for HBR2 or DP 1.2 devices that support it */
3582 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3583 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3584
a4fc5ed6 3585 /* channel equalization */
70aff66c 3586 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3587 training_pattern |
70aff66c
JN
3588 DP_LINK_SCRAMBLING_DISABLE)) {
3589 DRM_ERROR("failed to start channel equalization\n");
3590 return;
3591 }
3592
a4fc5ed6 3593 tries = 0;
37f80975 3594 cr_tries = 0;
a4fc5ed6
KP
3595 channel_eq = false;
3596 for (;;) {
70aff66c 3597 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3598
37f80975
JB
3599 if (cr_tries > 5) {
3600 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3601 break;
3602 }
3603
a7c9655f 3604 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3605 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3606 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3607 break;
70aff66c 3608 }
a4fc5ed6 3609
37f80975 3610 /* Make sure clock is still ok */
01916270 3611 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3612 intel_dp_start_link_train(intel_dp);
70aff66c 3613 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3614 training_pattern |
70aff66c 3615 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3616 cr_tries++;
3617 continue;
3618 }
3619
1ffdff13 3620 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3621 channel_eq = true;
3622 break;
3623 }
a4fc5ed6 3624
37f80975
JB
3625 /* Try 5 times, then try clock recovery if that fails */
3626 if (tries > 5) {
37f80975 3627 intel_dp_start_link_train(intel_dp);
70aff66c 3628 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3629 training_pattern |
70aff66c 3630 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3631 tries = 0;
3632 cr_tries++;
3633 continue;
3634 }
a4fc5ed6 3635
70aff66c
JN
3636 /* Update training set as requested by target */
3637 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3638 DRM_ERROR("failed to update link training\n");
3639 break;
3640 }
3cf2efb1 3641 ++tries;
869184a6 3642 }
3cf2efb1 3643
3ab9c637
ID
3644 intel_dp_set_idle_link_train(intel_dp);
3645
3646 intel_dp->DP = DP;
3647
d6c0d722 3648 if (channel_eq)
07f42258 3649 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3650
3ab9c637
ID
3651}
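
Channel equalization above has its own bounded retries: losing clock recovery or failing 5 EQ attempts triggers a full restart of link training, and at most 5 such restarts (cr_tries) are allowed before giving up. A compressed sketch of the per-iteration decision order (illustration only):

#include <stdbool.h>

enum eq_action { EQ_DONE, EQ_ABORT, EQ_RESTART_TRAINING, EQ_ADJUST_AND_RETRY };

static enum eq_action eq_step(bool clock_recovery_ok, bool channel_eq_ok,
                              int *tries, int *cr_tries)
{
        if (*cr_tries > 5)
                return EQ_ABORT;                /* too many full restarts */
        if (!clock_recovery_ok) {
                (*cr_tries)++;
                return EQ_RESTART_TRAINING;     /* CR lost: start over */
        }
        if (channel_eq_ok)
                return EQ_DONE;
        if (*tries > 5) {
                *tries = 0;
                (*cr_tries)++;
                return EQ_RESTART_TRAINING;     /* EQ stuck: start over */
        }
        (*tries)++;
        return EQ_ADJUST_AND_RETRY;             /* apply new drive settings */
}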
3652
3653void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3654{
70aff66c 3655 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3656 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3657}
3658
3659static void
ea5b213a 3660intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3661{
da63a9f2 3662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3663 enum port port = intel_dig_port->port;
da63a9f2 3664 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3665 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3666 uint32_t DP = intel_dp->DP;
a4fc5ed6 3667
bc76e320 3668 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3669 return;
3670
0c33d8d7 3671 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3672 return;
3673
28c97730 3674 DRM_DEBUG_KMS("\n");
32f9d658 3675
bc7d38a4 3676 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3677 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3678 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3679 } else {
aad3d14d
VS
3680 if (IS_CHERRYVIEW(dev))
3681 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3682 else
3683 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3684 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3685 }
fe255d00 3686 POSTING_READ(intel_dp->output_reg);
5eb08b69 3687
493a7081 3688 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3689 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3690 /* Hardware workaround: leaving our transcoder select
3691 * set to transcoder B while it's off will prevent the
3692 * corresponding HDMI output on transcoder A.
3693 *
3694 * Combine this with another hardware workaround:
3695 * transcoder select bit can only be cleared while the
3696 * port is enabled.
3697 */
3698 DP &= ~DP_PIPEB_SELECT;
3699 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3700 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3701 }
3702
832afda6 3703 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3704 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3705 POSTING_READ(intel_dp->output_reg);
f01eca2e 3706 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3707}
3708
26d61aad
KP
3709static bool
3710intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3711{
a031d709
RV
3712 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3713 struct drm_device *dev = dig_port->base.base.dev;
3714 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3715 uint8_t rev;
a031d709 3716
9d1a1031
JN
3717 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3718 sizeof(intel_dp->dpcd)) < 0)
edb39244 3719 return false; /* aux transfer failed */
92fd8fd1 3720
a8e98153 3721 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3722
edb39244
AJ
3723 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3724 return false; /* DPCD not present */
3725
2293bb5c
SK
3726 /* Check if the panel supports PSR */
3727 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3728 if (is_edp(intel_dp)) {
9d1a1031
JN
3729 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3730 intel_dp->psr_dpcd,
3731 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3732 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3733 dev_priv->psr.sink_support = true;
50003939 3734 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3735 }
50003939
JN
3736 }
3737
7809a611 3738 /* Training Pattern 3 support, both source and sink */
06ea66b6 3739 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3740 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3741 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3742 intel_dp->use_tps3 = true;
f8d8a672 3743 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3744 } else
3745 intel_dp->use_tps3 = false;
3746
fc0f8e25
SJ
3747 /* Intermediate frequency support */
3748 if (is_edp(intel_dp) &&
3749 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3750 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3751 (rev >= 0x03)) { /* eDP v1.4 or higher */
ea2d8a42
VS
3752 __le16 supported_rates[DP_MAX_SUPPORTED_RATES];
3753 int i;
3754
fc0f8e25
SJ
3755 intel_dp_dpcd_read_wake(&intel_dp->aux,
3756 DP_SUPPORTED_LINK_RATES,
ea2d8a42
VS
3757 supported_rates,
3758 sizeof(supported_rates));
3759
3760 for (i = 0; i < ARRAY_SIZE(supported_rates); i++) {
3761 int val = le16_to_cpu(supported_rates[i]);
3762
3763 if (val == 0)
3764 break;
3765
3766 intel_dp->supported_rates[i] = val * 200;
3767 }
3768 intel_dp->num_supported_rates = i;
fc0f8e25 3769 }
edb39244
AJ
3770 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3771 DP_DWN_STRM_PORT_PRESENT))
3772 return true; /* native DP sink */
3773
3774 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3775 return true; /* no per-port downstream info */
3776
9d1a1031
JN
3777 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3778 intel_dp->downstream_ports,
3779 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3780 return false; /* downstream port status fetch failed */
3781
3782 return true;
92fd8fd1
KP
3783}
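
The eDP 1.4 DP_SUPPORTED_LINK_RATES table parsed above holds little-endian 16-bit entries in units of 200 kHz of link clock, terminated by a zero entry; that is why the code multiplies by 200 to land in the driver's kHz rate space (810 -> 162000 kHz for 1.62 Gbps, 2700 -> 540000 kHz for 5.4 Gbps). A standalone sketch of the same conversion (illustration only):

#include <stdint.h>
#include <stddef.h>

static int parse_link_rates(const uint8_t *raw, size_t nbytes, int *rates_khz)
{
        int n = 0;
        size_t i;

        for (i = 0; i + 1 < nbytes; i += 2) {
                int val = raw[i] | (raw[i + 1] << 8);   /* le16 entry */

                if (val == 0)                           /* end of table */
                        break;
                rates_khz[n++] = val * 200;             /* 200 kHz units */
        }
        return n;
}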
3784
0d198328
AJ
3785static void
3786intel_dp_probe_oui(struct intel_dp *intel_dp)
3787{
3788 u8 buf[3];
3789
3790 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3791 return;
3792
9d1a1031 3793 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3794 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3795 buf[0], buf[1], buf[2]);
3796
9d1a1031 3797 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3798 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3799 buf[0], buf[1], buf[2]);
3800}
3801
0e32b39c
DA
3802static bool
3803intel_dp_probe_mst(struct intel_dp *intel_dp)
3804{
3805 u8 buf[1];
3806
3807 if (!intel_dp->can_mst)
3808 return false;
3809
3810 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3811 return false;
3812
0e32b39c
DA
3813 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3814 if (buf[0] & DP_MST_CAP) {
3815 DRM_DEBUG_KMS("Sink is MST capable\n");
3816 intel_dp->is_mst = true;
3817 } else {
3818 DRM_DEBUG_KMS("Sink is not MST capable\n");
3819 intel_dp->is_mst = false;
3820 }
3821 }
0e32b39c
DA
3822
3823 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3824 return intel_dp->is_mst;
3825}
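
intel_dp_probe_mst() comes down to one DPCD read: MSTM_CAP (DPCD 0x021) and its MST_CAP bit, gated on the source allowing MST and the sink being at least DPCD revision 1.2. A reduced sketch of that decision, with a hypothetical read_dpcd_byte() helper standing in for the AUX channel:

#include <stdbool.h>
#include <stdint.h>

#define DPCD_REV  0x000
#define MSTM_CAP  0x021
#define MST_CAP   (1 << 0)

bool read_dpcd_byte(unsigned int addr, uint8_t *out);   /* hypothetical */

static bool sink_wants_mst(bool source_can_mst)
{
        uint8_t rev, cap;

        if (!source_can_mst)
                return false;
        if (!read_dpcd_byte(DPCD_REV, &rev) || rev < 0x12)
                return false;
        if (!read_dpcd_byte(MSTM_CAP, &cap))
                return false;
        return (cap & MST_CAP) != 0;
}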
3826
d2e216d0
RV
3827int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3828{
3829 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3830 struct drm_device *dev = intel_dig_port->base.base.dev;
3831 struct intel_crtc *intel_crtc =
3832 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3833 u8 buf;
3834 int test_crc_count;
3835 int attempts = 6;
d2e216d0 3836
ad9dc91b 3837 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3838 return -EIO;
d2e216d0 3839
ad9dc91b 3840 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3841 return -ENOTTY;
3842
1dda5f93
RV
3843 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3844 return -EIO;
3845
9d1a1031 3846 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3847 buf | DP_TEST_SINK_START) < 0)
bda0381e 3848 return -EIO;
d2e216d0 3849
1dda5f93 3850 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3851 return -EIO;
ad9dc91b 3852 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3853
ad9dc91b 3854 do {
1dda5f93
RV
3855 if (drm_dp_dpcd_readb(&intel_dp->aux,
3856 DP_TEST_SINK_MISC, &buf) < 0)
3857 return -EIO;
ad9dc91b
RV
3858 intel_wait_for_vblank(dev, intel_crtc->pipe);
3859 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3860
3861 if (attempts == 0) {
90bd1f46
DV
3862 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3863 return -ETIMEDOUT;
ad9dc91b 3864 }
d2e216d0 3865
9d1a1031 3866 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3867 return -EIO;
d2e216d0 3868
1dda5f93
RV
3869 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3870 return -EIO;
3871 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3872 buf & ~DP_TEST_SINK_START) < 0)
3873 return -EIO;
ce31d9f4 3874
d2e216d0
RV
3875 return 0;
3876}
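
intel_dp_sink_crc() above runs the DPCD test-sink sequence: confirm CRC support in TEST_SINK_MISC, set TEST_SINK_START, wait up to six vblanks for TEST_COUNT to advance, read the six CRC bytes, then clear TEST_SINK_START. A hypothetical caller (not something in this file) might use it like this to compare two frames by their sink-side CRCs; the prototype is restated here only to keep the sketch self-contained:

#include <stdint.h>
#include <string.h>
#include <stdbool.h>

struct intel_dp;                                                /* opaque here */
int intel_dp_sink_crc(struct intel_dp *intel_dp, uint8_t *crc); /* as above */

static bool frames_have_same_crc(struct intel_dp *intel_dp)
{
        uint8_t crc_a[6], crc_b[6];

        /* any AUX or timeout error is treated as "not comparable" */
        if (intel_dp_sink_crc(intel_dp, crc_a) < 0 ||
            intel_dp_sink_crc(intel_dp, crc_b) < 0)
                return false;
        return memcmp(crc_a, crc_b, sizeof(crc_a)) == 0;
}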
3877
a60f0e38
JB
3878static bool
3879intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3880{
9d1a1031
JN
3881 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3882 DP_DEVICE_SERVICE_IRQ_VECTOR,
3883 sink_irq_vector, 1) == 1;
a60f0e38
JB
3884}
3885
0e32b39c
DA
3886static bool
3887intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3888{
3889 int ret;
3890
3891 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3892 DP_SINK_COUNT_ESI,
3893 sink_irq_vector, 14);
3894 if (ret != 14)
3895 return false;
3896
3897 return true;
3898}
3899
a60f0e38
JB
3900static void
3901intel_dp_handle_test_request(struct intel_dp *intel_dp)
3902{
3903 /* NAK by default */
9d1a1031 3904 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3905}
3906
0e32b39c
DA
3907static int
3908intel_dp_check_mst_status(struct intel_dp *intel_dp)
3909{
3910 bool bret;
3911
3912 if (intel_dp->is_mst) {
3913 u8 esi[16] = { 0 };
3914 int ret = 0;
3915 int retry;
3916 bool handled;
3917 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3918go_again:
3919 if (bret == true) {
3920
3921 /* check link status - esi[10] = 0x200c */
3922 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3923 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3924 intel_dp_start_link_train(intel_dp);
3925 intel_dp_complete_link_train(intel_dp);
3926 intel_dp_stop_link_train(intel_dp);
3927 }
3928
6f34cc39 3929 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3930 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3931
3932 if (handled) {
3933 for (retry = 0; retry < 3; retry++) {
3934 int wret;
3935 wret = drm_dp_dpcd_write(&intel_dp->aux,
3936 DP_SINK_COUNT_ESI+1,
3937 &esi[1], 3);
3938 if (wret == 3) {
3939 break;
3940 }
3941 }
3942
3943 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3944 if (bret == true) {
6f34cc39 3945 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
3946 goto go_again;
3947 }
3948 } else
3949 ret = 0;
3950
3951 return ret;
3952 } else {
3953 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3954 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
3955 intel_dp->is_mst = false;
3956 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3957 /* send a hotplug event */
3958 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
3959 }
3960 }
3961 return -EINVAL;
3962}
3963
a4fc5ed6
KP
3964/*
3965 * According to DP spec
3966 * 5.1.2:
3967 * 1. Read DPCD
3968 * 2. Configure link according to Receiver Capabilities
3969 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
3970 * 4. Check link status on receipt of hot-plug interrupt
3971 */
a5146200 3972static void
ea5b213a 3973intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 3974{
5b215bcf 3975 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 3976 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 3977 u8 sink_irq_vector;
93f62dad 3978 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 3979
5b215bcf
DA
3980 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3981
da63a9f2 3982 if (!intel_encoder->connectors_active)
d2b996ac 3983 return;
59cd09e1 3984
da63a9f2 3985 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
3986 return;
3987
1a125d8a
ID
3988 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
3989 return;
3990
92fd8fd1 3991 /* Try to read receiver status if the link appears to be up */
93f62dad 3992 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
3993 return;
3994 }
3995
92fd8fd1 3996 /* Now read the DPCD to see if it's actually running */
26d61aad 3997 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
3998 return;
3999 }
4000
a60f0e38
JB
4001 /* Try to read the source of the interrupt */
4002 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4003 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4004 /* Clear interrupt source */
9d1a1031
JN
4005 drm_dp_dpcd_writeb(&intel_dp->aux,
4006 DP_DEVICE_SERVICE_IRQ_VECTOR,
4007 sink_irq_vector);
a60f0e38
JB
4008
4009 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4010 intel_dp_handle_test_request(intel_dp);
4011 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4012 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4013 }
4014
1ffdff13 4015 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4016 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4017 intel_encoder->base.name);
33a34e4e
JB
4018 intel_dp_start_link_train(intel_dp);
4019 intel_dp_complete_link_train(intel_dp);
3ab9c637 4020 intel_dp_stop_link_train(intel_dp);
33a34e4e 4021 }
a4fc5ed6 4022}
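
A short HPD pulse ends up here: the function re-reads link status and the DPCD, acknowledges any sink IRQs, and retrains only if channel equalization has dropped. One DP detail worth calling out is that DEVICE_SERVICE_IRQ_VECTOR bits are cleared by writing back the value that was read, which is what the code above does. A standalone sketch of that acknowledge step (illustration only; dpcd_read/dpcd_write are hypothetical stand-ins for the AUX channel):

#include <stdbool.h>
#include <stdint.h>

#define DEVICE_SERVICE_IRQ_VECTOR 0x201         /* DPCD address */
#define AUTOMATED_TEST_REQUEST    (1 << 1)

bool dpcd_read(unsigned int addr, uint8_t *val);        /* hypothetical */
bool dpcd_write(unsigned int addr, uint8_t val);        /* hypothetical */

static void ack_sink_irqs(void)
{
        uint8_t vec;

        if (!dpcd_read(DEVICE_SERVICE_IRQ_VECTOR, &vec) || vec == 0)
                return;
        /* write back the bits we saw to clear them in the sink */
        dpcd_write(DEVICE_SERVICE_IRQ_VECTOR, vec);
        if (vec & AUTOMATED_TEST_REQUEST) {
                /* the driver above simply NAKs automated test requests */
        }
}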
a4fc5ed6 4023
caf9ab24 4024/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4025static enum drm_connector_status
26d61aad 4026intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4027{
caf9ab24 4028 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4029 uint8_t type;
4030
4031 if (!intel_dp_get_dpcd(intel_dp))
4032 return connector_status_disconnected;
4033
4034 /* if there's no downstream port, we're done */
4035 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4036 return connector_status_connected;
caf9ab24
AJ
4037
4038 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4039 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4040 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4041 uint8_t reg;
9d1a1031
JN
4042
4043 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4044 &reg, 1) < 0)
caf9ab24 4045 return connector_status_unknown;
9d1a1031 4046
23235177
AJ
4047 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4048 : connector_status_disconnected;
caf9ab24
AJ
4049 }
4050
4051 /* If no HPD, poke DDC gently */
0b99836f 4052 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4053 return connector_status_connected;
caf9ab24
AJ
4054
4055 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4056 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4057 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4058 if (type == DP_DS_PORT_TYPE_VGA ||
4059 type == DP_DS_PORT_TYPE_NON_EDID)
4060 return connector_status_unknown;
4061 } else {
4062 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4063 DP_DWN_STRM_PORT_TYPE_MASK;
4064 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4065 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4066 return connector_status_unknown;
4067 }
caf9ab24
AJ
4068
4069 /* Anything else is out of spec, warn and ignore */
4070 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4071 return connector_status_disconnected;
71ba9000
AJ
4072}
4073
d410b56d
CW
4074static enum drm_connector_status
4075edp_detect(struct intel_dp *intel_dp)
4076{
4077 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4078 enum drm_connector_status status;
4079
4080 status = intel_panel_detect(dev);
4081 if (status == connector_status_unknown)
4082 status = connector_status_connected;
4083
4084 return status;
4085}
4086
5eb08b69 4087static enum drm_connector_status
a9756bb5 4088ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4089{
30add22d 4090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4091 struct drm_i915_private *dev_priv = dev->dev_private;
4092 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4093
1b469639
DL
4094 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4095 return connector_status_disconnected;
4096
26d61aad 4097 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4098}
4099
2a592bec
DA
4100static int g4x_digital_port_connected(struct drm_device *dev,
4101 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4102{
a4fc5ed6 4103 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4104 uint32_t bit;
5eb08b69 4105
232a6ee9
TP
4106 if (IS_VALLEYVIEW(dev)) {
4107 switch (intel_dig_port->port) {
4108 case PORT_B:
4109 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4110 break;
4111 case PORT_C:
4112 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4113 break;
4114 case PORT_D:
4115 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4116 break;
4117 default:
2a592bec 4118 return -EINVAL;
232a6ee9
TP
4119 }
4120 } else {
4121 switch (intel_dig_port->port) {
4122 case PORT_B:
4123 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4124 break;
4125 case PORT_C:
4126 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4127 break;
4128 case PORT_D:
4129 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4130 break;
4131 default:
2a592bec 4132 return -EINVAL;
232a6ee9 4133 }
a4fc5ed6
KP
4134 }
4135
10f76a38 4136 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4137 return 0;
4138 return 1;
4139}
4140
4141static enum drm_connector_status
4142g4x_dp_detect(struct intel_dp *intel_dp)
4143{
4144 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4145 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4146 int ret;
4147
4148 /* Can't disconnect eDP, but you can close the lid... */
4149 if (is_edp(intel_dp)) {
4150 enum drm_connector_status status;
4151
4152 status = intel_panel_detect(dev);
4153 if (status == connector_status_unknown)
4154 status = connector_status_connected;
4155 return status;
4156 }
4157
4158 ret = g4x_digital_port_connected(dev, intel_dig_port);
4159 if (ret == -EINVAL)
4160 return connector_status_unknown;
4161 else if (ret == 0)
a4fc5ed6
KP
4162 return connector_status_disconnected;
4163
26d61aad 4164 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4165}
4166
8c241fef 4167static struct edid *
beb60608 4168intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4169{
beb60608 4170 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4171
9cd300e0
JN
4172 /* use cached edid if we have one */
4173 if (intel_connector->edid) {
9cd300e0
JN
4174 /* invalid edid */
4175 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4176 return NULL;
4177
55e9edeb 4178 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4179 } else
4180 return drm_get_edid(&intel_connector->base,
4181 &intel_dp->aux.ddc);
4182}
8c241fef 4183
beb60608
CW
4184static void
4185intel_dp_set_edid(struct intel_dp *intel_dp)
4186{
4187 struct intel_connector *intel_connector = intel_dp->attached_connector;
4188 struct edid *edid;
8c241fef 4189
beb60608
CW
4190 edid = intel_dp_get_edid(intel_dp);
4191 intel_connector->detect_edid = edid;
4192
4193 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4194 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4195 else
4196 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4197}
4198
beb60608
CW
4199static void
4200intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4201{
beb60608 4202 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4203
beb60608
CW
4204 kfree(intel_connector->detect_edid);
4205 intel_connector->detect_edid = NULL;
9cd300e0 4206
beb60608
CW
4207 intel_dp->has_audio = false;
4208}
d6f24d0f 4209
beb60608
CW
4210static enum intel_display_power_domain
4211intel_dp_power_get(struct intel_dp *dp)
4212{
4213 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4214 enum intel_display_power_domain power_domain;
4215
4216 power_domain = intel_display_port_power_domain(encoder);
4217 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4218
4219 return power_domain;
4220}
d6f24d0f 4221
beb60608
CW
4222static void
4223intel_dp_power_put(struct intel_dp *dp,
4224 enum intel_display_power_domain power_domain)
4225{
4226 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4227 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4228}
4229
a9756bb5
ZW
4230static enum drm_connector_status
4231intel_dp_detect(struct drm_connector *connector, bool force)
4232{
4233 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4234 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4235 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4236 struct drm_device *dev = connector->dev;
a9756bb5 4237 enum drm_connector_status status;
671dedd2 4238 enum intel_display_power_domain power_domain;
0e32b39c 4239 bool ret;
a9756bb5 4240
164c8598 4241 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4242 connector->base.id, connector->name);
beb60608 4243 intel_dp_unset_edid(intel_dp);
164c8598 4244
0e32b39c
DA
4245 if (intel_dp->is_mst) {
4246 /* MST devices are disconnected from a monitor POV */
4247 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4248 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4249 return connector_status_disconnected;
0e32b39c
DA
4250 }
4251
beb60608 4252 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4253
d410b56d
CW
4254 /* Can't disconnect eDP, but you can close the lid... */
4255 if (is_edp(intel_dp))
4256 status = edp_detect(intel_dp);
4257 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4258 status = ironlake_dp_detect(intel_dp);
4259 else
4260 status = g4x_dp_detect(intel_dp);
4261 if (status != connector_status_connected)
c8c8fb33 4262 goto out;
a9756bb5 4263
0d198328
AJ
4264 intel_dp_probe_oui(intel_dp);
4265
0e32b39c
DA
4266 ret = intel_dp_probe_mst(intel_dp);
4267 if (ret) {
4268 /* if we are in MST mode then this connector
4269 * won't appear connected or have anything with EDID on it */
4270 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4271 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4272 status = connector_status_disconnected;
4273 goto out;
4274 }
4275
beb60608 4276 intel_dp_set_edid(intel_dp);
a9756bb5 4277
d63885da
PZ
4278 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4279 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4280 status = connector_status_connected;
4281
4282out:
beb60608 4283 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4284 return status;
a4fc5ed6
KP
4285}
4286
beb60608
CW
4287static void
4288intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4289{
df0e9248 4290 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4291 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4292 enum intel_display_power_domain power_domain;
a4fc5ed6 4293
beb60608
CW
4294 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4295 connector->base.id, connector->name);
4296 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4297
beb60608
CW
4298 if (connector->status != connector_status_connected)
4299 return;
671dedd2 4300
beb60608
CW
4301 power_domain = intel_dp_power_get(intel_dp);
4302
4303 intel_dp_set_edid(intel_dp);
4304
4305 intel_dp_power_put(intel_dp, power_domain);
4306
4307 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4308 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4309}
4310
4311static int intel_dp_get_modes(struct drm_connector *connector)
4312{
4313 struct intel_connector *intel_connector = to_intel_connector(connector);
4314 struct edid *edid;
4315
4316 edid = intel_connector->detect_edid;
4317 if (edid) {
4318 int ret = intel_connector_update_modes(connector, edid);
4319 if (ret)
4320 return ret;
4321 }
32f9d658 4322
f8779fda 4323 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4324 if (is_edp(intel_attached_dp(connector)) &&
4325 intel_connector->panel.fixed_mode) {
f8779fda 4326 struct drm_display_mode *mode;
beb60608
CW
4327
4328 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4329 intel_connector->panel.fixed_mode);
f8779fda 4330 if (mode) {
32f9d658
ZW
4331 drm_mode_probed_add(connector, mode);
4332 return 1;
4333 }
4334 }
beb60608 4335
32f9d658 4336 return 0;
a4fc5ed6
KP
4337}
4338
1aad7ac0
CW
4339static bool
4340intel_dp_detect_audio(struct drm_connector *connector)
4341{
1aad7ac0 4342 bool has_audio = false;
beb60608 4343 struct edid *edid;
1aad7ac0 4344
beb60608
CW
4345 edid = to_intel_connector(connector)->detect_edid;
4346 if (edid)
1aad7ac0 4347 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4348
1aad7ac0
CW
4349 return has_audio;
4350}
4351
f684960e
CW
4352static int
4353intel_dp_set_property(struct drm_connector *connector,
4354 struct drm_property *property,
4355 uint64_t val)
4356{
e953fd7b 4357 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4358 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4359 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4360 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4361 int ret;
4362
662595df 4363 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4364 if (ret)
4365 return ret;
4366
3f43c48d 4367 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4368 int i = val;
4369 bool has_audio;
4370
4371 if (i == intel_dp->force_audio)
f684960e
CW
4372 return 0;
4373
1aad7ac0 4374 intel_dp->force_audio = i;
f684960e 4375
c3e5f67b 4376 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4377 has_audio = intel_dp_detect_audio(connector);
4378 else
c3e5f67b 4379 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4380
4381 if (has_audio == intel_dp->has_audio)
f684960e
CW
4382 return 0;
4383
1aad7ac0 4384 intel_dp->has_audio = has_audio;
f684960e
CW
4385 goto done;
4386 }
4387
e953fd7b 4388 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4389 bool old_auto = intel_dp->color_range_auto;
4390 uint32_t old_range = intel_dp->color_range;
4391
55bc60db
VS
4392 switch (val) {
4393 case INTEL_BROADCAST_RGB_AUTO:
4394 intel_dp->color_range_auto = true;
4395 break;
4396 case INTEL_BROADCAST_RGB_FULL:
4397 intel_dp->color_range_auto = false;
4398 intel_dp->color_range = 0;
4399 break;
4400 case INTEL_BROADCAST_RGB_LIMITED:
4401 intel_dp->color_range_auto = false;
4402 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4403 break;
4404 default:
4405 return -EINVAL;
4406 }
ae4edb80
DV
4407
4408 if (old_auto == intel_dp->color_range_auto &&
4409 old_range == intel_dp->color_range)
4410 return 0;
4411
e953fd7b
CW
4412 goto done;
4413 }
4414
53b41837
YN
4415 if (is_edp(intel_dp) &&
4416 property == connector->dev->mode_config.scaling_mode_property) {
4417 if (val == DRM_MODE_SCALE_NONE) {
4418 DRM_DEBUG_KMS("no scaling not supported\n");
4419 return -EINVAL;
4420 }
4421
4422 if (intel_connector->panel.fitting_mode == val) {
4423 /* the eDP scaling property is not changed */
4424 return 0;
4425 }
4426 intel_connector->panel.fitting_mode = val;
4427
4428 goto done;
4429 }
4430
f684960e
CW
4431 return -EINVAL;
4432
4433done:
c0c36b94
CW
4434 if (intel_encoder->base.crtc)
4435 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4436
4437 return 0;
4438}
4439
a4fc5ed6 4440static void
73845adf 4441intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4442{
1d508706 4443 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4444
10e972d3 4445 kfree(intel_connector->detect_edid);
beb60608 4446
9cd300e0
JN
4447 if (!IS_ERR_OR_NULL(intel_connector->edid))
4448 kfree(intel_connector->edid);
4449
acd8db10
PZ
4450 /* Can't call is_edp() since the encoder may have been destroyed
4451 * already. */
4452 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4453 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4454
a4fc5ed6 4455 drm_connector_cleanup(connector);
55f78c43 4456 kfree(connector);
a4fc5ed6
KP
4457}
4458
00c09d70 4459void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4460{
da63a9f2
PZ
4461 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4462 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4463
4f71d0cb 4464 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4465 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4466 if (is_edp(intel_dp)) {
4467 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4468 /*
4469 * vdd might still be enabled due to the delayed vdd off.
4470 * Make sure vdd is actually turned off here.
4471 */
773538e8 4472 pps_lock(intel_dp);
4be73780 4473 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4474 pps_unlock(intel_dp);
4475
01527b31
CT
4476 if (intel_dp->edp_notifier.notifier_call) {
4477 unregister_reboot_notifier(&intel_dp->edp_notifier);
4478 intel_dp->edp_notifier.notifier_call = NULL;
4479 }
bd943159 4480 }
c8bd0e49 4481 drm_encoder_cleanup(encoder);
da63a9f2 4482 kfree(intel_dig_port);
24d05927
DV
4483}
4484
07f9cd0b
ID
4485static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4486{
4487 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4488
4489 if (!is_edp(intel_dp))
4490 return;
4491
951468f3
VS
4492 /*
4493 * vdd might still be enabled due to the delayed vdd off.
4494 * Make sure vdd is actually turned off here.
4495 */
afa4e53a 4496 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4497 pps_lock(intel_dp);
07f9cd0b 4498 edp_panel_vdd_off_sync(intel_dp);
773538e8 4499 pps_unlock(intel_dp);
07f9cd0b
ID
4500}
4501
49e6bc51
VS
4502static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4503{
4504 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4505 struct drm_device *dev = intel_dig_port->base.base.dev;
4506 struct drm_i915_private *dev_priv = dev->dev_private;
4507 enum intel_display_power_domain power_domain;
4508
4509 lockdep_assert_held(&dev_priv->pps_mutex);
4510
4511 if (!edp_have_panel_vdd(intel_dp))
4512 return;
4513
4514 /*
4515 * The VDD bit needs a power domain reference, so if the bit is
4516 * already enabled when we boot or resume, grab this reference and
4517 * schedule a vdd off, so we don't hold on to the reference
4518 * indefinitely.
4519 */
4520 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4521 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4522 intel_display_power_get(dev_priv, power_domain);
4523
4524 edp_panel_vdd_schedule_off(intel_dp);
4525}
4526
6d93c0c4
ID
4527static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4528{
49e6bc51
VS
4529 struct intel_dp *intel_dp;
4530
4531 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4532 return;
4533
4534 intel_dp = enc_to_intel_dp(encoder);
4535
4536 pps_lock(intel_dp);
4537
4538 /*
4539 * Read out the current power sequencer assignment,
4540 * in case the BIOS did something with it.
4541 */
4542 if (IS_VALLEYVIEW(encoder->dev))
4543 vlv_initial_power_sequencer_setup(intel_dp);
4544
4545 intel_edp_panel_vdd_sanitize(intel_dp);
4546
4547 pps_unlock(intel_dp);
6d93c0c4
ID
4548}
4549
a4fc5ed6 4550static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4551 .dpms = intel_connector_dpms,
a4fc5ed6 4552 .detect = intel_dp_detect,
beb60608 4553 .force = intel_dp_force,
a4fc5ed6 4554 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4555 .set_property = intel_dp_set_property,
2545e4a6 4556 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4557 .destroy = intel_dp_connector_destroy,
c6f95f27 4558 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
a4fc5ed6
KP
4559};
4560
4561static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4562 .get_modes = intel_dp_get_modes,
4563 .mode_valid = intel_dp_mode_valid,
df0e9248 4564 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4565};
4566
a4fc5ed6 4567static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4568 .reset = intel_dp_encoder_reset,
24d05927 4569 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4570};
4571
0e32b39c 4572void
21d40d37 4573intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4574{
0e32b39c 4575 return;
c8110e52 4576}
6207937d 4577
b2c5c181 4578enum irqreturn
13cf5504
DA
4579intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4580{
4581 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4582 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4583 struct drm_device *dev = intel_dig_port->base.base.dev;
4584 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4585 enum intel_display_power_domain power_domain;
b2c5c181 4586 enum irqreturn ret = IRQ_NONE;
1c767b33 4587
0e32b39c
DA
4588 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4589 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4590
7a7f84cc
VS
4591 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4592 /*
4593 * vdd off can generate a long pulse on eDP which
4594 * would require vdd on to handle it, and thus we
4595 * would end up in an endless cycle of
4596 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4597 */
4598 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4599 port_name(intel_dig_port->port));
a8b3d52f 4600 return IRQ_HANDLED;
7a7f84cc
VS
4601 }
4602
26fbb774
VS
4603 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4604 port_name(intel_dig_port->port),
0e32b39c 4605 long_hpd ? "long" : "short");
13cf5504 4606
1c767b33
ID
4607 power_domain = intel_display_port_power_domain(intel_encoder);
4608 intel_display_power_get(dev_priv, power_domain);
4609
0e32b39c 4610 if (long_hpd) {
2a592bec
DA
4611
4612 if (HAS_PCH_SPLIT(dev)) {
4613 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4614 goto mst_fail;
4615 } else {
4616 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4617 goto mst_fail;
4618 }
0e32b39c
DA
4619
4620 if (!intel_dp_get_dpcd(intel_dp)) {
4621 goto mst_fail;
4622 }
4623
4624 intel_dp_probe_oui(intel_dp);
4625
4626 if (!intel_dp_probe_mst(intel_dp))
4627 goto mst_fail;
4628
4629 } else {
4630 if (intel_dp->is_mst) {
1c767b33 4631 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4632 goto mst_fail;
4633 }
4634
4635 if (!intel_dp->is_mst) {
4636 /*
4637 * we'll check the link status via the normal hot plug path later -
4638 * but for short hpds we should check it now
4639 */
5b215bcf 4640 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4641 intel_dp_check_link_status(intel_dp);
5b215bcf 4642 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4643 }
4644 }
b2c5c181
DV
4645
4646 ret = IRQ_HANDLED;
4647
1c767b33 4648 goto put_power;
0e32b39c
DA
4649mst_fail:
4650 /* if we were in MST mode, and device is not there get out of MST mode */
4651 if (intel_dp->is_mst) {
4652 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4653 intel_dp->is_mst = false;
4654 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4655 }
1c767b33
ID
4656put_power:
4657 intel_display_power_put(dev_priv, power_domain);
4658
4659 return ret;
13cf5504
DA
4660}
4661
e3421a18
ZW
4662/* Return which DP Port should be selected for Transcoder DP control */
4663int
0206e353 4664intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4665{
4666 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4667 struct intel_encoder *intel_encoder;
4668 struct intel_dp *intel_dp;
e3421a18 4669
fa90ecef
PZ
4670 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4671 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4672
fa90ecef
PZ
4673 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4674 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4675 return intel_dp->output_reg;
e3421a18 4676 }
ea5b213a 4677
e3421a18
ZW
4678 return -1;
4679}
4680
36e83a18 4681/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4682bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4683{
4684 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4685 union child_device_config *p_child;
36e83a18 4686 int i;
5d8a7752
VS
4687 static const short port_mapping[] = {
4688 [PORT_B] = PORT_IDPB,
4689 [PORT_C] = PORT_IDPC,
4690 [PORT_D] = PORT_IDPD,
4691 };
36e83a18 4692
3b32a35b
VS
4693 if (port == PORT_A)
4694 return true;
4695
41aa3448 4696 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4697 return false;
4698
41aa3448
RV
4699 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4700 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4701
5d8a7752 4702 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4703 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4704 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4705 return true;
4706 }
4707 return false;
4708}
4709
0e32b39c 4710void
f684960e
CW
4711intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4712{
53b41837
YN
4713 struct intel_connector *intel_connector = to_intel_connector(connector);
4714
3f43c48d 4715 intel_attach_force_audio_property(connector);
e953fd7b 4716 intel_attach_broadcast_rgb_property(connector);
55bc60db 4717 intel_dp->color_range_auto = true;
53b41837
YN
4718
4719 if (is_edp(intel_dp)) {
4720 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4721 drm_object_attach_property(
4722 &connector->base,
53b41837 4723 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4724 DRM_MODE_SCALE_ASPECT);
4725 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4726 }
f684960e
CW
4727}
4728
dada1a9f
ID
4729static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4730{
4731 intel_dp->last_power_cycle = jiffies;
4732 intel_dp->last_power_on = jiffies;
4733 intel_dp->last_backlight_off = jiffies;
4734}
4735
67a54566
DV
4736static void
4737intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4738 struct intel_dp *intel_dp)
67a54566
DV
4739{
4740 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4741 struct edp_power_seq cur, vbt, spec,
4742 *final = &intel_dp->pps_delays;
67a54566 4743 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4744 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4745
e39b999a
VS
4746 lockdep_assert_held(&dev_priv->pps_mutex);
4747
81ddbc69
VS
4748 /* already initialized? */
4749 if (final->t11_t12 != 0)
4750 return;
4751
453c5420 4752 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4753 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4754 pp_on_reg = PCH_PP_ON_DELAYS;
4755 pp_off_reg = PCH_PP_OFF_DELAYS;
4756 pp_div_reg = PCH_PP_DIVISOR;
4757 } else {
bf13e81b
JN
4758 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4759
4760 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4761 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4762 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4763 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4764 }
67a54566
DV
4765
4766 /* Workaround: Need to write PP_CONTROL with the unlock key as
4767 * the very first thing. */
453c5420 4768 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4769 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4770
453c5420
JB
4771 pp_on = I915_READ(pp_on_reg);
4772 pp_off = I915_READ(pp_off_reg);
4773 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4774
4775 /* Pull timing values out of registers */
4776 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4777 PANEL_POWER_UP_DELAY_SHIFT;
4778
4779 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4780 PANEL_LIGHT_ON_DELAY_SHIFT;
4781
4782 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4783 PANEL_LIGHT_OFF_DELAY_SHIFT;
4784
4785 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4786 PANEL_POWER_DOWN_DELAY_SHIFT;
4787
4788 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4789 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4790
4791 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4792 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4793
41aa3448 4794 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4795
4796 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4797 * our hw here, which are all in 100usec. */
4798 spec.t1_t3 = 210 * 10;
4799 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4800 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4801 spec.t10 = 500 * 10;
4802 /* This one is special and actually in units of 100ms, but zero
4803 * based in the hw (so we need to add 100 ms). But the sw vbt
4804 * table multiplies it by 1000 to make it in units of 100usec,
4805 * too. */
4806 spec.t11_t12 = (510 + 100) * 10;
4807
4808 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4809 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4810
4811 /* Use the max of the register settings and vbt. If both are
4812 * unset, fall back to the spec limits. */
36b5f425 4813#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4814 spec.field : \
4815 max(cur.field, vbt.field))
4816 assign_final(t1_t3);
4817 assign_final(t8);
4818 assign_final(t9);
4819 assign_final(t10);
4820 assign_final(t11_t12);
4821#undef assign_final
4822
36b5f425 4823#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4824 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4825 intel_dp->backlight_on_delay = get_delay(t8);
4826 intel_dp->backlight_off_delay = get_delay(t9);
4827 intel_dp->panel_power_down_delay = get_delay(t10);
4828 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4829#undef get_delay
4830
f30d26e4
JN
4831 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4832 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4833 intel_dp->panel_power_cycle_delay);
4834
4835 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4836 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4837}
4838
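/*
 * Editor's note (illustrative sketch, not part of the original file): the two
 * helpers below simply restate the bookkeeping done by assign_final() and
 * get_delay() above, so the unit handling is easier to follow. The hardware
 * fields hold delays in 100 usec units, while intel_dp caches milliseconds,
 * hence the divide-by-10 with round-up. The function names are made up for
 * illustration only.
 */
static inline int example_pps_pick_delay(int cur, int vbt, int spec)
{
	/* max of register and VBT settings; spec limit if both are unset */
	return max(cur, vbt) == 0 ? spec : max(cur, vbt);
}

static inline int example_pps_delay_to_ms(int delay_100us)
{
	/* e.g. spec.t1_t3 = 210 * 10 (100 usec units) becomes 210 ms */
	return DIV_ROUND_UP(delay_100us, 10);
}
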
4839static void
4840intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4841 struct intel_dp *intel_dp)
f30d26e4
JN
4842{
4843 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4844 u32 pp_on, pp_off, pp_div, port_sel = 0;
4845 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4846 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4847 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4848 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4849
e39b999a 4850 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4851
4852 if (HAS_PCH_SPLIT(dev)) {
4853 pp_on_reg = PCH_PP_ON_DELAYS;
4854 pp_off_reg = PCH_PP_OFF_DELAYS;
4855 pp_div_reg = PCH_PP_DIVISOR;
4856 } else {
bf13e81b
JN
4857 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4858
4859 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4860 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4861 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4862 }
4863
b2f19d1a
PZ
4864 /*
4865 * And finally store the new values in the power sequencer. The
4866 * backlight delays are set to 1 because we do manual waits on them. For
4867 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4868 * we'll end up waiting for the backlight off delay twice: once when we
4869 * do the manual sleep, and once when we disable the panel and wait for
4870 * the PP_STATUS bit to become zero.
4871 */
f30d26e4 4872 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4873 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4874 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4875 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4876 /* Compute the divisor for the pp clock, simply match the Bspec
4877 * formula. */
453c5420 4878 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4879 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4880 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4881
4882 /* Haswell doesn't have any port selection bits for the panel
4883 * power sequencer any more. */
bc7d38a4 4884 if (IS_VALLEYVIEW(dev)) {
ad933b56 4885 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4886 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4887 if (port == PORT_A)
a24c144c 4888 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4889 else
a24c144c 4890 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4891 }
4892
453c5420
JB
4893 pp_on |= port_sel;
4894
4895 I915_WRITE(pp_on_reg, pp_on);
4896 I915_WRITE(pp_off_reg, pp_off);
4897 I915_WRITE(pp_div_reg, pp_div);
67a54566 4898
67a54566 4899 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4900 I915_READ(pp_on_reg),
4901 I915_READ(pp_off_reg),
4902 I915_READ(pp_div_reg));
f684960e
CW
4903}
4904
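/*
 * Editor's note (hypothetical helper, not part of the original file): the
 * PP_DIVISOR packing used above, written out on its own. The reference
 * divider follows the (100 * div) / 2 - 1 Bspec formula, and the power cycle
 * delay field takes t11_t12 (kept in 100 usec units) divided down to the
 * hardware's 100 ms units.
 */
static inline u32 example_pack_pp_div(int div, u16 t11_t12)
{
	u32 pp_div;

	/* reference divider, straight from the formula used above */
	pp_div = ((100 * div) / 2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
	/* power cycle delay in 100 ms units */
	pp_div |= DIV_ROUND_UP(t11_t12, 1000) << PANEL_POWER_CYCLE_DELAY_SHIFT;

	return pp_div;
}
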
b33a2815
VK
4905/**
4906 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4907 * @dev: DRM device
4908 * @refresh_rate: RR to be programmed
4909 *
4910 * This function gets called when refresh rate (RR) has to be changed from
4911 * one frequency to another. Switches can be between high and low RR
4912 * supported by the panel or to any other RR based on media playback (in
4913 * this case, RR value needs to be passed from user space).
4914 *
4915 * The caller of this function needs to take a lock on dev_priv->drrs.
4916 */
96178eeb 4917static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4918{
4919 struct drm_i915_private *dev_priv = dev->dev_private;
4920 struct intel_encoder *encoder;
96178eeb
VK
4921 struct intel_digital_port *dig_port = NULL;
4922 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4923 struct intel_crtc_state *config = NULL;
439d7ac0 4924 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4925 u32 reg, val;
96178eeb 4926 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4927
4928 if (refresh_rate <= 0) {
4929 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4930 return;
4931 }
4932
96178eeb
VK
4933 if (intel_dp == NULL) {
4934 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4935 return;
4936 }
4937
1fcc9d1c 4938 /*
e4d59f6b
RV
4939 * FIXME: This needs proper synchronization with psr state for some
4940 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4941 */
439d7ac0 4942
96178eeb
VK
4943 dig_port = dp_to_dig_port(intel_dp);
4944 encoder = &dig_port->base;
439d7ac0
PB
4945 intel_crtc = encoder->new_crtc;
4946
4947 if (!intel_crtc) {
4948 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
4949 return;
4950 }
4951
6e3c9717 4952 config = intel_crtc->config;
439d7ac0 4953
96178eeb 4954 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
4955 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
4956 return;
4957 }
4958
96178eeb
VK
4959 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
4960 refresh_rate)
439d7ac0
PB
4961 index = DRRS_LOW_RR;
4962
96178eeb 4963 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
4964 DRM_DEBUG_KMS(
4965 "DRRS requested for previously set RR...ignoring\n");
4966 return;
4967 }
4968
4969 if (!intel_crtc->active) {
4970 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
4971 return;
4972 }
4973
44395bfe 4974 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
4975 switch (index) {
4976 case DRRS_HIGH_RR:
4977 intel_dp_set_m_n(intel_crtc, M1_N1);
4978 break;
4979 case DRRS_LOW_RR:
4980 intel_dp_set_m_n(intel_crtc, M2_N2);
4981 break;
4982 case DRRS_MAX_RR:
4983 default:
4984 DRM_ERROR("Unsupported refresh rate type\n");
4985 }
4986 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 4987 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 4988 val = I915_READ(reg);
a4c30b1d 4989
439d7ac0 4990 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
4991 if (IS_VALLEYVIEW(dev))
4992 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4993 else
4994 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 4995 } else {
6fa7aec1
VK
4996 if (IS_VALLEYVIEW(dev))
4997 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
4998 else
4999 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5000 }
5001 I915_WRITE(reg, val);
5002 }
5003
4e9ac947
VK
5004 dev_priv->drrs.refresh_rate_type = index;
5005
5006 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5007}
5008
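/*
 * Editor's note (usage sketch only, not part of the original file): the
 * refresh rate handed to intel_dp_set_drrs_state() always comes from a mode
 * cached on the attached connector, and, per the kerneldoc above, the caller
 * must hold dev_priv->drrs.mutex. A hypothetical caller dropping to the
 * panel's low refresh rate would look like this, mirroring the downclock
 * work item further below; drrs.dp being non-NULL implies DRRS was set up.
 */
static void example_drrs_set_low_rr(struct drm_i915_private *dev_priv)
{
	struct intel_dp *intel_dp = dev_priv->drrs.dp;

	lockdep_assert_held(&dev_priv->drrs.mutex);

	if (intel_dp)
		intel_dp_set_drrs_state(dev_priv->dev,
					intel_dp->attached_connector->panel.
					downclock_mode->vrefresh);
}
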
b33a2815
VK
5009/**
5010 * intel_edp_drrs_enable - init drrs struct if supported
5011 * @intel_dp: DP struct
5012 *
5013 * Initializes frontbuffer_bits and drrs.dp
5014 */
c395578e
VK
5015void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5016{
5017 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5018 struct drm_i915_private *dev_priv = dev->dev_private;
5019 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5020 struct drm_crtc *crtc = dig_port->base.base.crtc;
5021 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5022
5023 if (!intel_crtc->config->has_drrs) {
5024 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5025 return;
5026 }
5027
5028 mutex_lock(&dev_priv->drrs.mutex);
5029 if (WARN_ON(dev_priv->drrs.dp)) {
5030 DRM_ERROR("DRRS already enabled\n");
5031 goto unlock;
5032 }
5033
5034 dev_priv->drrs.busy_frontbuffer_bits = 0;
5035
5036 dev_priv->drrs.dp = intel_dp;
5037
5038unlock:
5039 mutex_unlock(&dev_priv->drrs.mutex);
5040}
5041
b33a2815
VK
5042/**
5043 * intel_edp_drrs_disable - Disable DRRS
5044 * @intel_dp: DP struct
5045 *
5046 */
c395578e
VK
5047void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5048{
5049 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5050 struct drm_i915_private *dev_priv = dev->dev_private;
5051 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5052 struct drm_crtc *crtc = dig_port->base.base.crtc;
5053 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5054
5055 if (!intel_crtc->config->has_drrs)
5056 return;
5057
5058 mutex_lock(&dev_priv->drrs.mutex);
5059 if (!dev_priv->drrs.dp) {
5060 mutex_unlock(&dev_priv->drrs.mutex);
5061 return;
5062 }
5063
5064 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5065 intel_dp_set_drrs_state(dev_priv->dev,
5066 intel_dp->attached_connector->panel.
5067 fixed_mode->vrefresh);
5068
5069 dev_priv->drrs.dp = NULL;
5070 mutex_unlock(&dev_priv->drrs.mutex);
5071
5072 cancel_delayed_work_sync(&dev_priv->drrs.work);
5073}
5074
4e9ac947
VK
5075static void intel_edp_drrs_downclock_work(struct work_struct *work)
5076{
5077 struct drm_i915_private *dev_priv =
5078 container_of(work, typeof(*dev_priv), drrs.work.work);
5079 struct intel_dp *intel_dp;
5080
5081 mutex_lock(&dev_priv->drrs.mutex);
5082
5083 intel_dp = dev_priv->drrs.dp;
5084
5085 if (!intel_dp)
5086 goto unlock;
5087
439d7ac0 5088 /*
4e9ac947
VK
5089 * The delayed work can race with an invalidate, hence we need to
5090 * recheck.
439d7ac0
PB
5091 */
5092
4e9ac947
VK
5093 if (dev_priv->drrs.busy_frontbuffer_bits)
5094 goto unlock;
439d7ac0 5095
4e9ac947
VK
5096 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5097 intel_dp_set_drrs_state(dev_priv->dev,
5098 intel_dp->attached_connector->panel.
5099 downclock_mode->vrefresh);
439d7ac0 5100
4e9ac947 5101unlock:
439d7ac0 5102
4e9ac947 5103 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5104}
5105
b33a2815
VK
5106/**
5107 * intel_edp_drrs_invalidate - Invalidate DRRS
5108 * @dev: DRM device
5109 * @frontbuffer_bits: frontbuffer plane tracking bits
5110 *
5111 * When there is a disturbance on screen (due to cursor movement/time
5112 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5113 * high RR.
5114 *
5115 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5116 */
a93fad0f
VK
5117void intel_edp_drrs_invalidate(struct drm_device *dev,
5118 unsigned frontbuffer_bits)
5119{
5120 struct drm_i915_private *dev_priv = dev->dev_private;
5121 struct drm_crtc *crtc;
5122 enum pipe pipe;
5123
5124 if (!dev_priv->drrs.dp)
5125 return;
5126
3954e733
R
5127 cancel_delayed_work_sync(&dev_priv->drrs.work);
5128
a93fad0f
VK
5129 mutex_lock(&dev_priv->drrs.mutex);
5130 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5131 pipe = to_intel_crtc(crtc)->pipe;
5132
5133 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5134 intel_dp_set_drrs_state(dev_priv->dev,
5135 dev_priv->drrs.dp->attached_connector->panel.
5136 fixed_mode->vrefresh);
5137 }
5138
5139 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5140
5141 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5142 mutex_unlock(&dev_priv->drrs.mutex);
5143}
5144
b33a2815
VK
5145/**
5146 * intel_edp_drrs_flush - Flush DRRS
5147 * @dev: DRM device
5148 * @frontbuffer_bits: frontbuffer plane tracking bits
5149 *
5150 * When there is no movement on screen, DRRS work can be scheduled.
5151 * This DRRS work is responsible for setting relevant registers after a
5152 * timeout of 1 second.
5153 *
5154 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5155 */
a93fad0f
VK
5156void intel_edp_drrs_flush(struct drm_device *dev,
5157 unsigned frontbuffer_bits)
5158{
5159 struct drm_i915_private *dev_priv = dev->dev_private;
5160 struct drm_crtc *crtc;
5161 enum pipe pipe;
5162
5163 if (!dev_priv->drrs.dp)
5164 return;
5165
3954e733
R
5166 cancel_delayed_work_sync(&dev_priv->drrs.work);
5167
a93fad0f
VK
5168 mutex_lock(&dev_priv->drrs.mutex);
5169 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5170 pipe = to_intel_crtc(crtc)->pipe;
5171 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5172
a93fad0f
VK
5173 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5174 !dev_priv->drrs.busy_frontbuffer_bits)
5175 schedule_delayed_work(&dev_priv->drrs.work,
5176 msecs_to_jiffies(1000));
5177 mutex_unlock(&dev_priv->drrs.mutex);
5178}
5179
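/*
 * Editor's note (illustrative sketch, not part of the original file): as the
 * kerneldoc above and the DOC section below describe, frontbuffer tracking
 * brackets screen updates with an invalidate (forcing high RR immediately)
 * and a flush (re-arming the 1 second downclock timer). A hypothetical
 * caller would look like this:
 */
static void example_frontbuffer_dirty(struct drm_device *dev,
				      unsigned frontbuffer_bits)
{
	/* user activity begins: make sure we are at the high refresh rate */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... render / flip the dirty planes here ... */

	/* activity done: allow the delayed switch back to the low rate */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
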
b33a2815
VK
5180/**
5181 * DOC: Display Refresh Rate Switching (DRRS)
5182 *
5183 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5184 * which enables switching between low and high refresh rates,
5185 * dynamically, based on the usage scenario. This feature is applicable
5186 * for internal panels.
5187 *
5188 * Indication that the panel supports DRRS is given by the panel EDID, which
5189 * would list multiple refresh rates for one resolution.
5190 *
5191 * DRRS is of 2 types - static and seamless.
5192 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5193 * (may appear as a blink on screen) and is used in dock-undock scenario.
5194 * Seamless DRRS involves changing RR without any visual effect to the user
5195 * and can be used during normal system usage. This is done by programming
5196 * certain registers.
5197 *
5198 * Support for static/seamless DRRS may be indicated in the VBT based on
5199 * inputs from the panel spec.
5200 *
5201 * DRRS saves power by switching to low RR based on usage scenarios.
5202 *
5203 * eDP DRRS:-
5204 * The implementation is based on frontbuffer tracking implementation.
5205 * When there is a disturbance on the screen triggered by user activity or a
5206 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5207 * When there is no movement on screen, after a timeout of 1 second, a switch
5208 * to low RR is made.
5209 * For integration with frontbuffer tracking code,
5210 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5211 *
5212 * DRRS can be further extended to support other internal panels and also
5213 * the scenario of video playback wherein RR is set based on the rate
5214 * requested by userspace.
5215 */
5216
5217/**
5218 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5219 * @intel_connector: eDP connector
5220 * @fixed_mode: preferred mode of panel
5221 *
5222 * This function is called only once at driver load to initialize basic
5223 * DRRS state (the delayed work item and mutex).
5224 *
5225 * Returns:
5226 * Downclock mode if panel supports it, else return NULL.
5227 * DRRS support is determined by the presence of downclock mode (apart
5228 * from VBT setting).
5229 */
4f9db5b5 5230static struct drm_display_mode *
96178eeb
VK
5231intel_dp_drrs_init(struct intel_connector *intel_connector,
5232 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5233{
5234 struct drm_connector *connector = &intel_connector->base;
96178eeb 5235 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5236 struct drm_i915_private *dev_priv = dev->dev_private;
5237 struct drm_display_mode *downclock_mode = NULL;
5238
5239 if (INTEL_INFO(dev)->gen <= 6) {
5240 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5241 return NULL;
5242 }
5243
5244 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5245 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5246 return NULL;
5247 }
5248
5249 downclock_mode = intel_find_panel_downclock
5250 (dev, fixed_mode, connector);
5251
5252 if (!downclock_mode) {
a1d26342 5253 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5254 return NULL;
5255 }
5256
4e9ac947
VK
5257 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5258
96178eeb 5259 mutex_init(&dev_priv->drrs.mutex);
439d7ac0 5260
96178eeb 5261 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5262
96178eeb 5263 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5264 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5265 return downclock_mode;
5266}
5267
ed92f0b2 5268static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5269 struct intel_connector *intel_connector)
ed92f0b2
PZ
5270{
5271 struct drm_connector *connector = &intel_connector->base;
5272 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5273 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5274 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5275 struct drm_i915_private *dev_priv = dev->dev_private;
5276 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5277 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5278 bool has_dpcd;
5279 struct drm_display_mode *scan;
5280 struct edid *edid;
6517d273 5281 enum pipe pipe = INVALID_PIPE;
ed92f0b2 5282
96178eeb 5283 dev_priv->drrs.type = DRRS_NOT_SUPPORTED;
4f9db5b5 5284
ed92f0b2
PZ
5285 if (!is_edp(intel_dp))
5286 return true;
5287
49e6bc51
VS
5288 pps_lock(intel_dp);
5289 intel_edp_panel_vdd_sanitize(intel_dp);
5290 pps_unlock(intel_dp);
63635217 5291
ed92f0b2 5292 /* Cache DPCD and EDID for edp. */
ed92f0b2 5293 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5294
5295 if (has_dpcd) {
5296 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5297 dev_priv->no_aux_handshake =
5298 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5299 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5300 } else {
5301 /* if this fails, presume the device is a ghost */
5302 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5303 return false;
5304 }
5305
5306 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5307 pps_lock(intel_dp);
36b5f425 5308 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5309 pps_unlock(intel_dp);
ed92f0b2 5310
060c8778 5311 mutex_lock(&dev->mode_config.mutex);
0b99836f 5312 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5313 if (edid) {
5314 if (drm_add_edid_modes(connector, edid)) {
5315 drm_mode_connector_update_edid_property(connector,
5316 edid);
5317 drm_edid_to_eld(connector, edid);
5318 } else {
5319 kfree(edid);
5320 edid = ERR_PTR(-EINVAL);
5321 }
5322 } else {
5323 edid = ERR_PTR(-ENOENT);
5324 }
5325 intel_connector->edid = edid;
5326
5327 /* prefer fixed mode from EDID if available */
5328 list_for_each_entry(scan, &connector->probed_modes, head) {
5329 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5330 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5331 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5332 intel_connector, fixed_mode);
ed92f0b2
PZ
5333 break;
5334 }
5335 }
5336
5337 /* fallback to VBT if available for eDP */
5338 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5339 fixed_mode = drm_mode_duplicate(dev,
5340 dev_priv->vbt.lfp_lvds_vbt_mode);
5341 if (fixed_mode)
5342 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5343 }
060c8778 5344 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5345
01527b31
CT
5346 if (IS_VALLEYVIEW(dev)) {
5347 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5348 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5349
5350 /*
5351 * Figure out the current pipe for the initial backlight setup.
5352 * If the current pipe isn't valid, try the PPS pipe, and if that
5353 * fails just assume pipe A.
5354 */
5355 if (IS_CHERRYVIEW(dev))
5356 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5357 else
5358 pipe = PORT_TO_PIPE(intel_dp->DP);
5359
5360 if (pipe != PIPE_A && pipe != PIPE_B)
5361 pipe = intel_dp->pps_pipe;
5362
5363 if (pipe != PIPE_A && pipe != PIPE_B)
5364 pipe = PIPE_A;
5365
5366 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5367 pipe_name(pipe));
01527b31
CT
5368 }
5369
4f9db5b5 5370 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5371 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5372 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5373
5374 return true;
5375}
5376
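/*
 * Editor's note (hypothetical helper, not part of the original file): the
 * pipe fallback chain used above for the initial backlight setup on VLV/CHV,
 * restated as a standalone function: current pipe from the port register,
 * then the PPS pipe, then pipe A. The helper name is made up.
 */
static enum pipe example_initial_backlight_pipe(struct drm_device *dev,
						struct intel_dp *intel_dp)
{
	enum pipe pipe = IS_CHERRYVIEW(dev) ?
		DP_PORT_TO_PIPE_CHV(intel_dp->DP) :
		PORT_TO_PIPE(intel_dp->DP);

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = intel_dp->pps_pipe;

	if (pipe != PIPE_A && pipe != PIPE_B)
		pipe = PIPE_A;

	return pipe;
}
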
16c25533 5377bool
f0fec3f2
PZ
5378intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5379 struct intel_connector *intel_connector)
a4fc5ed6 5380{
f0fec3f2
PZ
5381 struct drm_connector *connector = &intel_connector->base;
5382 struct intel_dp *intel_dp = &intel_dig_port->dp;
5383 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5384 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5385 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5386 enum port port = intel_dig_port->port;
0b99836f 5387 int type;
a4fc5ed6 5388
a4a5d2f8
VS
5389 intel_dp->pps_pipe = INVALID_PIPE;
5390
ec5b01dd 5391 /* intel_dp vfuncs */
b6b5e383
DL
5392 if (INTEL_INFO(dev)->gen >= 9)
5393 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5394 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5395 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5396 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5397 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5398 else if (HAS_PCH_SPLIT(dev))
5399 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5400 else
5401 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5402
b9ca5fad
DL
5403 if (INTEL_INFO(dev)->gen >= 9)
5404 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5405 else
5406 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5407
0767935e
DV
5408 /* Preserve the current hw state. */
5409 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5410 intel_dp->attached_connector = intel_connector;
3d3dc149 5411
3b32a35b 5412 if (intel_dp_is_edp(dev, port))
b329530c 5413 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5414 else
5415 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5416
f7d24902
ID
5417 /*
5418 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5419 * for DP the encoder type can be set by the caller to
5420 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5421 */
5422 if (type == DRM_MODE_CONNECTOR_eDP)
5423 intel_encoder->type = INTEL_OUTPUT_EDP;
5424
c17ed5b5
VS
5425 /* eDP only on port B and/or C on vlv/chv */
5426 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5427 port != PORT_B && port != PORT_C))
5428 return false;
5429
e7281eab
ID
5430 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5431 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5432 port_name(port));
5433
b329530c 5434 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5435 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5436
a4fc5ed6
KP
5437 connector->interlace_allowed = true;
5438 connector->doublescan_allowed = 0;
5439
f0fec3f2 5440 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5441 edp_panel_vdd_work);
a4fc5ed6 5442
df0e9248 5443 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5444 drm_connector_register(connector);
a4fc5ed6 5445
affa9354 5446 if (HAS_DDI(dev))
bcbc889b
PZ
5447 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5448 else
5449 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5450 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5451
0b99836f 5452 /* Set up the hotplug pin. */
ab9d7c30
PZ
5453 switch (port) {
5454 case PORT_A:
1d843f9d 5455 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5456 break;
5457 case PORT_B:
1d843f9d 5458 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5459 break;
5460 case PORT_C:
1d843f9d 5461 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5462 break;
5463 case PORT_D:
1d843f9d 5464 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5465 break;
5466 default:
ad1c0b19 5467 BUG();
5eb08b69
ZW
5468 }
5469
dada1a9f 5470 if (is_edp(intel_dp)) {
773538e8 5471 pps_lock(intel_dp);
1e74a324
VS
5472 intel_dp_init_panel_power_timestamps(intel_dp);
5473 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5474 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5475 else
36b5f425 5476 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5477 pps_unlock(intel_dp);
dada1a9f 5478 }
0095e6dc 5479
9d1a1031 5480 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5481
0e32b39c 5482 /* init MST on ports that can support it */
c86ea3d0 5483 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5484 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5485 intel_dp_mst_encoder_init(intel_dig_port,
5486 intel_connector->base.base.id);
0e32b39c
DA
5487 }
5488 }
5489
36b5f425 5490 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5491 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5492 if (is_edp(intel_dp)) {
5493 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5494 /*
5495 * vdd might still be enabled due to the delayed vdd off.
5496 * Make sure vdd is actually turned off here.
5497 */
773538e8 5498 pps_lock(intel_dp);
4be73780 5499 edp_panel_vdd_off_sync(intel_dp);
773538e8 5500 pps_unlock(intel_dp);
15b1d171 5501 }
34ea3d38 5502 drm_connector_unregister(connector);
b2f246a8 5503 drm_connector_cleanup(connector);
16c25533 5504 return false;
b2f246a8 5505 }
32f9d658 5506
f684960e
CW
5507 intel_dp_add_properties(intel_dp, connector);
5508
a4fc5ed6
KP
5509 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5510 * 0xd. Failure to do so will result in spurious interrupts being
5511 * generated on the port when a cable is not attached.
5512 */
5513 if (IS_G4X(dev) && !IS_GM45(dev)) {
5514 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5515 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5516 }
16c25533
PZ
5517
5518 return true;
a4fc5ed6 5519}
f0fec3f2
PZ
5520
5521void
5522intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5523{
13cf5504 5524 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5525 struct intel_digital_port *intel_dig_port;
5526 struct intel_encoder *intel_encoder;
5527 struct drm_encoder *encoder;
5528 struct intel_connector *intel_connector;
5529
b14c5679 5530 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5531 if (!intel_dig_port)
5532 return;
5533
b14c5679 5534 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5535 if (!intel_connector) {
5536 kfree(intel_dig_port);
5537 return;
5538 }
5539
5540 intel_encoder = &intel_dig_port->base;
5541 encoder = &intel_encoder->base;
5542
5543 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5544 DRM_MODE_ENCODER_TMDS);
5545
5bfe2ac0 5546 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5547 intel_encoder->disable = intel_disable_dp;
00c09d70 5548 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5549 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5550 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5551 if (IS_CHERRYVIEW(dev)) {
9197c88b 5552 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5553 intel_encoder->pre_enable = chv_pre_enable_dp;
5554 intel_encoder->enable = vlv_enable_dp;
580d3811 5555 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5556 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5557 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5558 intel_encoder->pre_enable = vlv_pre_enable_dp;
5559 intel_encoder->enable = vlv_enable_dp;
49277c31 5560 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5561 } else {
ecff4f3b
JN
5562 intel_encoder->pre_enable = g4x_pre_enable_dp;
5563 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5564 if (INTEL_INFO(dev)->gen >= 5)
5565 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5566 }
f0fec3f2 5567
174edf1f 5568 intel_dig_port->port = port;
f0fec3f2
PZ
5569 intel_dig_port->dp.output_reg = output_reg;
5570
00c09d70 5571 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5572 if (IS_CHERRYVIEW(dev)) {
5573 if (port == PORT_D)
5574 intel_encoder->crtc_mask = 1 << 2;
5575 else
5576 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5577 } else {
5578 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5579 }
bc079e8b 5580 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5581 intel_encoder->hot_plug = intel_dp_hot_plug;
5582
13cf5504
DA
5583 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5584 dev_priv->hpd_irq_port[port] = intel_dig_port;
5585
15b1d171
PZ
5586 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5587 drm_encoder_cleanup(encoder);
5588 kfree(intel_dig_port);
b2f246a8 5589 kfree(intel_connector);
15b1d171 5590 }
f0fec3f2 5591}
0e32b39c
DA
5592
5593void intel_dp_mst_suspend(struct drm_device *dev)
5594{
5595 struct drm_i915_private *dev_priv = dev->dev_private;
5596 int i;
5597
5598 /* disable MST */
5599 for (i = 0; i < I915_MAX_PORTS; i++) {
5600 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5601 if (!intel_dig_port)
5602 continue;
5603
5604 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5605 if (!intel_dig_port->dp.can_mst)
5606 continue;
5607 if (intel_dig_port->dp.is_mst)
5608 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5609 }
5610 }
5611}
5612
5613void intel_dp_mst_resume(struct drm_device *dev)
5614{
5615 struct drm_i915_private *dev_priv = dev->dev_private;
5616 int i;
5617
5618 for (i = 0; i < I915_MAX_PORTS; i++) {
5619 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5620 if (!intel_dig_port)
5621 continue;
5622 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5623 int ret;
5624
5625 if (!intel_dig_port->dp.can_mst)
5626 continue;
5627
5628 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5629 if (ret != 0) {
5630 intel_dp_check_mst_status(&intel_dig_port->dp);
5631 }
5632 }
5633 }
5634}