drivers/gpu/drm/i915/intel_dp.c

/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

struct dp_link_dpll {
	int link_bw;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ DP_LINK_BW_1_62,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ DP_LINK_BW_2_7,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which allows additional link rates.
 * Only the fixed rates are listed below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires the fractional divider to be programmed for m2.
	 * m2 is stored in fixed-point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ DP_LINK_BW_1_62,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ DP_LINK_BW_2_7,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ DP_LINK_BW_5_4,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Skylake supports the following rates */
static const int gen9_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
				 243000, 270000, 324000, 405000,
				 420000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

95/**
96 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
97 * @intel_dp: DP struct
98 *
99 * If a CPU or PCH DP output is attached to an eDP panel, this function
100 * will return true, and false otherwise.
101 */
102static bool is_edp(struct intel_dp *intel_dp)
103{
104 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
105
106 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
107}
108
68b4d824 109static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 110{
111 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
112
113 return intel_dig_port->base.base.dev;
114}
115
116static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
117{
fa90ecef 118 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
119}
120
ea5b213a 121static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 122static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 123static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 124static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
125static void vlv_steal_power_sequencer(struct drm_device *dev,
126 enum pipe pipe);
a4fc5ed6 127
128static int
129intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 130{
7183dc29 131 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
132
133 switch (max_link_bw) {
134 case DP_LINK_BW_1_62:
135 case DP_LINK_BW_2_7:
1db10e28 136 case DP_LINK_BW_5_4:
d4eead50 137 break;
a4fc5ed6 138 default:
139 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
140 max_link_bw);
141 max_link_bw = DP_LINK_BW_1_62;
142 break;
143 }
144 return max_link_bw;
145}
146
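/*
 * The maximum lane count is the smaller of what the source port can drive
 * (4 lanes, or only 2 on a DDI port A that isn't wired for 4 lanes) and the
 * lane count the sink reports in its DPCD.
 */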
147static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
148{
149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
150 struct drm_device *dev = intel_dig_port->base.base.dev;
151 u8 source_max, sink_max;
152
153 source_max = 4;
154 if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
155 (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
156 source_max = 2;
157
158 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
159
160 return min(source_max, sink_max);
161}
162
163/*
164 * The units on the numbers in the next two are... bizarre. Examples will
165 * make it clearer; this one parallels an example in the eDP spec.
166 *
167 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
168 *
169 * 270000 * 1 * 8 / 10 == 216000
170 *
171 * The actual data capacity of that configuration is 2.16Gbit/s, so the
172 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
173 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
174 * 119000. At 18bpp that's 2142000 kilobits per second.
175 *
176 * Thus the strange-looking division by 10 in intel_dp_link_required, to
177 * get the result in decakilobits instead of kilobits.
178 */
179
a4fc5ed6 180static int
c898261c 181intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 182{
cd9dde44 183 return (pixel_clock * bpp + 9) / 10;
184}
185
186static int
187intel_dp_max_data_rate(int max_link_clock, int max_lanes)
188{
189 return (max_link_clock * max_lanes * 8) / 10;
190}
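/*
 * Worked example with the numbers from the comment above: 1680x1050R at
 * 18bpp needs intel_dp_link_required(119000, 18) == 214200, which fits
 * within the intel_dp_max_data_rate(270000, 1) == 216000 available on a
 * single 2.7GHz lane.
 */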
191
c19de8eb 192static enum drm_mode_status
193intel_dp_mode_valid(struct drm_connector *connector,
194 struct drm_display_mode *mode)
195{
df0e9248 196 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
197 struct intel_connector *intel_connector = to_intel_connector(connector);
198 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
199 int target_clock = mode->clock;
200 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 201
dd06f90e
JN
202 if (is_edp(intel_dp) && fixed_mode) {
203 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
204 return MODE_PANEL;
205
dd06f90e 206 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 207 return MODE_PANEL;
03afc4a2
DV
208
209 target_clock = fixed_mode->clock;
7de56f43
ZY
210 }
211
50fec21a 212 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 213 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
214
215 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
216 mode_rate = intel_dp_link_required(target_clock, 18);
217
218 if (mode_rate > max_rate)
c4867936 219 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
220
221 if (mode->clock < 10000)
222 return MODE_CLOCK_LOW;
223
0af78a2b
DV
224 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
225 return MODE_H_ILLEGAL;
226
227 return MODE_OK;
228}
229
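/*
 * The AUX CH data registers hold up to 4 payload bytes in one 32-bit word,
 * most significant byte first. These two helpers convert between that
 * register layout and a plain byte buffer.
 */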
a4f1289e 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
231{
232 int i;
233 uint32_t v = 0;
234
235 if (src_bytes > 4)
236 src_bytes = 4;
237 for (i = 0; i < src_bytes; i++)
238 v |= ((uint32_t) src[i]) << ((3-i) * 8);
239 return v;
240}
241
c2af70e2 242static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
243{
244 int i;
245 if (dst_bytes > 4)
246 dst_bytes = 4;
247 for (i = 0; i < dst_bytes; i++)
248 dst[i] = src >> ((3-i) * 8);
249}
250
fb0f8fbf
KP
251/* hrawclock is 1/4 the FSB frequency */
252static int
253intel_hrawclk(struct drm_device *dev)
254{
255 struct drm_i915_private *dev_priv = dev->dev_private;
256 uint32_t clkcfg;
257
9473c8f4
VP
258 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
259 if (IS_VALLEYVIEW(dev))
260 return 200;
261
fb0f8fbf
KP
262 clkcfg = I915_READ(CLKCFG);
263 switch (clkcfg & CLKCFG_FSB_MASK) {
264 case CLKCFG_FSB_400:
265 return 100;
266 case CLKCFG_FSB_533:
267 return 133;
268 case CLKCFG_FSB_667:
269 return 166;
270 case CLKCFG_FSB_800:
271 return 200;
272 case CLKCFG_FSB_1067:
273 return 266;
274 case CLKCFG_FSB_1333:
275 return 333;
276 /* these two are just a guess; one of them might be right */
277 case CLKCFG_FSB_1600:
278 case CLKCFG_FSB_1600_ALT:
279 return 400;
280 default:
281 return 133;
282 }
283}
284
bf13e81b
JN
285static void
286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 287 struct intel_dp *intel_dp);
bf13e81b
JN
288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 290 struct intel_dp *intel_dp);
bf13e81b 291
773538e8
VS
292static void pps_lock(struct intel_dp *intel_dp)
293{
294 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
295 struct intel_encoder *encoder = &intel_dig_port->base;
296 struct drm_device *dev = encoder->base.dev;
297 struct drm_i915_private *dev_priv = dev->dev_private;
298 enum intel_display_power_domain power_domain;
299
	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
304 power_domain = intel_display_port_power_domain(encoder);
305 intel_display_power_get(dev_priv, power_domain);
306
307 mutex_lock(&dev_priv->pps_mutex);
308}
309
310static void pps_unlock(struct intel_dp *intel_dp)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct intel_encoder *encoder = &intel_dig_port->base;
314 struct drm_device *dev = encoder->base.dev;
315 struct drm_i915_private *dev_priv = dev->dev_private;
316 enum intel_display_power_domain power_domain;
317
318 mutex_unlock(&dev_priv->pps_mutex);
319
320 power_domain = intel_display_port_power_domain(encoder);
321 intel_display_power_put(dev_priv, power_domain);
322}
323
961a0db0
VS
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
d288f65f 331 bool pll_enabled;
961a0db0
VS
332 uint32_t DP;
333
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
d288f65f
VS
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
961a0db0
VS
	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make the power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
d288f65f
VS
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
961a0db0
VS
382}
383
bf13e81b
JN
384static enum pipe
385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
386{
387 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
388 struct drm_device *dev = intel_dig_port->base.base.dev;
389 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
390 struct intel_encoder *encoder;
391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 392 enum pipe pipe;
bf13e81b 393
e39b999a 394 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 395
a8c3344e
VS
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
a4a5d2f8
VS
399 if (intel_dp->pps_pipe != INVALID_PIPE)
400 return intel_dp->pps_pipe;
401
	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
406 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
407 base.head) {
408 struct intel_dp *tmp;
409
410 if (encoder->type != INTEL_OUTPUT_EDP)
411 continue;
412
413 tmp = enc_to_intel_dp(&encoder->base);
414
415 if (tmp->pps_pipe != INVALID_PIPE)
416 pipes &= ~(1 << tmp->pps_pipe);
417 }
418
419 /*
420 * Didn't find one. This should not happen since there
421 * are two power sequencers and up to two eDP ports.
422 */
423 if (WARN_ON(pipes == 0))
424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
a4a5d2f8 427
a8c3344e
VS
428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
430
431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
432 pipe_name(intel_dp->pps_pipe),
433 port_name(intel_dig_port->port));
434
435 /* init power sequencer on this pipe and port */
36b5f425
VS
436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 438
961a0db0
VS
439 /*
440 * Even vdd force doesn't work until we've made
441 * the power sequencer lock in on the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
444
445 return intel_dp->pps_pipe;
446}
447
6491ab27
VS
448typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
449 enum pipe pipe);
450
451static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
452 enum pipe pipe)
453{
454 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
455}
456
457static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
458 enum pipe pipe)
459{
460 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
461}
462
463static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
464 enum pipe pipe)
465{
466 return true;
467}
bf13e81b 468
a4a5d2f8 469static enum pipe
6491ab27
VS
470vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
471 enum port port,
472 vlv_pipe_check pipe_check)
a4a5d2f8
VS
473{
474 enum pipe pipe;
bf13e81b 475
bf13e81b
JN
476 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
477 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
478 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
479
480 if (port_sel != PANEL_PORT_SELECT_VLV(port))
481 continue;
482
6491ab27
VS
483 if (!pipe_check(dev_priv, pipe))
484 continue;
485
a4a5d2f8 486 return pipe;
bf13e81b
JN
487 }
488
a4a5d2f8
VS
489 return INVALID_PIPE;
490}
491
492static void
493vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
494{
495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
496 struct drm_device *dev = intel_dig_port->base.base.dev;
497 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
498 enum port port = intel_dig_port->port;
499
500 lockdep_assert_held(&dev_priv->pps_mutex);
501
502 /* try to find a pipe with this port selected */
6491ab27
VS
503 /* first pick one where the panel is on */
504 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
505 vlv_pipe_has_pp_on);
506 /* didn't find one? pick one where vdd is on */
507 if (intel_dp->pps_pipe == INVALID_PIPE)
508 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
509 vlv_pipe_has_vdd_on);
510 /* didn't find one? pick one with just the correct port */
511 if (intel_dp->pps_pipe == INVALID_PIPE)
512 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
513 vlv_pipe_any);
a4a5d2f8
VS
514
515 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
516 if (intel_dp->pps_pipe == INVALID_PIPE) {
517 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
518 port_name(port));
519 return;
bf13e81b
JN
520 }
521
a4a5d2f8
VS
522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
523 port_name(port), pipe_name(intel_dp->pps_pipe));
524
36b5f425
VS
525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
527}
528
773538e8
VS
529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
530{
531 struct drm_device *dev = dev_priv->dev;
532 struct intel_encoder *encoder;
533
534 if (WARN_ON(!IS_VALLEYVIEW(dev)))
535 return;
536
537 /*
538 * We can't grab pps_mutex here due to deadlock with power_domain
539 * mutex when power_domain functions are called while holding pps_mutex.
540 * That also means that in order to use pps_pipe the code needs to
541 * hold both a power domain reference and pps_mutex, and the power domain
542 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should always use them.
545 */
546
547 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
548 struct intel_dp *intel_dp;
549
550 if (encoder->type != INTEL_OUTPUT_EDP)
551 continue;
552
553 intel_dp = enc_to_intel_dp(&encoder->base);
554 intel_dp->pps_pipe = INVALID_PIPE;
555 }
556}
557
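/*
 * Panel power sequencer register helpers: PCH-split platforms have a single
 * PCH_PP_* block, while VLV/CHV have one block per pipe, selected by
 * whichever pipe's power sequencer this eDP port is currently using.
 */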
558static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
559{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp);
561
562 if (HAS_PCH_SPLIT(dev))
563 return PCH_PP_CONTROL;
564 else
565 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
566}
567
568static u32 _pp_stat_reg(struct intel_dp *intel_dp)
569{
570 struct drm_device *dev = intel_dp_to_dev(intel_dp);
571
572 if (HAS_PCH_SPLIT(dev))
573 return PCH_PP_STATUS;
574 else
575 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
576}
577
/*
 * Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * This function is only applicable when the panel PM state is not being
 * tracked.
 */
580static int edp_notify_handler(struct notifier_block *this, unsigned long code,
581 void *unused)
582{
583 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
584 edp_notifier);
585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 u32 pp_div;
588 u32 pp_ctrl_reg, pp_div_reg;
01527b31
CT
589
590 if (!is_edp(intel_dp) || code != SYS_RESTART)
591 return 0;
592
773538e8 593 pps_lock(intel_dp);
e39b999a 594
01527b31 595 if (IS_VALLEYVIEW(dev)) {
e39b999a
VS
596 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
597
01527b31
CT
598 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
599 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
600 pp_div = I915_READ(pp_div_reg);
601 pp_div &= PP_REFERENCE_DIVIDER_MASK;
602
603 /* 0x1F write to PP_DIV_REG sets max cycle delay */
604 I915_WRITE(pp_div_reg, pp_div | 0x1F);
605 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
606 msleep(intel_dp->panel_power_cycle_delay);
607 }
608
773538e8 609 pps_unlock(intel_dp);
e39b999a 610
01527b31
CT
611 return 0;
612}
613
4be73780 614static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
9a42356b
VS
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
bf13e81b 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
626}
627
4be73780 628static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 629{
30add22d 630 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
631 struct drm_i915_private *dev_priv = dev->dev_private;
632
e39b999a
VS
633 lockdep_assert_held(&dev_priv->pps_mutex);
634
9a42356b
VS
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
773538e8 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
640}
641
9b984dae
KP
642static void
643intel_dp_check_edp(struct intel_dp *intel_dp)
644{
30add22d 645 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 646 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 647
9b984dae
KP
648 if (!is_edp(intel_dp))
649 return;
453c5420 650
4be73780 651 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
652 WARN(1, "eDP powered off while attempting aux channel communication.\n");
653 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
654 I915_READ(_pp_stat_reg(intel_dp)),
655 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
656 }
657}
658
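/*
 * Wait for the AUX CH SEND_BUSY bit to clear, either via the AUX done
 * interrupt or by polling, giving up after roughly 10ms, and return the
 * final AUX channel status.
 */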
659static uint32_t
660intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
661{
662 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
663 struct drm_device *dev = intel_dig_port->base.base.dev;
664 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 665 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
666 uint32_t status;
667 bool done;
668
ef04f00d 669#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 670 if (has_aux_irq)
b18ac466 671 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 672 msecs_to_jiffies_timeout(10));
9ee32fea
DV
673 else
674 done = wait_for_atomic(C, 10) == 0;
675 if (!done)
676 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
677 has_aux_irq);
678#undef C
679
680 return status;
681}
682
ec5b01dd 683static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 684{
174edf1f
PZ
685 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
686 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 687
	/*
	 * The clock divider is based on the hrawclk; the divided clock should
	 * run at 2MHz. So take the hrawclk value, divide it by 2, and use that.
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
693}
694
695static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
696{
697 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
698 struct drm_device *dev = intel_dig_port->base.base.dev;
699
700 if (index)
701 return 0;
702
703 if (intel_dig_port->port == PORT_A) {
704 if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 705 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 706 else
b84a1cf8 707 return 225; /* eDP input clock at 450Mhz */
ec5b01dd
DL
708 } else {
709 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
710 }
711}
712
713static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
714{
715 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
716 struct drm_device *dev = intel_dig_port->base.base.dev;
717 struct drm_i915_private *dev_priv = dev->dev_private;
718
719 if (intel_dig_port->port == PORT_A) {
720 if (index)
721 return 0;
722 return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
2c55c336
JN
723 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
724 /* Workaround for non-ULT HSW */
bc86625a
CW
725 switch (index) {
726 case 0: return 63;
727 case 1: return 72;
728 default: return 0;
729 }
ec5b01dd 730 } else {
bc86625a 731 return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 732 }
b84a1cf8
RV
733}
734
ec5b01dd
DL
735static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
736{
737 return index ? 0 : 100;
738}
739
b6b5e383
DL
740static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741{
742 /*
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
745 * get_aux_clock_divider vfunc to plug-in into the existing code.
746 */
747 return index ? 0 : 1;
748}
749
5ed12a19
DL
750static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
751 bool has_aux_irq,
752 int send_bytes,
753 uint32_t aux_clock_divider)
754{
755 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
756 struct drm_device *dev = intel_dig_port->base.base.dev;
757 uint32_t precharge, timeout;
758
759 if (IS_GEN6(dev))
760 precharge = 3;
761 else
762 precharge = 5;
763
764 if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
765 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
766 else
767 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
768
769 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 770 DP_AUX_CH_CTL_DONE |
5ed12a19 771 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 772 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 773 timeout |
788d4433 774 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
775 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
776 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
778}
779
b9ca5fad
DL
780static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
781 bool has_aux_irq,
782 int send_bytes,
783 uint32_t unused)
784{
785 return DP_AUX_CH_CTL_SEND_BUSY |
786 DP_AUX_CH_CTL_DONE |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
793}
794
b84a1cf8
RV
795static int
796intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 797 const uint8_t *send, int send_bytes,
b84a1cf8
RV
798 uint8_t *recv, int recv_size)
799{
800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
801 struct drm_device *dev = intel_dig_port->base.base.dev;
802 struct drm_i915_private *dev_priv = dev->dev_private;
803 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
804 uint32_t ch_data = ch_ctl + 4;
bc86625a 805 uint32_t aux_clock_divider;
b84a1cf8
RV
806 int i, ret, recv_bytes;
807 uint32_t status;
5ed12a19 808 int try, clock = 0;
4e6b788c 809 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
810 bool vdd;
811
773538e8 812 pps_lock(intel_dp);
e39b999a 813
	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
1e0560e0 820 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
821
822 /* dp aux is extremely sensitive to irq latency, hence request the
823 * lowest possible wakeup latency and so prevent the cpu from going into
824 * deep sleep states.
825 */
826 pm_qos_update_request(&dev_priv->pm_qos, 0);
827
828 intel_dp_check_edp(intel_dp);
5eb08b69 829
c67a470b
PZ
830 intel_aux_display_runtime_get(dev_priv);
831
11bee43e
JB
832 /* Try to wait for any previous AUX channel activity */
833 for (try = 0; try < 3; try++) {
ef04f00d 834 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
835 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
836 break;
837 msleep(1);
838 }
839
840 if (try == 3) {
841 WARN(1, "dp_aux_ch not started status 0x%08x\n",
842 I915_READ(ch_ctl));
9ee32fea
DV
843 ret = -EBUSY;
844 goto out;
4f7f7b7e
CW
845 }
846
46a5ae9f
PZ
847 /* Only 5 data registers! */
848 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
849 ret = -E2BIG;
850 goto out;
851 }
852
ec5b01dd 853 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
854 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
855 has_aux_irq,
856 send_bytes,
857 aux_clock_divider);
5ed12a19 858
bc86625a
CW
859 /* Must try at least 3 times according to DP spec */
860 for (try = 0; try < 5; try++) {
861 /* Load the send data into the aux channel data registers */
862 for (i = 0; i < send_bytes; i += 4)
863 I915_WRITE(ch_data + i,
a4f1289e
RV
864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
bc86625a
CW
866
867 /* Send the command and wait for it to complete */
5ed12a19 868 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
869
870 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
871
872 /* Clear done status and any errors */
873 I915_WRITE(ch_ctl,
874 status |
875 DP_AUX_CH_CTL_DONE |
876 DP_AUX_CH_CTL_TIME_OUT_ERROR |
877 DP_AUX_CH_CTL_RECEIVE_ERROR);
878
879 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
880 DP_AUX_CH_CTL_RECEIVE_ERROR))
881 continue;
882 if (status & DP_AUX_CH_CTL_DONE)
883 break;
884 }
4f7f7b7e 885 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
886 break;
887 }
888
a4fc5ed6 889 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 890 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
891 ret = -EBUSY;
892 goto out;
a4fc5ed6
KP
893 }
894
895 /* Check for timeout or receive error.
896 * Timeouts occur when the sink is not connected
897 */
a5b3da54 898 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 899 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
900 ret = -EIO;
901 goto out;
a5b3da54 902 }
1ae8c0a5
KP
903
904 /* Timeouts occur when the device isn't connected, so they're
905 * "normal" -- don't fill the kernel log with these */
a5b3da54 906 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 907 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
908 ret = -ETIMEDOUT;
909 goto out;
a4fc5ed6
KP
910 }
911
912 /* Unload any bytes sent back from the other side */
913 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
914 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
915 if (recv_bytes > recv_size)
916 recv_bytes = recv_size;
0206e353 917
4f7f7b7e 918 for (i = 0; i < recv_bytes; i += 4)
a4f1289e
RV
919 intel_dp_unpack_aux(I915_READ(ch_data + i),
920 recv + i, recv_bytes - i);
a4fc5ed6 921
9ee32fea
DV
922 ret = recv_bytes;
923out:
924 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
c67a470b 925 intel_aux_display_runtime_put(dev_priv);
9ee32fea 926
884f19e9
JN
927 if (vdd)
928 edp_panel_vdd_off(intel_dp, false);
929
773538e8 930 pps_unlock(intel_dp);
e39b999a 931
9ee32fea 932 return ret;
a4fc5ed6
KP
933}
934
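/*
 * An AUX request header is 3 bytes of command/address; a fourth header byte
 * carries the payload length when the transaction transfers data.
 */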
935#define BARE_ADDRESS_SIZE 3
936#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
937static ssize_t
938intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 939{
9d1a1031
JN
940 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
941 uint8_t txbuf[20], rxbuf[20];
942 size_t txsize, rxsize;
a4fc5ed6 943 int ret;
a4fc5ed6 944
d2d9cbbd
VS
945 txbuf[0] = (msg->request << 4) |
946 ((msg->address >> 16) & 0xf);
947 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
948 txbuf[2] = msg->address & 0xff;
949 txbuf[3] = msg->size - 1;
46a5ae9f 950
9d1a1031
JN
951 switch (msg->request & ~DP_AUX_I2C_MOT) {
952 case DP_AUX_NATIVE_WRITE:
953 case DP_AUX_I2C_WRITE:
a6c8aff0 954 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 955 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 956
9d1a1031
JN
957 if (WARN_ON(txsize > 20))
958 return -E2BIG;
a4fc5ed6 959
9d1a1031 960 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 961
9d1a1031
JN
962 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
963 if (ret > 0) {
964 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 965
a1ddefd8
JN
966 if (ret > 1) {
967 /* Number of bytes written in a short write. */
968 ret = clamp_t(int, rxbuf[1], 0, msg->size);
969 } else {
970 /* Return payload size. */
971 ret = msg->size;
972 }
9d1a1031
JN
973 }
974 break;
46a5ae9f 975
9d1a1031
JN
976 case DP_AUX_NATIVE_READ:
977 case DP_AUX_I2C_READ:
a6c8aff0 978 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 979 rxsize = msg->size + 1;
a4fc5ed6 980
9d1a1031
JN
981 if (WARN_ON(rxsize > 20))
982 return -E2BIG;
a4fc5ed6 983
9d1a1031
JN
984 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
985 if (ret > 0) {
986 msg->reply = rxbuf[0] >> 4;
987 /*
988 * Assume happy day, and copy the data. The caller is
989 * expected to check msg->reply before touching it.
990 *
991 * Return payload size.
992 */
993 ret--;
994 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 995 }
9d1a1031
JN
996 break;
997
998 default:
999 ret = -EINVAL;
1000 break;
a4fc5ed6 1001 }
f51a44b9 1002
9d1a1031 1003 return ret;
a4fc5ed6
KP
1004}
1005
9d1a1031
JN
1006static void
1007intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1008{
1009 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1010 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1011 enum port port = intel_dig_port->port;
0b99836f 1012 const char *name = NULL;
ab2c0672
DA
1013 int ret;
1014
33ad6626
JN
1015 switch (port) {
1016 case PORT_A:
1017 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
0b99836f 1018 name = "DPDDC-A";
ab2c0672 1019 break;
33ad6626
JN
1020 case PORT_B:
1021 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
0b99836f 1022 name = "DPDDC-B";
ab2c0672 1023 break;
33ad6626
JN
1024 case PORT_C:
1025 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
0b99836f 1026 name = "DPDDC-C";
ab2c0672 1027 break;
33ad6626
JN
1028 case PORT_D:
1029 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
0b99836f 1030 name = "DPDDC-D";
33ad6626
JN
1031 break;
1032 default:
1033 BUG();
ab2c0672
DA
1034 }
1035
1b1aad75
DL
1036 /*
1037 * The AUX_CTL register is usually DP_CTL + 0x10.
1038 *
1039 * On Haswell and Broadwell though:
1040 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1041 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1042 *
1043 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1044 */
1045 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
33ad6626 1046 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
8316f337 1047
0b99836f 1048 intel_dp->aux.name = name;
9d1a1031
JN
1049 intel_dp->aux.dev = dev->dev;
1050 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1051
0b99836f
JN
1052 DRM_DEBUG_KMS("registering %s bus for %s\n", name,
1053 connector->base.kdev->kobj.name);
8316f337 1054
4f71d0cb 1055 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1056 if (ret < 0) {
4f71d0cb 1057 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
0b99836f
JN
1058 name, ret);
1059 return;
ab2c0672 1060 }
8a5e6aeb 1061
0b99836f
JN
1062 ret = sysfs_create_link(&connector->base.kdev->kobj,
1063 &intel_dp->aux.ddc.dev.kobj,
1064 intel_dp->aux.ddc.dev.kobj.name);
1065 if (ret < 0) {
1066 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
4f71d0cb 1067 drm_dp_aux_unregister(&intel_dp->aux);
ab2c0672 1068 }
a4fc5ed6
KP
1069}
1070
80f65de3
ID
1071static void
1072intel_dp_connector_unregister(struct intel_connector *intel_connector)
1073{
1074 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1075
0e32b39c
DA
1076 if (!intel_connector->mst_port)
1077 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1078 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1079 intel_connector_unregister(intel_connector);
1080}
1081
5416d871 1082static void
c3346ef6 1083skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
5416d871
DL
1084{
1085 u32 ctrl1;
1086
1087 pipe_config->ddi_pll_sel = SKL_DPLL0;
1088 pipe_config->dpll_hw_state.cfgcr1 = 0;
1089 pipe_config->dpll_hw_state.cfgcr2 = 0;
1090
1091 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
c3346ef6
SJ
1092 switch (link_clock / 2) {
1093 case 81000:
5416d871
DL
1094 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1095 SKL_DPLL0);
1096 break;
c3346ef6 1097 case 135000:
5416d871
DL
1098 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1099 SKL_DPLL0);
1100 break;
c3346ef6 1101 case 270000:
5416d871
DL
1102 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1103 SKL_DPLL0);
1104 break;
c3346ef6
SJ
1105 case 162000:
1106 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
1107 SKL_DPLL0);
1108 break;
	/*
	 * TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
	 * results in a CDCLK change. Need to handle the CDCLK change by
	 * disabling the pipes and re-enabling them.
	 */
1112 case 108000:
1113 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
1114 SKL_DPLL0);
1115 break;
1116 case 216000:
1117 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
1118 SKL_DPLL0);
1119 break;
1120
5416d871
DL
1121 }
1122 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1123}
1124
0e50338c 1125static void
5cec258b 1126hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw)
0e50338c
DV
1127{
1128 switch (link_bw) {
1129 case DP_LINK_BW_1_62:
1130 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1131 break;
1132 case DP_LINK_BW_2_7:
1133 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1134 break;
1135 case DP_LINK_BW_5_4:
1136 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1137 break;
1138 }
1139}
1140
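/*
 * Link rates the sink supports: either the explicit rate table read from an
 * eDP 1.4 sink's DPCD, or the standard rates up to the sink's reported
 * maximum link bandwidth.
 */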
fc0f8e25 1141static int
12f6a2e2 1142intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1143{
94ca719e
VS
1144 if (intel_dp->num_sink_rates) {
1145 *sink_rates = intel_dp->sink_rates;
1146 return intel_dp->num_sink_rates;
fc0f8e25 1147 }
12f6a2e2
VS
1148
1149 *sink_rates = default_rates;
1150
1151 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1152}
1153
a8f3ef61 1154static int
1db10e28 1155intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
a8f3ef61 1156{
636280ba
VS
1157 if (INTEL_INFO(dev)->gen >= 9) {
1158 *source_rates = gen9_rates;
1159 return ARRAY_SIZE(gen9_rates);
fe51bfb9
VS
1160 } else if (IS_CHERRYVIEW(dev)) {
1161 *source_rates = chv_rates;
1162 return ARRAY_SIZE(chv_rates);
a8f3ef61 1163 }
636280ba
VS
1164
1165 *source_rates = default_rates;
1166
1db10e28
VS
1167 if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
1168 /* WaDisableHBR2:skl */
1169 return (DP_LINK_BW_2_7 >> 3) + 1;
1170 else if (INTEL_INFO(dev)->gen >= 8 ||
1171 (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
1172 return (DP_LINK_BW_5_4 >> 3) + 1;
1173 else
1174 return (DP_LINK_BW_2_7 >> 3) + 1;
a8f3ef61
SJ
1175}
1176
c6bb3538
DV
1177static void
1178intel_dp_set_clock(struct intel_encoder *encoder,
5cec258b 1179 struct intel_crtc_state *pipe_config, int link_bw)
c6bb3538
DV
1180{
1181 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1182 const struct dp_link_dpll *divisor = NULL;
1183 int i, count = 0;
c6bb3538
DV
1184
1185 if (IS_G4X(dev)) {
9dd4ffdf
CML
1186 divisor = gen4_dpll;
1187 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1188 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1189 divisor = pch_dpll;
1190 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1191 } else if (IS_CHERRYVIEW(dev)) {
1192 divisor = chv_dpll;
1193 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1194 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1195 divisor = vlv_dpll;
1196 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1197 }
9dd4ffdf
CML
1198
1199 if (divisor && count) {
1200 for (i = 0; i < count; i++) {
1201 if (link_bw == divisor[i].link_bw) {
1202 pipe_config->dpll = divisor[i].dpll;
1203 pipe_config->clock_set = true;
1204 break;
1205 }
1206 }
c6bb3538
DV
1207 }
1208}
1209
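/*
 * Both rate arrays must be sorted in ascending order. The intersection is
 * written to common_rates and its length is returned, capped at
 * DP_MAX_SUPPORTED_RATES.
 */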
1210static int intersect_rates(const int *source_rates, int source_len,
1211 const int *sink_rates, int sink_len,
94ca719e 1212 int *common_rates)
a8f3ef61
SJ
1213{
1214 int i = 0, j = 0, k = 0;
1215
a8f3ef61
SJ
1216 while (i < source_len && j < sink_len) {
1217 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1218 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1219 return k;
94ca719e 1220 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1221 ++k;
1222 ++i;
1223 ++j;
1224 } else if (source_rates[i] < sink_rates[j]) {
1225 ++i;
1226 } else {
1227 ++j;
1228 }
1229 }
1230 return k;
1231}
1232
94ca719e
VS
1233static int intel_dp_common_rates(struct intel_dp *intel_dp,
1234 int *common_rates)
2ecae76a
VS
1235{
1236 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1237 const int *source_rates, *sink_rates;
1238 int source_len, sink_len;
1239
1240 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1241 source_len = intel_dp_source_rates(dev, &source_rates);
1242
1243 return intersect_rates(source_rates, source_len,
1244 sink_rates, sink_len,
94ca719e 1245 common_rates);
2ecae76a
VS
1246}
1247
0336400e
VS
1248static void snprintf_int_array(char *str, size_t len,
1249 const int *array, int nelem)
1250{
1251 int i;
1252
1253 str[0] = '\0';
1254
1255 for (i = 0; i < nelem; i++) {
1256 int r = snprintf(str, len, "%d,", array[i]);
1257 if (r >= len)
1258 return;
1259 str += r;
1260 len -= r;
1261 }
1262}
1263
1264static void intel_dp_print_rates(struct intel_dp *intel_dp)
1265{
1266 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1267 const int *source_rates, *sink_rates;
94ca719e
VS
1268 int source_len, sink_len, common_len;
1269 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1270 char str[128]; /* FIXME: too big for stack? */
1271
1272 if ((drm_debug & DRM_UT_KMS) == 0)
1273 return;
1274
1275 source_len = intel_dp_source_rates(dev, &source_rates);
1276 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1277 DRM_DEBUG_KMS("source rates: %s\n", str);
1278
1279 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1280 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1281 DRM_DEBUG_KMS("sink rates: %s\n", str);
1282
94ca719e
VS
1283 common_len = intel_dp_common_rates(intel_dp, common_rates);
1284 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1285 DRM_DEBUG_KMS("common rates: %s\n", str);
1286}
1287
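/*
 * Return the index of @find in @rates; with @find == 0 this yields the
 * number of valid entries in a zero-initialized rate array, which is how
 * intel_dp_max_link_rate() locates the highest common rate.
 */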
f4896f15 1288static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1289{
1290 int i = 0;
1291
1292 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1293 if (find == rates[i])
1294 break;
1295
1296 return i;
1297}
1298
50fec21a
VS
1299int
1300intel_dp_max_link_rate(struct intel_dp *intel_dp)
1301{
1302 int rates[DP_MAX_SUPPORTED_RATES] = {};
1303 int len;
1304
94ca719e 1305 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1306 if (WARN_ON(len <= 0))
1307 return 162000;
1308
1309 return rates[rate_to_index(0, rates) - 1];
1310}
1311
ed4e9c1d
VS
1312int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1313{
94ca719e 1314 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1315}
1316
00c09d70 1317bool
5bfe2ac0 1318intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1319 struct intel_crtc_state *pipe_config)
a4fc5ed6 1320{
5bfe2ac0 1321 struct drm_device *dev = encoder->base.dev;
36008365 1322 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1323 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1324 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1325 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1326 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1327 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1328 int lane_count, clock;
56071a20 1329 int min_lane_count = 1;
eeb6324d 1330 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1331 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1332 int min_clock = 0;
a8f3ef61 1333 int max_clock;
083f9560 1334 int bpp, mode_rate;
ff9a6750 1335 int link_avail, link_clock;
94ca719e
VS
1336 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1337 int common_len;
a8f3ef61 1338
94ca719e 1339 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1340
1341 /* No common link rates between source and sink */
94ca719e 1342 WARN_ON(common_len <= 0);
a8f3ef61 1343
94ca719e 1344 max_clock = common_len - 1;
a4fc5ed6 1345
bc7d38a4 1346 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1347 pipe_config->has_pch_encoder = true;
1348
03afc4a2 1349 pipe_config->has_dp_encoder = true;
f769cd24 1350 pipe_config->has_drrs = false;
9ed109a7 1351 pipe_config->has_audio = intel_dp->has_audio;
a4fc5ed6 1352
dd06f90e
JN
1353 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1354 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1355 adjusted_mode);
2dd24552
JB
1356 if (!HAS_PCH_SPLIT(dev))
1357 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1358 intel_connector->panel.fitting_mode);
1359 else
b074cec8
JB
1360 intel_pch_panel_fitting(intel_crtc, pipe_config,
1361 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1362 }
1363
cb1793ce 1364 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1365 return false;
1366
083f9560 1367 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1368 "max bw %d pixel clock %iKHz\n",
94ca719e 1369 max_lane_count, common_rates[max_clock],
241bfc38 1370 adjusted_mode->crtc_clock);
083f9560 1371
36008365
DV
1372 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1373 * bpc in between. */
3e7ca985 1374 bpp = pipe_config->pipe_bpp;
56071a20
JN
1375 if (is_edp(intel_dp)) {
1376 if (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp) {
1377 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1378 dev_priv->vbt.edp_bpp);
1379 bpp = dev_priv->vbt.edp_bpp;
1380 }
1381
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
1389 min_lane_count = max_lane_count;
1390 min_clock = max_clock;
7984211e 1391 }
657445fe 1392
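	/*
	 * Take the first configuration that fits: the highest bpp wins, and
	 * for a given bpp the lowest link clock is tried first, then the
	 * smallest lane count.
	 */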
36008365 1393 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1394 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1395 bpp);
36008365 1396
c6930992 1397 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1398 for (lane_count = min_lane_count;
1399 lane_count <= max_lane_count;
1400 lane_count <<= 1) {
1401
94ca719e 1402 link_clock = common_rates[clock];
36008365
DV
1403 link_avail = intel_dp_max_data_rate(link_clock,
1404 lane_count);
1405
1406 if (mode_rate <= link_avail) {
1407 goto found;
1408 }
1409 }
1410 }
1411 }
c4867936 1412
36008365 1413 return false;
3685a8f3 1414
36008365 1415found:
55bc60db
VS
1416 if (intel_dp->color_range_auto) {
1417 /*
1418 * See:
1419 * CEA-861-E - 5.1 Default Encoding Parameters
1420 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1421 */
18316c8c 1422 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
1423 intel_dp->color_range = DP_COLOR_RANGE_16_235;
1424 else
1425 intel_dp->color_range = 0;
1426 }
1427
3685a8f3 1428 if (intel_dp->color_range)
50f3b016 1429 pipe_config->limited_color_range = true;
a4fc5ed6 1430
36008365 1431 intel_dp->lane_count = lane_count;
a8f3ef61 1432
94ca719e 1433 if (intel_dp->num_sink_rates) {
bc27b7d3 1434 intel_dp->link_bw = 0;
a8f3ef61 1435 intel_dp->rate_select =
94ca719e 1436 intel_dp_rate_select(intel_dp, common_rates[clock]);
bc27b7d3
VS
1437 } else {
1438 intel_dp->link_bw =
94ca719e 1439 drm_dp_link_rate_to_bw_code(common_rates[clock]);
bc27b7d3 1440 intel_dp->rate_select = 0;
a8f3ef61
SJ
1441 }
1442
657445fe 1443 pipe_config->pipe_bpp = bpp;
94ca719e 1444 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1445
36008365
DV
1446 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
1447 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 1448 pipe_config->port_clock, bpp);
36008365
DV
1449 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1450 mode_rate, link_avail);
a4fc5ed6 1451
03afc4a2 1452 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1453 adjusted_mode->crtc_clock,
1454 pipe_config->port_clock,
03afc4a2 1455 &pipe_config->dp_m_n);
9d1a455b 1456
439d7ac0 1457 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1458 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1459 pipe_config->has_drrs = true;
439d7ac0
PB
1460 intel_link_compute_m_n(bpp, lane_count,
1461 intel_connector->panel.downclock_mode->clock,
1462 pipe_config->port_clock,
1463 &pipe_config->dp_m2_n2);
1464 }
1465
5416d871 1466 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
94ca719e 1467 skl_edp_set_pll_config(pipe_config, common_rates[clock]);
5416d871 1468 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
0e50338c
DV
1469 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1470 else
1471 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
c6bb3538 1472
03afc4a2 1473 return true;
a4fc5ed6
KP
1474}
1475
7c62a164 1476static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 1477{
7c62a164
DV
1478 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1479 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1480 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
1481 struct drm_i915_private *dev_priv = dev->dev_private;
1482 u32 dpa_ctl;
1483
6e3c9717
ACO
1484 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
1485 crtc->config->port_clock);
ea9b6006
DV
1486 dpa_ctl = I915_READ(DP_A);
1487 dpa_ctl &= ~DP_PLL_FREQ_MASK;
1488
6e3c9717 1489 if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around an ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
1493 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 1494 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 1495 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
1496 } else {
1497 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 1498 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 1499 }
1ce17038 1500
ea9b6006
DV
1501 I915_WRITE(DP_A, dpa_ctl);
1502
1503 POSTING_READ(DP_A);
1504 udelay(500);
1505}
1506
8ac33ed3 1507static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1508{
b934223d 1509 struct drm_device *dev = encoder->base.dev;
417e822d 1510 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1512 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1513 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
6e3c9717 1514 struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1515
417e822d 1516 /*
1a2eb460 1517 * There are four kinds of DP registers:
417e822d
KP
1518 *
1519 * IBX PCH
1a2eb460
KP
1520 * SNB CPU
1521 * IVB CPU
417e822d
KP
1522 * CPT PCH
1523 *
1524 * IBX PCH and CPU are the same for almost everything,
1525 * except that the CPU DP PLL is configured in this
1526 * register
1527 *
1528 * CPT PCH is quite different, having many bits moved
1529 * to the TRANS_DP_CTL register instead. That
1530 * configuration happens (oddly) in ironlake_pch_enable
1531 */
9c9e7927 1532
417e822d
KP
1533 /* Preserve the BIOS-computed detected bit. This is
1534 * supposed to be read-only.
1535 */
1536 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1537
417e822d 1538 /* Handle DP bits in common between all three register formats */
417e822d 1539 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 1540 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 1541
6e3c9717 1542 if (crtc->config->has_audio)
ea5b213a 1543 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
247d89f6 1544
417e822d 1545 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1546
bc7d38a4 1547 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1548 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1549 intel_dp->DP |= DP_SYNC_HS_HIGH;
1550 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1551 intel_dp->DP |= DP_SYNC_VS_HIGH;
1552 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1553
6aba5b6c 1554 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1555 intel_dp->DP |= DP_ENHANCED_FRAMING;
1556
7c62a164 1557 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 1558 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 1559 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 1560 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
1561
1562 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1563 intel_dp->DP |= DP_SYNC_HS_HIGH;
1564 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1565 intel_dp->DP |= DP_SYNC_VS_HIGH;
1566 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1567
6aba5b6c 1568 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1569 intel_dp->DP |= DP_ENHANCED_FRAMING;
1570
44f37d1f
CML
1571 if (!IS_CHERRYVIEW(dev)) {
1572 if (crtc->pipe == 1)
1573 intel_dp->DP |= DP_PIPEB_SELECT;
1574 } else {
1575 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1576 }
417e822d
KP
1577 } else {
1578 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 1579 }
a4fc5ed6
KP
1580}
1581
ffd6749d
PZ
1582#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1583#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1584
1a5ef5b7
PZ
1585#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1586#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1587
ffd6749d
PZ
1588#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1589#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1590
4be73780 1591static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1592 u32 mask,
1593 u32 value)
bd943159 1594{
30add22d 1595 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1596 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1597 u32 pp_stat_reg, pp_ctrl_reg;
1598
e39b999a
VS
1599 lockdep_assert_held(&dev_priv->pps_mutex);
1600
bf13e81b
JN
1601 pp_stat_reg = _pp_stat_reg(intel_dp);
1602 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1603
99ea7127 1604 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1605 mask, value,
1606 I915_READ(pp_stat_reg),
1607 I915_READ(pp_ctrl_reg));
32ce697c 1608
453c5420 1609 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1610 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1611 I915_READ(pp_stat_reg),
1612 I915_READ(pp_ctrl_reg));
32ce697c 1613 }
54c136d4
CW
1614
1615 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1616}
32ce697c 1617
4be73780 1618static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1619{
1620 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1621 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1622}
1623
4be73780 1624static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1625{
1626 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1627 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1628}
1629
4be73780 1630static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1631{
1632 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1633
1634 /* When we disable the VDD override bit last we have to do the manual
1635 * wait. */
1636 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1637 intel_dp->panel_power_cycle_delay);
1638
4be73780 1639 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1640}
1641
4be73780 1642static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1643{
1644 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1645 intel_dp->backlight_on_delay);
1646}
1647
4be73780 1648static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1649{
1650 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1651 intel_dp->backlight_off_delay);
1652}
99ea7127 1653
832dd3c1
KP
1654/* Read the current pp_control value, unlocking the register if it
1655 * is locked
1656 */
1657
453c5420 1658static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1659{
453c5420
JB
1660 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1661 struct drm_i915_private *dev_priv = dev->dev_private;
1662 u32 control;
832dd3c1 1663
e39b999a
VS
1664 lockdep_assert_held(&dev_priv->pps_mutex);
1665
bf13e81b 1666 control = I915_READ(_pp_ctrl_reg(intel_dp));
832dd3c1
KP
1667 control &= ~PANEL_UNLOCK_MASK;
1668 control |= PANEL_UNLOCK_REGS;
1669 return control;
bd943159
KP
1670}
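/*
 * Note: every PP_CONTROL update in this file goes through
 * ironlake_get_pp_control() first, so the PANEL_UNLOCK_REGS key is
 * written back together with whatever bits the caller modifies.
 */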
1671
951468f3
VS
1672/*
1673 * Must be paired with edp_panel_vdd_off().
1674 * Must hold pps_mutex around the whole on/off sequence.
1675 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1676 */
1e0560e0 1677static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1678{
30add22d 1679 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1680 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1681 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1682 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1683 enum intel_display_power_domain power_domain;
5d613501 1684 u32 pp;
453c5420 1685 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1686 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1687
e39b999a
VS
1688 lockdep_assert_held(&dev_priv->pps_mutex);
1689
97af61f5 1690 if (!is_edp(intel_dp))
adddaaf4 1691 return false;
bd943159 1692
2c623c11 1693 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1694 intel_dp->want_panel_vdd = true;
99ea7127 1695
4be73780 1696 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1697 return need_to_disable;
b0665d57 1698
4e6e1a54
ID
1699 power_domain = intel_display_port_power_domain(intel_encoder);
1700 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1701
3936fcf4
VS
1702 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1703 port_name(intel_dig_port->port));
bd943159 1704
4be73780
DV
1705 if (!edp_have_panel_power(intel_dp))
1706 wait_panel_power_cycle(intel_dp);
99ea7127 1707
453c5420 1708 pp = ironlake_get_pp_control(intel_dp);
5d613501 1709 pp |= EDP_FORCE_VDD;
ebf33b18 1710
bf13e81b
JN
1711 pp_stat_reg = _pp_stat_reg(intel_dp);
1712 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1713
1714 I915_WRITE(pp_ctrl_reg, pp);
1715 POSTING_READ(pp_ctrl_reg);
1716 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1717 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1718 /*
 1719 * If the panel wasn't on, delay before accessing the aux channel
1720 */
4be73780 1721 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1722 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1723 port_name(intel_dig_port->port));
f01eca2e 1724 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1725 }
adddaaf4
JN
1726
1727 return need_to_disable;
1728}
1729
951468f3
VS
1730/*
1731 * Must be paired with intel_edp_panel_vdd_off() or
1732 * intel_edp_panel_off().
1733 * Nested calls to these functions are not allowed since
1734 * we drop the lock. Caller must use some higher level
1735 * locking to prevent nested calls from other threads.
1736 */
b80d6c78 1737void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1738{
c695b6b6 1739 bool vdd;
adddaaf4 1740
c695b6b6
VS
1741 if (!is_edp(intel_dp))
1742 return;
1743
773538e8 1744 pps_lock(intel_dp);
c695b6b6 1745 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1746 pps_unlock(intel_dp);
c695b6b6 1747
e2c719b7 1748 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1749 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1750}
1751
4be73780 1752static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1753{
30add22d 1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1755 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1756 struct intel_digital_port *intel_dig_port =
1757 dp_to_dig_port(intel_dp);
1758 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1759 enum intel_display_power_domain power_domain;
5d613501 1760 u32 pp;
453c5420 1761 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1762
e39b999a 1763 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1764
15e899a0 1765 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1766
15e899a0 1767 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1768 return;
b0665d57 1769
3936fcf4
VS
1770 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1771 port_name(intel_dig_port->port));
bd943159 1772
be2c9196
VS
1773 pp = ironlake_get_pp_control(intel_dp);
1774 pp &= ~EDP_FORCE_VDD;
453c5420 1775
be2c9196
VS
1776 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1777 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1778
be2c9196
VS
1779 I915_WRITE(pp_ctrl_reg, pp);
1780 POSTING_READ(pp_ctrl_reg);
90791a5c 1781
be2c9196
VS
1782 /* Make sure sequencer is idle before allowing subsequent activity */
1783 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1784 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1785
be2c9196
VS
1786 if ((pp & POWER_TARGET_ON) == 0)
1787 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1788
be2c9196
VS
1789 power_domain = intel_display_port_power_domain(intel_encoder);
1790 intel_display_power_put(dev_priv, power_domain);
bd943159 1791}
5d613501 1792
4be73780 1793static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1794{
1795 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1796 struct intel_dp, panel_vdd_work);
bd943159 1797
773538e8 1798 pps_lock(intel_dp);
15e899a0
VS
1799 if (!intel_dp->want_panel_vdd)
1800 edp_panel_vdd_off_sync(intel_dp);
773538e8 1801 pps_unlock(intel_dp);
bd943159
KP
1802}
1803
aba86890
ID
1804static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1805{
1806 unsigned long delay;
1807
1808 /*
1809 * Queue the timer to fire a long time from now (relative to the power
1810 * down delay) to keep the panel power up across a sequence of
1811 * operations.
1812 */
1813 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1814 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1815}
1816
951468f3
VS
1817/*
1818 * Must be paired with edp_panel_vdd_on().
1819 * Must hold pps_mutex around the whole on/off sequence.
1820 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1821 */
4be73780 1822static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1823{
e39b999a
VS
1824 struct drm_i915_private *dev_priv =
1825 intel_dp_to_dev(intel_dp)->dev_private;
1826
1827 lockdep_assert_held(&dev_priv->pps_mutex);
1828
97af61f5
KP
1829 if (!is_edp(intel_dp))
1830 return;
5d613501 1831
e2c719b7 1832 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1833 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1834
bd943159
KP
1835 intel_dp->want_panel_vdd = false;
1836
aba86890 1837 if (sync)
4be73780 1838 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1839 else
1840 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1841}
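/*
 * Minimal usage sketch (illustrative only): the static VDD helpers above
 * are called with pps_mutex held and strictly paired, mirroring the core
 * of what intel_enable_dp() does later in this file:
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */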
1842
9f0fb5be 1843static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1844{
30add22d 1845 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1846 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1847 u32 pp;
453c5420 1848 u32 pp_ctrl_reg;
9934c132 1849
9f0fb5be
VS
1850 lockdep_assert_held(&dev_priv->pps_mutex);
1851
97af61f5 1852 if (!is_edp(intel_dp))
bd943159 1853 return;
99ea7127 1854
3936fcf4
VS
1855 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1856 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1857
e7a89ace
VS
1858 if (WARN(edp_have_panel_power(intel_dp),
1859 "eDP port %c panel power already on\n",
1860 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1861 return;
9934c132 1862
4be73780 1863 wait_panel_power_cycle(intel_dp);
37c6c9b0 1864
bf13e81b 1865 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1866 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1867 if (IS_GEN5(dev)) {
1868 /* ILK workaround: disable reset around power sequence */
1869 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1870 I915_WRITE(pp_ctrl_reg, pp);
1871 POSTING_READ(pp_ctrl_reg);
05ce1a49 1872 }
37c6c9b0 1873
1c0ae80a 1874 pp |= POWER_TARGET_ON;
99ea7127
KP
1875 if (!IS_GEN5(dev))
1876 pp |= PANEL_POWER_RESET;
1877
453c5420
JB
1878 I915_WRITE(pp_ctrl_reg, pp);
1879 POSTING_READ(pp_ctrl_reg);
9934c132 1880
4be73780 1881 wait_panel_on(intel_dp);
dce56b3c 1882 intel_dp->last_power_on = jiffies;
9934c132 1883
05ce1a49
KP
1884 if (IS_GEN5(dev)) {
1885 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1886 I915_WRITE(pp_ctrl_reg, pp);
1887 POSTING_READ(pp_ctrl_reg);
05ce1a49 1888 }
9f0fb5be 1889}
e39b999a 1890
9f0fb5be
VS
1891void intel_edp_panel_on(struct intel_dp *intel_dp)
1892{
1893 if (!is_edp(intel_dp))
1894 return;
1895
1896 pps_lock(intel_dp);
1897 edp_panel_on(intel_dp);
773538e8 1898 pps_unlock(intel_dp);
9934c132
JB
1899}
1900
9f0fb5be
VS
1901
1902static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1903{
4e6e1a54
ID
1904 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1905 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1906 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1907 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1908 enum intel_display_power_domain power_domain;
99ea7127 1909 u32 pp;
453c5420 1910 u32 pp_ctrl_reg;
9934c132 1911
9f0fb5be
VS
1912 lockdep_assert_held(&dev_priv->pps_mutex);
1913
97af61f5
KP
1914 if (!is_edp(intel_dp))
1915 return;
37c6c9b0 1916
3936fcf4
VS
1917 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1918 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 1919
3936fcf4
VS
1920 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1921 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 1922
453c5420 1923 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 1924 /* We need to switch off panel power _and_ force vdd, because otherwise
 1925 * some panels get very unhappy and cease to work. */
b3064154
PJ
1926 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
1927 EDP_BLC_ENABLE);
453c5420 1928
bf13e81b 1929 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1930
849e39f5
PZ
1931 intel_dp->want_panel_vdd = false;
1932
453c5420
JB
1933 I915_WRITE(pp_ctrl_reg, pp);
1934 POSTING_READ(pp_ctrl_reg);
9934c132 1935
dce56b3c 1936 intel_dp->last_power_cycle = jiffies;
4be73780 1937 wait_panel_off(intel_dp);
849e39f5
PZ
1938
1939 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
1940 power_domain = intel_display_port_power_domain(intel_encoder);
1941 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 1942}
e39b999a 1943
9f0fb5be
VS
1944void intel_edp_panel_off(struct intel_dp *intel_dp)
1945{
1946 if (!is_edp(intel_dp))
1947 return;
e39b999a 1948
9f0fb5be
VS
1949 pps_lock(intel_dp);
1950 edp_panel_off(intel_dp);
773538e8 1951 pps_unlock(intel_dp);
9934c132
JB
1952}
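/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * how the public eDP wrappers above are meant to be ordered on the
 * power-down path, mirroring intel_disable_dp() further down in this
 * file. The panel must not be powered off without VDD forced on, and
 * intel_edp_panel_off() releases the power reference taken when VDD was
 * enabled.
 */
static void __maybe_unused example_edp_power_down(struct intel_dp *intel_dp)
{
	/* grab VDD so the panel stays accessible while we shut it down */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	/* powers the panel off and drops the VDD power reference */
	intel_edp_panel_off(intel_dp);
}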
1953
1250d107
JN
1954/* Enable backlight in the panel power control. */
1955static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1956{
da63a9f2
PZ
1957 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1958 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 u32 pp;
453c5420 1961 u32 pp_ctrl_reg;
32f9d658 1962
01cb9ea6
JB
1963 /*
1964 * If we enable the backlight right away following a panel power
1965 * on, we may see slight flicker as the panel syncs with the eDP
1966 * link. So delay a bit to make sure the image is solid before
1967 * allowing it to appear.
1968 */
4be73780 1969 wait_backlight_on(intel_dp);
e39b999a 1970
773538e8 1971 pps_lock(intel_dp);
e39b999a 1972
453c5420 1973 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1974 pp |= EDP_BLC_ENABLE;
453c5420 1975
bf13e81b 1976 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1977
1978 I915_WRITE(pp_ctrl_reg, pp);
1979 POSTING_READ(pp_ctrl_reg);
e39b999a 1980
773538e8 1981 pps_unlock(intel_dp);
32f9d658
ZW
1982}
1983
1250d107
JN
1984/* Enable backlight PWM and backlight PP control. */
1985void intel_edp_backlight_on(struct intel_dp *intel_dp)
1986{
1987 if (!is_edp(intel_dp))
1988 return;
1989
1990 DRM_DEBUG_KMS("\n");
1991
1992 intel_panel_enable_backlight(intel_dp->attached_connector);
1993 _intel_edp_backlight_on(intel_dp);
1994}
1995
1996/* Disable backlight in the panel power control. */
1997static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1998{
30add22d 1999 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2000 struct drm_i915_private *dev_priv = dev->dev_private;
2001 u32 pp;
453c5420 2002 u32 pp_ctrl_reg;
32f9d658 2003
f01eca2e
KP
2004 if (!is_edp(intel_dp))
2005 return;
2006
773538e8 2007 pps_lock(intel_dp);
e39b999a 2008
453c5420 2009 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2010 pp &= ~EDP_BLC_ENABLE;
453c5420 2011
bf13e81b 2012 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2013
2014 I915_WRITE(pp_ctrl_reg, pp);
2015 POSTING_READ(pp_ctrl_reg);
f7d2323c 2016
773538e8 2017 pps_unlock(intel_dp);
e39b999a
VS
2018
2019 intel_dp->last_backlight_off = jiffies;
f7d2323c 2020 edp_wait_backlight_off(intel_dp);
1250d107 2021}
f7d2323c 2022
1250d107
JN
2023/* Disable backlight PP control and backlight PWM. */
2024void intel_edp_backlight_off(struct intel_dp *intel_dp)
2025{
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 DRM_DEBUG_KMS("\n");
f7d2323c 2030
1250d107 2031 _intel_edp_backlight_off(intel_dp);
f7d2323c 2032 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2033}
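/*
 * Note the ordering above: on enable the backlight PWM is set up before
 * the panel power control BLC bit, while on disable the BLC bit is
 * cleared (and the backlight off delay honored) before the PWM is torn
 * down.
 */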
a4fc5ed6 2034
73580fb7
JN
2035/*
2036 * Hook for controlling the panel power control backlight through the bl_power
2037 * sysfs attribute. Take care to handle multiple calls.
2038 */
2039static void intel_edp_backlight_power(struct intel_connector *connector,
2040 bool enable)
2041{
2042 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2043 bool is_enabled;
2044
773538e8 2045 pps_lock(intel_dp);
e39b999a 2046 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2047 pps_unlock(intel_dp);
73580fb7
JN
2048
2049 if (is_enabled == enable)
2050 return;
2051
23ba9373
JN
2052 DRM_DEBUG_KMS("panel power control backlight %s\n",
2053 enable ? "enable" : "disable");
73580fb7
JN
2054
2055 if (enable)
2056 _intel_edp_backlight_on(intel_dp);
2057 else
2058 _intel_edp_backlight_off(intel_dp);
2059}
2060
2bd2ad64 2061static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2062{
da63a9f2
PZ
2063 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2064 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2065 struct drm_device *dev = crtc->dev;
d240f20f
JB
2066 struct drm_i915_private *dev_priv = dev->dev_private;
2067 u32 dpa_ctl;
2068
2bd2ad64
DV
2069 assert_pipe_disabled(dev_priv,
2070 to_intel_crtc(crtc)->pipe);
2071
d240f20f
JB
2072 DRM_DEBUG_KMS("\n");
2073 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2074 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2075 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2076
2077 /* We don't adjust intel_dp->DP while tearing down the link, to
2078 * facilitate link retraining (e.g. after hotplug). Hence clear all
2079 * enable bits here to ensure that we don't enable too much. */
2080 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2081 intel_dp->DP |= DP_PLL_ENABLE;
2082 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2083 POSTING_READ(DP_A);
2084 udelay(200);
d240f20f
JB
2085}
2086
2bd2ad64 2087static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2088{
da63a9f2
PZ
2089 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2090 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2091 struct drm_device *dev = crtc->dev;
d240f20f
JB
2092 struct drm_i915_private *dev_priv = dev->dev_private;
2093 u32 dpa_ctl;
2094
2bd2ad64
DV
2095 assert_pipe_disabled(dev_priv,
2096 to_intel_crtc(crtc)->pipe);
2097
d240f20f 2098 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2099 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2100 "dp pll off, should be on\n");
2101 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2102
2103 /* We can't rely on the value tracked for the DP register in
2104 * intel_dp->DP because link_down must not change that (otherwise link
 2105 * re-training will fail). */
298b0b39 2106 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2107 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2108 POSTING_READ(DP_A);
d240f20f
JB
2109 udelay(200);
2110}
2111
c7ad3810 2112/* If the sink supports it, try to set the power state appropriately */
c19b0669 2113void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2114{
2115 int ret, i;
2116
2117 /* Should have a valid DPCD by this point */
2118 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2119 return;
2120
2121 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2122 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2123 DP_SET_POWER_D3);
c7ad3810
JB
2124 } else {
2125 /*
2126 * When turning on, we need to retry for 1ms to give the sink
2127 * time to wake up.
2128 */
2129 for (i = 0; i < 3; i++) {
9d1a1031
JN
2130 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2131 DP_SET_POWER_D0);
c7ad3810
JB
2132 if (ret == 1)
2133 break;
2134 msleep(1);
2135 }
2136 }
f9cac721
JN
2137
2138 if (ret != 1)
2139 DRM_DEBUG_KMS("failed to %s sink power state\n",
2140 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2141}
2142
19d8fe15
DV
2143static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2144 enum pipe *pipe)
d240f20f 2145{
19d8fe15 2146 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2147 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2148 struct drm_device *dev = encoder->base.dev;
2149 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2150 enum intel_display_power_domain power_domain;
2151 u32 tmp;
2152
2153 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2154 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2155 return false;
2156
2157 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2158
2159 if (!(tmp & DP_PORT_EN))
2160 return false;
2161
bc7d38a4 2162 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 2163 *pipe = PORT_TO_PIPE_CPT(tmp);
71485e0a
VS
2164 } else if (IS_CHERRYVIEW(dev)) {
2165 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
bc7d38a4 2166 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
2167 *pipe = PORT_TO_PIPE(tmp);
2168 } else {
2169 u32 trans_sel;
2170 u32 trans_dp;
2171 int i;
2172
2173 switch (intel_dp->output_reg) {
2174 case PCH_DP_B:
2175 trans_sel = TRANS_DP_PORT_SEL_B;
2176 break;
2177 case PCH_DP_C:
2178 trans_sel = TRANS_DP_PORT_SEL_C;
2179 break;
2180 case PCH_DP_D:
2181 trans_sel = TRANS_DP_PORT_SEL_D;
2182 break;
2183 default:
2184 return true;
2185 }
2186
055e393f 2187 for_each_pipe(dev_priv, i) {
19d8fe15
DV
2188 trans_dp = I915_READ(TRANS_DP_CTL(i));
2189 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
2190 *pipe = i;
2191 return true;
2192 }
2193 }
19d8fe15 2194
4a0833ec
DV
2195 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2196 intel_dp->output_reg);
2197 }
d240f20f 2198
19d8fe15
DV
2199 return true;
2200}
d240f20f 2201
045ac3b5 2202static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2203 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2204{
2205 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2206 u32 tmp, flags = 0;
63000ef6
XZ
2207 struct drm_device *dev = encoder->base.dev;
2208 struct drm_i915_private *dev_priv = dev->dev_private;
2209 enum port port = dp_to_dig_port(intel_dp)->port;
2210 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2211 int dotclock;
045ac3b5 2212
9ed109a7
DV
2213 tmp = I915_READ(intel_dp->output_reg);
2214 if (tmp & DP_AUDIO_OUTPUT_ENABLE)
2215 pipe_config->has_audio = true;
2216
63000ef6 2217 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
63000ef6
XZ
2218 if (tmp & DP_SYNC_HS_HIGH)
2219 flags |= DRM_MODE_FLAG_PHSYNC;
2220 else
2221 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2222
63000ef6
XZ
2223 if (tmp & DP_SYNC_VS_HIGH)
2224 flags |= DRM_MODE_FLAG_PVSYNC;
2225 else
2226 flags |= DRM_MODE_FLAG_NVSYNC;
2227 } else {
2228 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2229 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2230 flags |= DRM_MODE_FLAG_PHSYNC;
2231 else
2232 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2233
63000ef6
XZ
2234 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2235 flags |= DRM_MODE_FLAG_PVSYNC;
2236 else
2237 flags |= DRM_MODE_FLAG_NVSYNC;
2238 }
045ac3b5 2239
2d112de7 2240 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2241
8c875fca
VS
2242 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2243 tmp & DP_COLOR_RANGE_16_235)
2244 pipe_config->limited_color_range = true;
2245
eb14cb74
VS
2246 pipe_config->has_dp_encoder = true;
2247
2248 intel_dp_get_m_n(crtc, pipe_config);
2249
18442d08 2250 if (port == PORT_A) {
f1f644dc
JB
2251 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2252 pipe_config->port_clock = 162000;
2253 else
2254 pipe_config->port_clock = 270000;
2255 }
18442d08
VS
2256
2257 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2258 &pipe_config->dp_m_n);
2259
2260 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2261 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2262
2d112de7 2263 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2264
c6cd2ee2
JN
2265 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2266 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2267 /*
2268 * This is a big fat ugly hack.
2269 *
2270 * Some machines in UEFI boot mode provide us a VBT that has 18
2271 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2272 * unknown we fail to light up. Yet the same BIOS boots up with
2273 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2274 * max, not what it tells us to use.
2275 *
2276 * Note: This will still be broken if the eDP panel is not lit
2277 * up by the BIOS, and thus we can't get the mode at module
2278 * load.
2279 */
2280 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2281 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2282 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2283 }
045ac3b5
JB
2284}
2285
e8cb4558 2286static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2287{
e8cb4558 2288 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2289 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2290 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2291
6e3c9717 2292 if (crtc->config->has_audio)
495a5bb8 2293 intel_audio_codec_disable(encoder);
6cb49835 2294
b32c6f48
RV
2295 if (HAS_PSR(dev) && !HAS_DDI(dev))
2296 intel_psr_disable(intel_dp);
2297
6cb49835
DV
2298 /* Make sure the panel is off before trying to change the mode. But also
2299 * ensure that we have vdd while we switch off the panel. */
24f3e092 2300 intel_edp_panel_vdd_on(intel_dp);
4be73780 2301 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2302 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2303 intel_edp_panel_off(intel_dp);
3739850b 2304
08aff3fe
VS
2305 /* disable the port before the pipe on g4x */
2306 if (INTEL_INFO(dev)->gen < 5)
3739850b 2307 intel_dp_link_down(intel_dp);
d240f20f
JB
2308}
2309
08aff3fe 2310static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2311{
2bd2ad64 2312 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2313 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2314
49277c31 2315 intel_dp_link_down(intel_dp);
08aff3fe
VS
2316 if (port == PORT_A)
2317 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2318}
2319
2320static void vlv_post_disable_dp(struct intel_encoder *encoder)
2321{
2322 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2323
2324 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2325}
2326
580d3811
VS
2327static void chv_post_disable_dp(struct intel_encoder *encoder)
2328{
2329 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2330 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2331 struct drm_device *dev = encoder->base.dev;
2332 struct drm_i915_private *dev_priv = dev->dev_private;
2333 struct intel_crtc *intel_crtc =
2334 to_intel_crtc(encoder->base.crtc);
2335 enum dpio_channel ch = vlv_dport_to_channel(dport);
2336 enum pipe pipe = intel_crtc->pipe;
2337 u32 val;
2338
2339 intel_dp_link_down(intel_dp);
2340
2341 mutex_lock(&dev_priv->dpio_lock);
2342
2343 /* Propagate soft reset to data lane reset */
97fd4d5c 2344 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2345 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c 2346 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2347
97fd4d5c
VS
2348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2349 val |= CHV_PCS_REQ_SOFTRESET_EN;
2350 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2351
2352 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2353 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2354 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2355
2356 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
580d3811 2357 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2358 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
580d3811
VS
2359
2360 mutex_unlock(&dev_priv->dpio_lock);
2361}
2362
7b13b58a
VS
2363static void
2364_intel_dp_set_link_train(struct intel_dp *intel_dp,
2365 uint32_t *DP,
2366 uint8_t dp_train_pat)
2367{
2368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2369 struct drm_device *dev = intel_dig_port->base.base.dev;
2370 struct drm_i915_private *dev_priv = dev->dev_private;
2371 enum port port = intel_dig_port->port;
2372
2373 if (HAS_DDI(dev)) {
2374 uint32_t temp = I915_READ(DP_TP_CTL(port));
2375
2376 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2377 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2378 else
2379 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2380
2381 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2382 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2383 case DP_TRAINING_PATTERN_DISABLE:
2384 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2385
2386 break;
2387 case DP_TRAINING_PATTERN_1:
2388 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2389 break;
2390 case DP_TRAINING_PATTERN_2:
2391 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2392 break;
2393 case DP_TRAINING_PATTERN_3:
2394 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2395 break;
2396 }
2397 I915_WRITE(DP_TP_CTL(port), temp);
2398
2399 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
2400 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2401
2402 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2403 case DP_TRAINING_PATTERN_DISABLE:
2404 *DP |= DP_LINK_TRAIN_OFF_CPT;
2405 break;
2406 case DP_TRAINING_PATTERN_1:
2407 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2408 break;
2409 case DP_TRAINING_PATTERN_2:
2410 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2411 break;
2412 case DP_TRAINING_PATTERN_3:
2413 DRM_ERROR("DP training pattern 3 not supported\n");
2414 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2415 break;
2416 }
2417
2418 } else {
2419 if (IS_CHERRYVIEW(dev))
2420 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2421 else
2422 *DP &= ~DP_LINK_TRAIN_MASK;
2423
2424 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2425 case DP_TRAINING_PATTERN_DISABLE:
2426 *DP |= DP_LINK_TRAIN_OFF;
2427 break;
2428 case DP_TRAINING_PATTERN_1:
2429 *DP |= DP_LINK_TRAIN_PAT_1;
2430 break;
2431 case DP_TRAINING_PATTERN_2:
2432 *DP |= DP_LINK_TRAIN_PAT_2;
2433 break;
2434 case DP_TRAINING_PATTERN_3:
2435 if (IS_CHERRYVIEW(dev)) {
2436 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2437 } else {
2438 DRM_ERROR("DP training pattern 3 not supported\n");
2439 *DP |= DP_LINK_TRAIN_PAT_2;
2440 }
2441 break;
2442 }
2443 }
2444}
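/*
 * Summary of the three paths above: DDI platforms program the training
 * pattern through DP_TP_CTL, CPT platforms use the _CPT training bits in
 * the DP port register for PCH ports (and for port A on gen7), and
 * everything else falls back to the legacy DP_LINK_TRAIN_* bits, where
 * training pattern 3 is only available on CHV.
 */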
2445
2446static void intel_dp_enable_port(struct intel_dp *intel_dp)
2447{
2448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2449 struct drm_i915_private *dev_priv = dev->dev_private;
2450
7b13b58a
VS
2451 /* enable with pattern 1 (as per spec) */
2452 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2453 DP_TRAINING_PATTERN_1);
2454
2455 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2456 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2457
2458 /*
2459 * Magic for VLV/CHV. We _must_ first set up the register
2460 * without actually enabling the port, and then do another
2461 * write to enable the port. Otherwise link training will
2462 * fail when the power sequencer is freshly used for this port.
2463 */
2464 intel_dp->DP |= DP_PORT_EN;
2465
2466 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2467 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2468}
2469
e8cb4558 2470static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2471{
e8cb4558
DV
2472 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2473 struct drm_device *dev = encoder->base.dev;
2474 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2475 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2476 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2477
0c33d8d7
DV
2478 if (WARN_ON(dp_reg & DP_PORT_EN))
2479 return;
5d613501 2480
093e3f13
VS
2481 pps_lock(intel_dp);
2482
2483 if (IS_VALLEYVIEW(dev))
2484 vlv_init_panel_power_sequencer(intel_dp);
2485
7b13b58a 2486 intel_dp_enable_port(intel_dp);
093e3f13
VS
2487
2488 edp_panel_vdd_on(intel_dp);
2489 edp_panel_on(intel_dp);
2490 edp_panel_vdd_off(intel_dp, true);
2491
2492 pps_unlock(intel_dp);
2493
61234fa5
VS
2494 if (IS_VALLEYVIEW(dev))
2495 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2496
f01eca2e 2497 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2498 intel_dp_start_link_train(intel_dp);
33a34e4e 2499 intel_dp_complete_link_train(intel_dp);
3ab9c637 2500 intel_dp_stop_link_train(intel_dp);
c1dec79a 2501
6e3c9717 2502 if (crtc->config->has_audio) {
c1dec79a
JN
2503 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2504 pipe_name(crtc->pipe));
2505 intel_audio_codec_enable(encoder);
2506 }
ab1f90f9 2507}
89b667f8 2508
ecff4f3b
JN
2509static void g4x_enable_dp(struct intel_encoder *encoder)
2510{
828f5c6e
JN
2511 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2512
ecff4f3b 2513 intel_enable_dp(encoder);
4be73780 2514 intel_edp_backlight_on(intel_dp);
ab1f90f9 2515}
89b667f8 2516
ab1f90f9
JN
2517static void vlv_enable_dp(struct intel_encoder *encoder)
2518{
828f5c6e
JN
2519 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2520
4be73780 2521 intel_edp_backlight_on(intel_dp);
b32c6f48 2522 intel_psr_enable(intel_dp);
d240f20f
JB
2523}
2524
ecff4f3b 2525static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2526{
2527 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2528 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2529
8ac33ed3
DV
2530 intel_dp_prepare(encoder);
2531
d41f1efb
DV
2532 /* Only ilk+ has port A */
2533 if (dport->port == PORT_A) {
2534 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2535 ironlake_edp_pll_on(intel_dp);
d41f1efb 2536 }
ab1f90f9
JN
2537}
2538
83b84597
VS
2539static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2540{
2541 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2542 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2543 enum pipe pipe = intel_dp->pps_pipe;
2544 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2545
2546 edp_panel_vdd_off_sync(intel_dp);
2547
2548 /*
 2549 * VLV seems to get confused when multiple power sequencers
 2550 * have the same port selected (even if only one has power/vdd
 2551 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2552 * CHV, on the other hand, doesn't seem to mind having the same port
 2553 * selected in multiple power sequencers, but let's always clear
 2554 * the port select when logically disconnecting a power sequencer
 2555 * from a port.
2556 */
2557 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2558 pipe_name(pipe), port_name(intel_dig_port->port));
2559 I915_WRITE(pp_on_reg, 0);
2560 POSTING_READ(pp_on_reg);
2561
2562 intel_dp->pps_pipe = INVALID_PIPE;
2563}
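/* Used below by vlv_steal_power_sequencer() and vlv_init_panel_power_sequencer(). */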
2564
a4a5d2f8
VS
2565static void vlv_steal_power_sequencer(struct drm_device *dev,
2566 enum pipe pipe)
2567{
2568 struct drm_i915_private *dev_priv = dev->dev_private;
2569 struct intel_encoder *encoder;
2570
2571 lockdep_assert_held(&dev_priv->pps_mutex);
2572
ac3c12e4
VS
2573 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2574 return;
2575
a4a5d2f8
VS
2576 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2577 base.head) {
2578 struct intel_dp *intel_dp;
773538e8 2579 enum port port;
a4a5d2f8
VS
2580
2581 if (encoder->type != INTEL_OUTPUT_EDP)
2582 continue;
2583
2584 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2585 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2586
2587 if (intel_dp->pps_pipe != pipe)
2588 continue;
2589
2590 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2591 pipe_name(pipe), port_name(port));
a4a5d2f8 2592
034e43c6
VS
2593 WARN(encoder->connectors_active,
2594 "stealing pipe %c power sequencer from active eDP port %c\n",
2595 pipe_name(pipe), port_name(port));
a4a5d2f8 2596
a4a5d2f8 2597 /* make sure vdd is off before we steal it */
83b84597 2598 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2599 }
2600}
2601
2602static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2603{
2604 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2605 struct intel_encoder *encoder = &intel_dig_port->base;
2606 struct drm_device *dev = encoder->base.dev;
2607 struct drm_i915_private *dev_priv = dev->dev_private;
2608 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2609
2610 lockdep_assert_held(&dev_priv->pps_mutex);
2611
093e3f13
VS
2612 if (!is_edp(intel_dp))
2613 return;
2614
a4a5d2f8
VS
2615 if (intel_dp->pps_pipe == crtc->pipe)
2616 return;
2617
2618 /*
 2619 * If another power sequencer was previously being used on this
 2620 * port, make sure to turn off vdd there while we still have
 2621 * control of it.
2622 */
2623 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2624 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2625
2626 /*
2627 * We may be stealing the power
2628 * sequencer from another port.
2629 */
2630 vlv_steal_power_sequencer(dev, crtc->pipe);
2631
2632 /* now it's all ours */
2633 intel_dp->pps_pipe = crtc->pipe;
2634
2635 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2636 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2637
2638 /* init power sequencer on this pipe and port */
36b5f425
VS
2639 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2640 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2641}
2642
ab1f90f9 2643static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2644{
2bd2ad64 2645 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2646 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2647 struct drm_device *dev = encoder->base.dev;
89b667f8 2648 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2649 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2650 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2651 int pipe = intel_crtc->pipe;
2652 u32 val;
a4fc5ed6 2653
ab1f90f9 2654 mutex_lock(&dev_priv->dpio_lock);
89b667f8 2655
ab3c759a 2656 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2657 val = 0;
2658 if (pipe)
2659 val |= (1<<21);
2660 else
2661 val &= ~(1<<21);
2662 val |= 0x001000c4;
ab3c759a
CML
2663 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2664 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2665 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2666
ab1f90f9
JN
2667 mutex_unlock(&dev_priv->dpio_lock);
2668
2669 intel_enable_dp(encoder);
89b667f8
JB
2670}
2671
ecff4f3b 2672static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2673{
2674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2675 struct drm_device *dev = encoder->base.dev;
2676 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2677 struct intel_crtc *intel_crtc =
2678 to_intel_crtc(encoder->base.crtc);
e4607fcf 2679 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2680 int pipe = intel_crtc->pipe;
89b667f8 2681
8ac33ed3
DV
2682 intel_dp_prepare(encoder);
2683
89b667f8 2684 /* Program Tx lane resets to default */
0980a60f 2685 mutex_lock(&dev_priv->dpio_lock);
ab3c759a 2686 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2687 DPIO_PCS_TX_LANE2_RESET |
2688 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2689 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2690 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2691 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2692 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2693 DPIO_PCS_CLK_SOFT_RESET);
2694
2695 /* Fix up inter-pair skew failure */
ab3c759a
CML
2696 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
0980a60f 2699 mutex_unlock(&dev_priv->dpio_lock);
a4fc5ed6
KP
2700}
2701
e4a1d846
CML
2702static void chv_pre_enable_dp(struct intel_encoder *encoder)
2703{
2704 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2705 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2706 struct drm_device *dev = encoder->base.dev;
2707 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2708 struct intel_crtc *intel_crtc =
2709 to_intel_crtc(encoder->base.crtc);
2710 enum dpio_channel ch = vlv_dport_to_channel(dport);
2711 int pipe = intel_crtc->pipe;
2712 int data, i;
949c1d43 2713 u32 val;
e4a1d846 2714
e4a1d846 2715 mutex_lock(&dev_priv->dpio_lock);
949c1d43 2716
570e2a74
VS
2717 /* allow hardware to manage TX FIFO reset source */
2718 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2719 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2720 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2721
2722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2723 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2724 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2725
949c1d43 2726 /* Deassert soft data lane reset */
97fd4d5c 2727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2728 val |= CHV_PCS_REQ_SOFTRESET_EN;
97fd4d5c
VS
2729 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2730
2731 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2732 val |= CHV_PCS_REQ_SOFTRESET_EN;
2733 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2734
2735 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2736 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2737 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
d2152b25 2738
97fd4d5c 2739 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
949c1d43 2740 val |= (DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
97fd4d5c 2741 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
949c1d43
VS
2742
 2743 /* Program Tx lane latency optimal setting */
e4a1d846
CML
2744 for (i = 0; i < 4; i++) {
2745 /* Set the latency optimal bit */
2746 data = (i == 1) ? 0x0 : 0x6;
2747 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW11(ch, i),
2748 data << DPIO_FRC_LATENCY_SHFIT);
2749
2750 /* Set the upar bit */
2751 data = (i == 1) ? 0x0 : 0x1;
2752 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2753 data << DPIO_UPAR_SHIFT);
2754 }
2755
2756 /* Data lane stagger programming */
2757 /* FIXME: Fix up value only after power analysis */
2758
2759 mutex_unlock(&dev_priv->dpio_lock);
2760
e4a1d846 2761 intel_enable_dp(encoder);
e4a1d846
CML
2762}
2763
9197c88b
VS
2764static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2765{
2766 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2767 struct drm_device *dev = encoder->base.dev;
2768 struct drm_i915_private *dev_priv = dev->dev_private;
2769 struct intel_crtc *intel_crtc =
2770 to_intel_crtc(encoder->base.crtc);
2771 enum dpio_channel ch = vlv_dport_to_channel(dport);
2772 enum pipe pipe = intel_crtc->pipe;
2773 u32 val;
2774
625695f8
VS
2775 intel_dp_prepare(encoder);
2776
9197c88b
VS
2777 mutex_lock(&dev_priv->dpio_lock);
2778
b9e5ac3c
VS
2779 /* program left/right clock distribution */
2780 if (pipe != PIPE_B) {
2781 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2782 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2783 if (ch == DPIO_CH0)
2784 val |= CHV_BUFLEFTENA1_FORCE;
2785 if (ch == DPIO_CH1)
2786 val |= CHV_BUFRIGHTENA1_FORCE;
2787 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2788 } else {
2789 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2790 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2791 if (ch == DPIO_CH0)
2792 val |= CHV_BUFLEFTENA2_FORCE;
2793 if (ch == DPIO_CH1)
2794 val |= CHV_BUFRIGHTENA2_FORCE;
2795 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2796 }
2797
9197c88b
VS
2798 /* program clock channel usage */
2799 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2800 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2801 if (pipe != PIPE_B)
2802 val &= ~CHV_PCS_USEDCLKCHANNEL;
2803 else
2804 val |= CHV_PCS_USEDCLKCHANNEL;
2805 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2806
2807 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2808 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2809 if (pipe != PIPE_B)
2810 val &= ~CHV_PCS_USEDCLKCHANNEL;
2811 else
2812 val |= CHV_PCS_USEDCLKCHANNEL;
2813 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2814
2815 /*
 2816 * This is a bit weird since generally CL
2817 * matches the pipe, but here we need to
2818 * pick the CL based on the port.
2819 */
2820 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2821 if (pipe != PIPE_B)
2822 val &= ~CHV_CMN_USEDCLKCHANNEL;
2823 else
2824 val |= CHV_CMN_USEDCLKCHANNEL;
2825 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2826
2827 mutex_unlock(&dev_priv->dpio_lock);
2828}
2829
a4fc5ed6 2830/*
df0c237d
JB
2831 * Native read with retry for link status and receiver capability reads for
2832 * cases where the sink may still be asleep.
9d1a1031
JN
2833 *
2834 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
2835 * supposed to retry 3 times per the spec.
a4fc5ed6 2836 */
9d1a1031
JN
2837static ssize_t
2838intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2839 void *buffer, size_t size)
a4fc5ed6 2840{
9d1a1031
JN
2841 ssize_t ret;
2842 int i;
61da5fab 2843
f6a19066
VS
2844 /*
 2845 * Sometimes we just get the same incorrect byte repeated
 2846 * over the entire buffer. Doing just one throw-away read
2847 * initially seems to "solve" it.
2848 */
2849 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2850
61da5fab 2851 for (i = 0; i < 3; i++) {
9d1a1031
JN
2852 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2853 if (ret == size)
2854 return ret;
61da5fab
JB
2855 msleep(1);
2856 }
a4fc5ed6 2857
9d1a1031 2858 return ret;
a4fc5ed6
KP
2859}
2860
2861/*
2862 * Fetch AUX CH registers 0x202 - 0x207 which contain
2863 * link status information
2864 */
2865static bool
93f62dad 2866intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 2867{
9d1a1031
JN
2868 return intel_dp_dpcd_read_wake(&intel_dp->aux,
2869 DP_LANE0_1_STATUS,
2870 link_status,
2871 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
2872}
2873
1100244e 2874/* These are source-specific values. */
a4fc5ed6 2875static uint8_t
1a2eb460 2876intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 2877{
30add22d 2878 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 2879 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 2880 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2881
7ad14a29
SJ
2882 if (INTEL_INFO(dev)->gen >= 9) {
2883 if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
2884 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 2885 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 2886 } else if (IS_VALLEYVIEW(dev))
bd60018a 2887 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 2888 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 2889 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 2890 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 2891 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 2892 else
bd60018a 2893 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
2894}
2895
2896static uint8_t
2897intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2898{
30add22d 2899 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 2900 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 2901
5a9d1f1a
DL
2902 if (INTEL_INFO(dev)->gen >= 9) {
2903 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2904 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2905 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2906 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2907 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2908 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2909 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
2910 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
2911 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
2912 default:
2913 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2914 }
2915 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 2916 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2917 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2918 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2919 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2920 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2921 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2922 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2923 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 2924 default:
bd60018a 2925 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 2926 }
e2fa6fba
P
2927 } else if (IS_VALLEYVIEW(dev)) {
2928 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2929 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2930 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2931 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2932 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2933 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2934 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2935 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 2936 default:
bd60018a 2937 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 2938 }
bc7d38a4 2939 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 2940 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2941 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2942 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2943 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2944 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2945 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 2946 default:
bd60018a 2947 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
2948 }
2949 } else {
2950 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
2951 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2952 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2953 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2954 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2955 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2956 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2957 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 2958 default:
bd60018a 2959 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 2960 }
a4fc5ed6
KP
2961 }
2962}
2963
e2fa6fba
P
2964static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
2965{
2966 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2967 struct drm_i915_private *dev_priv = dev->dev_private;
2968 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
2969 struct intel_crtc *intel_crtc =
2970 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
2971 unsigned long demph_reg_value, preemph_reg_value,
2972 uniqtranscale_reg_value;
2973 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 2974 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2975 int pipe = intel_crtc->pipe;
e2fa6fba
P
2976
2977 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 2978 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
2979 preemph_reg_value = 0x0004000;
2980 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 2981 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
2982 demph_reg_value = 0x2B405555;
2983 uniqtranscale_reg_value = 0x552AB83A;
2984 break;
bd60018a 2985 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
2986 demph_reg_value = 0x2B404040;
2987 uniqtranscale_reg_value = 0x5548B83A;
2988 break;
bd60018a 2989 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
2990 demph_reg_value = 0x2B245555;
2991 uniqtranscale_reg_value = 0x5560B83A;
2992 break;
bd60018a 2993 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
2994 demph_reg_value = 0x2B405555;
2995 uniqtranscale_reg_value = 0x5598DA3A;
2996 break;
2997 default:
2998 return 0;
2999 }
3000 break;
bd60018a 3001 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3002 preemph_reg_value = 0x0002000;
3003 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3004 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3005 demph_reg_value = 0x2B404040;
3006 uniqtranscale_reg_value = 0x5552B83A;
3007 break;
bd60018a 3008 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3009 demph_reg_value = 0x2B404848;
3010 uniqtranscale_reg_value = 0x5580B83A;
3011 break;
bd60018a 3012 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3013 demph_reg_value = 0x2B404040;
3014 uniqtranscale_reg_value = 0x55ADDA3A;
3015 break;
3016 default:
3017 return 0;
3018 }
3019 break;
bd60018a 3020 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3021 preemph_reg_value = 0x0000000;
3022 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3023 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3024 demph_reg_value = 0x2B305555;
3025 uniqtranscale_reg_value = 0x5570B83A;
3026 break;
bd60018a 3027 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3028 demph_reg_value = 0x2B2B4040;
3029 uniqtranscale_reg_value = 0x55ADDA3A;
3030 break;
3031 default:
3032 return 0;
3033 }
3034 break;
bd60018a 3035 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3036 preemph_reg_value = 0x0006000;
3037 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3038 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3039 demph_reg_value = 0x1B405555;
3040 uniqtranscale_reg_value = 0x55ADDA3A;
3041 break;
3042 default:
3043 return 0;
3044 }
3045 break;
3046 default:
3047 return 0;
3048 }
3049
0980a60f 3050 mutex_lock(&dev_priv->dpio_lock);
ab3c759a
CML
3051 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3052 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3053 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3054 uniqtranscale_reg_value);
ab3c759a
CML
3055 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3056 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3057 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3058 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
0980a60f 3059 mutex_unlock(&dev_priv->dpio_lock);
e2fa6fba
P
3060
3061 return 0;
3062}
3063
e4a1d846
CML
3064static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3065{
3066 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3067 struct drm_i915_private *dev_priv = dev->dev_private;
3068 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3069 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3070 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3071 uint8_t train_set = intel_dp->train_set[0];
3072 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3073 enum pipe pipe = intel_crtc->pipe;
3074 int i;
e4a1d846
CML
3075
3076 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3077 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3078 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3079 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3080 deemph_reg_value = 128;
3081 margin_reg_value = 52;
3082 break;
bd60018a 3083 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3084 deemph_reg_value = 128;
3085 margin_reg_value = 77;
3086 break;
bd60018a 3087 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3088 deemph_reg_value = 128;
3089 margin_reg_value = 102;
3090 break;
bd60018a 3091 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3092 deemph_reg_value = 128;
3093 margin_reg_value = 154;
3094 /* FIXME extra to set for 1200 */
3095 break;
3096 default:
3097 return 0;
3098 }
3099 break;
bd60018a 3100 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3101 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3102 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3103 deemph_reg_value = 85;
3104 margin_reg_value = 78;
3105 break;
bd60018a 3106 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3107 deemph_reg_value = 85;
3108 margin_reg_value = 116;
3109 break;
bd60018a 3110 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3111 deemph_reg_value = 85;
3112 margin_reg_value = 154;
3113 break;
3114 default:
3115 return 0;
3116 }
3117 break;
bd60018a 3118 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3119 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3120 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3121 deemph_reg_value = 64;
3122 margin_reg_value = 104;
3123 break;
bd60018a 3124 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3125 deemph_reg_value = 64;
3126 margin_reg_value = 154;
3127 break;
3128 default:
3129 return 0;
3130 }
3131 break;
bd60018a 3132 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3133 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3135 deemph_reg_value = 43;
3136 margin_reg_value = 154;
3137 break;
3138 default:
3139 return 0;
3140 }
3141 break;
3142 default:
3143 return 0;
3144 }
3145
3146 mutex_lock(&dev_priv->dpio_lock);
3147
3148 /* Clear calc init */
1966e59e
VS
3149 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3150 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3151 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3152 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3153 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3154
3155 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3156 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3157 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3158 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e 3159 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846 3160
a02ef3c7
VS
3161 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3162 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3163 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3164 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3165
3166 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3167 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3168 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3169 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3170
e4a1d846 3171 /* Program swing deemph */
f72df8db
VS
3172 for (i = 0; i < 4; i++) {
3173 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3174 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3175 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3176 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3177 }
e4a1d846
CML
3178
3179 /* Program swing margin */
f72df8db
VS
3180 for (i = 0; i < 4; i++) {
3181 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
1fb44505
VS
3182 val &= ~DPIO_SWING_MARGIN000_MASK;
3183 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
f72df8db
VS
3184 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3185 }
e4a1d846
CML
3186
3187 /* Disable unique transition scale */
f72df8db
VS
3188 for (i = 0; i < 4; i++) {
3189 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3190 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3191 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3192 }
e4a1d846
CML
3193
3194 if (((train_set & DP_TRAIN_PRE_EMPHASIS_MASK)
bd60018a 3195 == DP_TRAIN_PRE_EMPH_LEVEL_0) &&
e4a1d846 3196 ((train_set & DP_TRAIN_VOLTAGE_SWING_MASK)
bd60018a 3197 == DP_TRAIN_VOLTAGE_SWING_LEVEL_3)) {
e4a1d846
CML
3198
3199 /*
3200 * The document said it needs to set bit 27 for ch0 and bit 26
3201 * for ch1. Might be a typo in the doc.
3202 * For now, for this unique transition scale selection, set bit
3203 * 27 for ch0 and ch1.
3204 */
f72df8db
VS
3205 for (i = 0; i < 4; i++) {
3206 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3207 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3208 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3209 }
e4a1d846 3210
f72df8db
VS
3211 for (i = 0; i < 4; i++) {
3212 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3213 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3214 val |= (0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3215 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3216 }
e4a1d846
CML
3217 }
3218
3219 /* Start swing calculation */
1966e59e
VS
3220 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3221 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3222 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3223
3224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3225 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3226 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
e4a1d846
CML
3227
3228 /* LRC Bypass */
3229 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3230 val |= DPIO_LRC_BYPASS;
3231 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3232
3233 mutex_unlock(&dev_priv->dpio_lock);
3234
3235 return 0;
3236}
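/*
 * Note on the CHV sequence above (as far as this function goes): under
 * dpio_lock it clears the PCS swing calculation bits, programs the
 * de-emphasis (CHV_TX_DW4) and swing margin (CHV_TX_DW2) derived from
 * train_set for all four TX lanes, normally disables the unique
 * transition scale (CHV_TX_DW3) and enables it only for the swing level
 * 3 / pre-emphasis level 0 combination, then restarts the swing
 * calculation and sets LRC bypass. The deemph/margin numbers come
 * straight from the switch statement at the top of the function.
 */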
3237
a4fc5ed6 3238static void
0301b3ac
JN
3239intel_get_adjust_train(struct intel_dp *intel_dp,
3240 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3241{
3242 uint8_t v = 0;
3243 uint8_t p = 0;
3244 int lane;
1a2eb460
KP
3245 uint8_t voltage_max;
3246 uint8_t preemph_max;
a4fc5ed6 3247
33a34e4e 3248 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3249 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3250 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3251
3252 if (this_v > v)
3253 v = this_v;
3254 if (this_p > p)
3255 p = this_p;
3256 }
3257
1a2eb460 3258 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3259 if (v >= voltage_max)
3260 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3261
1a2eb460
KP
3262 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3263 if (p >= preemph_max)
3264 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3265
3266 for (lane = 0; lane < 4; lane++)
33a34e4e 3267 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3268}
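/*
 * Illustrative example for intel_get_adjust_train() above: the highest
 * voltage swing and pre-emphasis requested by the sink across all active
 * lanes wins, so if lane 0 asks for swing level 1 and lane 1 asks for
 * swing level 2, every entry of train_set ends up at level 2. When the
 * request reaches the source limit from intel_dp_voltage_max()/
 * intel_dp_pre_emphasis_max(), the corresponding MAX_*_REACHED flag is
 * set as well so the sink knows not to ask for more.
 */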
3269
3270static uint32_t
f0a3424e 3271intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3272{
3cf2efb1 3273 uint32_t signal_levels = 0;
a4fc5ed6 3274
3cf2efb1 3275 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3276 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3277 default:
3278 signal_levels |= DP_VOLTAGE_0_4;
3279 break;
bd60018a 3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3281 signal_levels |= DP_VOLTAGE_0_6;
3282 break;
bd60018a 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3284 signal_levels |= DP_VOLTAGE_0_8;
3285 break;
bd60018a 3286 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3287 signal_levels |= DP_VOLTAGE_1_2;
3288 break;
3289 }
3cf2efb1 3290 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3291 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3292 default:
3293 signal_levels |= DP_PRE_EMPHASIS_0;
3294 break;
bd60018a 3295 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3296 signal_levels |= DP_PRE_EMPHASIS_3_5;
3297 break;
bd60018a 3298 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3299 signal_levels |= DP_PRE_EMPHASIS_6;
3300 break;
bd60018a 3301 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3302 signal_levels |= DP_PRE_EMPHASIS_9_5;
3303 break;
3304 }
3305 return signal_levels;
3306}
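/*
 * Example mapping for intel_gen4_signal_levels() above: a train_set of
 * voltage swing level 1 with pre-emphasis level 2 yields
 * DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6, i.e. the DPCD adjust request is
 * translated directly into the voltage/pre-emphasis bits of the DP port
 * register.
 */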
3307
e3421a18
ZW
3308/* Gen6's DP voltage swing and pre-emphasis control */
3309static uint32_t
3310intel_gen6_edp_signal_levels(uint8_t train_set)
3311{
3c5a62b5
YL
3312 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3313 DP_TRAIN_PRE_EMPHASIS_MASK);
3314 switch (signal_levels) {
bd60018a
SJ
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3317 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3319 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3321 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3322 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3323 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3325 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3327 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3328 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3329 default:
3c5a62b5
YL
3330 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3331 "0x%x\n", signal_levels);
3332 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3333 }
3334}
3335
1a2eb460
KP
3336/* Gen7's DP voltage swing and pre-emphasis control */
3337static uint32_t
3338intel_gen7_edp_signal_levels(uint8_t train_set)
3339{
3340 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3341 DP_TRAIN_PRE_EMPHASIS_MASK);
3342 switch (signal_levels) {
bd60018a 3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3344 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3345 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3346 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3348 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3349
bd60018a 3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3351 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3352 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3353 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3354
bd60018a 3355 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3356 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3358 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3359
3360 default:
3361 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3362 "0x%x\n", signal_levels);
3363 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3364 }
3365}
3366
d6c0d722
PZ
3367/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
3368static uint32_t
f0a3424e 3369intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 3370{
d6c0d722
PZ
3371 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3372 DP_TRAIN_PRE_EMPHASIS_MASK);
3373 switch (signal_levels) {
bd60018a 3374 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3375 return DDI_BUF_TRANS_SELECT(0);
bd60018a 3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3377 return DDI_BUF_TRANS_SELECT(1);
bd60018a 3378 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3379 return DDI_BUF_TRANS_SELECT(2);
bd60018a 3380 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
c5fe6a06 3381 return DDI_BUF_TRANS_SELECT(3);
a4fc5ed6 3382
bd60018a 3383 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3384 return DDI_BUF_TRANS_SELECT(4);
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3386 return DDI_BUF_TRANS_SELECT(5);
bd60018a 3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
c5fe6a06 3388 return DDI_BUF_TRANS_SELECT(6);
a4fc5ed6 3389
bd60018a 3390 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
c5fe6a06 3391 return DDI_BUF_TRANS_SELECT(7);
bd60018a 3392 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
c5fe6a06 3393 return DDI_BUF_TRANS_SELECT(8);
7ad14a29
SJ
3394
3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3396 return DDI_BUF_TRANS_SELECT(9);
d6c0d722
PZ
3397 default:
3398 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3399 "0x%x\n", signal_levels);
c5fe6a06 3400 return DDI_BUF_TRANS_SELECT(0);
a4fc5ed6 3401 }
a4fc5ed6
KP
3402}
3403
f0a3424e
PZ
3404/* Properly updates "DP" with the correct signal levels. */
3405static void
3406intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3407{
3408 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3409 enum port port = intel_dig_port->port;
f0a3424e
PZ
3410 struct drm_device *dev = intel_dig_port->base.base.dev;
3411 uint32_t signal_levels, mask;
3412 uint8_t train_set = intel_dp->train_set[0];
3413
5a9d1f1a 3414 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
f0a3424e
PZ
3415 signal_levels = intel_hsw_signal_levels(train_set);
3416 mask = DDI_BUF_EMP_MASK;
e4a1d846
CML
3417 } else if (IS_CHERRYVIEW(dev)) {
3418 signal_levels = intel_chv_signal_levels(intel_dp);
3419 mask = 0;
e2fa6fba
P
3420 } else if (IS_VALLEYVIEW(dev)) {
3421 signal_levels = intel_vlv_signal_levels(intel_dp);
3422 mask = 0;
bc7d38a4 3423 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
3424 signal_levels = intel_gen7_edp_signal_levels(train_set);
3425 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3426 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
3427 signal_levels = intel_gen6_edp_signal_levels(train_set);
3428 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3429 } else {
3430 signal_levels = intel_gen4_signal_levels(train_set);
3431 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3432 }
3433
3434 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3435
3436 *DP = (*DP & ~mask) | signal_levels;
3437}
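/*
 * Note: the mask above selects which bits of the DP port register are
 * owned by the signal level code. On VLV/CHV it is 0 because swing and
 * pre-emphasis are programmed through the DPIO PHY registers
 * (vlv_dpio_write) rather than through the port register, so *DP is left
 * untouched on those platforms.
 */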
3438
a4fc5ed6 3439static bool
ea5b213a 3440intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3441 uint32_t *DP,
58e10eb9 3442 uint8_t dp_train_pat)
a4fc5ed6 3443{
174edf1f
PZ
3444 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3445 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3446 struct drm_i915_private *dev_priv = dev->dev_private;
2cdfe6c8
JN
3447 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3448 int ret, len;
a4fc5ed6 3449
7b13b58a 3450 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3451
70aff66c 3452 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3453 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3454
2cdfe6c8
JN
3455 buf[0] = dp_train_pat;
3456 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3457 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3458 /* don't write DP_TRAINING_LANEx_SET on disable */
3459 len = 1;
3460 } else {
3461	 /* DP_TRAINING_LANEx_SET follows DP_TRAINING_PATTERN_SET */
3462 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3463 len = intel_dp->lane_count + 1;
47ea7542 3464 }
a4fc5ed6 3465
9d1a1031
JN
3466 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3467 buf, len);
2cdfe6c8
JN
3468
3469 return ret == len;
a4fc5ed6
KP
3470}
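/*
 * DPCD write layout used above: buf[0] goes to DP_TRAINING_PATTERN_SET
 * and, unless the pattern is DP_TRAINING_PATTERN_DISABLE, buf[1..lane_count]
 * carry the per-lane train_set values which land in the consecutive
 * DP_TRAINING_LANEx_SET registers, all in a single drm_dp_dpcd_write().
 */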
3471
70aff66c
JN
3472static bool
3473intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3474 uint8_t dp_train_pat)
3475{
953d22e8 3476 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3477 intel_dp_set_signal_levels(intel_dp, DP);
3478 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3479}
3480
3481static bool
3482intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3483 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3484{
3485 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3486 struct drm_device *dev = intel_dig_port->base.base.dev;
3487 struct drm_i915_private *dev_priv = dev->dev_private;
3488 int ret;
3489
3490 intel_get_adjust_train(intel_dp, link_status);
3491 intel_dp_set_signal_levels(intel_dp, DP);
3492
3493 I915_WRITE(intel_dp->output_reg, *DP);
3494 POSTING_READ(intel_dp->output_reg);
3495
9d1a1031
JN
3496 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
3497 intel_dp->train_set, intel_dp->lane_count);
70aff66c
JN
3498
3499 return ret == intel_dp->lane_count;
3500}
3501
3ab9c637
ID
3502static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3503{
3504 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3505 struct drm_device *dev = intel_dig_port->base.base.dev;
3506 struct drm_i915_private *dev_priv = dev->dev_private;
3507 enum port port = intel_dig_port->port;
3508 uint32_t val;
3509
3510 if (!HAS_DDI(dev))
3511 return;
3512
3513 val = I915_READ(DP_TP_CTL(port));
3514 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3515 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3516 I915_WRITE(DP_TP_CTL(port), val);
3517
3518 /*
3519 * On PORT_A we can have only eDP in SST mode. There the only reason
3520 * we need to set idle transmission mode is to work around a HW issue
3521 * where we enable the pipe while not in idle link-training mode.
3522	 * In this case there is a requirement to wait for a minimum number of
3523 * idle patterns to be sent.
3524 */
3525 if (port == PORT_A)
3526 return;
3527
3528 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3529 1))
3530 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3531}
3532
33a34e4e 3533/* Enable corresponding port and start training pattern 1 */
c19b0669 3534void
33a34e4e 3535intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3536{
da63a9f2 3537 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3538 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3539 int i;
3540 uint8_t voltage;
cdb0e95b 3541 int voltage_tries, loop_tries;
ea5b213a 3542 uint32_t DP = intel_dp->DP;
6aba5b6c 3543 uint8_t link_config[2];
a4fc5ed6 3544
affa9354 3545 if (HAS_DDI(dev))
c19b0669
PZ
3546 intel_ddi_prepare_link_retrain(encoder);
3547
3cf2efb1 3548 /* Write the link configuration data */
6aba5b6c
JN
3549 link_config[0] = intel_dp->link_bw;
3550 link_config[1] = intel_dp->lane_count;
3551 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3552 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3553 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3554 if (intel_dp->num_sink_rates)
a8f3ef61
SJ
3555 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
3556 &intel_dp->rate_select, 1);
6aba5b6c
JN
3557
3558 link_config[0] = 0;
3559 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3560 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3561
3562 DP |= DP_PORT_EN;
1a2eb460 3563
70aff66c
JN
3564 /* clock recovery */
3565 if (!intel_dp_reset_link_train(intel_dp, &DP,
3566 DP_TRAINING_PATTERN_1 |
3567 DP_LINK_SCRAMBLING_DISABLE)) {
3568 DRM_ERROR("failed to enable link training\n");
3569 return;
3570 }
3571
a4fc5ed6 3572 voltage = 0xff;
cdb0e95b
KP
3573 voltage_tries = 0;
3574 loop_tries = 0;
a4fc5ed6 3575 for (;;) {
70aff66c 3576 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3577
a7c9655f 3578 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3579 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3580 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3581 break;
93f62dad 3582 }
a4fc5ed6 3583
01916270 3584 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3585 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3586 break;
3587 }
3588
3589 /* Check to see if we've tried the max voltage */
3590 for (i = 0; i < intel_dp->lane_count; i++)
3591 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3592 break;
3b4f819d 3593 if (i == intel_dp->lane_count) {
b06fbda3
DV
3594 ++loop_tries;
3595 if (loop_tries == 5) {
3def84b3 3596 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3597 break;
3598 }
70aff66c
JN
3599 intel_dp_reset_link_train(intel_dp, &DP,
3600 DP_TRAINING_PATTERN_1 |
3601 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3602 voltage_tries = 0;
3603 continue;
3604 }
a4fc5ed6 3605
3cf2efb1 3606 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3607 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3608 ++voltage_tries;
b06fbda3 3609 if (voltage_tries == 5) {
3def84b3 3610 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3611 break;
3612 }
3613 } else
3614 voltage_tries = 0;
3615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3616
70aff66c
JN
3617 /* Update training set as requested by target */
3618 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3619 DRM_ERROR("failed to update link training\n");
3620 break;
3621 }
a4fc5ed6
KP
3622 }
3623
33a34e4e
JB
3624 intel_dp->DP = DP;
3625}
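/*
 * Rough shape of the clock recovery loop above: training pattern 1 is
 * transmitted with scrambling disabled, and after each link status read
 * the voltage swing/pre-emphasis are adjusted as requested by the sink.
 * The loop gives up after 5 attempts at the same voltage (voltage_tries)
 * or after 5 full restarts once every lane reports
 * DP_TRAIN_MAX_SWING_REACHED (loop_tries).
 */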
3626
c19b0669 3627void
33a34e4e
JB
3628intel_dp_complete_link_train(struct intel_dp *intel_dp)
3629{
33a34e4e 3630 bool channel_eq = false;
37f80975 3631 int tries, cr_tries;
33a34e4e 3632 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3633 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3634
3635 /* Training Pattern 3 for HBR2 ot 1.2 devices that support it*/
3636 if (intel_dp->link_bw == DP_LINK_BW_5_4 || intel_dp->use_tps3)
3637 training_pattern = DP_TRAINING_PATTERN_3;
33a34e4e 3638
a4fc5ed6 3639 /* channel equalization */
70aff66c 3640 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3641 training_pattern |
70aff66c
JN
3642 DP_LINK_SCRAMBLING_DISABLE)) {
3643 DRM_ERROR("failed to start channel equalization\n");
3644 return;
3645 }
3646
a4fc5ed6 3647 tries = 0;
37f80975 3648 cr_tries = 0;
a4fc5ed6
KP
3649 channel_eq = false;
3650 for (;;) {
70aff66c 3651 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3652
37f80975
JB
3653 if (cr_tries > 5) {
3654 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3655 break;
3656 }
3657
a7c9655f 3658 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3659 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3660 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3661 break;
70aff66c 3662 }
a4fc5ed6 3663
37f80975 3664 /* Make sure clock is still ok */
01916270 3665 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975 3666 intel_dp_start_link_train(intel_dp);
70aff66c 3667 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3668 training_pattern |
70aff66c 3669 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3670 cr_tries++;
3671 continue;
3672 }
3673
1ffdff13 3674 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
3675 channel_eq = true;
3676 break;
3677 }
a4fc5ed6 3678
37f80975
JB
3679 /* Try 5 times, then try clock recovery if that fails */
3680 if (tries > 5) {
37f80975 3681 intel_dp_start_link_train(intel_dp);
70aff66c 3682 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3683 training_pattern |
70aff66c 3684 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3685 tries = 0;
3686 cr_tries++;
3687 continue;
3688 }
a4fc5ed6 3689
70aff66c
JN
3690 /* Update training set as requested by target */
3691 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3692 DRM_ERROR("failed to update link training\n");
3693 break;
3694 }
3cf2efb1 3695 ++tries;
869184a6 3696 }
3cf2efb1 3697
3ab9c637
ID
3698 intel_dp_set_idle_link_train(intel_dp);
3699
3700 intel_dp->DP = DP;
3701
d6c0d722 3702 if (channel_eq)
07f42258 3703 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 3704
3ab9c637
ID
3705}
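/*
 * Channel equalization above uses training pattern 2, or pattern 3 when
 * the link runs at DP_LINK_BW_5_4 or the sink advertises TPS3 support
 * (intel_dp->use_tps3). Repeated EQ failures or a lost clock recovery
 * lock fall back to intel_dp_start_link_train(), and once cr_tries is
 * exhausted the training is aborted.
 */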
3706
3707void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3708{
70aff66c 3709 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3710 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3711}
3712
3713static void
ea5b213a 3714intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3715{
da63a9f2 3716 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3717 enum port port = intel_dig_port->port;
da63a9f2 3718 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3719 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3720 uint32_t DP = intel_dp->DP;
a4fc5ed6 3721
bc76e320 3722 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3723 return;
3724
0c33d8d7 3725 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3726 return;
3727
28c97730 3728 DRM_DEBUG_KMS("\n");
32f9d658 3729
bc7d38a4 3730 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 3731 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 3732 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18 3733 } else {
aad3d14d
VS
3734 if (IS_CHERRYVIEW(dev))
3735 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3736 else
3737 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 3738 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 3739 }
fe255d00 3740 POSTING_READ(intel_dp->output_reg);
5eb08b69 3741
493a7081 3742 if (HAS_PCH_IBX(dev) &&
1b39d6f3 3743 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
5bddd17f
EA
3744 /* Hardware workaround: leaving our transcoder select
3745 * set to transcoder B while it's off will prevent the
3746 * corresponding HDMI output on transcoder A.
3747 *
3748 * Combine this with another hardware workaround:
3749 * transcoder select bit can only be cleared while the
3750 * port is enabled.
3751 */
3752 DP &= ~DP_PIPEB_SELECT;
3753 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3754 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3755 }
3756
832afda6 3757 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
3758 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
3759 POSTING_READ(intel_dp->output_reg);
f01eca2e 3760 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3761}
3762
26d61aad
KP
3763static bool
3764intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3765{
a031d709
RV
3766 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3767 struct drm_device *dev = dig_port->base.base.dev;
3768 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3769 uint8_t rev;
a031d709 3770
9d1a1031
JN
3771 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3772 sizeof(intel_dp->dpcd)) < 0)
edb39244 3773 return false; /* aux transfer failed */
92fd8fd1 3774
a8e98153 3775 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3776
edb39244
AJ
3777 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3778 return false; /* DPCD not present */
3779
2293bb5c
SK
3780 /* Check if the panel supports PSR */
3781 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3782 if (is_edp(intel_dp)) {
9d1a1031
JN
3783 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3784 intel_dp->psr_dpcd,
3785 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3786 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3787 dev_priv->psr.sink_support = true;
50003939 3788 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3789 }
50003939
JN
3790 }
3791
7809a611 3792 /* Training Pattern 3 support, both source and sink */
06ea66b6 3793 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
7809a611
JN
3794 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3795 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
06ea66b6 3796 intel_dp->use_tps3 = true;
f8d8a672 3797 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
06ea66b6
TP
3798 } else
3799 intel_dp->use_tps3 = false;
3800
fc0f8e25
SJ
3801 /* Intermediate frequency support */
3802 if (is_edp(intel_dp) &&
3803 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3804 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3805	 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 3806 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3807 int i;
3808
fc0f8e25
SJ
3809 intel_dp_dpcd_read_wake(&intel_dp->aux,
3810 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3811 sink_rates,
3812 sizeof(sink_rates));
ea2d8a42 3813
94ca719e
VS
3814 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3815 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3816
3817 if (val == 0)
3818 break;
3819
94ca719e 3820 intel_dp->sink_rates[i] = val * 200;
ea2d8a42 3821 }
94ca719e 3822 intel_dp->num_sink_rates = i;
fc0f8e25 3823 }
0336400e
VS
3824
3825 intel_dp_print_rates(intel_dp);
3826
edb39244
AJ
3827 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3828 DP_DWN_STRM_PORT_PRESENT))
3829 return true; /* native DP sink */
3830
3831 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3832 return true; /* no per-port downstream info */
3833
9d1a1031
JN
3834 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3835 intel_dp->downstream_ports,
3836 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3837 return false; /* downstream port status fetch failed */
3838
3839 return true;
92fd8fd1
KP
3840}
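/*
 * Note on the sink rate parsing above: for eDP v1.4 or higher
 * (DP_EDP_DPCD_REV >= 0x03) the DP_SUPPORTED_LINK_RATES table is read as
 * little-endian 16-bit entries, a zero entry terminating the list. Each
 * entry is multiplied by 200, which appears to convert the table's
 * 200 kHz granularity into the kHz link-clock values used elsewhere in
 * the driver (e.g. an entry of 810 becomes 162000).
 */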
3841
0d198328
AJ
3842static void
3843intel_dp_probe_oui(struct intel_dp *intel_dp)
3844{
3845 u8 buf[3];
3846
3847 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3848 return;
3849
9d1a1031 3850 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3851 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3852 buf[0], buf[1], buf[2]);
3853
9d1a1031 3854 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3855 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3856 buf[0], buf[1], buf[2]);
3857}
3858
0e32b39c
DA
3859static bool
3860intel_dp_probe_mst(struct intel_dp *intel_dp)
3861{
3862 u8 buf[1];
3863
3864 if (!intel_dp->can_mst)
3865 return false;
3866
3867 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3868 return false;
3869
0e32b39c
DA
3870 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3871 if (buf[0] & DP_MST_CAP) {
3872 DRM_DEBUG_KMS("Sink is MST capable\n");
3873 intel_dp->is_mst = true;
3874 } else {
3875 DRM_DEBUG_KMS("Sink is not MST capable\n");
3876 intel_dp->is_mst = false;
3877 }
3878 }
0e32b39c
DA
3879
3880 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3881 return intel_dp->is_mst;
3882}
3883
d2e216d0
RV
3884int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3885{
3886 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3887 struct drm_device *dev = intel_dig_port->base.base.dev;
3888 struct intel_crtc *intel_crtc =
3889 to_intel_crtc(intel_dig_port->base.base.crtc);
ad9dc91b
RV
3890 u8 buf;
3891 int test_crc_count;
3892 int attempts = 6;
d2e216d0 3893
ad9dc91b 3894 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3895 return -EIO;
d2e216d0 3896
ad9dc91b 3897 if (!(buf & DP_TEST_CRC_SUPPORTED))
d2e216d0
RV
3898 return -ENOTTY;
3899
1dda5f93
RV
3900 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3901 return -EIO;
3902
9d1a1031 3903 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
ce31d9f4 3904 buf | DP_TEST_SINK_START) < 0)
bda0381e 3905 return -EIO;
d2e216d0 3906
1dda5f93 3907 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
bda0381e 3908 return -EIO;
ad9dc91b 3909 test_crc_count = buf & DP_TEST_COUNT_MASK;
d2e216d0 3910
ad9dc91b 3911 do {
1dda5f93
RV
3912 if (drm_dp_dpcd_readb(&intel_dp->aux,
3913 DP_TEST_SINK_MISC, &buf) < 0)
3914 return -EIO;
ad9dc91b
RV
3915 intel_wait_for_vblank(dev, intel_crtc->pipe);
3916 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3917
3918 if (attempts == 0) {
90bd1f46
DV
3919 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3920 return -ETIMEDOUT;
ad9dc91b 3921 }
d2e216d0 3922
9d1a1031 3923 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
bda0381e 3924 return -EIO;
d2e216d0 3925
1dda5f93
RV
3926 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3927 return -EIO;
3928 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3929 buf & ~DP_TEST_SINK_START) < 0)
3930 return -EIO;
ce31d9f4 3931
d2e216d0
RV
3932 return 0;
3933}
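/*
 * Sketch of the sink CRC handshake above: after checking
 * DP_TEST_CRC_SUPPORTED, the sink is started via DP_TEST_SINK_START, the
 * code waits up to 6 vblanks for the DP_TEST_COUNT_MASK field of
 * DP_TEST_SINK_MISC to change, reads the 6 CRC bytes starting at
 * DP_TEST_CRC_R_CR, and finally clears DP_TEST_SINK_START again.
 */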
3934
a60f0e38
JB
3935static bool
3936intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3937{
9d1a1031
JN
3938 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3939 DP_DEVICE_SERVICE_IRQ_VECTOR,
3940 sink_irq_vector, 1) == 1;
a60f0e38
JB
3941}
3942
0e32b39c
DA
3943static bool
3944intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3945{
3946 int ret;
3947
3948 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
3949 DP_SINK_COUNT_ESI,
3950 sink_irq_vector, 14);
3951 if (ret != 14)
3952 return false;
3953
3954 return true;
3955}
3956
a60f0e38
JB
3957static void
3958intel_dp_handle_test_request(struct intel_dp *intel_dp)
3959{
3960 /* NAK by default */
9d1a1031 3961 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
3962}
3963
0e32b39c
DA
3964static int
3965intel_dp_check_mst_status(struct intel_dp *intel_dp)
3966{
3967 bool bret;
3968
3969 if (intel_dp->is_mst) {
3970 u8 esi[16] = { 0 };
3971 int ret = 0;
3972 int retry;
3973 bool handled;
3974 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
3975go_again:
3976 if (bret == true) {
3977
3978 /* check link status - esi[10] = 0x200c */
3979 if (intel_dp->active_mst_links && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
3980 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
3981 intel_dp_start_link_train(intel_dp);
3982 intel_dp_complete_link_train(intel_dp);
3983 intel_dp_stop_link_train(intel_dp);
3984 }
3985
6f34cc39 3986 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
3987 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
3988
3989 if (handled) {
3990 for (retry = 0; retry < 3; retry++) {
3991 int wret;
3992 wret = drm_dp_dpcd_write(&intel_dp->aux,
3993 DP_SINK_COUNT_ESI+1,
3994 &esi[1], 3);
3995 if (wret == 3) {
3996 break;
3997 }
3998 }
3999
4000 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4001 if (bret == true) {
6f34cc39 4002 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4003 goto go_again;
4004 }
4005 } else
4006 ret = 0;
4007
4008 return ret;
4009 } else {
4010 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4011 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4012 intel_dp->is_mst = false;
4013 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4014 /* send a hotplug event */
4015 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4016 }
4017 }
4018 return -EINVAL;
4019}
4020
a4fc5ed6
KP
4021/*
4022 * According to DP spec
4023 * 5.1.2:
4024 * 1. Read DPCD
4025 * 2. Configure link according to Receiver Capabilities
4026 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4027 * 4. Check link status on receipt of hot-plug interrupt
4028 */
a5146200 4029static void
ea5b213a 4030intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4031{
5b215bcf 4032 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4033 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4034 u8 sink_irq_vector;
93f62dad 4035 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4036
5b215bcf
DA
4037 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4038
da63a9f2 4039 if (!intel_encoder->connectors_active)
d2b996ac 4040 return;
59cd09e1 4041
da63a9f2 4042 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
4043 return;
4044
1a125d8a
ID
4045 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4046 return;
4047
92fd8fd1 4048 /* Try to read receiver status if the link appears to be up */
93f62dad 4049 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4050 return;
4051 }
4052
92fd8fd1 4053 /* Now read the DPCD to see if it's actually running */
26d61aad 4054 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4055 return;
4056 }
4057
a60f0e38
JB
4058 /* Try to read the source of the interrupt */
4059 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4060 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4061 /* Clear interrupt source */
9d1a1031
JN
4062 drm_dp_dpcd_writeb(&intel_dp->aux,
4063 DP_DEVICE_SERVICE_IRQ_VECTOR,
4064 sink_irq_vector);
a60f0e38
JB
4065
4066 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4067 intel_dp_handle_test_request(intel_dp);
4068 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4069 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4070 }
4071
1ffdff13 4072 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4073 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4074 intel_encoder->base.name);
33a34e4e
JB
4075 intel_dp_start_link_train(intel_dp);
4076 intel_dp_complete_link_train(intel_dp);
3ab9c637 4077 intel_dp_stop_link_train(intel_dp);
33a34e4e 4078 }
a4fc5ed6 4079}
a4fc5ed6 4080
caf9ab24 4081/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4082static enum drm_connector_status
26d61aad 4083intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4084{
caf9ab24 4085 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4086 uint8_t type;
4087
4088 if (!intel_dp_get_dpcd(intel_dp))
4089 return connector_status_disconnected;
4090
4091 /* if there's no downstream port, we're done */
4092 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4093 return connector_status_connected;
caf9ab24
AJ
4094
4095 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4096 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4097 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4098 uint8_t reg;
9d1a1031
JN
4099
4100 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4101 &reg, 1) < 0)
caf9ab24 4102 return connector_status_unknown;
9d1a1031 4103
23235177
AJ
4104 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4105 : connector_status_disconnected;
caf9ab24
AJ
4106 }
4107
4108 /* If no HPD, poke DDC gently */
0b99836f 4109 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4110 return connector_status_connected;
caf9ab24
AJ
4111
4112 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4113 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4114 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4115 if (type == DP_DS_PORT_TYPE_VGA ||
4116 type == DP_DS_PORT_TYPE_NON_EDID)
4117 return connector_status_unknown;
4118 } else {
4119 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4120 DP_DWN_STRM_PORT_TYPE_MASK;
4121 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4122 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4123 return connector_status_unknown;
4124 }
caf9ab24
AJ
4125
4126 /* Anything else is out of spec, warn and ignore */
4127 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4128 return connector_status_disconnected;
71ba9000
AJ
4129}
4130
d410b56d
CW
4131static enum drm_connector_status
4132edp_detect(struct intel_dp *intel_dp)
4133{
4134 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4135 enum drm_connector_status status;
4136
4137 status = intel_panel_detect(dev);
4138 if (status == connector_status_unknown)
4139 status = connector_status_connected;
4140
4141 return status;
4142}
4143
5eb08b69 4144static enum drm_connector_status
a9756bb5 4145ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 4146{
30add22d 4147 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
4148 struct drm_i915_private *dev_priv = dev->dev_private;
4149 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
01cb9ea6 4150
1b469639
DL
4151 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4152 return connector_status_disconnected;
4153
26d61aad 4154 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
4155}
4156
2a592bec
DA
4157static int g4x_digital_port_connected(struct drm_device *dev,
4158 struct intel_digital_port *intel_dig_port)
a4fc5ed6 4159{
a4fc5ed6 4160 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 4161 uint32_t bit;
5eb08b69 4162
232a6ee9
TP
4163 if (IS_VALLEYVIEW(dev)) {
4164 switch (intel_dig_port->port) {
4165 case PORT_B:
4166 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4167 break;
4168 case PORT_C:
4169 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4170 break;
4171 case PORT_D:
4172 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4173 break;
4174 default:
2a592bec 4175 return -EINVAL;
232a6ee9
TP
4176 }
4177 } else {
4178 switch (intel_dig_port->port) {
4179 case PORT_B:
4180 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4181 break;
4182 case PORT_C:
4183 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4184 break;
4185 case PORT_D:
4186 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4187 break;
4188 default:
2a592bec 4189 return -EINVAL;
232a6ee9 4190 }
a4fc5ed6
KP
4191 }
4192
10f76a38 4193 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2a592bec
DA
4194 return 0;
4195 return 1;
4196}
4197
4198static enum drm_connector_status
4199g4x_dp_detect(struct intel_dp *intel_dp)
4200{
4201 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4203 int ret;
4204
4205 /* Can't disconnect eDP, but you can close the lid... */
4206 if (is_edp(intel_dp)) {
4207 enum drm_connector_status status;
4208
4209 status = intel_panel_detect(dev);
4210 if (status == connector_status_unknown)
4211 status = connector_status_connected;
4212 return status;
4213 }
4214
4215 ret = g4x_digital_port_connected(dev, intel_dig_port);
4216 if (ret == -EINVAL)
4217 return connector_status_unknown;
4218 else if (ret == 0)
a4fc5ed6
KP
4219 return connector_status_disconnected;
4220
26d61aad 4221 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4222}
4223
8c241fef 4224static struct edid *
beb60608 4225intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4226{
beb60608 4227 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4228
9cd300e0
JN
4229 /* use cached edid if we have one */
4230 if (intel_connector->edid) {
9cd300e0
JN
4231 /* invalid edid */
4232 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4233 return NULL;
4234
55e9edeb 4235 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4236 } else
4237 return drm_get_edid(&intel_connector->base,
4238 &intel_dp->aux.ddc);
4239}
8c241fef 4240
beb60608
CW
4241static void
4242intel_dp_set_edid(struct intel_dp *intel_dp)
4243{
4244 struct intel_connector *intel_connector = intel_dp->attached_connector;
4245 struct edid *edid;
8c241fef 4246
beb60608
CW
4247 edid = intel_dp_get_edid(intel_dp);
4248 intel_connector->detect_edid = edid;
4249
4250 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4251 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4252 else
4253 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4254}
4255
beb60608
CW
4256static void
4257intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4258{
beb60608 4259 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4260
beb60608
CW
4261 kfree(intel_connector->detect_edid);
4262 intel_connector->detect_edid = NULL;
9cd300e0 4263
beb60608
CW
4264 intel_dp->has_audio = false;
4265}
d6f24d0f 4266
beb60608
CW
4267static enum intel_display_power_domain
4268intel_dp_power_get(struct intel_dp *dp)
4269{
4270 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4271 enum intel_display_power_domain power_domain;
4272
4273 power_domain = intel_display_port_power_domain(encoder);
4274 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4275
4276 return power_domain;
4277}
d6f24d0f 4278
beb60608
CW
4279static void
4280intel_dp_power_put(struct intel_dp *dp,
4281 enum intel_display_power_domain power_domain)
4282{
4283 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4284 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4285}
4286
a9756bb5
ZW
4287static enum drm_connector_status
4288intel_dp_detect(struct drm_connector *connector, bool force)
4289{
4290 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4291 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4292 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4293 struct drm_device *dev = connector->dev;
a9756bb5 4294 enum drm_connector_status status;
671dedd2 4295 enum intel_display_power_domain power_domain;
0e32b39c 4296 bool ret;
a9756bb5 4297
164c8598 4298 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4299 connector->base.id, connector->name);
beb60608 4300 intel_dp_unset_edid(intel_dp);
164c8598 4301
0e32b39c
DA
4302 if (intel_dp->is_mst) {
4303 /* MST devices are disconnected from a monitor POV */
4304 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4305 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4306 return connector_status_disconnected;
0e32b39c
DA
4307 }
4308
beb60608 4309 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4310
d410b56d
CW
4311 /* Can't disconnect eDP, but you can close the lid... */
4312 if (is_edp(intel_dp))
4313 status = edp_detect(intel_dp);
4314 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4315 status = ironlake_dp_detect(intel_dp);
4316 else
4317 status = g4x_dp_detect(intel_dp);
4318 if (status != connector_status_connected)
c8c8fb33 4319 goto out;
a9756bb5 4320
0d198328
AJ
4321 intel_dp_probe_oui(intel_dp);
4322
0e32b39c
DA
4323 ret = intel_dp_probe_mst(intel_dp);
4324 if (ret) {
4325 /* if we are in MST mode then this connector
4326 won't appear connected or have anything with EDID on it */
4327 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4328 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4329 status = connector_status_disconnected;
4330 goto out;
4331 }
4332
beb60608 4333 intel_dp_set_edid(intel_dp);
a9756bb5 4334
d63885da
PZ
4335 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4336 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4337 status = connector_status_connected;
4338
4339out:
beb60608 4340 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4341 return status;
a4fc5ed6
KP
4342}
4343
beb60608
CW
4344static void
4345intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4346{
df0e9248 4347 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4348 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4349 enum intel_display_power_domain power_domain;
a4fc5ed6 4350
beb60608
CW
4351 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4352 connector->base.id, connector->name);
4353 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4354
beb60608
CW
4355 if (connector->status != connector_status_connected)
4356 return;
671dedd2 4357
beb60608
CW
4358 power_domain = intel_dp_power_get(intel_dp);
4359
4360 intel_dp_set_edid(intel_dp);
4361
4362 intel_dp_power_put(intel_dp, power_domain);
4363
4364 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4365 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4366}
4367
4368static int intel_dp_get_modes(struct drm_connector *connector)
4369{
4370 struct intel_connector *intel_connector = to_intel_connector(connector);
4371 struct edid *edid;
4372
4373 edid = intel_connector->detect_edid;
4374 if (edid) {
4375 int ret = intel_connector_update_modes(connector, edid);
4376 if (ret)
4377 return ret;
4378 }
32f9d658 4379
f8779fda 4380 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4381 if (is_edp(intel_attached_dp(connector)) &&
4382 intel_connector->panel.fixed_mode) {
f8779fda 4383 struct drm_display_mode *mode;
beb60608
CW
4384
4385 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4386 intel_connector->panel.fixed_mode);
f8779fda 4387 if (mode) {
32f9d658
ZW
4388 drm_mode_probed_add(connector, mode);
4389 return 1;
4390 }
4391 }
beb60608 4392
32f9d658 4393 return 0;
a4fc5ed6
KP
4394}
4395
1aad7ac0
CW
4396static bool
4397intel_dp_detect_audio(struct drm_connector *connector)
4398{
1aad7ac0 4399 bool has_audio = false;
beb60608 4400 struct edid *edid;
1aad7ac0 4401
beb60608
CW
4402 edid = to_intel_connector(connector)->detect_edid;
4403 if (edid)
1aad7ac0 4404 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4405
1aad7ac0
CW
4406 return has_audio;
4407}
4408
f684960e
CW
4409static int
4410intel_dp_set_property(struct drm_connector *connector,
4411 struct drm_property *property,
4412 uint64_t val)
4413{
e953fd7b 4414 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4415 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4416 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4417 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4418 int ret;
4419
662595df 4420 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4421 if (ret)
4422 return ret;
4423
3f43c48d 4424 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4425 int i = val;
4426 bool has_audio;
4427
4428 if (i == intel_dp->force_audio)
f684960e
CW
4429 return 0;
4430
1aad7ac0 4431 intel_dp->force_audio = i;
f684960e 4432
c3e5f67b 4433 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4434 has_audio = intel_dp_detect_audio(connector);
4435 else
c3e5f67b 4436 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4437
4438 if (has_audio == intel_dp->has_audio)
f684960e
CW
4439 return 0;
4440
1aad7ac0 4441 intel_dp->has_audio = has_audio;
f684960e
CW
4442 goto done;
4443 }
4444
e953fd7b 4445 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
4446 bool old_auto = intel_dp->color_range_auto;
4447 uint32_t old_range = intel_dp->color_range;
4448
55bc60db
VS
4449 switch (val) {
4450 case INTEL_BROADCAST_RGB_AUTO:
4451 intel_dp->color_range_auto = true;
4452 break;
4453 case INTEL_BROADCAST_RGB_FULL:
4454 intel_dp->color_range_auto = false;
4455 intel_dp->color_range = 0;
4456 break;
4457 case INTEL_BROADCAST_RGB_LIMITED:
4458 intel_dp->color_range_auto = false;
4459 intel_dp->color_range = DP_COLOR_RANGE_16_235;
4460 break;
4461 default:
4462 return -EINVAL;
4463 }
ae4edb80
DV
4464
4465 if (old_auto == intel_dp->color_range_auto &&
4466 old_range == intel_dp->color_range)
4467 return 0;
4468
e953fd7b
CW
4469 goto done;
4470 }
4471
53b41837
YN
4472 if (is_edp(intel_dp) &&
4473 property == connector->dev->mode_config.scaling_mode_property) {
4474 if (val == DRM_MODE_SCALE_NONE) {
4475 DRM_DEBUG_KMS("no scaling not supported\n");
4476 return -EINVAL;
4477 }
4478
4479 if (intel_connector->panel.fitting_mode == val) {
4480 /* the eDP scaling property is not changed */
4481 return 0;
4482 }
4483 intel_connector->panel.fitting_mode = val;
4484
4485 goto done;
4486 }
4487
f684960e
CW
4488 return -EINVAL;
4489
4490done:
c0c36b94
CW
4491 if (intel_encoder->base.crtc)
4492 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4493
4494 return 0;
4495}
4496
a4fc5ed6 4497static void
73845adf 4498intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4499{
1d508706 4500 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4501
10e972d3 4502 kfree(intel_connector->detect_edid);
beb60608 4503
9cd300e0
JN
4504 if (!IS_ERR_OR_NULL(intel_connector->edid))
4505 kfree(intel_connector->edid);
4506
acd8db10
PZ
4507 /* Can't call is_edp() since the encoder may have been destroyed
4508 * already. */
4509 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4510 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4511
a4fc5ed6 4512 drm_connector_cleanup(connector);
55f78c43 4513 kfree(connector);
a4fc5ed6
KP
4514}
4515
00c09d70 4516void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4517{
da63a9f2
PZ
4518 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4519 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4520
4f71d0cb 4521 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 4522 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4523 if (is_edp(intel_dp)) {
4524 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4525 /*
4526	 * vdd might still be enabled due to the delayed vdd off.
4527 * Make sure vdd is actually turned off here.
4528 */
773538e8 4529 pps_lock(intel_dp);
4be73780 4530 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4531 pps_unlock(intel_dp);
4532
01527b31
CT
4533 if (intel_dp->edp_notifier.notifier_call) {
4534 unregister_reboot_notifier(&intel_dp->edp_notifier);
4535 intel_dp->edp_notifier.notifier_call = NULL;
4536 }
bd943159 4537 }
c8bd0e49 4538 drm_encoder_cleanup(encoder);
da63a9f2 4539 kfree(intel_dig_port);
24d05927
DV
4540}
4541
07f9cd0b
ID
4542static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4543{
4544 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4545
4546 if (!is_edp(intel_dp))
4547 return;
4548
951468f3
VS
4549 /*
4550	 * vdd might still be enabled due to the delayed vdd off.
4551 * Make sure vdd is actually turned off here.
4552 */
afa4e53a 4553 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4554 pps_lock(intel_dp);
07f9cd0b 4555 edp_panel_vdd_off_sync(intel_dp);
773538e8 4556 pps_unlock(intel_dp);
07f9cd0b
ID
4557}
4558
49e6bc51
VS
4559static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4560{
4561 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4562 struct drm_device *dev = intel_dig_port->base.base.dev;
4563 struct drm_i915_private *dev_priv = dev->dev_private;
4564 enum intel_display_power_domain power_domain;
4565
4566 lockdep_assert_held(&dev_priv->pps_mutex);
4567
4568 if (!edp_have_panel_vdd(intel_dp))
4569 return;
4570
4571 /*
4572 * The VDD bit needs a power domain reference, so if the bit is
4573 * already enabled when we boot or resume, grab this reference and
4574 * schedule a vdd off, so we don't hold on to the reference
4575 * indefinitely.
4576 */
4577 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4578 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4579 intel_display_power_get(dev_priv, power_domain);
4580
4581 edp_panel_vdd_schedule_off(intel_dp);
4582}
4583
6d93c0c4
ID
4584static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4585{
49e6bc51
VS
4586 struct intel_dp *intel_dp;
4587
4588 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4589 return;
4590
4591 intel_dp = enc_to_intel_dp(encoder);
4592
4593 pps_lock(intel_dp);
4594
4595 /*
4596 * Read out the current power sequencer assignment,
4597 * in case the BIOS did something with it.
4598 */
4599 if (IS_VALLEYVIEW(encoder->dev))
4600 vlv_initial_power_sequencer_setup(intel_dp);
4601
4602 intel_edp_panel_vdd_sanitize(intel_dp);
4603
4604 pps_unlock(intel_dp);
6d93c0c4
ID
4605}
4606
a4fc5ed6 4607static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 4608 .dpms = intel_connector_dpms,
a4fc5ed6 4609 .detect = intel_dp_detect,
beb60608 4610 .force = intel_dp_force,
a4fc5ed6 4611 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4612 .set_property = intel_dp_set_property,
2545e4a6 4613 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4614 .destroy = intel_dp_connector_destroy,
c6f95f27 4615 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4616 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4617};
4618
4619static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4620 .get_modes = intel_dp_get_modes,
4621 .mode_valid = intel_dp_mode_valid,
df0e9248 4622 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4623};
4624
a4fc5ed6 4625static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4626 .reset = intel_dp_encoder_reset,
24d05927 4627 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4628};
4629
0e32b39c 4630void
21d40d37 4631intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 4632{
0e32b39c 4633 return;
c8110e52 4634}
6207937d 4635
b2c5c181 4636enum irqreturn
13cf5504
DA
4637intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4638{
4639 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4640 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4641 struct drm_device *dev = intel_dig_port->base.base.dev;
4642 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4643 enum intel_display_power_domain power_domain;
b2c5c181 4644 enum irqreturn ret = IRQ_NONE;
1c767b33 4645
0e32b39c
DA
4646 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4647 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4648
7a7f84cc
VS
4649 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4650 /*
4651 * vdd off can generate a long pulse on eDP which
4652 * would require vdd on to handle it, and thus we
4653 * would end up in an endless cycle of
4654 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4655 */
4656 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4657 port_name(intel_dig_port->port));
a8b3d52f 4658 return IRQ_HANDLED;
7a7f84cc
VS
4659 }
4660
26fbb774
VS
4661 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4662 port_name(intel_dig_port->port),
0e32b39c 4663 long_hpd ? "long" : "short");
13cf5504 4664
1c767b33
ID
4665 power_domain = intel_display_port_power_domain(intel_encoder);
4666 intel_display_power_get(dev_priv, power_domain);
4667
0e32b39c 4668 if (long_hpd) {
2a592bec
DA
4669
4670 if (HAS_PCH_SPLIT(dev)) {
4671 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
4672 goto mst_fail;
4673 } else {
4674 if (g4x_digital_port_connected(dev, intel_dig_port) != 1)
4675 goto mst_fail;
4676 }
0e32b39c
DA
4677
4678 if (!intel_dp_get_dpcd(intel_dp)) {
4679 goto mst_fail;
4680 }
4681
4682 intel_dp_probe_oui(intel_dp);
4683
4684 if (!intel_dp_probe_mst(intel_dp))
4685 goto mst_fail;
4686
4687 } else {
4688 if (intel_dp->is_mst) {
1c767b33 4689 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
4690 goto mst_fail;
4691 }
4692
4693 if (!intel_dp->is_mst) {
4694 /*
4695 * we'll check the link status via the normal hot plug path later -
4696 * but for short hpds we should check it now
4697 */
5b215bcf 4698 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 4699 intel_dp_check_link_status(intel_dp);
5b215bcf 4700 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
4701 }
4702 }
b2c5c181
DV
4703
4704 ret = IRQ_HANDLED;
4705
1c767b33 4706 goto put_power;
0e32b39c
DA
4707mst_fail:
4708 /* if we were in MST mode, and device is not there get out of MST mode */
4709 if (intel_dp->is_mst) {
4710 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
4711 intel_dp->is_mst = false;
4712 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4713 }
1c767b33
ID
4714put_power:
4715 intel_display_power_put(dev_priv, power_domain);
4716
4717 return ret;
13cf5504
DA
4718}
4719
e3421a18
ZW
4720/* Return which DP Port should be selected for Transcoder DP control */
4721int
0206e353 4722intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
4723{
4724 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
4725 struct intel_encoder *intel_encoder;
4726 struct intel_dp *intel_dp;
e3421a18 4727
fa90ecef
PZ
4728 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4729 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 4730
fa90ecef
PZ
4731 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4732 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 4733 return intel_dp->output_reg;
e3421a18 4734 }
ea5b213a 4735
e3421a18
ZW
4736 return -1;
4737}
4738
36e83a18 4739/* check the VBT to see whether the eDP is on DP-D port */
5d8a7752 4740bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
4741{
4742 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 4743 union child_device_config *p_child;
36e83a18 4744 int i;
5d8a7752
VS
4745 static const short port_mapping[] = {
4746 [PORT_B] = PORT_IDPB,
4747 [PORT_C] = PORT_IDPC,
4748 [PORT_D] = PORT_IDPD,
4749 };
36e83a18 4750
3b32a35b
VS
4751 if (port == PORT_A)
4752 return true;
4753
41aa3448 4754 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
4755 return false;
4756
41aa3448
RV
4757 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
4758 p_child = dev_priv->vbt.child_dev + i;
36e83a18 4759
5d8a7752 4760 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
4761 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
4762 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
4763 return true;
4764 }
4765 return false;
4766}
4767
0e32b39c 4768void
f684960e
CW
4769intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
4770{
53b41837
YN
4771 struct intel_connector *intel_connector = to_intel_connector(connector);
4772
3f43c48d 4773 intel_attach_force_audio_property(connector);
e953fd7b 4774 intel_attach_broadcast_rgb_property(connector);
55bc60db 4775 intel_dp->color_range_auto = true;
53b41837
YN
4776
4777 if (is_edp(intel_dp)) {
4778 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
4779 drm_object_attach_property(
4780 &connector->base,
53b41837 4781 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
4782 DRM_MODE_SCALE_ASPECT);
4783 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 4784 }
f684960e
CW
4785}
4786
dada1a9f
ID
4787static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4788{
4789 intel_dp->last_power_cycle = jiffies;
4790 intel_dp->last_power_on = jiffies;
4791 intel_dp->last_backlight_off = jiffies;
4792}
4793
67a54566
DV
4794static void
4795intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 4796 struct intel_dp *intel_dp)
67a54566
DV
4797{
4798 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
4799 struct edp_power_seq cur, vbt, spec,
4800 *final = &intel_dp->pps_delays;
67a54566 4801 u32 pp_on, pp_off, pp_div, pp;
bf13e81b 4802 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 4803
e39b999a
VS
4804 lockdep_assert_held(&dev_priv->pps_mutex);
4805
81ddbc69
VS
4806 /* already initialized? */
4807 if (final->t11_t12 != 0)
4808 return;
4809
453c5420 4810 if (HAS_PCH_SPLIT(dev)) {
bf13e81b 4811 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
4812 pp_on_reg = PCH_PP_ON_DELAYS;
4813 pp_off_reg = PCH_PP_OFF_DELAYS;
4814 pp_div_reg = PCH_PP_DIVISOR;
4815 } else {
bf13e81b
JN
4816 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4817
4818 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
4819 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4820 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4821 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 4822 }
67a54566
DV
4823
4824 /* Workaround: Need to write PP_CONTROL with the unlock key as
4825 * the very first thing. */
453c5420 4826 pp = ironlake_get_pp_control(intel_dp);
bf13e81b 4827 I915_WRITE(pp_ctrl_reg, pp);
67a54566 4828
453c5420
JB
4829 pp_on = I915_READ(pp_on_reg);
4830 pp_off = I915_READ(pp_off_reg);
4831 pp_div = I915_READ(pp_div_reg);
67a54566
DV
4832
4833 /* Pull timing values out of registers */
4834 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
4835 PANEL_POWER_UP_DELAY_SHIFT;
4836
4837 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
4838 PANEL_LIGHT_ON_DELAY_SHIFT;
4839
4840 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
4841 PANEL_LIGHT_OFF_DELAY_SHIFT;
4842
4843 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
4844 PANEL_POWER_DOWN_DELAY_SHIFT;
4845
4846 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
4847 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
4848
4849 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4850 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
4851
41aa3448 4852 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
4853
4854 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
4855 * our hw here, which are all in 100usec. */
4856 spec.t1_t3 = 210 * 10;
4857 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
4858 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
4859 spec.t10 = 500 * 10;
4860 /* This one is special and actually in units of 100ms, but zero
4861 * based in the hw (so we need to add 100 ms). But the sw vbt
4862 * table multiplies it by 1000 to make it in units of 100usec,
4863 * too. */
4864 spec.t11_t12 = (510 + 100) * 10;
4865
4866 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4867 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
4868
4869 /* Use the max of the register settings and vbt. If both are
4870 * unset, fall back to the spec limits. */
36b5f425 4871#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
4872 spec.field : \
4873 max(cur.field, vbt.field))
4874 assign_final(t1_t3);
4875 assign_final(t8);
4876 assign_final(t9);
4877 assign_final(t10);
4878 assign_final(t11_t12);
4879#undef assign_final
4880
36b5f425 4881#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
4882 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4883 intel_dp->backlight_on_delay = get_delay(t8);
4884 intel_dp->backlight_off_delay = get_delay(t9);
4885 intel_dp->panel_power_down_delay = get_delay(t10);
4886 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
4887#undef get_delay
4888
f30d26e4
JN
4889 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
4890 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
4891 intel_dp->panel_power_cycle_delay);
4892
4893 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4894 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
4895}
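/*
 * Editor's note: an illustrative worked example of the delay resolution
 * above (hypothetical values, not taken from any particular platform).
 * Suppose the registers read back cur.t1_t3 = 0 while the VBT provides
 * vbt.t1_t3 = 2100 (both in 100us units): assign_final() keeps
 * max(0, 2100) = 2100, and get_delay() converts that to milliseconds, so
 * panel_power_up_delay becomes DIV_ROUND_UP(2100, 10) = 210 ms. Had both
 * cur and vbt been zero, the eDP spec limit spec.t1_t3 = 210 * 10 would
 * have been used instead, yielding the same 210 ms upper bound.
 */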
4896
4897static void
4898intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 4899 struct intel_dp *intel_dp)
f30d26e4
JN
4900{
4901 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
4902 u32 pp_on, pp_off, pp_div, port_sel = 0;
4903 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4904 int pp_on_reg, pp_off_reg, pp_div_reg;
ad933b56 4905 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 4906 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 4907
e39b999a 4908 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420
JB
4909
4910 if (HAS_PCH_SPLIT(dev)) {
4911 pp_on_reg = PCH_PP_ON_DELAYS;
4912 pp_off_reg = PCH_PP_OFF_DELAYS;
4913 pp_div_reg = PCH_PP_DIVISOR;
4914 } else {
bf13e81b
JN
4915 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
4916
4917 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
4918 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
4919 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
4920 }
4921
b2f19d1a
PZ
4922 /*
4923 * And finally store the new values in the power sequencer. The
4924 * backlight delays are set to 1 because we do manual waits on them. For
4925 * T8, even BSpec recommends doing it. For T9, if we don't do this,
4926 * we'll end up waiting for the backlight off delay twice: once when we
4927 * do the manual sleep, and once when we disable the panel and wait for
4928 * the PP_STATUS bit to become zero.
4929 */
f30d26e4 4930 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
4931 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
4932 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 4933 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
4934 /* Compute the divisor for the pp clock, simply match the Bspec
4935 * formula. */
453c5420 4936 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 4937 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
4938 << PANEL_POWER_CYCLE_DELAY_SHIFT);
4939
4940 /* Haswell doesn't have any port selection bits for the panel
4941 * power sequencer any more. */
bc7d38a4 4942 if (IS_VALLEYVIEW(dev)) {
ad933b56 4943 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 4944 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 4945 if (port == PORT_A)
a24c144c 4946 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 4947 else
a24c144c 4948 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
4949 }
4950
453c5420
JB
4951 pp_on |= port_sel;
4952
4953 I915_WRITE(pp_on_reg, pp_on);
4954 I915_WRITE(pp_off_reg, pp_off);
4955 I915_WRITE(pp_div_reg, pp_div);
67a54566 4956
67a54566 4957 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
4958 I915_READ(pp_on_reg),
4959 I915_READ(pp_off_reg),
4960 I915_READ(pp_div_reg));
f684960e
CW
4961}
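/*
 * Editor's note on the unit conversion above (illustrative numbers only):
 * seq->t11_t12 is kept in the driver's 100us units, while the
 * PANEL_POWER_CYCLE_DELAY field of the PP divisor register counts in
 * 100ms steps, hence the DIV_ROUND_UP(seq->t11_t12, 1000). For example, a
 * 500 ms power cycle delay is stored as t11_t12 = 5000, and
 * DIV_ROUND_UP(5000, 1000) = 5 is what ends up in the register field.
 */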
4962
b33a2815
VK
4963/**
4964 * intel_dp_set_drrs_state - program registers for RR switch to take effect
4965 * @dev: DRM device
4966 * @refresh_rate: RR to be programmed
4967 *
4968 * This function gets called when refresh rate (RR) has to be changed from
4969 * one frequency to another. Switches can be between high and low RR
4970 * supported by the panel or to any other RR based on media playback (in
4971 * this case, RR value needs to be passed from user space).
4972 *
4973 * The caller of this function needs to hold dev_priv->drrs.mutex.
4974 */
96178eeb 4975static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
4976{
4977 struct drm_i915_private *dev_priv = dev->dev_private;
4978 struct intel_encoder *encoder;
96178eeb
VK
4979 struct intel_digital_port *dig_port = NULL;
4980 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 4981 struct intel_crtc_state *config = NULL;
439d7ac0 4982 struct intel_crtc *intel_crtc = NULL;
439d7ac0 4983 u32 reg, val;
96178eeb 4984 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
4985
4986 if (refresh_rate <= 0) {
4987 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
4988 return;
4989 }
4990
96178eeb
VK
4991 if (intel_dp == NULL) {
4992 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
4993 return;
4994 }
4995
1fcc9d1c 4996 /*
e4d59f6b
RV
4997 * FIXME: This needs proper synchronization with psr state for some
4998 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 4999 */
439d7ac0 5000
96178eeb
VK
5001 dig_port = dp_to_dig_port(intel_dp);
5002 encoder = &dig_port->base;
723f9aab 5003 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5004
5005 if (!intel_crtc) {
5006 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5007 return;
5008 }
5009
6e3c9717 5010 config = intel_crtc->config;
439d7ac0 5011
96178eeb 5012 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5013 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5014 return;
5015 }
5016
96178eeb
VK
5017 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5018 refresh_rate)
439d7ac0
PB
5019 index = DRRS_LOW_RR;
5020
96178eeb 5021 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5022 DRM_DEBUG_KMS(
5023 "DRRS requested for previously set RR...ignoring\n");
5024 return;
5025 }
5026
5027 if (!intel_crtc->active) {
5028 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5029 return;
5030 }
5031
44395bfe 5032 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5033 switch (index) {
5034 case DRRS_HIGH_RR:
5035 intel_dp_set_m_n(intel_crtc, M1_N1);
5036 break;
5037 case DRRS_LOW_RR:
5038 intel_dp_set_m_n(intel_crtc, M2_N2);
5039 break;
5040 case DRRS_MAX_RR:
5041 default:
5042 DRM_ERROR("Unsupported refresh rate type\n");
5043 }
5044 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5045 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5046 val = I915_READ(reg);
a4c30b1d 5047
439d7ac0 5048 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5049 if (IS_VALLEYVIEW(dev))
5050 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5051 else
5052 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5053 } else {
6fa7aec1
VK
5054 if (IS_VALLEYVIEW(dev))
5055 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5056 else
5057 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5058 }
5059 I915_WRITE(reg, val);
5060 }
5061
4e9ac947
VK
5062 dev_priv->drrs.refresh_rate_type = index;
5063
5064 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5065}
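/*
 * Editor's sketch of the calling convention for intel_dp_set_drrs_state(),
 * mirroring the downclock work and the flush/invalidate hooks below (not a
 * new call site): dev_priv->drrs.mutex must already be held, and the
 * refresh rate passed in comes from the panel's fixed or downclock mode.
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	if (dev_priv->drrs.dp)
 *		intel_dp_set_drrs_state(dev_priv->dev,
 *				dev_priv->drrs.dp->attached_connector->panel.
 *				downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */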
5066
b33a2815
VK
5067/**
5068 * intel_edp_drrs_enable - init drrs struct if supported
5069 * @intel_dp: DP struct
5070 *
5071 * Initializes frontbuffer_bits and drrs.dp
5072 */
c395578e
VK
5073void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5074{
5075 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5076 struct drm_i915_private *dev_priv = dev->dev_private;
5077 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5078 struct drm_crtc *crtc = dig_port->base.base.crtc;
5079 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5080
5081 if (!intel_crtc->config->has_drrs) {
5082 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5083 return;
5084 }
5085
5086 mutex_lock(&dev_priv->drrs.mutex);
5087 if (WARN_ON(dev_priv->drrs.dp)) {
5088 DRM_ERROR("DRRS already enabled\n");
5089 goto unlock;
5090 }
5091
5092 dev_priv->drrs.busy_frontbuffer_bits = 0;
5093
5094 dev_priv->drrs.dp = intel_dp;
5095
5096unlock:
5097 mutex_unlock(&dev_priv->drrs.mutex);
5098}
5099
b33a2815
VK
5100/**
5101 * intel_edp_drrs_disable - Disable DRRS
5102 * @intel_dp: DP struct
5103 *
5104 */
c395578e
VK
5105void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5106{
5107 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5108 struct drm_i915_private *dev_priv = dev->dev_private;
5109 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5110 struct drm_crtc *crtc = dig_port->base.base.crtc;
5111 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5112
5113 if (!intel_crtc->config->has_drrs)
5114 return;
5115
5116 mutex_lock(&dev_priv->drrs.mutex);
5117 if (!dev_priv->drrs.dp) {
5118 mutex_unlock(&dev_priv->drrs.mutex);
5119 return;
5120 }
5121
5122 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5123 intel_dp_set_drrs_state(dev_priv->dev,
5124 intel_dp->attached_connector->panel.
5125 fixed_mode->vrefresh);
5126
5127 dev_priv->drrs.dp = NULL;
5128 mutex_unlock(&dev_priv->drrs.mutex);
5129
5130 cancel_delayed_work_sync(&dev_priv->drrs.work);
5131}
5132
4e9ac947
VK
5133static void intel_edp_drrs_downclock_work(struct work_struct *work)
5134{
5135 struct drm_i915_private *dev_priv =
5136 container_of(work, typeof(*dev_priv), drrs.work.work);
5137 struct intel_dp *intel_dp;
5138
5139 mutex_lock(&dev_priv->drrs.mutex);
5140
5141 intel_dp = dev_priv->drrs.dp;
5142
5143 if (!intel_dp)
5144 goto unlock;
5145
439d7ac0 5146 /*
4e9ac947
VK
5147 * The delayed work can race with an invalidate hence we need to
5148 * recheck.
439d7ac0
PB
5149 */
5150
4e9ac947
VK
5151 if (dev_priv->drrs.busy_frontbuffer_bits)
5152 goto unlock;
439d7ac0 5153
4e9ac947
VK
5154 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5155 intel_dp_set_drrs_state(dev_priv->dev,
5156 intel_dp->attached_connector->panel.
5157 downclock_mode->vrefresh);
439d7ac0 5158
4e9ac947 5159unlock:
4e9ac947 5160 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5161}
5162
b33a2815
VK
5163/**
5164 * intel_edp_drrs_invalidate - Invalidate DRRS
5165 * @dev: DRM device
5166 * @frontbuffer_bits: frontbuffer plane tracking bits
5167 *
5168 * When there is a disturbance on screen (due to cursor movement/time
5169 * update etc), DRRS needs to be invalidated, i.e. we need to switch to
5170 * high RR.
5171 *
5172 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5173 */
a93fad0f
VK
5174void intel_edp_drrs_invalidate(struct drm_device *dev,
5175 unsigned frontbuffer_bits)
5176{
5177 struct drm_i915_private *dev_priv = dev->dev_private;
5178 struct drm_crtc *crtc;
5179 enum pipe pipe;
5180
9da7d693 5181 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5182 return;
5183
3954e733
R
5184 cancel_delayed_work_sync(&dev_priv->drrs.work);
5185
a93fad0f 5186 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5187 if (!dev_priv->drrs.dp) {
5188 mutex_unlock(&dev_priv->drrs.mutex);
5189 return;
5190 }
5191
a93fad0f
VK
5192 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5193 pipe = to_intel_crtc(crtc)->pipe;
5194
5195 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
a93fad0f
VK
5196 intel_dp_set_drrs_state(dev_priv->dev,
5197 dev_priv->drrs.dp->attached_connector->panel.
5198 fixed_mode->vrefresh);
5199 }
5200
5201 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5202
5203 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5204 mutex_unlock(&dev_priv->drrs.mutex);
5205}
5206
b33a2815
VK
5207/**
5208 * intel_edp_drrs_flush - Flush DRRS
5209 * @dev: DRM device
5210 * @frontbuffer_bits: frontbuffer plane tracking bits
5211 *
5212 * When there is no movement on screen, DRRS work can be scheduled.
5213 * This DRRS work is responsible for setting relevant registers after a
5214 * timeout of 1 second.
5215 *
5216 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5217 */
a93fad0f
VK
5218void intel_edp_drrs_flush(struct drm_device *dev,
5219 unsigned frontbuffer_bits)
5220{
5221 struct drm_i915_private *dev_priv = dev->dev_private;
5222 struct drm_crtc *crtc;
5223 enum pipe pipe;
5224
9da7d693 5225 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5226 return;
5227
3954e733
R
5228 cancel_delayed_work_sync(&dev_priv->drrs.work);
5229
a93fad0f 5230 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5231 if (!dev_priv->drrs.dp) {
5232 mutex_unlock(&dev_priv->drrs.mutex);
5233 return;
5234 }
5235
a93fad0f
VK
5236 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5237 pipe = to_intel_crtc(crtc)->pipe;
5238 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5239
a93fad0f
VK
5240 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR &&
5241 !dev_priv->drrs.busy_frontbuffer_bits)
5242 schedule_delayed_work(&dev_priv->drrs.work,
5243 msecs_to_jiffies(1000));
5244 mutex_unlock(&dev_priv->drrs.mutex);
5245}
5246
b33a2815
VK
5247/**
5248 * DOC: Display Refresh Rate Switching (DRRS)
5249 *
5250 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5251 * which enables switching between low and high refresh rates,
5252 * dynamically, based on the usage scenario. This feature is applicable
5253 * for internal panels.
5254 *
5255 * Indication that the panel supports DRRS is given by the panel EDID, which
5256 * would list multiple refresh rates for one resolution.
5257 *
5258 * DRRS is of 2 types - static and seamless.
5259 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5260 * (may appear as a blink on screen) and is used in dock-undock scenarios.
5261 * Seamless DRRS involves changing RR without any visual effect to the user
5262 * and can be used during normal system usage. This is done by programming
5263 * certain registers.
5264 *
5265 * Support for static/seamless DRRS may be indicated in the VBT based on
5266 * inputs from the panel spec.
5267 *
5268 * DRRS saves power by switching to low RR based on usage scenarios.
5269 *
5270 * eDP DRRS:-
5271 * The implementation is based on frontbuffer tracking implementation.
5272 * When there is a disturbance on the screen triggered by user activity or a
5273 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5274 * When there is no movement on screen, after a timeout of 1 second, a switch
5275 * to low RR is made.
5276 * For integration with frontbuffer tracking code,
5277 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5278 *
5279 * DRRS can be further extended to support other internal panels and also
5280 * the scenario of video playback wherein RR is set based on the rate
5281 * requested by userspace.
5282 */
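/*
 * Editor's sketch of the frontbuffer-tracking integration described above
 * (a minimal illustration; the real tracking code lives elsewhere in the
 * driver, and the frontbuffer bits used here are only an example):
 *
 *	unsigned bits = INTEL_FRONTBUFFER_ALL_MASK(pipe);
 *
 *	intel_edp_drrs_invalidate(dev, bits);	(screen dirtied: go to high RR)
 *	... rendering / flipping ...
 *	intel_edp_drrs_flush(dev, bits);	(idle again: the 1 second timer
 *						 rearms and may drop to low RR)
 */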
5283
5284/**
5285 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5286 * @intel_connector: eDP connector
5287 * @fixed_mode: preferred mode of panel
5288 *
5289 * This function is called only once at driver load to initialize basic
5290 * DRRS state (the delayed work and the mutex protecting it).
5291 *
5292 * Returns:
5293 * Downclock mode if panel supports it, else return NULL.
5294 * DRRS support is determined by the presence of a downclock mode (apart
5295 * from the VBT setting).
5296 */
4f9db5b5 5297static struct drm_display_mode *
96178eeb
VK
5298intel_dp_drrs_init(struct intel_connector *intel_connector,
5299 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5300{
5301 struct drm_connector *connector = &intel_connector->base;
96178eeb 5302 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5303 struct drm_i915_private *dev_priv = dev->dev_private;
5304 struct drm_display_mode *downclock_mode = NULL;
5305
9da7d693
DV
5306 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5307 mutex_init(&dev_priv->drrs.mutex);
5308
4f9db5b5
PB
5309 if (INTEL_INFO(dev)->gen <= 6) {
5310 DRM_DEBUG_KMS("DRRS only supported for Gen7 and above\n");
5311 return NULL;
5312 }
5313
5314 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5315 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5316 return NULL;
5317 }
5318
5319 downclock_mode = intel_find_panel_downclock
5320 (dev, fixed_mode, connector);
5321
5322 if (!downclock_mode) {
a1d26342 5323 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5324 return NULL;
5325 }
5326
96178eeb 5327 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5328
96178eeb 5329 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5330 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5331 return downclock_mode;
5332}
5333
ed92f0b2 5334static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5335 struct intel_connector *intel_connector)
ed92f0b2
PZ
5336{
5337 struct drm_connector *connector = &intel_connector->base;
5338 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5339 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5340 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5341 struct drm_i915_private *dev_priv = dev->dev_private;
5342 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5343 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5344 bool has_dpcd;
5345 struct drm_display_mode *scan;
5346 struct edid *edid;
6517d273 5347 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5348
5349 if (!is_edp(intel_dp))
5350 return true;
5351
49e6bc51
VS
5352 pps_lock(intel_dp);
5353 intel_edp_panel_vdd_sanitize(intel_dp);
5354 pps_unlock(intel_dp);
63635217 5355
ed92f0b2 5356 /* Cache DPCD and EDID for edp. */
ed92f0b2 5357 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5358
5359 if (has_dpcd) {
5360 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5361 dev_priv->no_aux_handshake =
5362 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5363 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5364 } else {
5365 /* if this fails, presume the device is a ghost */
5366 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5367 return false;
5368 }
5369
5370 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5371 pps_lock(intel_dp);
36b5f425 5372 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5373 pps_unlock(intel_dp);
ed92f0b2 5374
060c8778 5375 mutex_lock(&dev->mode_config.mutex);
0b99836f 5376 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5377 if (edid) {
5378 if (drm_add_edid_modes(connector, edid)) {
5379 drm_mode_connector_update_edid_property(connector,
5380 edid);
5381 drm_edid_to_eld(connector, edid);
5382 } else {
5383 kfree(edid);
5384 edid = ERR_PTR(-EINVAL);
5385 }
5386 } else {
5387 edid = ERR_PTR(-ENOENT);
5388 }
5389 intel_connector->edid = edid;
5390
5391 /* prefer fixed mode from EDID if available */
5392 list_for_each_entry(scan, &connector->probed_modes, head) {
5393 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5394 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5395 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5396 intel_connector, fixed_mode);
ed92f0b2
PZ
5397 break;
5398 }
5399 }
5400
5401 /* fallback to VBT if available for eDP */
5402 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5403 fixed_mode = drm_mode_duplicate(dev,
5404 dev_priv->vbt.lfp_lvds_vbt_mode);
5405 if (fixed_mode)
5406 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5407 }
060c8778 5408 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5409
01527b31
CT
5410 if (IS_VALLEYVIEW(dev)) {
5411 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5412 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5413
5414 /*
5415 * Figure out the current pipe for the initial backlight setup.
5416 * If the current pipe isn't valid, try the PPS pipe, and if that
5417 * fails just assume pipe A.
5418 */
5419 if (IS_CHERRYVIEW(dev))
5420 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5421 else
5422 pipe = PORT_TO_PIPE(intel_dp->DP);
5423
5424 if (pipe != PIPE_A && pipe != PIPE_B)
5425 pipe = intel_dp->pps_pipe;
5426
5427 if (pipe != PIPE_A && pipe != PIPE_B)
5428 pipe = PIPE_A;
5429
5430 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5431 pipe_name(pipe));
01527b31
CT
5432 }
5433
4f9db5b5 5434 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5435 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5436 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5437
5438 return true;
5439}
5440
16c25533 5441bool
f0fec3f2
PZ
5442intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5443 struct intel_connector *intel_connector)
a4fc5ed6 5444{
f0fec3f2
PZ
5445 struct drm_connector *connector = &intel_connector->base;
5446 struct intel_dp *intel_dp = &intel_dig_port->dp;
5447 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5448 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5449 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5450 enum port port = intel_dig_port->port;
0b99836f 5451 int type;
a4fc5ed6 5452
a4a5d2f8
VS
5453 intel_dp->pps_pipe = INVALID_PIPE;
5454
ec5b01dd 5455 /* intel_dp vfuncs */
b6b5e383
DL
5456 if (INTEL_INFO(dev)->gen >= 9)
5457 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5458 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
5459 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5460 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5461 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5462 else if (HAS_PCH_SPLIT(dev))
5463 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5464 else
5465 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5466
b9ca5fad
DL
5467 if (INTEL_INFO(dev)->gen >= 9)
5468 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5469 else
5470 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5471
0767935e
DV
5472 /* Preserve the current hw state. */
5473 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5474 intel_dp->attached_connector = intel_connector;
3d3dc149 5475
3b32a35b 5476 if (intel_dp_is_edp(dev, port))
b329530c 5477 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5478 else
5479 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5480
f7d24902
ID
5481 /*
5482 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5483 * for DP the encoder type can be set by the caller to
5484 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5485 */
5486 if (type == DRM_MODE_CONNECTOR_eDP)
5487 intel_encoder->type = INTEL_OUTPUT_EDP;
5488
c17ed5b5
VS
5489 /* eDP only on port B and/or C on vlv/chv */
5490 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5491 port != PORT_B && port != PORT_C))
5492 return false;
5493
e7281eab
ID
5494 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5495 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5496 port_name(port));
5497
b329530c 5498 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5499 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5500
a4fc5ed6
KP
5501 connector->interlace_allowed = true;
5502 connector->doublescan_allowed = 0;
5503
f0fec3f2 5504 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5505 edp_panel_vdd_work);
a4fc5ed6 5506
df0e9248 5507 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5508 drm_connector_register(connector);
a4fc5ed6 5509
affa9354 5510 if (HAS_DDI(dev))
bcbc889b
PZ
5511 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5512 else
5513 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5514 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5515
0b99836f 5516 /* Set up the hotplug pin. */
ab9d7c30
PZ
5517 switch (port) {
5518 case PORT_A:
1d843f9d 5519 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5520 break;
5521 case PORT_B:
1d843f9d 5522 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
5523 break;
5524 case PORT_C:
1d843f9d 5525 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5526 break;
5527 case PORT_D:
1d843f9d 5528 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
5529 break;
5530 default:
ad1c0b19 5531 BUG();
5eb08b69
ZW
5532 }
5533
dada1a9f 5534 if (is_edp(intel_dp)) {
773538e8 5535 pps_lock(intel_dp);
1e74a324
VS
5536 intel_dp_init_panel_power_timestamps(intel_dp);
5537 if (IS_VALLEYVIEW(dev))
a4a5d2f8 5538 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5539 else
36b5f425 5540 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5541 pps_unlock(intel_dp);
dada1a9f 5542 }
0095e6dc 5543
9d1a1031 5544 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 5545
0e32b39c 5546 /* init MST on ports that can support it */
c86ea3d0 5547 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
0e32b39c 5548 if (port == PORT_B || port == PORT_C || port == PORT_D) {
a4a5d2f8
VS
5549 intel_dp_mst_encoder_init(intel_dig_port,
5550 intel_connector->base.base.id);
0e32b39c
DA
5551 }
5552 }
5553
36b5f425 5554 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 5555 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
5556 if (is_edp(intel_dp)) {
5557 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5558 /*
5559 * vdd might still be enabled due to the delayed vdd off.
5560 * Make sure vdd is actually turned off here.
5561 */
773538e8 5562 pps_lock(intel_dp);
4be73780 5563 edp_panel_vdd_off_sync(intel_dp);
773538e8 5564 pps_unlock(intel_dp);
15b1d171 5565 }
34ea3d38 5566 drm_connector_unregister(connector);
b2f246a8 5567 drm_connector_cleanup(connector);
16c25533 5568 return false;
b2f246a8 5569 }
32f9d658 5570
f684960e
CW
5571 intel_dp_add_properties(intel_dp, connector);
5572
a4fc5ed6
KP
5573 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5574 * 0xd. Failure to do so will result in spurious interrupts being
5575 * generated on the port when a cable is not attached.
5576 */
5577 if (IS_G4X(dev) && !IS_GM45(dev)) {
5578 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5579 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5580 }
16c25533
PZ
5581
5582 return true;
a4fc5ed6 5583}
f0fec3f2
PZ
5584
5585void
5586intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
5587{
13cf5504 5588 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5589 struct intel_digital_port *intel_dig_port;
5590 struct intel_encoder *intel_encoder;
5591 struct drm_encoder *encoder;
5592 struct intel_connector *intel_connector;
5593
b14c5679 5594 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
5595 if (!intel_dig_port)
5596 return;
5597
b14c5679 5598 intel_connector = kzalloc(sizeof(*intel_connector), GFP_KERNEL);
f0fec3f2
PZ
5599 if (!intel_connector) {
5600 kfree(intel_dig_port);
5601 return;
5602 }
5603
5604 intel_encoder = &intel_dig_port->base;
5605 encoder = &intel_encoder->base;
5606
5607 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5608 DRM_MODE_ENCODER_TMDS);
5609
5bfe2ac0 5610 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 5611 intel_encoder->disable = intel_disable_dp;
00c09d70 5612 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 5613 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 5614 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 5615 if (IS_CHERRYVIEW(dev)) {
9197c88b 5616 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
5617 intel_encoder->pre_enable = chv_pre_enable_dp;
5618 intel_encoder->enable = vlv_enable_dp;
580d3811 5619 intel_encoder->post_disable = chv_post_disable_dp;
e4a1d846 5620 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 5621 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
5622 intel_encoder->pre_enable = vlv_pre_enable_dp;
5623 intel_encoder->enable = vlv_enable_dp;
49277c31 5624 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 5625 } else {
ecff4f3b
JN
5626 intel_encoder->pre_enable = g4x_pre_enable_dp;
5627 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
5628 if (INTEL_INFO(dev)->gen >= 5)
5629 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 5630 }
f0fec3f2 5631
174edf1f 5632 intel_dig_port->port = port;
f0fec3f2
PZ
5633 intel_dig_port->dp.output_reg = output_reg;
5634
00c09d70 5635 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
5636 if (IS_CHERRYVIEW(dev)) {
5637 if (port == PORT_D)
5638 intel_encoder->crtc_mask = 1 << 2;
5639 else
5640 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
5641 } else {
5642 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
5643 }
bc079e8b 5644 intel_encoder->cloneable = 0;
f0fec3f2
PZ
5645 intel_encoder->hot_plug = intel_dp_hot_plug;
5646
13cf5504
DA
5647 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5648 dev_priv->hpd_irq_port[port] = intel_dig_port;
5649
15b1d171
PZ
5650 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
5651 drm_encoder_cleanup(encoder);
5652 kfree(intel_dig_port);
b2f246a8 5653 kfree(intel_connector);
15b1d171 5654 }
f0fec3f2 5655}
0e32b39c
DA
5656
5657void intel_dp_mst_suspend(struct drm_device *dev)
5658{
5659 struct drm_i915_private *dev_priv = dev->dev_private;
5660 int i;
5661
5662 /* disable MST */
5663 for (i = 0; i < I915_MAX_PORTS; i++) {
5664 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5665 if (!intel_dig_port)
5666 continue;
5667
5668 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5669 if (!intel_dig_port->dp.can_mst)
5670 continue;
5671 if (intel_dig_port->dp.is_mst)
5672 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
5673 }
5674 }
5675}
5676
5677void intel_dp_mst_resume(struct drm_device *dev)
5678{
5679 struct drm_i915_private *dev_priv = dev->dev_private;
5680 int i;
5681
5682 for (i = 0; i < I915_MAX_PORTS; i++) {
5683 struct intel_digital_port *intel_dig_port = dev_priv->hpd_irq_port[i];
5684 if (!intel_dig_port)
5685 continue;
5686 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
5687 int ret;
5688
5689 if (!intel_dig_port->dp.can_mst)
5690 continue;
5691
5692 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
5693 if (ret != 0) {
5694 intel_dp_check_mst_status(&intel_dig_port->dp);
5695 }
5696 }
5697 }
5698}