drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf 50struct dp_link_dpll {
840b32b7 51 int clock;
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
840b32b7 56 { 162000,
9dd4ffdf 57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
840b32b7 63 { 162000,
9dd4ffdf 64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5 69static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 70 { 162000,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
76/*
77 * CHV supports eDP 1.4, which has more link rates.
78 * Only the fixed rates are listed below; the variable rates are excluded.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional divider for m2.
83 * m2 is stored in fixed-point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
840b32b7 86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 88 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
840b32b7 90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
637a9c63 93
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
119}
120
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
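/*
 * Mask (bits 0-3) of the lanes left unused by a link of the given width;
 * e.g. lane_count == 2 leaves lanes 2 and 3 set in the returned mask.
 */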
133static unsigned int intel_dp_unused_lane_mask(int lane_count)
134{
135 return ~((1 << lane_count) - 1) & 0xf;
136}
137
138static int
139intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 140{
7183dc29 141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
1db10e28 146 case DP_LINK_BW_5_4:
d4eead50 147 break;
a4fc5ed6 148 default:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155}
156
157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158{
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 u8 source_max, sink_max;
161
ccb1a831 162 source_max = intel_dig_port->max_lanes;
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166}
167
168/*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
a4fc5ed6 185static int
c898261c 186intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 187{
cd9dde44 188 return (pixel_clock * bpp + 9) / 10;
189}
190
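/*
 * The 8/10 factor accounts for 8b/10b channel coding: only 8 of every 10
 * link symbols carry payload, so the result is again in decakilobits per
 * second, matching the units of intel_dp_link_required() above.
 */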
191static int
192intel_dp_max_data_rate(int max_link_clock, int max_lanes)
193{
194 return (max_link_clock * max_lanes * 8) / 10;
195}
196
c19de8eb 197static enum drm_mode_status
198intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
200{
df0e9248 201 struct intel_dp *intel_dp = intel_attached_dp(connector);
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
799487f5 206 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
a4fc5ed6 207
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
210 return MODE_PANEL;
211
dd06f90e 212 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 213 return MODE_PANEL;
214
215 target_clock = fixed_mode->clock;
216 }
217
50fec21a 218 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 219 max_lanes = intel_dp_max_lane_count(intel_dp);
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
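	/* 18 bpp (6 bpc) is the lowest depth intel_dp_compute_config() can fall back to */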
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
799487f5 224 if (mode_rate > max_rate || target_clock > max_dotclk)
c4867936 225 return MODE_CLOCK_HIGH;
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
233 return MODE_OK;
234}
235
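/*
 * Each AUX channel data register holds up to 4 message bytes, packed MSB
 * first: src[0] ends up in bits 31:24 of the returned word.
 */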
a4f1289e 236uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
237{
238 int i;
239 uint32_t v = 0;
240
241 if (src_bytes > 4)
242 src_bytes = 4;
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 return v;
246}
247
c2af70e2 248static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
249{
250 int i;
251 if (dst_bytes > 4)
252 dst_bytes = 4;
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
255}
256
257static void
258intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 259 struct intel_dp *intel_dp);
260static void
261intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 262 struct intel_dp *intel_dp);
bf13e81b 263
264static void pps_lock(struct intel_dp *intel_dp)
265{
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
271
272 /*
273 * See vlv_power_sequencer_reset() for why we need
274 * a power domain reference here.
275 */
25f78f58 276 power_domain = intel_display_port_aux_power_domain(encoder);
277 intel_display_power_get(dev_priv, power_domain);
278
279 mutex_lock(&dev_priv->pps_mutex);
280}
281
282static void pps_unlock(struct intel_dp *intel_dp)
283{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
289
290 mutex_unlock(&dev_priv->pps_mutex);
291
25f78f58 292 power_domain = intel_display_port_aux_power_domain(encoder);
293 intel_display_power_put(dev_priv, power_domain);
294}
295
296static void
297vlv_power_sequencer_kick(struct intel_dp *intel_dp)
298{
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct drm_device *dev = intel_dig_port->base.base.dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 enum pipe pipe = intel_dp->pps_pipe;
303 bool pll_enabled, release_cl_override = false;
304 enum dpio_phy phy = DPIO_PHY(pipe);
305 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
306 uint32_t DP;
307
308 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
309 "skipping pipe %c power sequencer kick due to port %c being active\n",
310 pipe_name(pipe), port_name(intel_dig_port->port)))
311 return;
312
313 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
314 pipe_name(pipe), port_name(intel_dig_port->port));
315
316 /* Preserve the BIOS-computed detected bit. This is
317 * supposed to be read-only.
318 */
319 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
320 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
321 DP |= DP_PORT_WIDTH(1);
322 DP |= DP_LINK_TRAIN_PAT_1;
323
324 if (IS_CHERRYVIEW(dev))
325 DP |= DP_PIPE_SELECT_CHV(pipe);
326 else if (pipe == PIPE_B)
327 DP |= DP_PIPEB_SELECT;
328
329 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
330
331 /*
332 * The DPLL for the pipe must be enabled for this to work.
333 * So temporarily enable it if it's not already enabled.
334 */
335 if (!pll_enabled) {
336 release_cl_override = IS_CHERRYVIEW(dev) &&
337 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
338
339 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
340 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
341 DRM_ERROR("Failed to force on pll for pipe %c!\n",
342 pipe_name(pipe));
343 return;
344 }
0047eedc 345 }
d288f65f 346
347 /*
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power sequencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
352 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
d288f65f 361
0047eedc 362 if (!pll_enabled) {
d288f65f 363 vlv_force_pll_off(dev, pipe);
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
368}
369
370static enum pipe
371vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372{
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 378 enum pipe pipe;
bf13e81b 379
e39b999a 380 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 381
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
388 /*
389 * We don't have a power sequencer currently.
390 * Pick one that's not used by other ports.
391 */
19c8054c 392 for_each_intel_encoder(dev, encoder) {
393 struct intel_dp *tmp;
394
395 if (encoder->type != INTEL_OUTPUT_EDP)
396 continue;
397
398 tmp = enc_to_intel_dp(&encoder->base);
399
400 if (tmp->pps_pipe != INVALID_PIPE)
401 pipes &= ~(1 << tmp->pps_pipe);
402 }
403
404 /*
405 * Didn't find one. This should not happen since there
406 * are two power sequencers and up to two eDP ports.
407 */
408 if (WARN_ON(pipes == 0))
409 pipe = PIPE_A;
410 else
411 pipe = ffs(pipes) - 1;
a4a5d2f8 412
413 vlv_steal_power_sequencer(dev, pipe);
414 intel_dp->pps_pipe = pipe;
415
416 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
417 pipe_name(intel_dp->pps_pipe),
418 port_name(intel_dig_port->port));
419
420 /* init power sequencer on this pipe and port */
421 intel_dp_init_panel_power_sequencer(dev, intel_dp);
422 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 423
424 /*
425 * Even vdd force doesn't work until we've made
426 * the power sequencer lock in on the port.
427 */
428 vlv_power_sequencer_kick(intel_dp);
429
430 return intel_dp->pps_pipe;
431}
432
433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
434 enum pipe pipe);
435
436static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
437 enum pipe pipe)
438{
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
440}
441
442static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
443 enum pipe pipe)
444{
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
446}
447
448static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return true;
452}
bf13e81b 453
a4a5d2f8 454static enum pipe
455vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
456 enum port port,
457 vlv_pipe_check pipe_check)
458{
459 enum pipe pipe;
bf13e81b 460
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
464
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
466 continue;
467
468 if (!pipe_check(dev_priv, pipe))
469 continue;
470
a4a5d2f8 471 return pipe;
472 }
473
474 return INVALID_PIPE;
475}
476
477static void
478vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
479{
480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
481 struct drm_device *dev = intel_dig_port->base.base.dev;
482 struct drm_i915_private *dev_priv = dev->dev_private;
483 enum port port = intel_dig_port->port;
484
485 lockdep_assert_held(&dev_priv->pps_mutex);
486
487 /* try to find a pipe with this port selected */
488 /* first pick one where the panel is on */
489 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
490 vlv_pipe_has_pp_on);
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp->pps_pipe == INVALID_PIPE)
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 vlv_pipe_has_vdd_on);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
498 vlv_pipe_any);
499
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp->pps_pipe == INVALID_PIPE) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
503 port_name(port));
504 return;
505 }
506
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port), pipe_name(intel_dp->pps_pipe));
509
510 intel_dp_init_panel_power_sequencer(dev, intel_dp);
511 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
512}
513
514void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
515{
516 struct drm_device *dev = dev_priv->dev;
517 struct intel_encoder *encoder;
518
666a4537 519 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
520 return;
521
522 /*
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so one
529 * should use them always.
530 */
531
19c8054c 532 for_each_intel_encoder(dev, encoder) {
533 struct intel_dp *intel_dp;
534
535 if (encoder->type != INTEL_OUTPUT_EDP)
536 continue;
537
538 intel_dp = enc_to_intel_dp(&encoder->base);
539 intel_dp->pps_pipe = INVALID_PIPE;
540 }
541}
542
543static i915_reg_t
544_pp_ctrl_reg(struct intel_dp *intel_dp)
545{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554}
555
556static i915_reg_t
557_pp_stat_reg(struct intel_dp *intel_dp)
558{
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
560
561 if (IS_BROXTON(dev))
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
564 return PCH_PP_STATUS;
565 else
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
567}
568
569/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
570 This function is only applicable when panel PM state is not to be tracked. */
571static int edp_notify_handler(struct notifier_block *this, unsigned long code,
572 void *unused)
573{
574 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575 edp_notifier);
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577 struct drm_i915_private *dev_priv = dev->dev_private;
578
579 if (!is_edp(intel_dp) || code != SYS_RESTART)
580 return 0;
581
773538e8 582 pps_lock(intel_dp);
e39b999a 583
666a4537 584 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e39b999a 585 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
f0f59a00 586 i915_reg_t pp_ctrl_reg, pp_div_reg;
649636ef 587 u32 pp_div;
e39b999a 588
589 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
591 pp_div = I915_READ(pp_div_reg);
592 pp_div &= PP_REFERENCE_DIVIDER_MASK;
593
594 /* 0x1F write to PP_DIV_REG sets max cycle delay */
595 I915_WRITE(pp_div_reg, pp_div | 0x1F);
596 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
597 msleep(intel_dp->panel_power_cycle_delay);
598 }
599
773538e8 600 pps_unlock(intel_dp);
e39b999a 601
602 return 0;
603}
604
4be73780 605static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 606{
30add22d 607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
608 struct drm_i915_private *dev_priv = dev->dev_private;
609
610 lockdep_assert_held(&dev_priv->pps_mutex);
611
666a4537 612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
613 intel_dp->pps_pipe == INVALID_PIPE)
614 return false;
615
bf13e81b 616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
617}
618
4be73780 619static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 620{
30add22d 621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
622 struct drm_i915_private *dev_priv = dev->dev_private;
623
624 lockdep_assert_held(&dev_priv->pps_mutex);
625
666a4537 626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
627 intel_dp->pps_pipe == INVALID_PIPE)
628 return false;
629
773538e8 630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
631}
632
633static void
634intel_dp_check_edp(struct intel_dp *intel_dp)
635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 637 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 638
639 if (!is_edp(intel_dp))
640 return;
453c5420 641
4be73780 642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
647 }
648}
649
650static uint32_t
651intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652{
653 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
654 struct drm_device *dev = intel_dig_port->base.base.dev;
655 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 656 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
657 uint32_t status;
658 bool done;
659
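	/* C re-reads the AUX control register and is true once SEND_BUSY has cleared */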
ef04f00d 660#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 661 if (has_aux_irq)
b18ac466 662 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 663 msecs_to_jiffies_timeout(10));
664 else
665 done = wait_for_atomic(C, 10) == 0;
666 if (!done)
667 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
668 has_aux_irq);
669#undef C
670
671 return status;
672}
673
ec5b01dd 674static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 675{
676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
677 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 678
679 /*
680 * The clock divider is based on the hrawclk, and is intended to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that.
a4fc5ed6 682 */
fce18c4c 683 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
684}
685
686static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687{
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 690 struct drm_i915_private *dev_priv = dev->dev_private;
691
692 if (index)
693 return 0;
694
695 if (intel_dig_port->port == PORT_A) {
fce18c4c 696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
05024da3 697
ec5b01dd 698 } else {
fce18c4c 699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
700 }
701}
702
703static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704{
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private;
708
709 if (intel_dig_port->port == PORT_A) {
710 if (index)
711 return 0;
05024da3 712 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
56f5f700 713 } else if (HAS_PCH_LPT_H(dev_priv)) {
2c55c336 714 /* Workaround for non-ULT HSW */
715 switch (index) {
716 case 0: return 63;
717 case 1: return 72;
718 default: return 0;
719 }
ec5b01dd 720 } else {
fce18c4c 721 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
2c55c336 722 }
723}
724
725static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
726{
727 return index ? 0 : 100;
728}
729
730static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
731{
732 /*
733 * SKL doesn't need us to program the AUX clock divider (Hardware will
734 * derive the clock from CDCLK automatically). We still implement the
735 * get_aux_clock_divider vfunc to plug into the existing code.
736 */
737 return index ? 0 : 1;
738}
739
740static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
741 bool has_aux_irq,
742 int send_bytes,
743 uint32_t aux_clock_divider)
744{
745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 struct drm_device *dev = intel_dig_port->base.base.dev;
747 uint32_t precharge, timeout;
748
749 if (IS_GEN6(dev))
750 precharge = 3;
751 else
752 precharge = 5;
753
f3c6a3a7 754 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
755 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 else
757 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758
759 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 760 DP_AUX_CH_CTL_DONE |
5ed12a19 761 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 762 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 763 timeout |
788d4433 764 DP_AUX_CH_CTL_RECEIVE_ERROR |
765 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 767 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
768}
769
770static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
771 bool has_aux_irq,
772 int send_bytes,
773 uint32_t unused)
774{
775 return DP_AUX_CH_CTL_SEND_BUSY |
776 DP_AUX_CH_CTL_DONE |
777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 DP_AUX_CH_CTL_TIME_OUT_1600us |
780 DP_AUX_CH_CTL_RECEIVE_ERROR |
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
783}
784
785static int
786intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 787 const uint8_t *send, int send_bytes,
788 uint8_t *recv, int recv_size)
789{
790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
791 struct drm_device *dev = intel_dig_port->base.base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 793 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
bc86625a 794 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes;
796 uint32_t status;
5ed12a19 797 int try, clock = 0;
4e6b788c 798 bool has_aux_irq = HAS_AUX_IRQ(dev);
799 bool vdd;
800
773538e8 801 pps_lock(intel_dp);
e39b999a 802
803 /*
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
807 * ourselves.
808 */
1e0560e0 809 vdd = edp_panel_vdd_on(intel_dp);
810
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
813 * deep sleep states.
814 */
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
816
817 intel_dp_check_edp(intel_dp);
5eb08b69 818
819 /* Try to wait for any previous AUX channel activity */
820 for (try = 0; try < 3; try++) {
ef04f00d 821 status = I915_READ_NOTRACE(ch_ctl);
822 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
823 break;
824 msleep(1);
825 }
826
827 if (try == 3) {
828 static u32 last_status = -1;
829 const u32 status = I915_READ(ch_ctl);
830
831 if (status != last_status) {
832 WARN(1, "dp_aux_ch not started status 0x%08x\n",
833 status);
834 last_status = status;
835 }
836
837 ret = -EBUSY;
838 goto out;
839 }
840
841 /* Only 5 data registers! */
842 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
843 ret = -E2BIG;
844 goto out;
845 }
846
ec5b01dd 847 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
848 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
849 has_aux_irq,
850 send_bytes,
851 aux_clock_divider);
5ed12a19 852
853 /* Must try at least 3 times according to DP spec */
854 for (try = 0; try < 5; try++) {
855 /* Load the send data into the aux channel data registers */
856 for (i = 0; i < send_bytes; i += 4)
330e20ec 857 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
858 intel_dp_pack_aux(send + i,
859 send_bytes - i));
860
861 /* Send the command and wait for it to complete */
5ed12a19 862 I915_WRITE(ch_ctl, send_ctl);
863
864 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
865
866 /* Clear done status and any errors */
867 I915_WRITE(ch_ctl,
868 status |
869 DP_AUX_CH_CTL_DONE |
870 DP_AUX_CH_CTL_TIME_OUT_ERROR |
871 DP_AUX_CH_CTL_RECEIVE_ERROR);
872
74ebf294 873 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 874 continue;
875
876 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
877 * 400us delay required for errors and timeouts
878 * Timeout errors from the HW already meet this
879 * requirement so skip to next iteration
880 */
881 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
882 usleep_range(400, 500);
bc86625a 883 continue;
74ebf294 884 }
bc86625a 885 if (status & DP_AUX_CH_CTL_DONE)
e058c945 886 goto done;
bc86625a 887 }
888 }
889
a4fc5ed6 890 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
892 ret = -EBUSY;
893 goto out;
894 }
895
e058c945 896done:
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected
899 */
a5b3da54 900 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
902 ret = -EIO;
903 goto out;
a5b3da54 904 }
905
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
a5b3da54 908 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
910 ret = -ETIMEDOUT;
911 goto out;
912 }
913
914 /* Unload any bytes sent back from the other side */
915 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
917
918 /*
919 * By BSpec: "Message sizes of 0 or >20 are not allowed."
920 * We have no idea what happened, so we return -EBUSY so the
921 * drm layer takes care of the necessary retries.
922 */
923 if (recv_bytes == 0 || recv_bytes > 20) {
924 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
925 recv_bytes);
926 /*
927 * FIXME: This patch was created on top of a series that
928 * organizes the retries at the drm level. There, EBUSY should
929 * also take care of the 1ms wait before retrying.
930 * That aux retry re-org is still needed, and once it is
931 * merged we can remove this sleep from here.
932 */
933 usleep_range(1000, 1500);
934 ret = -EBUSY;
935 goto out;
936 }
937
938 if (recv_bytes > recv_size)
939 recv_bytes = recv_size;
0206e353 940
4f7f7b7e 941 for (i = 0; i < recv_bytes; i += 4)
330e20ec 942 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
a4f1289e 943 recv + i, recv_bytes - i);
a4fc5ed6 944
945 ret = recv_bytes;
946out:
947 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
948
949 if (vdd)
950 edp_panel_vdd_off(intel_dp, false);
951
773538e8 952 pps_unlock(intel_dp);
e39b999a 953
9ee32fea 954 return ret;
955}
956
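/*
 * An AUX request header is 3 bytes (4-bit command plus 20-bit address); a
 * transfer with a payload adds a fourth byte holding the length minus one.
 */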
957#define BARE_ADDRESS_SIZE 3
958#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
959static ssize_t
960intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 961{
962 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
963 uint8_t txbuf[20], rxbuf[20];
964 size_t txsize, rxsize;
a4fc5ed6 965 int ret;
a4fc5ed6 966
967 txbuf[0] = (msg->request << 4) |
968 ((msg->address >> 16) & 0xf);
969 txbuf[1] = (msg->address >> 8) & 0xff;
970 txbuf[2] = msg->address & 0xff;
971 txbuf[3] = msg->size - 1;
46a5ae9f 972
973 switch (msg->request & ~DP_AUX_I2C_MOT) {
974 case DP_AUX_NATIVE_WRITE:
975 case DP_AUX_I2C_WRITE:
c1e74122 976 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
a6c8aff0 977 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 978 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 979
980 if (WARN_ON(txsize > 20))
981 return -E2BIG;
a4fc5ed6 982
983 if (msg->buffer)
984 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
985 else
986 WARN_ON(msg->size);
a4fc5ed6 987
988 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
989 if (ret > 0) {
990 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 991
992 if (ret > 1) {
993 /* Number of bytes written in a short write. */
994 ret = clamp_t(int, rxbuf[1], 0, msg->size);
995 } else {
996 /* Return payload size. */
997 ret = msg->size;
998 }
999 }
1000 break;
46a5ae9f 1001
1002 case DP_AUX_NATIVE_READ:
1003 case DP_AUX_I2C_READ:
a6c8aff0 1004 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 1005 rxsize = msg->size + 1;
a4fc5ed6 1006
1007 if (WARN_ON(rxsize > 20))
1008 return -E2BIG;
a4fc5ed6 1009
1010 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1011 if (ret > 0) {
1012 msg->reply = rxbuf[0] >> 4;
1013 /*
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1016 *
1017 * Return payload size.
1018 */
1019 ret--;
1020 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1021 }
1022 break;
1023
1024 default:
1025 ret = -EINVAL;
1026 break;
a4fc5ed6 1027 }
f51a44b9 1028
9d1a1031 1029 return ret;
1030}
1031
1032static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1033 enum port port)
1034{
1035 switch (port) {
1036 case PORT_B:
1037 case PORT_C:
1038 case PORT_D:
1039 return DP_AUX_CH_CTL(port);
1040 default:
1041 MISSING_CASE(port);
1042 return DP_AUX_CH_CTL(PORT_B);
1043 }
1044}
1045
1046static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047 enum port port, int index)
1048{
1049 switch (port) {
1050 case PORT_B:
1051 case PORT_C:
1052 case PORT_D:
1053 return DP_AUX_CH_DATA(port, index);
1054 default:
1055 MISSING_CASE(port);
1056 return DP_AUX_CH_DATA(PORT_B, index);
1057 }
1058}
1059
1060static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1061 enum port port)
1062{
1063 switch (port) {
1064 case PORT_A:
1065 return DP_AUX_CH_CTL(port);
1066 case PORT_B:
1067 case PORT_C:
1068 case PORT_D:
1069 return PCH_DP_AUX_CH_CTL(port);
1070 default:
1071 MISSING_CASE(port);
1072 return DP_AUX_CH_CTL(PORT_A);
1073 }
1074}
1075
1076static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077 enum port port, int index)
1078{
1079 switch (port) {
1080 case PORT_A:
1081 return DP_AUX_CH_DATA(port, index);
1082 case PORT_B:
1083 case PORT_C:
1084 case PORT_D:
1085 return PCH_DP_AUX_CH_DATA(port, index);
1086 default:
1087 MISSING_CASE(port);
1088 return DP_AUX_CH_DATA(PORT_A, index);
1089 }
1090}
1091
1092/*
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1095 */
1096static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1097{
1098 const struct ddi_vbt_port_info *info =
1099 &dev_priv->vbt.ddi_port_info[PORT_E];
1100
1101 switch (info->alternate_aux_channel) {
1102 case DP_AUX_A:
1103 return PORT_A;
1104 case DP_AUX_B:
1105 return PORT_B;
1106 case DP_AUX_C:
1107 return PORT_C;
1108 case DP_AUX_D:
1109 return PORT_D;
1110 default:
1111 MISSING_CASE(info->alternate_aux_channel);
1112 return PORT_A;
1113 }
1114}
1115
1116static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1117 enum port port)
1118{
1119 if (port == PORT_E)
1120 port = skl_porte_aux_port(dev_priv);
1121
1122 switch (port) {
1123 case PORT_A:
1124 case PORT_B:
1125 case PORT_C:
1126 case PORT_D:
1127 return DP_AUX_CH_CTL(port);
1128 default:
1129 MISSING_CASE(port);
1130 return DP_AUX_CH_CTL(PORT_A);
1131 }
1132}
1133
1134static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135 enum port port, int index)
1136{
1137 if (port == PORT_E)
1138 port = skl_porte_aux_port(dev_priv);
1139
1140 switch (port) {
1141 case PORT_A:
1142 case PORT_B:
1143 case PORT_C:
1144 case PORT_D:
1145 return DP_AUX_CH_DATA(port, index);
1146 default:
1147 MISSING_CASE(port);
1148 return DP_AUX_CH_DATA(PORT_A, index);
1149 }
1150}
1151
1152static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153 enum port port)
1154{
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1159 else
1160 return g4x_aux_ctl_reg(dev_priv, port);
1161}
1162
1163static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
1165{
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1170 else
1171 return g4x_aux_data_reg(dev_priv, port, index);
1172}
1173
1174static void intel_aux_reg_init(struct intel_dp *intel_dp)
1175{
1176 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1177 enum port port = dp_to_dig_port(intel_dp)->port;
1178 int i;
1179
1180 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1181 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1182 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1183}
1184
9d1a1031 1185static void
1186intel_dp_aux_fini(struct intel_dp *intel_dp)
1187{
1188 drm_dp_aux_unregister(&intel_dp->aux);
1189 kfree(intel_dp->aux.name);
1190}
1191
1192static int
1193intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1194{
1195 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1196 enum port port = intel_dig_port->port;
1197 int ret;
1198
330e20ec 1199 intel_aux_reg_init(intel_dp);
8316f337 1200
1201 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1202 if (!intel_dp->aux.name)
1203 return -ENOMEM;
1204
4d32c0d8 1205 intel_dp->aux.dev = connector->base.kdev;
9d1a1031 1206 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1207
1208 DRM_DEBUG_KMS("registering %s bus for %s\n",
1209 intel_dp->aux.name,
0b99836f 1210 connector->base.kdev->kobj.name);
8316f337 1211
4f71d0cb 1212 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1213 if (ret < 0) {
4f71d0cb 1214 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1215 intel_dp->aux.name, ret);
1216 kfree(intel_dp->aux.name);
1217 return ret;
ab2c0672 1218 }
8a5e6aeb 1219
a121f4e5 1220 return 0;
1221}
1222
1223static void
1224intel_dp_connector_unregister(struct intel_connector *intel_connector)
1225{
1226 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1227
4d32c0d8 1228 intel_dp_aux_fini(intel_dp);
1229 intel_connector_unregister(intel_connector);
1230}
1231
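/*
 * Note: the DPLL_CTRL1 link rate fields are encoded in terms of half the
 * DP link rate, hence the port_clock / 2 switch below (e.g. a 162000 kHz
 * link rate selects the 810 setting).
 */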
5416d871 1232static void
840b32b7 1233skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1234{
1235 u32 ctrl1;
1236
1237 memset(&pipe_config->dpll_hw_state, 0,
1238 sizeof(pipe_config->dpll_hw_state));
1239
1240 pipe_config->ddi_pll_sel = SKL_DPLL0;
1241 pipe_config->dpll_hw_state.cfgcr1 = 0;
1242 pipe_config->dpll_hw_state.cfgcr2 = 0;
1243
1244 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
840b32b7 1245 switch (pipe_config->port_clock / 2) {
c3346ef6 1246 case 81000:
71cd8423 1247 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1248 SKL_DPLL0);
1249 break;
c3346ef6 1250 case 135000:
71cd8423 1251 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1252 SKL_DPLL0);
1253 break;
c3346ef6 1254 case 270000:
71cd8423 1255 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1256 SKL_DPLL0);
1257 break;
c3346ef6 1258 case 162000:
71cd8423 1259 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1260 SKL_DPLL0);
1261 break;
1262 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640, which
1263 results in a CDCLK change. Need to handle the change of CDCLK by
1264 disabling pipes and re-enabling them. */
1265 case 108000:
71cd8423 1266 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1267 SKL_DPLL0);
1268 break;
1269 case 216000:
71cd8423 1270 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1271 SKL_DPLL0);
1272 break;
1273
1274 }
1275 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1276}
1277
6fa2d197 1278void
840b32b7 1279hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1280{
1281 memset(&pipe_config->dpll_hw_state, 0,
1282 sizeof(pipe_config->dpll_hw_state));
1283
1284 switch (pipe_config->port_clock / 2) {
1285 case 81000:
1286 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1287 break;
840b32b7 1288 case 135000:
1289 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1290 break;
840b32b7 1291 case 270000:
1292 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1293 break;
1294 }
1295}
1296
fc0f8e25 1297static int
12f6a2e2 1298intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1299{
1300 if (intel_dp->num_sink_rates) {
1301 *sink_rates = intel_dp->sink_rates;
1302 return intel_dp->num_sink_rates;
fc0f8e25 1303 }
1304
1305 *sink_rates = default_rates;
1306
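	/*
	 * Legacy DPCD: DP_MAX_LINK_RATE is 0x06, 0x0a or 0x14, so the shift
	 * maps it to 1, 2 or 3 usable entries of default_rates[].
	 */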
1307 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1308}
1309
e588fa18 1310bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1311{
1312 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1313 struct drm_device *dev = dig_port->base.base.dev;
1314
ed63baaf 1315 /* WaDisableHBR2:skl */
e87a005d 1316 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1317 return false;
1318
1319 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1320 (INTEL_INFO(dev)->gen >= 9))
1321 return true;
1322 else
1323 return false;
1324}
1325
a8f3ef61 1326static int
e588fa18 1327intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
a8f3ef61 1328{
1329 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1330 struct drm_device *dev = dig_port->base.base.dev;
1331 int size;
1332
1333 if (IS_BROXTON(dev)) {
1334 *source_rates = bxt_rates;
af7080f5 1335 size = ARRAY_SIZE(bxt_rates);
ef11bdb3 1336 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
637a9c63 1337 *source_rates = skl_rates;
1338 size = ARRAY_SIZE(skl_rates);
1339 } else {
1340 *source_rates = default_rates;
1341 size = ARRAY_SIZE(default_rates);
a8f3ef61 1342 }
636280ba 1343
ed63baaf 1344 /* This depends on the fact that 5.4 is last value in the array */
e588fa18 1345 if (!intel_dp_source_supports_hbr2(intel_dp))
af7080f5 1346 size--;
636280ba 1347
af7080f5 1348 return size;
1349}
1350
1351static void
1352intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1353 struct intel_crtc_state *pipe_config)
1354{
1355 struct drm_device *dev = encoder->base.dev;
1356 const struct dp_link_dpll *divisor = NULL;
1357 int i, count = 0;
1358
1359 if (IS_G4X(dev)) {
1360 divisor = gen4_dpll;
1361 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1362 } else if (HAS_PCH_SPLIT(dev)) {
1363 divisor = pch_dpll;
1364 count = ARRAY_SIZE(pch_dpll);
1365 } else if (IS_CHERRYVIEW(dev)) {
1366 divisor = chv_dpll;
1367 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1368 } else if (IS_VALLEYVIEW(dev)) {
1369 divisor = vlv_dpll;
1370 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1371 }
1372
1373 if (divisor && count) {
1374 for (i = 0; i < count; i++) {
840b32b7 1375 if (pipe_config->port_clock == divisor[i].clock) {
1376 pipe_config->dpll = divisor[i].dpll;
1377 pipe_config->clock_set = true;
1378 break;
1379 }
1380 }
1381 }
1382}
1383
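/*
 * Merge-style intersection of the two rate arrays; both inputs must be
 * sorted in ascending order. Returns the number of common rates found.
 */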
1384static int intersect_rates(const int *source_rates, int source_len,
1385 const int *sink_rates, int sink_len,
94ca719e 1386 int *common_rates)
1387{
1388 int i = 0, j = 0, k = 0;
1389
1390 while (i < source_len && j < sink_len) {
1391 if (source_rates[i] == sink_rates[j]) {
1392 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1393 return k;
94ca719e 1394 common_rates[k] = source_rates[i];
1395 ++k;
1396 ++i;
1397 ++j;
1398 } else if (source_rates[i] < sink_rates[j]) {
1399 ++i;
1400 } else {
1401 ++j;
1402 }
1403 }
1404 return k;
1405}
1406
1407static int intel_dp_common_rates(struct intel_dp *intel_dp,
1408 int *common_rates)
2ecae76a 1409{
1410 const int *source_rates, *sink_rates;
1411 int source_len, sink_len;
1412
1413 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
e588fa18 1414 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1415
1416 return intersect_rates(source_rates, source_len,
1417 sink_rates, sink_len,
94ca719e 1418 common_rates);
1419}
1420
1421static void snprintf_int_array(char *str, size_t len,
1422 const int *array, int nelem)
1423{
1424 int i;
1425
1426 str[0] = '\0';
1427
1428 for (i = 0; i < nelem; i++) {
b2f505be 1429 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1430 if (r >= len)
1431 return;
1432 str += r;
1433 len -= r;
1434 }
1435}
1436
1437static void intel_dp_print_rates(struct intel_dp *intel_dp)
1438{
0336400e 1439 const int *source_rates, *sink_rates;
1440 int source_len, sink_len, common_len;
1441 int common_rates[DP_MAX_SUPPORTED_RATES];
1442 char str[128]; /* FIXME: too big for stack? */
1443
1444 if ((drm_debug & DRM_UT_KMS) == 0)
1445 return;
1446
e588fa18 1447 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1448 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1449 DRM_DEBUG_KMS("source rates: %s\n", str);
1450
1451 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1452 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1453 DRM_DEBUG_KMS("sink rates: %s\n", str);
1454
1455 common_len = intel_dp_common_rates(intel_dp, common_rates);
1456 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1457 DRM_DEBUG_KMS("common rates: %s\n", str);
1458}
1459
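/*
 * Returns the index of the first occurrence of 'find' in 'rates', or
 * DP_MAX_SUPPORTED_RATES if it is absent. Passing find == 0 against a
 * zero-initialized array yields the number of populated entries.
 */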
f4896f15 1460static int rate_to_index(int find, const int *rates)
1461{
1462 int i = 0;
1463
1464 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1465 if (find == rates[i])
1466 break;
1467
1468 return i;
1469}
1470
1471int
1472intel_dp_max_link_rate(struct intel_dp *intel_dp)
1473{
1474 int rates[DP_MAX_SUPPORTED_RATES] = {};
1475 int len;
1476
94ca719e 1477 len = intel_dp_common_rates(intel_dp, rates);
1478 if (WARN_ON(len <= 0))
1479 return 162000;
1480
1481 return rates[rate_to_index(0, rates) - 1];
1482}
1483
1484int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1485{
94ca719e 1486 return rate_to_index(rate, intel_dp->sink_rates);
1487}
1488
1489void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1490 uint8_t *link_bw, uint8_t *rate_select)
1491{
1492 if (intel_dp->num_sink_rates) {
1493 *link_bw = 0;
1494 *rate_select =
1495 intel_dp_rate_select(intel_dp, port_clock);
1496 } else {
1497 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1498 *rate_select = 0;
1499 }
1500}
1501
00c09d70 1502bool
5bfe2ac0 1503intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1504 struct intel_crtc_state *pipe_config)
a4fc5ed6 1505{
5bfe2ac0 1506 struct drm_device *dev = encoder->base.dev;
36008365 1507 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1508 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1510 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1511 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1512 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1513 int lane_count, clock;
56071a20 1514 int min_lane_count = 1;
eeb6324d 1515 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1516 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1517 int min_clock = 0;
a8f3ef61 1518 int max_clock;
083f9560 1519 int bpp, mode_rate;
ff9a6750 1520 int link_avail, link_clock;
1521 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1522 int common_len;
04a60f9f 1523 uint8_t link_bw, rate_select;
a8f3ef61 1524
94ca719e 1525 common_len = intel_dp_common_rates(intel_dp, common_rates);
1526
1527 /* No common link rates between source and sink */
94ca719e 1528 WARN_ON(common_len <= 0);
a8f3ef61 1529
94ca719e 1530 max_clock = common_len - 1;
a4fc5ed6 1531
bc7d38a4 1532 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1533 pipe_config->has_pch_encoder = true;
1534
03afc4a2 1535 pipe_config->has_dp_encoder = true;
f769cd24 1536 pipe_config->has_drrs = false;
9fcb1704 1537 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1538
1539 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1540 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1541 adjusted_mode);
1542
1543 if (INTEL_INFO(dev)->gen >= 9) {
1544 int ret;
e435d6e5 1545 ret = skl_update_scaler_crtc(pipe_config);
1546 if (ret)
1547 return ret;
1548 }
1549
b5667627 1550 if (HAS_GMCH_DISPLAY(dev))
1551 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1552 intel_connector->panel.fitting_mode);
1553 else
1554 intel_pch_panel_fitting(intel_crtc, pipe_config,
1555 intel_connector->panel.fitting_mode);
1556 }
1557
cb1793ce 1558 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1559 return false;
1560
083f9560 1561 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1562 "max bw %d pixel clock %iKHz\n",
94ca719e 1563 max_lane_count, common_rates[max_clock],
241bfc38 1564 adjusted_mode->crtc_clock);
083f9560 1565
1566 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1567 * bpc in between. */
3e7ca985 1568 bpp = pipe_config->pipe_bpp;
56071a20 1569 if (is_edp(intel_dp)) {
1570
1571 /* Get bpp from vbt only for panels that dont have bpp in edid */
1572 if (intel_connector->base.display_info.bpc == 0 &&
1573 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1574 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1575 dev_priv->vbt.edp_bpp);
1576 bpp = dev_priv->vbt.edp_bpp;
1577 }
1578
1579 /*
1580 * Use the maximum clock and number of lanes the eDP panel
1581 * advertises being capable of. The panels are generally
1582 * designed to support only a single clock and lane
1583 * configuration, and typically these values correspond to the
1584 * native resolution of the panel.
1585 */
1586 min_lane_count = max_lane_count;
1587 min_clock = max_clock;
7984211e 1588 }
657445fe 1589
36008365 1590 for (; bpp >= 6*3; bpp -= 2*3) {
1591 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1592 bpp);
36008365 1593
c6930992 1594 for (clock = min_clock; clock <= max_clock; clock++) {
1595 for (lane_count = min_lane_count;
1596 lane_count <= max_lane_count;
1597 lane_count <<= 1) {
1598
94ca719e 1599 link_clock = common_rates[clock];
1600 link_avail = intel_dp_max_data_rate(link_clock,
1601 lane_count);
1602
1603 if (mode_rate <= link_avail) {
1604 goto found;
1605 }
1606 }
1607 }
1608 }
c4867936 1609
36008365 1610 return false;
3685a8f3 1611
36008365 1612found:
1613 if (intel_dp->color_range_auto) {
1614 /*
1615 * See:
1616 * CEA-861-E - 5.1 Default Encoding Parameters
1617 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1618 */
1619 pipe_config->limited_color_range =
1620 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1621 } else {
1622 pipe_config->limited_color_range =
1623 intel_dp->limited_color_range;
1624 }
1625
90a6b7b0 1626 pipe_config->lane_count = lane_count;
a8f3ef61 1627
657445fe 1628 pipe_config->pipe_bpp = bpp;
94ca719e 1629 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1630
1631 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1632 &link_bw, &rate_select);
1633
1634 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1635 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1636 pipe_config->port_clock, bpp);
1637 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1638 mode_rate, link_avail);
a4fc5ed6 1639
03afc4a2 1640 intel_link_compute_m_n(bpp, lane_count,
1641 adjusted_mode->crtc_clock,
1642 pipe_config->port_clock,
03afc4a2 1643 &pipe_config->dp_m_n);
9d1a455b 1644
439d7ac0 1645 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1646 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1647 pipe_config->has_drrs = true;
1648 intel_link_compute_m_n(bpp, lane_count,
1649 intel_connector->panel.downclock_mode->clock,
1650 pipe_config->port_clock,
1651 &pipe_config->dp_m2_n2);
1652 }
1653
ef11bdb3 1654 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1655 skl_edp_set_pll_config(pipe_config);
1656 else if (IS_BROXTON(dev))
1657 /* handled in ddi */;
5416d871 1658 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1659 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1660 else
840b32b7 1661 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1662
03afc4a2 1663 return true;
1664}
1665
1666void intel_dp_set_link_params(struct intel_dp *intel_dp,
1667 const struct intel_crtc_state *pipe_config)
1668{
1669 intel_dp->link_rate = pipe_config->port_clock;
1670 intel_dp->lane_count = pipe_config->lane_count;
1671}
1672
8ac33ed3 1673static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1674{
b934223d 1675 struct drm_device *dev = encoder->base.dev;
417e822d 1676 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1677 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1678 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1679 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1680 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1681
901c2daf
VS
1682 intel_dp_set_link_params(intel_dp, crtc->config);
1683
417e822d 1684 /*
1a2eb460 1685 * There are four kinds of DP registers:
417e822d
KP
1686 *
1687 * IBX PCH
1a2eb460
KP
1688 * SNB CPU
1689 * IVB CPU
417e822d
KP
1690 * CPT PCH
1691 *
1692 * IBX PCH and CPU are the same for almost everything,
1693 * except that the CPU DP PLL is configured in this
1694 * register
1695 *
1696 * CPT PCH is quite different, having many bits moved
1697 * to the TRANS_DP_CTL register instead. That
1698 * configuration happens (oddly) in ironlake_pch_enable
1699 */
9c9e7927 1700
417e822d
KP
1701 /* Preserve the BIOS-computed detected bit. This is
1702 * supposed to be read-only.
1703 */
1704 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1705
417e822d 1706 /* Handle DP bits in common between all three register formats */
417e822d 1707 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1708 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1709
417e822d 1710 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1711
39e5fa88 1712 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1713 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1714 intel_dp->DP |= DP_SYNC_HS_HIGH;
1715 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1716 intel_dp->DP |= DP_SYNC_VS_HIGH;
1717 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1718
6aba5b6c 1719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1720 intel_dp->DP |= DP_ENHANCED_FRAMING;
1721
7c62a164 1722 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1723 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1724 u32 trans_dp;
1725
39e5fa88 1726 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1727
1728 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1729 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1730 trans_dp |= TRANS_DP_ENH_FRAMING;
1731 else
1732 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1733 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1734 } else {
0f2a2a75 1735 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 1736 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
0f2a2a75 1737 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1738
1739 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1740 intel_dp->DP |= DP_SYNC_HS_HIGH;
1741 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1742 intel_dp->DP |= DP_SYNC_VS_HIGH;
1743 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1744
6aba5b6c 1745 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1746 intel_dp->DP |= DP_ENHANCED_FRAMING;
1747
39e5fa88 1748 if (IS_CHERRYVIEW(dev))
44f37d1f 1749 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1750 else if (crtc->pipe == PIPE_B)
1751 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1752 }
a4fc5ed6
KP
1753}
1754
ffd6749d
PZ
1755#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1756#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1757
1a5ef5b7
PZ
1758#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1759#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1760
ffd6749d
PZ
1761#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1762#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1763
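/*
 * wait_panel_status() below polls the PP_STATUS register until
 * (status & mask) == value, giving up after 5 seconds. A minimal usage
 * sketch with the mask/value pairs defined above:
 *
 *	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 */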
4be73780 1764static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1765 u32 mask,
1766 u32 value)
bd943159 1767{
30add22d 1768 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1769 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1770 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 1771
e39b999a
VS
1772 lockdep_assert_held(&dev_priv->pps_mutex);
1773
bf13e81b
JN
1774 pp_stat_reg = _pp_stat_reg(intel_dp);
1775 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1776
99ea7127 1777 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1778 mask, value,
1779 I915_READ(pp_stat_reg),
1780 I915_READ(pp_ctrl_reg));
32ce697c 1781
453c5420 1782 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1783 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1784 I915_READ(pp_stat_reg),
1785 I915_READ(pp_ctrl_reg));
32ce697c 1786 }
54c136d4
CW
1787
1788 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1789}
32ce697c 1790
4be73780 1791static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1792{
1793 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1794 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1795}
1796
4be73780 1797static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1798{
1799 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1800 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1801}
1802
4be73780 1803static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127 1804{
d28d4731
AK
1805 ktime_t panel_power_on_time;
1806 s64 panel_power_off_duration;
1807
99ea7127 1808 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c 1809
d28d4731
AK
1810 /* take the difference of current time and panel power off time
1811 * and then make the panel wait for t11_t12 if needed. */
1812 panel_power_on_time = ktime_get_boottime();
1813 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1814
dce56b3c
PZ
1815 /* When we disable the VDD override bit last, we have to do the manual
1816 * wait. */
d28d4731
AK
1817 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1818 wait_remaining_ms_from_jiffies(jiffies,
1819 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
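	/*
	 * Illustrative numbers only: with a 500 ms panel_power_cycle_delay
	 * (t11_t12) and a panel switched off 300 ms ago, the call above
	 * sleeps for the remaining ~200 ms before the sequencer state is
	 * polled below.
	 */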
dce56b3c 1820
4be73780 1821 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1822}
1823
4be73780 1824static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1825{
1826 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1827 intel_dp->backlight_on_delay);
1828}
1829
4be73780 1830static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1831{
1832 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1833 intel_dp->backlight_off_delay);
1834}
99ea7127 1835
832dd3c1
KP
1836/* Read the current pp_control value, unlocking the register if it
1837 * is locked
1838 */
1839
453c5420 1840static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1841{
453c5420
JB
1842 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1843 struct drm_i915_private *dev_priv = dev->dev_private;
1844 u32 control;
832dd3c1 1845
e39b999a
VS
1846 lockdep_assert_held(&dev_priv->pps_mutex);
1847
bf13e81b 1848 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1849 if (!IS_BROXTON(dev)) {
1850 control &= ~PANEL_UNLOCK_MASK;
1851 control |= PANEL_UNLOCK_REGS;
1852 }
832dd3c1 1853 return control;
bd943159
KP
1854}
1855
951468f3
VS
1856/*
1857 * Must be paired with edp_panel_vdd_off().
1858 * Must hold pps_mutex around the whole on/off sequence.
1859 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1860 */
1e0560e0 1861static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1862{
30add22d 1863 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1864 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1865 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1866 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1867 enum intel_display_power_domain power_domain;
5d613501 1868 u32 pp;
f0f59a00 1869 i915_reg_t pp_stat_reg, pp_ctrl_reg;
adddaaf4 1870 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1871
e39b999a
VS
1872 lockdep_assert_held(&dev_priv->pps_mutex);
1873
97af61f5 1874 if (!is_edp(intel_dp))
adddaaf4 1875 return false;
bd943159 1876
2c623c11 1877 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1878 intel_dp->want_panel_vdd = true;
99ea7127 1879
4be73780 1880 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1881 return need_to_disable;
b0665d57 1882
25f78f58 1883 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 1884 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1885
3936fcf4
VS
1886 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1887 port_name(intel_dig_port->port));
bd943159 1888
4be73780
DV
1889 if (!edp_have_panel_power(intel_dp))
1890 wait_panel_power_cycle(intel_dp);
99ea7127 1891
453c5420 1892 pp = ironlake_get_pp_control(intel_dp);
5d613501 1893 pp |= EDP_FORCE_VDD;
ebf33b18 1894
bf13e81b
JN
1895 pp_stat_reg = _pp_stat_reg(intel_dp);
1896 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1897
1898 I915_WRITE(pp_ctrl_reg, pp);
1899 POSTING_READ(pp_ctrl_reg);
1900 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1901 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1902 /*
1903 * If the panel wasn't on, delay before accessing aux channel
1904 */
4be73780 1905 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1906 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1907 port_name(intel_dig_port->port));
f01eca2e 1908 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1909 }
adddaaf4
JN
1910
1911 return need_to_disable;
1912}
1913
951468f3
VS
1914/*
1915 * Must be paired with intel_edp_panel_vdd_off() or
1916 * intel_edp_panel_off().
1917 * Nested calls to these functions are not allowed since
1918 * we drop the lock. Caller must use some higher level
1919 * locking to prevent nested calls from other threads.
1920 */
b80d6c78 1921void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1922{
c695b6b6 1923 bool vdd;
adddaaf4 1924
c695b6b6
VS
1925 if (!is_edp(intel_dp))
1926 return;
1927
773538e8 1928 pps_lock(intel_dp);
c695b6b6 1929 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1930 pps_unlock(intel_dp);
c695b6b6 1931
e2c719b7 1932 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1933 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1934}
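/*
 * A minimal usage sketch of the VDD/panel power helpers, following the
 * same pattern intel_enable_dp() uses further down in this file:
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */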
1935
4be73780 1936static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1937{
30add22d 1938 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1939 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1940 struct intel_digital_port *intel_dig_port =
1941 dp_to_dig_port(intel_dp);
1942 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1943 enum intel_display_power_domain power_domain;
5d613501 1944 u32 pp;
f0f59a00 1945 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1946
e39b999a 1947 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1948
15e899a0 1949 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1950
15e899a0 1951 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1952 return;
b0665d57 1953
3936fcf4
VS
1954 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1955 port_name(intel_dig_port->port));
bd943159 1956
be2c9196
VS
1957 pp = ironlake_get_pp_control(intel_dp);
1958 pp &= ~EDP_FORCE_VDD;
453c5420 1959
be2c9196
VS
1960 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1961 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1962
be2c9196
VS
1963 I915_WRITE(pp_ctrl_reg, pp);
1964 POSTING_READ(pp_ctrl_reg);
90791a5c 1965
be2c9196
VS
1966 /* Make sure sequencer is idle before allowing subsequent activity */
1967 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1968 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1969
be2c9196 1970 if ((pp & POWER_TARGET_ON) == 0)
d28d4731 1971 intel_dp->panel_power_off_time = ktime_get_boottime();
e9cb81a2 1972
25f78f58 1973 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1974 intel_display_power_put(dev_priv, power_domain);
bd943159 1975}
5d613501 1976
4be73780 1977static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1978{
1979 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1980 struct intel_dp, panel_vdd_work);
bd943159 1981
773538e8 1982 pps_lock(intel_dp);
15e899a0
VS
1983 if (!intel_dp->want_panel_vdd)
1984 edp_panel_vdd_off_sync(intel_dp);
773538e8 1985 pps_unlock(intel_dp);
bd943159
KP
1986}
1987
aba86890
ID
1988static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1989{
1990 unsigned long delay;
1991
1992 /*
1993 * Queue the timer to fire a long time from now (relative to the power
1994 * down delay) to keep the panel power up across a sequence of
1995 * operations.
1996 */
1997 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1998 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1999}
2000
951468f3
VS
2001/*
2002 * Must be paired with edp_panel_vdd_on().
2003 * Must hold pps_mutex around the whole on/off sequence.
2004 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2005 */
4be73780 2006static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 2007{
e39b999a
VS
2008 struct drm_i915_private *dev_priv =
2009 intel_dp_to_dev(intel_dp)->dev_private;
2010
2011 lockdep_assert_held(&dev_priv->pps_mutex);
2012
97af61f5
KP
2013 if (!is_edp(intel_dp))
2014 return;
5d613501 2015
e2c719b7 2016 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 2017 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 2018
bd943159
KP
2019 intel_dp->want_panel_vdd = false;
2020
aba86890 2021 if (sync)
4be73780 2022 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2023 else
2024 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2025}
2026
9f0fb5be 2027static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 2028{
30add22d 2029 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2030 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 2031 u32 pp;
f0f59a00 2032 i915_reg_t pp_ctrl_reg;
9934c132 2033
9f0fb5be
VS
2034 lockdep_assert_held(&dev_priv->pps_mutex);
2035
97af61f5 2036 if (!is_edp(intel_dp))
bd943159 2037 return;
99ea7127 2038
3936fcf4
VS
2039 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 2041
e7a89ace
VS
2042 if (WARN(edp_have_panel_power(intel_dp),
2043 "eDP port %c panel power already on\n",
2044 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 2045 return;
9934c132 2046
4be73780 2047 wait_panel_power_cycle(intel_dp);
37c6c9b0 2048
bf13e81b 2049 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2050 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
2051 if (IS_GEN5(dev)) {
2052 /* ILK workaround: disable reset around power sequence */
2053 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
2054 I915_WRITE(pp_ctrl_reg, pp);
2055 POSTING_READ(pp_ctrl_reg);
05ce1a49 2056 }
37c6c9b0 2057
1c0ae80a 2058 pp |= POWER_TARGET_ON;
99ea7127
KP
2059 if (!IS_GEN5(dev))
2060 pp |= PANEL_POWER_RESET;
2061
453c5420
JB
2062 I915_WRITE(pp_ctrl_reg, pp);
2063 POSTING_READ(pp_ctrl_reg);
9934c132 2064
4be73780 2065 wait_panel_on(intel_dp);
dce56b3c 2066 intel_dp->last_power_on = jiffies;
9934c132 2067
05ce1a49
KP
2068 if (IS_GEN5(dev)) {
2069 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2070 I915_WRITE(pp_ctrl_reg, pp);
2071 POSTING_READ(pp_ctrl_reg);
05ce1a49 2072 }
9f0fb5be 2073}
e39b999a 2074
9f0fb5be
VS
2075void intel_edp_panel_on(struct intel_dp *intel_dp)
2076{
2077 if (!is_edp(intel_dp))
2078 return;
2079
2080 pps_lock(intel_dp);
2081 edp_panel_on(intel_dp);
773538e8 2082 pps_unlock(intel_dp);
9934c132
JB
2083}
2084
9f0fb5be
VS
2085
2086static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2087{
4e6e1a54
ID
2088 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2089 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2090 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2091 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2092 enum intel_display_power_domain power_domain;
99ea7127 2093 u32 pp;
f0f59a00 2094 i915_reg_t pp_ctrl_reg;
9934c132 2095
9f0fb5be
VS
2096 lockdep_assert_held(&dev_priv->pps_mutex);
2097
97af61f5
KP
2098 if (!is_edp(intel_dp))
2099 return;
37c6c9b0 2100
3936fcf4
VS
2101 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2102 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2103
3936fcf4
VS
2104 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2105 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2106
453c5420 2107 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2108 /* We need to switch off panel power _and_ force vdd, for otherwise some
2109 * panels get very unhappy and cease to work. */
b3064154
PJ
2110 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2111 EDP_BLC_ENABLE);
453c5420 2112
bf13e81b 2113 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2114
849e39f5
PZ
2115 intel_dp->want_panel_vdd = false;
2116
453c5420
JB
2117 I915_WRITE(pp_ctrl_reg, pp);
2118 POSTING_READ(pp_ctrl_reg);
9934c132 2119
d28d4731 2120 intel_dp->panel_power_off_time = ktime_get_boottime();
4be73780 2121 wait_panel_off(intel_dp);
849e39f5
PZ
2122
2123 /* We got a reference when we enabled the VDD. */
25f78f58 2124 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 2125 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2126}
e39b999a 2127
9f0fb5be
VS
2128void intel_edp_panel_off(struct intel_dp *intel_dp)
2129{
2130 if (!is_edp(intel_dp))
2131 return;
e39b999a 2132
9f0fb5be
VS
2133 pps_lock(intel_dp);
2134 edp_panel_off(intel_dp);
773538e8 2135 pps_unlock(intel_dp);
9934c132
JB
2136}
2137
1250d107
JN
2138/* Enable backlight in the panel power control. */
2139static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2140{
da63a9f2
PZ
2141 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2142 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2143 struct drm_i915_private *dev_priv = dev->dev_private;
2144 u32 pp;
f0f59a00 2145 i915_reg_t pp_ctrl_reg;
32f9d658 2146
01cb9ea6
JB
2147 /*
2148 * If we enable the backlight right away following a panel power
2149 * on, we may see slight flicker as the panel syncs with the eDP
2150 * link. So delay a bit to make sure the image is solid before
2151 * allowing it to appear.
2152 */
4be73780 2153 wait_backlight_on(intel_dp);
e39b999a 2154
773538e8 2155 pps_lock(intel_dp);
e39b999a 2156
453c5420 2157 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2158 pp |= EDP_BLC_ENABLE;
453c5420 2159
bf13e81b 2160 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2161
2162 I915_WRITE(pp_ctrl_reg, pp);
2163 POSTING_READ(pp_ctrl_reg);
e39b999a 2164
773538e8 2165 pps_unlock(intel_dp);
32f9d658
ZW
2166}
2167
1250d107
JN
2168/* Enable backlight PWM and backlight PP control. */
2169void intel_edp_backlight_on(struct intel_dp *intel_dp)
2170{
2171 if (!is_edp(intel_dp))
2172 return;
2173
2174 DRM_DEBUG_KMS("\n");
2175
2176 intel_panel_enable_backlight(intel_dp->attached_connector);
2177 _intel_edp_backlight_on(intel_dp);
2178}
2179
2180/* Disable backlight in the panel power control. */
2181static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2182{
30add22d 2183 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 u32 pp;
f0f59a00 2186 i915_reg_t pp_ctrl_reg;
32f9d658 2187
f01eca2e
KP
2188 if (!is_edp(intel_dp))
2189 return;
2190
773538e8 2191 pps_lock(intel_dp);
e39b999a 2192
453c5420 2193 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2194 pp &= ~EDP_BLC_ENABLE;
453c5420 2195
bf13e81b 2196 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2197
2198 I915_WRITE(pp_ctrl_reg, pp);
2199 POSTING_READ(pp_ctrl_reg);
f7d2323c 2200
773538e8 2201 pps_unlock(intel_dp);
e39b999a
VS
2202
2203 intel_dp->last_backlight_off = jiffies;
f7d2323c 2204 edp_wait_backlight_off(intel_dp);
1250d107 2205}
f7d2323c 2206
1250d107
JN
2207/* Disable backlight PP control and backlight PWM. */
2208void intel_edp_backlight_off(struct intel_dp *intel_dp)
2209{
2210 if (!is_edp(intel_dp))
2211 return;
2212
2213 DRM_DEBUG_KMS("\n");
f7d2323c 2214
1250d107 2215 _intel_edp_backlight_off(intel_dp);
f7d2323c 2216 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2217}
a4fc5ed6 2218
73580fb7
JN
2219/*
2220 * Hook for controlling the panel power control backlight through the bl_power
2221 * sysfs attribute. Take care to handle multiple calls.
2222 */
2223static void intel_edp_backlight_power(struct intel_connector *connector,
2224 bool enable)
2225{
2226 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2227 bool is_enabled;
2228
773538e8 2229 pps_lock(intel_dp);
e39b999a 2230 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2231 pps_unlock(intel_dp);
73580fb7
JN
2232
2233 if (is_enabled == enable)
2234 return;
2235
23ba9373
JN
2236 DRM_DEBUG_KMS("panel power control backlight %s\n",
2237 enable ? "enable" : "disable");
73580fb7
JN
2238
2239 if (enable)
2240 _intel_edp_backlight_on(intel_dp);
2241 else
2242 _intel_edp_backlight_off(intel_dp);
2243}
2244
64e1077a
VS
2245static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2246{
2247 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2248 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2249 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2250
2251 I915_STATE_WARN(cur_state != state,
2252 "DP port %c state assertion failure (expected %s, current %s)\n",
2253 port_name(dig_port->port),
87ad3212 2254 onoff(state), onoff(cur_state));
64e1077a
VS
2255}
2256#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2257
2258static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2259{
2260 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2261
2262 I915_STATE_WARN(cur_state != state,
2263 "eDP PLL state assertion failure (expected %s, current %s)\n",
87ad3212 2264 onoff(state), onoff(cur_state));
64e1077a
VS
2265}
2266#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2267#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2268
2bd2ad64 2269static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2270{
da63a9f2 2271 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2272 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2273 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2274
64e1077a
VS
2275 assert_pipe_disabled(dev_priv, crtc->pipe);
2276 assert_dp_port_disabled(intel_dp);
2277 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2278
abfce949
VS
2279 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2280 crtc->config->port_clock);
2281
2282 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2283
2284 if (crtc->config->port_clock == 162000)
2285 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2286 else
2287 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2288
2289 I915_WRITE(DP_A, intel_dp->DP);
2290 POSTING_READ(DP_A);
2291 udelay(500);
2292
0767935e 2293 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2294
0767935e 2295 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2296 POSTING_READ(DP_A);
2297 udelay(200);
d240f20f
JB
2298}
2299
2bd2ad64 2300static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2301{
da63a9f2 2302 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2303 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2304 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2305
64e1077a
VS
2306 assert_pipe_disabled(dev_priv, crtc->pipe);
2307 assert_dp_port_disabled(intel_dp);
2308 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2309
abfce949
VS
2310 DRM_DEBUG_KMS("disabling eDP PLL\n");
2311
6fec7662 2312 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2313
6fec7662 2314 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2315 POSTING_READ(DP_A);
d240f20f
JB
2316 udelay(200);
2317}
2318
c7ad3810 2319/* If the sink supports it, try to set the power state appropriately */
c19b0669 2320void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2321{
2322 int ret, i;
2323
2324 /* Should have a valid DPCD by this point */
2325 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2326 return;
2327
2328 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2329 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2330 DP_SET_POWER_D3);
c7ad3810
JB
2331 } else {
2332 /*
2333 * When turning on, we need to retry for 1ms to give the sink
2334 * time to wake up.
2335 */
2336 for (i = 0; i < 3; i++) {
9d1a1031
JN
2337 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2338 DP_SET_POWER_D0);
c7ad3810
JB
2339 if (ret == 1)
2340 break;
2341 msleep(1);
2342 }
2343 }
f9cac721
JN
2344
2345 if (ret != 1)
2346 DRM_DEBUG_KMS("failed to %s sink power state\n",
2347 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2348}
2349
19d8fe15
DV
2350static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2351 enum pipe *pipe)
d240f20f 2352{
19d8fe15 2353 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2354 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2355 struct drm_device *dev = encoder->base.dev;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2357 enum intel_display_power_domain power_domain;
2358 u32 tmp;
6fa9a5ec 2359 bool ret;
6d129bea
ID
2360
2361 power_domain = intel_display_port_power_domain(encoder);
6fa9a5ec 2362 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
6d129bea
ID
2363 return false;
2364
6fa9a5ec
ID
2365 ret = false;
2366
6d129bea 2367 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2368
2369 if (!(tmp & DP_PORT_EN))
6fa9a5ec 2370 goto out;
19d8fe15 2371
39e5fa88 2372 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2373 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2374 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2375 enum pipe p;
19d8fe15 2376
adc289d7
VS
2377 for_each_pipe(dev_priv, p) {
2378 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2379 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2380 *pipe = p;
6fa9a5ec
ID
2381 ret = true;
2382
2383 goto out;
19d8fe15
DV
2384 }
2385 }
19d8fe15 2386
4a0833ec 2387 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
f0f59a00 2388 i915_mmio_reg_offset(intel_dp->output_reg));
39e5fa88
VS
2389 } else if (IS_CHERRYVIEW(dev)) {
2390 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2391 } else {
2392 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2393 }
d240f20f 2394
6fa9a5ec
ID
2395 ret = true;
2396
2397out:
2398 intel_display_power_put(dev_priv, power_domain);
2399
2400 return ret;
19d8fe15 2401}
d240f20f 2402
045ac3b5 2403static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2404 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2405{
2406 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2407 u32 tmp, flags = 0;
63000ef6
XZ
2408 struct drm_device *dev = encoder->base.dev;
2409 struct drm_i915_private *dev_priv = dev->dev_private;
2410 enum port port = dp_to_dig_port(intel_dp)->port;
2411 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2412 int dotclock;
045ac3b5 2413
9ed109a7 2414 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2415
2416 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2417
39e5fa88 2418 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2419 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2420
2421 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2422 flags |= DRM_MODE_FLAG_PHSYNC;
2423 else
2424 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2425
b81e34c2 2426 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2427 flags |= DRM_MODE_FLAG_PVSYNC;
2428 else
2429 flags |= DRM_MODE_FLAG_NVSYNC;
2430 } else {
39e5fa88 2431 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2432 flags |= DRM_MODE_FLAG_PHSYNC;
2433 else
2434 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2435
39e5fa88 2436 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2437 flags |= DRM_MODE_FLAG_PVSYNC;
2438 else
2439 flags |= DRM_MODE_FLAG_NVSYNC;
2440 }
045ac3b5 2441
2d112de7 2442 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2443
8c875fca 2444 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 2445 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
2446 pipe_config->limited_color_range = true;
2447
eb14cb74
VS
2448 pipe_config->has_dp_encoder = true;
2449
90a6b7b0
VS
2450 pipe_config->lane_count =
2451 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2452
eb14cb74
VS
2453 intel_dp_get_m_n(crtc, pipe_config);
2454
18442d08 2455 if (port == PORT_A) {
b377e0df 2456 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2457 pipe_config->port_clock = 162000;
2458 else
2459 pipe_config->port_clock = 270000;
2460 }
18442d08
VS
2461
2462 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2463 &pipe_config->dp_m_n);
2464
2465 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2466 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2467
2d112de7 2468 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2469
c6cd2ee2
JN
2470 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2471 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2472 /*
2473 * This is a big fat ugly hack.
2474 *
2475 * Some machines in UEFI boot mode provide us a VBT that has 18
2476 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2477 * unknown we fail to light up. Yet the same BIOS boots up with
2478 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2479 * max, not what it tells us to use.
2480 *
2481 * Note: This will still be broken if the eDP panel is not lit
2482 * up by the BIOS, and thus we can't get the mode at module
2483 * load.
2484 */
2485 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2486 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2487 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2488 }
045ac3b5
JB
2489}
2490
e8cb4558 2491static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2492{
e8cb4558 2493 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2494 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2495 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2496
6e3c9717 2497 if (crtc->config->has_audio)
495a5bb8 2498 intel_audio_codec_disable(encoder);
6cb49835 2499
b32c6f48
RV
2500 if (HAS_PSR(dev) && !HAS_DDI(dev))
2501 intel_psr_disable(intel_dp);
2502
6cb49835
DV
2503 /* Make sure the panel is off before trying to change the mode. But also
2504 * ensure that we have vdd while we switch off the panel. */
24f3e092 2505 intel_edp_panel_vdd_on(intel_dp);
4be73780 2506 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2507 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2508 intel_edp_panel_off(intel_dp);
3739850b 2509
08aff3fe
VS
2510 /* disable the port before the pipe on g4x */
2511 if (INTEL_INFO(dev)->gen < 5)
3739850b 2512 intel_dp_link_down(intel_dp);
d240f20f
JB
2513}
2514
08aff3fe 2515static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2516{
2bd2ad64 2517 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2518 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2519
49277c31 2520 intel_dp_link_down(intel_dp);
abfce949
VS
2521
2522 /* Only ilk+ has port A */
08aff3fe
VS
2523 if (port == PORT_A)
2524 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2525}
2526
2527static void vlv_post_disable_dp(struct intel_encoder *encoder)
2528{
2529 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2530
2531 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2532}
2533
a8f327fb
VS
2534static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2535 bool reset)
580d3811 2536{
a8f327fb
VS
2537 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2538 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2539 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2540 enum pipe pipe = crtc->pipe;
2541 uint32_t val;
580d3811 2542
a8f327fb
VS
2543 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2544 if (reset)
2545 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2546 else
2547 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2548 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2549
a8f327fb
VS
2550 if (crtc->config->lane_count > 2) {
2551 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2552 if (reset)
2553 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2554 else
2555 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2556 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2557 }
580d3811 2558
97fd4d5c 2559 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2560 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2561 if (reset)
2562 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2563 else
2564 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2565 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2566
a8f327fb 2567 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2568 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2569 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2570 if (reset)
2571 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2572 else
2573 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2574 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2575 }
a8f327fb 2576}
97fd4d5c 2577
a8f327fb
VS
2578static void chv_post_disable_dp(struct intel_encoder *encoder)
2579{
2580 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2581 struct drm_device *dev = encoder->base.dev;
2582 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2583
a8f327fb
VS
2584 intel_dp_link_down(intel_dp);
2585
2586 mutex_lock(&dev_priv->sb_lock);
2587
2588 /* Assert data lane reset */
2589 chv_data_lane_soft_reset(encoder, true);
580d3811 2590
a580516d 2591 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2592}
2593
7b13b58a
VS
2594static void
2595_intel_dp_set_link_train(struct intel_dp *intel_dp,
2596 uint32_t *DP,
2597 uint8_t dp_train_pat)
2598{
2599 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2600 struct drm_device *dev = intel_dig_port->base.base.dev;
2601 struct drm_i915_private *dev_priv = dev->dev_private;
2602 enum port port = intel_dig_port->port;
2603
2604 if (HAS_DDI(dev)) {
2605 uint32_t temp = I915_READ(DP_TP_CTL(port));
2606
2607 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2608 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2609 else
2610 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2611
2612 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2613 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2614 case DP_TRAINING_PATTERN_DISABLE:
2615 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2616
2617 break;
2618 case DP_TRAINING_PATTERN_1:
2619 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2620 break;
2621 case DP_TRAINING_PATTERN_2:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2623 break;
2624 case DP_TRAINING_PATTERN_3:
2625 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2626 break;
2627 }
2628 I915_WRITE(DP_TP_CTL(port), temp);
2629
39e5fa88
VS
2630 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2631 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2632 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2633
2634 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2635 case DP_TRAINING_PATTERN_DISABLE:
2636 *DP |= DP_LINK_TRAIN_OFF_CPT;
2637 break;
2638 case DP_TRAINING_PATTERN_1:
2639 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2640 break;
2641 case DP_TRAINING_PATTERN_2:
2642 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2643 break;
2644 case DP_TRAINING_PATTERN_3:
2645 DRM_ERROR("DP training pattern 3 not supported\n");
2646 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2647 break;
2648 }
2649
2650 } else {
2651 if (IS_CHERRYVIEW(dev))
2652 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2653 else
2654 *DP &= ~DP_LINK_TRAIN_MASK;
2655
2656 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2657 case DP_TRAINING_PATTERN_DISABLE:
2658 *DP |= DP_LINK_TRAIN_OFF;
2659 break;
2660 case DP_TRAINING_PATTERN_1:
2661 *DP |= DP_LINK_TRAIN_PAT_1;
2662 break;
2663 case DP_TRAINING_PATTERN_2:
2664 *DP |= DP_LINK_TRAIN_PAT_2;
2665 break;
2666 case DP_TRAINING_PATTERN_3:
2667 if (IS_CHERRYVIEW(dev)) {
2668 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2669 } else {
2670 DRM_ERROR("DP training pattern 3 not supported\n");
2671 *DP |= DP_LINK_TRAIN_PAT_2;
2672 }
2673 break;
2674 }
2675 }
2676}
2677
2678static void intel_dp_enable_port(struct intel_dp *intel_dp)
2679{
2680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2681 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2682 struct intel_crtc *crtc =
2683 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2684
7b13b58a
VS
2685 /* enable with pattern 1 (as per spec) */
2686 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2687 DP_TRAINING_PATTERN_1);
2688
2689 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2690 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2691
2692 /*
2693 * Magic for VLV/CHV. We _must_ first set up the register
2694 * without actually enabling the port, and then do another
2695 * write to enable the port. Otherwise link training will
2696 * fail when the power sequencer is freshly used for this port.
2697 */
2698 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2699 if (crtc->config->has_audio)
2700 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2701
2702 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2703 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2704}
2705
e8cb4558 2706static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2707{
e8cb4558
DV
2708 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2709 struct drm_device *dev = encoder->base.dev;
2710 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2711 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2712 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2713 enum port port = dp_to_dig_port(intel_dp)->port;
2714 enum pipe pipe = crtc->pipe;
5d613501 2715
0c33d8d7
DV
2716 if (WARN_ON(dp_reg & DP_PORT_EN))
2717 return;
5d613501 2718
093e3f13
VS
2719 pps_lock(intel_dp);
2720
666a4537 2721 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
093e3f13
VS
2722 vlv_init_panel_power_sequencer(intel_dp);
2723
7864578a
VS
2724 /*
2725 * We get an occasional spurious underrun between the port
2726 * enable and vdd enable, when enabling port A eDP.
2727 *
2728 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2729 */
2730 if (port == PORT_A)
2731 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2732
7b13b58a 2733 intel_dp_enable_port(intel_dp);
093e3f13 2734
d6fbdd15
VS
2735 if (port == PORT_A && IS_GEN5(dev_priv)) {
2736 /*
2737 * Underrun reporting for the other pipe was disabled in
2738 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2739 * enabled, so it's now safe to re-enable underrun reporting.
2740 */
2741 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2742 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2743 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2744 }
2745
093e3f13
VS
2746 edp_panel_vdd_on(intel_dp);
2747 edp_panel_on(intel_dp);
2748 edp_panel_vdd_off(intel_dp, true);
2749
7864578a
VS
2750 if (port == PORT_A)
2751 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2752
093e3f13
VS
2753 pps_unlock(intel_dp);
2754
666a4537 2755 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e0fce78f
VS
2756 unsigned int lane_mask = 0x0;
2757
2758 if (IS_CHERRYVIEW(dev))
2759 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2760
9b6de0a1
VS
2761 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2762 lane_mask);
e0fce78f 2763 }
61234fa5 2764
f01eca2e 2765 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2766 intel_dp_start_link_train(intel_dp);
3ab9c637 2767 intel_dp_stop_link_train(intel_dp);
c1dec79a 2768
6e3c9717 2769 if (crtc->config->has_audio) {
c1dec79a 2770 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2771 pipe_name(pipe));
c1dec79a
JN
2772 intel_audio_codec_enable(encoder);
2773 }
ab1f90f9 2774}
89b667f8 2775
ecff4f3b
JN
2776static void g4x_enable_dp(struct intel_encoder *encoder)
2777{
828f5c6e
JN
2778 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2779
ecff4f3b 2780 intel_enable_dp(encoder);
4be73780 2781 intel_edp_backlight_on(intel_dp);
ab1f90f9 2782}
89b667f8 2783
ab1f90f9
JN
2784static void vlv_enable_dp(struct intel_encoder *encoder)
2785{
828f5c6e
JN
2786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2787
4be73780 2788 intel_edp_backlight_on(intel_dp);
b32c6f48 2789 intel_psr_enable(intel_dp);
d240f20f
JB
2790}
2791
ecff4f3b 2792static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2793{
d6fbdd15 2794 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2795 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2796 enum port port = dp_to_dig_port(intel_dp)->port;
2797 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2798
8ac33ed3
DV
2799 intel_dp_prepare(encoder);
2800
d6fbdd15
VS
2801 if (port == PORT_A && IS_GEN5(dev_priv)) {
2802 /*
2803 * We get FIFO underruns on the other pipe when
2804 * enabling the CPU eDP PLL, and when enabling CPU
2805 * eDP port. We could potentially avoid the PLL
2806 * underrun with a vblank wait just prior to enabling
2807 * the PLL, but that doesn't appear to help the port
2808 * enable case. Just sweep it all under the rug.
2809 */
2810 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2811 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2812 }
2813
d41f1efb 2814 /* Only ilk+ has port A */
abfce949 2815 if (port == PORT_A)
ab1f90f9
JN
2816 ironlake_edp_pll_on(intel_dp);
2817}
2818
83b84597
VS
2819static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2820{
2821 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2822 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2823 enum pipe pipe = intel_dp->pps_pipe;
f0f59a00 2824 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
83b84597
VS
2825
2826 edp_panel_vdd_off_sync(intel_dp);
2827
2828 /*
2829 * VLV seems to get confused when multiple power sequencers
2830 * have the same port selected (even if only one has power/vdd
2831 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2832 * CHV, on the other hand, doesn't seem to mind having the same port
2833 * selected in multiple power sequencers, but let's always clear the
2834 * port select when logically disconnecting a power sequencer
2835 * from a port.
2836 */
2837 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2838 pipe_name(pipe), port_name(intel_dig_port->port));
2839 I915_WRITE(pp_on_reg, 0);
2840 POSTING_READ(pp_on_reg);
2841
2842 intel_dp->pps_pipe = INVALID_PIPE;
2843}
2844
a4a5d2f8
VS
2845static void vlv_steal_power_sequencer(struct drm_device *dev,
2846 enum pipe pipe)
2847{
2848 struct drm_i915_private *dev_priv = dev->dev_private;
2849 struct intel_encoder *encoder;
2850
2851 lockdep_assert_held(&dev_priv->pps_mutex);
2852
ac3c12e4
VS
2853 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2854 return;
2855
19c8054c 2856 for_each_intel_encoder(dev, encoder) {
a4a5d2f8 2857 struct intel_dp *intel_dp;
773538e8 2858 enum port port;
a4a5d2f8
VS
2859
2860 if (encoder->type != INTEL_OUTPUT_EDP)
2861 continue;
2862
2863 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2864 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2865
2866 if (intel_dp->pps_pipe != pipe)
2867 continue;
2868
2869 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2870 pipe_name(pipe), port_name(port));
a4a5d2f8 2871
e02f9a06 2872 WARN(encoder->base.crtc,
034e43c6
VS
2873 "stealing pipe %c power sequencer from active eDP port %c\n",
2874 pipe_name(pipe), port_name(port));
a4a5d2f8 2875
a4a5d2f8 2876 /* make sure vdd is off before we steal it */
83b84597 2877 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2878 }
2879}
2880
2881static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2882{
2883 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2884 struct intel_encoder *encoder = &intel_dig_port->base;
2885 struct drm_device *dev = encoder->base.dev;
2886 struct drm_i915_private *dev_priv = dev->dev_private;
2887 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2888
2889 lockdep_assert_held(&dev_priv->pps_mutex);
2890
093e3f13
VS
2891 if (!is_edp(intel_dp))
2892 return;
2893
a4a5d2f8
VS
2894 if (intel_dp->pps_pipe == crtc->pipe)
2895 return;
2896
2897 /*
2898 * If another power sequencer was being used on this
2899 * port previously make sure to turn off vdd there while
2900 * we still have control of it.
2901 */
2902 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2903 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2904
2905 /*
2906 * We may be stealing the power
2907 * sequencer from another port.
2908 */
2909 vlv_steal_power_sequencer(dev, crtc->pipe);
2910
2911 /* now it's all ours */
2912 intel_dp->pps_pipe = crtc->pipe;
2913
2914 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2915 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2916
2917 /* init power sequencer on this pipe and port */
36b5f425
VS
2918 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2919 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2920}
2921
ab1f90f9 2922static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2923{
2bd2ad64 2924 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2925 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2926 struct drm_device *dev = encoder->base.dev;
89b667f8 2927 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2928 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2929 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2930 int pipe = intel_crtc->pipe;
2931 u32 val;
a4fc5ed6 2932
a580516d 2933 mutex_lock(&dev_priv->sb_lock);
89b667f8 2934
ab3c759a 2935 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2936 val = 0;
2937 if (pipe)
2938 val |= (1<<21);
2939 else
2940 val &= ~(1<<21);
2941 val |= 0x001000c4;
ab3c759a
CML
2942 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2943 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2944 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2945
a580516d 2946 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2947
2948 intel_enable_dp(encoder);
89b667f8
JB
2949}
2950
ecff4f3b 2951static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2952{
2953 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2954 struct drm_device *dev = encoder->base.dev;
2955 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2956 struct intel_crtc *intel_crtc =
2957 to_intel_crtc(encoder->base.crtc);
e4607fcf 2958 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2959 int pipe = intel_crtc->pipe;
89b667f8 2960
8ac33ed3
DV
2961 intel_dp_prepare(encoder);
2962
89b667f8 2963 /* Program Tx lane resets to default */
a580516d 2964 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2965 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2966 DPIO_PCS_TX_LANE2_RESET |
2967 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2969 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2970 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2971 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2972 DPIO_PCS_CLK_SOFT_RESET);
2973
2974 /* Fix up inter-pair skew failure */
ab3c759a
CML
2975 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2976 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2977 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2978 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2979}
2980
e4a1d846
CML
2981static void chv_pre_enable_dp(struct intel_encoder *encoder)
2982{
2983 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2984 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2985 struct drm_device *dev = encoder->base.dev;
2986 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2987 struct intel_crtc *intel_crtc =
2988 to_intel_crtc(encoder->base.crtc);
2989 enum dpio_channel ch = vlv_dport_to_channel(dport);
2990 int pipe = intel_crtc->pipe;
2e523e98 2991 int data, i, stagger;
949c1d43 2992 u32 val;
e4a1d846 2993
a580516d 2994 mutex_lock(&dev_priv->sb_lock);
949c1d43 2995
570e2a74
VS
2996 /* allow hardware to manage TX FIFO reset source */
2997 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2998 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2999 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3000
e0fce78f
VS
3001 if (intel_crtc->config->lane_count > 2) {
3002 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3003 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3004 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3005 }
570e2a74 3006
949c1d43 3007 /* Program Tx lane latency optimal setting*/
e0fce78f 3008 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 3009 /* Set the upar bit */
e0fce78f
VS
3010 if (intel_crtc->config->lane_count == 1)
3011 data = 0x0;
3012 else
3013 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
3014 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3015 data << DPIO_UPAR_SHIFT);
3016 }
3017
3018 /* Data lane stagger programming */
2e523e98
VS
3019 if (intel_crtc->config->port_clock > 270000)
3020 stagger = 0x18;
3021 else if (intel_crtc->config->port_clock > 135000)
3022 stagger = 0xd;
3023 else if (intel_crtc->config->port_clock > 67500)
3024 stagger = 0x7;
3025 else if (intel_crtc->config->port_clock > 33750)
3026 stagger = 0x4;
3027 else
3028 stagger = 0x2;
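	/*
	 * Note the strict '>' comparisons above: a 270000 (HBR) link falls
	 * into the 0xd bucket, and only rates above that (e.g. HBR2 at
	 * 540000) get the 0x18 stagger value.
	 */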
3029
3030 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3031 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3032 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3033
e0fce78f
VS
3034 if (intel_crtc->config->lane_count > 2) {
3035 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3036 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3037 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3038 }
2e523e98
VS
3039
3040 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3041 DPIO_LANESTAGGER_STRAP(stagger) |
3042 DPIO_LANESTAGGER_STRAP_OVRD |
3043 DPIO_TX1_STAGGER_MASK(0x1f) |
3044 DPIO_TX1_STAGGER_MULT(6) |
3045 DPIO_TX2_STAGGER_MULT(0));
3046
e0fce78f
VS
3047 if (intel_crtc->config->lane_count > 2) {
3048 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3049 DPIO_LANESTAGGER_STRAP(stagger) |
3050 DPIO_LANESTAGGER_STRAP_OVRD |
3051 DPIO_TX1_STAGGER_MASK(0x1f) |
3052 DPIO_TX1_STAGGER_MULT(7) |
3053 DPIO_TX2_STAGGER_MULT(5));
3054 }
e4a1d846 3055
a8f327fb
VS
3056 /* Deassert data lane reset */
3057 chv_data_lane_soft_reset(encoder, false);
3058
a580516d 3059 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 3060
e4a1d846 3061 intel_enable_dp(encoder);
b0b33846
VS
3062
3063 /* Second common lane will stay alive on its own now */
3064 if (dport->release_cl2_override) {
3065 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3066 dport->release_cl2_override = false;
3067 }
e4a1d846
CML
3068}
3069
9197c88b
VS
3070static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3071{
3072 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3073 struct drm_device *dev = encoder->base.dev;
3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct intel_crtc *intel_crtc =
3076 to_intel_crtc(encoder->base.crtc);
3077 enum dpio_channel ch = vlv_dport_to_channel(dport);
3078 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
3079 unsigned int lane_mask =
3080 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
3081 u32 val;
3082
625695f8
VS
3083 intel_dp_prepare(encoder);
3084
b0b33846
VS
3085 /*
3086 * Must trick the second common lane into life.
3087 * Otherwise we can't even access the PLL.
3088 */
3089 if (ch == DPIO_CH0 && pipe == PIPE_B)
3090 dport->release_cl2_override =
3091 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3092
e0fce78f
VS
3093 chv_phy_powergate_lanes(encoder, true, lane_mask);
3094
a580516d 3095 mutex_lock(&dev_priv->sb_lock);
9197c88b 3096
a8f327fb
VS
3097 /* Assert data lane reset */
3098 chv_data_lane_soft_reset(encoder, true);
3099
b9e5ac3c
VS
3100 /* program left/right clock distribution */
3101 if (pipe != PIPE_B) {
3102 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3103 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3104 if (ch == DPIO_CH0)
3105 val |= CHV_BUFLEFTENA1_FORCE;
3106 if (ch == DPIO_CH1)
3107 val |= CHV_BUFRIGHTENA1_FORCE;
3108 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3109 } else {
3110 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3111 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3112 if (ch == DPIO_CH0)
3113 val |= CHV_BUFLEFTENA2_FORCE;
3114 if (ch == DPIO_CH1)
3115 val |= CHV_BUFRIGHTENA2_FORCE;
3116 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3117 }
3118
9197c88b
VS
3119 /* program clock channel usage */
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3121 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3122 if (pipe != PIPE_B)
3123 val &= ~CHV_PCS_USEDCLKCHANNEL;
3124 else
3125 val |= CHV_PCS_USEDCLKCHANNEL;
3126 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3127
e0fce78f
VS
3128 if (intel_crtc->config->lane_count > 2) {
3129 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3130 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3131 if (pipe != PIPE_B)
3132 val &= ~CHV_PCS_USEDCLKCHANNEL;
3133 else
3134 val |= CHV_PCS_USEDCLKCHANNEL;
3135 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3136 }
9197c88b
VS
3137
3138 /*
3139 * This is a bit weird since generally CL
3140 * matches the pipe, but here we need to
3141 * pick the CL based on the port.
3142 */
3143 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3144 if (pipe != PIPE_B)
3145 val &= ~CHV_CMN_USEDCLKCHANNEL;
3146 else
3147 val |= CHV_CMN_USEDCLKCHANNEL;
3148 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3149
a580516d 3150 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3151}
3152
d6db995f
VS
3153static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3154{
3155 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3156 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3157 u32 val;
3158
3159 mutex_lock(&dev_priv->sb_lock);
3160
3161 /* disable left/right clock distribution */
3162 if (pipe != PIPE_B) {
3163 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3164 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3165 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3166 } else {
3167 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3168 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3169 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3170 }
3171
3172 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3173
b0b33846
VS
3174 /*
3175 * Leave the power down bit cleared for at least one
3176 * lane so that chv_powergate_phy_ch() will power
3177 * on something when the channel is otherwise unused.
3178 * When the port is off and the override is removed
3179 * the lanes power down anyway, so otherwise it doesn't
3180 * really matter what the state of power down bits is
3181 * after this.
3182 */
e0fce78f 3183 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3184}
3185
a4fc5ed6 3186/*
df0c237d
JB
3187 * Native read with retry for link status and receiver capability reads for
3188 * cases where the sink may still be asleep.
9d1a1031
JN
3189 *
3190 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3191 * supposed to retry 3 times per the spec.
a4fc5ed6 3192 */
9d1a1031
JN
3193static ssize_t
3194intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3195 void *buffer, size_t size)
a4fc5ed6 3196{
9d1a1031
JN
3197 ssize_t ret;
3198 int i;
61da5fab 3199
f6a19066
VS
3200 /*
3201 * Sometimes we just get the same incorrect byte repeated
3202 * over the entire buffer. Doing just one throw-away read
3203 * initially seems to "solve" it.
3204 */
3205 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3206
61da5fab 3207 for (i = 0; i < 3; i++) {
9d1a1031
JN
3208 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3209 if (ret == size)
3210 return ret;
61da5fab
JB
3211 msleep(1);
3212 }
a4fc5ed6 3213
9d1a1031 3214 return ret;
a4fc5ed6
KP
3215}
3216
3217/*
3218 * Fetch AUX CH registers 0x202 - 0x207 which contain
3219 * link status information
3220 */
94223d04 3221bool
93f62dad 3222intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3223{
9d1a1031
JN
3224 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3225 DP_LANE0_1_STATUS,
3226 link_status,
3227 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3228}
3229
1100244e 3230/* These are source-specific values. */
94223d04 3231uint8_t
1a2eb460 3232intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3233{
30add22d 3234 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3235 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3236 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3237
9314726b
VK
3238 if (IS_BROXTON(dev))
3239 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3240 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3241 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3243 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
666a4537 3244 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
bd60018a 3245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3246 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3247 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3248 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3249 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3250 else
bd60018a 3251 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3252}
3253
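/* Highest pre-emphasis level allowed for the given vswing on this platform/port. */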
94223d04 3254uint8_t
1a2eb460
KP
3255intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3256{
30add22d 3257 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3258 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3259
5a9d1f1a
DL
3260 if (INTEL_INFO(dev)->gen >= 9) {
3261 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3262 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3263 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3264 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3265 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3270 default:
3271 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3272 }
3273 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3274 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3276 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3278 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3280 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3282 default:
bd60018a 3283 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3284 }
666a4537 3285 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e2fa6fba 3286 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3288 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3290 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3292 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3294 default:
bd60018a 3295 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3296 }
bc7d38a4 3297 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3298 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3299 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3300 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3301 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3304 default:
bd60018a 3305 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3306 }
3307 } else {
3308 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3309 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3310 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3311 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3312 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3313 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3314 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3315 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3316 default:
bd60018a 3317 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3318 }
a4fc5ed6
KP
3319 }
3320}
3321
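/*
 * Translate the requested vswing/pre-emphasis training levels into VLV PHY
 * register values and program them through the sideband (DPIO) interface.
 */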
5829975c 3322static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3323{
3324 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3325 struct drm_i915_private *dev_priv = dev->dev_private;
3326 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3327 struct intel_crtc *intel_crtc =
3328 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3329 unsigned long demph_reg_value, preemph_reg_value,
3330 uniqtranscale_reg_value;
3331 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3332 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3333 int pipe = intel_crtc->pipe;
e2fa6fba
P
3334
3335 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3336 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3337 preemph_reg_value = 0x0004000;
3338 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3339 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3340 demph_reg_value = 0x2B405555;
3341 uniqtranscale_reg_value = 0x552AB83A;
3342 break;
bd60018a 3343 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3344 demph_reg_value = 0x2B404040;
3345 uniqtranscale_reg_value = 0x5548B83A;
3346 break;
bd60018a 3347 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3348 demph_reg_value = 0x2B245555;
3349 uniqtranscale_reg_value = 0x5560B83A;
3350 break;
bd60018a 3351 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3352 demph_reg_value = 0x2B405555;
3353 uniqtranscale_reg_value = 0x5598DA3A;
3354 break;
3355 default:
3356 return 0;
3357 }
3358 break;
bd60018a 3359 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3360 preemph_reg_value = 0x0002000;
3361 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3362 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3363 demph_reg_value = 0x2B404040;
3364 uniqtranscale_reg_value = 0x5552B83A;
3365 break;
bd60018a 3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3367 demph_reg_value = 0x2B404848;
3368 uniqtranscale_reg_value = 0x5580B83A;
3369 break;
bd60018a 3370 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3371 demph_reg_value = 0x2B404040;
3372 uniqtranscale_reg_value = 0x55ADDA3A;
3373 break;
3374 default:
3375 return 0;
3376 }
3377 break;
bd60018a 3378 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3379 preemph_reg_value = 0x0000000;
3380 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3381 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3382 demph_reg_value = 0x2B305555;
3383 uniqtranscale_reg_value = 0x5570B83A;
3384 break;
bd60018a 3385 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3386 demph_reg_value = 0x2B2B4040;
3387 uniqtranscale_reg_value = 0x55ADDA3A;
3388 break;
3389 default:
3390 return 0;
3391 }
3392 break;
bd60018a 3393 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3394 preemph_reg_value = 0x0006000;
3395 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3397 demph_reg_value = 0x1B405555;
3398 uniqtranscale_reg_value = 0x55ADDA3A;
3399 break;
3400 default:
3401 return 0;
3402 }
3403 break;
3404 default:
3405 return 0;
3406 }
3407
a580516d 3408 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3409 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3410 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3411 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3412 uniqtranscale_reg_value);
ab3c759a
CML
3413 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3415 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3416 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3417 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3418
3419 return 0;
3420}
3421
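/*
 * The unique transition scale is only used at maximum vswing (level 3)
 * with zero pre-emphasis.
 */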
67fa24b4
VS
3422static bool chv_need_uniq_trans_scale(uint8_t train_set)
3423{
3424 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3425 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3426}
3427
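/*
 * Program the CHV PHY de-emphasis and swing margin for the requested
 * training levels on each active lane, via the sideband (DPIO) interface.
 */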
5829975c 3428static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3429{
3430 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3431 struct drm_i915_private *dev_priv = dev->dev_private;
3432 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3433 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3434 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3435 uint8_t train_set = intel_dp->train_set[0];
3436 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3437 enum pipe pipe = intel_crtc->pipe;
3438 int i;
e4a1d846
CML
3439
3440 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3441 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3442 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3443 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3444 deemph_reg_value = 128;
3445 margin_reg_value = 52;
3446 break;
bd60018a 3447 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3448 deemph_reg_value = 128;
3449 margin_reg_value = 77;
3450 break;
bd60018a 3451 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3452 deemph_reg_value = 128;
3453 margin_reg_value = 102;
3454 break;
bd60018a 3455 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3456 deemph_reg_value = 128;
3457 margin_reg_value = 154;
3458 /* FIXME extra to set for 1200 */
3459 break;
3460 default:
3461 return 0;
3462 }
3463 break;
bd60018a 3464 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3465 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3466 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3467 deemph_reg_value = 85;
3468 margin_reg_value = 78;
3469 break;
bd60018a 3470 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3471 deemph_reg_value = 85;
3472 margin_reg_value = 116;
3473 break;
bd60018a 3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3475 deemph_reg_value = 85;
3476 margin_reg_value = 154;
3477 break;
3478 default:
3479 return 0;
3480 }
3481 break;
bd60018a 3482 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3483 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3484 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3485 deemph_reg_value = 64;
3486 margin_reg_value = 104;
3487 break;
bd60018a 3488 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3489 deemph_reg_value = 64;
3490 margin_reg_value = 154;
3491 break;
3492 default:
3493 return 0;
3494 }
3495 break;
bd60018a 3496 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3497 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3498 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3499 deemph_reg_value = 43;
3500 margin_reg_value = 154;
3501 break;
3502 default:
3503 return 0;
3504 }
3505 break;
3506 default:
3507 return 0;
3508 }
3509
a580516d 3510 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3511
3512 /* Clear calc init */
1966e59e
VS
3513 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3514 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3515 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3516 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3517 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3518
e0fce78f
VS
3519 if (intel_crtc->config->lane_count > 2) {
3520 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3521 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3522 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3523 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3524 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3525 }
e4a1d846 3526
a02ef3c7
VS
3527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3528 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3529 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3530 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3531
e0fce78f
VS
3532 if (intel_crtc->config->lane_count > 2) {
3533 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3534 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3535 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3536 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3537 }
a02ef3c7 3538
e4a1d846 3539 /* Program swing deemph */
e0fce78f 3540 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3541 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3542 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3543 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3544 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3545 }
e4a1d846
CML
3546
3547 /* Program swing margin */
e0fce78f 3548 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3549 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3550
1fb44505
VS
3551 val &= ~DPIO_SWING_MARGIN000_MASK;
3552 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3553
3554 /*
3555 * Supposedly this value shouldn't matter when unique transition
3556 * scale is disabled, but in fact it does matter. Let's just
3557 * always program the same value and hope it's OK.
3558 */
3559 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3560 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3561
f72df8db
VS
3562 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3563 }
e4a1d846 3564
67fa24b4
VS
3565 /*
3566	 * The document says to set bit 27 for ch0 and bit 26
3567	 * for ch1; this might be a typo in the doc.
3568 * For now, for this unique transition scale selection, set bit
3569 * 27 for ch0 and ch1.
3570 */
e0fce78f 3571 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3572 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3573 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3574 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3575 else
3576 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3577 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3578 }
3579
3580 /* Start swing calculation */
1966e59e
VS
3581 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3582 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3583 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3584
e0fce78f
VS
3585 if (intel_crtc->config->lane_count > 2) {
3586 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3587 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3588 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3589 }
e4a1d846 3590
a580516d 3591 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3592
3593 return 0;
3594}
3595
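/* Gen4's (and default) DP voltage swing and pre-emphasis control */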
a4fc5ed6 3596static uint32_t
5829975c 3597gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3598{
3cf2efb1 3599 uint32_t signal_levels = 0;
a4fc5ed6 3600
3cf2efb1 3601 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3602 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3603 default:
3604 signal_levels |= DP_VOLTAGE_0_4;
3605 break;
bd60018a 3606 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3607 signal_levels |= DP_VOLTAGE_0_6;
3608 break;
bd60018a 3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3610 signal_levels |= DP_VOLTAGE_0_8;
3611 break;
bd60018a 3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3613 signal_levels |= DP_VOLTAGE_1_2;
3614 break;
3615 }
3cf2efb1 3616 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3617 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3618 default:
3619 signal_levels |= DP_PRE_EMPHASIS_0;
3620 break;
bd60018a 3621 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3622 signal_levels |= DP_PRE_EMPHASIS_3_5;
3623 break;
bd60018a 3624 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3625 signal_levels |= DP_PRE_EMPHASIS_6;
3626 break;
bd60018a 3627 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3628 signal_levels |= DP_PRE_EMPHASIS_9_5;
3629 break;
3630 }
3631 return signal_levels;
3632}
3633
e3421a18
ZW
3634/* Gen6's DP voltage swing and pre-emphasis control */
3635static uint32_t
5829975c 3636gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3637{
3c5a62b5
YL
3638 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3639 DP_TRAIN_PRE_EMPHASIS_MASK);
3640 switch (signal_levels) {
bd60018a
SJ
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3642 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3643 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3645 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3646 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3648 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3651 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3654 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3655 default:
3c5a62b5
YL
3656 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3657 "0x%x\n", signal_levels);
3658 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3659 }
3660}
3661
1a2eb460
KP
3662/* Gen7's DP voltage swing and pre-emphasis control */
3663static uint32_t
5829975c 3664gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3665{
3666 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3667 DP_TRAIN_PRE_EMPHASIS_MASK);
3668 switch (signal_levels) {
bd60018a 3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3670 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3671 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3672 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3673 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3674 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3675
bd60018a 3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3677 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3678 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3679 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3680
bd60018a 3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3682 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3683 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3684 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3685
3686 default:
3687 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3688 "0x%x\n", signal_levels);
3689 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3690 }
3691}
3692
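/*
 * Select the platform-specific signal level encoding for the current
 * training set and write it into the DP port register.
 */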
94223d04 3693void
f4eb692e 3694intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3695{
3696 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3697 enum port port = intel_dig_port->port;
f0a3424e 3698 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3699 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3700 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3701 uint8_t train_set = intel_dp->train_set[0];
3702
f8896f5d
DW
3703 if (HAS_DDI(dev)) {
3704 signal_levels = ddi_signal_levels(intel_dp);
3705
3706 if (IS_BROXTON(dev))
3707 signal_levels = 0;
3708 else
3709 mask = DDI_BUF_EMP_MASK;
e4a1d846 3710 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3711 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3712 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3713 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3714 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3715 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3716 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3717 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3718 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3719 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3720 } else {
5829975c 3721 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3722 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3723 }
3724
96fb9f9b
VK
3725 if (mask)
3726 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3727
3728 DRM_DEBUG_KMS("Using vswing level %d\n",
3729 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3730 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3731 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3732 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3733
f4eb692e 3734 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3735
3736 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3737 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3738}
3739
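/* Write the requested link training pattern into the DP port register. */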
94223d04 3740void
e9c176d5
ACO
3741intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3742 uint8_t dp_train_pat)
a4fc5ed6 3743{
174edf1f 3744 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3745 struct drm_i915_private *dev_priv =
3746 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3747
f4eb692e 3748 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3749
f4eb692e 3750 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3751 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3752}
3753
94223d04 3754void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3755{
3756 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3757 struct drm_device *dev = intel_dig_port->base.base.dev;
3758 struct drm_i915_private *dev_priv = dev->dev_private;
3759 enum port port = intel_dig_port->port;
3760 uint32_t val;
3761
3762 if (!HAS_DDI(dev))
3763 return;
3764
3765 val = I915_READ(DP_TP_CTL(port));
3766 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3767 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3768 I915_WRITE(DP_TP_CTL(port), val);
3769
3770 /*
3771	 * On PORT_A we can have only eDP in SST mode. There, the only reason
3772	 * we need to set idle transmission mode is to work around a HW issue
3773	 * where we enable the pipe while not in idle link-training mode.
3774	 * In this case there is a requirement to wait for a minimum number of
3775 * idle patterns to be sent.
3776 */
3777 if (port == PORT_A)
3778 return;
3779
3780 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3781 1))
3782 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3783}
3784
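/*
 * Turn the DP port off: switch the link to the idle pattern, disable the
 * port, and apply the IBX transcoder A workaround where needed.
 */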
a4fc5ed6 3785static void
ea5b213a 3786intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3787{
da63a9f2 3788 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3789 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3790 enum port port = intel_dig_port->port;
da63a9f2 3791 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3792 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3793 uint32_t DP = intel_dp->DP;
a4fc5ed6 3794
bc76e320 3795 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3796 return;
3797
0c33d8d7 3798 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3799 return;
3800
28c97730 3801 DRM_DEBUG_KMS("\n");
32f9d658 3802
39e5fa88
VS
3803 if ((IS_GEN7(dev) && port == PORT_A) ||
3804 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3805 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3806 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3807 } else {
aad3d14d
VS
3808 if (IS_CHERRYVIEW(dev))
3809 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3810 else
3811 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3812 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3813 }
1612c8bd 3814 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3815 POSTING_READ(intel_dp->output_reg);
5eb08b69 3816
1612c8bd
VS
3817 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3818 I915_WRITE(intel_dp->output_reg, DP);
3819 POSTING_READ(intel_dp->output_reg);
3820
3821 /*
3822	 * HW workaround for IBX: we need to move the port
3823 * to transcoder A after disabling it to allow the
3824 * matching HDMI port to be enabled on transcoder A.
3825 */
3826 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3827 /*
3828 * We get CPU/PCH FIFO underruns on the other pipe when
3829 * doing the workaround. Sweep them under the rug.
3830 */
3831 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3832 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3833
1612c8bd
VS
3834 /* always enable with pattern 1 (as per spec) */
3835 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3836 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3837 I915_WRITE(intel_dp->output_reg, DP);
3838 POSTING_READ(intel_dp->output_reg);
3839
3840 DP &= ~DP_PORT_EN;
5bddd17f 3841 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3842 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3843
3844 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3845 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3846 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3847 }
3848
f01eca2e 3849 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3850
3851 intel_dp->DP = DP;
a4fc5ed6
KP
3852}
3853
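/*
 * Read and cache the sink's DPCD: receiver capabilities, PSR support,
 * eDP 1.4 intermediate link rates and downstream port information.
 */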
26d61aad
KP
3854static bool
3855intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3856{
a031d709
RV
3857 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3858 struct drm_device *dev = dig_port->base.base.dev;
3859 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3860 uint8_t rev;
a031d709 3861
9d1a1031
JN
3862 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3863 sizeof(intel_dp->dpcd)) < 0)
edb39244 3864 return false; /* aux transfer failed */
92fd8fd1 3865
a8e98153 3866 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3867
edb39244
AJ
3868 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3869 return false; /* DPCD not present */
3870
2293bb5c
SK
3871 /* Check if the panel supports PSR */
3872 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3873 if (is_edp(intel_dp)) {
9d1a1031
JN
3874 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3875 intel_dp->psr_dpcd,
3876 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3877 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3878 dev_priv->psr.sink_support = true;
50003939 3879 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3880 }
474d1ec4
SJ
3881
3882 if (INTEL_INFO(dev)->gen >= 9 &&
3883 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3884 uint8_t frame_sync_cap;
3885
3886 dev_priv->psr.sink_support = true;
3887 intel_dp_dpcd_read_wake(&intel_dp->aux,
3888 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3889 &frame_sync_cap, 1);
3890 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3891 /* PSR2 needs frame sync as well */
3892 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3893 DRM_DEBUG_KMS("PSR2 %s on sink",
3894 dev_priv->psr.psr2_support ? "supported" : "not supported");
3895 }
50003939
JN
3896 }
3897
bc5133d5 3898 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3899 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3900 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3901
fc0f8e25
SJ
3902 /* Intermediate frequency support */
3903 if (is_edp(intel_dp) &&
3904 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3905 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3906 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3907 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3908 int i;
3909
fc0f8e25
SJ
3910 intel_dp_dpcd_read_wake(&intel_dp->aux,
3911 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3912 sink_rates,
3913 sizeof(sink_rates));
ea2d8a42 3914
94ca719e
VS
3915 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3916 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3917
3918 if (val == 0)
3919 break;
3920
af77b974
SJ
3921 /* Value read is in kHz while drm clock is saved in deca-kHz */
3922 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3923 }
94ca719e 3924 intel_dp->num_sink_rates = i;
fc0f8e25 3925 }
0336400e
VS
3926
3927 intel_dp_print_rates(intel_dp);
3928
edb39244
AJ
3929 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3930 DP_DWN_STRM_PORT_PRESENT))
3931 return true; /* native DP sink */
3932
3933 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3934 return true; /* no per-port downstream info */
3935
9d1a1031
JN
3936 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3937 intel_dp->downstream_ports,
3938 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3939 return false; /* downstream port status fetch failed */
3940
3941 return true;
92fd8fd1
KP
3942}
3943
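/* Log the sink and branch device OUIs for debugging, if the sink supports OUI. */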
0d198328
AJ
3944static void
3945intel_dp_probe_oui(struct intel_dp *intel_dp)
3946{
3947 u8 buf[3];
3948
3949 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3950 return;
3951
9d1a1031 3952 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3953 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3954 buf[0], buf[1], buf[2]);
3955
9d1a1031 3956 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3957 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3958 buf[0], buf[1], buf[2]);
3959}
3960
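/*
 * Check whether the sink is MST capable (DPCD 1.2+) and update the MST
 * topology manager state accordingly.
 */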
0e32b39c
DA
3961static bool
3962intel_dp_probe_mst(struct intel_dp *intel_dp)
3963{
3964 u8 buf[1];
3965
3966 if (!intel_dp->can_mst)
3967 return false;
3968
3969 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3970 return false;
3971
0e32b39c
DA
3972 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3973 if (buf[0] & DP_MST_CAP) {
3974 DRM_DEBUG_KMS("Sink is MST capable\n");
3975 intel_dp->is_mst = true;
3976 } else {
3977 DRM_DEBUG_KMS("Sink is not MST capable\n");
3978 intel_dp->is_mst = false;
3979 }
3980 }
0e32b39c
DA
3981
3982 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3983 return intel_dp->is_mst;
3984}
3985
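/*
 * Stop the sink CRC calculation, wait for the sink's test count to drop
 * back to zero and re-enable IPS.
 */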
e5a1cab5 3986static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3987{
082dcc7c 3988 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 3989 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 3990 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3991 u8 buf;
e5a1cab5 3992 int ret = 0;
c6297843
RV
3993 int count = 0;
3994 int attempts = 10;
d2e216d0 3995
082dcc7c
RV
3996 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3997 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3998 ret = -EIO;
3999 goto out;
4373f0f2
PZ
4000 }
4001
082dcc7c 4002 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4003 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4004 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4005 ret = -EIO;
4006 goto out;
4007 }
d2e216d0 4008
c6297843
RV
4009 do {
4010 intel_wait_for_vblank(dev, intel_crtc->pipe);
4011
4012 if (drm_dp_dpcd_readb(&intel_dp->aux,
4013 DP_TEST_SINK_MISC, &buf) < 0) {
4014 ret = -EIO;
4015 goto out;
4016 }
4017 count = buf & DP_TEST_COUNT_MASK;
4018 } while (--attempts && count);
4019
4020 if (attempts == 0) {
dc5a9037 4021 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
c6297843
RV
4022 ret = -ETIMEDOUT;
4023 }
4024
e5a1cab5 4025 out:
082dcc7c 4026 hsw_enable_ips(intel_crtc);
e5a1cab5 4027 return ret;
082dcc7c
RV
4028}
4029
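/*
 * Start the sink CRC calculation: check that the sink supports it, stop
 * any calculation already in progress, disable IPS and kick it off.
 */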
4030static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4031{
4032 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4033 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c
RV
4034 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4035 u8 buf;
e5a1cab5
RV
4036 int ret;
4037
082dcc7c
RV
4038 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4039 return -EIO;
4040
4041 if (!(buf & DP_TEST_CRC_SUPPORTED))
4042 return -ENOTTY;
4043
4044 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4045 return -EIO;
4046
6d8175da
RV
4047 if (buf & DP_TEST_SINK_START) {
4048 ret = intel_dp_sink_crc_stop(intel_dp);
4049 if (ret)
4050 return ret;
4051 }
4052
082dcc7c 4053 hsw_disable_ips(intel_crtc);
1dda5f93 4054
9d1a1031 4055 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4056 buf | DP_TEST_SINK_START) < 0) {
4057 hsw_enable_ips(intel_crtc);
4058 return -EIO;
4373f0f2
PZ
4059 }
4060
d72f9d91 4061 intel_wait_for_vblank(dev, intel_crtc->pipe);
082dcc7c
RV
4062 return 0;
4063}
4064
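/*
 * Read a frame CRC from the sink: start the calculation, wait up to six
 * vblanks for a result and fetch the six CRC bytes.
 */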
4065int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4066{
4067 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4068 struct drm_device *dev = dig_port->base.base.dev;
4069 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4070 u8 buf;
621d4c76 4071 int count, ret;
082dcc7c 4072 int attempts = 6;
082dcc7c
RV
4073
4074 ret = intel_dp_sink_crc_start(intel_dp);
4075 if (ret)
4076 return ret;
4077
ad9dc91b 4078 do {
621d4c76
RV
4079 intel_wait_for_vblank(dev, intel_crtc->pipe);
4080
1dda5f93 4081 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4082 DP_TEST_SINK_MISC, &buf) < 0) {
4083 ret = -EIO;
afe0d67e 4084 goto stop;
4373f0f2 4085 }
621d4c76 4086 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4087
7e38eeff 4088 } while (--attempts && count == 0);
ad9dc91b
RV
4089
4090 if (attempts == 0) {
7e38eeff
RV
4091 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4092 ret = -ETIMEDOUT;
4093 goto stop;
4094 }
4095
4096 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4097 ret = -EIO;
4098 goto stop;
ad9dc91b 4099 }
d2e216d0 4100
afe0d67e 4101stop:
082dcc7c 4102 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4103 return ret;
d2e216d0
RV
4104}
4105
a60f0e38
JB
4106static bool
4107intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4108{
9d1a1031
JN
4109 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4110 DP_DEVICE_SERVICE_IRQ_VECTOR,
4111 sink_irq_vector, 1) == 1;
a60f0e38
JB
4112}
4113
0e32b39c
DA
4114static bool
4115intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4116{
4117 int ret;
4118
4119 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4120 DP_SINK_COUNT_ESI,
4121 sink_irq_vector, 14);
4122 if (ret != 14)
4123 return false;
4124
4125 return true;
4126}
4127
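/*
 * Automated (DP CTS) test handlers. Link training requests are simply
 * acked; video pattern and PHY pattern tests are nacked as they are not
 * implemented here.
 */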
c5d5ab7a
TP
4128static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4129{
4130 uint8_t test_result = DP_TEST_ACK;
4131 return test_result;
4132}
4133
4134static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4135{
4136 uint8_t test_result = DP_TEST_NAK;
4137 return test_result;
4138}
4139
4140static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4141{
c5d5ab7a 4142 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4143 struct intel_connector *intel_connector = intel_dp->attached_connector;
4144 struct drm_connector *connector = &intel_connector->base;
4145
4146 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4147 connector->edid_corrupt ||
559be30c
TP
4148 intel_dp->aux.i2c_defer_count > 6) {
4149 /* Check EDID read for NACKs, DEFERs and corruption
4150 * (DP CTS 1.2 Core r1.1)
4151 * 4.2.2.4 : Failed EDID read, I2C_NAK
4152 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4153 * 4.2.2.6 : EDID corruption detected
4154 * Use failsafe mode for all cases
4155 */
4156 if (intel_dp->aux.i2c_nack_count > 0 ||
4157 intel_dp->aux.i2c_defer_count > 0)
4158 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4159 intel_dp->aux.i2c_nack_count,
4160 intel_dp->aux.i2c_defer_count);
4161 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4162 } else {
f79b468e
TS
4163 struct edid *block = intel_connector->detect_edid;
4164
4165 /* We have to write the checksum
4166 * of the last block read
4167 */
4168 block += intel_connector->detect_edid->extensions;
4169
559be30c
TP
4170 if (!drm_dp_dpcd_write(&intel_dp->aux,
4171 DP_TEST_EDID_CHECKSUM,
f79b468e 4172 &block->checksum,
5a1cc655 4173 1))
559be30c
TP
4174 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4175
4176 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4177 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4178 }
4179
4180 /* Set test active flag here so userspace doesn't interrupt things */
4181 intel_dp->compliance_test_active = 1;
4182
c5d5ab7a
TP
4183 return test_result;
4184}
4185
4186static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4187{
c5d5ab7a
TP
4188 uint8_t test_result = DP_TEST_NAK;
4189 return test_result;
4190}
4191
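/*
 * Dispatch an automated test request from the sink and write the result
 * back to DP_TEST_RESPONSE.
 */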
4192static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4193{
4194 uint8_t response = DP_TEST_NAK;
4195 uint8_t rxdata = 0;
4196 int status = 0;
4197
c5d5ab7a
TP
4198 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4199 if (status <= 0) {
4200 DRM_DEBUG_KMS("Could not read test request from sink\n");
4201 goto update_status;
4202 }
4203
4204 switch (rxdata) {
4205 case DP_TEST_LINK_TRAINING:
4206 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4207 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4208 response = intel_dp_autotest_link_training(intel_dp);
4209 break;
4210 case DP_TEST_LINK_VIDEO_PATTERN:
4211 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4212 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4213 response = intel_dp_autotest_video_pattern(intel_dp);
4214 break;
4215 case DP_TEST_LINK_EDID_READ:
4216 DRM_DEBUG_KMS("EDID test requested\n");
4217 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4218 response = intel_dp_autotest_edid(intel_dp);
4219 break;
4220 case DP_TEST_LINK_PHY_TEST_PATTERN:
4221 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4222 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4223 response = intel_dp_autotest_phy_pattern(intel_dp);
4224 break;
4225 default:
4226 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4227 break;
4228 }
4229
4230update_status:
4231 status = drm_dp_dpcd_write(&intel_dp->aux,
4232 DP_TEST_RESPONSE,
4233 &response, 1);
4234 if (status <= 0)
4235 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4236}
4237
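/*
 * Service an MST short pulse: read the ESI, retrain the link if channel
 * EQ has failed and forward any IRQs to the MST topology manager.
 */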
0e32b39c
DA
4238static int
4239intel_dp_check_mst_status(struct intel_dp *intel_dp)
4240{
4241 bool bret;
4242
4243 if (intel_dp->is_mst) {
4244 u8 esi[16] = { 0 };
4245 int ret = 0;
4246 int retry;
4247 bool handled;
4248 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4249go_again:
4250 if (bret == true) {
4251
4252 /* check link status - esi[10] = 0x200c */
90a6b7b0 4253 if (intel_dp->active_mst_links &&
901c2daf 4254 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4255 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4256 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4257 intel_dp_stop_link_train(intel_dp);
4258 }
4259
6f34cc39 4260 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4261 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4262
4263 if (handled) {
4264 for (retry = 0; retry < 3; retry++) {
4265 int wret;
4266 wret = drm_dp_dpcd_write(&intel_dp->aux,
4267 DP_SINK_COUNT_ESI+1,
4268 &esi[1], 3);
4269 if (wret == 3) {
4270 break;
4271 }
4272 }
4273
4274 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4275 if (bret == true) {
6f34cc39 4276 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4277 goto go_again;
4278 }
4279 } else
4280 ret = 0;
4281
4282 return ret;
4283 } else {
4284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4285 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4286 intel_dp->is_mst = false;
4287 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4288 /* send a hotplug event */
4289 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4290 }
4291 }
4292 return -EINVAL;
4293}
4294
a4fc5ed6
KP
4295/*
4296 * According to DP spec
4297 * 5.1.2:
4298 * 1. Read DPCD
4299 * 2. Configure link according to Receiver Capabilities
4300 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4301 * 4. Check link status on receipt of hot-plug interrupt
4302 */
a5146200 4303static void
ea5b213a 4304intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4305{
5b215bcf 4306 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4307 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4308 u8 sink_irq_vector;
93f62dad 4309 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4310
5b215bcf
DA
4311 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4312
4df6960e
SS
4313 /*
4314	 * Clear the compliance test variables so that we capture
4315	 * fresh values for the next automated test request.
4316 */
4317 intel_dp->compliance_test_active = 0;
4318 intel_dp->compliance_test_type = 0;
4319 intel_dp->compliance_test_data = 0;
4320
e02f9a06 4321 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4322 return;
4323
1a125d8a
ID
4324 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4325 return;
4326
92fd8fd1 4327 /* Try to read receiver status if the link appears to be up */
93f62dad 4328 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4329 return;
4330 }
4331
92fd8fd1 4332 /* Now read the DPCD to see if it's actually running */
26d61aad 4333 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4334 return;
4335 }
4336
a60f0e38
JB
4337 /* Try to read the source of the interrupt */
4338 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4339 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4340 /* Clear interrupt source */
9d1a1031
JN
4341 drm_dp_dpcd_writeb(&intel_dp->aux,
4342 DP_DEVICE_SERVICE_IRQ_VECTOR,
4343 sink_irq_vector);
a60f0e38
JB
4344
4345 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4346 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4347 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4348 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4349 }
4350
14631e9d
SS
4351	 /* if link training is requested we should always perform it */
4352 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4353 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
92fd8fd1 4354 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4355 intel_encoder->base.name);
33a34e4e 4356 intel_dp_start_link_train(intel_dp);
3ab9c637 4357 intel_dp_stop_link_train(intel_dp);
33a34e4e 4358 }
a4fc5ed6 4359}
a4fc5ed6 4360
caf9ab24 4361/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4362static enum drm_connector_status
26d61aad 4363intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4364{
caf9ab24 4365 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4366 uint8_t type;
4367
4368 if (!intel_dp_get_dpcd(intel_dp))
4369 return connector_status_disconnected;
4370
4371 /* if there's no downstream port, we're done */
4372 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4373 return connector_status_connected;
caf9ab24
AJ
4374
4375 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4376 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4377 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4378 uint8_t reg;
9d1a1031
JN
4379
4380 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4381 &reg, 1) < 0)
caf9ab24 4382 return connector_status_unknown;
9d1a1031 4383
23235177
AJ
4384 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4385 : connector_status_disconnected;
caf9ab24
AJ
4386 }
4387
4388 /* If no HPD, poke DDC gently */
0b99836f 4389 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4390 return connector_status_connected;
caf9ab24
AJ
4391
4392 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4393 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4394 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4395 if (type == DP_DS_PORT_TYPE_VGA ||
4396 type == DP_DS_PORT_TYPE_NON_EDID)
4397 return connector_status_unknown;
4398 } else {
4399 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4400 DP_DWN_STRM_PORT_TYPE_MASK;
4401 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4402 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4403 return connector_status_unknown;
4404 }
caf9ab24
AJ
4405
4406 /* Anything else is out of spec, warn and ignore */
4407 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4408 return connector_status_disconnected;
71ba9000
AJ
4409}
4410
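/*
 * eDP panels can't really be hot-unplugged; report connected unless
 * intel_panel_detect() explicitly says otherwise.
 */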
d410b56d
CW
4411static enum drm_connector_status
4412edp_detect(struct intel_dp *intel_dp)
4413{
4414 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4415 enum drm_connector_status status;
4416
4417 status = intel_panel_detect(dev);
4418 if (status == connector_status_unknown)
4419 status = connector_status_connected;
4420
4421 return status;
4422}
4423
b93433cc
JN
4424static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4425 struct intel_digital_port *port)
5eb08b69 4426{
b93433cc 4427 u32 bit;
01cb9ea6 4428
0df53b77
JN
4429 switch (port->port) {
4430 case PORT_A:
4431 return true;
4432 case PORT_B:
4433 bit = SDE_PORTB_HOTPLUG;
4434 break;
4435 case PORT_C:
4436 bit = SDE_PORTC_HOTPLUG;
4437 break;
4438 case PORT_D:
4439 bit = SDE_PORTD_HOTPLUG;
4440 break;
4441 default:
4442 MISSING_CASE(port->port);
4443 return false;
4444 }
4445
4446 return I915_READ(SDEISR) & bit;
4447}
4448
4449static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4450 struct intel_digital_port *port)
4451{
4452 u32 bit;
4453
4454 switch (port->port) {
4455 case PORT_A:
4456 return true;
4457 case PORT_B:
4458 bit = SDE_PORTB_HOTPLUG_CPT;
4459 break;
4460 case PORT_C:
4461 bit = SDE_PORTC_HOTPLUG_CPT;
4462 break;
4463 case PORT_D:
4464 bit = SDE_PORTD_HOTPLUG_CPT;
4465 break;
a78695d3
JN
4466 case PORT_E:
4467 bit = SDE_PORTE_HOTPLUG_SPT;
4468 break;
0df53b77
JN
4469 default:
4470 MISSING_CASE(port->port);
4471 return false;
b93433cc 4472 }
1b469639 4473
b93433cc 4474 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4475}
4476
7e66bcf2 4477static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4478 struct intel_digital_port *port)
a4fc5ed6 4479{
9642c81c 4480 u32 bit;
5eb08b69 4481
9642c81c
JN
4482 switch (port->port) {
4483 case PORT_B:
4484 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4485 break;
4486 case PORT_C:
4487 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4488 break;
4489 case PORT_D:
4490 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4491 break;
4492 default:
4493 MISSING_CASE(port->port);
4494 return false;
4495 }
4496
4497 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4498}
4499
0780cd36
VS
4500static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4501 struct intel_digital_port *port)
9642c81c
JN
4502{
4503 u32 bit;
4504
4505 switch (port->port) {
4506 case PORT_B:
0780cd36 4507 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4508 break;
4509 case PORT_C:
0780cd36 4510 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4511 break;
4512 case PORT_D:
0780cd36 4513 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4514 break;
4515 default:
4516 MISSING_CASE(port->port);
4517 return false;
a4fc5ed6
KP
4518 }
4519
1d245987 4520 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4521}
4522
e464bfde 4523static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4524 struct intel_digital_port *intel_dig_port)
e464bfde 4525{
e2ec35a5
SJ
4526 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4527 enum port port;
e464bfde
JN
4528 u32 bit;
4529
e2ec35a5
SJ
4530 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4531 switch (port) {
e464bfde
JN
4532 case PORT_A:
4533 bit = BXT_DE_PORT_HP_DDIA;
4534 break;
4535 case PORT_B:
4536 bit = BXT_DE_PORT_HP_DDIB;
4537 break;
4538 case PORT_C:
4539 bit = BXT_DE_PORT_HP_DDIC;
4540 break;
4541 default:
e2ec35a5 4542 MISSING_CASE(port);
e464bfde
JN
4543 return false;
4544 }
4545
4546 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4547}
4548
7e66bcf2
JN
4549/*
4550 * intel_digital_port_connected - is the specified port connected?
4551 * @dev_priv: i915 private structure
4552 * @port: the port to test
4553 *
4554 * Return %true if @port is connected, %false otherwise.
4555 */
237ed86c 4556bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4557 struct intel_digital_port *port)
4558{
0df53b77 4559 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4560 return ibx_digital_port_connected(dev_priv, port);
22824fac 4561 else if (HAS_PCH_SPLIT(dev_priv))
0df53b77 4562 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4563 else if (IS_BROXTON(dev_priv))
4564 return bxt_digital_port_connected(dev_priv, port);
0780cd36
VS
4565 else if (IS_GM45(dev_priv))
4566 return gm45_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4567 else
4568 return g4x_digital_port_connected(dev_priv, port);
4569}
4570
8c241fef 4571static struct edid *
beb60608 4572intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4573{
beb60608 4574 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4575
9cd300e0
JN
4576 /* use cached edid if we have one */
4577 if (intel_connector->edid) {
9cd300e0
JN
4578 /* invalid edid */
4579 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4580 return NULL;
4581
55e9edeb 4582 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4583 } else
4584 return drm_get_edid(&intel_connector->base,
4585 &intel_dp->aux.ddc);
4586}
8c241fef 4587
beb60608
CW
4588static void
4589intel_dp_set_edid(struct intel_dp *intel_dp)
4590{
4591 struct intel_connector *intel_connector = intel_dp->attached_connector;
4592 struct edid *edid;
8c241fef 4593
beb60608
CW
4594 edid = intel_dp_get_edid(intel_dp);
4595 intel_connector->detect_edid = edid;
4596
4597 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4598 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4599 else
4600 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4601}
4602
beb60608
CW
4603static void
4604intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4605{
beb60608 4606 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4607
beb60608
CW
4608 kfree(intel_connector->detect_edid);
4609 intel_connector->detect_edid = NULL;
9cd300e0 4610
beb60608
CW
4611 intel_dp->has_audio = false;
4612}
d6f24d0f 4613
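/*
 * Full connector detect: check hotplug/DPCD state, probe the OUI and MST
 * capability, and (re)read the EDID and sink IRQ vector.
 */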
a9756bb5
ZW
4614static enum drm_connector_status
4615intel_dp_detect(struct drm_connector *connector, bool force)
4616{
4617 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4618 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4619 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4620 struct drm_device *dev = connector->dev;
a9756bb5 4621 enum drm_connector_status status;
671dedd2 4622 enum intel_display_power_domain power_domain;
0e32b39c 4623 bool ret;
09b1eb13 4624 u8 sink_irq_vector;
a9756bb5 4625
164c8598 4626 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4627 connector->base.id, connector->name);
beb60608 4628 intel_dp_unset_edid(intel_dp);
164c8598 4629
0e32b39c
DA
4630 if (intel_dp->is_mst) {
4631 /* MST devices are disconnected from a monitor POV */
4632 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4633 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4634 return connector_status_disconnected;
0e32b39c
DA
4635 }
4636
25f78f58
VS
4637 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4638 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4639
d410b56d
CW
4640 /* Can't disconnect eDP, but you can close the lid... */
4641 if (is_edp(intel_dp))
4642 status = edp_detect(intel_dp);
c555a81d
ACO
4643 else if (intel_digital_port_connected(to_i915(dev),
4644 dp_to_dig_port(intel_dp)))
4645 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 4646 else
c555a81d
ACO
4647 status = connector_status_disconnected;
4648
4df6960e
SS
4649 if (status != connector_status_connected) {
4650 intel_dp->compliance_test_active = 0;
4651 intel_dp->compliance_test_type = 0;
4652 intel_dp->compliance_test_data = 0;
4653
c8c8fb33 4654 goto out;
4df6960e 4655 }
a9756bb5 4656
0d198328
AJ
4657 intel_dp_probe_oui(intel_dp);
4658
0e32b39c
DA
4659 ret = intel_dp_probe_mst(intel_dp);
4660 if (ret) {
4661		/* if we are in MST mode then this connector
4662		 * won't appear connected or have anything with EDID on it */
4663 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4664 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4665 status = connector_status_disconnected;
4666 goto out;
4667 }
4668
4df6960e
SS
4669 /*
4670	 * Clear the NACK and defer counts so that we get their exact
4671	 * values for the EDID read, as required by Compliance tests
4672	 * 4.2.2.4 and 4.2.2.5
4673 */
4674 intel_dp->aux.i2c_nack_count = 0;
4675 intel_dp->aux.i2c_defer_count = 0;
4676
beb60608 4677 intel_dp_set_edid(intel_dp);
a9756bb5 4678
d63885da
PZ
4679 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4680 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4681 status = connector_status_connected;
4682
09b1eb13
TP
4683 /* Try to read the source of the interrupt */
4684 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4685 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4686 /* Clear interrupt source */
4687 drm_dp_dpcd_writeb(&intel_dp->aux,
4688 DP_DEVICE_SERVICE_IRQ_VECTOR,
4689 sink_irq_vector);
4690
4691 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4692 intel_dp_handle_test_request(intel_dp);
4693 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4694 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4695 }
4696
c8c8fb33 4697out:
25f78f58 4698 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4699 return status;
a4fc5ed6
KP
4700}
4701
beb60608
CW
4702static void
4703intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4704{
df0e9248 4705 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4706 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4707 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4708 enum intel_display_power_domain power_domain;
a4fc5ed6 4709
beb60608
CW
4710 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4711 connector->base.id, connector->name);
4712 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4713
beb60608
CW
4714 if (connector->status != connector_status_connected)
4715 return;
671dedd2 4716
25f78f58
VS
4717 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4718 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4719
4720 intel_dp_set_edid(intel_dp);
4721
25f78f58 4722 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4723
4724 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4725 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4726}
4727
4728static int intel_dp_get_modes(struct drm_connector *connector)
4729{
4730 struct intel_connector *intel_connector = to_intel_connector(connector);
4731 struct edid *edid;
4732
4733 edid = intel_connector->detect_edid;
4734 if (edid) {
4735 int ret = intel_connector_update_modes(connector, edid);
4736 if (ret)
4737 return ret;
4738 }
32f9d658 4739
f8779fda 4740 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4741 if (is_edp(intel_attached_dp(connector)) &&
4742 intel_connector->panel.fixed_mode) {
f8779fda 4743 struct drm_display_mode *mode;
beb60608
CW
4744
4745 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4746 intel_connector->panel.fixed_mode);
f8779fda 4747 if (mode) {
32f9d658
ZW
4748 drm_mode_probed_add(connector, mode);
4749 return 1;
4750 }
4751 }
beb60608 4752
32f9d658 4753 return 0;
a4fc5ed6
KP
4754}
4755
1aad7ac0
CW
4756static bool
4757intel_dp_detect_audio(struct drm_connector *connector)
4758{
1aad7ac0 4759 bool has_audio = false;
beb60608 4760 struct edid *edid;
1aad7ac0 4761
beb60608
CW
4762 edid = to_intel_connector(connector)->detect_edid;
4763 if (edid)
1aad7ac0 4764 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4765
1aad7ac0
CW
4766 return has_audio;
4767}
4768
f684960e
CW
4769static int
4770intel_dp_set_property(struct drm_connector *connector,
4771 struct drm_property *property,
4772 uint64_t val)
4773{
e953fd7b 4774 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4775 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4776 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4777 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4778 int ret;
4779
662595df 4780 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4781 if (ret)
4782 return ret;
4783
3f43c48d 4784 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4785 int i = val;
4786 bool has_audio;
4787
4788 if (i == intel_dp->force_audio)
f684960e
CW
4789 return 0;
4790
1aad7ac0 4791 intel_dp->force_audio = i;
f684960e 4792
c3e5f67b 4793 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4794 has_audio = intel_dp_detect_audio(connector);
4795 else
c3e5f67b 4796 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4797
4798 if (has_audio == intel_dp->has_audio)
f684960e
CW
4799 return 0;
4800
1aad7ac0 4801 intel_dp->has_audio = has_audio;
f684960e
CW
4802 goto done;
4803 }
4804
e953fd7b 4805 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4806 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4807 bool old_range = intel_dp->limited_color_range;
ae4edb80 4808
55bc60db
VS
4809 switch (val) {
4810 case INTEL_BROADCAST_RGB_AUTO:
4811 intel_dp->color_range_auto = true;
4812 break;
4813 case INTEL_BROADCAST_RGB_FULL:
4814 intel_dp->color_range_auto = false;
0f2a2a75 4815 intel_dp->limited_color_range = false;
55bc60db
VS
4816 break;
4817 case INTEL_BROADCAST_RGB_LIMITED:
4818 intel_dp->color_range_auto = false;
0f2a2a75 4819 intel_dp->limited_color_range = true;
55bc60db
VS
4820 break;
4821 default:
4822 return -EINVAL;
4823 }
ae4edb80
DV
4824
4825 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4826 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4827 return 0;
4828
e953fd7b
CW
4829 goto done;
4830 }
4831
53b41837
YN
4832 if (is_edp(intel_dp) &&
4833 property == connector->dev->mode_config.scaling_mode_property) {
4834 if (val == DRM_MODE_SCALE_NONE) {
4835 DRM_DEBUG_KMS("no scaling not supported\n");
4836 return -EINVAL;
4837 }
4838
4839 if (intel_connector->panel.fitting_mode == val) {
4840 /* the eDP scaling property is not changed */
4841 return 0;
4842 }
4843 intel_connector->panel.fitting_mode = val;
4844
4845 goto done;
4846 }
4847
f684960e
CW
4848 return -EINVAL;
4849
4850done:
c0c36b94
CW
4851 if (intel_encoder->base.crtc)
4852 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4853
4854 return 0;
4855}
4856
a4fc5ed6 4857static void
73845adf 4858intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4859{
1d508706 4860 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4861
10e972d3 4862 kfree(intel_connector->detect_edid);
beb60608 4863
9cd300e0
JN
4864 if (!IS_ERR_OR_NULL(intel_connector->edid))
4865 kfree(intel_connector->edid);
4866
acd8db10
PZ
4867 /* Can't call is_edp() since the encoder may have been destroyed
4868 * already. */
4869 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4870 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4871
a4fc5ed6 4872 drm_connector_cleanup(connector);
55f78c43 4873 kfree(connector);
a4fc5ed6
KP
4874}
4875
00c09d70 4876void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4877{
da63a9f2
PZ
4878 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4879 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4880
0e32b39c 4881 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4882 if (is_edp(intel_dp)) {
4883 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4884 /*
4885	 * vdd might still be enabled due to the delayed vdd off.
4886 * Make sure vdd is actually turned off here.
4887 */
773538e8 4888 pps_lock(intel_dp);
4be73780 4889 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4890 pps_unlock(intel_dp);
4891
01527b31
CT
4892 if (intel_dp->edp_notifier.notifier_call) {
4893 unregister_reboot_notifier(&intel_dp->edp_notifier);
4894 intel_dp->edp_notifier.notifier_call = NULL;
4895 }
bd943159 4896 }
c8bd0e49 4897 drm_encoder_cleanup(encoder);
da63a9f2 4898 kfree(intel_dig_port);
24d05927
DV
4899}
4900
5eaa60c7 4901void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
07f9cd0b
ID
4902{
4903 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4904
4905 if (!is_edp(intel_dp))
4906 return;
4907
951468f3
VS
4908 /*
4909	 * vdd might still be enabled due to the delayed vdd off.
4910 * Make sure vdd is actually turned off here.
4911 */
afa4e53a 4912 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4913 pps_lock(intel_dp);
07f9cd0b 4914 edp_panel_vdd_off_sync(intel_dp);
773538e8 4915 pps_unlock(intel_dp);
07f9cd0b
ID
4916}
4917
49e6bc51
VS
4918static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4919{
4920 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4921 struct drm_device *dev = intel_dig_port->base.base.dev;
4922 struct drm_i915_private *dev_priv = dev->dev_private;
4923 enum intel_display_power_domain power_domain;
4924
4925 lockdep_assert_held(&dev_priv->pps_mutex);
4926
4927 if (!edp_have_panel_vdd(intel_dp))
4928 return;
4929
4930 /*
4931 * The VDD bit needs a power domain reference, so if the bit is
4932 * already enabled when we boot or resume, grab this reference and
4933 * schedule a vdd off, so we don't hold on to the reference
4934 * indefinitely.
4935 */
4936 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4937 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4938 intel_display_power_get(dev_priv, power_domain);
4939
4940 edp_panel_vdd_schedule_off(intel_dp);
4941}
4942
5eaa60c7 4943void intel_dp_encoder_reset(struct drm_encoder *encoder)
6d93c0c4 4944{
49e6bc51
VS
4945 struct intel_dp *intel_dp;
4946
4947 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4948 return;
4949
4950 intel_dp = enc_to_intel_dp(encoder);
4951
4952 pps_lock(intel_dp);
4953
4954 /*
4955 * Read out the current power sequencer assignment,
4956 * in case the BIOS did something with it.
4957 */
666a4537 4958 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
49e6bc51
VS
4959 vlv_initial_power_sequencer_setup(intel_dp);
4960
4961 intel_edp_panel_vdd_sanitize(intel_dp);
4962
4963 pps_unlock(intel_dp);
6d93c0c4
ID
4964}
4965
a4fc5ed6 4966static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4967 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4968 .detect = intel_dp_detect,
beb60608 4969 .force = intel_dp_force,
a4fc5ed6 4970 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4971 .set_property = intel_dp_set_property,
2545e4a6 4972 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4973 .destroy = intel_dp_connector_destroy,
c6f95f27 4974 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4975 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4976};
4977
4978static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4979 .get_modes = intel_dp_get_modes,
4980 .mode_valid = intel_dp_mode_valid,
df0e9248 4981 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4982};
4983
a4fc5ed6 4984static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4985 .reset = intel_dp_encoder_reset,
24d05927 4986 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4987};
4988
b2c5c181 4989enum irqreturn
13cf5504
DA
4990intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4991{
4992 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4993 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4994 struct drm_device *dev = intel_dig_port->base.base.dev;
4995 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4996 enum intel_display_power_domain power_domain;
b2c5c181 4997 enum irqreturn ret = IRQ_NONE;
1c767b33 4998
2540058f
TI
4999 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5000 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
0e32b39c 5001 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5002
7a7f84cc
VS
5003 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5004 /*
5005 * vdd off can generate a long pulse on eDP which
5006 * would require vdd on to handle it, and thus we
5007 * would end up in an endless cycle of
5008 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5009 */
5010 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5011 port_name(intel_dig_port->port));
a8b3d52f 5012 return IRQ_HANDLED;
7a7f84cc
VS
5013 }
5014
26fbb774
VS
5015 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5016 port_name(intel_dig_port->port),
0e32b39c 5017 long_hpd ? "long" : "short");
13cf5504 5018
25f78f58 5019 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
5020 intel_display_power_get(dev_priv, power_domain);
5021
0e32b39c 5022 if (long_hpd) {
5fa836a9
MK
5023 /* indicate that we need to restart link training */
5024 intel_dp->train_set_valid = false;
2a592bec 5025
7e66bcf2
JN
5026 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5027 goto mst_fail;
0e32b39c
DA
5028
5029 if (!intel_dp_get_dpcd(intel_dp)) {
5030 goto mst_fail;
5031 }
5032
5033 intel_dp_probe_oui(intel_dp);
5034
d14e7b6d
VS
5035 if (!intel_dp_probe_mst(intel_dp)) {
5036 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5037 intel_dp_check_link_status(intel_dp);
5038 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5039 goto mst_fail;
d14e7b6d 5040 }
0e32b39c
DA
5041 } else {
5042 if (intel_dp->is_mst) {
1c767b33 5043 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5044 goto mst_fail;
5045 }
5046
5047 if (!intel_dp->is_mst) {
5b215bcf 5048 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5049 intel_dp_check_link_status(intel_dp);
5b215bcf 5050 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5051 }
5052 }
b2c5c181
DV
5053
5054 ret = IRQ_HANDLED;
5055
1c767b33 5056 goto put_power;
0e32b39c
DA
5057mst_fail:
5058	/* if we were in MST mode and the device is no longer there, get out of MST mode */
5059 if (intel_dp->is_mst) {
5060 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5061 intel_dp->is_mst = false;
5062 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5063 }
1c767b33
ID
5064put_power:
5065 intel_display_power_put(dev_priv, power_domain);
5066
5067 return ret;
13cf5504
DA
5068}
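
/*
 * Illustrative sketch, not part of this file: the hotplug bottom half in
 * i915_irq.c is expected to look up the port and call the hook stored in
 * dev_priv->hotplug.irq_port[] (assigned in intel_dp_init() below), roughly:
 *
 *	struct intel_digital_port *dig_port = dev_priv->hotplug.irq_port[port];
 *
 *	if (dig_port && dig_port->hpd_pulse)
 *		ret = dig_port->hpd_pulse(dig_port, long_hpd);
 *
 * The local names (dig_port, port, long_hpd, ret) are assumptions used only
 * for this example.
 */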
5069
477ec328 5070/* check the VBT to see whether the eDP is on another port */
5d8a7752 5071bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5072{
5073 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5074 union child_device_config *p_child;
36e83a18 5075 int i;
5d8a7752 5076 static const short port_mapping[] = {
477ec328
RV
5077 [PORT_B] = DVO_PORT_DPB,
5078 [PORT_C] = DVO_PORT_DPC,
5079 [PORT_D] = DVO_PORT_DPD,
5080 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5081 };
36e83a18 5082
53ce81a7
VS
5083 /*
5084	 * eDP is not supported on g4x, so bail out early just
5085	 * for a bit of extra safety in case the VBT is bonkers.
5086 */
5087 if (INTEL_INFO(dev)->gen < 5)
5088 return false;
5089
3b32a35b
VS
5090 if (port == PORT_A)
5091 return true;
5092
41aa3448 5093 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5094 return false;
5095
41aa3448
RV
5096 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5097 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5098
5d8a7752 5099 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5100 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5101 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5102 return true;
5103 }
5104 return false;
5105}
5106
0e32b39c 5107void
f684960e
CW
5108intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5109{
53b41837
YN
5110 struct intel_connector *intel_connector = to_intel_connector(connector);
5111
3f43c48d 5112 intel_attach_force_audio_property(connector);
e953fd7b 5113 intel_attach_broadcast_rgb_property(connector);
55bc60db 5114 intel_dp->color_range_auto = true;
53b41837
YN
5115
5116 if (is_edp(intel_dp)) {
5117 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5118 drm_object_attach_property(
5119 &connector->base,
53b41837 5120 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5121 DRM_MODE_SCALE_ASPECT);
5122 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5123 }
f684960e
CW
5124}
5125
dada1a9f
ID
5126static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5127{
d28d4731 5128 intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
5129 intel_dp->last_power_on = jiffies;
5130 intel_dp->last_backlight_off = jiffies;
5131}
5132
67a54566
DV
5133static void
5134intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5135 struct intel_dp *intel_dp)
67a54566
DV
5136{
5137 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5138 struct edp_power_seq cur, vbt, spec,
5139 *final = &intel_dp->pps_delays;
b0a08bec 5140 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5141 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5142
e39b999a
VS
5143 lockdep_assert_held(&dev_priv->pps_mutex);
5144
81ddbc69
VS
5145 /* already initialized? */
5146 if (final->t11_t12 != 0)
5147 return;
5148
b0a08bec
VK
5149 if (IS_BROXTON(dev)) {
5150 /*
5151 * TODO: BXT has 2 sets of PPS registers.
5152	 * The correct register for Broxton needs to be identified
5153	 * using the VBT; hardcoding for now.
5154 */
5155 pp_ctrl_reg = BXT_PP_CONTROL(0);
5156 pp_on_reg = BXT_PP_ON_DELAYS(0);
5157 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5158 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5159 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5160 pp_on_reg = PCH_PP_ON_DELAYS;
5161 pp_off_reg = PCH_PP_OFF_DELAYS;
5162 pp_div_reg = PCH_PP_DIVISOR;
5163 } else {
bf13e81b
JN
5164 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5165
5166 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5167 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5168 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5169 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5170 }
67a54566
DV
5171
5172 /* Workaround: Need to write PP_CONTROL with the unlock key as
5173 * the very first thing. */
b0a08bec 5174 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5175
453c5420
JB
5176 pp_on = I915_READ(pp_on_reg);
5177 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5178 if (!IS_BROXTON(dev)) {
5179 I915_WRITE(pp_ctrl_reg, pp_ctl);
5180 pp_div = I915_READ(pp_div_reg);
5181 }
67a54566
DV
5182
5183 /* Pull timing values out of registers */
5184 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5185 PANEL_POWER_UP_DELAY_SHIFT;
5186
5187 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5188 PANEL_LIGHT_ON_DELAY_SHIFT;
5189
5190 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5191 PANEL_LIGHT_OFF_DELAY_SHIFT;
5192
5193 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5194 PANEL_POWER_DOWN_DELAY_SHIFT;
5195
b0a08bec
VK
5196 if (IS_BROXTON(dev)) {
5197 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5198 BXT_POWER_CYCLE_DELAY_SHIFT;
5199 if (tmp > 0)
5200 cur.t11_t12 = (tmp - 1) * 1000;
5201 else
5202 cur.t11_t12 = 0;
5203 } else {
5204 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5205 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5206 }
67a54566
DV
5207
5208 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5209 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5210
41aa3448 5211 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5212
5213 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5214 * our hw here, which are all in 100usec. */
5215 spec.t1_t3 = 210 * 10;
5216 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5217 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5218 spec.t10 = 500 * 10;
5219 /* This one is special and actually in units of 100ms, but zero
5220 * based in the hw (so we need to add 100 ms). But the sw vbt
5221 * table multiplies it with 1000 to make it in units of 100usec,
5222 * too. */
5223 spec.t11_t12 = (510 + 100) * 10;
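	/*
	 * Worked example of the units above (illustrative only): a VBT
	 * power-cycle delay of 600 ms is stored as 6000 here (units of
	 * 100 us), while the spec cap computed above is (510 + 100) * 10 =
	 * 6100, i.e. 610 ms. get_delay() further below divides by 10, so
	 * intel_dp->panel_power_cycle_delay ends up in milliseconds
	 * (600 in this example).
	 */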
5224
5225 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5226 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5227
5228 /* Use the max of the register settings and vbt. If both are
5229 * unset, fall back to the spec limits. */
36b5f425 5230#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5231 spec.field : \
5232 max(cur.field, vbt.field))
5233 assign_final(t1_t3);
5234 assign_final(t8);
5235 assign_final(t9);
5236 assign_final(t10);
5237 assign_final(t11_t12);
5238#undef assign_final
5239
36b5f425 5240#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5241 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5242 intel_dp->backlight_on_delay = get_delay(t8);
5243 intel_dp->backlight_off_delay = get_delay(t9);
5244 intel_dp->panel_power_down_delay = get_delay(t10);
5245 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5246#undef get_delay
5247
f30d26e4
JN
5248 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5249 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5250 intel_dp->panel_power_cycle_delay);
5251
5252 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5253 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5254}
5255
5256static void
5257intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5258 struct intel_dp *intel_dp)
f30d26e4
JN
5259{
5260 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5261 u32 pp_on, pp_off, pp_div, port_sel = 0;
5262 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5263 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5264 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5265 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5266
e39b999a 5267 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5268
b0a08bec
VK
5269 if (IS_BROXTON(dev)) {
5270 /*
5271 * TODO: BXT has 2 sets of PPS registers.
5272	 * The correct register for Broxton needs to be identified
5273	 * using the VBT; hardcoding for now.
5274 */
5275 pp_ctrl_reg = BXT_PP_CONTROL(0);
5276 pp_on_reg = BXT_PP_ON_DELAYS(0);
5277 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5278
5279 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5280 pp_on_reg = PCH_PP_ON_DELAYS;
5281 pp_off_reg = PCH_PP_OFF_DELAYS;
5282 pp_div_reg = PCH_PP_DIVISOR;
5283 } else {
bf13e81b
JN
5284 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5285
5286 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5287 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5288 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5289 }
5290
b2f19d1a
PZ
5291 /*
5292 * And finally store the new values in the power sequencer. The
5293 * backlight delays are set to 1 because we do manual waits on them. For
5294 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5295 * we'll end up waiting for the backlight off delay twice: once when we
5296 * do the manual sleep, and once when we disable the panel and wait for
5297 * the PP_STATUS bit to become zero.
5298 */
f30d26e4 5299 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5300 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5301 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5302 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5303 /* Compute the divisor for the pp clock, simply match the Bspec
5304 * formula. */
b0a08bec
VK
5305 if (IS_BROXTON(dev)) {
5306 pp_div = I915_READ(pp_ctrl_reg);
5307 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5308 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5309 << BXT_POWER_CYCLE_DELAY_SHIFT);
5310 } else {
5311 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5312 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5313 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5314 }
67a54566
DV
5315
5316 /* Haswell doesn't have any port selection bits for the panel
5317 * power sequencer any more. */
666a4537 5318 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5319 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5320 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5321 if (port == PORT_A)
a24c144c 5322 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5323 else
a24c144c 5324 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5325 }
5326
453c5420
JB
5327 pp_on |= port_sel;
5328
5329 I915_WRITE(pp_on_reg, pp_on);
5330 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5331 if (IS_BROXTON(dev))
5332 I915_WRITE(pp_ctrl_reg, pp_div);
5333 else
5334 I915_WRITE(pp_div_reg, pp_div);
67a54566 5335
67a54566 5336 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5337 I915_READ(pp_on_reg),
5338 I915_READ(pp_off_reg),
b0a08bec
VK
5339 IS_BROXTON(dev) ?
5340 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5341 I915_READ(pp_div_reg));
f684960e
CW
5342}
5343
b33a2815
VK
5344/**
5345 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5346 * @dev: DRM device
5347 * @refresh_rate: RR to be programmed
5348 *
5349 * This function gets called when refresh rate (RR) has to be changed from
5350 * one frequency to another. Switches can be between high and low RR
5351 * supported by the panel or to any other RR based on media playback (in
5352 * this case, RR value needs to be passed from user space).
5353 *
5354 * The caller of this function needs to take a lock on dev_priv->drrs.
5355 */
96178eeb 5356static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5357{
5358 struct drm_i915_private *dev_priv = dev->dev_private;
5359 struct intel_encoder *encoder;
96178eeb
VK
5360 struct intel_digital_port *dig_port = NULL;
5361 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5362 struct intel_crtc_state *config = NULL;
439d7ac0 5363 struct intel_crtc *intel_crtc = NULL;
96178eeb 5364 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5365
5366 if (refresh_rate <= 0) {
5367 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5368 return;
5369 }
5370
96178eeb
VK
5371 if (intel_dp == NULL) {
5372 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5373 return;
5374 }
5375
1fcc9d1c 5376 /*
e4d59f6b
RV
5377 * FIXME: This needs proper synchronization with psr state for some
5378 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5379 */
439d7ac0 5380
96178eeb
VK
5381 dig_port = dp_to_dig_port(intel_dp);
5382 encoder = &dig_port->base;
723f9aab 5383 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5384
5385 if (!intel_crtc) {
5386 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5387 return;
5388 }
5389
6e3c9717 5390 config = intel_crtc->config;
439d7ac0 5391
96178eeb 5392 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5393 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5394 return;
5395 }
5396
96178eeb
VK
5397 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5398 refresh_rate)
439d7ac0
PB
5399 index = DRRS_LOW_RR;
5400
96178eeb 5401 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5402 DRM_DEBUG_KMS(
5403 "DRRS requested for previously set RR...ignoring\n");
5404 return;
5405 }
5406
5407 if (!intel_crtc->active) {
5408 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5409 return;
5410 }
5411
44395bfe 5412 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5413 switch (index) {
5414 case DRRS_HIGH_RR:
5415 intel_dp_set_m_n(intel_crtc, M1_N1);
5416 break;
5417 case DRRS_LOW_RR:
5418 intel_dp_set_m_n(intel_crtc, M2_N2);
5419 break;
5420 case DRRS_MAX_RR:
5421 default:
5422 DRM_ERROR("Unsupported refreshrate type\n");
5423 }
5424 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5425 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5426 u32 val;
a4c30b1d 5427
649636ef 5428 val = I915_READ(reg);
439d7ac0 5429 if (index > DRRS_HIGH_RR) {
666a4537 5430 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5431 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5432 else
5433 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5434 } else {
666a4537 5435 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5436 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5437 else
5438 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5439 }
5440 I915_WRITE(reg, val);
5441 }
5442
4e9ac947
VK
5443 dev_priv->drrs.refresh_rate_type = index;
5444
5445 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5446}
5447
b33a2815
VK
5448/**
5449 * intel_edp_drrs_enable - init drrs struct if supported
5450 * @intel_dp: DP struct
5451 *
5452 * Initializes frontbuffer_bits and drrs.dp
5453 */
c395578e
VK
5454void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5455{
5456 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5457 struct drm_i915_private *dev_priv = dev->dev_private;
5458 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5459 struct drm_crtc *crtc = dig_port->base.base.crtc;
5460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5461
5462 if (!intel_crtc->config->has_drrs) {
5463 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5464 return;
5465 }
5466
5467 mutex_lock(&dev_priv->drrs.mutex);
5468 if (WARN_ON(dev_priv->drrs.dp)) {
5469 DRM_ERROR("DRRS already enabled\n");
5470 goto unlock;
5471 }
5472
5473 dev_priv->drrs.busy_frontbuffer_bits = 0;
5474
5475 dev_priv->drrs.dp = intel_dp;
5476
5477unlock:
5478 mutex_unlock(&dev_priv->drrs.mutex);
5479}
5480
b33a2815
VK
5481/**
5482 * intel_edp_drrs_disable - Disable DRRS
5483 * @intel_dp: DP struct
5484 *
5485 */
c395578e
VK
5486void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5487{
5488 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5489 struct drm_i915_private *dev_priv = dev->dev_private;
5490 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5491 struct drm_crtc *crtc = dig_port->base.base.crtc;
5492 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5493
5494 if (!intel_crtc->config->has_drrs)
5495 return;
5496
5497 mutex_lock(&dev_priv->drrs.mutex);
5498 if (!dev_priv->drrs.dp) {
5499 mutex_unlock(&dev_priv->drrs.mutex);
5500 return;
5501 }
5502
5503 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5504 intel_dp_set_drrs_state(dev_priv->dev,
5505 intel_dp->attached_connector->panel.
5506 fixed_mode->vrefresh);
5507
5508 dev_priv->drrs.dp = NULL;
5509 mutex_unlock(&dev_priv->drrs.mutex);
5510
5511 cancel_delayed_work_sync(&dev_priv->drrs.work);
5512}
5513
4e9ac947
VK
5514static void intel_edp_drrs_downclock_work(struct work_struct *work)
5515{
5516 struct drm_i915_private *dev_priv =
5517 container_of(work, typeof(*dev_priv), drrs.work.work);
5518 struct intel_dp *intel_dp;
5519
5520 mutex_lock(&dev_priv->drrs.mutex);
5521
5522 intel_dp = dev_priv->drrs.dp;
5523
5524 if (!intel_dp)
5525 goto unlock;
5526
439d7ac0 5527 /*
4e9ac947
VK
5528 * The delayed work can race with an invalidate hence we need to
5529 * recheck.
439d7ac0
PB
5530 */
5531
4e9ac947
VK
5532 if (dev_priv->drrs.busy_frontbuffer_bits)
5533 goto unlock;
439d7ac0 5534
4e9ac947
VK
5535 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5536 intel_dp_set_drrs_state(dev_priv->dev,
5537 intel_dp->attached_connector->panel.
5538 downclock_mode->vrefresh);
439d7ac0 5539
4e9ac947 5540unlock:
4e9ac947 5541 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5542}
5543
b33a2815 5544/**
0ddfd203 5545 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5546 * @dev: DRM device
5547 * @frontbuffer_bits: frontbuffer plane tracking bits
5548 *
0ddfd203
R
5549 * This function gets called every time rendering on the given planes starts.
5550 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5551 *
5552 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5553 */
a93fad0f
VK
5554void intel_edp_drrs_invalidate(struct drm_device *dev,
5555 unsigned frontbuffer_bits)
5556{
5557 struct drm_i915_private *dev_priv = dev->dev_private;
5558 struct drm_crtc *crtc;
5559 enum pipe pipe;
5560
9da7d693 5561 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5562 return;
5563
88f933a8 5564 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5565
a93fad0f 5566 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5567 if (!dev_priv->drrs.dp) {
5568 mutex_unlock(&dev_priv->drrs.mutex);
5569 return;
5570 }
5571
a93fad0f
VK
5572 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5573 pipe = to_intel_crtc(crtc)->pipe;
5574
c1d038c6
DV
5575 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5576 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5577
0ddfd203 5578 /* invalidate means busy screen hence upclock */
c1d038c6 5579 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5580 intel_dp_set_drrs_state(dev_priv->dev,
5581 dev_priv->drrs.dp->attached_connector->panel.
5582 fixed_mode->vrefresh);
a93fad0f 5583
a93fad0f
VK
5584 mutex_unlock(&dev_priv->drrs.mutex);
5585}
5586
b33a2815 5587/**
0ddfd203 5588 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5589 * @dev: DRM device
5590 * @frontbuffer_bits: frontbuffer plane tracking bits
5591 *
0ddfd203
R
5592 * This function gets called every time rendering on the given planes has
5593 * completed or a flip on a crtc has completed. So DRRS should be upclocked
5594 * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
5595 * if no other planes are dirty.
b33a2815
VK
5596 *
5597 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5598 */
a93fad0f
VK
5599void intel_edp_drrs_flush(struct drm_device *dev,
5600 unsigned frontbuffer_bits)
5601{
5602 struct drm_i915_private *dev_priv = dev->dev_private;
5603 struct drm_crtc *crtc;
5604 enum pipe pipe;
5605
9da7d693 5606 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5607 return;
5608
88f933a8 5609 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5610
a93fad0f 5611 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5612 if (!dev_priv->drrs.dp) {
5613 mutex_unlock(&dev_priv->drrs.mutex);
5614 return;
5615 }
5616
a93fad0f
VK
5617 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5618 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5619
5620 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5621 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5622
0ddfd203 5623 /* flush means busy screen hence upclock */
c1d038c6 5624 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5625 intel_dp_set_drrs_state(dev_priv->dev,
5626 dev_priv->drrs.dp->attached_connector->panel.
5627 fixed_mode->vrefresh);
5628
5629 /*
5630 * flush also means no more activity hence schedule downclock, if all
5631 * other fbs are quiescent too
5632 */
5633 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5634 schedule_delayed_work(&dev_priv->drrs.work,
5635 msecs_to_jiffies(1000));
5636 mutex_unlock(&dev_priv->drrs.mutex);
5637}
5638
b33a2815
VK
5639/**
5640 * DOC: Display Refresh Rate Switching (DRRS)
5641 *
5642 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5643 * which enables switching between low and high refresh rates,
5644 * dynamically, based on the usage scenario. This feature is applicable
5645 * for internal panels.
5646 *
5647 * Indication that the panel supports DRRS is given by the panel EDID, which
5648 * would list multiple refresh rates for one resolution.
5649 *
5650 * DRRS is of 2 types - static and seamless.
5651 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5652 * (may appear as a blink on screen) and is used in dock-undock scenario.
5653 * Seamless DRRS involves changing RR without any visual effect to the user
5654 * and can be used during normal system usage. This is done by programming
5655 * certain registers.
5656 *
5657 * Support for static/seamless DRRS may be indicated in the VBT based on
5658 * inputs from the panel spec.
5659 *
5660 * DRRS saves power by switching to low RR based on usage scenarios.
5661 *
5662 * eDP DRRS:-
5663 * The implementation is based on frontbuffer tracking.
5664 * When there is a disturbance on the screen triggered by user activity or a
5665 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5666 * When there is no movement on screen, after a timeout of 1 second, a switch
5667 * to low RR is made.
5668 * For integration with frontbuffer tracking code,
5669 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5670 *
5671 * DRRS can be further extended to support other internal panels and also
5672 * the scenario of video playback wherein RR is set based on the rate
5673 * requested by userspace.
5674 */
5675
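/*
 * Minimal usage sketch (illustrative, not taken from the frontbuffer
 * tracking code itself): callers are expected to drive the two hooks above
 * roughly as follows, where dev and frontbuffer_bits stand for the caller's
 * DRM device and plane tracking bits.
 *
 *	// rendering is about to dirty these planes: force the high RR
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *
 *	// rendering/flip completed: upclock and re-arm the 1 second
 *	// idleness timer that switches back to the low RR
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);
 */
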
5676/**
5677 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5678 * @intel_connector: eDP connector
5679 * @fixed_mode: preferred mode of panel
5680 *
5681 * This function is called only once at driver load to initialize basic
5682 * DRRS stuff.
5683 *
5684 * Returns:
5685 * Downclock mode if panel supports it, else return NULL.
5686 * DRRS support is determined by the presence of downclock mode (apart
5687 * from VBT setting).
5688 */
4f9db5b5 5689static struct drm_display_mode *
96178eeb
VK
5690intel_dp_drrs_init(struct intel_connector *intel_connector,
5691 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5692{
5693 struct drm_connector *connector = &intel_connector->base;
96178eeb 5694 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5695 struct drm_i915_private *dev_priv = dev->dev_private;
5696 struct drm_display_mode *downclock_mode = NULL;
5697
9da7d693
DV
5698 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5699 mutex_init(&dev_priv->drrs.mutex);
5700
4f9db5b5
PB
5701 if (INTEL_INFO(dev)->gen <= 6) {
5702 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5703 return NULL;
5704 }
5705
5706 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5707 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5708 return NULL;
5709 }
5710
5711 downclock_mode = intel_find_panel_downclock
5712 (dev, fixed_mode, connector);
5713
5714 if (!downclock_mode) {
a1d26342 5715 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5716 return NULL;
5717 }
5718
96178eeb 5719 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5720
96178eeb 5721 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5722 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5723 return downclock_mode;
5724}
5725
ed92f0b2 5726static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5727 struct intel_connector *intel_connector)
ed92f0b2
PZ
5728{
5729 struct drm_connector *connector = &intel_connector->base;
5730 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5731 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5732 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5733 struct drm_i915_private *dev_priv = dev->dev_private;
5734 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5735 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5736 bool has_dpcd;
5737 struct drm_display_mode *scan;
5738 struct edid *edid;
6517d273 5739 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5740
5741 if (!is_edp(intel_dp))
5742 return true;
5743
49e6bc51
VS
5744 pps_lock(intel_dp);
5745 intel_edp_panel_vdd_sanitize(intel_dp);
5746 pps_unlock(intel_dp);
63635217 5747
ed92f0b2 5748 /* Cache DPCD and EDID for edp. */
ed92f0b2 5749 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5750
5751 if (has_dpcd) {
5752 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5753 dev_priv->no_aux_handshake =
5754 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5755 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5756 } else {
5757 /* if this fails, presume the device is a ghost */
5758 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5759 return false;
5760 }
5761
5762 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5763 pps_lock(intel_dp);
36b5f425 5764 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5765 pps_unlock(intel_dp);
ed92f0b2 5766
060c8778 5767 mutex_lock(&dev->mode_config.mutex);
0b99836f 5768 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5769 if (edid) {
5770 if (drm_add_edid_modes(connector, edid)) {
5771 drm_mode_connector_update_edid_property(connector,
5772 edid);
5773 drm_edid_to_eld(connector, edid);
5774 } else {
5775 kfree(edid);
5776 edid = ERR_PTR(-EINVAL);
5777 }
5778 } else {
5779 edid = ERR_PTR(-ENOENT);
5780 }
5781 intel_connector->edid = edid;
5782
5783 /* prefer fixed mode from EDID if available */
5784 list_for_each_entry(scan, &connector->probed_modes, head) {
5785 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5786 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5787 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5788 intel_connector, fixed_mode);
ed92f0b2
PZ
5789 break;
5790 }
5791 }
5792
5793 /* fallback to VBT if available for eDP */
5794 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5795 fixed_mode = drm_mode_duplicate(dev,
5796 dev_priv->vbt.lfp_lvds_vbt_mode);
5797 if (fixed_mode)
5798 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5799 }
060c8778 5800 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5801
666a4537 5802 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
01527b31
CT
5803 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5804 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5805
5806 /*
5807 * Figure out the current pipe for the initial backlight setup.
5808 * If the current pipe isn't valid, try the PPS pipe, and if that
5809 * fails just assume pipe A.
5810 */
5811 if (IS_CHERRYVIEW(dev))
5812 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5813 else
5814 pipe = PORT_TO_PIPE(intel_dp->DP);
5815
5816 if (pipe != PIPE_A && pipe != PIPE_B)
5817 pipe = intel_dp->pps_pipe;
5818
5819 if (pipe != PIPE_A && pipe != PIPE_B)
5820 pipe = PIPE_A;
5821
5822 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5823 pipe_name(pipe));
01527b31
CT
5824 }
5825
4f9db5b5 5826 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5827 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5828 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5829
5830 return true;
5831}
5832
16c25533 5833bool
f0fec3f2
PZ
5834intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5835 struct intel_connector *intel_connector)
a4fc5ed6 5836{
f0fec3f2
PZ
5837 struct drm_connector *connector = &intel_connector->base;
5838 struct intel_dp *intel_dp = &intel_dig_port->dp;
5839 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5840 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5841 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5842 enum port port = intel_dig_port->port;
a121f4e5 5843 int type, ret;
a4fc5ed6 5844
ccb1a831
VS
5845 if (WARN(intel_dig_port->max_lanes < 1,
5846 "Not enough lanes (%d) for DP on port %c\n",
5847 intel_dig_port->max_lanes, port_name(port)))
5848 return false;
5849
a4a5d2f8
VS
5850 intel_dp->pps_pipe = INVALID_PIPE;
5851
ec5b01dd 5852 /* intel_dp vfuncs */
b6b5e383
DL
5853 if (INTEL_INFO(dev)->gen >= 9)
5854 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
666a4537 5855 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
ec5b01dd
DL
5856 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5857 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5858 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5859 else if (HAS_PCH_SPLIT(dev))
5860 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5861 else
5862 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5863
b9ca5fad
DL
5864 if (INTEL_INFO(dev)->gen >= 9)
5865 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5866 else
5867 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5868
ad64217b
ACO
5869 if (HAS_DDI(dev))
5870 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5871
0767935e
DV
5872 /* Preserve the current hw state. */
5873 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5874 intel_dp->attached_connector = intel_connector;
3d3dc149 5875
3b32a35b 5876 if (intel_dp_is_edp(dev, port))
b329530c 5877 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5878 else
5879 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5880
f7d24902
ID
5881 /*
5882 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5883 * for DP the encoder type can be set by the caller to
5884 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5885 */
5886 if (type == DRM_MODE_CONNECTOR_eDP)
5887 intel_encoder->type = INTEL_OUTPUT_EDP;
5888
c17ed5b5 5889 /* eDP only on port B and/or C on vlv/chv */
666a4537
WB
5890 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5891 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
c17ed5b5
VS
5892 return false;
5893
e7281eab
ID
5894 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5895 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5896 port_name(port));
5897
b329530c 5898 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5899 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5900
a4fc5ed6
KP
5901 connector->interlace_allowed = true;
5902 connector->doublescan_allowed = 0;
5903
f0fec3f2 5904 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5905 edp_panel_vdd_work);
a4fc5ed6 5906
df0e9248 5907 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5908 drm_connector_register(connector);
a4fc5ed6 5909
affa9354 5910 if (HAS_DDI(dev))
bcbc889b
PZ
5911 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5912 else
5913 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5914 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5915
0b99836f 5916 /* Set up the hotplug pin. */
ab9d7c30
PZ
5917 switch (port) {
5918 case PORT_A:
1d843f9d 5919 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5920 break;
5921 case PORT_B:
1d843f9d 5922 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5923 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5924 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5925 break;
5926 case PORT_C:
1d843f9d 5927 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5928 break;
5929 case PORT_D:
1d843f9d 5930 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5931 break;
26951caf
XZ
5932 case PORT_E:
5933 intel_encoder->hpd_pin = HPD_PORT_E;
5934 break;
ab9d7c30 5935 default:
ad1c0b19 5936 BUG();
5eb08b69
ZW
5937 }
5938
dada1a9f 5939 if (is_edp(intel_dp)) {
773538e8 5940 pps_lock(intel_dp);
1e74a324 5941 intel_dp_init_panel_power_timestamps(intel_dp);
666a4537 5942 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
a4a5d2f8 5943 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5944 else
36b5f425 5945 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5946 pps_unlock(intel_dp);
dada1a9f 5947 }
0095e6dc 5948
a121f4e5
VS
5949 ret = intel_dp_aux_init(intel_dp, intel_connector);
5950 if (ret)
5951 goto fail;
c1f05264 5952
0e32b39c 5953 /* init MST on ports that can support it */
0c9b3715
JN
5954 if (HAS_DP_MST(dev) &&
5955 (port == PORT_B || port == PORT_C || port == PORT_D))
5956 intel_dp_mst_encoder_init(intel_dig_port,
5957 intel_connector->base.base.id);
0e32b39c 5958
36b5f425 5959 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
5960 intel_dp_aux_fini(intel_dp);
5961 intel_dp_mst_encoder_cleanup(intel_dig_port);
5962 goto fail;
b2f246a8 5963 }
32f9d658 5964
f684960e
CW
5965 intel_dp_add_properties(intel_dp, connector);
5966
a4fc5ed6
KP
5967 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5968 * 0xd. Failure to do so will result in spurious interrupts being
5969 * generated on the port when a cable is not attached.
5970 */
5971 if (IS_G4X(dev) && !IS_GM45(dev)) {
5972 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5973 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5974 }
16c25533 5975
aa7471d2
JN
5976 i915_debugfs_connector_add(connector);
5977
16c25533 5978 return true;
a121f4e5
VS
5979
5980fail:
5981 if (is_edp(intel_dp)) {
5982 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5983 /*
5984	 * vdd might still be enabled due to the delayed vdd off.
5985 * Make sure vdd is actually turned off here.
5986 */
5987 pps_lock(intel_dp);
5988 edp_panel_vdd_off_sync(intel_dp);
5989 pps_unlock(intel_dp);
5990 }
5991 drm_connector_unregister(connector);
5992 drm_connector_cleanup(connector);
5993
5994 return false;
a4fc5ed6 5995}
f0fec3f2
PZ
5996
5997void
f0f59a00
VS
5998intel_dp_init(struct drm_device *dev,
5999 i915_reg_t output_reg, enum port port)
f0fec3f2 6000{
13cf5504 6001 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6002 struct intel_digital_port *intel_dig_port;
6003 struct intel_encoder *intel_encoder;
6004 struct drm_encoder *encoder;
6005 struct intel_connector *intel_connector;
6006
b14c5679 6007 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6008 if (!intel_dig_port)
6009 return;
6010
08d9bc92 6011 intel_connector = intel_connector_alloc();
11aee0f6
SM
6012 if (!intel_connector)
6013 goto err_connector_alloc;
f0fec3f2
PZ
6014
6015 intel_encoder = &intel_dig_port->base;
6016 encoder = &intel_encoder->base;
6017
893da0c9 6018 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
ade1ba73 6019 DRM_MODE_ENCODER_TMDS, NULL))
893da0c9 6020 goto err_encoder_init;
f0fec3f2 6021
5bfe2ac0 6022 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6023 intel_encoder->disable = intel_disable_dp;
00c09d70 6024 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6025 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6026 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6027 if (IS_CHERRYVIEW(dev)) {
9197c88b 6028 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6029 intel_encoder->pre_enable = chv_pre_enable_dp;
6030 intel_encoder->enable = vlv_enable_dp;
580d3811 6031 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6032 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6033 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6034 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6035 intel_encoder->pre_enable = vlv_pre_enable_dp;
6036 intel_encoder->enable = vlv_enable_dp;
49277c31 6037 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6038 } else {
ecff4f3b
JN
6039 intel_encoder->pre_enable = g4x_pre_enable_dp;
6040 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6041 if (INTEL_INFO(dev)->gen >= 5)
6042 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6043 }
f0fec3f2 6044
174edf1f 6045 intel_dig_port->port = port;
f0fec3f2 6046 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 6047 intel_dig_port->max_lanes = 4;
f0fec3f2 6048
00c09d70 6049 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6050 if (IS_CHERRYVIEW(dev)) {
6051 if (port == PORT_D)
6052 intel_encoder->crtc_mask = 1 << 2;
6053 else
6054 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6055 } else {
6056 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6057 }
bc079e8b 6058 intel_encoder->cloneable = 0;
f0fec3f2 6059
13cf5504 6060 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6061 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6062
11aee0f6
SM
6063 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6064 goto err_init_connector;
6065
6066 return;
6067
6068err_init_connector:
6069 drm_encoder_cleanup(encoder);
893da0c9 6070err_encoder_init:
11aee0f6
SM
6071 kfree(intel_connector);
6072err_connector_alloc:
6073 kfree(intel_dig_port);
6074
6075 return;
f0fec3f2 6076}
0e32b39c
DA
6077
6078void intel_dp_mst_suspend(struct drm_device *dev)
6079{
6080 struct drm_i915_private *dev_priv = dev->dev_private;
6081 int i;
6082
6083 /* disable MST */
6084 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6085 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6086 if (!intel_dig_port)
6087 continue;
6088
6089 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6090 if (!intel_dig_port->dp.can_mst)
6091 continue;
6092 if (intel_dig_port->dp.is_mst)
6093 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6094 }
6095 }
6096}
6097
6098void intel_dp_mst_resume(struct drm_device *dev)
6099{
6100 struct drm_i915_private *dev_priv = dev->dev_private;
6101 int i;
6102
6103 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6104 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6105 if (!intel_dig_port)
6106 continue;
6107 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6108 int ret;
6109
6110 if (!intel_dig_port->dp.can_mst)
6111 continue;
6112
6113 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6114 if (ret != 0) {
6115 intel_dp_check_mst_status(&intel_dig_port->dp);
6116 }
6117 }
6118 }
6119}