drm/i915: Add missing 'else' to intel_digital_port_connected()
[deliverable/linux.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
/* Pairing of a DP link clock (in kHz) with the DPLL divider values needed
 * to produce it on a given platform. */
struct dp_link_dpll {
	int clock;		/* link clock in kHz (e.g. 162000, 270000) */
	struct dpll dpll;	/* divider settings that yield this clock */
};

/* Gen4 (i965-class) DPLL settings for RBR (1.62 GHz) and HBR (2.7 GHz). */
static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

/* PCH-split (Ironlake+) DPLL settings for RBR and HBR. */
static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

/* Valleyview DPLL settings for RBR and HBR. */
static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have  more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};

/* Per-platform tables of supported DP link rates, in kHz, ascending. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
e0fce78f
VS
/* Return a 4-bit mask of the lanes NOT used by a link of the given width. */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1 << lane_count) - 1;

	return 0xf & ~used;
}
137
ed4e9c1d
VS
138static int
139intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 140{
7183dc29 141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
1db10e28 146 case DP_LINK_BW_5_4:
d4eead50 147 break;
a4fc5ed6 148 default:
d4eead50
ID
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
a4fc5ed6
KP
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155}
156
eeb6324d
PZ
157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158{
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
eeb6324d
PZ
160 u8 source_max, sink_max;
161
ccb1a831 162 source_max = intel_dig_port->max_lanes;
eeb6324d
PZ
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166}
167
cd9dde44
AJ
168/*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
/*
 * Bandwidth needed by a mode, in decakilobits/s (see the units comment
 * above): pixel clock (kHz) * bpp, divided by 10, rounded up.
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
190
fe27d53e
DA
/*
 * Max payload capacity of a link, in decakilobits/s: link clock * lanes,
 * scaled by the 8b/10b coding efficiency (8/10).
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int raw = max_link_clock * max_lanes;

	return raw * 8 / 10;
}
196
/*
 * Validate a display mode against the capabilities of this DP connector:
 * panel fixed-mode bounds (eDP), link bandwidth at 18bpp, pipe max dotclock,
 * a 10 MHz lower clock bound, and no double-clocked modes.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;

	/* eDP panels can't exceed the native panel mode's dimensions;
	 * smaller modes are scaled, so bandwidth is judged at the
	 * fixed mode's clock instead of the requested one. */
	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	/* Bandwidth check assumes the minimum 18bpp (6bpc) so a mode is
	 * only rejected if it can't fit even with maximum compression of
	 * the pipe output. */
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
235
/* Pack up to 4 bytes into a big-endian 32-bit word for an AUX CH data
 * register; the first byte lands in the most significant position. */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	uint32_t value = 0;
	int count = src_bytes > 4 ? 4 : src_bytes;
	int i;

	for (i = 0; i < count; i++)
		value |= ((uint32_t) src[i]) << ((3 - i) * 8);

	return value;
}
247
/* Unpack a big-endian 32-bit AUX CH data word into up to 4 bytes;
 * the most significant byte comes out first. */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int count = dst_bytes > 4 ? 4 : dst_bytes;
	int i;

	for (i = 0; i < count; i++)
		dst[i] = src >> ((3 - i) * 8);
}
256
bf13e81b
JN
257static void
258intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 259 struct intel_dp *intel_dp);
bf13e81b
JN
260static void
261intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 262 struct intel_dp *intel_dp);
bf13e81b 263
773538e8
VS
264static void pps_lock(struct intel_dp *intel_dp)
265{
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
271
272 /*
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
275 */
25f78f58 276 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
277 intel_display_power_get(dev_priv, power_domain);
278
279 mutex_lock(&dev_priv->pps_mutex);
280}
281
282static void pps_unlock(struct intel_dp *intel_dp)
283{
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
289
290 mutex_unlock(&dev_priv->pps_mutex);
291
25f78f58 292 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
293 intel_display_power_put(dev_priv, power_domain);
294}
295
961a0db0
VS
/*
 * Force the pipe's power sequencer to lock onto this port by briefly
 * enabling and disabling the port with the pipe's DPLL running. Without
 * this trick even the VDD force bit has no effect (see comment below).
 * Must only be called while the port is disabled.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	/* Pipe select bits differ between CHV and VLV. */
	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		/* On CHV the PHY channel must also be powered up first;
		 * remember whether we need to power it back down. */
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power seqeuencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL/PHY enabling from above. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
369
bf13e81b
JN
/*
 * Return the pipe whose power sequencer drives this eDP port, assigning
 * one (and kicking it into gear) if none is bound yet. Caller must hold
 * pps_mutex. Only valid for eDP ports.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	/* May yank the sequencer away from another (non-eDP) port. */
	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
432
6491ab27
VS
433typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
434 enum pipe pipe);
435
436static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
437 enum pipe pipe)
438{
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
440}
441
442static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
443 enum pipe pipe)
444{
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
446}
447
448static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450{
451 return true;
452}
bf13e81b 453
a4a5d2f8 454static enum pipe
6491ab27
VS
455vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
456 enum port port,
457 vlv_pipe_check pipe_check)
a4a5d2f8
VS
458{
459 enum pipe pipe;
bf13e81b 460
bf13e81b
JN
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
464
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
466 continue;
467
6491ab27
VS
468 if (!pipe_check(dev_priv, pipe))
469 continue;
470
a4a5d2f8 471 return pipe;
bf13e81b
JN
472 }
473
a4a5d2f8
VS
474 return INVALID_PIPE;
475}
476
/*
 * Bind this eDP port to the power sequencer the BIOS left configured for
 * it, preferring (in order) a pipe with the panel powered on, then one
 * with VDD on, then any pipe with the right port selected. Caller must
 * hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	/* Sync our software state/registers with the chosen sequencer. */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
513
773538e8
VS
/*
 * Forget all eDP port → power sequencer bindings (VLV/CHV only), forcing
 * re-selection on next use. Called without pps_mutex — see the comment
 * below for why that lock cannot be taken here.
 */
void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}
542
f0f59a00
VS
543static i915_reg_t
544_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b
JN
545{
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
b0a08bec
VK
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554}
555
f0f59a00
VS
556static i915_reg_t
557_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b
JN
558{
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
560
b0a08bec
VK
561 if (IS_BROXTON(dev))
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
564 return PCH_PP_STATUS;
565 else
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
567}
568
01527b31
CT
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Only act on eDP ports and only for an actual restart. */
	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		/* Block the reboot until the panel power-cycle (T12)
		 * delay has been honored. */
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}
604
4be73780 605static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 606{
30add22d 607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
608 struct drm_i915_private *dev_priv = dev->dev_private;
609
e39b999a
VS
610 lockdep_assert_held(&dev_priv->pps_mutex);
611
666a4537 612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
613 intel_dp->pps_pipe == INVALID_PIPE)
614 return false;
615
bf13e81b 616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
617}
618
4be73780 619static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 620{
30add22d 621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
622 struct drm_i915_private *dev_priv = dev->dev_private;
623
e39b999a
VS
624 lockdep_assert_held(&dev_priv->pps_mutex);
625
666a4537 626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
627 intel_dp->pps_pipe == INVALID_PIPE)
628 return false;
629
773538e8 630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
631}
632
9b984dae
KP
633static void
634intel_dp_check_edp(struct intel_dp *intel_dp)
635{
30add22d 636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 637 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 638
9b984dae
KP
639 if (!is_edp(intel_dp))
640 return;
453c5420 641
4be73780 642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
647 }
648}
649
9ee32fea
DV
/*
 * Wait (irq-driven or by polling) for the AUX channel to go idle and
 * return the final status register value. Note the C macro below both
 * tests SEND_BUSY and latches the register value into 'status' as a
 * side effect, so 'status' is always the last value observed.
 */
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}
673
ec5b01dd 674static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 675{
174edf1f
PZ
676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
677 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 678
ec5b01dd
DL
679 /*
680 * The clock divider is based off the hrawclk, and would like to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that
a4fc5ed6 682 */
fce18c4c 683 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
ec5b01dd
DL
684}
685
686static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687{
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 690 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
691
692 if (index)
693 return 0;
694
695 if (intel_dig_port->port == PORT_A) {
fce18c4c 696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
05024da3 697
ec5b01dd 698 } else {
fce18c4c 699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
ec5b01dd
DL
700 }
701}
702
703static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704{
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private;
708
709 if (intel_dig_port->port == PORT_A) {
710 if (index)
711 return 0;
05024da3 712 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
56f5f700 713 } else if (HAS_PCH_LPT_H(dev_priv)) {
2c55c336 714 /* Workaround for non-ULT HSW */
bc86625a
CW
715 switch (index) {
716 case 0: return 63;
717 case 1: return 72;
718 default: return 0;
719 }
ec5b01dd 720 } else {
fce18c4c 721 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
2c55c336 722 }
b84a1cf8
RV
723}
724
ec5b01dd
DL
725static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
726{
727 return index ? 0 : 100;
728}
729
b6b5e383
DL
730static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
731{
732 /*
733 * SKL doesn't need us to program the AUX clock divider (Hardware will
734 * derive the clock from CDCLK automatically). We still implement the
735 * get_aux_clock_divider vfunc to plug-in into the existing code.
736 */
737 return index ? 0 : 1;
738}
739
5ed12a19
DL
740static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
741 bool has_aux_irq,
742 int send_bytes,
743 uint32_t aux_clock_divider)
744{
745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 struct drm_device *dev = intel_dig_port->base.base.dev;
747 uint32_t precharge, timeout;
748
749 if (IS_GEN6(dev))
750 precharge = 3;
751 else
752 precharge = 5;
753
f3c6a3a7 754 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
5ed12a19
DL
755 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 else
757 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758
759 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 760 DP_AUX_CH_CTL_DONE |
5ed12a19 761 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 762 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 763 timeout |
788d4433 764 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
765 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 767 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
768}
769
b9ca5fad
DL
770static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
771 bool has_aux_irq,
772 int send_bytes,
773 uint32_t unused)
774{
775 return DP_AUX_CH_CTL_SEND_BUSY |
776 DP_AUX_CH_CTL_DONE |
777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 DP_AUX_CH_CTL_TIME_OUT_1600us |
780 DP_AUX_CH_CTL_RECEIVE_ERROR |
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
783}
784
b84a1cf8
RV
/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * receive up to @recv_size bytes into @recv. Returns the number of bytes
 * received, or a negative error (-EBUSY, -E2BIG, -EIO, -ETIMEDOUT).
 * Retries across all available clock dividers and, per the DP spec, at
 * least 3 attempts per divider. Handles eDP VDD and PM QoS around the
 * transfer.
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		/* Rate-limit the WARN: only fire when the stuck status
		 * value changes from the last occurrence. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop: walk through the platform's clock dividers until
	 * one produces a successful transaction (divider 0 terminates). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organize the retries at drm level. There EBUSY should
		 * also take care for 1ms wait before retrying.
		 * That aux retries re-org is still needed and after that is
		 * merged we remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}
956
a6c8aff0
JN
/* AUX message header: 3 request/address bytes, plus one length byte when the
 * message carries a payload (see intel_dp_aux_transfer()). */
#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
/*
 * drm_dp_aux.transfer hook: encode a drm_dp_aux_msg (native AUX or
 * I2C-over-AUX) into the raw byte stream for intel_dp_aux_ch() and decode
 * the reply into msg->reply / msg->buffer.
 *
 * Returns the number of payload bytes transferred on success, -E2BIG if
 * the message exceeds the 20-byte hardware buffers, -EINVAL for an
 * unknown request type, or the error from intel_dp_aux_ch().
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	/* Build the 4-byte header: request nibble, 20-bit address, size-1. */
	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size messages are address-only transactions. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
		else
			WARN_ON(msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1; /* reply byte + payload */

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1031
f0f59a00
VS
1032static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1033 enum port port)
da00bdcf
VS
1034{
1035 switch (port) {
1036 case PORT_B:
1037 case PORT_C:
1038 case PORT_D:
1039 return DP_AUX_CH_CTL(port);
1040 default:
1041 MISSING_CASE(port);
1042 return DP_AUX_CH_CTL(PORT_B);
1043 }
1044}
1045
f0f59a00
VS
1046static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047 enum port port, int index)
330e20ec
VS
1048{
1049 switch (port) {
1050 case PORT_B:
1051 case PORT_C:
1052 case PORT_D:
1053 return DP_AUX_CH_DATA(port, index);
1054 default:
1055 MISSING_CASE(port);
1056 return DP_AUX_CH_DATA(PORT_B, index);
1057 }
1058}
1059
f0f59a00
VS
1060static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1061 enum port port)
da00bdcf
VS
1062{
1063 switch (port) {
1064 case PORT_A:
1065 return DP_AUX_CH_CTL(port);
1066 case PORT_B:
1067 case PORT_C:
1068 case PORT_D:
1069 return PCH_DP_AUX_CH_CTL(port);
1070 default:
1071 MISSING_CASE(port);
1072 return DP_AUX_CH_CTL(PORT_A);
1073 }
1074}
1075
f0f59a00
VS
1076static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077 enum port port, int index)
330e20ec
VS
1078{
1079 switch (port) {
1080 case PORT_A:
1081 return DP_AUX_CH_DATA(port, index);
1082 case PORT_B:
1083 case PORT_C:
1084 case PORT_D:
1085 return PCH_DP_AUX_CH_DATA(port, index);
1086 default:
1087 MISSING_CASE(port);
1088 return DP_AUX_CH_DATA(PORT_A, index);
1089 }
1090}
1091
da00bdcf
VS
1092/*
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1095 */
1096static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1097{
1098 const struct ddi_vbt_port_info *info =
1099 &dev_priv->vbt.ddi_port_info[PORT_E];
1100
1101 switch (info->alternate_aux_channel) {
1102 case DP_AUX_A:
1103 return PORT_A;
1104 case DP_AUX_B:
1105 return PORT_B;
1106 case DP_AUX_C:
1107 return PORT_C;
1108 case DP_AUX_D:
1109 return PORT_D;
1110 default:
1111 MISSING_CASE(info->alternate_aux_channel);
1112 return PORT_A;
1113 }
1114}
1115
f0f59a00
VS
1116static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1117 enum port port)
da00bdcf
VS
1118{
1119 if (port == PORT_E)
1120 port = skl_porte_aux_port(dev_priv);
1121
1122 switch (port) {
1123 case PORT_A:
1124 case PORT_B:
1125 case PORT_C:
1126 case PORT_D:
1127 return DP_AUX_CH_CTL(port);
1128 default:
1129 MISSING_CASE(port);
1130 return DP_AUX_CH_CTL(PORT_A);
1131 }
1132}
1133
f0f59a00
VS
1134static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135 enum port port, int index)
330e20ec
VS
1136{
1137 if (port == PORT_E)
1138 port = skl_porte_aux_port(dev_priv);
1139
1140 switch (port) {
1141 case PORT_A:
1142 case PORT_B:
1143 case PORT_C:
1144 case PORT_D:
1145 return DP_AUX_CH_DATA(port, index);
1146 default:
1147 MISSING_CASE(port);
1148 return DP_AUX_CH_DATA(PORT_A, index);
1149 }
1150}
1151
f0f59a00
VS
1152static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153 enum port port)
330e20ec
VS
1154{
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1159 else
1160 return g4x_aux_ctl_reg(dev_priv, port);
1161}
1162
f0f59a00
VS
1163static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
330e20ec
VS
1165{
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1170 else
1171 return g4x_aux_data_reg(dev_priv, port, index);
1172}
1173
/*
 * Cache this port's AUX channel control and data register offsets in the
 * intel_dp struct so later AUX transfers can use them directly.
 */
static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = dp_to_dig_port(intel_dp)->port;
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}
1184
/*
 * Tear down the AUX channel: unregister from the drm core first, then free
 * the kasprintf()'d name allocated in intel_dp_aux_init().
 */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	drm_dp_aux_unregister(&intel_dp->aux);
	kfree(intel_dp->aux.name);
}
1191
1192static int
9d1a1031
JN
1193intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1194{
1195 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1196 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1197 enum port port = intel_dig_port->port;
ab2c0672
DA
1198 int ret;
1199
330e20ec 1200 intel_aux_reg_init(intel_dp);
8316f337 1201
a121f4e5
VS
1202 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1203 if (!intel_dp->aux.name)
1204 return -ENOMEM;
1205
9d1a1031
JN
1206 intel_dp->aux.dev = dev->dev;
1207 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1208
a121f4e5
VS
1209 DRM_DEBUG_KMS("registering %s bus for %s\n",
1210 intel_dp->aux.name,
0b99836f 1211 connector->base.kdev->kobj.name);
8316f337 1212
4f71d0cb 1213 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1214 if (ret < 0) {
4f71d0cb 1215 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
a121f4e5
VS
1216 intel_dp->aux.name, ret);
1217 kfree(intel_dp->aux.name);
1218 return ret;
ab2c0672 1219 }
8a5e6aeb 1220
0b99836f
JN
1221 ret = sysfs_create_link(&connector->base.kdev->kobj,
1222 &intel_dp->aux.ddc.dev.kobj,
1223 intel_dp->aux.ddc.dev.kobj.name);
1224 if (ret < 0) {
a121f4e5
VS
1225 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1226 intel_dp->aux.name, ret);
1227 intel_dp_aux_fini(intel_dp);
1228 return ret;
ab2c0672 1229 }
a121f4e5
VS
1230
1231 return 0;
a4fc5ed6
KP
1232}
1233
80f65de3
ID
/*
 * Connector unregister hook: remove the connector->DDC sysfs link created
 * by intel_dp_aux_init(), then do the generic connector unregister.
 */
static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	/* NOTE(review): removal is skipped for MST connectors — presumably
	 * the sysfs link was never created for them; confirm against the
	 * MST connector init path. */
	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}
1244
/*
 * Program pipe_config's PLL state for SKL eDP on DPLL0: pick the
 * DPLL_CTRL1 link-rate encoding matching the negotiated port clock.
 * port_clock / 2 maps the port clock onto the case labels below
 * (e.g. 162000 -> 81000 -> LINK_RATE_810).
 */
static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	/* Start from a clean PLL state; only ctrl1 is filled in below. */
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	/* NOTE(review): no default case — an unexpected rate leaves only the
	 * override bit set in ctrl1. */
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	results in CDCLK change. Need to handle the change of CDCLK by
	disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}
1290
6fa2d197 1291void
840b32b7 1292hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1293{
ee46f3c7
ACO
1294 memset(&pipe_config->dpll_hw_state, 0,
1295 sizeof(pipe_config->dpll_hw_state));
1296
840b32b7
VS
1297 switch (pipe_config->port_clock / 2) {
1298 case 81000:
0e50338c
DV
1299 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1300 break;
840b32b7 1301 case 135000:
0e50338c
DV
1302 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1303 break;
840b32b7 1304 case 270000:
0e50338c
DV
1305 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1306 break;
1307 }
1308}
1309
fc0f8e25 1310static int
12f6a2e2 1311intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1312{
94ca719e
VS
1313 if (intel_dp->num_sink_rates) {
1314 *sink_rates = intel_dp->sink_rates;
1315 return intel_dp->num_sink_rates;
fc0f8e25 1316 }
12f6a2e2
VS
1317
1318 *sink_rates = default_rates;
1319
1320 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1321}
1322
e588fa18 1323bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1324{
e588fa18
ACO
1325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1326 struct drm_device *dev = dig_port->base.base.dev;
1327
ed63baaf 1328 /* WaDisableHBR2:skl */
e87a005d 1329 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1330 return false;
1331
1332 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1333 (INTEL_INFO(dev)->gen >= 9))
1334 return true;
1335 else
1336 return false;
1337}
1338
/*
 * Return the source's (platform's) supported link-rate table via
 * *source_rates and the number of valid entries as the return value.
 */
static int
intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	return size;
}
1363
c6bb3538
DV
1364static void
1365intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1366 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1367{
1368 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1369 const struct dp_link_dpll *divisor = NULL;
1370 int i, count = 0;
c6bb3538
DV
1371
1372 if (IS_G4X(dev)) {
9dd4ffdf
CML
1373 divisor = gen4_dpll;
1374 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1375 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1376 divisor = pch_dpll;
1377 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1378 } else if (IS_CHERRYVIEW(dev)) {
1379 divisor = chv_dpll;
1380 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1381 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1382 divisor = vlv_dpll;
1383 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1384 }
9dd4ffdf
CML
1385
1386 if (divisor && count) {
1387 for (i = 0; i < count; i++) {
840b32b7 1388 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1389 pipe_config->dpll = divisor[i].dpll;
1390 pipe_config->clock_set = true;
1391 break;
1392 }
1393 }
c6bb3538
DV
1394 }
1395}
1396
2ecae76a
VS
1397static int intersect_rates(const int *source_rates, int source_len,
1398 const int *sink_rates, int sink_len,
94ca719e 1399 int *common_rates)
a8f3ef61
SJ
1400{
1401 int i = 0, j = 0, k = 0;
1402
a8f3ef61
SJ
1403 while (i < source_len && j < sink_len) {
1404 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1405 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1406 return k;
94ca719e 1407 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1408 ++k;
1409 ++i;
1410 ++j;
1411 } else if (source_rates[i] < sink_rates[j]) {
1412 ++i;
1413 } else {
1414 ++j;
1415 }
1416 }
1417 return k;
1418}
1419
94ca719e
VS
/*
 * Fill common_rates (at most DP_MAX_SUPPORTED_RATES entries) with the
 * sorted intersection of source and sink link rates; returns the count.
 */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(intel_dp, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}
1433
0336400e
VS
/*
 * Format nelem integers from array into str as a comma-separated list
 * ("1, 2, 3").  Output is silently truncated when len runs out; str is
 * always NUL-terminated (assuming len > 0).
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int idx;

	str[0] = '\0';

	for (idx = 0; idx < nelem; idx++) {
		const char *sep = idx ? ", " : "";
		int written = snprintf(str, len, "%s%d", sep, array[idx]);

		/* snprintf returns the would-be length: >= len means the
		 * buffer is exhausted and the output stands truncated. */
		if (written >= len)
			return;
		str += written;
		len -= written;
	}
}
1449
/* Dump the source, sink and common link-rate tables to the KMS debug log. */
static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	/* Skip all the formatting work when KMS debugging is disabled. */
	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(intel_dp, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}
1472
f4896f15 1473static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1474{
1475 int i = 0;
1476
1477 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1478 if (find == rates[i])
1479 break;
1480
1481 return i;
1482}
1483
50fec21a
VS
/*
 * Highest link rate supported by both source and sink.  WARNs and falls
 * back to 162000 (RBR) if there is no common rate.
 */
int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	/* rates[] is zero-initialized, so rate_to_index(0, ...) yields the
	 * first unused slot; the entry just before it is the highest rate. */
	return rates[rate_to_index(0, rates) - 1];
}
1496
ed4e9c1d
VS
/*
 * Index of @rate in the sink-advertised rate table; used as the
 * rate_select value for sinks with an explicit rate table (see
 * intel_dp_compute_rate()).
 */
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}
1501
94223d04
ACO
1502void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1503 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1504{
1505 if (intel_dp->num_sink_rates) {
1506 *link_bw = 0;
1507 *rate_select =
1508 intel_dp_rate_select(intel_dp, port_clock);
1509 } else {
1510 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1511 *rate_select = 0;
1512 }
1513}
1514
00c09d70 1515bool
5bfe2ac0 1516intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1517 struct intel_crtc_state *pipe_config)
a4fc5ed6 1518{
5bfe2ac0 1519 struct drm_device *dev = encoder->base.dev;
36008365 1520 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1521 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1523 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1524 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1525 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1526 int lane_count, clock;
56071a20 1527 int min_lane_count = 1;
eeb6324d 1528 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1529 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1530 int min_clock = 0;
a8f3ef61 1531 int max_clock;
083f9560 1532 int bpp, mode_rate;
ff9a6750 1533 int link_avail, link_clock;
94ca719e
VS
1534 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1535 int common_len;
04a60f9f 1536 uint8_t link_bw, rate_select;
a8f3ef61 1537
94ca719e 1538 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1539
1540 /* No common link rates between source and sink */
94ca719e 1541 WARN_ON(common_len <= 0);
a8f3ef61 1542
94ca719e 1543 max_clock = common_len - 1;
a4fc5ed6 1544
bc7d38a4 1545 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1546 pipe_config->has_pch_encoder = true;
1547
03afc4a2 1548 pipe_config->has_dp_encoder = true;
f769cd24 1549 pipe_config->has_drrs = false;
9fcb1704 1550 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1551
dd06f90e
JN
1552 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1553 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1554 adjusted_mode);
a1b2278e
CK
1555
1556 if (INTEL_INFO(dev)->gen >= 9) {
1557 int ret;
e435d6e5 1558 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1559 if (ret)
1560 return ret;
1561 }
1562
b5667627 1563 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1564 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1565 intel_connector->panel.fitting_mode);
1566 else
b074cec8
JB
1567 intel_pch_panel_fitting(intel_crtc, pipe_config,
1568 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1569 }
1570
cb1793ce 1571 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1572 return false;
1573
083f9560 1574 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1575 "max bw %d pixel clock %iKHz\n",
94ca719e 1576 max_lane_count, common_rates[max_clock],
241bfc38 1577 adjusted_mode->crtc_clock);
083f9560 1578
36008365
DV
1579 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1580 * bpc in between. */
3e7ca985 1581 bpp = pipe_config->pipe_bpp;
56071a20 1582 if (is_edp(intel_dp)) {
22ce5628
TS
1583
1584 /* Get bpp from vbt only for panels that dont have bpp in edid */
1585 if (intel_connector->base.display_info.bpc == 0 &&
1586 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1587 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1588 dev_priv->vbt.edp_bpp);
1589 bpp = dev_priv->vbt.edp_bpp;
1590 }
1591
344c5bbc
JN
1592 /*
1593 * Use the maximum clock and number of lanes the eDP panel
1594 * advertizes being capable of. The panels are generally
1595 * designed to support only a single clock and lane
1596 * configuration, and typically these values correspond to the
1597 * native resolution of the panel.
1598 */
1599 min_lane_count = max_lane_count;
1600 min_clock = max_clock;
7984211e 1601 }
657445fe 1602
36008365 1603 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1604 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1605 bpp);
36008365 1606
c6930992 1607 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1608 for (lane_count = min_lane_count;
1609 lane_count <= max_lane_count;
1610 lane_count <<= 1) {
1611
94ca719e 1612 link_clock = common_rates[clock];
36008365
DV
1613 link_avail = intel_dp_max_data_rate(link_clock,
1614 lane_count);
1615
1616 if (mode_rate <= link_avail) {
1617 goto found;
1618 }
1619 }
1620 }
1621 }
c4867936 1622
36008365 1623 return false;
3685a8f3 1624
36008365 1625found:
55bc60db
VS
1626 if (intel_dp->color_range_auto) {
1627 /*
1628 * See:
1629 * CEA-861-E - 5.1 Default Encoding Parameters
1630 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1631 */
0f2a2a75
VS
1632 pipe_config->limited_color_range =
1633 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1634 } else {
1635 pipe_config->limited_color_range =
1636 intel_dp->limited_color_range;
55bc60db
VS
1637 }
1638
90a6b7b0 1639 pipe_config->lane_count = lane_count;
a8f3ef61 1640
657445fe 1641 pipe_config->pipe_bpp = bpp;
94ca719e 1642 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1643
04a60f9f
VS
1644 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1645 &link_bw, &rate_select);
1646
1647 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1648 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1649 pipe_config->port_clock, bpp);
36008365
DV
1650 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1651 mode_rate, link_avail);
a4fc5ed6 1652
03afc4a2 1653 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1654 adjusted_mode->crtc_clock,
1655 pipe_config->port_clock,
03afc4a2 1656 &pipe_config->dp_m_n);
9d1a455b 1657
439d7ac0 1658 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1659 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1660 pipe_config->has_drrs = true;
439d7ac0
PB
1661 intel_link_compute_m_n(bpp, lane_count,
1662 intel_connector->panel.downclock_mode->clock,
1663 pipe_config->port_clock,
1664 &pipe_config->dp_m2_n2);
1665 }
1666
ef11bdb3 1667 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1668 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1669 else if (IS_BROXTON(dev))
1670 /* handled in ddi */;
5416d871 1671 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1672 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1673 else
840b32b7 1674 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1675
03afc4a2 1676 return true;
a4fc5ed6
KP
1677}
1678
901c2daf
VS
1679void intel_dp_set_link_params(struct intel_dp *intel_dp,
1680 const struct intel_crtc_state *pipe_config)
1681{
1682 intel_dp->link_rate = pipe_config->port_clock;
1683 intel_dp->lane_count = pipe_config->lane_count;
1684}
1685
/*
 * Compute the DP port register value for the upcoming modeset and stash it
 * in intel_dp->DP; the register itself is not written here.  On CPT PCH
 * ports the enhanced-framing bit is mirrored into TRANS_DP_CTL (which is
 * written immediately).
 */
static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		/* IVB CPU eDP: CPT-style link training bits, pipe at bit 29. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		/* CPT PCH port: enhanced framing lives in TRANS_DP_CTL. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU / VLV / CHV register layout. */
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1767
ffd6749d
PZ
/*
 * Mask/value pairs for wait_panel_status(): each pair selects which panel
 * power status bits to watch and the value indicating the target state.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE  	(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK	(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1775#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1776
/*
 * Poll the panel power status register until (status & mask) == value,
 * logging an error on timeout.  Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	/* Poll via _wait_for(); only an error is logged on timeout — the
	 * sequence continues regardless. */
	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 1803
/* Block until the panel power-on sequence has completed. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
1809
/* Block until the panel power-off sequence has completed. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
1815
/*
 * Enforce the panel power-cycle delay (T11/T12): if the panel was powered
 * off more recently than panel_power_cycle_delay ms ago, sleep for the
 * remainder, then wait for the cycle-idle status.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
1836
/* Honor the panel-power-to-backlight-on delay (relative to last power on). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
1842
/* Honor the backlight-off delay (relative to last backlight off). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 1848
832dd3c1
KP
/* Read the current pp_control value, unlocking the register if it
 * is locked.
 *
 * Caller must hold pps_mutex.  On everything except Broxton the
 * returned value has the write-protect key merged in so a subsequent
 * write of it keeps the register unlocked.
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	if (!IS_BROXTON(dev)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
1868
951468f3
VS
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so the AUX channel can be used, taking the AUX
 * power domain reference if VDD wasn't already up.  Returns true if the
 * caller is responsible for turning VDD back off (i.e. VDD wasn't
 * already requested), false otherwise.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return false;

	/* Keep a pending deferred VDD-off from racing with this enable. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->port));

	/* Respect t11_t12 if the panel was just powered off. */
	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
1926
951468f3
VS
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 *
 * Locked wrapper around edp_panel_vdd_on(); warns if VDD was already
 * requested, which would indicate an unbalanced caller.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	bool vdd;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	vdd = edp_panel_vdd_on(intel_dp);
	pps_unlock(intel_dp);

	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->port));
}
1948
/*
 * Synchronously turn off the VDD force bit and drop the AUX power domain
 * reference taken by edp_panel_vdd_on().  Caller must hold pps_mutex and
 * must no longer want VDD (see the WARN_ON).  No-op if VDD is already off.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
	I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* Record when the panel actually lost power for the t11_t12 wait. */
	if ((pp & POWER_TARGET_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
5d613501 1989
/*
 * Deferred-work handler scheduled by edp_panel_vdd_schedule_off(); turns
 * VDD off unless someone requested it again in the meantime.
 */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);

	pps_lock(intel_dp);
	if (!intel_dp->want_panel_vdd)
		edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
2000
aba86890
ID
/* Schedule the deferred VDD disable (see edp_panel_vdd_work()). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2013
951468f3
VS
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync: if true, turn VDD off immediately; otherwise defer it via
 * delayed work so back-to-back AUX users don't cycle VDD.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv =
		intel_dp_to_dev(intel_dp)->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2039
/*
 * Turn the eDP panel power on and wait until the sequencer reports it up.
 * Caller must hold pps_mutex.  Warns and bails if power is already on.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp for the subsequent backlight-on delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 2087
9f0fb5be
VS
/* Locked wrapper around edp_panel_on(); no-op for non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_on(intel_dp);
	pps_unlock(intel_dp);
}
2097
9f0fb5be
VS
2098
/*
 * Turn the eDP panel power off, wait for the sequencer to finish, and
 * drop the AUX power domain reference that the VDD enable acquired.
 * Caller must hold pps_mutex and must currently hold the VDD request.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dp_to_dig_port(intel_dp)->port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dp_to_dig_port(intel_dp)->port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Timestamp for the t11_t12 wait in wait_panel_power_cycle(). */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	wait_panel_off(intel_dp);

	/* We got a reference when we enabled the VDD. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_put(dev_priv, power_domain);
}
e39b999a 2140
9f0fb5be
VS
/* Locked wrapper around edp_panel_off(); no-op for non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);
	edp_panel_off(intel_dp);
	pps_unlock(intel_dp);
}
2150
1250d107
JN
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);
}
2180
1250d107
JN
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel power control backlight enable. */
	intel_panel_enable_backlight(intel_dp->attached_connector);
	_intel_edp_backlight_on(intel_dp);
}
2192
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pps_lock(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	pps_unlock(intel_dp);

	/* Record the disable time, then honour the backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2219
1250d107
JN
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Mirror of intel_edp_backlight_on(): PP control off, then PWM. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(intel_dp->attached_connector);
}
a4fc5ed6 2231
73580fb7
JN
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	bool is_enabled;

	pps_lock(intel_dp);
	is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	pps_unlock(intel_dp);

	/* Idempotent: skip if the hardware already matches the request. */
	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
2257
64e1077a
VS
/* WARN if the DP port enable bit doesn't match the expected @state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2270
/* WARN if the eDP PLL enable bit (in DP_A) doesn't match @state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2281
/*
 * Enable the eDP PLL (port A).  Requires pipe and port disabled and the
 * PLL currently off; programs the PLL frequency from the crtc port clock
 * before setting the enable bit.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      crtc->config->port_clock);

	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	/* Program frequency first, then enable in a second write. */
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2312
/*
 * Disable the eDP PLL (port A).  Requires pipe and port disabled and the
 * PLL currently on.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2331
c7ad3810 2332/* If the sink supports it, try to set the power state appropriately */
c19b0669 2333void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2334{
2335 int ret, i;
2336
2337 /* Should have a valid DPCD by this point */
2338 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2339 return;
2340
2341 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2342 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2343 DP_SET_POWER_D3);
c7ad3810
JB
2344 } else {
2345 /*
2346 * When turning on, we need to retry for 1ms to give the sink
2347 * time to wake up.
2348 */
2349 for (i = 0; i < 3; i++) {
9d1a1031
JN
2350 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2351 DP_SET_POWER_D0);
c7ad3810
JB
2352 if (ret == 1)
2353 break;
2354 msleep(1);
2355 }
2356 }
f9cac721
JN
2357
2358 if (ret != 1)
2359 DRM_DEBUG_KMS("failed to %s sink power state\n",
2360 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2361}
2362
/*
 * Encoder hook: report whether the DP port is enabled and, if so, which
 * pipe feeds it.  Returns false when the power domain or the port enable
 * bit is off.  The pipe is decoded differently per platform: gen7 port A
 * and CPT ports carry it in the port register / transcoder DP control,
 * CHV has its own field, everything else uses the classic encoding.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	power_domain = intel_display_port_power_domain(encoder);
	if (!intel_display_power_is_enabled(dev_priv, power_domain))
		return false;

	tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (IS_GEN7(dev) && port == PORT_A) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		enum pipe p;

		/* On CPT the pipe mapping lives in the transcoder, not the port. */
		for_each_pipe(dev_priv, p) {
			u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
			if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
				*pipe = p;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      i915_mmio_reg_offset(intel_dp->output_reg));
	} else if (IS_CHERRYVIEW(dev)) {
		*pipe = DP_PORT_TO_PIPE_CHV(tmp);
	} else {
		*pipe = PORT_TO_PIPE(tmp);
	}

	return true;
}
d240f20f 2405
/*
 * Encoder hook: read back the current hardware state of the DP port into
 * @pipe_config (sync polarity flags, audio, lane count, m/n values, port
 * clock and derived dotclock).  Also applies the eDP VBT bpp override
 * hack described inline.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	int dotclock;

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarities live in the transcoder DP control reg. */
	if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->has_dp_encoder = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A derives its link rate from the eDP PLL frequency select. */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	dotclock = intel_dotclock_calculate(pipe_config->port_clock,
					    &pipe_config->dp_m_n);

	if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
		ironlake_check_encoder_dotclock(pipe_config, dotclock);

	pipe_config->base.adjusted_mode.crtc_clock = dotclock;

	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
		dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
	}
}
2493
/*
 * Encoder disable hook: tear down audio/PSR, power the panel down (with
 * VDD held across the transition), and on pre-gen5 disable the port
 * before the pipe goes down.
 */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if (crtc->config->has_audio)
		intel_audio_codec_disable(encoder);

	if (HAS_PSR(dev) && !HAS_DDI(dev))
		intel_psr_disable(intel_dp);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);

	/* disable the port before the pipe on g4x */
	if (INTEL_INFO(dev)->gen < 5)
		intel_dp_link_down(intel_dp);
}
2517
/* ILK+ post-disable: drop the link, then the eDP PLL for port A. */
static void ilk_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;

	intel_dp_link_down(intel_dp);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp);
}
2529
/* VLV post-disable: just bring the link down (no eDP PLL to manage). */
static void vlv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp_link_down(intel_dp);
}
2536
/*
 * Assert (@reset == true) or deassert the CHV DPIO data-lane and PCS
 * clock soft resets.  The second PCS group is only touched when more
 * than two lanes are in use.  Caller is expected to hold sb_lock for
 * the vlv_dpio accesses (see chv_post_disable_dp()).
 */
static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
				     bool reset)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	enum pipe pipe = crtc->pipe;
	uint32_t val;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
	if (reset)
		val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
	else
		val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
		if (reset)
			val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
		else
			val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
	val |= CHV_PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~DPIO_PCS_CLK_SOFT_RESET;
	else
		val |= DPIO_PCS_CLK_SOFT_RESET;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);

	if (crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
		val |= CHV_PCS_REQ_SOFTRESET_EN;
		if (reset)
			val &= ~DPIO_PCS_CLK_SOFT_RESET;
		else
			val |= DPIO_PCS_CLK_SOFT_RESET;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
	}
}
97fd4d5c 2580
/* CHV post-disable: bring the link down, then assert data lane reset. */
static void chv_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_dp_link_down(intel_dp);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	mutex_unlock(&dev_priv->sb_lock);
}
2596
/*
 * Translate a DP training pattern request (@dp_train_pat) into the
 * platform-specific register bits.  On DDI platforms this programs
 * DP_TP_CTL directly; on all others it only updates *@DP — the caller
 * is responsible for writing it to the port register.  Pattern 3 is
 * only supported on DDI and CHV.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 uint32_t *DP,
			 uint8_t dp_train_pat)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	if (HAS_DDI(dev)) {
		uint32_t temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_GEN7(dev) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Pattern 3 unsupported here: fall back to pattern 2. */
			DRM_ERROR("DP training pattern 3 not supported\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		if (IS_CHERRYVIEW(dev))
			*DP &= ~DP_LINK_TRAIN_MASK_CHV;
		else
			*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			if (IS_CHERRYVIEW(dev)) {
				*DP |= DP_LINK_TRAIN_PAT_3_CHV;
			} else {
				/* Pattern 3 unsupported: fall back to pattern 2. */
				DRM_ERROR("DP training pattern 3 not supported\n");
				*DP |= DP_LINK_TRAIN_PAT_2;
			}
			break;
		}
	}
}
2680
/*
 * Enable the DP port with training pattern 1 selected, using the
 * two-step write required by VLV/CHV (see inline comment).
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc =
		to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);

	/* enable with pattern 1 (as per spec) */
	_intel_dp_set_link_train(intel_dp, &intel_dp->DP,
				 DP_TRAINING_PATTERN_1);

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
2708
/*
 * Common DP encoder enable: bring up the port, run the panel power-on
 * sequence under pps_lock, wake the sink and perform link training,
 * then enable audio if configured.  Contains the port A underrun
 * suppression workarounds described inline.
 */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = crtc->pipe;

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_init_panel_power_sequencer(intel_dp);

	/*
	 * We get an occasional spurious underrun between the port
	 * enable and vdd enable, when enabling port A eDP.
	 *
	 * FIXME: Not sure if this applies to (PCH) port D eDP as well
	 */
	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_dp_enable_port(intel_dp);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * Underrun reporting for the other pipe was disabled in
		 * g4x_pre_enable_dp(). The eDP PLL and port have now been
		 * enabled, so it's now safe to re-enable underrun reporting.
		 */
		intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
	}

	edp_panel_vdd_on(intel_dp);
	edp_panel_on(intel_dp);
	edp_panel_vdd_off(intel_dp, true);

	if (port == PORT_A)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	pps_unlock(intel_dp);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev))
			lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (crtc->config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder);
	}
}
89b667f8 2778
ecff4f3b
JN
2779static void g4x_enable_dp(struct intel_encoder *encoder)
2780{
828f5c6e
JN
2781 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2782
ecff4f3b 2783 intel_enable_dp(encoder);
4be73780 2784 intel_edp_backlight_on(intel_dp);
ab1f90f9 2785}
89b667f8 2786
ab1f90f9
JN
2787static void vlv_enable_dp(struct intel_encoder *encoder)
2788{
828f5c6e
JN
2789 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2790
4be73780 2791 intel_edp_backlight_on(intel_dp);
b32c6f48 2792 intel_psr_enable(intel_dp);
d240f20f
JB
2793}
2794
/*
 * g4x/ilk pre-enable hook: program the port register, suppress the
 * known FIFO underruns around the eDP PLL enable on gen5 port A, and
 * turn the eDP PLL on where the platform has one.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;

	intel_dp_prepare(encoder);

	if (port == PORT_A && IS_GEN5(dev_priv)) {
		/*
		 * We get FIFO underruns on the other pipe when
		 * enabling the CPU eDP PLL, and when enabling CPU
		 * eDP port. We could potentially avoid the PLL
		 * underrun with a vblank wait just prior to enabling
		 * the PLL, but that doesn't appear to help the port
		 * enable case. Just sweep it all under the rug.
		 * Reporting is re-enabled in intel_enable_dp().
		 */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
	}

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp);
}
2821
83b84597
VS
/*
 * Logically disconnect the power sequencer currently assigned to this
 * port: sync off vdd, clear the port-select in the PPS registers, and
 * mark the encoder's pps_pipe invalid. Caller holds pps_mutex
 * (all callers in this file reach here under pps_lock()).
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
2847
a4a5d2f8
VS
/*
 * Take the power sequencer of @pipe away from whichever eDP encoder
 * currently owns it, so the caller can claim it. Warns if the victim
 * port is still active. Must be called with pps_mutex held.
 */
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* VLV/CHV only have power sequencers on pipes A and B. */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;
		enum port port;

		/* only eDP encoders own a power sequencer */
		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		port = dp_to_dig_port(intel_dp)->port;

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* an active crtc here means we're yanking a PPS in use */
		WARN(encoder->base.crtc,
		     "stealing pipe %c power sequencer from active eDP port %c\n",
		     pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
2883
/*
 * Bind this eDP port to the power sequencer of the crtc's pipe:
 * detach any previously-used sequencer, steal the target one from any
 * other port, then initialize the PPS state and registers for this
 * pipe/port pairing. No-op for non-eDP. Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!is_edp(intel_dp))
		return;

	/* already attached to the right pipe's sequencer */
	if (intel_dp->pps_pipe == crtc->pipe)
		return;

	/*
	 * If another power sequencer was being used on this
	 * port previously make sure to turn off vdd there while
	 * we still have control of it.
	 */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		vlv_detach_power_sequencer(intel_dp);

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev, crtc->pipe);

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}
2924
/*
 * VLV pre-enable hook: program the PCS sideband registers for this
 * channel, then run the common DP enable sequence.
 */
static void vlv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/*
	 * NOTE(review): the read result is discarded immediately below
	 * (val is reset to 0). Presumably the read is kept only for a
	 * possible sideband access side effect — confirm before removing.
	 */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
	val = 0;
	if (pipe)
		val |= (1<<21);
	else
		val &= ~(1<<21);
	val |= 0x001000c4;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);
}
2953
/*
 * VLV pre-PLL-enable hook: program the port register, then put the
 * Tx lanes into their default reset state and apply the inter-pair
 * skew workaround via DPIO sideband writes.
 */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	intel_dp_prepare(encoder);

	/* Program Tx lane resets to default */
	mutex_lock(&dev_priv->sb_lock);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
			 DPIO_PCS_TX_LANE2_RESET |
			 DPIO_PCS_TX_LANE1_RESET);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
			 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
			 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
			 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
			 DPIO_PCS_CLK_SOFT_RESET);

	/* Fix up inter-pair skew failure */
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
	mutex_unlock(&dev_priv->sb_lock);
}
2983
e4a1d846
CML
/*
 * CHV pre-enable hook: program lane deskew, upar bits, and data lane
 * stagger for the active lanes, release the data lane soft reset,
 * then run the common DP enable sequence and drop the CL2 powergate
 * override taken in chv_dp_pre_pll_enable().
 */
static void chv_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;
	int data, i, stagger;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* allow hardware to manage TX FIFO reset source */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	/* lanes 2/3 live in the PCS23 group; only touch them if used */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	/* Program Tx lane latency optimal setting */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		/* Set the upar bit (clear on lane 1, except for x1 configs) */
		if (intel_crtc->config->lane_count == 1)
			data = 0x0;
		else
			data = (i == 1) ? 0x0 : 0x1;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
				data << DPIO_UPAR_SHIFT);
	}

	/* Data lane stagger programming: value scales with port clock */
	if (intel_crtc->config->port_clock > 270000)
		stagger = 0x18;
	else if (intel_crtc->config->port_clock > 135000)
		stagger = 0xd;
	else if (intel_crtc->config->port_clock > 67500)
		stagger = 0x7;
	else if (intel_crtc->config->port_clock > 33750)
		stagger = 0x4;
	else
		stagger = 0x2;

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
	val |= DPIO_TX2_STAGGER_MASK(0x1f);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
		val |= DPIO_TX2_STAGGER_MASK(0x1f);
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
	}

	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
		       DPIO_LANESTAGGER_STRAP(stagger) |
		       DPIO_LANESTAGGER_STRAP_OVRD |
		       DPIO_TX1_STAGGER_MASK(0x1f) |
		       DPIO_TX1_STAGGER_MULT(6) |
		       DPIO_TX2_STAGGER_MULT(0));

	if (intel_crtc->config->lane_count > 2) {
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
			       DPIO_LANESTAGGER_STRAP(stagger) |
			       DPIO_LANESTAGGER_STRAP_OVRD |
			       DPIO_TX1_STAGGER_MASK(0x1f) |
			       DPIO_TX1_STAGGER_MULT(7) |
			       DPIO_TX2_STAGGER_MULT(5));
	}

	/* Deassert data lane reset */
	chv_data_lane_soft_reset(encoder, false);

	mutex_unlock(&dev_priv->sb_lock);

	intel_enable_dp(encoder);

	/* Second common lane will stay alive on its own now */
	if (dport->release_cl2_override) {
		chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
		dport->release_cl2_override = false;
	}
}
3072
9197c88b
VS
/*
 * CHV pre-PLL-enable hook: wake the second common lane if the PLL
 * needs it, power up the used lanes, assert the data lane soft reset,
 * and route the clock distribution / clock channel usage for this
 * port before the PLL is enabled.
 */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
{
	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(encoder->base.crtc);
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	unsigned int lane_mask =
		intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
	u32 val;

	intel_dp_prepare(encoder);

	/*
	 * Must trick the second common lane into life.
	 * Otherwise we can't even access the PLL.
	 * The override is dropped again in chv_pre_enable_dp().
	 */
	if (ch == DPIO_CH0 && pipe == PIPE_B)
		dport->release_cl2_override =
			!chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);

	chv_phy_powergate_lanes(encoder, true, lane_mask);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, true);

	/* program left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA1_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA1_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		if (ch == DPIO_CH0)
			val |= CHV_BUFLEFTENA2_FORCE;
		if (ch == DPIO_CH1)
			val |= CHV_BUFRIGHTENA2_FORCE;
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	/* program clock channel usage */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
	val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
	if (pipe != PIPE_B)
		val &= ~CHV_PCS_USEDCLKCHANNEL;
	else
		val |= CHV_PCS_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
		val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
		if (pipe != PIPE_B)
			val &= ~CHV_PCS_USEDCLKCHANNEL;
		else
			val |= CHV_PCS_USEDCLKCHANNEL;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
	}

	/*
	 * This is a bit weird since generally CL
	 * matches the pipe, but here we need to
	 * pick the CL based on the port.
	 */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
	if (pipe != PIPE_B)
		val &= ~CHV_CMN_USEDCLKCHANNEL;
	else
		val |= CHV_CMN_USEDCLKCHANNEL;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);

	mutex_unlock(&dev_priv->sb_lock);
}
3155
d6db995f
VS
/*
 * CHV post-PLL-disable hook: tear down the left/right clock
 * distribution forced in chv_dp_pre_pll_enable() and drop the lane
 * powergate overrides.
 */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
	u32 val;

	mutex_lock(&dev_priv->sb_lock);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Leave the power down bit cleared for at least one
	 * lane so that chv_powergate_phy_ch() will power
	 * on something when the channel is otherwise unused.
	 * When the port is off and the override is removed
	 * the lanes power down anyway, so otherwise it doesn't
	 * really matter what the state of power down bits is
	 * after this.
	 */
	chv_phy_powergate_lanes(encoder, false, 0x0);
}
3188
a4fc5ed6 3189/*
df0c237d
JB
3190 * Native read with retry for link status and receiver capability reads for
3191 * cases where the sink may still be asleep.
9d1a1031
JN
3192 *
3193 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3194 * supposed to retry 3 times per the spec.
a4fc5ed6 3195 */
9d1a1031
JN
3196static ssize_t
3197intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3198 void *buffer, size_t size)
a4fc5ed6 3199{
9d1a1031
JN
3200 ssize_t ret;
3201 int i;
61da5fab 3202
f6a19066
VS
3203 /*
3204 * Sometime we just get the same incorrect byte repeated
3205 * over the entire buffer. Doing just one throw away read
3206 * initially seems to "solve" it.
3207 */
3208 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3209
61da5fab 3210 for (i = 0; i < 3; i++) {
9d1a1031
JN
3211 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3212 if (ret == size)
3213 return ret;
61da5fab
JB
3214 msleep(1);
3215 }
a4fc5ed6 3216
9d1a1031 3217 return ret;
a4fc5ed6
KP
3218}
3219
3220/*
3221 * Fetch AUX CH registers 0x202 - 0x207 which contain
3222 * link status information
3223 */
94223d04 3224bool
93f62dad 3225intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3226{
9d1a1031
JN
3227 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3228 DP_LANE0_1_STATUS,
3229 link_status,
3230 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3231}
3232
/*
 * These are source-specific values.
 * Return the maximum voltage-swing level the source side supports for
 * this platform/port combination; the platform checks are ordered from
 * newest to oldest, so order matters.
 */
uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (IS_BROXTON(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+ eDP on port A may opt into low-vswing tables */
		if (dev_priv->edp_low_vswing && port == PORT_A)
			return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_GEN7(dev) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3256
/*
 * Return the maximum pre-emphasis level the source supports for the
 * given voltage swing, per platform. Higher swing generally allows
 * less pre-emphasis headroom.
 */
uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	enum port port = dp_to_dig_port(intel_dp)->port;

	if (INTEL_INFO(dev)->gen >= 9) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_GEN7(dev) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3324
/*
 * Translate the requested swing/pre-emphasis combination from
 * train_set[0] into VLV DPIO demph / pre-emph / unique-transcale
 * register values and program them via sideband. Unsupported
 * combinations return 0 without touching the hardware.
 */
static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dport->base.base.crtc);
	unsigned long demph_reg_value, preemph_reg_value,
		uniqtranscale_reg_value;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel port = vlv_dport_to_channel(dport);
	int pipe = intel_crtc->pipe;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		preemph_reg_value = 0x0004000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x552AB83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5548B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B245555;
			uniqtranscale_reg_value = 0x5560B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			demph_reg_value = 0x2B405555;
			uniqtranscale_reg_value = 0x5598DA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		preemph_reg_value = 0x0002000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x5552B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B404848;
			uniqtranscale_reg_value = 0x5580B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			demph_reg_value = 0x2B404040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		preemph_reg_value = 0x0000000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x2B305555;
			uniqtranscale_reg_value = 0x5570B83A;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			demph_reg_value = 0x2B2B4040;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		preemph_reg_value = 0x0006000;
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			demph_reg_value = 0x1B405555;
			uniqtranscale_reg_value = 0x55ADDA3A;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);
	/* TX_DW5 is written 0 first and 0x80000000 last to latch the update */
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
			 uniqtranscale_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
	vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
	vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3424
67fa24b4
VS
3425static bool chv_need_uniq_trans_scale(uint8_t train_set)
3426{
3427 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3428 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3429}
3430
/*
 * Translate the requested swing/pre-emphasis combination from
 * train_set[0] into CHV deemph/margin values and run the full DPIO
 * swing-calculation sequence (clear calc init, program margins,
 * deemph, unique transition scale, then start the calculation) on
 * the lanes in use. Unsupported combinations return 0 untouched.
 */
static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
	struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
	u32 deemph_reg_value, margin_reg_value, val;
	uint8_t train_set = intel_dp->train_set[0];
	enum dpio_channel ch = vlv_dport_to_channel(dport);
	enum pipe pipe = intel_crtc->pipe;
	int i;

	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPH_LEVEL_0:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 128;
			margin_reg_value = 52;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 128;
			margin_reg_value = 77;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 128;
			margin_reg_value = 102;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
			deemph_reg_value = 128;
			margin_reg_value = 154;
			/* FIXME extra to set for 1200 */
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_1:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 85;
			margin_reg_value = 78;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 85;
			margin_reg_value = 116;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			deemph_reg_value = 85;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_2:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 64;
			margin_reg_value = 104;
			break;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			deemph_reg_value = 64;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	case DP_TRAIN_PRE_EMPH_LEVEL_3:
		switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			deemph_reg_value = 43;
			margin_reg_value = 154;
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	mutex_lock(&dev_priv->sb_lock);

	/* Clear calc init */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
	val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
	val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	/* lanes 2/3 live in the PCS23 group; only touch them if used */
	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
		val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
		val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
	val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
	val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
		val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
		val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
	}

	/* Program swing deemph */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
		val &= ~DPIO_SWING_DEEMPH9P5_MASK;
		val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
	}

	/* Program swing margin */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));

		val &= ~DPIO_SWING_MARGIN000_MASK;
		val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;

		/*
		 * Supposedly this value shouldn't matter when unique transition
		 * scale is disabled, but in fact it does matter. Let's just
		 * always program the same value and hope it's OK.
		 */
		val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
		val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;

		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
	}

	/*
	 * The document said it needs to set bit 27 for ch0 and bit 26
	 * for ch1. Might be a typo in the doc.
	 * For now, for this unique transition scale selection, set bit
	 * 27 for ch0 and ch1.
	 */
	for (i = 0; i < intel_crtc->config->lane_count; i++) {
		val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
		if (chv_need_uniq_trans_scale(train_set))
			val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
		else
			val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
		vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
	}

	/* Start swing calculation */
	val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
	val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
	vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);

	if (intel_crtc->config->lane_count > 2) {
		val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
		val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
		vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
	}

	mutex_unlock(&dev_priv->sb_lock);

	return 0;
}
3598
a4fc5ed6 3599static uint32_t
5829975c 3600gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3601{
3cf2efb1 3602 uint32_t signal_levels = 0;
a4fc5ed6 3603
3cf2efb1 3604 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3605 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3606 default:
3607 signal_levels |= DP_VOLTAGE_0_4;
3608 break;
bd60018a 3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3610 signal_levels |= DP_VOLTAGE_0_6;
3611 break;
bd60018a 3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3613 signal_levels |= DP_VOLTAGE_0_8;
3614 break;
bd60018a 3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3616 signal_levels |= DP_VOLTAGE_1_2;
3617 break;
3618 }
3cf2efb1 3619 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3620 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3621 default:
3622 signal_levels |= DP_PRE_EMPHASIS_0;
3623 break;
bd60018a 3624 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3625 signal_levels |= DP_PRE_EMPHASIS_3_5;
3626 break;
bd60018a 3627 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3628 signal_levels |= DP_PRE_EMPHASIS_6;
3629 break;
bd60018a 3630 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3631 signal_levels |= DP_PRE_EMPHASIS_9_5;
3632 break;
3633 }
3634 return signal_levels;
3635}
3636
e3421a18
ZW
3637/* Gen6's DP voltage swing and pre-emphasis control */
3638static uint32_t
5829975c 3639gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3640{
3c5a62b5
YL
3641 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3642 DP_TRAIN_PRE_EMPHASIS_MASK);
3643 switch (signal_levels) {
bd60018a
SJ
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3646 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3648 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3651 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3654 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3657 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3658 default:
3c5a62b5
YL
3659 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3660 "0x%x\n", signal_levels);
3661 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3662 }
3663}
3664
1a2eb460
KP
3665/* Gen7's DP voltage swing and pre-emphasis control */
3666static uint32_t
5829975c 3667gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3668{
3669 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3670 DP_TRAIN_PRE_EMPHASIS_MASK);
3671 switch (signal_levels) {
bd60018a 3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3673 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3675 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3677 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3678
bd60018a 3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3680 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3682 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3683
bd60018a 3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3685 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3686 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3687 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3688
3689 default:
3690 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3691 "0x%x\n", signal_levels);
3692 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3693 }
3694}
3695
94223d04 3696void
f4eb692e 3697intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3698{
3699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3700 enum port port = intel_dig_port->port;
f0a3424e 3701 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3702 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3703 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3704 uint8_t train_set = intel_dp->train_set[0];
3705
f8896f5d
DW
3706 if (HAS_DDI(dev)) {
3707 signal_levels = ddi_signal_levels(intel_dp);
3708
3709 if (IS_BROXTON(dev))
3710 signal_levels = 0;
3711 else
3712 mask = DDI_BUF_EMP_MASK;
e4a1d846 3713 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3714 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3715 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3716 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3717 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3718 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3719 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3720 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3721 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3722 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3723 } else {
5829975c 3724 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3725 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3726 }
3727
96fb9f9b
VK
3728 if (mask)
3729 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3730
3731 DRM_DEBUG_KMS("Using vswing level %d\n",
3732 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3733 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3734 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3735 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3736
f4eb692e 3737 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3738
3739 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3740 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3741}
3742
94223d04 3743void
e9c176d5
ACO
3744intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3745 uint8_t dp_train_pat)
a4fc5ed6 3746{
174edf1f 3747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3748 struct drm_i915_private *dev_priv =
3749 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3750
f4eb692e 3751 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3752
f4eb692e 3753 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3754 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3755}
3756
94223d04 3757void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3758{
3759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3760 struct drm_device *dev = intel_dig_port->base.base.dev;
3761 struct drm_i915_private *dev_priv = dev->dev_private;
3762 enum port port = intel_dig_port->port;
3763 uint32_t val;
3764
3765 if (!HAS_DDI(dev))
3766 return;
3767
3768 val = I915_READ(DP_TP_CTL(port));
3769 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3770 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3771 I915_WRITE(DP_TP_CTL(port), val);
3772
3773 /*
3774 * On PORT_A we can have only eDP in SST mode. There the only reason
3775 * we need to set idle transmission mode is to work around a HW issue
3776 * where we enable the pipe while not in idle link-training mode.
3777 * In this case there is requirement to wait for a minimum number of
3778 * idle patterns to be sent.
3779 */
3780 if (port == PORT_A)
3781 return;
3782
3783 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3784 1))
3785 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3786}
3787
a4fc5ed6 3788static void
ea5b213a 3789intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3790{
da63a9f2 3791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3792 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3793 enum port port = intel_dig_port->port;
da63a9f2 3794 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3795 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3796 uint32_t DP = intel_dp->DP;
a4fc5ed6 3797
bc76e320 3798 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3799 return;
3800
0c33d8d7 3801 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3802 return;
3803
28c97730 3804 DRM_DEBUG_KMS("\n");
32f9d658 3805
39e5fa88
VS
3806 if ((IS_GEN7(dev) && port == PORT_A) ||
3807 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3808 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3809 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3810 } else {
aad3d14d
VS
3811 if (IS_CHERRYVIEW(dev))
3812 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3813 else
3814 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3815 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3816 }
1612c8bd 3817 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3818 POSTING_READ(intel_dp->output_reg);
5eb08b69 3819
1612c8bd
VS
3820 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3821 I915_WRITE(intel_dp->output_reg, DP);
3822 POSTING_READ(intel_dp->output_reg);
3823
3824 /*
3825 * HW workaround for IBX, we need to move the port
3826 * to transcoder A after disabling it to allow the
3827 * matching HDMI port to be enabled on transcoder A.
3828 */
3829 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3830 /*
3831 * We get CPU/PCH FIFO underruns on the other pipe when
3832 * doing the workaround. Sweep them under the rug.
3833 */
3834 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3835 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3836
1612c8bd
VS
3837 /* always enable with pattern 1 (as per spec) */
3838 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3839 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3840 I915_WRITE(intel_dp->output_reg, DP);
3841 POSTING_READ(intel_dp->output_reg);
3842
3843 DP &= ~DP_PORT_EN;
5bddd17f 3844 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3845 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3846
3847 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3848 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3849 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3850 }
3851
f01eca2e 3852 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3853
3854 intel_dp->DP = DP;
a4fc5ed6
KP
3855}
3856
/*
 * Read and cache the sink's DPCD receiver capabilities, probing PSR/PSR2
 * support, eDP intermediate link rates and downstream port info along
 * the way.
 *
 * Returns false if the AUX transfer fails, no DPCD is present, or the
 * downstream port info cannot be fetched; true otherwise.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint8_t rev;

	/* Read the whole receiver-capability block starting at DPCD 0x000. */
	if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
				    sizeof(intel_dp->dpcd)) < 0)
		return false; /* aux transfer failed */

	DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);

	if (intel_dp->dpcd[DP_DPCD_REV] == 0)
		return false; /* DPCD not present */

	/* Check if the panel supports PSR */
	memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
	if (is_edp(intel_dp)) {
		intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
					intel_dp->psr_dpcd,
					sizeof(intel_dp->psr_dpcd));
		if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
			dev_priv->psr.sink_support = true;
			DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
		}

		/* PSR2 is only considered on gen9+ sinks advertising it. */
		if (INTEL_INFO(dev)->gen >= 9 &&
			(intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
			uint8_t frame_sync_cap;

			dev_priv->psr.sink_support = true;
			intel_dp_dpcd_read_wake(&intel_dp->aux,
					DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
					&frame_sync_cap, 1);
			dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
			/* PSR2 needs frame sync as well */
			dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
			DRM_DEBUG_KMS("PSR2 %s on sink",
				dev_priv->psr.psr2_support ? "supported" : "not supported");
		}
	}

	DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
		      yesno(intel_dp_source_supports_hbr2(intel_dp)),
		      yesno(drm_dp_tps3_supported(intel_dp->dpcd)));

	/* Intermediate frequency support */
	if (is_edp(intel_dp) &&
	    (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] &	DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
	    (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
	    (rev >= 0x03)) { /* eDp v1.4 or higher */
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		intel_dp_dpcd_read_wake(&intel_dp->aux,
				DP_SUPPORTED_LINK_RATES,
				sink_rates,
				sizeof(sink_rates));

		/* The table is zero-terminated; stop at the first 0 entry. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read is in kHz while drm clock is saved in deca-kHz */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	intel_dp_print_rates(intel_dp);

	if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
	      DP_DWN_STRM_PORT_PRESENT))
		return true; /* native DP sink */

	if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
		return true; /* no per-port downstream info */

	if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
				    intel_dp->downstream_ports,
				    DP_MAX_DOWNSTREAM_PORTS) < 0)
		return false; /* downstream port status fetch failed */

	return true;
}
3946
0d198328
AJ
3947static void
3948intel_dp_probe_oui(struct intel_dp *intel_dp)
3949{
3950 u8 buf[3];
3951
3952 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3953 return;
3954
9d1a1031 3955 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3956 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3957 buf[0], buf[1], buf[2]);
3958
9d1a1031 3959 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3960 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3961 buf[0], buf[1], buf[2]);
3962}
3963
0e32b39c
DA
3964static bool
3965intel_dp_probe_mst(struct intel_dp *intel_dp)
3966{
3967 u8 buf[1];
3968
3969 if (!intel_dp->can_mst)
3970 return false;
3971
3972 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3973 return false;
3974
0e32b39c
DA
3975 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3976 if (buf[0] & DP_MST_CAP) {
3977 DRM_DEBUG_KMS("Sink is MST capable\n");
3978 intel_dp->is_mst = true;
3979 } else {
3980 DRM_DEBUG_KMS("Sink is not MST capable\n");
3981 intel_dp->is_mst = false;
3982 }
3983 }
0e32b39c
DA
3984
3985 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3986 return intel_dp->is_mst;
3987}
3988
e5a1cab5 3989static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3990{
082dcc7c 3991 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 3992 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 3993 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3994 u8 buf;
e5a1cab5 3995 int ret = 0;
c6297843
RV
3996 int count = 0;
3997 int attempts = 10;
d2e216d0 3998
082dcc7c
RV
3999 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4000 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4001 ret = -EIO;
4002 goto out;
4373f0f2
PZ
4003 }
4004
082dcc7c 4005 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4006 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4007 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4008 ret = -EIO;
4009 goto out;
4010 }
d2e216d0 4011
c6297843
RV
4012 do {
4013 intel_wait_for_vblank(dev, intel_crtc->pipe);
4014
4015 if (drm_dp_dpcd_readb(&intel_dp->aux,
4016 DP_TEST_SINK_MISC, &buf) < 0) {
4017 ret = -EIO;
4018 goto out;
4019 }
4020 count = buf & DP_TEST_COUNT_MASK;
4021 } while (--attempts && count);
4022
4023 if (attempts == 0) {
dc5a9037 4024 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
c6297843
RV
4025 ret = -ETIMEDOUT;
4026 }
4027
e5a1cab5 4028 out:
082dcc7c 4029 hsw_enable_ips(intel_crtc);
e5a1cab5 4030 return ret;
082dcc7c
RV
4031}
4032
/*
 * Start sink CRC calculation: verify the sink supports CRC, stop any
 * calculation already in flight, disable IPS (it perturbs the CRC) and
 * set DP_TEST_SINK_START.
 *
 * Returns 0 on success, -ENOTTY if CRC is unsupported, -EIO on AUX
 * failure (with IPS restored).
 */
static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int ret;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
		return -EIO;

	if (!(buf & DP_TEST_CRC_SUPPORTED))
		return -ENOTTY;

	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
		return -EIO;

	/* A previous run may still be active; stop it cleanly first. */
	if (buf & DP_TEST_SINK_START) {
		ret = intel_dp_sink_crc_stop(intel_dp);
		if (ret)
			return ret;
	}

	hsw_disable_ips(intel_crtc);

	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
			       buf | DP_TEST_SINK_START) < 0) {
		hsw_enable_ips(intel_crtc);
		return -EIO;
	}

	/* Give the sink a frame before the first CRC is sampled. */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return 0;
}
4067
/*
 * Fetch a sink-computed CRC: start calculation, wait (up to 6 vblanks)
 * for the sink's CRC count to become non-zero, then read the 6 CRC
 * bytes into @crc. Always stops the calculation before returning.
 *
 * Returns 0 on success or a negative error code (-EIO, -ETIMEDOUT,
 * or whatever intel_dp_sink_crc_start() failed with).
 */
int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
	u8 buf;
	int count, ret;
	int attempts = 6;

	ret = intel_dp_sink_crc_start(intel_dp);
	if (ret)
		return ret;

	/* Poll once per vblank until the sink has produced a CRC. */
	do {
		intel_wait_for_vblank(dev, intel_crtc->pipe);

		if (drm_dp_dpcd_readb(&intel_dp->aux,
				      DP_TEST_SINK_MISC, &buf) < 0) {
			ret = -EIO;
			goto stop;
		}
		count = buf & DP_TEST_COUNT_MASK;

	} while (--attempts && count == 0);

	if (attempts == 0) {
		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
		ret = -ETIMEDOUT;
		goto stop;
	}

	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
		ret = -EIO;
		goto stop;
	}

stop:
	/* ret is 0 here on the success path. */
	intel_dp_sink_crc_stop(intel_dp);
	return ret;
}
4108
a60f0e38
JB
4109static bool
4110intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4111{
9d1a1031
JN
4112 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4113 DP_DEVICE_SERVICE_IRQ_VECTOR,
4114 sink_irq_vector, 1) == 1;
a60f0e38
JB
4115}
4116
0e32b39c
DA
4117static bool
4118intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4119{
4120 int ret;
4121
4122 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4123 DP_SINK_COUNT_ESI,
4124 sink_irq_vector, 14);
4125 if (ret != 14)
4126 return false;
4127
4128 return true;
4129}
4130
c5d5ab7a
TP
4131static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4132{
4133 uint8_t test_result = DP_TEST_ACK;
4134 return test_result;
4135}
4136
4137static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4138{
4139 uint8_t test_result = DP_TEST_NAK;
4140 return test_result;
4141}
4142
4143static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4144{
c5d5ab7a 4145 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4146 struct intel_connector *intel_connector = intel_dp->attached_connector;
4147 struct drm_connector *connector = &intel_connector->base;
4148
4149 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4150 connector->edid_corrupt ||
559be30c
TP
4151 intel_dp->aux.i2c_defer_count > 6) {
4152 /* Check EDID read for NACKs, DEFERs and corruption
4153 * (DP CTS 1.2 Core r1.1)
4154 * 4.2.2.4 : Failed EDID read, I2C_NAK
4155 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4156 * 4.2.2.6 : EDID corruption detected
4157 * Use failsafe mode for all cases
4158 */
4159 if (intel_dp->aux.i2c_nack_count > 0 ||
4160 intel_dp->aux.i2c_defer_count > 0)
4161 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4162 intel_dp->aux.i2c_nack_count,
4163 intel_dp->aux.i2c_defer_count);
4164 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4165 } else {
f79b468e
TS
4166 struct edid *block = intel_connector->detect_edid;
4167
4168 /* We have to write the checksum
4169 * of the last block read
4170 */
4171 block += intel_connector->detect_edid->extensions;
4172
559be30c
TP
4173 if (!drm_dp_dpcd_write(&intel_dp->aux,
4174 DP_TEST_EDID_CHECKSUM,
f79b468e 4175 &block->checksum,
5a1cc655 4176 1))
559be30c
TP
4177 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4178
4179 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4180 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4181 }
4182
4183 /* Set test active flag here so userspace doesn't interrupt things */
4184 intel_dp->compliance_test_active = 1;
4185
c5d5ab7a
TP
4186 return test_result;
4187}
4188
4189static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4190{
c5d5ab7a
TP
4191 uint8_t test_result = DP_TEST_NAK;
4192 return test_result;
4193}
4194
4195static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4196{
4197 uint8_t response = DP_TEST_NAK;
4198 uint8_t rxdata = 0;
4199 int status = 0;
4200
c5d5ab7a
TP
4201 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4202 if (status <= 0) {
4203 DRM_DEBUG_KMS("Could not read test request from sink\n");
4204 goto update_status;
4205 }
4206
4207 switch (rxdata) {
4208 case DP_TEST_LINK_TRAINING:
4209 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4211 response = intel_dp_autotest_link_training(intel_dp);
4212 break;
4213 case DP_TEST_LINK_VIDEO_PATTERN:
4214 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4216 response = intel_dp_autotest_video_pattern(intel_dp);
4217 break;
4218 case DP_TEST_LINK_EDID_READ:
4219 DRM_DEBUG_KMS("EDID test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4221 response = intel_dp_autotest_edid(intel_dp);
4222 break;
4223 case DP_TEST_LINK_PHY_TEST_PATTERN:
4224 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4225 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4226 response = intel_dp_autotest_phy_pattern(intel_dp);
4227 break;
4228 default:
4229 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4230 break;
4231 }
4232
4233update_status:
4234 status = drm_dp_dpcd_write(&intel_dp->aux,
4235 DP_TEST_RESPONSE,
4236 &response, 1);
4237 if (status <= 0)
4238 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4239}
4240
/*
 * Service an MST sink interrupt: read the ESI block, retrain if channel
 * EQ failed, hand the ESI to the MST manager, ack the handled bits and
 * loop while new ESI data keeps arriving. If the ESI read fails, the
 * device is assumed gone and MST is torn down with a hotplug event.
 *
 * Returns the MST manager's IRQ result, 0 if nothing was handled, or
 * -EINVAL when not in MST mode / after teardown.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[16] = { 0 };
		int ret = 0;
		int retry;
		bool handled;
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced ESI bits; retry the AUX write up to 3 times. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have arrived while servicing; re-read. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
			/* send a hotplug event */
			drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
		}
	}
	return -EINVAL;
}
4297
a4fc5ed6
KP
4298/*
4299 * According to DP spec
4300 * 5.1.2:
4301 * 1. Read DPCD
4302 * 2. Configure link according to Receiver Capabilities
4303 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4304 * 4. Check link status on receipt of hot-plug interrupt
4305 */
/*
 * Check the SST link after a (short-pulse) hotplug interrupt: verify
 * the sink is still there, clear/log any sink IRQ vector, and retrain
 * if channel EQ is no longer OK or a compliance link-training test was
 * requested. Caller must hold the connection_mutex.
 */
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	intel_dp->compliance_test_active = 0;
	intel_dp->compliance_test_type = 0;
	intel_dp->compliance_test_data = 0;

	/* Nothing to do when the encoder isn't driving an active pipe. */
	if (!intel_encoder->base.crtc)
		return;

	if (!to_intel_crtc(intel_encoder->base.crtc)->active)
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	/* if link training is requested we should perform it always */
	if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
	    (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      intel_encoder->base.name);
		intel_dp_start_link_train(intel_dp);
		intel_dp_stop_link_train(intel_dp);
	}
}
a4fc5ed6 4363
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status via DPCD: re-read the capabilities, then
 * for branch devices consult SINK_COUNT (if HPD-aware), probe DDC, and
 * finally fall back to per-port-type heuristics.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	uint8_t *dpcd = intel_dp->dpcd;
	uint8_t type;

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		uint8_t reg;

		if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
					    &reg, 1) < 0)
			return connector_status_unknown;

		/* Non-zero sink count means something is attached downstream. */
		return DP_GET_SINK_COUNT(reg) ? connector_status_connected
					      : connector_status_disconnected;
	}

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD < 1.1: only the coarse downstream-port type is available. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4413
d410b56d
CW
4414static enum drm_connector_status
4415edp_detect(struct intel_dp *intel_dp)
4416{
4417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418 enum drm_connector_status status;
4419
4420 status = intel_panel_detect(dev);
4421 if (status == connector_status_unknown)
4422 status = connector_status_connected;
4423
4424 return status;
4425}
4426
b93433cc
JN
4427static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4428 struct intel_digital_port *port)
5eb08b69 4429{
b93433cc 4430 u32 bit;
01cb9ea6 4431
0df53b77
JN
4432 switch (port->port) {
4433 case PORT_A:
4434 return true;
4435 case PORT_B:
4436 bit = SDE_PORTB_HOTPLUG;
4437 break;
4438 case PORT_C:
4439 bit = SDE_PORTC_HOTPLUG;
4440 break;
4441 case PORT_D:
4442 bit = SDE_PORTD_HOTPLUG;
4443 break;
4444 default:
4445 MISSING_CASE(port->port);
4446 return false;
4447 }
4448
4449 return I915_READ(SDEISR) & bit;
4450}
4451
4452static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4453 struct intel_digital_port *port)
4454{
4455 u32 bit;
4456
4457 switch (port->port) {
4458 case PORT_A:
4459 return true;
4460 case PORT_B:
4461 bit = SDE_PORTB_HOTPLUG_CPT;
4462 break;
4463 case PORT_C:
4464 bit = SDE_PORTC_HOTPLUG_CPT;
4465 break;
4466 case PORT_D:
4467 bit = SDE_PORTD_HOTPLUG_CPT;
4468 break;
a78695d3
JN
4469 case PORT_E:
4470 bit = SDE_PORTE_HOTPLUG_SPT;
4471 break;
0df53b77
JN
4472 default:
4473 MISSING_CASE(port->port);
4474 return false;
b93433cc 4475 }
1b469639 4476
b93433cc 4477 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4478}
4479
7e66bcf2 4480static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4481 struct intel_digital_port *port)
a4fc5ed6 4482{
9642c81c 4483 u32 bit;
5eb08b69 4484
9642c81c
JN
4485 switch (port->port) {
4486 case PORT_B:
4487 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4488 break;
4489 case PORT_C:
4490 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4491 break;
4492 case PORT_D:
4493 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4494 break;
4495 default:
4496 MISSING_CASE(port->port);
4497 return false;
4498 }
4499
4500 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4501}
4502
0780cd36
VS
4503static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4504 struct intel_digital_port *port)
9642c81c
JN
4505{
4506 u32 bit;
4507
4508 switch (port->port) {
4509 case PORT_B:
0780cd36 4510 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4511 break;
4512 case PORT_C:
0780cd36 4513 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4514 break;
4515 case PORT_D:
0780cd36 4516 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
4517 break;
4518 default:
4519 MISSING_CASE(port->port);
4520 return false;
a4fc5ed6
KP
4521 }
4522
1d245987 4523 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4524}
4525
e464bfde 4526static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4527 struct intel_digital_port *intel_dig_port)
e464bfde 4528{
e2ec35a5
SJ
4529 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4530 enum port port;
e464bfde
JN
4531 u32 bit;
4532
e2ec35a5
SJ
4533 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4534 switch (port) {
e464bfde
JN
4535 case PORT_A:
4536 bit = BXT_DE_PORT_HP_DDIA;
4537 break;
4538 case PORT_B:
4539 bit = BXT_DE_PORT_HP_DDIB;
4540 break;
4541 case PORT_C:
4542 bit = BXT_DE_PORT_HP_DDIC;
4543 break;
4544 default:
e2ec35a5 4545 MISSING_CASE(port);
e464bfde
JN
4546 return false;
4547 }
4548
4549 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4550}
4551
7e66bcf2
JN
4552/*
4553 * intel_digital_port_connected - is the specified port connected?
4554 * @dev_priv: i915 private structure
4555 * @port: the port to test
4556 *
4557 * Return %true if @port is connected, %false otherwise.
4558 */
237ed86c 4559bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4560 struct intel_digital_port *port)
4561{
0df53b77 4562 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4563 return ibx_digital_port_connected(dev_priv, port);
22824fac 4564 else if (HAS_PCH_SPLIT(dev_priv))
0df53b77 4565 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4566 else if (IS_BROXTON(dev_priv))
4567 return bxt_digital_port_connected(dev_priv, port);
0780cd36
VS
4568 else if (IS_GM45(dev_priv))
4569 return gm45_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4570 else
4571 return g4x_digital_port_connected(dev_priv, port);
4572}
4573
8c241fef 4574static struct edid *
beb60608 4575intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4576{
beb60608 4577 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4578
9cd300e0
JN
4579 /* use cached edid if we have one */
4580 if (intel_connector->edid) {
9cd300e0
JN
4581 /* invalid edid */
4582 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4583 return NULL;
4584
55e9edeb 4585 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4586 } else
4587 return drm_get_edid(&intel_connector->base,
4588 &intel_dp->aux.ddc);
4589}
8c241fef 4590
beb60608
CW
4591static void
4592intel_dp_set_edid(struct intel_dp *intel_dp)
4593{
4594 struct intel_connector *intel_connector = intel_dp->attached_connector;
4595 struct edid *edid;
8c241fef 4596
beb60608
CW
4597 edid = intel_dp_get_edid(intel_dp);
4598 intel_connector->detect_edid = edid;
4599
4600 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4601 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4602 else
4603 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4604}
4605
beb60608
CW
4606static void
4607intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4608{
beb60608 4609 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4610
beb60608
CW
4611 kfree(intel_connector->detect_edid);
4612 intel_connector->detect_edid = NULL;
9cd300e0 4613
beb60608
CW
4614 intel_dp->has_audio = false;
4615}
d6f24d0f 4616
/*
 * Connector ->detect hook: determine whether a sink is present.
 *
 * Holds the AUX power domain across the probe, refreshes the cached
 * EDID, handles MST probing, and services any pending sink IRQs.
 * Returns the resulting drm connector status.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = connector->dev;
	enum drm_connector_status status;
	enum intel_display_power_domain power_domain;
	bool ret;
	u8 sink_irq_vector;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.id, connector->name);
	/* Invalidate the EDID latched by a previous detect cycle. */
	intel_dp_unset_edid(intel_dp);

	if (intel_dp->is_mst) {
		/* MST devices are disconnected from a monitor POV */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		return connector_status_disconnected;
	}

	/* AUX transactions below need the AUX power domain. */
	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(to_i915(dev), power_domain);

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(to_i915(dev),
					      dp_to_dig_port(intel_dp)))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status != connector_status_connected) {
		/* Sink gone: reset any in-progress compliance test state. */
		intel_dp->compliance_test_active = 0;
		intel_dp->compliance_test_type = 0;
		intel_dp->compliance_test_data = 0;

		goto out;
	}

	intel_dp_probe_oui(intel_dp);

	ret = intel_dp_probe_mst(intel_dp);
	if (ret) {
		/* if we are in MST mode then this connector
		   won't appear connected or have anything with EDID on it */
		if (intel_encoder->type != INTEL_OUTPUT_EDP)
			intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);

	if (intel_encoder->type != INTEL_OUTPUT_EDP)
		intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	status = connector_status_connected;

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		drm_dp_dpcd_writeb(&intel_dp->aux,
				   DP_DEVICE_SERVICE_IRQ_VECTOR,
				   sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

out:
	intel_display_power_put(to_i915(dev), power_domain);
	return status;
}
4704
beb60608
CW
4705static void
4706intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4707{
df0e9248 4708 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4709 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4710 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4711 enum intel_display_power_domain power_domain;
a4fc5ed6 4712
beb60608
CW
4713 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4714 connector->base.id, connector->name);
4715 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4716
beb60608
CW
4717 if (connector->status != connector_status_connected)
4718 return;
671dedd2 4719
25f78f58
VS
4720 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4721 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4722
4723 intel_dp_set_edid(intel_dp);
4724
25f78f58 4725 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4726
4727 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4728 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4729}
4730
4731static int intel_dp_get_modes(struct drm_connector *connector)
4732{
4733 struct intel_connector *intel_connector = to_intel_connector(connector);
4734 struct edid *edid;
4735
4736 edid = intel_connector->detect_edid;
4737 if (edid) {
4738 int ret = intel_connector_update_modes(connector, edid);
4739 if (ret)
4740 return ret;
4741 }
32f9d658 4742
f8779fda 4743 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4744 if (is_edp(intel_attached_dp(connector)) &&
4745 intel_connector->panel.fixed_mode) {
f8779fda 4746 struct drm_display_mode *mode;
beb60608
CW
4747
4748 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4749 intel_connector->panel.fixed_mode);
f8779fda 4750 if (mode) {
32f9d658
ZW
4751 drm_mode_probed_add(connector, mode);
4752 return 1;
4753 }
4754 }
beb60608 4755
32f9d658 4756 return 0;
a4fc5ed6
KP
4757}
4758
1aad7ac0
CW
4759static bool
4760intel_dp_detect_audio(struct drm_connector *connector)
4761{
1aad7ac0 4762 bool has_audio = false;
beb60608 4763 struct edid *edid;
1aad7ac0 4764
beb60608
CW
4765 edid = to_intel_connector(connector)->detect_edid;
4766 if (edid)
1aad7ac0 4767 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4768
1aad7ac0
CW
4769 return has_audio;
4770}
4771
/*
 * Connector ->set_property hook. Handles the force-audio, broadcast-RGB
 * and (eDP only) scaling-mode properties; any change that affects the
 * output triggers a modeset restore via the 'done' label. Returns 0 on
 * success or a negative error code.
 */
static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	int ret;

	/* Record the new value first; property state must match below. */
	ret = drm_object_property_set_value(&connector->base, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if (i == intel_dp->force_audio)
			return 0;

		intel_dp->force_audio = i;

		if (i == HDMI_AUDIO_AUTO)
			has_audio = intel_dp_detect_audio(connector);
		else
			has_audio = (i == HDMI_AUDIO_ON);

		/* No effective change: skip the modeset restore. */
		if (has_audio == intel_dp->has_audio)
			return 0;

		intel_dp->has_audio = has_audio;
		goto done;
	}

	if (property == dev_priv->broadcast_rgb_property) {
		bool old_auto = intel_dp->color_range_auto;
		bool old_range = intel_dp->limited_color_range;

		switch (val) {
		case INTEL_BROADCAST_RGB_AUTO:
			intel_dp->color_range_auto = true;
			break;
		case INTEL_BROADCAST_RGB_FULL:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = false;
			break;
		case INTEL_BROADCAST_RGB_LIMITED:
			intel_dp->color_range_auto = false;
			intel_dp->limited_color_range = true;
			break;
		default:
			return -EINVAL;
		}

		/* No effective change: skip the modeset restore. */
		if (old_auto == intel_dp->color_range_auto &&
		    old_range == intel_dp->limited_color_range)
			return 0;

		goto done;
	}

	if (is_edp(intel_dp) &&
	    property == connector->dev->mode_config.scaling_mode_property) {
		if (val == DRM_MODE_SCALE_NONE) {
			DRM_DEBUG_KMS("no scaling not supported\n");
			return -EINVAL;
		}

		if (intel_connector->panel.fitting_mode == val) {
			/* the eDP scaling property is not changed */
			return 0;
		}
		intel_connector->panel.fitting_mode = val;

		goto done;
	}

	return -EINVAL;

done:
	/* Apply the change by restoring the mode on the active crtc. */
	if (intel_encoder->base.crtc)
		intel_crtc_restore_mode(intel_encoder->base.crtc);

	return 0;
}
4859
a4fc5ed6 4860static void
73845adf 4861intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4862{
1d508706 4863 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4864
10e972d3 4865 kfree(intel_connector->detect_edid);
beb60608 4866
9cd300e0
JN
4867 if (!IS_ERR_OR_NULL(intel_connector->edid))
4868 kfree(intel_connector->edid);
4869
acd8db10
PZ
4870 /* Can't call is_edp() since the encoder may have been destroyed
4871 * already. */
4872 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4873 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4874
a4fc5ed6 4875 drm_connector_cleanup(connector);
55f78c43 4876 kfree(connector);
a4fc5ed6
KP
4877}
4878
/*
 * Encoder ->destroy hook: tear down AUX and MST state, force panel VDD
 * off for eDP (including the delayed-off worker), unregister the reboot
 * notifier, and free the digital port.
 */
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = &intel_dig_port->dp;

	intel_dp_aux_fini(intel_dp);
	intel_dp_mst_encoder_cleanup(intel_dig_port);
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}
	drm_encoder_cleanup(encoder);
	kfree(intel_dig_port);
}
4904
/*
 * Encoder suspend hook: for eDP, flush the delayed VDD-off worker and
 * force panel VDD off so it is not left enabled across suspend.
 */
static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	if (!is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	pps_lock(intel_dp);
	edp_panel_vdd_off_sync(intel_dp);
	pps_unlock(intel_dp);
}
4921
/*
 * If the BIOS left panel VDD enabled, adopt that state: take the power
 * domain reference that backs the VDD bit and schedule the usual
 * delayed VDD off. Caller must hold pps_mutex.
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
	power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
	intel_display_power_get(dev_priv, power_domain);

	edp_panel_vdd_schedule_off(intel_dp);
}
4946
/*
 * Encoder ->reset hook: re-sync eDP power sequencer state with whatever
 * the BIOS left behind (boot/resume). No-op for non-eDP encoders.
 */
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
		return;

	intel_dp = enc_to_intel_dp(encoder);

	pps_lock(intel_dp);

	/*
	 * Read out the current power sequencer assignment,
	 * in case the BIOS did something with it.
	 */
	if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
		vlv_initial_power_sequencer_setup(intel_dp);

	intel_edp_panel_vdd_sanitize(intel_dp);

	pps_unlock(intel_dp);
}
4969
/* DRM connector hooks for DP/eDP connectors (atomic helpers for state). */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = intel_dp_detect,
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.set_property = intel_dp_set_property,
	.atomic_get_property = intel_connector_atomic_get_property,
	.destroy = intel_dp_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
4981
/* Connector helper hooks: mode enumeration/validation and encoder lookup. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.best_encoder = intel_best_encoder,
};
4987
/* DRM encoder hooks for DP/eDP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
4992
/*
 * Handle a hotplug pulse on a DP digital port.
 *
 * Long pulses re-probe the sink (DPCD, OUI, MST); short pulses service
 * MST sideband or re-check link status. eDP long pulses are ignored to
 * avoid the vdd-off -> long-hpd -> vdd-on feedback loop. Holds the AUX
 * power domain for the duration. Returns IRQ_HANDLED or IRQ_NONE.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum irqreturn ret = IRQ_NONE;

	/* Normalize ambiguous port types to DisplayPort. */
	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
	    intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->port),
		      long_hpd ? "long" : "short");

	power_domain = intel_display_port_aux_power_domain(intel_encoder);
	intel_display_power_get(dev_priv, power_domain);

	if (long_hpd) {
		/* indicate that we need to restart link training */
		intel_dp->train_set_valid = false;

		if (!intel_digital_port_connected(dev_priv, intel_dig_port))
			goto mst_fail;

		if (!intel_dp_get_dpcd(intel_dp)) {
			goto mst_fail;
		}

		intel_dp_probe_oui(intel_dp);

		/* Not an MST sink: check link status under the modeset lock. */
		if (!intel_dp_probe_mst(intel_dp)) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
			goto mst_fail;
		}
	} else {
		if (intel_dp->is_mst) {
			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
				goto mst_fail;
		}

		if (!intel_dp->is_mst) {
			drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
			intel_dp_check_link_status(intel_dp);
			drm_modeset_unlock(&dev->mode_config.connection_mutex);
		}
	}

	ret = IRQ_HANDLED;

	goto put_power;
mst_fail:
	/* if we were in MST mode, and device is not there get out of MST mode */
	if (intel_dp->is_mst) {
		DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
		intel_dp->is_mst = false;
		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
	}
put_power:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
5073
477ec328 5074/* check the VBT to see whether the eDP is on another port */
5d8a7752 5075bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5076{
5077 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5078 union child_device_config *p_child;
36e83a18 5079 int i;
5d8a7752 5080 static const short port_mapping[] = {
477ec328
RV
5081 [PORT_B] = DVO_PORT_DPB,
5082 [PORT_C] = DVO_PORT_DPC,
5083 [PORT_D] = DVO_PORT_DPD,
5084 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5085 };
36e83a18 5086
53ce81a7
VS
5087 /*
5088 * eDP not supported on g4x. so bail out early just
5089 * for a bit extra safety in case the VBT is bonkers.
5090 */
5091 if (INTEL_INFO(dev)->gen < 5)
5092 return false;
5093
3b32a35b
VS
5094 if (port == PORT_A)
5095 return true;
5096
41aa3448 5097 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5098 return false;
5099
41aa3448
RV
5100 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5101 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5102
5d8a7752 5103 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5104 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5105 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5106 return true;
5107 }
5108 return false;
5109}
5110
0e32b39c 5111void
f684960e
CW
5112intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5113{
53b41837
YN
5114 struct intel_connector *intel_connector = to_intel_connector(connector);
5115
3f43c48d 5116 intel_attach_force_audio_property(connector);
e953fd7b 5117 intel_attach_broadcast_rgb_property(connector);
55bc60db 5118 intel_dp->color_range_auto = true;
53b41837
YN
5119
5120 if (is_edp(intel_dp)) {
5121 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5122 drm_object_attach_property(
5123 &connector->base,
53b41837 5124 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5125 DRM_MODE_SCALE_ASPECT);
5126 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5127 }
f684960e
CW
5128}
5129
dada1a9f
ID
5130static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5131{
d28d4731 5132 intel_dp->panel_power_off_time = ktime_get_boottime();
dada1a9f
ID
5133 intel_dp->last_power_on = jiffies;
5134 intel_dp->last_backlight_off = jiffies;
5135}
5136
/*
 * Compute the eDP panel power sequencing delays from the max of the
 * current register values and the VBT, falling back to the eDP spec
 * limits when both are unset, and store them in intel_dp->pps_delays.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;
	u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
	i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);
	} else if (HAS_PCH_SPLIT(dev)) {
		pp_ctrl_reg = PCH_PP_CONTROL;
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/* Workaround: Need to write PP_CONTROL with the unlock key as
	 * the very first thing. */
	pp_ctl = ironlake_get_pp_control(intel_dp);

	pp_on = I915_READ(pp_on_reg);
	pp_off = I915_READ(pp_off_reg);
	/* BXT keeps the cycle delay in PP_CONTROL; no divisor register. */
	if (!IS_BROXTON(dev)) {
		I915_WRITE(pp_ctrl_reg, pp_ctl);
		pp_div = I915_READ(pp_div_reg);
	}

	/* Pull timing values out of registers */
	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
		PANEL_POWER_UP_DELAY_SHIFT;

	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
		PANEL_LIGHT_ON_DELAY_SHIFT;

	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
		PANEL_LIGHT_OFF_DELAY_SHIFT;

	cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
		PANEL_POWER_DOWN_DELAY_SHIFT;

	if (IS_BROXTON(dev)) {
		/* BXT cycle delay is zero-based, in units of 100ms. */
		u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
			BXT_POWER_CYCLE_DELAY_SHIFT;
		if (tmp > 0)
			cur.t11_t12 = (tmp - 1) * 1000;
		else
			cur.t11_t12 = 0;
	} else {
		cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
		       PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
	}

	DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);

	vbt = dev_priv->vbt.edp_pps;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert from 100usec hardware units to milliseconds. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
5259
/*
 * Program the panel power sequencer registers from the delays computed
 * by intel_dp_init_panel_power_sequencer(), including the reference
 * divisor and (on IBX/CPT/VLV/CHV) the port selection bits.
 * Caller must hold pps_mutex.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_on, pp_off, pp_div, port_sel = 0;
	int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
	i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
	enum port port = dp_to_dig_port(intel_dp)->port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_BROXTON(dev)) {
		/*
		 * TODO: BXT has 2 sets of PPS registers.
		 * Correct Register for Broxton need to be identified
		 * using VBT. hardcoding for now
		 */
		pp_ctrl_reg = BXT_PP_CONTROL(0);
		pp_on_reg = BXT_PP_ON_DELAYS(0);
		pp_off_reg = BXT_PP_OFF_DELAYS(0);

	} else if (HAS_PCH_SPLIT(dev)) {
		pp_on_reg = PCH_PP_ON_DELAYS;
		pp_off_reg = PCH_PP_OFF_DELAYS;
		pp_div_reg = PCH_PP_DIVISOR;
	} else {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
		pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
	}

	/*
	 * And finally store the new values in the power sequencer. The
	 * backlight delays are set to 1 because we do manual waits on them. For
	 * T8, even BSpec recommends doing it. For T9, if we don't do this,
	 * we'll end up waiting for the backlight off delay twice: once when we
	 * do the manual sleep, and once when we disable the panel and wait for
	 * the PP_STATUS bit to become zero.
	 */
	pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
		(1 << PANEL_LIGHT_ON_DELAY_SHIFT);
	pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
		 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
	/* Compute the divisor for the pp clock, simply match the Bspec
	 * formula. */
	if (IS_BROXTON(dev)) {
		/* BXT keeps the cycle delay in PP_CONTROL, not a divisor reg. */
		pp_div = I915_READ(pp_ctrl_reg);
		pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
				<< BXT_POWER_CYCLE_DELAY_SHIFT);
	} else {
		pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
		pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
				<< PANEL_POWER_CYCLE_DELAY_SHIFT);
	}

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		if (port == PORT_A)
			port_sel = PANEL_PORT_SELECT_DPA;
		else
			port_sel = PANEL_PORT_SELECT_DPD;
	}

	pp_on |= port_sel;

	I915_WRITE(pp_on_reg, pp_on);
	I915_WRITE(pp_off_reg, pp_off);
	if (IS_BROXTON(dev))
		I915_WRITE(pp_ctrl_reg, pp_div);
	else
		I915_WRITE(pp_div_reg, pp_div);

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(pp_on_reg),
		      I915_READ(pp_off_reg),
		      IS_BROXTON(dev) ?
		      (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
		      I915_READ(pp_div_reg));
}
5347
b33a2815
VK
/**
 * intel_dp_set_drrs_state - program registers for RR switch to take effect
 * @dev: DRM device
 * @refresh_rate: RR to be programmed
 *
 * This function gets called when refresh rate (RR) has to be changed from
 * one frequency to another. Switches can be between high and low RR
 * supported by the panel or to any other RR based on media playback (in
 * this case, RR value needs to be passed from user space).
 *
 * The caller of this function needs to take a lock on dev_priv->drrs.
 */
static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_digital_port *dig_port = NULL;
	struct intel_dp *intel_dp = dev_priv->drrs.dp;
	struct intel_crtc_state *config = NULL;
	struct intel_crtc *intel_crtc = NULL;
	enum drrs_refresh_rate_type index = DRRS_HIGH_RR;

	if (refresh_rate <= 0) {
		DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
		return;
	}

	/* drrs.dp is only set while DRRS is enabled on some pipe */
	if (intel_dp == NULL) {
		DRM_DEBUG_KMS("DRRS not supported.\n");
		return;
	}

	/*
	 * FIXME: This needs proper synchronization with psr state for some
	 * platforms that cannot have PSR and DRRS enabled at the same time.
	 */

	dig_port = dp_to_dig_port(intel_dp);
	encoder = &dig_port->base;
	intel_crtc = to_intel_crtc(encoder->base.crtc);

	if (!intel_crtc) {
		DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
		return;
	}

	config = intel_crtc->config;

	if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
		return;
	}

	/* Asking for the panel's downclock rate selects the low-RR state */
	if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
			refresh_rate)
		index = DRRS_LOW_RR;

	if (index == dev_priv->drrs.refresh_rate_type) {
		DRM_DEBUG_KMS(
			"DRRS requested for previously set RR...ignoring\n");
		return;
	}

	if (!intel_crtc->active) {
		DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
		return;
	}

	if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
		/* Gen8+ (except CHV): switch RR by reprogramming link M/N */
		switch (index) {
		case DRRS_HIGH_RR:
			intel_dp_set_m_n(intel_crtc, M1_N1);
			break;
		case DRRS_LOW_RR:
			intel_dp_set_m_n(intel_crtc, M2_N2);
			break;
		case DRRS_MAX_RR:
		default:
			DRM_ERROR("Unsupported refreshrate type\n");
		}
	} else if (INTEL_INFO(dev)->gen > 6) {
		/* Gen7 / VLV / CHV: toggle the RR-mode-switch bit in PIPECONF */
		i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
		u32 val;

		val = I915_READ(reg);
		if (index > DRRS_HIGH_RR) {
			/* entering low RR */
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val |= PIPECONF_EDP_RR_MODE_SWITCH;
		} else {
			/* back to high RR */
			if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
			else
				val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
		}
		I915_WRITE(reg, val);
	}

	dev_priv->drrs.refresh_rate_type = index;

	DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
}
5451
b33a2815
VK
5452/**
5453 * intel_edp_drrs_enable - init drrs struct if supported
5454 * @intel_dp: DP struct
5455 *
5456 * Initializes frontbuffer_bits and drrs.dp
5457 */
c395578e
VK
5458void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5459{
5460 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5461 struct drm_i915_private *dev_priv = dev->dev_private;
5462 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5463 struct drm_crtc *crtc = dig_port->base.base.crtc;
5464 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5465
5466 if (!intel_crtc->config->has_drrs) {
5467 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5468 return;
5469 }
5470
5471 mutex_lock(&dev_priv->drrs.mutex);
5472 if (WARN_ON(dev_priv->drrs.dp)) {
5473 DRM_ERROR("DRRS already enabled\n");
5474 goto unlock;
5475 }
5476
5477 dev_priv->drrs.busy_frontbuffer_bits = 0;
5478
5479 dev_priv->drrs.dp = intel_dp;
5480
5481unlock:
5482 mutex_unlock(&dev_priv->drrs.mutex);
5483}
5484
b33a2815
VK
5485/**
5486 * intel_edp_drrs_disable - Disable DRRS
5487 * @intel_dp: DP struct
5488 *
5489 */
c395578e
VK
5490void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5491{
5492 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5493 struct drm_i915_private *dev_priv = dev->dev_private;
5494 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5495 struct drm_crtc *crtc = dig_port->base.base.crtc;
5496 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5497
5498 if (!intel_crtc->config->has_drrs)
5499 return;
5500
5501 mutex_lock(&dev_priv->drrs.mutex);
5502 if (!dev_priv->drrs.dp) {
5503 mutex_unlock(&dev_priv->drrs.mutex);
5504 return;
5505 }
5506
5507 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5508 intel_dp_set_drrs_state(dev_priv->dev,
5509 intel_dp->attached_connector->panel.
5510 fixed_mode->vrefresh);
5511
5512 dev_priv->drrs.dp = NULL;
5513 mutex_unlock(&dev_priv->drrs.mutex);
5514
5515 cancel_delayed_work_sync(&dev_priv->drrs.work);
5516}
5517
4e9ac947
VK
5518static void intel_edp_drrs_downclock_work(struct work_struct *work)
5519{
5520 struct drm_i915_private *dev_priv =
5521 container_of(work, typeof(*dev_priv), drrs.work.work);
5522 struct intel_dp *intel_dp;
5523
5524 mutex_lock(&dev_priv->drrs.mutex);
5525
5526 intel_dp = dev_priv->drrs.dp;
5527
5528 if (!intel_dp)
5529 goto unlock;
5530
439d7ac0 5531 /*
4e9ac947
VK
5532 * The delayed work can race with an invalidate hence we need to
5533 * recheck.
439d7ac0
PB
5534 */
5535
4e9ac947
VK
5536 if (dev_priv->drrs.busy_frontbuffer_bits)
5537 goto unlock;
439d7ac0 5538
4e9ac947
VK
5539 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5540 intel_dp_set_drrs_state(dev_priv->dev,
5541 intel_dp->attached_connector->panel.
5542 downclock_mode->vrefresh);
439d7ac0 5543
4e9ac947 5544unlock:
4e9ac947 5545 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5546}
5547
b33a2815 5548/**
0ddfd203 5549 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5550 * @dev: DRM device
5551 * @frontbuffer_bits: frontbuffer plane tracking bits
5552 *
0ddfd203
R
5553 * This function gets called everytime rendering on the given planes start.
5554 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5555 *
5556 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5557 */
a93fad0f
VK
5558void intel_edp_drrs_invalidate(struct drm_device *dev,
5559 unsigned frontbuffer_bits)
5560{
5561 struct drm_i915_private *dev_priv = dev->dev_private;
5562 struct drm_crtc *crtc;
5563 enum pipe pipe;
5564
9da7d693 5565 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5566 return;
5567
88f933a8 5568 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5569
a93fad0f 5570 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5571 if (!dev_priv->drrs.dp) {
5572 mutex_unlock(&dev_priv->drrs.mutex);
5573 return;
5574 }
5575
a93fad0f
VK
5576 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5577 pipe = to_intel_crtc(crtc)->pipe;
5578
c1d038c6
DV
5579 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5580 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5581
0ddfd203 5582 /* invalidate means busy screen hence upclock */
c1d038c6 5583 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5584 intel_dp_set_drrs_state(dev_priv->dev,
5585 dev_priv->drrs.dp->attached_connector->panel.
5586 fixed_mode->vrefresh);
a93fad0f 5587
a93fad0f
VK
5588 mutex_unlock(&dev_priv->drrs.mutex);
5589}
5590
b33a2815 5591/**
0ddfd203 5592 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5593 * @dev: DRM device
5594 * @frontbuffer_bits: frontbuffer plane tracking bits
5595 *
0ddfd203
R
5596 * This function gets called every time rendering on the given planes has
5597 * completed or flip on a crtc is completed. So DRRS should be upclocked
5598 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5599 * if no other planes are dirty.
b33a2815
VK
5600 *
5601 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5602 */
a93fad0f
VK
5603void intel_edp_drrs_flush(struct drm_device *dev,
5604 unsigned frontbuffer_bits)
5605{
5606 struct drm_i915_private *dev_priv = dev->dev_private;
5607 struct drm_crtc *crtc;
5608 enum pipe pipe;
5609
9da7d693 5610 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5611 return;
5612
88f933a8 5613 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5614
a93fad0f 5615 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5616 if (!dev_priv->drrs.dp) {
5617 mutex_unlock(&dev_priv->drrs.mutex);
5618 return;
5619 }
5620
a93fad0f
VK
5621 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5622 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5623
5624 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5625 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5626
0ddfd203 5627 /* flush means busy screen hence upclock */
c1d038c6 5628 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5629 intel_dp_set_drrs_state(dev_priv->dev,
5630 dev_priv->drrs.dp->attached_connector->panel.
5631 fixed_mode->vrefresh);
5632
5633 /*
5634 * flush also means no more activity hence schedule downclock, if all
5635 * other fbs are quiescent too
5636 */
5637 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5638 schedule_delayed_work(&dev_priv->drrs.work,
5639 msecs_to_jiffies(1000));
5640 mutex_unlock(&dev_priv->drrs.mutex);
5641}
5642
b33a2815
VK
5643/**
5644 * DOC: Display Refresh Rate Switching (DRRS)
5645 *
5646 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
5648 * dynamically, based on the usage scenario. This feature is applicable
5649 * for internal panels.
5650 *
5651 * Indication that the panel supports DRRS is given by the panel EDID, which
5652 * would list multiple refresh rates for one resolution.
5653 *
5654 * DRRS is of 2 types - static and seamless.
5655 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5656 * (may appear as a blink on screen) and is used in dock-undock scenario.
5657 * Seamless DRRS involves changing RR without any visual effect to the user
5658 * and can be used during normal system usage. This is done by programming
5659 * certain registers.
5660 *
5661 * Support for static/seamless DRRS may be indicated in the VBT based on
5662 * inputs from the panel spec.
5663 *
5664 * DRRS saves power by switching to low RR based on usage scenarios.
5665 *
5666 * eDP DRRS:-
5667 * The implementation is based on frontbuffer tracking implementation.
5668 * When there is a disturbance on the screen triggered by user activity or a
5669 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5670 * When there is no movement on screen, after a timeout of 1 second, a switch
5671 * to low RR is made.
5672 * For integration with frontbuffer tracking code,
5673 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5674 *
5675 * DRRS can be further extended to support other internal panels and also
5676 * the scenario of video playback wherein RR is set based on the rate
5677 * requested by userspace.
5678 */
5679
5680/**
5681 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5682 * @intel_connector: eDP connector
5683 * @fixed_mode: preferred mode of panel
5684 *
5685 * This function is called only once at driver load to initialize basic
5686 * DRRS stuff.
5687 *
5688 * Returns:
5689 * Downclock mode if panel supports it, else return NULL.
5690 * DRRS support is determined by the presence of downclock mode (apart
5691 * from VBT setting).
5692 */
4f9db5b5 5693static struct drm_display_mode *
96178eeb
VK
5694intel_dp_drrs_init(struct intel_connector *intel_connector,
5695 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5696{
5697 struct drm_connector *connector = &intel_connector->base;
96178eeb 5698 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5699 struct drm_i915_private *dev_priv = dev->dev_private;
5700 struct drm_display_mode *downclock_mode = NULL;
5701
9da7d693
DV
5702 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5703 mutex_init(&dev_priv->drrs.mutex);
5704
4f9db5b5
PB
5705 if (INTEL_INFO(dev)->gen <= 6) {
5706 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5707 return NULL;
5708 }
5709
5710 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5711 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5712 return NULL;
5713 }
5714
5715 downclock_mode = intel_find_panel_downclock
5716 (dev, fixed_mode, connector);
5717
5718 if (!downclock_mode) {
a1d26342 5719 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5720 return NULL;
5721 }
5722
96178eeb 5723 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5724
96178eeb 5725 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5726 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5727 return downclock_mode;
5728}
5729
/*
 * eDP-specific half of connector initialization: sanitize stale VDD state,
 * cache DPCD and EDID, choose the fixed panel mode (EDID preferred, VBT as
 * fallback) and initialize panel/backlight state.
 *
 * Returns false only when DPCD reads fail (the port is a "ghost"); returns
 * true for non-eDP ports without doing any work.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	struct drm_display_mode *scan;
	struct edid *edid;
	enum pipe pipe = INVALID_PIPE;

	if (!is_edp(intel_dp))
		return true;

	/* VDD sanitizing must happen under the PPS lock */
	pps_lock(intel_dp);
	intel_edp_panel_vdd_sanitize(intel_dp);
	pps_unlock(intel_dp);

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_dp_get_dpcd(intel_dp);

	if (has_dpcd) {
		if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
			dev_priv->no_aux_handshake =
				intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
				DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
	} else {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		return false;
	}

	/* We now know it's not a ghost, init power sequence regs. */
	pps_lock(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
	pps_unlock(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_mode_connector_update_edid_property(connector,
								edid);
			drm_edid_to_eld(connector, edid);
		} else {
			/* unusable EDID: free it and store an error marker */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	/* prefer fixed mode from EDID if available */
	list_for_each_entry(scan, &connector->probed_modes, head) {
		if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
			fixed_mode = drm_mode_duplicate(dev, scan);
			downclock_mode = intel_dp_drrs_init(
						intel_connector, fixed_mode);
			break;
		}
	}

	/* fallback to VBT if available for eDP */
	if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
		fixed_mode = drm_mode_duplicate(dev,
					dev_priv->vbt.lfp_lvds_vbt_mode);
		if (fixed_mode)
			fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
	}
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		if (IS_CHERRYVIEW(dev))
			pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
		else
			pipe = PORT_TO_PIPE(intel_dp->DP);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	return true;
}
5836
/*
 * Initialize the DP connector for an already-allocated digital port:
 * install per-platform AUX vfuncs, create and register the DRM connector,
 * wire up hotplug pins, set up the panel power sequencer for eDP and run
 * eDP connector init.  Returns false (after full cleanup via the fail
 * label) on any error so the caller can free the port.
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;
	int type, ret;

	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp->pps_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;

	if (INTEL_INFO(dev)->gen >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;

	if (HAS_DDI(dev))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_edp(dev, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
		    is_edp(intel_dp) && port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
			type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
			port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
			  edp_panel_vdd_work);

	intel_connector_attach_encoder(intel_connector, intel_encoder);
	drm_connector_register(connector);

	if (HAS_DDI(dev))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;
	intel_connector->unregister = intel_dp_connector_unregister;

	/* Set up the hotplug pin. */
	switch (port) {
	case PORT_A:
		intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_B:
		intel_encoder->hpd_pin = HPD_PORT_B;
		/* early BXT steppings route port B hotplug to pin A */
		if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
			intel_encoder->hpd_pin = HPD_PORT_A;
		break;
	case PORT_C:
		intel_encoder->hpd_pin = HPD_PORT_C;
		break;
	case PORT_D:
		intel_encoder->hpd_pin = HPD_PORT_D;
		break;
	case PORT_E:
		intel_encoder->hpd_pin = HPD_PORT_E;
		break;
	default:
		BUG();
	}

	if (is_edp(intel_dp)) {
		pps_lock(intel_dp);
		intel_dp_init_panel_power_timestamps(intel_dp);
		if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
			vlv_initial_power_sequencer_setup(intel_dp);
		else
			intel_dp_init_panel_power_sequencer(dev, intel_dp);
		pps_unlock(intel_dp);
	}

	ret = intel_dp_aux_init(intel_dp, intel_connector);
	if (ret)
		goto fail;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev) &&
	    (port == PORT_B || port == PORT_C || port == PORT_D))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G4X(dev) && !IS_GM45(dev)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	i915_debugfs_connector_add(connector);

	return true;

fail:
	if (is_edp(intel_dp)) {
		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		pps_lock(intel_dp);
		edp_panel_vdd_off_sync(intel_dp);
		pps_unlock(intel_dp);
	}
	drm_connector_unregister(connector);
	drm_connector_cleanup(connector);

	return false;
}
f0fec3f2
PZ
6000
/*
 * Allocate and initialize a DP digital port + connector for @output_reg on
 * @port.  Installs per-platform (CHV / VLV / others) encoder enable hooks,
 * then hands off to intel_dp_init_connector().  All failures unwind through
 * the err_* labels in reverse allocation order; the function reports no
 * error to the caller.
 */
void
intel_dp_init(struct drm_device *dev,
	      i915_reg_t output_reg, enum port port)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
	if (!intel_dig_port)
		return;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &intel_dig_port->base;
	encoder = &intel_encoder->base;

	if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
			     DRM_MODE_ENCODER_TMDS, NULL))
		goto err_encoder_init;

	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->disable = intel_disable_dp;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* platform-specific enable/disable sequencing hooks */
	if (IS_CHERRYVIEW(dev)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		if (INTEL_INFO(dev)->gen >= 5)
			intel_encoder->post_disable = ilk_post_disable_dp;
	}

	intel_dig_port->port = port;
	dev_priv->dig_port_map[port] = intel_encoder;
	intel_dig_port->dp.output_reg = output_reg;
	intel_dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
	/* CHV port D is limited to pipe C; everything else gets A/B (+C) */
	if (IS_CHERRYVIEW(dev)) {
		if (port == PORT_D)
			intel_encoder->crtc_mask = 1 << 2;
		else
			intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
	} else {
		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
	}
	intel_encoder->cloneable = 0;

	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
	dev_priv->hotplug.irq_port[port] = intel_dig_port;

	if (!intel_dp_init_connector(intel_dig_port, intel_connector))
		goto err_init_connector;

	return;

err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(intel_dig_port);

	return;
}
0e32b39c
DA
6082
6083void intel_dp_mst_suspend(struct drm_device *dev)
6084{
6085 struct drm_i915_private *dev_priv = dev->dev_private;
6086 int i;
6087
6088 /* disable MST */
6089 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6090 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6091 if (!intel_dig_port)
6092 continue;
6093
6094 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6095 if (!intel_dig_port->dp.can_mst)
6096 continue;
6097 if (intel_dig_port->dp.is_mst)
6098 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6099 }
6100 }
6101}
6102
6103void intel_dp_mst_resume(struct drm_device *dev)
6104{
6105 struct drm_i915_private *dev_priv = dev->dev_private;
6106 int i;
6107
6108 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6109 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6110 if (!intel_dig_port)
6111 continue;
6112 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6113 int ret;
6114
6115 if (!intel_dig_port->dp.can_mst)
6116 continue;
6117
6118 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6119 if (ret != 0) {
6120 intel_dp_check_mst_status(&intel_dig_port->dp);
6121 }
6122 }
6123 }
6124}
This page took 1.281653 seconds and 5 git commands to generate.