drm/i915: Refactor intel_surf_alignment()
drivers/gpu/drm/i915/intel_dp.c
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
01527b31
CT
31#include <linux/notifier.h>
32#include <linux/reboot.h>
760285e7 33#include <drm/drmP.h>
c6f95f27 34#include <drm/drm_atomic_helper.h>
760285e7
DH
35#include <drm/drm_crtc.h>
36#include <drm/drm_crtc_helper.h>
37#include <drm/drm_edid.h>
a4fc5ed6 38#include "intel_drv.h"
760285e7 39#include <drm/i915_drm.h>
a4fc5ed6 40#include "i915_drv.h"
a4fc5ed6 41
a4fc5ed6
KP
42#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
559be30c
TP
44/* Compliance test status bits */
45#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
9dd4ffdf 50struct dp_link_dpll {
840b32b7 51 int clock;
9dd4ffdf
CML
52 struct dpll dpll;
53};
54
55static const struct dp_link_dpll gen4_dpll[] = {
840b32b7 56 { 162000,
9dd4ffdf 57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 58 { 270000,
9dd4ffdf
CML
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60};
61
62static const struct dp_link_dpll pch_dpll[] = {
840b32b7 63 { 162000,
9dd4ffdf 64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 65 { 270000,
9dd4ffdf
CML
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67};
68
65ce4bf5 69static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 70 { 162000,
58f6e632 71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 72 { 270000,
65ce4bf5
CML
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74};
75
ef9348c8
CML
76/*
77 * CHV supports eDP 1.4, which has more link rates.
78 * Below we only provide the fixed rates and exclude the variable rates.
79 */
80static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming fractional division for m2.
83 * m2 is stored in fixed point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
840b32b7 86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 88 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
840b32b7 90 { 540000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8
CML
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92};
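/*
 * Worked example of the fixed-point m2 encoding above, using the 162000
 * entry: m2_int = 32 and m2_fraction = 1677722, so
 * (32 << 22) | 1677722 == 0x8000000 | 0x19999a == 0x819999a.
 */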
637a9c63 93
64987fc5
SJ
94static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
637a9c63 96static const int skl_rates[] = { 162000, 216000, 270000,
f4896f15
VS
97 324000, 432000, 540000 };
98static const int default_rates[] = { 162000, 270000, 540000 };
ef9348c8 99
cfcb0fc9
JB
100/**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
106 */
107static bool is_edp(struct intel_dp *intel_dp)
108{
da63a9f2
PZ
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
112}
113
68b4d824 114static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 115{
68b4d824
ID
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
119}
120
df0e9248
CW
121static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122{
fa90ecef 123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
124}
125
ea5b213a 126static void intel_dp_link_down(struct intel_dp *intel_dp);
1e0560e0 127static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 128static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
093e3f13 129static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
a8c3344e
VS
130static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
a4fc5ed6 132
e0fce78f
VS
133static unsigned int intel_dp_unused_lane_mask(int lane_count)
134{
135 return ~((1 << lane_count) - 1) & 0xf;
136}
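/*
 * Example: for lane_count == 2 the used-lane mask is 0x3, so the unused-lane
 * mask returned above is ~0x3 & 0xf == 0xc (lanes 2 and 3).
 */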
137
ed4e9c1d
VS
138static int
139intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 140{
7183dc29 141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
1db10e28 146 case DP_LINK_BW_5_4:
d4eead50 147 break;
a4fc5ed6 148 default:
d4eead50
ID
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
a4fc5ed6
KP
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155}
156
eeb6324d
PZ
157static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158{
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
eeb6324d
PZ
160 u8 source_max, sink_max;
161
ccb1a831 162 source_max = intel_dig_port->max_lanes;
eeb6324d
PZ
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166}
167
cd9dde44
AJ
168/*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
a4fc5ed6 185static int
c898261c 186intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 187{
cd9dde44 188 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
189}
190
fe27d53e
DA
191static int
192intel_dp_max_data_rate(int max_link_clock, int max_lanes)
193{
194 return (max_link_clock * max_lanes * 8) / 10;
195}
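/*
 * Completing the example from the comment above: 1680x1050R at 119000 kHz
 * and 18 bpp needs intel_dp_link_required(119000, 18) == 214200, which fits
 * within intel_dp_max_data_rate(270000, 1) == 216000 for a single 2.7 GHz lane.
 */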
196
c19de8eb 197static enum drm_mode_status
a4fc5ed6
KP
198intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
200{
df0e9248 201 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 206
dd06f90e
JN
207 if (is_edp(intel_dp) && fixed_mode) {
208 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
209 return MODE_PANEL;
210
dd06f90e 211 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 212 return MODE_PANEL;
03afc4a2
DV
213
214 target_clock = fixed_mode->clock;
7de56f43
ZY
215 }
216
50fec21a 217 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 218 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
219
220 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
221 mode_rate = intel_dp_link_required(target_clock, 18);
222
223 if (mode_rate > max_rate)
c4867936 224 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
225
226 if (mode->clock < 10000)
227 return MODE_CLOCK_LOW;
228
0af78a2b
DV
229 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
230 return MODE_H_ILLEGAL;
231
a4fc5ed6
KP
232 return MODE_OK;
233}
234
a4f1289e 235uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
a4fc5ed6
KP
236{
237 int i;
238 uint32_t v = 0;
239
240 if (src_bytes > 4)
241 src_bytes = 4;
242 for (i = 0; i < src_bytes; i++)
243 v |= ((uint32_t) src[i]) << ((3-i) * 8);
244 return v;
245}
246
c2af70e2 247static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
a4fc5ed6
KP
248{
249 int i;
250 if (dst_bytes > 4)
251 dst_bytes = 4;
252 for (i = 0; i < dst_bytes; i++)
253 dst[i] = src >> ((3-i) * 8);
254}
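/*
 * Example: intel_dp_pack_aux((const uint8_t []){ 0x11, 0x22, 0x33 }, 3)
 * returns 0x11223300 -- bytes are packed MSB-first into the 32-bit data
 * register. intel_dp_unpack_aux(0x11223300, dst, 3) reverses the operation.
 */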
255
bf13e81b
JN
256static void
257intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 258 struct intel_dp *intel_dp);
bf13e81b
JN
259static void
260intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 261 struct intel_dp *intel_dp);
bf13e81b 262
773538e8
VS
263static void pps_lock(struct intel_dp *intel_dp)
264{
265 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
266 struct intel_encoder *encoder = &intel_dig_port->base;
267 struct drm_device *dev = encoder->base.dev;
268 struct drm_i915_private *dev_priv = dev->dev_private;
269 enum intel_display_power_domain power_domain;
270
271 /*
272 * See vlv_power_sequencer_reset() why we need
273 * a power domain reference here.
274 */
25f78f58 275 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
276 intel_display_power_get(dev_priv, power_domain);
277
278 mutex_lock(&dev_priv->pps_mutex);
279}
280
281static void pps_unlock(struct intel_dp *intel_dp)
282{
283 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
284 struct intel_encoder *encoder = &intel_dig_port->base;
285 struct drm_device *dev = encoder->base.dev;
286 struct drm_i915_private *dev_priv = dev->dev_private;
287 enum intel_display_power_domain power_domain;
288
289 mutex_unlock(&dev_priv->pps_mutex);
290
25f78f58 291 power_domain = intel_display_port_aux_power_domain(encoder);
773538e8
VS
292 intel_display_power_put(dev_priv, power_domain);
293}
294
961a0db0
VS
295static void
296vlv_power_sequencer_kick(struct intel_dp *intel_dp)
297{
298 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
299 struct drm_device *dev = intel_dig_port->base.base.dev;
300 struct drm_i915_private *dev_priv = dev->dev_private;
301 enum pipe pipe = intel_dp->pps_pipe;
0047eedc
VS
302 bool pll_enabled, release_cl_override = false;
303 enum dpio_phy phy = DPIO_PHY(pipe);
304 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
961a0db0
VS
305 uint32_t DP;
306
307 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
308 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
309 pipe_name(pipe), port_name(intel_dig_port->port)))
310 return;
311
312 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
313 pipe_name(pipe), port_name(intel_dig_port->port));
314
315 /* Preserve the BIOS-computed detected bit. This is
316 * supposed to be read-only.
317 */
318 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
319 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
320 DP |= DP_PORT_WIDTH(1);
321 DP |= DP_LINK_TRAIN_PAT_1;
322
323 if (IS_CHERRYVIEW(dev))
324 DP |= DP_PIPE_SELECT_CHV(pipe);
325 else if (pipe == PIPE_B)
326 DP |= DP_PIPEB_SELECT;
327
d288f65f
VS
328 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
329
330 /*
331 * The DPLL for the pipe must be enabled for this to work.
332 * So enable it temporarily if it's not already enabled.
333 */
0047eedc
VS
334 if (!pll_enabled) {
335 release_cl_override = IS_CHERRYVIEW(dev) &&
336 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
337
d288f65f
VS
338 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
339 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
0047eedc 340 }
d288f65f 341
961a0db0
VS
342 /*
343 * Similar magic as in intel_dp_enable_port().
344 * We _must_ do this port enable + disable trick
345 * to make this power sequencer lock onto the port.
346 * Otherwise even the VDD force bit won't work.
347 */
348 I915_WRITE(intel_dp->output_reg, DP);
349 POSTING_READ(intel_dp->output_reg);
350
351 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
352 POSTING_READ(intel_dp->output_reg);
353
354 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
355 POSTING_READ(intel_dp->output_reg);
d288f65f 356
0047eedc 357 if (!pll_enabled) {
d288f65f 358 vlv_force_pll_off(dev, pipe);
0047eedc
VS
359
360 if (release_cl_override)
361 chv_phy_powergate_ch(dev_priv, phy, ch, false);
362 }
961a0db0
VS
363}
364
bf13e81b
JN
365static enum pipe
366vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
367{
368 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bf13e81b
JN
369 struct drm_device *dev = intel_dig_port->base.base.dev;
370 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
371 struct intel_encoder *encoder;
372 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
a8c3344e 373 enum pipe pipe;
bf13e81b 374
e39b999a 375 lockdep_assert_held(&dev_priv->pps_mutex);
bf13e81b 376
a8c3344e
VS
377 /* We should never land here with regular DP ports */
378 WARN_ON(!is_edp(intel_dp));
379
a4a5d2f8
VS
380 if (intel_dp->pps_pipe != INVALID_PIPE)
381 return intel_dp->pps_pipe;
382
383 /*
384 * We don't have a power sequencer currently.
385 * Pick one that's not used by other ports.
386 */
19c8054c 387 for_each_intel_encoder(dev, encoder) {
a4a5d2f8
VS
388 struct intel_dp *tmp;
389
390 if (encoder->type != INTEL_OUTPUT_EDP)
391 continue;
392
393 tmp = enc_to_intel_dp(&encoder->base);
394
395 if (tmp->pps_pipe != INVALID_PIPE)
396 pipes &= ~(1 << tmp->pps_pipe);
397 }
398
399 /*
400 * Didn't find one. This should not happen since there
401 * are two power sequencers and up to two eDP ports.
402 */
403 if (WARN_ON(pipes == 0))
a8c3344e
VS
404 pipe = PIPE_A;
405 else
406 pipe = ffs(pipes) - 1;
a4a5d2f8 407
a8c3344e
VS
408 vlv_steal_power_sequencer(dev, pipe);
409 intel_dp->pps_pipe = pipe;
a4a5d2f8
VS
410
411 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
412 pipe_name(intel_dp->pps_pipe),
413 port_name(intel_dig_port->port));
414
415 /* init power sequencer on this pipe and port */
36b5f425
VS
416 intel_dp_init_panel_power_sequencer(dev, intel_dp);
417 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8 418
961a0db0
VS
419 /*
420 * Even vdd force doesn't work until we've made
421 * the power sequencer lock in on the port.
422 */
423 vlv_power_sequencer_kick(intel_dp);
a4a5d2f8
VS
424
425 return intel_dp->pps_pipe;
426}
427
6491ab27
VS
428typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
429 enum pipe pipe);
430
431static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
432 enum pipe pipe)
433{
434 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
435}
436
437static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
438 enum pipe pipe)
439{
440 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
441}
442
443static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
444 enum pipe pipe)
445{
446 return true;
447}
bf13e81b 448
a4a5d2f8 449static enum pipe
6491ab27
VS
450vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
451 enum port port,
452 vlv_pipe_check pipe_check)
a4a5d2f8
VS
453{
454 enum pipe pipe;
bf13e81b 455
bf13e81b
JN
456 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
457 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
458 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
459
460 if (port_sel != PANEL_PORT_SELECT_VLV(port))
461 continue;
462
6491ab27
VS
463 if (!pipe_check(dev_priv, pipe))
464 continue;
465
a4a5d2f8 466 return pipe;
bf13e81b
JN
467 }
468
a4a5d2f8
VS
469 return INVALID_PIPE;
470}
471
472static void
473vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
474{
475 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
476 struct drm_device *dev = intel_dig_port->base.base.dev;
477 struct drm_i915_private *dev_priv = dev->dev_private;
a4a5d2f8
VS
478 enum port port = intel_dig_port->port;
479
480 lockdep_assert_held(&dev_priv->pps_mutex);
481
482 /* try to find a pipe with this port selected */
6491ab27
VS
483 /* first pick one where the panel is on */
484 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
485 vlv_pipe_has_pp_on);
486 /* didn't find one? pick one where vdd is on */
487 if (intel_dp->pps_pipe == INVALID_PIPE)
488 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
489 vlv_pipe_has_vdd_on);
490 /* didn't find one? pick one with just the correct port */
491 if (intel_dp->pps_pipe == INVALID_PIPE)
492 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
493 vlv_pipe_any);
a4a5d2f8
VS
494
495 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
496 if (intel_dp->pps_pipe == INVALID_PIPE) {
497 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
498 port_name(port));
499 return;
bf13e81b
JN
500 }
501
a4a5d2f8
VS
502 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
503 port_name(port), pipe_name(intel_dp->pps_pipe));
504
36b5f425
VS
505 intel_dp_init_panel_power_sequencer(dev, intel_dp);
506 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
bf13e81b
JN
507}
508
773538e8
VS
509void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
510{
511 struct drm_device *dev = dev_priv->dev;
512 struct intel_encoder *encoder;
513
666a4537 514 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
773538e8
VS
515 return;
516
517 /*
518 * We can't grab pps_mutex here due to deadlock with power_domain
519 * mutex when power_domain functions are called while holding pps_mutex.
520 * That also means that in order to use pps_pipe the code needs to
521 * hold both a power domain reference and pps_mutex, and the power domain
522 * reference get/put must be done while _not_ holding pps_mutex.
523 * pps_{lock,unlock}() do these steps in the correct order, so they
524 * should always be used.
525 */
526
19c8054c 527 for_each_intel_encoder(dev, encoder) {
773538e8
VS
528 struct intel_dp *intel_dp;
529
530 if (encoder->type != INTEL_OUTPUT_EDP)
531 continue;
532
533 intel_dp = enc_to_intel_dp(&encoder->base);
534 intel_dp->pps_pipe = INVALID_PIPE;
535 }
bf13e81b
JN
536}
537
f0f59a00
VS
538static i915_reg_t
539_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b
JN
540{
541 struct drm_device *dev = intel_dp_to_dev(intel_dp);
542
b0a08bec
VK
543 if (IS_BROXTON(dev))
544 return BXT_PP_CONTROL(0);
545 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
546 return PCH_PP_CONTROL;
547 else
548 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
549}
550
f0f59a00
VS
551static i915_reg_t
552_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b
JN
553{
554 struct drm_device *dev = intel_dp_to_dev(intel_dp);
555
b0a08bec
VK
556 if (IS_BROXTON(dev))
557 return BXT_PP_STATUS(0);
558 else if (HAS_PCH_SPLIT(dev))
bf13e81b
JN
559 return PCH_PP_STATUS;
560 else
561 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
562}
563
01527b31
CT
564/* Reboot notifier handler to shut down panel power to guarantee T12 timing.
565 This function is only applicable when the panel PM state is not to be tracked. */
566static int edp_notify_handler(struct notifier_block *this, unsigned long code,
567 void *unused)
568{
569 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
570 edp_notifier);
571 struct drm_device *dev = intel_dp_to_dev(intel_dp);
572 struct drm_i915_private *dev_priv = dev->dev_private;
01527b31
CT
573
574 if (!is_edp(intel_dp) || code != SYS_RESTART)
575 return 0;
576
773538e8 577 pps_lock(intel_dp);
e39b999a 578
666a4537 579 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e39b999a 580 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
f0f59a00 581 i915_reg_t pp_ctrl_reg, pp_div_reg;
649636ef 582 u32 pp_div;
e39b999a 583
01527b31
CT
584 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
585 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
586 pp_div = I915_READ(pp_div_reg);
587 pp_div &= PP_REFERENCE_DIVIDER_MASK;
588
589 /* 0x1F write to PP_DIV_REG sets max cycle delay */
590 I915_WRITE(pp_div_reg, pp_div | 0x1F);
591 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
592 msleep(intel_dp->panel_power_cycle_delay);
593 }
594
773538e8 595 pps_unlock(intel_dp);
e39b999a 596
01527b31
CT
597 return 0;
598}
599
4be73780 600static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 601{
30add22d 602 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
603 struct drm_i915_private *dev_priv = dev->dev_private;
604
e39b999a
VS
605 lockdep_assert_held(&dev_priv->pps_mutex);
606
666a4537 607 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
608 intel_dp->pps_pipe == INVALID_PIPE)
609 return false;
610
bf13e81b 611 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
612}
613
4be73780 614static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 615{
30add22d 616 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18
KP
617 struct drm_i915_private *dev_priv = dev->dev_private;
618
e39b999a
VS
619 lockdep_assert_held(&dev_priv->pps_mutex);
620
666a4537 621 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
9a42356b
VS
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
773538e8 625 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
626}
627
9b984dae
KP
628static void
629intel_dp_check_edp(struct intel_dp *intel_dp)
630{
30add22d 631 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 632 struct drm_i915_private *dev_priv = dev->dev_private;
ebf33b18 633
9b984dae
KP
634 if (!is_edp(intel_dp))
635 return;
453c5420 636
4be73780 637 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
638 WARN(1, "eDP powered off while attempting aux channel communication.\n");
639 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
bf13e81b
JN
640 I915_READ(_pp_stat_reg(intel_dp)),
641 I915_READ(_pp_ctrl_reg(intel_dp)));
9b984dae
KP
642 }
643}
644
9ee32fea
DV
645static uint32_t
646intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
647{
648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
649 struct drm_device *dev = intel_dig_port->base.base.dev;
650 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 651 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
652 uint32_t status;
653 bool done;
654
ef04f00d 655#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 656 if (has_aux_irq)
b18ac466 657 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 658 msecs_to_jiffies_timeout(10));
9ee32fea
DV
659 else
660 done = wait_for_atomic(C, 10) == 0;
661 if (!done)
662 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
663 has_aux_irq);
664#undef C
665
666 return status;
667}
668
ec5b01dd 669static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 670{
174edf1f
PZ
671 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
672 struct drm_device *dev = intel_dig_port->base.base.dev;
9ee32fea 673
ec5b01dd
DL
674 /*
675 * The clock divider is based on hrawclk, and the AUX clock should run at
676 * 2MHz. So take the hrawclk value, divide by 2 and use that.
a4fc5ed6 677 */
fce18c4c 678 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
ec5b01dd
DL
679}
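/*
 * For example (assuming intel_hrawclk() reports the raw clock in MHz):
 * a 200 MHz hrawclk gives DIV_ROUND_CLOSEST(200, 2) == 100, i.e. the
 * divided clock runs at the desired 2 MHz.
 */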
680
681static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
682{
683 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
684 struct drm_device *dev = intel_dig_port->base.base.dev;
469d4b2a 685 struct drm_i915_private *dev_priv = dev->dev_private;
ec5b01dd
DL
686
687 if (index)
688 return 0;
689
690 if (intel_dig_port->port == PORT_A) {
fce18c4c 691 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
05024da3 692
ec5b01dd 693 } else {
fce18c4c 694 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
ec5b01dd
DL
695 }
696}
697
698static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
699{
700 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
701 struct drm_device *dev = intel_dig_port->base.base.dev;
702 struct drm_i915_private *dev_priv = dev->dev_private;
703
704 if (intel_dig_port->port == PORT_A) {
705 if (index)
706 return 0;
05024da3 707 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
56f5f700 708 } else if (HAS_PCH_LPT_H(dev_priv)) {
2c55c336 709 /* Workaround for non-ULT HSW */
bc86625a
CW
710 switch (index) {
711 case 0: return 63;
712 case 1: return 72;
713 default: return 0;
714 }
ec5b01dd 715 } else {
fce18c4c 716 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
2c55c336 717 }
b84a1cf8
RV
718}
719
ec5b01dd
DL
720static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
721{
722 return index ? 0 : 100;
723}
724
b6b5e383
DL
725static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
726{
727 /*
728 * SKL doesn't need us to program the AUX clock divider (Hardware will
729 * derive the clock from CDCLK automatically). We still implement the
730 * get_aux_clock_divider vfunc to plug into the existing code.
731 */
732 return index ? 0 : 1;
733}
734
5ed12a19
DL
735static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
736 bool has_aux_irq,
737 int send_bytes,
738 uint32_t aux_clock_divider)
739{
740 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
741 struct drm_device *dev = intel_dig_port->base.base.dev;
742 uint32_t precharge, timeout;
743
744 if (IS_GEN6(dev))
745 precharge = 3;
746 else
747 precharge = 5;
748
f3c6a3a7 749 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
5ed12a19
DL
750 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
751 else
752 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
753
754 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 755 DP_AUX_CH_CTL_DONE |
5ed12a19 756 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788d4433 757 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 758 timeout |
788d4433 759 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
760 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
761 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 762 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
763}
764
b9ca5fad
DL
765static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
766 bool has_aux_irq,
767 int send_bytes,
768 uint32_t unused)
769{
770 return DP_AUX_CH_CTL_SEND_BUSY |
771 DP_AUX_CH_CTL_DONE |
772 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
773 DP_AUX_CH_CTL_TIME_OUT_ERROR |
774 DP_AUX_CH_CTL_TIME_OUT_1600us |
775 DP_AUX_CH_CTL_RECEIVE_ERROR |
776 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
777 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
778}
779
b84a1cf8
RV
780static int
781intel_dp_aux_ch(struct intel_dp *intel_dp,
bd9f74a5 782 const uint8_t *send, int send_bytes,
b84a1cf8
RV
783 uint8_t *recv, int recv_size)
784{
785 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
786 struct drm_device *dev = intel_dig_port->base.base.dev;
787 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 788 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
bc86625a 789 uint32_t aux_clock_divider;
b84a1cf8
RV
790 int i, ret, recv_bytes;
791 uint32_t status;
5ed12a19 792 int try, clock = 0;
4e6b788c 793 bool has_aux_irq = HAS_AUX_IRQ(dev);
884f19e9
JN
794 bool vdd;
795
773538e8 796 pps_lock(intel_dp);
e39b999a 797
72c3500a
VS
798 /*
799 * We will be called with VDD already enabled for dpcd/edid/oui reads.
800 * In such cases we want to leave VDD enabled and it's up to upper layers
801 * to turn it off. But for, e.g., i2c-dev access we need to turn it on/off
802 * ourselves.
803 */
1e0560e0 804 vdd = edp_panel_vdd_on(intel_dp);
b84a1cf8
RV
805
806 /* dp aux is extremely sensitive to irq latency, hence request the
807 * lowest possible wakeup latency and so prevent the cpu from going into
808 * deep sleep states.
809 */
810 pm_qos_update_request(&dev_priv->pm_qos, 0);
811
812 intel_dp_check_edp(intel_dp);
5eb08b69 813
11bee43e
JB
814 /* Try to wait for any previous AUX channel activity */
815 for (try = 0; try < 3; try++) {
ef04f00d 816 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
817 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
818 break;
819 msleep(1);
820 }
821
822 if (try == 3) {
02196c77
MK
823 static u32 last_status = -1;
824 const u32 status = I915_READ(ch_ctl);
825
826 if (status != last_status) {
827 WARN(1, "dp_aux_ch not started status 0x%08x\n",
828 status);
829 last_status = status;
830 }
831
9ee32fea
DV
832 ret = -EBUSY;
833 goto out;
4f7f7b7e
CW
834 }
835
46a5ae9f
PZ
836 /* Only 5 data registers! */
837 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
838 ret = -E2BIG;
839 goto out;
840 }
841
ec5b01dd 842 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
153b1100
DL
843 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
844 has_aux_irq,
845 send_bytes,
846 aux_clock_divider);
5ed12a19 847
bc86625a
CW
848 /* Must try at least 3 times according to DP spec */
849 for (try = 0; try < 5; try++) {
850 /* Load the send data into the aux channel data registers */
851 for (i = 0; i < send_bytes; i += 4)
330e20ec 852 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
a4f1289e
RV
853 intel_dp_pack_aux(send + i,
854 send_bytes - i));
bc86625a
CW
855
856 /* Send the command and wait for it to complete */
5ed12a19 857 I915_WRITE(ch_ctl, send_ctl);
bc86625a
CW
858
859 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
860
861 /* Clear done status and any errors */
862 I915_WRITE(ch_ctl,
863 status |
864 DP_AUX_CH_CTL_DONE |
865 DP_AUX_CH_CTL_TIME_OUT_ERROR |
866 DP_AUX_CH_CTL_RECEIVE_ERROR);
867
74ebf294 868 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
bc86625a 869 continue;
74ebf294
TP
870
871 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
872 * 400us delay required for errors and timeouts.
873 * Timeout errors from the HW already meet this
874 * requirement, so skip to the next iteration.
875 */
876 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
877 usleep_range(400, 500);
bc86625a 878 continue;
74ebf294 879 }
bc86625a 880 if (status & DP_AUX_CH_CTL_DONE)
e058c945 881 goto done;
bc86625a 882 }
a4fc5ed6
KP
883 }
884
a4fc5ed6 885 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 886 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
887 ret = -EBUSY;
888 goto out;
a4fc5ed6
KP
889 }
890
e058c945 891done:
a4fc5ed6
KP
892 /* Check for timeout or receive error.
893 * Timeouts occur when the sink is not connected
894 */
a5b3da54 895 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 896 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
897 ret = -EIO;
898 goto out;
a5b3da54 899 }
1ae8c0a5
KP
900
901 /* Timeouts occur when the device isn't connected, so they're
902 * "normal" -- don't fill the kernel log with these */
a5b3da54 903 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 904 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
905 ret = -ETIMEDOUT;
906 goto out;
a4fc5ed6
KP
907 }
908
909 /* Unload any bytes sent back from the other side */
910 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
911 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
14e01889
RV
912
913 /*
914 * By BSpec: "Message sizes of 0 or >20 are not allowed."
915 * We have no idea what happened, so we return -EBUSY so the
916 * drm layer takes care of the necessary retries.
917 */
918 if (recv_bytes == 0 || recv_bytes > 20) {
919 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
920 recv_bytes);
921 /*
922 * FIXME: This patch was created on top of a series that
923 * reorganizes the retries at the drm level. There, EBUSY should
924 * also take care of the 1ms wait before retrying.
925 * That aux retry reorg is still needed, and once it is
926 * merged we can remove this sleep from here.
927 */
928 usleep_range(1000, 1500);
929 ret = -EBUSY;
930 goto out;
931 }
932
a4fc5ed6
KP
933 if (recv_bytes > recv_size)
934 recv_bytes = recv_size;
0206e353 935
4f7f7b7e 936 for (i = 0; i < recv_bytes; i += 4)
330e20ec 937 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
a4f1289e 938 recv + i, recv_bytes - i);
a4fc5ed6 939
9ee32fea
DV
940 ret = recv_bytes;
941out:
942 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
943
884f19e9
JN
944 if (vdd)
945 edp_panel_vdd_off(intel_dp, false);
946
773538e8 947 pps_unlock(intel_dp);
e39b999a 948
9ee32fea 949 return ret;
a4fc5ed6
KP
950}
951
a6c8aff0
JN
952#define BARE_ADDRESS_SIZE 3
953#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
9d1a1031
JN
954static ssize_t
955intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
a4fc5ed6 956{
9d1a1031
JN
957 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
958 uint8_t txbuf[20], rxbuf[20];
959 size_t txsize, rxsize;
a4fc5ed6 960 int ret;
a4fc5ed6 961
d2d9cbbd
VS
962 txbuf[0] = (msg->request << 4) |
963 ((msg->address >> 16) & 0xf);
964 txbuf[1] = (msg->address >> 8) & 0xff;
9d1a1031
JN
965 txbuf[2] = msg->address & 0xff;
966 txbuf[3] = msg->size - 1;
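/*
 * Example header, for illustration: a 1-byte DP_AUX_NATIVE_WRITE
 * (request 0x8) to DPCD address 0x103 gives
 * txbuf[] = { 0x80, 0x01, 0x03, 0x00 }.
 */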
46a5ae9f 967
9d1a1031
JN
968 switch (msg->request & ~DP_AUX_I2C_MOT) {
969 case DP_AUX_NATIVE_WRITE:
970 case DP_AUX_I2C_WRITE:
c1e74122 971 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
a6c8aff0 972 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
a1ddefd8 973 rxsize = 2; /* 0 or 1 data bytes */
f51a44b9 974
9d1a1031
JN
975 if (WARN_ON(txsize > 20))
976 return -E2BIG;
a4fc5ed6 977
9d1a1031 978 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
a4fc5ed6 979
9d1a1031
JN
980 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
981 if (ret > 0) {
982 msg->reply = rxbuf[0] >> 4;
a4fc5ed6 983
a1ddefd8
JN
984 if (ret > 1) {
985 /* Number of bytes written in a short write. */
986 ret = clamp_t(int, rxbuf[1], 0, msg->size);
987 } else {
988 /* Return payload size. */
989 ret = msg->size;
990 }
9d1a1031
JN
991 }
992 break;
46a5ae9f 993
9d1a1031
JN
994 case DP_AUX_NATIVE_READ:
995 case DP_AUX_I2C_READ:
a6c8aff0 996 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
9d1a1031 997 rxsize = msg->size + 1;
a4fc5ed6 998
9d1a1031
JN
999 if (WARN_ON(rxsize > 20))
1000 return -E2BIG;
a4fc5ed6 1001
9d1a1031
JN
1002 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1003 if (ret > 0) {
1004 msg->reply = rxbuf[0] >> 4;
1005 /*
1006 * Assume happy day, and copy the data. The caller is
1007 * expected to check msg->reply before touching it.
1008 *
1009 * Return payload size.
1010 */
1011 ret--;
1012 memcpy(msg->buffer, rxbuf + 1, ret);
a4fc5ed6 1013 }
9d1a1031
JN
1014 break;
1015
1016 default:
1017 ret = -EINVAL;
1018 break;
a4fc5ed6 1019 }
f51a44b9 1020
9d1a1031 1021 return ret;
a4fc5ed6
KP
1022}
1023
f0f59a00
VS
1024static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1025 enum port port)
da00bdcf
VS
1026{
1027 switch (port) {
1028 case PORT_B:
1029 case PORT_C:
1030 case PORT_D:
1031 return DP_AUX_CH_CTL(port);
1032 default:
1033 MISSING_CASE(port);
1034 return DP_AUX_CH_CTL(PORT_B);
1035 }
1036}
1037
f0f59a00
VS
1038static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1039 enum port port, int index)
330e20ec
VS
1040{
1041 switch (port) {
1042 case PORT_B:
1043 case PORT_C:
1044 case PORT_D:
1045 return DP_AUX_CH_DATA(port, index);
1046 default:
1047 MISSING_CASE(port);
1048 return DP_AUX_CH_DATA(PORT_B, index);
1049 }
1050}
1051
f0f59a00
VS
1052static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1053 enum port port)
da00bdcf
VS
1054{
1055 switch (port) {
1056 case PORT_A:
1057 return DP_AUX_CH_CTL(port);
1058 case PORT_B:
1059 case PORT_C:
1060 case PORT_D:
1061 return PCH_DP_AUX_CH_CTL(port);
1062 default:
1063 MISSING_CASE(port);
1064 return DP_AUX_CH_CTL(PORT_A);
1065 }
1066}
1067
f0f59a00
VS
1068static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1069 enum port port, int index)
330e20ec
VS
1070{
1071 switch (port) {
1072 case PORT_A:
1073 return DP_AUX_CH_DATA(port, index);
1074 case PORT_B:
1075 case PORT_C:
1076 case PORT_D:
1077 return PCH_DP_AUX_CH_DATA(port, index);
1078 default:
1079 MISSING_CASE(port);
1080 return DP_AUX_CH_DATA(PORT_A, index);
1081 }
1082}
1083
da00bdcf
VS
1084/*
1085 * On SKL we don't have Aux for port E so we rely
1086 * on VBT to set a proper alternate aux channel.
1087 */
1088static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1089{
1090 const struct ddi_vbt_port_info *info =
1091 &dev_priv->vbt.ddi_port_info[PORT_E];
1092
1093 switch (info->alternate_aux_channel) {
1094 case DP_AUX_A:
1095 return PORT_A;
1096 case DP_AUX_B:
1097 return PORT_B;
1098 case DP_AUX_C:
1099 return PORT_C;
1100 case DP_AUX_D:
1101 return PORT_D;
1102 default:
1103 MISSING_CASE(info->alternate_aux_channel);
1104 return PORT_A;
1105 }
1106}
1107
f0f59a00
VS
1108static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1109 enum port port)
da00bdcf
VS
1110{
1111 if (port == PORT_E)
1112 port = skl_porte_aux_port(dev_priv);
1113
1114 switch (port) {
1115 case PORT_A:
1116 case PORT_B:
1117 case PORT_C:
1118 case PORT_D:
1119 return DP_AUX_CH_CTL(port);
1120 default:
1121 MISSING_CASE(port);
1122 return DP_AUX_CH_CTL(PORT_A);
1123 }
1124}
1125
f0f59a00
VS
1126static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1127 enum port port, int index)
330e20ec
VS
1128{
1129 if (port == PORT_E)
1130 port = skl_porte_aux_port(dev_priv);
1131
1132 switch (port) {
1133 case PORT_A:
1134 case PORT_B:
1135 case PORT_C:
1136 case PORT_D:
1137 return DP_AUX_CH_DATA(port, index);
1138 default:
1139 MISSING_CASE(port);
1140 return DP_AUX_CH_DATA(PORT_A, index);
1141 }
1142}
1143
f0f59a00
VS
1144static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1145 enum port port)
330e20ec
VS
1146{
1147 if (INTEL_INFO(dev_priv)->gen >= 9)
1148 return skl_aux_ctl_reg(dev_priv, port);
1149 else if (HAS_PCH_SPLIT(dev_priv))
1150 return ilk_aux_ctl_reg(dev_priv, port);
1151 else
1152 return g4x_aux_ctl_reg(dev_priv, port);
1153}
1154
f0f59a00
VS
1155static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1156 enum port port, int index)
330e20ec
VS
1157{
1158 if (INTEL_INFO(dev_priv)->gen >= 9)
1159 return skl_aux_data_reg(dev_priv, port, index);
1160 else if (HAS_PCH_SPLIT(dev_priv))
1161 return ilk_aux_data_reg(dev_priv, port, index);
1162 else
1163 return g4x_aux_data_reg(dev_priv, port, index);
1164}
1165
1166static void intel_aux_reg_init(struct intel_dp *intel_dp)
1167{
1168 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1169 enum port port = dp_to_dig_port(intel_dp)->port;
1170 int i;
1171
1172 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1173 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1174 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1175}
1176
9d1a1031 1177static void
a121f4e5
VS
1178intel_dp_aux_fini(struct intel_dp *intel_dp)
1179{
1180 drm_dp_aux_unregister(&intel_dp->aux);
1181 kfree(intel_dp->aux.name);
1182}
1183
1184static int
9d1a1031
JN
1185intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1186{
1187 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33ad6626
JN
1188 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1189 enum port port = intel_dig_port->port;
ab2c0672
DA
1190 int ret;
1191
330e20ec 1192 intel_aux_reg_init(intel_dp);
8316f337 1193
a121f4e5
VS
1194 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1195 if (!intel_dp->aux.name)
1196 return -ENOMEM;
1197
9d1a1031
JN
1198 intel_dp->aux.dev = dev->dev;
1199 intel_dp->aux.transfer = intel_dp_aux_transfer;
8316f337 1200
a121f4e5
VS
1201 DRM_DEBUG_KMS("registering %s bus for %s\n",
1202 intel_dp->aux.name,
0b99836f 1203 connector->base.kdev->kobj.name);
8316f337 1204
4f71d0cb 1205 ret = drm_dp_aux_register(&intel_dp->aux);
0b99836f 1206 if (ret < 0) {
4f71d0cb 1207 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
a121f4e5
VS
1208 intel_dp->aux.name, ret);
1209 kfree(intel_dp->aux.name);
1210 return ret;
ab2c0672 1211 }
8a5e6aeb 1212
0b99836f
JN
1213 ret = sysfs_create_link(&connector->base.kdev->kobj,
1214 &intel_dp->aux.ddc.dev.kobj,
1215 intel_dp->aux.ddc.dev.kobj.name);
1216 if (ret < 0) {
a121f4e5
VS
1217 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1218 intel_dp->aux.name, ret);
1219 intel_dp_aux_fini(intel_dp);
1220 return ret;
ab2c0672 1221 }
a121f4e5
VS
1222
1223 return 0;
a4fc5ed6
KP
1224}
1225
80f65de3
ID
1226static void
1227intel_dp_connector_unregister(struct intel_connector *intel_connector)
1228{
1229 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1230
0e32b39c
DA
1231 if (!intel_connector->mst_port)
1232 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1233 intel_dp->aux.ddc.dev.kobj.name);
80f65de3
ID
1234 intel_connector_unregister(intel_connector);
1235}
1236
5416d871 1237static void
840b32b7 1238skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
5416d871
DL
1239{
1240 u32 ctrl1;
1241
dd3cd74a
ACO
1242 memset(&pipe_config->dpll_hw_state, 0,
1243 sizeof(pipe_config->dpll_hw_state));
1244
5416d871
DL
1245 pipe_config->ddi_pll_sel = SKL_DPLL0;
1246 pipe_config->dpll_hw_state.cfgcr1 = 0;
1247 pipe_config->dpll_hw_state.cfgcr2 = 0;
1248
1249 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
840b32b7 1250 switch (pipe_config->port_clock / 2) {
c3346ef6 1251 case 81000:
71cd8423 1252 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5416d871
DL
1253 SKL_DPLL0);
1254 break;
c3346ef6 1255 case 135000:
71cd8423 1256 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
5416d871
DL
1257 SKL_DPLL0);
1258 break;
c3346ef6 1259 case 270000:
71cd8423 1260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
5416d871
DL
1261 SKL_DPLL0);
1262 break;
c3346ef6 1263 case 162000:
71cd8423 1264 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
c3346ef6
SJ
1265 SKL_DPLL0);
1266 break;
1267 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1268 results in a CDCLK change. Need to handle the change of CDCLK by
1269 disabling pipes and re-enabling them */
1270 case 108000:
71cd8423 1271 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
c3346ef6
SJ
1272 SKL_DPLL0);
1273 break;
1274 case 216000:
71cd8423 1275 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
c3346ef6
SJ
1276 SKL_DPLL0);
1277 break;
1278
5416d871
DL
1279 }
1280 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1281}
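/*
 * Note that the switch above is keyed on port_clock / 2: e.g. a 5.4 GHz link
 * (port_clock == 540000) hits case 270000 and selects DPLL_CTRL1_LINK_RATE_2700.
 */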
1282
6fa2d197 1283void
840b32b7 1284hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
0e50338c 1285{
ee46f3c7
ACO
1286 memset(&pipe_config->dpll_hw_state, 0,
1287 sizeof(pipe_config->dpll_hw_state));
1288
840b32b7
VS
1289 switch (pipe_config->port_clock / 2) {
1290 case 81000:
0e50338c
DV
1291 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1292 break;
840b32b7 1293 case 135000:
0e50338c
DV
1294 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1295 break;
840b32b7 1296 case 270000:
0e50338c
DV
1297 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1298 break;
1299 }
1300}
1301
fc0f8e25 1302static int
12f6a2e2 1303intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
fc0f8e25 1304{
94ca719e
VS
1305 if (intel_dp->num_sink_rates) {
1306 *sink_rates = intel_dp->sink_rates;
1307 return intel_dp->num_sink_rates;
fc0f8e25 1308 }
12f6a2e2
VS
1309
1310 *sink_rates = default_rates;
1311
1312 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
fc0f8e25
SJ
1313}
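/*
 * The (max_link_bw >> 3) + 1 expression maps the DPCD max link bw codes onto
 * a count of entries in default_rates[]: 0x06 (1.62 GHz) -> 1,
 * 0x0a (2.7 GHz) -> 2, 0x14 (5.4 GHz) -> 3.
 */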
1314
e588fa18 1315bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1316{
e588fa18
ACO
1317 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1318 struct drm_device *dev = dig_port->base.base.dev;
1319
ed63baaf 1320 /* WaDisableHBR2:skl */
e87a005d 1321 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
ed63baaf
TS
1322 return false;
1323
1324 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1325 (INTEL_INFO(dev)->gen >= 9))
1326 return true;
1327 else
1328 return false;
1329}
1330
a8f3ef61 1331static int
e588fa18 1332intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
a8f3ef61 1333{
e588fa18
ACO
1334 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1335 struct drm_device *dev = dig_port->base.base.dev;
af7080f5
TS
1336 int size;
1337
64987fc5
SJ
1338 if (IS_BROXTON(dev)) {
1339 *source_rates = bxt_rates;
af7080f5 1340 size = ARRAY_SIZE(bxt_rates);
ef11bdb3 1341 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
637a9c63 1342 *source_rates = skl_rates;
af7080f5
TS
1343 size = ARRAY_SIZE(skl_rates);
1344 } else {
1345 *source_rates = default_rates;
1346 size = ARRAY_SIZE(default_rates);
a8f3ef61 1347 }
636280ba 1348
ed63baaf 1349 /* This depends on the fact that 5.4 is the last value in the array */
e588fa18 1350 if (!intel_dp_source_supports_hbr2(intel_dp))
af7080f5 1351 size--;
636280ba 1352
af7080f5 1353 return size;
a8f3ef61
SJ
1354}
1355
c6bb3538
DV
1356static void
1357intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1358 struct intel_crtc_state *pipe_config)
c6bb3538
DV
1359{
1360 struct drm_device *dev = encoder->base.dev;
9dd4ffdf
CML
1361 const struct dp_link_dpll *divisor = NULL;
1362 int i, count = 0;
c6bb3538
DV
1363
1364 if (IS_G4X(dev)) {
9dd4ffdf
CML
1365 divisor = gen4_dpll;
1366 count = ARRAY_SIZE(gen4_dpll);
c6bb3538 1367 } else if (HAS_PCH_SPLIT(dev)) {
9dd4ffdf
CML
1368 divisor = pch_dpll;
1369 count = ARRAY_SIZE(pch_dpll);
ef9348c8
CML
1370 } else if (IS_CHERRYVIEW(dev)) {
1371 divisor = chv_dpll;
1372 count = ARRAY_SIZE(chv_dpll);
c6bb3538 1373 } else if (IS_VALLEYVIEW(dev)) {
65ce4bf5
CML
1374 divisor = vlv_dpll;
1375 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1376 }
9dd4ffdf
CML
1377
1378 if (divisor && count) {
1379 for (i = 0; i < count; i++) {
840b32b7 1380 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1381 pipe_config->dpll = divisor[i].dpll;
1382 pipe_config->clock_set = true;
1383 break;
1384 }
1385 }
c6bb3538
DV
1386 }
1387}
1388
2ecae76a
VS
1389static int intersect_rates(const int *source_rates, int source_len,
1390 const int *sink_rates, int sink_len,
94ca719e 1391 int *common_rates)
a8f3ef61
SJ
1392{
1393 int i = 0, j = 0, k = 0;
1394
a8f3ef61
SJ
1395 while (i < source_len && j < sink_len) {
1396 if (source_rates[i] == sink_rates[j]) {
e6bda3e4
VS
1397 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1398 return k;
94ca719e 1399 common_rates[k] = source_rates[i];
a8f3ef61
SJ
1400 ++k;
1401 ++i;
1402 ++j;
1403 } else if (source_rates[i] < sink_rates[j]) {
1404 ++i;
1405 } else {
1406 ++j;
1407 }
1408 }
1409 return k;
1410}
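/*
 * Both rate arrays are sorted in ascending order, so this is a standard
 * sorted-list intersection. E.g. source { 162000, 270000, 540000 } and
 * sink { 162000, 270000 } yield common_rates { 162000, 270000 }, return 2.
 */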
1411
94ca719e
VS
1412static int intel_dp_common_rates(struct intel_dp *intel_dp,
1413 int *common_rates)
2ecae76a 1414{
2ecae76a
VS
1415 const int *source_rates, *sink_rates;
1416 int source_len, sink_len;
1417
1418 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
e588fa18 1419 source_len = intel_dp_source_rates(intel_dp, &source_rates);
2ecae76a
VS
1420
1421 return intersect_rates(source_rates, source_len,
1422 sink_rates, sink_len,
94ca719e 1423 common_rates);
2ecae76a
VS
1424}
1425
0336400e
VS
1426static void snprintf_int_array(char *str, size_t len,
1427 const int *array, int nelem)
1428{
1429 int i;
1430
1431 str[0] = '\0';
1432
1433 for (i = 0; i < nelem; i++) {
b2f505be 1434 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
0336400e
VS
1435 if (r >= len)
1436 return;
1437 str += r;
1438 len -= r;
1439 }
1440}
1441
1442static void intel_dp_print_rates(struct intel_dp *intel_dp)
1443{
0336400e 1444 const int *source_rates, *sink_rates;
94ca719e
VS
1445 int source_len, sink_len, common_len;
1446 int common_rates[DP_MAX_SUPPORTED_RATES];
0336400e
VS
1447 char str[128]; /* FIXME: too big for stack? */
1448
1449 if ((drm_debug & DRM_UT_KMS) == 0)
1450 return;
1451
e588fa18 1452 source_len = intel_dp_source_rates(intel_dp, &source_rates);
0336400e
VS
1453 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1454 DRM_DEBUG_KMS("source rates: %s\n", str);
1455
1456 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1457 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1458 DRM_DEBUG_KMS("sink rates: %s\n", str);
1459
94ca719e
VS
1460 common_len = intel_dp_common_rates(intel_dp, common_rates);
1461 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1462 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1463}
1464
f4896f15 1465static int rate_to_index(int find, const int *rates)
a8f3ef61
SJ
1466{
1467 int i = 0;
1468
1469 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1470 if (find == rates[i])
1471 break;
1472
1473 return i;
1474}
1475
50fec21a
VS
1476int
1477intel_dp_max_link_rate(struct intel_dp *intel_dp)
1478{
1479 int rates[DP_MAX_SUPPORTED_RATES] = {};
1480 int len;
1481
94ca719e 1482 len = intel_dp_common_rates(intel_dp, rates);
50fec21a
VS
1483 if (WARN_ON(len <= 0))
1484 return 162000;
1485
1486 return rates[rate_to_index(0, rates) - 1];
1487}
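/*
 * rates[] is zero-initialized and filled in ascending order, so
 * rate_to_index(0, rates) finds the first empty slot and
 * rates[rate_to_index(0, rates) - 1] is the highest common link rate.
 */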
1488
ed4e9c1d
VS
1489int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1490{
94ca719e 1491 return rate_to_index(rate, intel_dp->sink_rates);
ed4e9c1d
VS
1492}
1493
94223d04
ACO
1494void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1495 uint8_t *link_bw, uint8_t *rate_select)
04a60f9f
VS
1496{
1497 if (intel_dp->num_sink_rates) {
1498 *link_bw = 0;
1499 *rate_select =
1500 intel_dp_rate_select(intel_dp, port_clock);
1501 } else {
1502 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1503 *rate_select = 0;
1504 }
1505}
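/*
 * E.g. for a sink without a DPCD rate table, 270000 kHz yields
 * link_bw == DP_LINK_BW_2_7 (0x0a) and rate_select == 0; with an eDP 1.4
 * rate table, link_bw is 0 and rate_select indexes intel_dp->sink_rates[].
 */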
1506
00c09d70 1507bool
5bfe2ac0 1508intel_dp_compute_config(struct intel_encoder *encoder,
5cec258b 1509 struct intel_crtc_state *pipe_config)
a4fc5ed6 1510{
5bfe2ac0 1511 struct drm_device *dev = encoder->base.dev;
36008365 1512 struct drm_i915_private *dev_priv = dev->dev_private;
2d112de7 1513 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
5bfe2ac0 1514 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1515 enum port port = dp_to_dig_port(intel_dp)->port;
84556d58 1516 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
dd06f90e 1517 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 1518 int lane_count, clock;
56071a20 1519 int min_lane_count = 1;
eeb6324d 1520 int max_lane_count = intel_dp_max_lane_count(intel_dp);
06ea66b6 1521 /* Conveniently, the link BW constants become indices with a shift...*/
56071a20 1522 int min_clock = 0;
a8f3ef61 1523 int max_clock;
083f9560 1524 int bpp, mode_rate;
ff9a6750 1525 int link_avail, link_clock;
94ca719e
VS
1526 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1527 int common_len;
04a60f9f 1528 uint8_t link_bw, rate_select;
a8f3ef61 1529
94ca719e 1530 common_len = intel_dp_common_rates(intel_dp, common_rates);
a8f3ef61
SJ
1531
1532 /* No common link rates between source and sink */
94ca719e 1533 WARN_ON(common_len <= 0);
a8f3ef61 1534
94ca719e 1535 max_clock = common_len - 1;
a4fc5ed6 1536
bc7d38a4 1537 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
1538 pipe_config->has_pch_encoder = true;
1539
03afc4a2 1540 pipe_config->has_dp_encoder = true;
f769cd24 1541 pipe_config->has_drrs = false;
9fcb1704 1542 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
a4fc5ed6 1543
dd06f90e
JN
1544 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1545 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1546 adjusted_mode);
a1b2278e
CK
1547
1548 if (INTEL_INFO(dev)->gen >= 9) {
1549 int ret;
e435d6e5 1550 ret = skl_update_scaler_crtc(pipe_config);
a1b2278e
CK
1551 if (ret)
1552 return ret;
1553 }
1554
b5667627 1555 if (HAS_GMCH_DISPLAY(dev))
2dd24552
JB
1556 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1557 intel_connector->panel.fitting_mode);
1558 else
b074cec8
JB
1559 intel_pch_panel_fitting(intel_crtc, pipe_config,
1560 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
1561 }
1562
cb1793ce 1563 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
1564 return false;
1565
083f9560 1566 DRM_DEBUG_KMS("DP link computation with max lane count %i "
a8f3ef61 1567 "max bw %d pixel clock %iKHz\n",
94ca719e 1568 max_lane_count, common_rates[max_clock],
241bfc38 1569 adjusted_mode->crtc_clock);
083f9560 1570
36008365
DV
1571 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1572 * bpc in between. */
3e7ca985 1573 bpp = pipe_config->pipe_bpp;
56071a20 1574 if (is_edp(intel_dp)) {
22ce5628
TS
1575
1576 /* Get bpp from vbt only for panels that don't have bpp in edid */
1577 if (intel_connector->base.display_info.bpc == 0 &&
1578 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
56071a20
JN
1579 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1580 dev_priv->vbt.edp_bpp);
1581 bpp = dev_priv->vbt.edp_bpp;
1582 }
1583
344c5bbc
JN
1584 /*
1585 * Use the maximum clock and number of lanes the eDP panel
1586 * advertises being capable of. The panels are generally
1587 * designed to support only a single clock and lane
1588 * configuration, and typically these values correspond to the
1589 * native resolution of the panel.
1590 */
1591 min_lane_count = max_lane_count;
1592 min_clock = max_clock;
7984211e 1593 }
657445fe 1594
36008365 1595 for (; bpp >= 6*3; bpp -= 2*3) {
241bfc38
DL
1596 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1597 bpp);
36008365 1598
c6930992 1599 for (clock = min_clock; clock <= max_clock; clock++) {
a8f3ef61
SJ
1600 for (lane_count = min_lane_count;
1601 lane_count <= max_lane_count;
1602 lane_count <<= 1) {
1603
94ca719e 1604 link_clock = common_rates[clock];
36008365
DV
1605 link_avail = intel_dp_max_data_rate(link_clock,
1606 lane_count);
1607
1608 if (mode_rate <= link_avail) {
1609 goto found;
1610 }
1611 }
1612 }
1613 }
c4867936 1614
36008365 1615 return false;
3685a8f3 1616
36008365 1617found:
55bc60db
VS
1618 if (intel_dp->color_range_auto) {
1619 /*
1620 * See:
1621 * CEA-861-E - 5.1 Default Encoding Parameters
1622 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1623 */
0f2a2a75
VS
1624 pipe_config->limited_color_range =
1625 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1626 } else {
1627 pipe_config->limited_color_range =
1628 intel_dp->limited_color_range;
55bc60db
VS
1629 }
1630
90a6b7b0 1631 pipe_config->lane_count = lane_count;
a8f3ef61 1632
657445fe 1633 pipe_config->pipe_bpp = bpp;
94ca719e 1634 pipe_config->port_clock = common_rates[clock];
a4fc5ed6 1635
04a60f9f
VS
1636 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1637 &link_bw, &rate_select);
1638
1639 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1640 link_bw, rate_select, pipe_config->lane_count,
ff9a6750 1641 pipe_config->port_clock, bpp);
36008365
DV
1642 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1643 mode_rate, link_avail);
a4fc5ed6 1644
03afc4a2 1645 intel_link_compute_m_n(bpp, lane_count,
241bfc38
DL
1646 adjusted_mode->crtc_clock,
1647 pipe_config->port_clock,
03afc4a2 1648 &pipe_config->dp_m_n);
9d1a455b 1649
439d7ac0 1650 if (intel_connector->panel.downclock_mode != NULL &&
96178eeb 1651 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
f769cd24 1652 pipe_config->has_drrs = true;
439d7ac0
PB
1653 intel_link_compute_m_n(bpp, lane_count,
1654 intel_connector->panel.downclock_mode->clock,
1655 pipe_config->port_clock,
1656 &pipe_config->dp_m2_n2);
1657 }
1658
ef11bdb3 1659 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
840b32b7 1660 skl_edp_set_pll_config(pipe_config);
977bb38d
S
1661 else if (IS_BROXTON(dev))
1662 /* handled in ddi */;
5416d871 1663 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
840b32b7 1664 hsw_dp_set_ddi_pll_sel(pipe_config);
0e50338c 1665 else
840b32b7 1666 intel_dp_set_clock(encoder, pipe_config);
c6bb3538 1667
03afc4a2 1668 return true;
a4fc5ed6
KP
1669}
1670
901c2daf
VS
1671void intel_dp_set_link_params(struct intel_dp *intel_dp,
1672 const struct intel_crtc_state *pipe_config)
1673{
1674 intel_dp->link_rate = pipe_config->port_clock;
1675 intel_dp->lane_count = pipe_config->lane_count;
1676}
1677
8ac33ed3 1678static void intel_dp_prepare(struct intel_encoder *encoder)
a4fc5ed6 1679{
b934223d 1680 struct drm_device *dev = encoder->base.dev;
417e822d 1681 struct drm_i915_private *dev_priv = dev->dev_private;
b934223d 1682 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1683 enum port port = dp_to_dig_port(intel_dp)->port;
b934223d 1684 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
7c5f93b0 1685 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
a4fc5ed6 1686
901c2daf
VS
1687 intel_dp_set_link_params(intel_dp, crtc->config);
1688
417e822d 1689 /*
1a2eb460 1690 * There are four kinds of DP registers:
417e822d
KP
1691 *
1692 * IBX PCH
1a2eb460
KP
1693 * SNB CPU
1694 * IVB CPU
417e822d
KP
1695 * CPT PCH
1696 *
1697 * IBX PCH and CPU are the same for almost everything,
1698 * except that the CPU DP PLL is configured in this
1699 * register
1700 *
1701 * CPT PCH is quite different, having many bits moved
1702 * to the TRANS_DP_CTL register instead. That
1703 * configuration happens (oddly) in ironlake_pch_enable
1704 */
9c9e7927 1705
417e822d
KP
1706 /* Preserve the BIOS-computed detected bit. This is
1707 * supposed to be read-only.
1708 */
1709 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 1710
417e822d 1711 /* Handle DP bits in common between all three register formats */
417e822d 1712 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
90a6b7b0 1713 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
a4fc5ed6 1714
417e822d 1715 /* Split out the IBX/CPU vs CPT settings */
32f9d658 1716
39e5fa88 1717 if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1718 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1719 intel_dp->DP |= DP_SYNC_HS_HIGH;
1720 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1721 intel_dp->DP |= DP_SYNC_VS_HIGH;
1722 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1723
6aba5b6c 1724 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1a2eb460
KP
1725 intel_dp->DP |= DP_ENHANCED_FRAMING;
1726
7c62a164 1727 intel_dp->DP |= crtc->pipe << 29;
39e5fa88 1728 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
e3ef4479
VS
1729 u32 trans_dp;
1730
39e5fa88 1731 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
e3ef4479
VS
1732
1733 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1734 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1735 trans_dp |= TRANS_DP_ENH_FRAMING;
1736 else
1737 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1738 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
39e5fa88 1739 } else {
0f2a2a75 1740 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 1741 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
0f2a2a75 1742 intel_dp->DP |= DP_COLOR_RANGE_16_235;
417e822d
KP
1743
1744 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1745 intel_dp->DP |= DP_SYNC_HS_HIGH;
1746 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1747 intel_dp->DP |= DP_SYNC_VS_HIGH;
1748 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1749
6aba5b6c 1750 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
417e822d
KP
1751 intel_dp->DP |= DP_ENHANCED_FRAMING;
1752
39e5fa88 1753 if (IS_CHERRYVIEW(dev))
44f37d1f 1754 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
39e5fa88
VS
1755 else if (crtc->pipe == PIPE_B)
1756 intel_dp->DP |= DP_PIPEB_SELECT;
32f9d658 1757 }
a4fc5ed6
KP
1758}
1759
ffd6749d
PZ
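/*
 * Mask/value pairs for the panel power sequencer status bits, consumed by
 * wait_panel_status() below to wait for the panel-on, panel-off and full
 * power-cycle idle states.
 */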
1760#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1761#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1762
1a5ef5b7
PZ
1763#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1764#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1765
ffd6749d
PZ
1766#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1767#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 1768
4be73780 1769static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1770 u32 mask,
1771 u32 value)
bd943159 1772{
30add22d 1773 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1774 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1775 i915_reg_t pp_stat_reg, pp_ctrl_reg;
453c5420 1776
e39b999a
VS
1777 lockdep_assert_held(&dev_priv->pps_mutex);
1778
bf13e81b
JN
1779 pp_stat_reg = _pp_stat_reg(intel_dp);
1780 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1781
99ea7127 1782 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1783 mask, value,
1784 I915_READ(pp_stat_reg),
1785 I915_READ(pp_ctrl_reg));
32ce697c 1786
453c5420 1787 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1788 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1789 I915_READ(pp_stat_reg),
1790 I915_READ(pp_ctrl_reg));
32ce697c 1791 }
54c136d4
CW
1792
1793 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1794}
32ce697c 1795
4be73780 1796static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1797{
1798 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1799 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1800}
1801
4be73780 1802static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1803{
1804 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1805 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1806}
1807
4be73780 1808static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1809{
1810 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1811
1812 /* When we disable the VDD override bit last, we have to do the manual
1813 * wait. */
1814 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1815 intel_dp->panel_power_cycle_delay);
1816
4be73780 1817 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1818}
1819
4be73780 1820static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1821{
1822 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1823 intel_dp->backlight_on_delay);
1824}
1825
4be73780 1826static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1827{
1828 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1829 intel_dp->backlight_off_delay);
1830}
99ea7127 1831
832dd3c1
KP
1832/* Read the current pp_control value, unlocking the register if it
1833 * is locked
1834 */
1835
453c5420 1836static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1837{
453c5420
JB
1838 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1839 struct drm_i915_private *dev_priv = dev->dev_private;
1840 u32 control;
832dd3c1 1841
e39b999a
VS
1842 lockdep_assert_held(&dev_priv->pps_mutex);
1843
bf13e81b 1844 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1845 if (!IS_BROXTON(dev)) {
1846 control &= ~PANEL_UNLOCK_MASK;
1847 control |= PANEL_UNLOCK_REGS;
1848 }
832dd3c1 1849 return control;
bd943159
KP
1850}
1851
951468f3
VS
1852/*
1853 * Must be paired with edp_panel_vdd_off().
1854 * Must hold pps_mutex around the whole on/off sequence.
1855 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1856 */
1e0560e0 1857static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1858{
30add22d 1859 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1860 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1861 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1862 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1863 enum intel_display_power_domain power_domain;
5d613501 1864 u32 pp;
f0f59a00 1865 i915_reg_t pp_stat_reg, pp_ctrl_reg;
adddaaf4 1866 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1867
e39b999a
VS
1868 lockdep_assert_held(&dev_priv->pps_mutex);
1869
97af61f5 1870 if (!is_edp(intel_dp))
adddaaf4 1871 return false;
bd943159 1872
2c623c11 1873 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1874 intel_dp->want_panel_vdd = true;
99ea7127 1875
4be73780 1876 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1877 return need_to_disable;
b0665d57 1878
25f78f58 1879 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 1880 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1881
3936fcf4
VS
1882 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1883 port_name(intel_dig_port->port));
bd943159 1884
4be73780
DV
1885 if (!edp_have_panel_power(intel_dp))
1886 wait_panel_power_cycle(intel_dp);
99ea7127 1887
453c5420 1888 pp = ironlake_get_pp_control(intel_dp);
5d613501 1889 pp |= EDP_FORCE_VDD;
ebf33b18 1890
bf13e81b
JN
1891 pp_stat_reg = _pp_stat_reg(intel_dp);
1892 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1893
1894 I915_WRITE(pp_ctrl_reg, pp);
1895 POSTING_READ(pp_ctrl_reg);
1896 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1897 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1898 /*
1899 * If the panel wasn't on, delay before accessing aux channel
1900 */
4be73780 1901 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1902 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1903 port_name(intel_dig_port->port));
f01eca2e 1904 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1905 }
adddaaf4
JN
1906
1907 return need_to_disable;
1908}
1909
951468f3
VS
1910/*
1911 * Must be paired with intel_edp_panel_vdd_off() or
1912 * intel_edp_panel_off().
1913 * Nested calls to these functions are not allowed since
1914 * we drop the lock. Caller must use some higher level
1915 * locking to prevent nested calls from other threads.
1916 */
b80d6c78 1917void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1918{
c695b6b6 1919 bool vdd;
adddaaf4 1920
c695b6b6
VS
1921 if (!is_edp(intel_dp))
1922 return;
1923
773538e8 1924 pps_lock(intel_dp);
c695b6b6 1925 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1926 pps_unlock(intel_dp);
c695b6b6 1927
e2c719b7 1928 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1929 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1930}
1931
4be73780 1932static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1933{
30add22d 1934 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1935 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1936 struct intel_digital_port *intel_dig_port =
1937 dp_to_dig_port(intel_dp);
1938 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1939 enum intel_display_power_domain power_domain;
5d613501 1940 u32 pp;
f0f59a00 1941 i915_reg_t pp_stat_reg, pp_ctrl_reg;
5d613501 1942
e39b999a 1943 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1944
15e899a0 1945 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1946
15e899a0 1947 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1948 return;
b0665d57 1949
3936fcf4
VS
1950 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1951 port_name(intel_dig_port->port));
bd943159 1952
be2c9196
VS
1953 pp = ironlake_get_pp_control(intel_dp);
1954 pp &= ~EDP_FORCE_VDD;
453c5420 1955
be2c9196
VS
1956 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1957 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1958
be2c9196
VS
1959 I915_WRITE(pp_ctrl_reg, pp);
1960 POSTING_READ(pp_ctrl_reg);
90791a5c 1961
be2c9196
VS
1962 /* Make sure sequencer is idle before allowing subsequent activity */
1963 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1964 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1965
be2c9196
VS
1966 if ((pp & POWER_TARGET_ON) == 0)
1967 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1968
25f78f58 1969 power_domain = intel_display_port_aux_power_domain(intel_encoder);
be2c9196 1970 intel_display_power_put(dev_priv, power_domain);
bd943159 1971}
5d613501 1972
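/*
 * Delayed work that drops the VDD force once nobody wants it any more;
 * scheduled from edp_panel_vdd_schedule_off() and serialized by pps_lock.
 */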
4be73780 1973static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1974{
1975 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1976 struct intel_dp, panel_vdd_work);
bd943159 1977
773538e8 1978 pps_lock(intel_dp);
15e899a0
VS
1979 if (!intel_dp->want_panel_vdd)
1980 edp_panel_vdd_off_sync(intel_dp);
773538e8 1981 pps_unlock(intel_dp);
bd943159
KP
1982}
1983
aba86890
ID
1984static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1985{
1986 unsigned long delay;
1987
1988 /*
1989 * Queue the timer to fire a long time from now (relative to the power
1990 * down delay) to keep the panel power up across a sequence of
1991 * operations.
1992 */
1993 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1994 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1995}
1996
951468f3
VS
1997/*
1998 * Must be paired with edp_panel_vdd_on().
1999 * Must hold pps_mutex around the whole on/off sequence.
2000 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2001 */
4be73780 2002static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 2003{
e39b999a
VS
2004 struct drm_i915_private *dev_priv =
2005 intel_dp_to_dev(intel_dp)->dev_private;
2006
2007 lockdep_assert_held(&dev_priv->pps_mutex);
2008
97af61f5
KP
2009 if (!is_edp(intel_dp))
2010 return;
5d613501 2011
e2c719b7 2012 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 2013 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 2014
bd943159
KP
2015 intel_dp->want_panel_vdd = false;
2016
aba86890 2017 if (sync)
4be73780 2018 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
2019 else
2020 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
2021}
2022
9f0fb5be 2023static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 2024{
30add22d 2025 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2026 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 2027 u32 pp;
f0f59a00 2028 i915_reg_t pp_ctrl_reg;
9934c132 2029
9f0fb5be
VS
2030 lockdep_assert_held(&dev_priv->pps_mutex);
2031
97af61f5 2032 if (!is_edp(intel_dp))
bd943159 2033 return;
99ea7127 2034
3936fcf4
VS
2035 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2036 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 2037
e7a89ace
VS
2038 if (WARN(edp_have_panel_power(intel_dp),
2039 "eDP port %c panel power already on\n",
2040 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 2041 return;
9934c132 2042
4be73780 2043 wait_panel_power_cycle(intel_dp);
37c6c9b0 2044
bf13e81b 2045 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2046 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
2047 if (IS_GEN5(dev)) {
2048 /* ILK workaround: disable reset around power sequence */
2049 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
2050 I915_WRITE(pp_ctrl_reg, pp);
2051 POSTING_READ(pp_ctrl_reg);
05ce1a49 2052 }
37c6c9b0 2053
1c0ae80a 2054 pp |= POWER_TARGET_ON;
99ea7127
KP
2055 if (!IS_GEN5(dev))
2056 pp |= PANEL_POWER_RESET;
2057
453c5420
JB
2058 I915_WRITE(pp_ctrl_reg, pp);
2059 POSTING_READ(pp_ctrl_reg);
9934c132 2060
4be73780 2061 wait_panel_on(intel_dp);
dce56b3c 2062 intel_dp->last_power_on = jiffies;
9934c132 2063
05ce1a49
KP
2064 if (IS_GEN5(dev)) {
2065 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
2066 I915_WRITE(pp_ctrl_reg, pp);
2067 POSTING_READ(pp_ctrl_reg);
05ce1a49 2068 }
9f0fb5be 2069}
e39b999a 2070
9f0fb5be
VS
2071void intel_edp_panel_on(struct intel_dp *intel_dp)
2072{
2073 if (!is_edp(intel_dp))
2074 return;
2075
2076 pps_lock(intel_dp);
2077 edp_panel_on(intel_dp);
773538e8 2078 pps_unlock(intel_dp);
9934c132
JB
2079}
2080
9f0fb5be
VS
2081
2082static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 2083{
4e6e1a54
ID
2084 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2085 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 2086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 2087 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 2088 enum intel_display_power_domain power_domain;
99ea7127 2089 u32 pp;
f0f59a00 2090 i915_reg_t pp_ctrl_reg;
9934c132 2091
9f0fb5be
VS
2092 lockdep_assert_held(&dev_priv->pps_mutex);
2093
97af61f5
KP
2094 if (!is_edp(intel_dp))
2095 return;
37c6c9b0 2096
3936fcf4
VS
2097 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2098 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2099
3936fcf4
VS
2100 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2101 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2102
453c5420 2103 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
2104 /* We need to switch off panel power _and_ force vdd, because otherwise some
2105 * panels get very unhappy and cease to work. */
b3064154
PJ
2106 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2107 EDP_BLC_ENABLE);
453c5420 2108
bf13e81b 2109 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2110
849e39f5
PZ
2111 intel_dp->want_panel_vdd = false;
2112
453c5420
JB
2113 I915_WRITE(pp_ctrl_reg, pp);
2114 POSTING_READ(pp_ctrl_reg);
9934c132 2115
dce56b3c 2116 intel_dp->last_power_cycle = jiffies;
4be73780 2117 wait_panel_off(intel_dp);
849e39f5
PZ
2118
2119 /* We got a reference when we enabled the VDD. */
25f78f58 2120 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4e6e1a54 2121 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2122}
e39b999a 2123
9f0fb5be
VS
2124void intel_edp_panel_off(struct intel_dp *intel_dp)
2125{
2126 if (!is_edp(intel_dp))
2127 return;
e39b999a 2128
9f0fb5be
VS
2129 pps_lock(intel_dp);
2130 edp_panel_off(intel_dp);
773538e8 2131 pps_unlock(intel_dp);
9934c132
JB
2132}
2133
1250d107
JN
2134/* Enable backlight in the panel power control. */
2135static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2136{
da63a9f2
PZ
2137 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2138 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2139 struct drm_i915_private *dev_priv = dev->dev_private;
2140 u32 pp;
f0f59a00 2141 i915_reg_t pp_ctrl_reg;
32f9d658 2142
01cb9ea6
JB
2143 /*
2144 * If we enable the backlight right away following a panel power
2145 * on, we may see slight flicker as the panel syncs with the eDP
2146 * link. So delay a bit to make sure the image is solid before
2147 * allowing it to appear.
2148 */
4be73780 2149 wait_backlight_on(intel_dp);
e39b999a 2150
773538e8 2151 pps_lock(intel_dp);
e39b999a 2152
453c5420 2153 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2154 pp |= EDP_BLC_ENABLE;
453c5420 2155
bf13e81b 2156 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2157
2158 I915_WRITE(pp_ctrl_reg, pp);
2159 POSTING_READ(pp_ctrl_reg);
e39b999a 2160
773538e8 2161 pps_unlock(intel_dp);
32f9d658
ZW
2162}
2163
1250d107
JN
2164/* Enable backlight PWM and backlight PP control. */
2165void intel_edp_backlight_on(struct intel_dp *intel_dp)
2166{
2167 if (!is_edp(intel_dp))
2168 return;
2169
2170 DRM_DEBUG_KMS("\n");
2171
2172 intel_panel_enable_backlight(intel_dp->attached_connector);
2173 _intel_edp_backlight_on(intel_dp);
2174}
2175
2176/* Disable backlight in the panel power control. */
2177static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2178{
30add22d 2179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 u32 pp;
f0f59a00 2182 i915_reg_t pp_ctrl_reg;
32f9d658 2183
f01eca2e
KP
2184 if (!is_edp(intel_dp))
2185 return;
2186
773538e8 2187 pps_lock(intel_dp);
e39b999a 2188
453c5420 2189 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2190 pp &= ~EDP_BLC_ENABLE;
453c5420 2191
bf13e81b 2192 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2193
2194 I915_WRITE(pp_ctrl_reg, pp);
2195 POSTING_READ(pp_ctrl_reg);
f7d2323c 2196
773538e8 2197 pps_unlock(intel_dp);
e39b999a
VS
2198
2199 intel_dp->last_backlight_off = jiffies;
f7d2323c 2200 edp_wait_backlight_off(intel_dp);
1250d107 2201}
f7d2323c 2202
1250d107
JN
2203/* Disable backlight PP control and backlight PWM. */
2204void intel_edp_backlight_off(struct intel_dp *intel_dp)
2205{
2206 if (!is_edp(intel_dp))
2207 return;
2208
2209 DRM_DEBUG_KMS("\n");
f7d2323c 2210
1250d107 2211 _intel_edp_backlight_off(intel_dp);
f7d2323c 2212 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2213}
a4fc5ed6 2214
73580fb7
JN
2215/*
2216 * Hook for controlling the panel power control backlight through the bl_power
2217 * sysfs attribute. Take care to handle multiple calls.
2218 */
2219static void intel_edp_backlight_power(struct intel_connector *connector,
2220 bool enable)
2221{
2222 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2223 bool is_enabled;
2224
773538e8 2225 pps_lock(intel_dp);
e39b999a 2226 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2227 pps_unlock(intel_dp);
73580fb7
JN
2228
2229 if (is_enabled == enable)
2230 return;
2231
23ba9373
JN
2232 DRM_DEBUG_KMS("panel power control backlight %s\n",
2233 enable ? "enable" : "disable");
73580fb7
JN
2234
2235 if (enable)
2236 _intel_edp_backlight_on(intel_dp);
2237 else
2238 _intel_edp_backlight_off(intel_dp);
2239}
2240
64e1077a
VS
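/* Helpers and state asserts used by the CPU eDP PLL enable/disable paths below. */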
2241static const char *state_string(bool enabled)
2242{
2243 return enabled ? "on" : "off";
2244}
2245
2246static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2247{
2248 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2249 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2250 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2251
2252 I915_STATE_WARN(cur_state != state,
2253 "DP port %c state assertion failure (expected %s, current %s)\n",
2254 port_name(dig_port->port),
2255 state_string(state), state_string(cur_state));
2256}
2257#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2258
2259static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2260{
2261 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2262
2263 I915_STATE_WARN(cur_state != state,
2264 "eDP PLL state assertion failure (expected %s, current %s)\n",
2265 state_string(state), state_string(cur_state));
2266}
2267#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2268#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2269
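/*
 * Enabling the CPU eDP PLL: program the frequency select while the PLL is
 * still disabled, then set DP_PLL_ENABLE and let it settle (the udelays
 * below). Both the pipe and the DP port must be off here, hence the asserts.
 */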
2bd2ad64 2270static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2271{
da63a9f2 2272 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2273 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2274 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2275
64e1077a
VS
2276 assert_pipe_disabled(dev_priv, crtc->pipe);
2277 assert_dp_port_disabled(intel_dp);
2278 assert_edp_pll_disabled(dev_priv);
2bd2ad64 2279
abfce949
VS
2280 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2281 crtc->config->port_clock);
2282
2283 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2284
2285 if (crtc->config->port_clock == 162000)
2286 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2287 else
2288 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2289
2290 I915_WRITE(DP_A, intel_dp->DP);
2291 POSTING_READ(DP_A);
2292 udelay(500);
2293
0767935e 2294 intel_dp->DP |= DP_PLL_ENABLE;
6fec7662 2295
0767935e 2296 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2297 POSTING_READ(DP_A);
2298 udelay(200);
d240f20f
JB
2299}
2300
2bd2ad64 2301static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2302{
da63a9f2 2303 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
64e1077a
VS
2304 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2305 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d240f20f 2306
64e1077a
VS
2307 assert_pipe_disabled(dev_priv, crtc->pipe);
2308 assert_dp_port_disabled(intel_dp);
2309 assert_edp_pll_enabled(dev_priv);
2bd2ad64 2310
abfce949
VS
2311 DRM_DEBUG_KMS("disabling eDP PLL\n");
2312
6fec7662 2313 intel_dp->DP &= ~DP_PLL_ENABLE;
0767935e 2314
6fec7662 2315 I915_WRITE(DP_A, intel_dp->DP);
1af5fa1b 2316 POSTING_READ(DP_A);
d240f20f
JB
2317 udelay(200);
2318}
2319
c7ad3810 2320/* If the sink supports it, try to set the power state appropriately */
c19b0669 2321void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2322{
2323 int ret, i;
2324
2325 /* Should have a valid DPCD by this point */
2326 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2327 return;
2328
2329 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2330 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2331 DP_SET_POWER_D3);
c7ad3810
JB
2332 } else {
2333 /*
2334 * When turning on, we need to retry for 1ms to give the sink
2335 * time to wake up.
2336 */
2337 for (i = 0; i < 3; i++) {
9d1a1031
JN
2338 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2339 DP_SET_POWER_D0);
c7ad3810
JB
2340 if (ret == 1)
2341 break;
2342 msleep(1);
2343 }
2344 }
f9cac721
JN
2345
2346 if (ret != 1)
2347 DRM_DEBUG_KMS("failed to %s sink power state\n",
2348 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2349}
2350
19d8fe15
DV
2351static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2352 enum pipe *pipe)
d240f20f 2353{
19d8fe15 2354 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2355 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2356 struct drm_device *dev = encoder->base.dev;
2357 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2358 enum intel_display_power_domain power_domain;
2359 u32 tmp;
2360
2361 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2362 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2363 return false;
2364
2365 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2366
2367 if (!(tmp & DP_PORT_EN))
2368 return false;
2369
39e5fa88 2370 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2371 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2372 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2373 enum pipe p;
19d8fe15 2374
adc289d7
VS
2375 for_each_pipe(dev_priv, p) {
2376 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2377 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2378 *pipe = p;
19d8fe15
DV
2379 return true;
2380 }
2381 }
19d8fe15 2382
4a0833ec 2383 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
f0f59a00 2384 i915_mmio_reg_offset(intel_dp->output_reg));
39e5fa88
VS
2385 } else if (IS_CHERRYVIEW(dev)) {
2386 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2387 } else {
2388 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2389 }
d240f20f 2390
19d8fe15
DV
2391 return true;
2392}
d240f20f 2393
045ac3b5 2394static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2395 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2398 u32 tmp, flags = 0;
63000ef6
XZ
2399 struct drm_device *dev = encoder->base.dev;
2400 struct drm_i915_private *dev_priv = dev->dev_private;
2401 enum port port = dp_to_dig_port(intel_dp)->port;
2402 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2403 int dotclock;
045ac3b5 2404
9ed109a7 2405 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2406
2407 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2408
39e5fa88 2409 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2410 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2411
2412 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2413 flags |= DRM_MODE_FLAG_PHSYNC;
2414 else
2415 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2416
b81e34c2 2417 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2418 flags |= DRM_MODE_FLAG_PVSYNC;
2419 else
2420 flags |= DRM_MODE_FLAG_NVSYNC;
2421 } else {
39e5fa88 2422 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2423 flags |= DRM_MODE_FLAG_PHSYNC;
2424 else
2425 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2426
39e5fa88 2427 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2428 flags |= DRM_MODE_FLAG_PVSYNC;
2429 else
2430 flags |= DRM_MODE_FLAG_NVSYNC;
2431 }
045ac3b5 2432
2d112de7 2433 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2434
8c875fca 2435 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
666a4537 2436 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
2437 pipe_config->limited_color_range = true;
2438
eb14cb74
VS
2439 pipe_config->has_dp_encoder = true;
2440
90a6b7b0
VS
2441 pipe_config->lane_count =
2442 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2443
eb14cb74
VS
2444 intel_dp_get_m_n(crtc, pipe_config);
2445
18442d08 2446 if (port == PORT_A) {
b377e0df 2447 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
2448 pipe_config->port_clock = 162000;
2449 else
2450 pipe_config->port_clock = 270000;
2451 }
18442d08
VS
2452
2453 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2454 &pipe_config->dp_m_n);
2455
2456 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2457 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2458
2d112de7 2459 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2460
c6cd2ee2
JN
2461 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2462 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2463 /*
2464 * This is a big fat ugly hack.
2465 *
2466 * Some machines in UEFI boot mode provide us a VBT that has 18
2467 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2468 * unknown we fail to light up. Yet the same BIOS boots up with
2469 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2470 * max, not what it tells us to use.
2471 *
2472 * Note: This will still be broken if the eDP panel is not lit
2473 * up by the BIOS, and thus we can't get the mode at module
2474 * load.
2475 */
2476 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2477 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2478 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2479 }
045ac3b5
JB
2480}
2481
e8cb4558 2482static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2483{
e8cb4558 2484 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2485 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2486 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2487
6e3c9717 2488 if (crtc->config->has_audio)
495a5bb8 2489 intel_audio_codec_disable(encoder);
6cb49835 2490
b32c6f48
RV
2491 if (HAS_PSR(dev) && !HAS_DDI(dev))
2492 intel_psr_disable(intel_dp);
2493
6cb49835
DV
2494 /* Make sure the panel is off before trying to change the mode. But also
2495 * ensure that we have vdd while we switch off the panel. */
24f3e092 2496 intel_edp_panel_vdd_on(intel_dp);
4be73780 2497 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2498 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2499 intel_edp_panel_off(intel_dp);
3739850b 2500
08aff3fe
VS
2501 /* disable the port before the pipe on g4x */
2502 if (INTEL_INFO(dev)->gen < 5)
3739850b 2503 intel_dp_link_down(intel_dp);
d240f20f
JB
2504}
2505
08aff3fe 2506static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2507{
2bd2ad64 2508 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2509 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2510
49277c31 2511 intel_dp_link_down(intel_dp);
abfce949
VS
2512
2513 /* Only ilk+ has port A */
08aff3fe
VS
2514 if (port == PORT_A)
2515 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2516}
2517
2518static void vlv_post_disable_dp(struct intel_encoder *encoder)
2519{
2520 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2521
2522 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2523}
2524
a8f327fb
VS
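/*
 * Assert (reset=true) or deassert (reset=false) the TX lane and clock soft
 * resets on the first PCS channel, and also on the second channel when more
 * than two lanes are in use.
 */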
2525static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2526 bool reset)
580d3811 2527{
a8f327fb
VS
2528 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2529 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2530 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2531 enum pipe pipe = crtc->pipe;
2532 uint32_t val;
580d3811 2533
a8f327fb
VS
2534 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2535 if (reset)
2536 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2537 else
2538 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2539 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2540
a8f327fb
VS
2541 if (crtc->config->lane_count > 2) {
2542 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2543 if (reset)
2544 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2545 else
2546 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2547 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2548 }
580d3811 2549
97fd4d5c 2550 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2551 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2552 if (reset)
2553 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2554 else
2555 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2556 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2557
a8f327fb 2558 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2559 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2560 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2561 if (reset)
2562 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2563 else
2564 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2565 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2566 }
a8f327fb 2567}
97fd4d5c 2568
a8f327fb
VS
2569static void chv_post_disable_dp(struct intel_encoder *encoder)
2570{
2571 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2572 struct drm_device *dev = encoder->base.dev;
2573 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2574
a8f327fb
VS
2575 intel_dp_link_down(intel_dp);
2576
2577 mutex_lock(&dev_priv->sb_lock);
2578
2579 /* Assert data lane reset */
2580 chv_data_lane_soft_reset(encoder, true);
580d3811 2581
a580516d 2582 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2583}
2584
7b13b58a
VS
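/*
 * Translate the requested DPCD training pattern into this platform's port
 * register bits: DDI ports go through DP_TP_CTL, gen7 port A and CPT PCH
 * ports use the _CPT encodings, and everything else uses the g4x/VLV/CHV
 * encodings.
 */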
2585static void
2586_intel_dp_set_link_train(struct intel_dp *intel_dp,
2587 uint32_t *DP,
2588 uint8_t dp_train_pat)
2589{
2590 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2591 struct drm_device *dev = intel_dig_port->base.base.dev;
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593 enum port port = intel_dig_port->port;
2594
2595 if (HAS_DDI(dev)) {
2596 uint32_t temp = I915_READ(DP_TP_CTL(port));
2597
2598 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2599 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2600 else
2601 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2602
2603 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2604 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2605 case DP_TRAINING_PATTERN_DISABLE:
2606 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2607
2608 break;
2609 case DP_TRAINING_PATTERN_1:
2610 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2611 break;
2612 case DP_TRAINING_PATTERN_2:
2613 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2614 break;
2615 case DP_TRAINING_PATTERN_3:
2616 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2617 break;
2618 }
2619 I915_WRITE(DP_TP_CTL(port), temp);
2620
39e5fa88
VS
2621 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2622 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2623 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2624
2625 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2626 case DP_TRAINING_PATTERN_DISABLE:
2627 *DP |= DP_LINK_TRAIN_OFF_CPT;
2628 break;
2629 case DP_TRAINING_PATTERN_1:
2630 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2631 break;
2632 case DP_TRAINING_PATTERN_2:
2633 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2634 break;
2635 case DP_TRAINING_PATTERN_3:
2636 DRM_ERROR("DP training pattern 3 not supported\n");
2637 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2638 break;
2639 }
2640
2641 } else {
2642 if (IS_CHERRYVIEW(dev))
2643 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2644 else
2645 *DP &= ~DP_LINK_TRAIN_MASK;
2646
2647 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2648 case DP_TRAINING_PATTERN_DISABLE:
2649 *DP |= DP_LINK_TRAIN_OFF;
2650 break;
2651 case DP_TRAINING_PATTERN_1:
2652 *DP |= DP_LINK_TRAIN_PAT_1;
2653 break;
2654 case DP_TRAINING_PATTERN_2:
2655 *DP |= DP_LINK_TRAIN_PAT_2;
2656 break;
2657 case DP_TRAINING_PATTERN_3:
2658 if (IS_CHERRYVIEW(dev)) {
2659 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2660 } else {
2661 DRM_ERROR("DP training pattern 3 not supported\n");
2662 *DP |= DP_LINK_TRAIN_PAT_2;
2663 }
2664 break;
2665 }
2666 }
2667}
2668
2669static void intel_dp_enable_port(struct intel_dp *intel_dp)
2670{
2671 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2672 struct drm_i915_private *dev_priv = dev->dev_private;
6fec7662
VS
2673 struct intel_crtc *crtc =
2674 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
7b13b58a 2675
7b13b58a
VS
2676 /* enable with pattern 1 (as per spec) */
2677 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2678 DP_TRAINING_PATTERN_1);
2679
2680 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2681 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2682
2683 /*
2684 * Magic for VLV/CHV. We _must_ first set up the register
2685 * without actually enabling the port, and then do another
2686 * write to enable the port. Otherwise link training will
2687 * fail when the power sequencer is freshly used for this port.
2688 */
2689 intel_dp->DP |= DP_PORT_EN;
6fec7662
VS
2690 if (crtc->config->has_audio)
2691 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50
VS
2692
2693 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2694 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2695}
2696
e8cb4558 2697static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2698{
e8cb4558
DV
2699 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2700 struct drm_device *dev = encoder->base.dev;
2701 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2702 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2703 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
d6fbdd15
VS
2704 enum port port = dp_to_dig_port(intel_dp)->port;
2705 enum pipe pipe = crtc->pipe;
5d613501 2706
0c33d8d7
DV
2707 if (WARN_ON(dp_reg & DP_PORT_EN))
2708 return;
5d613501 2709
093e3f13
VS
2710 pps_lock(intel_dp);
2711
666a4537 2712 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
093e3f13
VS
2713 vlv_init_panel_power_sequencer(intel_dp);
2714
7864578a
VS
2715 /*
2716 * We get an occasional spurious underrun between the port
2717 * enable and vdd enable, when enabling port A eDP.
2718 *
2719 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2720 */
2721 if (port == PORT_A)
2722 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2723
7b13b58a 2724 intel_dp_enable_port(intel_dp);
093e3f13 2725
d6fbdd15
VS
2726 if (port == PORT_A && IS_GEN5(dev_priv)) {
2727 /*
2728 * Underrun reporting for the other pipe was disabled in
2729 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2730 * enabled, so it's now safe to re-enable underrun reporting.
2731 */
2732 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2733 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2734 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2735 }
2736
093e3f13
VS
2737 edp_panel_vdd_on(intel_dp);
2738 edp_panel_on(intel_dp);
2739 edp_panel_vdd_off(intel_dp, true);
2740
7864578a
VS
2741 if (port == PORT_A)
2742 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2743
093e3f13
VS
2744 pps_unlock(intel_dp);
2745
666a4537 2746 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e0fce78f
VS
2747 unsigned int lane_mask = 0x0;
2748
2749 if (IS_CHERRYVIEW(dev))
2750 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2751
9b6de0a1
VS
2752 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2753 lane_mask);
e0fce78f 2754 }
61234fa5 2755
f01eca2e 2756 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2757 intel_dp_start_link_train(intel_dp);
3ab9c637 2758 intel_dp_stop_link_train(intel_dp);
c1dec79a 2759
6e3c9717 2760 if (crtc->config->has_audio) {
c1dec79a 2761 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
d6fbdd15 2762 pipe_name(pipe));
c1dec79a
JN
2763 intel_audio_codec_enable(encoder);
2764 }
ab1f90f9 2765}
89b667f8 2766
ecff4f3b
JN
2767static void g4x_enable_dp(struct intel_encoder *encoder)
2768{
828f5c6e
JN
2769 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2770
ecff4f3b 2771 intel_enable_dp(encoder);
4be73780 2772 intel_edp_backlight_on(intel_dp);
ab1f90f9 2773}
89b667f8 2774
ab1f90f9
JN
2775static void vlv_enable_dp(struct intel_encoder *encoder)
2776{
828f5c6e
JN
2777 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2778
4be73780 2779 intel_edp_backlight_on(intel_dp);
b32c6f48 2780 intel_psr_enable(intel_dp);
d240f20f
JB
2781}
2782
ecff4f3b 2783static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9 2784{
d6fbdd15 2785 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
ab1f90f9 2786 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
d6fbdd15
VS
2787 enum port port = dp_to_dig_port(intel_dp)->port;
2788 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
ab1f90f9 2789
8ac33ed3
DV
2790 intel_dp_prepare(encoder);
2791
d6fbdd15
VS
2792 if (port == PORT_A && IS_GEN5(dev_priv)) {
2793 /*
2794 * We get FIFO underruns on the other pipe when
2795 * enabling the CPU eDP PLL, and when enabling CPU
2796 * eDP port. We could potentially avoid the PLL
2797 * underrun with a vblank wait just prior to enabling
2798 * the PLL, but that doesn't appear to help the port
2799 * enable case. Just sweep it all under the rug.
2800 */
2801 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2802 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2803 }
2804
d41f1efb 2805 /* Only ilk+ has port A */
abfce949 2806 if (port == PORT_A)
ab1f90f9
JN
2807 ironlake_edp_pll_on(intel_dp);
2808}
2809
83b84597
VS
2810static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2811{
2812 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2813 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2814 enum pipe pipe = intel_dp->pps_pipe;
f0f59a00 2815 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
83b84597
VS
2816
2817 edp_panel_vdd_off_sync(intel_dp);
2818
2819 /*
2820 * VLV seems to get confused when multiple power sequencers
2821 * have the same port selected (even if only one has power/vdd
2822 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2823 * CHV, on the other hand, doesn't seem to mind having the same port
2824 * selected in multiple power sequencers, but let's always clear the
2825 * port select when logically disconnecting a power sequencer
2826 * from a port.
2827 */
2828 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2829 pipe_name(pipe), port_name(intel_dig_port->port));
2830 I915_WRITE(pp_on_reg, 0);
2831 POSTING_READ(pp_on_reg);
2832
2833 intel_dp->pps_pipe = INVALID_PIPE;
2834}
2835
a4a5d2f8
VS
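/*
 * Detach the given pipe's power sequencer from whichever eDP port is
 * currently using it, so that the caller can take it over.
 */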
2836static void vlv_steal_power_sequencer(struct drm_device *dev,
2837 enum pipe pipe)
2838{
2839 struct drm_i915_private *dev_priv = dev->dev_private;
2840 struct intel_encoder *encoder;
2841
2842 lockdep_assert_held(&dev_priv->pps_mutex);
2843
ac3c12e4
VS
2844 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2845 return;
2846
19c8054c 2847 for_each_intel_encoder(dev, encoder) {
a4a5d2f8 2848 struct intel_dp *intel_dp;
773538e8 2849 enum port port;
a4a5d2f8
VS
2850
2851 if (encoder->type != INTEL_OUTPUT_EDP)
2852 continue;
2853
2854 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2855 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2856
2857 if (intel_dp->pps_pipe != pipe)
2858 continue;
2859
2860 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2861 pipe_name(pipe), port_name(port));
a4a5d2f8 2862
e02f9a06 2863 WARN(encoder->base.crtc,
034e43c6
VS
2864 "stealing pipe %c power sequencer from active eDP port %c\n",
2865 pipe_name(pipe), port_name(port));
a4a5d2f8 2866
a4a5d2f8 2867 /* make sure vdd is off before we steal it */
83b84597 2868 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2869 }
2870}
2871
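/*
 * Bind this crtc's pipe power sequencer to the port: drop any sequencer the
 * port used previously, steal the pipe's sequencer from another port if
 * needed, then reinitialize the PPS state and registers for this port.
 */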
2872static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2873{
2874 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2875 struct intel_encoder *encoder = &intel_dig_port->base;
2876 struct drm_device *dev = encoder->base.dev;
2877 struct drm_i915_private *dev_priv = dev->dev_private;
2878 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2879
2880 lockdep_assert_held(&dev_priv->pps_mutex);
2881
093e3f13
VS
2882 if (!is_edp(intel_dp))
2883 return;
2884
a4a5d2f8
VS
2885 if (intel_dp->pps_pipe == crtc->pipe)
2886 return;
2887
2888 /*
2889 * If another power sequencer was being used on this
2890 * port previously, make sure to turn off vdd there while
2891 * we still have control of it.
2892 */
2893 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2894 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2895
2896 /*
2897 * We may be stealing the power
2898 * sequencer from another port.
2899 */
2900 vlv_steal_power_sequencer(dev, crtc->pipe);
2901
2902 /* now it's all ours */
2903 intel_dp->pps_pipe = crtc->pipe;
2904
2905 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2906 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2907
2908 /* init power sequencer on this pipe and port */
36b5f425
VS
2909 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2910 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2911}
2912
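/* VLV: program the per-pipe DPIO PCS registers before enabling the port. */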
ab1f90f9 2913static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2914{
2bd2ad64 2915 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2916 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2917 struct drm_device *dev = encoder->base.dev;
89b667f8 2918 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2919 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2920 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2921 int pipe = intel_crtc->pipe;
2922 u32 val;
a4fc5ed6 2923
a580516d 2924 mutex_lock(&dev_priv->sb_lock);
89b667f8 2925
ab3c759a 2926 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2927 val = 0;
2928 if (pipe)
2929 val |= (1<<21);
2930 else
2931 val &= ~(1<<21);
2932 val |= 0x001000c4;
ab3c759a
CML
2933 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2934 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2935 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2936
a580516d 2937 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2938
2939 intel_enable_dp(encoder);
89b667f8
JB
2940}
2941
ecff4f3b 2942static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2943{
2944 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2945 struct drm_device *dev = encoder->base.dev;
2946 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2947 struct intel_crtc *intel_crtc =
2948 to_intel_crtc(encoder->base.crtc);
e4607fcf 2949 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2950 int pipe = intel_crtc->pipe;
89b667f8 2951
8ac33ed3
DV
2952 intel_dp_prepare(encoder);
2953
89b667f8 2954 /* Program Tx lane resets to default */
a580516d 2955 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2956 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2957 DPIO_PCS_TX_LANE2_RESET |
2958 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2959 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2960 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2961 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2962 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2963 DPIO_PCS_CLK_SOFT_RESET);
2964
2965 /* Fix up inter-pair skew failure */
ab3c759a
CML
2966 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2967 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2968 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2969 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2970}
2971
e4a1d846
CML
2972static void chv_pre_enable_dp(struct intel_encoder *encoder)
2973{
2974 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2975 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2976 struct drm_device *dev = encoder->base.dev;
2977 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2978 struct intel_crtc *intel_crtc =
2979 to_intel_crtc(encoder->base.crtc);
2980 enum dpio_channel ch = vlv_dport_to_channel(dport);
2981 int pipe = intel_crtc->pipe;
2e523e98 2982 int data, i, stagger;
949c1d43 2983 u32 val;
e4a1d846 2984
a580516d 2985 mutex_lock(&dev_priv->sb_lock);
949c1d43 2986
570e2a74
VS
2987 /* allow hardware to manage TX FIFO reset source */
2988 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2989 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2990 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2991
e0fce78f
VS
2992 if (intel_crtc->config->lane_count > 2) {
2993 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2994 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2995 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2996 }
570e2a74 2997
949c1d43 2998 /* Program Tx lane latency optimal setting*/
e0fce78f 2999 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 3000 /* Set the upar bit */
e0fce78f
VS
3001 if (intel_crtc->config->lane_count == 1)
3002 data = 0x0;
3003 else
3004 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
3005 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3006 data << DPIO_UPAR_SHIFT);
3007 }
3008
3009 /* Data lane stagger programming */
2e523e98
VS
3010 if (intel_crtc->config->port_clock > 270000)
3011 stagger = 0x18;
3012 else if (intel_crtc->config->port_clock > 135000)
3013 stagger = 0xd;
3014 else if (intel_crtc->config->port_clock > 67500)
3015 stagger = 0x7;
3016 else if (intel_crtc->config->port_clock > 33750)
3017 stagger = 0x4;
3018 else
3019 stagger = 0x2;
3020
3021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3022 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3023 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3024
e0fce78f
VS
3025 if (intel_crtc->config->lane_count > 2) {
3026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3027 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3028 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3029 }
2e523e98
VS
3030
3031 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3032 DPIO_LANESTAGGER_STRAP(stagger) |
3033 DPIO_LANESTAGGER_STRAP_OVRD |
3034 DPIO_TX1_STAGGER_MASK(0x1f) |
3035 DPIO_TX1_STAGGER_MULT(6) |
3036 DPIO_TX2_STAGGER_MULT(0));
3037
e0fce78f
VS
3038 if (intel_crtc->config->lane_count > 2) {
3039 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3040 DPIO_LANESTAGGER_STRAP(stagger) |
3041 DPIO_LANESTAGGER_STRAP_OVRD |
3042 DPIO_TX1_STAGGER_MASK(0x1f) |
3043 DPIO_TX1_STAGGER_MULT(7) |
3044 DPIO_TX2_STAGGER_MULT(5));
3045 }
e4a1d846 3046
a8f327fb
VS
3047 /* Deassert data lane reset */
3048 chv_data_lane_soft_reset(encoder, false);
3049
a580516d 3050 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 3051
e4a1d846 3052 intel_enable_dp(encoder);
b0b33846
VS
3053
3054 /* Second common lane will stay alive on its own now */
3055 if (dport->release_cl2_override) {
3056 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3057 dport->release_cl2_override = false;
3058 }
e4a1d846
CML
3059}
3060
9197c88b
VS
3061static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3062{
3063 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3064 struct drm_device *dev = encoder->base.dev;
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3066 struct intel_crtc *intel_crtc =
3067 to_intel_crtc(encoder->base.crtc);
3068 enum dpio_channel ch = vlv_dport_to_channel(dport);
3069 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
3070 unsigned int lane_mask =
3071 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
3072 u32 val;
3073
625695f8
VS
3074 intel_dp_prepare(encoder);
3075
b0b33846
VS
3076 /*
3077 * Must trick the second common lane into life.
3078 * Otherwise we can't even access the PLL.
3079 */
3080 if (ch == DPIO_CH0 && pipe == PIPE_B)
3081 dport->release_cl2_override =
3082 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3083
e0fce78f
VS
3084 chv_phy_powergate_lanes(encoder, true, lane_mask);
3085
a580516d 3086 mutex_lock(&dev_priv->sb_lock);
9197c88b 3087
a8f327fb
VS
3088 /* Assert data lane reset */
3089 chv_data_lane_soft_reset(encoder, true);
3090
b9e5ac3c
VS
3091 /* program left/right clock distribution */
3092 if (pipe != PIPE_B) {
3093 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3094 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3095 if (ch == DPIO_CH0)
3096 val |= CHV_BUFLEFTENA1_FORCE;
3097 if (ch == DPIO_CH1)
3098 val |= CHV_BUFRIGHTENA1_FORCE;
3099 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3100 } else {
3101 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3102 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3103 if (ch == DPIO_CH0)
3104 val |= CHV_BUFLEFTENA2_FORCE;
3105 if (ch == DPIO_CH1)
3106 val |= CHV_BUFRIGHTENA2_FORCE;
3107 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3108 }
3109
9197c88b
VS
3110 /* program clock channel usage */
3111 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3112 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3113 if (pipe != PIPE_B)
3114 val &= ~CHV_PCS_USEDCLKCHANNEL;
3115 else
3116 val |= CHV_PCS_USEDCLKCHANNEL;
3117 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3118
e0fce78f
VS
3119 if (intel_crtc->config->lane_count > 2) {
3120 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3121 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3122 if (pipe != PIPE_B)
3123 val &= ~CHV_PCS_USEDCLKCHANNEL;
3124 else
3125 val |= CHV_PCS_USEDCLKCHANNEL;
3126 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3127 }
9197c88b
VS
3128
3129 /*
3130 * This is a bit weird since generally CL
3131 * matches the pipe, but here we need to
3132 * pick the CL based on the port.
3133 */
3134 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3135 if (pipe != PIPE_B)
3136 val &= ~CHV_CMN_USEDCLKCHANNEL;
3137 else
3138 val |= CHV_CMN_USEDCLKCHANNEL;
3139 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3140
a580516d 3141 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
3142}
3143
d6db995f
VS
3144static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3145{
3146 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3147 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3148 u32 val;
3149
3150 mutex_lock(&dev_priv->sb_lock);
3151
3152 /* disable left/right clock distribution */
3153 if (pipe != PIPE_B) {
3154 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3155 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3156 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3157 } else {
3158 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3159 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3160 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3161 }
3162
3163 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3164
b0b33846
VS
3165 /*
3166 * Leave the power down bit cleared for at least one
3167 * lane so that chv_phy_powergate_ch() will power
3168 * on something when the channel is otherwise unused.
3169 * When the port is off and the override is removed
3170 * the lanes power down anyway, so otherwise it doesn't
3171 * really matter what the state of power down bits is
3172 * after this.
3173 */
e0fce78f 3174 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3175}
3176
a4fc5ed6 3177/*
df0c237d
JB
3178 * Native read with retry for link status and receiver capability reads for
3179 * cases where the sink may still be asleep.
9d1a1031
JN
3180 *
3181 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3182 * supposed to retry 3 times per the spec.
a4fc5ed6 3183 */
9d1a1031
JN
3184static ssize_t
3185intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3186 void *buffer, size_t size)
a4fc5ed6 3187{
9d1a1031
JN
3188 ssize_t ret;
3189 int i;
61da5fab 3190
f6a19066
VS
3191 /*
3192 * Sometimes we just get the same incorrect byte repeated
3193 * over the entire buffer. Doing just one throwaway read
3194 * initially seems to "solve" it.
3195 */
3196 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3197
61da5fab 3198 for (i = 0; i < 3; i++) {
9d1a1031
JN
3199 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3200 if (ret == size)
3201 return ret;
61da5fab
JB
3202 msleep(1);
3203 }
a4fc5ed6 3204
9d1a1031 3205 return ret;
a4fc5ed6
KP
3206}
3207
3208/*
3209 * Fetch AUX CH registers 0x202 - 0x207 which contain
3210 * link status information
3211 */
94223d04 3212bool
93f62dad 3213intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3214{
9d1a1031
JN
3215 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3216 DP_LANE0_1_STATUS,
3217 link_status,
3218 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3219}
3220
1100244e 3221/* These are source-specific values. */
94223d04 3222uint8_t
1a2eb460 3223intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3224{
30add22d 3225 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3226 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3227 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3228
9314726b
VK
3229 if (IS_BROXTON(dev))
3230 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3231 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3232 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3233 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3234 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
666a4537 3235 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
bd60018a 3236 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3237 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3238 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3239 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3240 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3241 else
bd60018a 3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3243}
3244
94223d04 3245uint8_t
1a2eb460
KP
3246intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3247{
30add22d 3248 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3249 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3250
5a9d1f1a
DL
3251 if (INTEL_INFO(dev)->gen >= 9) {
3252 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3253 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3254 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3255 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3256 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3257 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3258 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3259 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3260 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3261 default:
3262 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3263 }
3264 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3265 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3266 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3267 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3268 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3269 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3270 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3271 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3272 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3273 default:
bd60018a 3274 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3275 }
666a4537 3276 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
e2fa6fba 3277 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3285 default:
bd60018a 3286 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3287 }
bc7d38a4 3288 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3289 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3293 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3294 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3295 default:
bd60018a 3296 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3297 }
3298 } else {
3299 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3301 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3305 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3307 default:
bd60018a 3308 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3309 }
a4fc5ed6
KP
3310 }
3311}
3312
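/*
 * Illustrative sketch, not part of the driver: how the two source limits
 * above are typically applied to a sink-requested drive setting during
 * link training (condensed from the adjust-request handling; the helper
 * name and parameters are hypothetical). @v and @p are the requested
 * voltage swing and pre-emphasis fields for one lane.
 */
#if 0
static uint8_t intel_dp_clamp_train_set(struct intel_dp *intel_dp,
					uint8_t v, uint8_t p)
{
	uint8_t voltage_max = intel_dp_voltage_max(intel_dp);
	uint8_t preemph_max;

	/* Clamp to the source maximum and advertise that we hit it. */
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	/* The pre-emphasis limit depends on the chosen voltage swing. */
	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	return v | p;
}
#endif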
5829975c 3313static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3314{
3315 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3316 struct drm_i915_private *dev_priv = dev->dev_private;
3317 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3318 struct intel_crtc *intel_crtc =
3319 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3320 unsigned long demph_reg_value, preemph_reg_value,
3321 uniqtranscale_reg_value;
3322 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3323 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3324 int pipe = intel_crtc->pipe;
e2fa6fba
P
3325
3326 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3327 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3328 preemph_reg_value = 0x0004000;
3329 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3330 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3331 demph_reg_value = 0x2B405555;
3332 uniqtranscale_reg_value = 0x552AB83A;
3333 break;
bd60018a 3334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3335 demph_reg_value = 0x2B404040;
3336 uniqtranscale_reg_value = 0x5548B83A;
3337 break;
bd60018a 3338 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3339 demph_reg_value = 0x2B245555;
3340 uniqtranscale_reg_value = 0x5560B83A;
3341 break;
bd60018a 3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3343 demph_reg_value = 0x2B405555;
3344 uniqtranscale_reg_value = 0x5598DA3A;
3345 break;
3346 default:
3347 return 0;
3348 }
3349 break;
bd60018a 3350 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3351 preemph_reg_value = 0x0002000;
3352 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3353 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3354 demph_reg_value = 0x2B404040;
3355 uniqtranscale_reg_value = 0x5552B83A;
3356 break;
bd60018a 3357 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3358 demph_reg_value = 0x2B404848;
3359 uniqtranscale_reg_value = 0x5580B83A;
3360 break;
bd60018a 3361 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3362 demph_reg_value = 0x2B404040;
3363 uniqtranscale_reg_value = 0x55ADDA3A;
3364 break;
3365 default:
3366 return 0;
3367 }
3368 break;
bd60018a 3369 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3370 preemph_reg_value = 0x0000000;
3371 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3372 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3373 demph_reg_value = 0x2B305555;
3374 uniqtranscale_reg_value = 0x5570B83A;
3375 break;
bd60018a 3376 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3377 demph_reg_value = 0x2B2B4040;
3378 uniqtranscale_reg_value = 0x55ADDA3A;
3379 break;
3380 default:
3381 return 0;
3382 }
3383 break;
bd60018a 3384 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3385 preemph_reg_value = 0x0006000;
3386 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3387 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3388 demph_reg_value = 0x1B405555;
3389 uniqtranscale_reg_value = 0x55ADDA3A;
3390 break;
3391 default:
3392 return 0;
3393 }
3394 break;
3395 default:
3396 return 0;
3397 }
3398
a580516d 3399 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3400 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3401 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3402 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3403 uniqtranscale_reg_value);
ab3c759a
CML
3404 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3405 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3406 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3407 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3408 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3409
3410 return 0;
3411}
3412
67fa24b4
VS
3413static bool chv_need_uniq_trans_scale(uint8_t train_set)
3414{
3415 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3416 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3417}
3418
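/*
 * Illustrative example (hypothetical inputs): the unique transition scale
 * is only needed for the single max-swing / zero-pre-emphasis entry, e.g.
 *
 *   chv_need_uniq_trans_scale(DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
 *                             DP_TRAIN_PRE_EMPH_LEVEL_0)  -> true
 *   chv_need_uniq_trans_scale(DP_TRAIN_VOLTAGE_SWING_LEVEL_2 |
 *                             DP_TRAIN_PRE_EMPH_LEVEL_0)  -> false
 */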
5829975c 3419static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3420{
3421 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3422 struct drm_i915_private *dev_priv = dev->dev_private;
3423 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3424 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3425 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3426 uint8_t train_set = intel_dp->train_set[0];
3427 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3428 enum pipe pipe = intel_crtc->pipe;
3429 int i;
e4a1d846
CML
3430
3431 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3432 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3433 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3434 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3435 deemph_reg_value = 128;
3436 margin_reg_value = 52;
3437 break;
bd60018a 3438 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3439 deemph_reg_value = 128;
3440 margin_reg_value = 77;
3441 break;
bd60018a 3442 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3443 deemph_reg_value = 128;
3444 margin_reg_value = 102;
3445 break;
bd60018a 3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3447 deemph_reg_value = 128;
3448 margin_reg_value = 154;
3449 /* FIXME: extra settings needed for the 1200 mV case */
3450 break;
3451 default:
3452 return 0;
3453 }
3454 break;
bd60018a 3455 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3456 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3457 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3458 deemph_reg_value = 85;
3459 margin_reg_value = 78;
3460 break;
bd60018a 3461 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3462 deemph_reg_value = 85;
3463 margin_reg_value = 116;
3464 break;
bd60018a 3465 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3466 deemph_reg_value = 85;
3467 margin_reg_value = 154;
3468 break;
3469 default:
3470 return 0;
3471 }
3472 break;
bd60018a 3473 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3474 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3475 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3476 deemph_reg_value = 64;
3477 margin_reg_value = 104;
3478 break;
bd60018a 3479 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3480 deemph_reg_value = 64;
3481 margin_reg_value = 154;
3482 break;
3483 default:
3484 return 0;
3485 }
3486 break;
bd60018a 3487 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3488 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3489 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3490 deemph_reg_value = 43;
3491 margin_reg_value = 154;
3492 break;
3493 default:
3494 return 0;
3495 }
3496 break;
3497 default:
3498 return 0;
3499 }
3500
a580516d 3501 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3502
3503 /* Clear calc init */
1966e59e
VS
3504 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3505 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3506 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3507 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3508 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3509
e0fce78f
VS
3510 if (intel_crtc->config->lane_count > 2) {
3511 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3512 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3513 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3514 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3515 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3516 }
e4a1d846 3517
a02ef3c7
VS
3518 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3519 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3520 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3521 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3522
e0fce78f
VS
3523 if (intel_crtc->config->lane_count > 2) {
3524 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3525 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3526 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3527 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3528 }
a02ef3c7 3529
e4a1d846 3530 /* Program swing deemph */
e0fce78f 3531 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3532 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3533 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3534 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3535 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3536 }
e4a1d846
CML
3537
3538 /* Program swing margin */
e0fce78f 3539 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3540 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3541
1fb44505
VS
3542 val &= ~DPIO_SWING_MARGIN000_MASK;
3543 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3544
3545 /*
3546 * Supposedly this value shouldn't matter when unique transition
3547 * scale is disabled, but in fact it does matter. Let's just
3548 * always program the same value and hope it's OK.
3549 */
3550 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3551 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3552
f72df8db
VS
3553 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3554 }
e4a1d846 3555
67fa24b4
VS
3556 /*
3557 * The document says to set bit 27 for ch0 and bit 26 for ch1,
3558 * which might be a typo in the doc.
3559 * For now, set bit 27 for both ch0 and ch1 for this unique
3560 * transition scale selection.
3561 */
e0fce78f 3562 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3563 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3564 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3565 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3566 else
3567 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3568 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3569 }
3570
3571 /* Start swing calculation */
1966e59e
VS
3572 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3573 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3574 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3575
e0fce78f
VS
3576 if (intel_crtc->config->lane_count > 2) {
3577 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3578 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3579 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3580 }
e4a1d846 3581
a580516d 3582 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3583
3584 return 0;
3585}
3586
a4fc5ed6 3587static uint32_t
5829975c 3588gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3589{
3cf2efb1 3590 uint32_t signal_levels = 0;
a4fc5ed6 3591
3cf2efb1 3592 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3593 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3594 default:
3595 signal_levels |= DP_VOLTAGE_0_4;
3596 break;
bd60018a 3597 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3598 signal_levels |= DP_VOLTAGE_0_6;
3599 break;
bd60018a 3600 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3601 signal_levels |= DP_VOLTAGE_0_8;
3602 break;
bd60018a 3603 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3604 signal_levels |= DP_VOLTAGE_1_2;
3605 break;
3606 }
3cf2efb1 3607 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3608 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3609 default:
3610 signal_levels |= DP_PRE_EMPHASIS_0;
3611 break;
bd60018a 3612 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3613 signal_levels |= DP_PRE_EMPHASIS_3_5;
3614 break;
bd60018a 3615 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3616 signal_levels |= DP_PRE_EMPHASIS_6;
3617 break;
bd60018a 3618 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3619 signal_levels |= DP_PRE_EMPHASIS_9_5;
3620 break;
3621 }
3622 return signal_levels;
3623}
3624
e3421a18
ZW
3625/* Gen6's DP voltage swing and pre-emphasis control */
3626static uint32_t
5829975c 3627gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3628{
3c5a62b5
YL
3629 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3630 DP_TRAIN_PRE_EMPHASIS_MASK);
3631 switch (signal_levels) {
bd60018a
SJ
3632 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3633 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3634 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3635 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3636 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3638 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3639 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3640 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3642 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3643 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3645 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3646 default:
3c5a62b5
YL
3647 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3648 "0x%x\n", signal_levels);
3649 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3650 }
3651}
3652
1a2eb460
KP
3653/* Gen7's DP voltage swing and pre-emphasis control */
3654static uint32_t
5829975c 3655gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3656{
3657 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3658 DP_TRAIN_PRE_EMPHASIS_MASK);
3659 switch (signal_levels) {
bd60018a 3660 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3661 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3662 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3663 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3664 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3665 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3666
bd60018a 3667 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3668 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3669 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3670 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3671
bd60018a 3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3673 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3675 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3676
3677 default:
3678 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3679 "0x%x\n", signal_levels);
3680 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3681 }
3682}
3683
94223d04 3684void
f4eb692e 3685intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e
PZ
3686{
3687 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3688 enum port port = intel_dig_port->port;
f0a3424e 3689 struct drm_device *dev = intel_dig_port->base.base.dev;
b905a915 3690 struct drm_i915_private *dev_priv = to_i915(dev);
f8896f5d 3691 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3692 uint8_t train_set = intel_dp->train_set[0];
3693
f8896f5d
DW
3694 if (HAS_DDI(dev)) {
3695 signal_levels = ddi_signal_levels(intel_dp);
3696
3697 if (IS_BROXTON(dev))
3698 signal_levels = 0;
3699 else
3700 mask = DDI_BUF_EMP_MASK;
e4a1d846 3701 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3702 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3703 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3704 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3705 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3706 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3707 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3708 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3709 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3710 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3711 } else {
5829975c 3712 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3713 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3714 }
3715
96fb9f9b
VK
3716 if (mask)
3717 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3718
3719 DRM_DEBUG_KMS("Using vswing level %d\n",
3720 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3721 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3722 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3723 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3724
f4eb692e 3725 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3726
3727 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3728 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3729}
3730
94223d04 3731void
e9c176d5
ACO
3732intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3733 uint8_t dp_train_pat)
a4fc5ed6 3734{
174edf1f 3735 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3736 struct drm_i915_private *dev_priv =
3737 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3738
f4eb692e 3739 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3740
f4eb692e 3741 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3742 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3743}
3744
94223d04 3745void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637
ID
3746{
3747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3748 struct drm_device *dev = intel_dig_port->base.base.dev;
3749 struct drm_i915_private *dev_priv = dev->dev_private;
3750 enum port port = intel_dig_port->port;
3751 uint32_t val;
3752
3753 if (!HAS_DDI(dev))
3754 return;
3755
3756 val = I915_READ(DP_TP_CTL(port));
3757 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3758 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3759 I915_WRITE(DP_TP_CTL(port), val);
3760
3761 /*
3762 * On PORT_A we can have only eDP in SST mode. There, the only reason
3763 * we need to set idle transmission mode is to work around a HW issue
3764 * where we enable the pipe while not in idle link-training mode.
3765 * In this case there is a requirement to wait for a minimum number of
3766 * idle patterns to be sent.
3767 */
3768 if (port == PORT_A)
3769 return;
3770
3771 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3772 1))
3773 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3774}
3775
a4fc5ed6 3776static void
ea5b213a 3777intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3778{
da63a9f2 3779 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3780 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3781 enum port port = intel_dig_port->port;
da63a9f2 3782 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3783 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3784 uint32_t DP = intel_dp->DP;
a4fc5ed6 3785
bc76e320 3786 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3787 return;
3788
0c33d8d7 3789 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3790 return;
3791
28c97730 3792 DRM_DEBUG_KMS("\n");
32f9d658 3793
39e5fa88
VS
3794 if ((IS_GEN7(dev) && port == PORT_A) ||
3795 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3796 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3797 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3798 } else {
aad3d14d
VS
3799 if (IS_CHERRYVIEW(dev))
3800 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3801 else
3802 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3803 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3804 }
1612c8bd 3805 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3806 POSTING_READ(intel_dp->output_reg);
5eb08b69 3807
1612c8bd
VS
3808 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3809 I915_WRITE(intel_dp->output_reg, DP);
3810 POSTING_READ(intel_dp->output_reg);
3811
3812 /*
3813 * HW workaround for IBX, we need to move the port
3814 * to transcoder A after disabling it to allow the
3815 * matching HDMI port to be enabled on transcoder A.
3816 */
3817 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3818 /*
3819 * We get CPU/PCH FIFO underruns on the other pipe when
3820 * doing the workaround. Sweep them under the rug.
3821 */
3822 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3823 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3824
1612c8bd
VS
3825 /* always enable with pattern 1 (as per spec) */
3826 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3827 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3828 I915_WRITE(intel_dp->output_reg, DP);
3829 POSTING_READ(intel_dp->output_reg);
3830
3831 DP &= ~DP_PORT_EN;
5bddd17f 3832 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3833 POSTING_READ(intel_dp->output_reg);
0c241d5b
VS
3834
3835 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3836 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3837 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
3838 }
3839
f01eca2e 3840 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
3841
3842 intel_dp->DP = DP;
a4fc5ed6
KP
3843}
3844
26d61aad
KP
3845static bool
3846intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3847{
a031d709
RV
3848 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3849 struct drm_device *dev = dig_port->base.base.dev;
3850 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3851 uint8_t rev;
a031d709 3852
9d1a1031
JN
3853 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3854 sizeof(intel_dp->dpcd)) < 0)
edb39244 3855 return false; /* aux transfer failed */
92fd8fd1 3856
a8e98153 3857 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3858
edb39244
AJ
3859 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3860 return false; /* DPCD not present */
3861
2293bb5c
SK
3862 /* Check if the panel supports PSR */
3863 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3864 if (is_edp(intel_dp)) {
9d1a1031
JN
3865 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3866 intel_dp->psr_dpcd,
3867 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3868 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3869 dev_priv->psr.sink_support = true;
50003939 3870 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3871 }
474d1ec4
SJ
3872
3873 if (INTEL_INFO(dev)->gen >= 9 &&
3874 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3875 uint8_t frame_sync_cap;
3876
3877 dev_priv->psr.sink_support = true;
3878 intel_dp_dpcd_read_wake(&intel_dp->aux,
3879 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3880 &frame_sync_cap, 1);
3881 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3882 /* PSR2 needs frame sync as well */
3883 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3884 DRM_DEBUG_KMS("PSR2 %s on sink",
3885 dev_priv->psr.psr2_support ? "supported" : "not supported");
3886 }
50003939
JN
3887 }
3888
bc5133d5 3889 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
e588fa18 3890 yesno(intel_dp_source_supports_hbr2(intel_dp)),
742f491d 3891 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
06ea66b6 3892
fc0f8e25
SJ
3893 /* Intermediate frequency support */
3894 if (is_edp(intel_dp) &&
3895 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3896 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3897 (rev >= 0x03)) { /* eDp v1.4 or higher */
94ca719e 3898 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
3899 int i;
3900
fc0f8e25
SJ
3901 intel_dp_dpcd_read_wake(&intel_dp->aux,
3902 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
3903 sink_rates,
3904 sizeof(sink_rates));
ea2d8a42 3905
94ca719e
VS
3906 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3907 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
3908
3909 if (val == 0)
3910 break;
3911
af77b974
SJ
3912 /* Entries are in 200 kHz units; the drm clock is stored in deca-kHz (10 kHz) units */
3913 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 3914 }
94ca719e 3915 intel_dp->num_sink_rates = i;
fc0f8e25 3916 }
0336400e
VS
3917
3918 intel_dp_print_rates(intel_dp);
3919
edb39244
AJ
3920 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3921 DP_DWN_STRM_PORT_PRESENT))
3922 return true; /* native DP sink */
3923
3924 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3925 return true; /* no per-port downstream info */
3926
9d1a1031
JN
3927 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3928 intel_dp->downstream_ports,
3929 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
3930 return false; /* downstream port status fetch failed */
3931
3932 return true;
92fd8fd1
KP
3933}
3934
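/*
 * Illustrative sketch, not part of the driver: the DP_SUPPORTED_LINK_RATES
 * entries parsed above are 16-bit little-endian values interpreted in
 * 200 kHz units, while the driver keeps link rates in 10 kHz units, which
 * is what the (val * 200) / 10 conversion expresses. For example a raw
 * entry of 27000 (27000 * 200 kHz = 5.4 GHz) is stored as 540000.
 * The helper name below is made up.
 */
#if 0
static int intel_dp_raw_sink_rate_to_10khz(u16 raw)
{
	/* 200 kHz units -> 10 kHz units */
	return ((int)raw * 200) / 10;
}
#endif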
0d198328
AJ
3935static void
3936intel_dp_probe_oui(struct intel_dp *intel_dp)
3937{
3938 u8 buf[3];
3939
3940 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3941 return;
3942
9d1a1031 3943 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
3944 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3945 buf[0], buf[1], buf[2]);
3946
9d1a1031 3947 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
3948 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3949 buf[0], buf[1], buf[2]);
3950}
3951
0e32b39c
DA
3952static bool
3953intel_dp_probe_mst(struct intel_dp *intel_dp)
3954{
3955 u8 buf[1];
3956
3957 if (!intel_dp->can_mst)
3958 return false;
3959
3960 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3961 return false;
3962
0e32b39c
DA
3963 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3964 if (buf[0] & DP_MST_CAP) {
3965 DRM_DEBUG_KMS("Sink is MST capable\n");
3966 intel_dp->is_mst = true;
3967 } else {
3968 DRM_DEBUG_KMS("Sink is not MST capable\n");
3969 intel_dp->is_mst = false;
3970 }
3971 }
0e32b39c
DA
3972
3973 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3974 return intel_dp->is_mst;
3975}
3976
e5a1cab5 3977static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 3978{
082dcc7c 3979 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 3980 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c 3981 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 3982 u8 buf;
e5a1cab5 3983 int ret = 0;
c6297843
RV
3984 int count = 0;
3985 int attempts = 10;
d2e216d0 3986
082dcc7c
RV
3987 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3988 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3989 ret = -EIO;
3990 goto out;
4373f0f2
PZ
3991 }
3992
082dcc7c 3993 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 3994 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 3995 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
3996 ret = -EIO;
3997 goto out;
3998 }
d2e216d0 3999
c6297843
RV
4000 do {
4001 intel_wait_for_vblank(dev, intel_crtc->pipe);
4002
4003 if (drm_dp_dpcd_readb(&intel_dp->aux,
4004 DP_TEST_SINK_MISC, &buf) < 0) {
4005 ret = -EIO;
4006 goto out;
4007 }
4008 count = buf & DP_TEST_COUNT_MASK;
4009 } while (--attempts && count);
4010
4011 if (attempts == 0) {
4012 DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
4013 ret = -ETIMEDOUT;
4014 }
4015
e5a1cab5 4016 out:
082dcc7c 4017 hsw_enable_ips(intel_crtc);
e5a1cab5 4018 return ret;
082dcc7c
RV
4019}
4020
4021static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4022{
4023 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
d72f9d91 4024 struct drm_device *dev = dig_port->base.base.dev;
082dcc7c
RV
4025 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4026 u8 buf;
e5a1cab5
RV
4027 int ret;
4028
082dcc7c
RV
4029 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4030 return -EIO;
4031
4032 if (!(buf & DP_TEST_CRC_SUPPORTED))
4033 return -ENOTTY;
4034
4035 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4036 return -EIO;
4037
6d8175da
RV
4038 if (buf & DP_TEST_SINK_START) {
4039 ret = intel_dp_sink_crc_stop(intel_dp);
4040 if (ret)
4041 return ret;
4042 }
4043
082dcc7c 4044 hsw_disable_ips(intel_crtc);
1dda5f93 4045
9d1a1031 4046 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4047 buf | DP_TEST_SINK_START) < 0) {
4048 hsw_enable_ips(intel_crtc);
4049 return -EIO;
4373f0f2
PZ
4050 }
4051
d72f9d91 4052 intel_wait_for_vblank(dev, intel_crtc->pipe);
082dcc7c
RV
4053 return 0;
4054}
4055
4056int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4057{
4058 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4059 struct drm_device *dev = dig_port->base.base.dev;
4060 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4061 u8 buf;
621d4c76 4062 int count, ret;
082dcc7c 4063 int attempts = 6;
082dcc7c
RV
4064
4065 ret = intel_dp_sink_crc_start(intel_dp);
4066 if (ret)
4067 return ret;
4068
ad9dc91b 4069 do {
621d4c76
RV
4070 intel_wait_for_vblank(dev, intel_crtc->pipe);
4071
1dda5f93 4072 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4073 DP_TEST_SINK_MISC, &buf) < 0) {
4074 ret = -EIO;
afe0d67e 4075 goto stop;
4373f0f2 4076 }
621d4c76 4077 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4078
7e38eeff 4079 } while (--attempts && count == 0);
ad9dc91b
RV
4080
4081 if (attempts == 0) {
7e38eeff
RV
4082 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4083 ret = -ETIMEDOUT;
4084 goto stop;
4085 }
4086
4087 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4088 ret = -EIO;
4089 goto stop;
ad9dc91b 4090 }
d2e216d0 4091
afe0d67e 4092stop:
082dcc7c 4093 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4094 return ret;
d2e216d0
RV
4095}
4096
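/*
 * Illustrative sketch, not part of the driver: a hypothetical caller (for
 * example a test or debugfs hook) fetching one sink CRC sample.
 * intel_dp_sink_crc() starts the sink test, waits for the CRC counter to
 * become non-zero, reads the six CRC bytes and stops the test again.
 */
#if 0
static void intel_dp_dump_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %6ph\n", crc);
}
#endif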
a60f0e38
JB
4097static bool
4098intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4099{
9d1a1031
JN
4100 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4101 DP_DEVICE_SERVICE_IRQ_VECTOR,
4102 sink_irq_vector, 1) == 1;
a60f0e38
JB
4103}
4104
0e32b39c
DA
4105static bool
4106intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4107{
4108 int ret;
4109
4110 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4111 DP_SINK_COUNT_ESI,
4112 sink_irq_vector, 14);
4113 if (ret != 14)
4114 return false;
4115
4116 return true;
4117}
4118
c5d5ab7a
TP
4119static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4120{
4121 uint8_t test_result = DP_TEST_ACK;
4122 return test_result;
4123}
4124
4125static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4126{
4127 uint8_t test_result = DP_TEST_NAK;
4128 return test_result;
4129}
4130
4131static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4132{
c5d5ab7a 4133 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4134 struct intel_connector *intel_connector = intel_dp->attached_connector;
4135 struct drm_connector *connector = &intel_connector->base;
4136
4137 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4138 connector->edid_corrupt ||
559be30c
TP
4139 intel_dp->aux.i2c_defer_count > 6) {
4140 /* Check EDID read for NACKs, DEFERs and corruption
4141 * (DP CTS 1.2 Core r1.1)
4142 * 4.2.2.4 : Failed EDID read, I2C_NAK
4143 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4144 * 4.2.2.6 : EDID corruption detected
4145 * Use failsafe mode for all cases
4146 */
4147 if (intel_dp->aux.i2c_nack_count > 0 ||
4148 intel_dp->aux.i2c_defer_count > 0)
4149 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4150 intel_dp->aux.i2c_nack_count,
4151 intel_dp->aux.i2c_defer_count);
4152 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4153 } else {
f79b468e
TS
4154 struct edid *block = intel_connector->detect_edid;
4155
4156 /* We have to write the checksum
4157 * of the last block read
4158 */
4159 block += intel_connector->detect_edid->extensions;
4160
559be30c
TP
4161 if (!drm_dp_dpcd_write(&intel_dp->aux,
4162 DP_TEST_EDID_CHECKSUM,
f79b468e 4163 &block->checksum,
5a1cc655 4164 1))
559be30c
TP
4165 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4166
4167 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4168 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4169 }
4170
4171 /* Set test active flag here so userspace doesn't interrupt things */
4172 intel_dp->compliance_test_active = 1;
4173
c5d5ab7a
TP
4174 return test_result;
4175}
4176
4177static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4178{
c5d5ab7a
TP
4179 uint8_t test_result = DP_TEST_NAK;
4180 return test_result;
4181}
4182
4183static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4184{
4185 uint8_t response = DP_TEST_NAK;
4186 uint8_t rxdata = 0;
4187 int status = 0;
4188
c5d5ab7a
TP
4189 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4190 if (status <= 0) {
4191 DRM_DEBUG_KMS("Could not read test request from sink\n");
4192 goto update_status;
4193 }
4194
4195 switch (rxdata) {
4196 case DP_TEST_LINK_TRAINING:
4197 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4198 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4199 response = intel_dp_autotest_link_training(intel_dp);
4200 break;
4201 case DP_TEST_LINK_VIDEO_PATTERN:
4202 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4203 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4204 response = intel_dp_autotest_video_pattern(intel_dp);
4205 break;
4206 case DP_TEST_LINK_EDID_READ:
4207 DRM_DEBUG_KMS("EDID test requested\n");
4208 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4209 response = intel_dp_autotest_edid(intel_dp);
4210 break;
4211 case DP_TEST_LINK_PHY_TEST_PATTERN:
4212 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4213 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4214 response = intel_dp_autotest_phy_pattern(intel_dp);
4215 break;
4216 default:
4217 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4218 break;
4219 }
4220
4221update_status:
4222 status = drm_dp_dpcd_write(&intel_dp->aux,
4223 DP_TEST_RESPONSE,
4224 &response, 1);
4225 if (status <= 0)
4226 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4227}
4228
0e32b39c
DA
4229static int
4230intel_dp_check_mst_status(struct intel_dp *intel_dp)
4231{
4232 bool bret;
4233
4234 if (intel_dp->is_mst) {
4235 u8 esi[16] = { 0 };
4236 int ret = 0;
4237 int retry;
4238 bool handled;
4239 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4240go_again:
4241 if (bret == true) {
4242
4243 /* check link status - esi[10] = 0x200c */
90a6b7b0 4244 if (intel_dp->active_mst_links &&
901c2daf 4245 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4246 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4247 intel_dp_start_link_train(intel_dp);
0e32b39c
DA
4248 intel_dp_stop_link_train(intel_dp);
4249 }
4250
6f34cc39 4251 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4252 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4253
4254 if (handled) {
4255 for (retry = 0; retry < 3; retry++) {
4256 int wret;
4257 wret = drm_dp_dpcd_write(&intel_dp->aux,
4258 DP_SINK_COUNT_ESI+1,
4259 &esi[1], 3);
4260 if (wret == 3) {
4261 break;
4262 }
4263 }
4264
4265 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4266 if (bret == true) {
6f34cc39 4267 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4268 goto go_again;
4269 }
4270 } else
4271 ret = 0;
4272
4273 return ret;
4274 } else {
4275 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4276 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4277 intel_dp->is_mst = false;
4278 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4279 /* send a hotplug event */
4280 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4281 }
4282 }
4283 return -EINVAL;
4284}
4285
a4fc5ed6
KP
4286/*
4287 * According to DP spec
4288 * 5.1.2:
4289 * 1. Read DPCD
4290 * 2. Configure link according to Receiver Capabilities
4291 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4292 * 4. Check link status on receipt of hot-plug interrupt
4293 */
a5146200 4294static void
ea5b213a 4295intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4296{
5b215bcf 4297 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4298 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4299 u8 sink_irq_vector;
93f62dad 4300 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4301
5b215bcf
DA
4302 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4303
4df6960e
SS
4304 /*
4305 * Clearing compliance test variables to allow capturing
4306 * of values for next automated test request.
4307 */
4308 intel_dp->compliance_test_active = 0;
4309 intel_dp->compliance_test_type = 0;
4310 intel_dp->compliance_test_data = 0;
4311
e02f9a06 4312 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4313 return;
4314
1a125d8a
ID
4315 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4316 return;
4317
92fd8fd1 4318 /* Try to read receiver status if the link appears to be up */
93f62dad 4319 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4320 return;
4321 }
4322
92fd8fd1 4323 /* Now read the DPCD to see if it's actually running */
26d61aad 4324 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4325 return;
4326 }
4327
a60f0e38
JB
4328 /* Try to read the source of the interrupt */
4329 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4330 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4331 /* Clear interrupt source */
9d1a1031
JN
4332 drm_dp_dpcd_writeb(&intel_dp->aux,
4333 DP_DEVICE_SERVICE_IRQ_VECTOR,
4334 sink_irq_vector);
a60f0e38
JB
4335
4336 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4337 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4338 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4339 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4340 }
4341
14631e9d
SS
4342 /* if link training is requested we should always perform it */
4343 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4344 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
92fd8fd1 4345 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4346 intel_encoder->base.name);
33a34e4e 4347 intel_dp_start_link_train(intel_dp);
3ab9c637 4348 intel_dp_stop_link_train(intel_dp);
33a34e4e 4349 }
a4fc5ed6 4350}
a4fc5ed6 4351
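/*
 * Illustrative sketch, not part of the driver: intel_dp_check_link_status()
 * asserts that connection_mutex is held, so a hypothetical caller on a
 * hotplug path would wrap it roughly like this (function name made up).
 */
#if 0
static void intel_dp_recheck_link(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	intel_dp_check_link_status(intel_dp);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
#endif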
caf9ab24 4352/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4353static enum drm_connector_status
26d61aad 4354intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4355{
caf9ab24 4356 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4357 uint8_t type;
4358
4359 if (!intel_dp_get_dpcd(intel_dp))
4360 return connector_status_disconnected;
4361
4362 /* if there's no downstream port, we're done */
4363 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4364 return connector_status_connected;
caf9ab24
AJ
4365
4366 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4367 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4368 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4369 uint8_t reg;
9d1a1031
JN
4370
4371 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4372 &reg, 1) < 0)
caf9ab24 4373 return connector_status_unknown;
9d1a1031 4374
23235177
AJ
4375 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4376 : connector_status_disconnected;
caf9ab24
AJ
4377 }
4378
4379 /* If no HPD, poke DDC gently */
0b99836f 4380 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4381 return connector_status_connected;
caf9ab24
AJ
4382
4383 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4384 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4385 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4386 if (type == DP_DS_PORT_TYPE_VGA ||
4387 type == DP_DS_PORT_TYPE_NON_EDID)
4388 return connector_status_unknown;
4389 } else {
4390 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4391 DP_DWN_STRM_PORT_TYPE_MASK;
4392 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4393 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4394 return connector_status_unknown;
4395 }
caf9ab24
AJ
4396
4397 /* Anything else is out of spec, warn and ignore */
4398 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4399 return connector_status_disconnected;
71ba9000
AJ
4400}
4401
d410b56d
CW
4402static enum drm_connector_status
4403edp_detect(struct intel_dp *intel_dp)
4404{
4405 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4406 enum drm_connector_status status;
4407
4408 status = intel_panel_detect(dev);
4409 if (status == connector_status_unknown)
4410 status = connector_status_connected;
4411
4412 return status;
4413}
4414
b93433cc
JN
4415static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4416 struct intel_digital_port *port)
5eb08b69 4417{
b93433cc 4418 u32 bit;
01cb9ea6 4419
0df53b77
JN
4420 switch (port->port) {
4421 case PORT_A:
4422 return true;
4423 case PORT_B:
4424 bit = SDE_PORTB_HOTPLUG;
4425 break;
4426 case PORT_C:
4427 bit = SDE_PORTC_HOTPLUG;
4428 break;
4429 case PORT_D:
4430 bit = SDE_PORTD_HOTPLUG;
4431 break;
4432 default:
4433 MISSING_CASE(port->port);
4434 return false;
4435 }
4436
4437 return I915_READ(SDEISR) & bit;
4438}
4439
4440static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4441 struct intel_digital_port *port)
4442{
4443 u32 bit;
4444
4445 switch (port->port) {
4446 case PORT_A:
4447 return true;
4448 case PORT_B:
4449 bit = SDE_PORTB_HOTPLUG_CPT;
4450 break;
4451 case PORT_C:
4452 bit = SDE_PORTC_HOTPLUG_CPT;
4453 break;
4454 case PORT_D:
4455 bit = SDE_PORTD_HOTPLUG_CPT;
4456 break;
a78695d3
JN
4457 case PORT_E:
4458 bit = SDE_PORTE_HOTPLUG_SPT;
4459 break;
0df53b77
JN
4460 default:
4461 MISSING_CASE(port->port);
4462 return false;
b93433cc 4463 }
1b469639 4464
b93433cc 4465 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4466}
4467
7e66bcf2 4468static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4469 struct intel_digital_port *port)
a4fc5ed6 4470{
9642c81c 4471 u32 bit;
5eb08b69 4472
9642c81c
JN
4473 switch (port->port) {
4474 case PORT_B:
4475 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4476 break;
4477 case PORT_C:
4478 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4479 break;
4480 case PORT_D:
4481 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4482 break;
4483 default:
4484 MISSING_CASE(port->port);
4485 return false;
4486 }
4487
4488 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4489}
4490
4491static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4492 struct intel_digital_port *port)
4493{
4494 u32 bit;
4495
4496 switch (port->port) {
4497 case PORT_B:
4498 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4499 break;
4500 case PORT_C:
4501 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4502 break;
4503 case PORT_D:
4504 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4505 break;
4506 default:
4507 MISSING_CASE(port->port);
4508 return false;
a4fc5ed6
KP
4509 }
4510
1d245987 4511 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4512}
4513
e464bfde 4514static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
e2ec35a5 4515 struct intel_digital_port *intel_dig_port)
e464bfde 4516{
e2ec35a5
SJ
4517 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4518 enum port port;
e464bfde
JN
4519 u32 bit;
4520
e2ec35a5
SJ
4521 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4522 switch (port) {
e464bfde
JN
4523 case PORT_A:
4524 bit = BXT_DE_PORT_HP_DDIA;
4525 break;
4526 case PORT_B:
4527 bit = BXT_DE_PORT_HP_DDIB;
4528 break;
4529 case PORT_C:
4530 bit = BXT_DE_PORT_HP_DDIC;
4531 break;
4532 default:
e2ec35a5 4533 MISSING_CASE(port);
e464bfde
JN
4534 return false;
4535 }
4536
4537 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4538}
4539
7e66bcf2
JN
4540/*
4541 * intel_digital_port_connected - is the specified port connected?
4542 * @dev_priv: i915 private structure
4543 * @port: the port to test
4544 *
4545 * Return %true if @port is connected, %false otherwise.
4546 */
237ed86c 4547bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
7e66bcf2
JN
4548 struct intel_digital_port *port)
4549{
0df53b77 4550 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4551 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4552 if (HAS_PCH_SPLIT(dev_priv))
4553 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4554 else if (IS_BROXTON(dev_priv))
4555 return bxt_digital_port_connected(dev_priv, port);
666a4537 4556 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
9642c81c 4557 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4558 else
4559 return g4x_digital_port_connected(dev_priv, port);
4560}
4561
8c241fef 4562static struct edid *
beb60608 4563intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4564{
beb60608 4565 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4566
9cd300e0
JN
4567 /* use cached edid if we have one */
4568 if (intel_connector->edid) {
9cd300e0
JN
4569 /* invalid edid */
4570 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4571 return NULL;
4572
55e9edeb 4573 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4574 } else
4575 return drm_get_edid(&intel_connector->base,
4576 &intel_dp->aux.ddc);
4577}
8c241fef 4578
beb60608
CW
4579static void
4580intel_dp_set_edid(struct intel_dp *intel_dp)
4581{
4582 struct intel_connector *intel_connector = intel_dp->attached_connector;
4583 struct edid *edid;
8c241fef 4584
beb60608
CW
4585 edid = intel_dp_get_edid(intel_dp);
4586 intel_connector->detect_edid = edid;
4587
4588 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4589 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4590 else
4591 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4592}
4593
beb60608
CW
4594static void
4595intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4596{
beb60608 4597 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4598
beb60608
CW
4599 kfree(intel_connector->detect_edid);
4600 intel_connector->detect_edid = NULL;
9cd300e0 4601
beb60608
CW
4602 intel_dp->has_audio = false;
4603}
d6f24d0f 4604
a9756bb5
ZW
4605static enum drm_connector_status
4606intel_dp_detect(struct drm_connector *connector, bool force)
4607{
4608 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4609 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4610 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4611 struct drm_device *dev = connector->dev;
a9756bb5 4612 enum drm_connector_status status;
671dedd2 4613 enum intel_display_power_domain power_domain;
0e32b39c 4614 bool ret;
09b1eb13 4615 u8 sink_irq_vector;
a9756bb5 4616
164c8598 4617 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4618 connector->base.id, connector->name);
beb60608 4619 intel_dp_unset_edid(intel_dp);
164c8598 4620
0e32b39c
DA
4621 if (intel_dp->is_mst) {
4622 /* MST devices are disconnected from a monitor POV */
4623 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4624 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4625 return connector_status_disconnected;
0e32b39c
DA
4626 }
4627
25f78f58
VS
4628 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4629 intel_display_power_get(to_i915(dev), power_domain);
a9756bb5 4630
d410b56d
CW
4631 /* Can't disconnect eDP, but you can close the lid... */
4632 if (is_edp(intel_dp))
4633 status = edp_detect(intel_dp);
c555a81d
ACO
4634 else if (intel_digital_port_connected(to_i915(dev),
4635 dp_to_dig_port(intel_dp)))
4636 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 4637 else
c555a81d
ACO
4638 status = connector_status_disconnected;
4639
4df6960e
SS
4640 if (status != connector_status_connected) {
4641 intel_dp->compliance_test_active = 0;
4642 intel_dp->compliance_test_type = 0;
4643 intel_dp->compliance_test_data = 0;
4644
c8c8fb33 4645 goto out;
4df6960e 4646 }
a9756bb5 4647
0d198328
AJ
4648 intel_dp_probe_oui(intel_dp);
4649
0e32b39c
DA
4650 ret = intel_dp_probe_mst(intel_dp);
4651 if (ret) {
4652 /* if we are in MST mode then this connector
4653 won't appear connected or have anything with EDID on it */
4654 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4655 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4656 status = connector_status_disconnected;
4657 goto out;
4658 }
4659
4df6960e
SS
4660 /*
4661 * Clearing NACK and defer counts to get their exact values
4662 * while reading EDID which are required by Compliance tests
4663 * 4.2.2.4 and 4.2.2.5
4664 */
4665 intel_dp->aux.i2c_nack_count = 0;
4666 intel_dp->aux.i2c_defer_count = 0;
4667
beb60608 4668 intel_dp_set_edid(intel_dp);
a9756bb5 4669
d63885da
PZ
4670 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4671 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4672 status = connector_status_connected;
4673
09b1eb13
TP
4674 /* Try to read the source of the interrupt */
4675 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4676 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4677 /* Clear interrupt source */
4678 drm_dp_dpcd_writeb(&intel_dp->aux,
4679 DP_DEVICE_SERVICE_IRQ_VECTOR,
4680 sink_irq_vector);
4681
4682 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4683 intel_dp_handle_test_request(intel_dp);
4684 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4685 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4686 }
4687
c8c8fb33 4688out:
25f78f58 4689 intel_display_power_put(to_i915(dev), power_domain);
c8c8fb33 4690 return status;
a4fc5ed6
KP
4691}
4692
beb60608
CW
4693static void
4694intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4695{
df0e9248 4696 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4697 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
25f78f58 4698 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
671dedd2 4699 enum intel_display_power_domain power_domain;
a4fc5ed6 4700
beb60608
CW
4701 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4702 connector->base.id, connector->name);
4703 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4704
beb60608
CW
4705 if (connector->status != connector_status_connected)
4706 return;
671dedd2 4707
25f78f58
VS
4708 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4709 intel_display_power_get(dev_priv, power_domain);
beb60608
CW
4710
4711 intel_dp_set_edid(intel_dp);
4712
25f78f58 4713 intel_display_power_put(dev_priv, power_domain);
beb60608
CW
4714
4715 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4716 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4717}
4718
4719static int intel_dp_get_modes(struct drm_connector *connector)
4720{
4721 struct intel_connector *intel_connector = to_intel_connector(connector);
4722 struct edid *edid;
4723
4724 edid = intel_connector->detect_edid;
4725 if (edid) {
4726 int ret = intel_connector_update_modes(connector, edid);
4727 if (ret)
4728 return ret;
4729 }
32f9d658 4730
f8779fda 4731 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4732 if (is_edp(intel_attached_dp(connector)) &&
4733 intel_connector->panel.fixed_mode) {
f8779fda 4734 struct drm_display_mode *mode;
beb60608
CW
4735
4736 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4737 intel_connector->panel.fixed_mode);
f8779fda 4738 if (mode) {
32f9d658
ZW
4739 drm_mode_probed_add(connector, mode);
4740 return 1;
4741 }
4742 }
beb60608 4743
32f9d658 4744 return 0;
a4fc5ed6
KP
4745}
4746
1aad7ac0
CW
4747static bool
4748intel_dp_detect_audio(struct drm_connector *connector)
4749{
1aad7ac0 4750 bool has_audio = false;
beb60608 4751 struct edid *edid;
1aad7ac0 4752
beb60608
CW
4753 edid = to_intel_connector(connector)->detect_edid;
4754 if (edid)
1aad7ac0 4755 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4756
1aad7ac0
CW
4757 return has_audio;
4758}
4759
f684960e
CW
4760static int
4761intel_dp_set_property(struct drm_connector *connector,
4762 struct drm_property *property,
4763 uint64_t val)
4764{
e953fd7b 4765 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4766 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4767 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4768 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4769 int ret;
4770
662595df 4771 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4772 if (ret)
4773 return ret;
4774
3f43c48d 4775 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4776 int i = val;
4777 bool has_audio;
4778
4779 if (i == intel_dp->force_audio)
f684960e
CW
4780 return 0;
4781
1aad7ac0 4782 intel_dp->force_audio = i;
f684960e 4783
c3e5f67b 4784 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4785 has_audio = intel_dp_detect_audio(connector);
4786 else
c3e5f67b 4787 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4788
4789 if (has_audio == intel_dp->has_audio)
f684960e
CW
4790 return 0;
4791
1aad7ac0 4792 intel_dp->has_audio = has_audio;
f684960e
CW
4793 goto done;
4794 }
4795
e953fd7b 4796 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4797 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4798 bool old_range = intel_dp->limited_color_range;
ae4edb80 4799
55bc60db
VS
4800 switch (val) {
4801 case INTEL_BROADCAST_RGB_AUTO:
4802 intel_dp->color_range_auto = true;
4803 break;
4804 case INTEL_BROADCAST_RGB_FULL:
4805 intel_dp->color_range_auto = false;
0f2a2a75 4806 intel_dp->limited_color_range = false;
55bc60db
VS
4807 break;
4808 case INTEL_BROADCAST_RGB_LIMITED:
4809 intel_dp->color_range_auto = false;
0f2a2a75 4810 intel_dp->limited_color_range = true;
55bc60db
VS
4811 break;
4812 default:
4813 return -EINVAL;
4814 }
ae4edb80
DV
4815
4816 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4817 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4818 return 0;
4819
e953fd7b
CW
4820 goto done;
4821 }
4822
53b41837
YN
4823 if (is_edp(intel_dp) &&
4824 property == connector->dev->mode_config.scaling_mode_property) {
4825 if (val == DRM_MODE_SCALE_NONE) {
4826 DRM_DEBUG_KMS("no scaling not supported\n");
4827 return -EINVAL;
4828 }
4829
4830 if (intel_connector->panel.fitting_mode == val) {
4831 /* the eDP scaling property is not changed */
4832 return 0;
4833 }
4834 intel_connector->panel.fitting_mode = val;
4835
4836 goto done;
4837 }
4838
f684960e
CW
4839 return -EINVAL;
4840
4841done:
c0c36b94
CW
4842 if (intel_encoder->base.crtc)
4843 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
4844
4845 return 0;
4846}
4847
a4fc5ed6 4848static void
73845adf 4849intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 4850{
1d508706 4851 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 4852
10e972d3 4853 kfree(intel_connector->detect_edid);
beb60608 4854
9cd300e0
JN
4855 if (!IS_ERR_OR_NULL(intel_connector->edid))
4856 kfree(intel_connector->edid);
4857
acd8db10
PZ
4858 /* Can't call is_edp() since the encoder may have been destroyed
4859 * already. */
4860 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 4861 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 4862
a4fc5ed6 4863 drm_connector_cleanup(connector);
55f78c43 4864 kfree(connector);
a4fc5ed6
KP
4865}
4866
00c09d70 4867void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 4868{
da63a9f2
PZ
4869 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4870 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 4871
a121f4e5 4872 intel_dp_aux_fini(intel_dp);
0e32b39c 4873 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
4874 if (is_edp(intel_dp)) {
4875 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
4876 /*
 4877 * vdd might still be enabled due to the delayed vdd off.
4878 * Make sure vdd is actually turned off here.
4879 */
773538e8 4880 pps_lock(intel_dp);
4be73780 4881 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
4882 pps_unlock(intel_dp);
4883
01527b31
CT
4884 if (intel_dp->edp_notifier.notifier_call) {
4885 unregister_reboot_notifier(&intel_dp->edp_notifier);
4886 intel_dp->edp_notifier.notifier_call = NULL;
4887 }
bd943159 4888 }
c8bd0e49 4889 drm_encoder_cleanup(encoder);
da63a9f2 4890 kfree(intel_dig_port);
24d05927
DV
4891}
4892
07f9cd0b
ID
4893static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4894{
4895 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4896
4897 if (!is_edp(intel_dp))
4898 return;
4899
951468f3
VS
4900 /*
 4901 * vdd might still be enabled due to the delayed vdd off.
4902 * Make sure vdd is actually turned off here.
4903 */
afa4e53a 4904 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 4905 pps_lock(intel_dp);
07f9cd0b 4906 edp_panel_vdd_off_sync(intel_dp);
773538e8 4907 pps_unlock(intel_dp);
07f9cd0b
ID
4908}
4909
49e6bc51
VS
4910static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4911{
4912 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4913 struct drm_device *dev = intel_dig_port->base.base.dev;
4914 struct drm_i915_private *dev_priv = dev->dev_private;
4915 enum intel_display_power_domain power_domain;
4916
4917 lockdep_assert_held(&dev_priv->pps_mutex);
4918
4919 if (!edp_have_panel_vdd(intel_dp))
4920 return;
4921
4922 /*
4923 * The VDD bit needs a power domain reference, so if the bit is
4924 * already enabled when we boot or resume, grab this reference and
4925 * schedule a vdd off, so we don't hold on to the reference
4926 * indefinitely.
4927 */
4928 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
25f78f58 4929 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
49e6bc51
VS
4930 intel_display_power_get(dev_priv, power_domain);
4931
4932 edp_panel_vdd_schedule_off(intel_dp);
4933}
4934
6d93c0c4
ID
4935static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4936{
49e6bc51
VS
4937 struct intel_dp *intel_dp;
4938
4939 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4940 return;
4941
4942 intel_dp = enc_to_intel_dp(encoder);
4943
4944 pps_lock(intel_dp);
4945
4946 /*
4947 * Read out the current power sequencer assignment,
4948 * in case the BIOS did something with it.
4949 */
666a4537 4950 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
49e6bc51
VS
4951 vlv_initial_power_sequencer_setup(intel_dp);
4952
4953 intel_edp_panel_vdd_sanitize(intel_dp);
4954
4955 pps_unlock(intel_dp);
6d93c0c4
ID
4956}
4957
a4fc5ed6 4958static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 4959 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 4960 .detect = intel_dp_detect,
beb60608 4961 .force = intel_dp_force,
a4fc5ed6 4962 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 4963 .set_property = intel_dp_set_property,
2545e4a6 4964 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 4965 .destroy = intel_dp_connector_destroy,
c6f95f27 4966 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 4967 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
4968};
4969
4970static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4971 .get_modes = intel_dp_get_modes,
4972 .mode_valid = intel_dp_mode_valid,
df0e9248 4973 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
4974};
4975
a4fc5ed6 4976static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 4977 .reset = intel_dp_encoder_reset,
24d05927 4978 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
4979};
4980
b2c5c181 4981enum irqreturn
13cf5504
DA
4982intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4983{
4984 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 4985 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
4986 struct drm_device *dev = intel_dig_port->base.base.dev;
4987 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 4988 enum intel_display_power_domain power_domain;
b2c5c181 4989 enum irqreturn ret = IRQ_NONE;
1c767b33 4990
2540058f
TI
4991 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4992 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
0e32b39c 4993 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 4994
7a7f84cc
VS
4995 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4996 /*
4997 * vdd off can generate a long pulse on eDP which
4998 * would require vdd on to handle it, and thus we
4999 * would end up in an endless cycle of
5000 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5001 */
5002 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5003 port_name(intel_dig_port->port));
a8b3d52f 5004 return IRQ_HANDLED;
7a7f84cc
VS
5005 }
5006
26fbb774
VS
5007 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5008 port_name(intel_dig_port->port),
0e32b39c 5009 long_hpd ? "long" : "short");
13cf5504 5010
25f78f58 5011 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1c767b33
ID
5012 intel_display_power_get(dev_priv, power_domain);
5013
0e32b39c 5014 if (long_hpd) {
5fa836a9
MK
5015 /* indicate that we need to restart link training */
5016 intel_dp->train_set_valid = false;
2a592bec 5017
7e66bcf2
JN
5018 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5019 goto mst_fail;
0e32b39c
DA
5020
5021 if (!intel_dp_get_dpcd(intel_dp)) {
5022 goto mst_fail;
5023 }
5024
5025 intel_dp_probe_oui(intel_dp);
5026
d14e7b6d
VS
5027 if (!intel_dp_probe_mst(intel_dp)) {
5028 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5029 intel_dp_check_link_status(intel_dp);
5030 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5031 goto mst_fail;
d14e7b6d 5032 }
0e32b39c
DA
5033 } else {
5034 if (intel_dp->is_mst) {
1c767b33 5035 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5036 goto mst_fail;
5037 }
5038
5039 if (!intel_dp->is_mst) {
5b215bcf 5040 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5041 intel_dp_check_link_status(intel_dp);
5b215bcf 5042 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5043 }
5044 }
b2c5c181
DV
5045
5046 ret = IRQ_HANDLED;
5047
1c767b33 5048 goto put_power;
0e32b39c
DA
5049mst_fail:
5050 /* if we were in MST mode, and device is not there get out of MST mode */
5051 if (intel_dp->is_mst) {
5052 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5053 intel_dp->is_mst = false;
5054 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5055 }
1c767b33
ID
5056put_power:
5057 intel_display_power_put(dev_priv, power_domain);
5058
5059 return ret;
13cf5504
DA
5060}
5061
477ec328 5062/* check the VBT to see whether the eDP is on another port */
5d8a7752 5063bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5064{
5065 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5066 union child_device_config *p_child;
36e83a18 5067 int i;
5d8a7752 5068 static const short port_mapping[] = {
477ec328
RV
5069 [PORT_B] = DVO_PORT_DPB,
5070 [PORT_C] = DVO_PORT_DPC,
5071 [PORT_D] = DVO_PORT_DPD,
5072 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5073 };
36e83a18 5074
53ce81a7
VS
5075 /*
 5076 * eDP is not supported on g4x, so bail out early just
 5077 * for a bit of extra safety in case the VBT is bonkers.
5078 */
5079 if (INTEL_INFO(dev)->gen < 5)
5080 return false;
5081
3b32a35b
VS
5082 if (port == PORT_A)
5083 return true;
5084
41aa3448 5085 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5086 return false;
5087
41aa3448
RV
5088 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5089 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5090
5d8a7752 5091 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5092 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5093 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5094 return true;
5095 }
5096 return false;
5097}
5098
0e32b39c 5099void
f684960e
CW
5100intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5101{
53b41837
YN
5102 struct intel_connector *intel_connector = to_intel_connector(connector);
5103
3f43c48d 5104 intel_attach_force_audio_property(connector);
e953fd7b 5105 intel_attach_broadcast_rgb_property(connector);
55bc60db 5106 intel_dp->color_range_auto = true;
53b41837
YN
5107
5108 if (is_edp(intel_dp)) {
5109 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5110 drm_object_attach_property(
5111 &connector->base,
53b41837 5112 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5113 DRM_MODE_SCALE_ASPECT);
5114 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5115 }
f684960e
CW
5116}
5117
dada1a9f
ID
5118static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5119{
5120 intel_dp->last_power_cycle = jiffies;
5121 intel_dp->last_power_on = jiffies;
5122 intel_dp->last_backlight_off = jiffies;
5123}
5124
67a54566
DV
5125static void
5126intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5127 struct intel_dp *intel_dp)
67a54566
DV
5128{
5129 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5130 struct edp_power_seq cur, vbt, spec,
5131 *final = &intel_dp->pps_delays;
b0a08bec 5132 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
f0f59a00 5133 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
453c5420 5134
e39b999a
VS
5135 lockdep_assert_held(&dev_priv->pps_mutex);
5136
81ddbc69
VS
5137 /* already initialized? */
5138 if (final->t11_t12 != 0)
5139 return;
5140
b0a08bec
VK
5141 if (IS_BROXTON(dev)) {
5142 /*
5143 * TODO: BXT has 2 sets of PPS registers.
 5144 * The correct register for Broxton needs to be identified
 5145 * using the VBT; hardcoding for now.
5146 */
5147 pp_ctrl_reg = BXT_PP_CONTROL(0);
5148 pp_on_reg = BXT_PP_ON_DELAYS(0);
5149 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5150 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5151 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5152 pp_on_reg = PCH_PP_ON_DELAYS;
5153 pp_off_reg = PCH_PP_OFF_DELAYS;
5154 pp_div_reg = PCH_PP_DIVISOR;
5155 } else {
bf13e81b
JN
5156 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5157
5158 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5159 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5160 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5161 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5162 }
67a54566
DV
5163
5164 /* Workaround: Need to write PP_CONTROL with the unlock key as
5165 * the very first thing. */
b0a08bec 5166 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5167
453c5420
JB
5168 pp_on = I915_READ(pp_on_reg);
5169 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5170 if (!IS_BROXTON(dev)) {
5171 I915_WRITE(pp_ctrl_reg, pp_ctl);
5172 pp_div = I915_READ(pp_div_reg);
5173 }
67a54566
DV
5174
5175 /* Pull timing values out of registers */
5176 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5177 PANEL_POWER_UP_DELAY_SHIFT;
5178
5179 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5180 PANEL_LIGHT_ON_DELAY_SHIFT;
5181
5182 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5183 PANEL_LIGHT_OFF_DELAY_SHIFT;
5184
5185 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5186 PANEL_POWER_DOWN_DELAY_SHIFT;
5187
b0a08bec
VK
5188 if (IS_BROXTON(dev)) {
5189 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5190 BXT_POWER_CYCLE_DELAY_SHIFT;
5191 if (tmp > 0)
5192 cur.t11_t12 = (tmp - 1) * 1000;
5193 else
5194 cur.t11_t12 = 0;
5195 } else {
5196 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5197 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5198 }
67a54566
DV
5199
5200 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5201 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5202
41aa3448 5203 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5204
5205 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5206 * our hw here, which are all in 100usec. */
5207 spec.t1_t3 = 210 * 10;
5208 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5209 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5210 spec.t10 = 500 * 10;
5211 /* This one is special and actually in units of 100ms, but zero
5212 * based in the hw (so we need to add 100 ms). But the sw vbt
 5213 * table multiplies it by 1000 to make it in units of 100usec,
5214 * too. */
5215 spec.t11_t12 = (510 + 100) * 10;
5216
5217 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5218 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5219
5220 /* Use the max of the register settings and vbt. If both are
5221 * unset, fall back to the spec limits. */
36b5f425 5222#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5223 spec.field : \
5224 max(cur.field, vbt.field))
5225 assign_final(t1_t3);
5226 assign_final(t8);
5227 assign_final(t9);
5228 assign_final(t10);
5229 assign_final(t11_t12);
5230#undef assign_final
5231
36b5f425 5232#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5233 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5234 intel_dp->backlight_on_delay = get_delay(t8);
5235 intel_dp->backlight_off_delay = get_delay(t9);
5236 intel_dp->panel_power_down_delay = get_delay(t10);
5237 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5238#undef get_delay
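	/*
	 * Worked example (illustrative, not part of the original source): if
	 * both the register value and the VBT leave t1_t3 at zero,
	 * assign_final() falls back to the eDP 1.3 spec limit of 210 * 10 =
	 * 2100 (in the hw's 100 us units), and get_delay() turns that into
	 * DIV_ROUND_UP(2100, 10) = 210, i.e. a 210 ms panel_power_up_delay.
	 */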
5239
f30d26e4
JN
5240 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5241 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5242 intel_dp->panel_power_cycle_delay);
5243
5244 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5245 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5246}
5247
5248static void
5249intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5250 struct intel_dp *intel_dp)
f30d26e4
JN
5251{
5252 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5253 u32 pp_on, pp_off, pp_div, port_sel = 0;
5254 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
f0f59a00 5255 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
ad933b56 5256 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5257 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5258
e39b999a 5259 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5260
b0a08bec
VK
5261 if (IS_BROXTON(dev)) {
5262 /*
5263 * TODO: BXT has 2 sets of PPS registers.
 5264 * The correct register for Broxton needs to be identified
 5265 * using the VBT; hardcoding for now.
5266 */
5267 pp_ctrl_reg = BXT_PP_CONTROL(0);
5268 pp_on_reg = BXT_PP_ON_DELAYS(0);
5269 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5270
5271 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5272 pp_on_reg = PCH_PP_ON_DELAYS;
5273 pp_off_reg = PCH_PP_OFF_DELAYS;
5274 pp_div_reg = PCH_PP_DIVISOR;
5275 } else {
bf13e81b
JN
5276 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5277
5278 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5279 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5280 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5281 }
5282
b2f19d1a
PZ
5283 /*
5284 * And finally store the new values in the power sequencer. The
5285 * backlight delays are set to 1 because we do manual waits on them. For
5286 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5287 * we'll end up waiting for the backlight off delay twice: once when we
5288 * do the manual sleep, and once when we disable the panel and wait for
5289 * the PP_STATUS bit to become zero.
5290 */
f30d26e4 5291 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5292 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5293 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5294 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5295 /* Compute the divisor for the pp clock, simply match the Bspec
5296 * formula. */
b0a08bec
VK
5297 if (IS_BROXTON(dev)) {
5298 pp_div = I915_READ(pp_ctrl_reg);
5299 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5300 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5301 << BXT_POWER_CYCLE_DELAY_SHIFT);
5302 } else {
5303 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5304 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5305 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5306 }
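	/*
	 * Unit check (illustrative note, not in the original source):
	 * seq->t11_t12 is kept in 100 us units while the power cycle delay
	 * field counts 100 ms steps, hence the DIV_ROUND_UP(..., 1000) above;
	 * e.g. the spec fallback of (510 + 100) * 10 = 6100 is written out as
	 * DIV_ROUND_UP(6100, 1000) = 7.
	 */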
67a54566
DV
5307
5308 /* Haswell doesn't have any port selection bits for the panel
5309 * power sequencer any more. */
666a4537 5310 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ad933b56 5311 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5312 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5313 if (port == PORT_A)
a24c144c 5314 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5315 else
a24c144c 5316 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5317 }
5318
453c5420
JB
5319 pp_on |= port_sel;
5320
5321 I915_WRITE(pp_on_reg, pp_on);
5322 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5323 if (IS_BROXTON(dev))
5324 I915_WRITE(pp_ctrl_reg, pp_div);
5325 else
5326 I915_WRITE(pp_div_reg, pp_div);
67a54566 5327
67a54566 5328 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5329 I915_READ(pp_on_reg),
5330 I915_READ(pp_off_reg),
b0a08bec
VK
5331 IS_BROXTON(dev) ?
5332 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5333 I915_READ(pp_div_reg));
f684960e
CW
5334}
5335
b33a2815
VK
5336/**
5337 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5338 * @dev: DRM device
5339 * @refresh_rate: RR to be programmed
5340 *
5341 * This function gets called when refresh rate (RR) has to be changed from
5342 * one frequency to another. Switches can be between high and low RR
5343 * supported by the panel or to any other RR based on media playback (in
5344 * this case, RR value needs to be passed from user space).
5345 *
5346 * The caller of this function needs to take a lock on dev_priv->drrs.
5347 */
96178eeb 5348static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5349{
5350 struct drm_i915_private *dev_priv = dev->dev_private;
5351 struct intel_encoder *encoder;
96178eeb
VK
5352 struct intel_digital_port *dig_port = NULL;
5353 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5354 struct intel_crtc_state *config = NULL;
439d7ac0 5355 struct intel_crtc *intel_crtc = NULL;
96178eeb 5356 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5357
5358 if (refresh_rate <= 0) {
5359 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5360 return;
5361 }
5362
96178eeb
VK
5363 if (intel_dp == NULL) {
5364 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5365 return;
5366 }
5367
1fcc9d1c 5368 /*
e4d59f6b
RV
5369 * FIXME: This needs proper synchronization with psr state for some
5370 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5371 */
439d7ac0 5372
96178eeb
VK
5373 dig_port = dp_to_dig_port(intel_dp);
5374 encoder = &dig_port->base;
723f9aab 5375 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5376
5377 if (!intel_crtc) {
5378 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5379 return;
5380 }
5381
6e3c9717 5382 config = intel_crtc->config;
439d7ac0 5383
96178eeb 5384 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5385 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5386 return;
5387 }
5388
96178eeb
VK
5389 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5390 refresh_rate)
439d7ac0
PB
5391 index = DRRS_LOW_RR;
5392
96178eeb 5393 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5394 DRM_DEBUG_KMS(
5395 "DRRS requested for previously set RR...ignoring\n");
5396 return;
5397 }
5398
5399 if (!intel_crtc->active) {
5400 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5401 return;
5402 }
5403
44395bfe 5404 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5405 switch (index) {
5406 case DRRS_HIGH_RR:
5407 intel_dp_set_m_n(intel_crtc, M1_N1);
5408 break;
5409 case DRRS_LOW_RR:
5410 intel_dp_set_m_n(intel_crtc, M2_N2);
5411 break;
5412 case DRRS_MAX_RR:
5413 default:
5414 DRM_ERROR("Unsupported refreshrate type\n");
5415 }
5416 } else if (INTEL_INFO(dev)->gen > 6) {
f0f59a00 5417 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
649636ef 5418 u32 val;
a4c30b1d 5419
649636ef 5420 val = I915_READ(reg);
439d7ac0 5421 if (index > DRRS_HIGH_RR) {
666a4537 5422 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5423 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5424 else
5425 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5426 } else {
666a4537 5427 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
6fa7aec1
VK
5428 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5429 else
5430 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5431 }
5432 I915_WRITE(reg, val);
5433 }
5434
4e9ac947
VK
5435 dev_priv->drrs.refresh_rate_type = index;
5436
5437 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5438}
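
/*
 * Minimal sketch (not part of the driver, added for illustration): the
 * locking contract that the kernel-doc above requires of
 * intel_dp_set_drrs_state() callers. The wrapper name is hypothetical; the
 * real callers are the DRRS helpers below, which take drrs.mutex around
 * their DRRS state changes in exactly this way.
 */
static inline void example_drrs_set_state_locked(struct drm_i915_private *dev_priv,
						 int refresh_rate)
{
	mutex_lock(&dev_priv->drrs.mutex);
	intel_dp_set_drrs_state(dev_priv->dev, refresh_rate);
	mutex_unlock(&dev_priv->drrs.mutex);
}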
5439
b33a2815
VK
5440/**
5441 * intel_edp_drrs_enable - init drrs struct if supported
5442 * @intel_dp: DP struct
5443 *
5444 * Initializes frontbuffer_bits and drrs.dp
5445 */
c395578e
VK
5446void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5447{
5448 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5449 struct drm_i915_private *dev_priv = dev->dev_private;
5450 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5451 struct drm_crtc *crtc = dig_port->base.base.crtc;
5452 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5453
5454 if (!intel_crtc->config->has_drrs) {
5455 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5456 return;
5457 }
5458
5459 mutex_lock(&dev_priv->drrs.mutex);
5460 if (WARN_ON(dev_priv->drrs.dp)) {
5461 DRM_ERROR("DRRS already enabled\n");
5462 goto unlock;
5463 }
5464
5465 dev_priv->drrs.busy_frontbuffer_bits = 0;
5466
5467 dev_priv->drrs.dp = intel_dp;
5468
5469unlock:
5470 mutex_unlock(&dev_priv->drrs.mutex);
5471}
5472
b33a2815
VK
5473/**
5474 * intel_edp_drrs_disable - Disable DRRS
5475 * @intel_dp: DP struct
5476 *
5477 */
c395578e
VK
5478void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5479{
5480 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5481 struct drm_i915_private *dev_priv = dev->dev_private;
5482 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5483 struct drm_crtc *crtc = dig_port->base.base.crtc;
5484 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5485
5486 if (!intel_crtc->config->has_drrs)
5487 return;
5488
5489 mutex_lock(&dev_priv->drrs.mutex);
5490 if (!dev_priv->drrs.dp) {
5491 mutex_unlock(&dev_priv->drrs.mutex);
5492 return;
5493 }
5494
5495 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5496 intel_dp_set_drrs_state(dev_priv->dev,
5497 intel_dp->attached_connector->panel.
5498 fixed_mode->vrefresh);
5499
5500 dev_priv->drrs.dp = NULL;
5501 mutex_unlock(&dev_priv->drrs.mutex);
5502
5503 cancel_delayed_work_sync(&dev_priv->drrs.work);
5504}
5505
4e9ac947
VK
5506static void intel_edp_drrs_downclock_work(struct work_struct *work)
5507{
5508 struct drm_i915_private *dev_priv =
5509 container_of(work, typeof(*dev_priv), drrs.work.work);
5510 struct intel_dp *intel_dp;
5511
5512 mutex_lock(&dev_priv->drrs.mutex);
5513
5514 intel_dp = dev_priv->drrs.dp;
5515
5516 if (!intel_dp)
5517 goto unlock;
5518
439d7ac0 5519 /*
4e9ac947
VK
 5520 * The delayed work can race with an invalidate, hence we need to
5521 * recheck.
439d7ac0
PB
5522 */
5523
4e9ac947
VK
5524 if (dev_priv->drrs.busy_frontbuffer_bits)
5525 goto unlock;
439d7ac0 5526
4e9ac947
VK
5527 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5528 intel_dp_set_drrs_state(dev_priv->dev,
5529 intel_dp->attached_connector->panel.
5530 downclock_mode->vrefresh);
439d7ac0 5531
4e9ac947 5532unlock:
4e9ac947 5533 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5534}
5535
b33a2815 5536/**
0ddfd203 5537 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5538 * @dev: DRM device
5539 * @frontbuffer_bits: frontbuffer plane tracking bits
5540 *
0ddfd203
R
 5541 * This function gets called every time rendering on the given planes starts.
5542 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5543 *
5544 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5545 */
a93fad0f
VK
5546void intel_edp_drrs_invalidate(struct drm_device *dev,
5547 unsigned frontbuffer_bits)
5548{
5549 struct drm_i915_private *dev_priv = dev->dev_private;
5550 struct drm_crtc *crtc;
5551 enum pipe pipe;
5552
9da7d693 5553 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5554 return;
5555
88f933a8 5556 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5557
a93fad0f 5558 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5559 if (!dev_priv->drrs.dp) {
5560 mutex_unlock(&dev_priv->drrs.mutex);
5561 return;
5562 }
5563
a93fad0f
VK
5564 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5565 pipe = to_intel_crtc(crtc)->pipe;
5566
c1d038c6
DV
5567 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5568 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5569
0ddfd203 5570 /* invalidate means busy screen hence upclock */
c1d038c6 5571 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5572 intel_dp_set_drrs_state(dev_priv->dev,
5573 dev_priv->drrs.dp->attached_connector->panel.
5574 fixed_mode->vrefresh);
a93fad0f 5575
a93fad0f
VK
5576 mutex_unlock(&dev_priv->drrs.mutex);
5577}
5578
b33a2815 5579/**
0ddfd203 5580 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5581 * @dev: DRM device
5582 * @frontbuffer_bits: frontbuffer plane tracking bits
5583 *
0ddfd203
R
5584 * This function gets called every time rendering on the given planes has
5585 * completed or flip on a crtc is completed. So DRRS should be upclocked
5586 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5587 * if no other planes are dirty.
b33a2815
VK
5588 *
5589 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5590 */
a93fad0f
VK
5591void intel_edp_drrs_flush(struct drm_device *dev,
5592 unsigned frontbuffer_bits)
5593{
5594 struct drm_i915_private *dev_priv = dev->dev_private;
5595 struct drm_crtc *crtc;
5596 enum pipe pipe;
5597
9da7d693 5598 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5599 return;
5600
88f933a8 5601 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5602
a93fad0f 5603 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5604 if (!dev_priv->drrs.dp) {
5605 mutex_unlock(&dev_priv->drrs.mutex);
5606 return;
5607 }
5608
a93fad0f
VK
5609 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5610 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5611
5612 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5613 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5614
0ddfd203 5615 /* flush means busy screen hence upclock */
c1d038c6 5616 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5617 intel_dp_set_drrs_state(dev_priv->dev,
5618 dev_priv->drrs.dp->attached_connector->panel.
5619 fixed_mode->vrefresh);
5620
5621 /*
5622 * flush also means no more activity hence schedule downclock, if all
5623 * other fbs are quiescent too
5624 */
5625 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5626 schedule_delayed_work(&dev_priv->drrs.work,
5627 msecs_to_jiffies(1000));
5628 mutex_unlock(&dev_priv->drrs.mutex);
5629}
5630
b33a2815
VK
5631/**
5632 * DOC: Display Refresh Rate Switching (DRRS)
5633 *
5634 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5635 * which enables switching between low and high refresh rates,
5636 * dynamically, based on the usage scenario. This feature is applicable
5637 * for internal panels.
5638 *
5639 * Indication that the panel supports DRRS is given by the panel EDID, which
5640 * would list multiple refresh rates for one resolution.
5641 *
5642 * DRRS is of 2 types - static and seamless.
5643 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
 5644 * (may appear as a blink on screen) and is used in a dock-undock scenario.
5645 * Seamless DRRS involves changing RR without any visual effect to the user
5646 * and can be used during normal system usage. This is done by programming
5647 * certain registers.
5648 *
5649 * Support for static/seamless DRRS may be indicated in the VBT based on
5650 * inputs from the panel spec.
5651 *
5652 * DRRS saves power by switching to low RR based on usage scenarios.
5653 *
5654 * eDP DRRS:-
5655 * The implementation is based on frontbuffer tracking implementation.
5656 * When there is a disturbance on the screen triggered by user activity or a
5657 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5658 * When there is no movement on screen, after a timeout of 1 second, a switch
5659 * to low RR is made.
5660 * For integration with frontbuffer tracking code,
5661 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5662 *
5663 * DRRS can be further extended to support other internal panels and also
5664 * the scenario of video playback wherein RR is set based on the rate
5665 * requested by userspace.
5666 */
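
/*
 * Illustrative sketch (not part of the original file): how frontbuffer
 * tracking code is expected to bracket rendering with the DRRS entry points
 * described above. The function name and the INTEL_FRONTBUFFER_PRIMARY(PIPE_A)
 * choice of bits are assumptions made only for this example.
 */
static inline void example_drrs_frontbuffer_usage(struct drm_device *dev)
{
	unsigned frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(PIPE_A);

	/* Rendering starts dirtying the plane: switch back to the high RR. */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);

	/* ... rendering / flip ... */

	/* Rendering done: DRRS may downclock again after 1 s of idleness. */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}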
5667
5668/**
5669 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5670 * @intel_connector: eDP connector
5671 * @fixed_mode: preferred mode of panel
5672 *
5673 * This function is called only once at driver load to initialize basic
5674 * DRRS stuff.
5675 *
5676 * Returns:
5677 * Downclock mode if panel supports it, else return NULL.
5678 * DRRS support is determined by the presence of downclock mode (apart
5679 * from VBT setting).
5680 */
4f9db5b5 5681static struct drm_display_mode *
96178eeb
VK
5682intel_dp_drrs_init(struct intel_connector *intel_connector,
5683 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5684{
5685 struct drm_connector *connector = &intel_connector->base;
96178eeb 5686 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5687 struct drm_i915_private *dev_priv = dev->dev_private;
5688 struct drm_display_mode *downclock_mode = NULL;
5689
9da7d693
DV
5690 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5691 mutex_init(&dev_priv->drrs.mutex);
5692
4f9db5b5
PB
5693 if (INTEL_INFO(dev)->gen <= 6) {
5694 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5695 return NULL;
5696 }
5697
5698 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5699 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5700 return NULL;
5701 }
5702
5703 downclock_mode = intel_find_panel_downclock
5704 (dev, fixed_mode, connector);
5705
5706 if (!downclock_mode) {
a1d26342 5707 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
5708 return NULL;
5709 }
5710
96178eeb 5711 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5712
96178eeb 5713 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5714 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5715 return downclock_mode;
5716}
5717
ed92f0b2 5718static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5719 struct intel_connector *intel_connector)
ed92f0b2
PZ
5720{
5721 struct drm_connector *connector = &intel_connector->base;
5722 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5723 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5724 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5725 struct drm_i915_private *dev_priv = dev->dev_private;
5726 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5727 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5728 bool has_dpcd;
5729 struct drm_display_mode *scan;
5730 struct edid *edid;
6517d273 5731 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5732
5733 if (!is_edp(intel_dp))
5734 return true;
5735
49e6bc51
VS
5736 pps_lock(intel_dp);
5737 intel_edp_panel_vdd_sanitize(intel_dp);
5738 pps_unlock(intel_dp);
63635217 5739
ed92f0b2 5740 /* Cache DPCD and EDID for edp. */
ed92f0b2 5741 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5742
5743 if (has_dpcd) {
5744 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5745 dev_priv->no_aux_handshake =
5746 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5747 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5748 } else {
5749 /* if this fails, presume the device is a ghost */
5750 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5751 return false;
5752 }
5753
5754 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5755 pps_lock(intel_dp);
36b5f425 5756 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5757 pps_unlock(intel_dp);
ed92f0b2 5758
060c8778 5759 mutex_lock(&dev->mode_config.mutex);
0b99836f 5760 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5761 if (edid) {
5762 if (drm_add_edid_modes(connector, edid)) {
5763 drm_mode_connector_update_edid_property(connector,
5764 edid);
5765 drm_edid_to_eld(connector, edid);
5766 } else {
5767 kfree(edid);
5768 edid = ERR_PTR(-EINVAL);
5769 }
5770 } else {
5771 edid = ERR_PTR(-ENOENT);
5772 }
5773 intel_connector->edid = edid;
5774
5775 /* prefer fixed mode from EDID if available */
5776 list_for_each_entry(scan, &connector->probed_modes, head) {
5777 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5778 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5779 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5780 intel_connector, fixed_mode);
ed92f0b2
PZ
5781 break;
5782 }
5783 }
5784
5785 /* fallback to VBT if available for eDP */
5786 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5787 fixed_mode = drm_mode_duplicate(dev,
5788 dev_priv->vbt.lfp_lvds_vbt_mode);
5789 if (fixed_mode)
5790 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5791 }
060c8778 5792 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5793
666a4537 5794 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
01527b31
CT
5795 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5796 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5797
5798 /*
5799 * Figure out the current pipe for the initial backlight setup.
5800 * If the current pipe isn't valid, try the PPS pipe, and if that
5801 * fails just assume pipe A.
5802 */
5803 if (IS_CHERRYVIEW(dev))
5804 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5805 else
5806 pipe = PORT_TO_PIPE(intel_dp->DP);
5807
5808 if (pipe != PIPE_A && pipe != PIPE_B)
5809 pipe = intel_dp->pps_pipe;
5810
5811 if (pipe != PIPE_A && pipe != PIPE_B)
5812 pipe = PIPE_A;
5813
5814 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5815 pipe_name(pipe));
01527b31
CT
5816 }
5817
4f9db5b5 5818 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 5819 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 5820 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5821
5822 return true;
5823}
5824
16c25533 5825bool
f0fec3f2
PZ
5826intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5827 struct intel_connector *intel_connector)
a4fc5ed6 5828{
f0fec3f2
PZ
5829 struct drm_connector *connector = &intel_connector->base;
5830 struct intel_dp *intel_dp = &intel_dig_port->dp;
5831 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5832 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 5833 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 5834 enum port port = intel_dig_port->port;
a121f4e5 5835 int type, ret;
a4fc5ed6 5836
ccb1a831
VS
5837 if (WARN(intel_dig_port->max_lanes < 1,
5838 "Not enough lanes (%d) for DP on port %c\n",
5839 intel_dig_port->max_lanes, port_name(port)))
5840 return false;
5841
a4a5d2f8
VS
5842 intel_dp->pps_pipe = INVALID_PIPE;
5843
ec5b01dd 5844 /* intel_dp vfuncs */
b6b5e383
DL
5845 if (INTEL_INFO(dev)->gen >= 9)
5846 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
666a4537 5847 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
ec5b01dd
DL
5848 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5849 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5850 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5851 else if (HAS_PCH_SPLIT(dev))
5852 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5853 else
5854 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5855
b9ca5fad
DL
5856 if (INTEL_INFO(dev)->gen >= 9)
5857 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5858 else
5859 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 5860
ad64217b
ACO
5861 if (HAS_DDI(dev))
5862 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5863
0767935e
DV
5864 /* Preserve the current hw state. */
5865 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 5866 intel_dp->attached_connector = intel_connector;
3d3dc149 5867
3b32a35b 5868 if (intel_dp_is_edp(dev, port))
b329530c 5869 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
5870 else
5871 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 5872
f7d24902
ID
5873 /*
5874 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5875 * for DP the encoder type can be set by the caller to
5876 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5877 */
5878 if (type == DRM_MODE_CONNECTOR_eDP)
5879 intel_encoder->type = INTEL_OUTPUT_EDP;
5880
c17ed5b5 5881 /* eDP only on port B and/or C on vlv/chv */
666a4537
WB
5882 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5883 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
c17ed5b5
VS
5884 return false;
5885
e7281eab
ID
5886 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5887 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5888 port_name(port));
5889
b329530c 5890 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
5891 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5892
a4fc5ed6
KP
5893 connector->interlace_allowed = true;
5894 connector->doublescan_allowed = 0;
5895
f0fec3f2 5896 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 5897 edp_panel_vdd_work);
a4fc5ed6 5898
df0e9248 5899 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 5900 drm_connector_register(connector);
a4fc5ed6 5901
affa9354 5902 if (HAS_DDI(dev))
bcbc889b
PZ
5903 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5904 else
5905 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 5906 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 5907
0b99836f 5908 /* Set up the hotplug pin. */
ab9d7c30
PZ
5909 switch (port) {
5910 case PORT_A:
1d843f9d 5911 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5912 break;
5913 case PORT_B:
1d843f9d 5914 intel_encoder->hpd_pin = HPD_PORT_B;
e87a005d 5915 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
cf1d5883 5916 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
5917 break;
5918 case PORT_C:
1d843f9d 5919 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
5920 break;
5921 case PORT_D:
1d843f9d 5922 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 5923 break;
26951caf
XZ
5924 case PORT_E:
5925 intel_encoder->hpd_pin = HPD_PORT_E;
5926 break;
ab9d7c30 5927 default:
ad1c0b19 5928 BUG();
5eb08b69
ZW
5929 }
5930
dada1a9f 5931 if (is_edp(intel_dp)) {
773538e8 5932 pps_lock(intel_dp);
1e74a324 5933 intel_dp_init_panel_power_timestamps(intel_dp);
666a4537 5934 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
a4a5d2f8 5935 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 5936 else
36b5f425 5937 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 5938 pps_unlock(intel_dp);
dada1a9f 5939 }
0095e6dc 5940
a121f4e5
VS
5941 ret = intel_dp_aux_init(intel_dp, intel_connector);
5942 if (ret)
5943 goto fail;
c1f05264 5944
0e32b39c 5945 /* init MST on ports that can support it */
0c9b3715
JN
5946 if (HAS_DP_MST(dev) &&
5947 (port == PORT_B || port == PORT_C || port == PORT_D))
5948 intel_dp_mst_encoder_init(intel_dig_port,
5949 intel_connector->base.base.id);
0e32b39c 5950
36b5f425 5951 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5
VS
5952 intel_dp_aux_fini(intel_dp);
5953 intel_dp_mst_encoder_cleanup(intel_dig_port);
5954 goto fail;
b2f246a8 5955 }
32f9d658 5956
f684960e
CW
5957 intel_dp_add_properties(intel_dp, connector);
5958
a4fc5ed6
KP
5959 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5960 * 0xd. Failure to do so will result in spurious interrupts being
5961 * generated on the port when a cable is not attached.
5962 */
5963 if (IS_G4X(dev) && !IS_GM45(dev)) {
5964 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5965 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5966 }
16c25533 5967
aa7471d2
JN
5968 i915_debugfs_connector_add(connector);
5969
16c25533 5970 return true;
a121f4e5
VS
5971
5972fail:
5973 if (is_edp(intel_dp)) {
5974 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5975 /*
 5976 * vdd might still be enabled due to the delayed vdd off.
5977 * Make sure vdd is actually turned off here.
5978 */
5979 pps_lock(intel_dp);
5980 edp_panel_vdd_off_sync(intel_dp);
5981 pps_unlock(intel_dp);
5982 }
5983 drm_connector_unregister(connector);
5984 drm_connector_cleanup(connector);
5985
5986 return false;
a4fc5ed6 5987}
f0fec3f2
PZ
5988
5989void
f0f59a00
VS
5990intel_dp_init(struct drm_device *dev,
5991 i915_reg_t output_reg, enum port port)
f0fec3f2 5992{
13cf5504 5993 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
5994 struct intel_digital_port *intel_dig_port;
5995 struct intel_encoder *intel_encoder;
5996 struct drm_encoder *encoder;
5997 struct intel_connector *intel_connector;
5998
b14c5679 5999 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6000 if (!intel_dig_port)
6001 return;
6002
08d9bc92 6003 intel_connector = intel_connector_alloc();
11aee0f6
SM
6004 if (!intel_connector)
6005 goto err_connector_alloc;
f0fec3f2
PZ
6006
6007 intel_encoder = &intel_dig_port->base;
6008 encoder = &intel_encoder->base;
6009
893da0c9
SM
6010 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6011 DRM_MODE_ENCODER_TMDS))
6012 goto err_encoder_init;
f0fec3f2 6013
5bfe2ac0 6014 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6015 intel_encoder->disable = intel_disable_dp;
00c09d70 6016 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6017 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6018 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6019 if (IS_CHERRYVIEW(dev)) {
9197c88b 6020 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6021 intel_encoder->pre_enable = chv_pre_enable_dp;
6022 intel_encoder->enable = vlv_enable_dp;
580d3811 6023 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6024 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6025 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6026 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6027 intel_encoder->pre_enable = vlv_pre_enable_dp;
6028 intel_encoder->enable = vlv_enable_dp;
49277c31 6029 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6030 } else {
ecff4f3b
JN
6031 intel_encoder->pre_enable = g4x_pre_enable_dp;
6032 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6033 if (INTEL_INFO(dev)->gen >= 5)
6034 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6035 }
f0fec3f2 6036
174edf1f 6037 intel_dig_port->port = port;
0bdf5a05 6038 dev_priv->dig_port_map[port] = intel_encoder;
f0fec3f2 6039 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 6040 intel_dig_port->max_lanes = 4;
f0fec3f2 6041
00c09d70 6042 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6043 if (IS_CHERRYVIEW(dev)) {
6044 if (port == PORT_D)
6045 intel_encoder->crtc_mask = 1 << 2;
6046 else
6047 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6048 } else {
6049 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6050 }
bc079e8b 6051 intel_encoder->cloneable = 0;
f0fec3f2 6052
13cf5504 6053 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6054 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6055
11aee0f6
SM
6056 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6057 goto err_init_connector;
6058
6059 return;
6060
6061err_init_connector:
6062 drm_encoder_cleanup(encoder);
893da0c9 6063err_encoder_init:
11aee0f6
SM
6064 kfree(intel_connector);
6065err_connector_alloc:
6066 kfree(intel_dig_port);
6067
6068 return;
f0fec3f2 6069}
0e32b39c
DA
6070
6071void intel_dp_mst_suspend(struct drm_device *dev)
6072{
6073 struct drm_i915_private *dev_priv = dev->dev_private;
6074 int i;
6075
6076 /* disable MST */
6077 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6078 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6079 if (!intel_dig_port)
6080 continue;
6081
6082 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6083 if (!intel_dig_port->dp.can_mst)
6084 continue;
6085 if (intel_dig_port->dp.is_mst)
6086 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6087 }
6088 }
6089}
6090
6091void intel_dp_mst_resume(struct drm_device *dev)
6092{
6093 struct drm_i915_private *dev_priv = dev->dev_private;
6094 int i;
6095
6096 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6097 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6098 if (!intel_dig_port)
6099 continue;
6100 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6101 int ret;
6102
6103 if (!intel_dig_port->dp.can_mst)
6104 continue;
6105
6106 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6107 if (ret != 0) {
6108 intel_dp_check_mst_status(&intel_dig_port->dp);
6109 }
6110 }
6111 }
6112}