1 /*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
33 #include <drm/drmP.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
40 #include "i915_drv.h"
41
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
43
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
49
50 struct dp_link_dpll {
51 int clock;
52 struct dpll dpll;
53 };
54
55 static const struct dp_link_dpll gen4_dpll[] = {
56 { 162000,
57 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
58 { 270000,
59 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
60 };
61
62 static const struct dp_link_dpll pch_dpll[] = {
63 { 162000,
64 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
65 { 270000,
66 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
67 };
68
69 static const struct dp_link_dpll vlv_dpll[] = {
70 { 162000,
71 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
72 { 270000,
73 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
74 };
75
76 /*
77 * CHV supports eDP 1.4, which has more link rates.
78 * Only the fixed rates are listed below; variable rates are excluded.
79 */
80 static const struct dp_link_dpll chv_dpll[] = {
81 /*
82 * CHV requires programming a fractional divider for m2.
83 * m2 is stored in fixed point format using the formula below:
84 * (m2_int << 22) | m2_fraction
85 */
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
92 };
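/*
 * Editor's note -- worked example of the fixed-point encoding above:
 * for the 162000 entry, (32 << 22) | 1677722 = 0x8000000 | 0x19999a
 * = 0x819999a, which matches the .m2 value in the table.
 */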
93
94 static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
95 324000, 432000, 540000 };
96 static const int skl_rates[] = { 162000, 216000, 270000,
97 324000, 432000, 540000 };
98 static const int default_rates[] = { 162000, 270000, 540000 };
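/*
 * Editor's note: these rates are link symbol clocks in kHz; with 8b/10b
 * coding, 162000 corresponds to RBR (1.62 Gbps per lane), 270000 to HBR
 * (2.7 Gbps) and 540000 to HBR2 (5.4 Gbps).
 */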
99
100 /**
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
103 *
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * returns true, and false otherwise.
106 */
107 static bool is_edp(struct intel_dp *intel_dp)
108 {
109 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
110
111 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
112 }
113
114 static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
115 {
116 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
117
118 return intel_dig_port->base.base.dev;
119 }
120
121 static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
122 {
123 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
124 }
125
126 static void intel_dp_link_down(struct intel_dp *intel_dp);
127 static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
128 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
129 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
130 static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe);
132
133 static unsigned int intel_dp_unused_lane_mask(int lane_count)
134 {
135 return ~((1 << lane_count) - 1) & 0xf;
136 }
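/*
 * Editor's note -- example: for lane_count = 2, (1 << 2) - 1 = 0b0011,
 * so the function returns ~0b0011 & 0xf = 0b1100, i.e. lanes 2 and 3
 * are the unused ones.
 */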
137
138 static int
139 intel_dp_max_link_bw(struct intel_dp *intel_dp)
140 {
141 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
142
143 switch (max_link_bw) {
144 case DP_LINK_BW_1_62:
145 case DP_LINK_BW_2_7:
146 case DP_LINK_BW_5_4:
147 break;
148 default:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
150 max_link_bw);
151 max_link_bw = DP_LINK_BW_1_62;
152 break;
153 }
154 return max_link_bw;
155 }
156
157 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
158 {
159 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
160 u8 source_max, sink_max;
161
162 source_max = intel_dig_port->max_lanes;
163 sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
164
165 return min(source_max, sink_max);
166 }
167
168 /*
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
171 *
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
173 *
174 * 270000 * 1 * 8 / 10 == 216000
175 *
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
180 *
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
183 */
184
185 static int
186 intel_dp_link_required(int pixel_clock, int bpp)
187 {
188 return (pixel_clock * bpp + 9) / 10;
189 }
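/*
 * Editor's note -- worked example per the comment above: 1680x1050R at
 * 18bpp gives intel_dp_link_required(119000, 18) = (2142000 + 9) / 10
 * = 214200 decakilobits, i.e. 2.142 Gbit/s.
 */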
190
191 static int
192 intel_dp_max_data_rate(int max_link_clock, int max_lanes)
193 {
194 return (max_link_clock * max_lanes * 8) / 10;
195 }
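/*
 * Editor's note -- example: four lanes of HBR2 give
 * intel_dp_max_data_rate(540000, 4) = (540000 * 4 * 8) / 10 = 1728000
 * decakilobits, i.e. 17.28 Gbit/s of payload after 8b/10b coding.
 */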
196
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector *connector,
199 struct drm_display_mode *mode)
200 {
201 struct intel_dp *intel_dp = intel_attached_dp(connector);
202 struct intel_connector *intel_connector = to_intel_connector(connector);
203 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
204 int target_clock = mode->clock;
205 int max_rate, mode_rate, max_lanes, max_link_clock;
206 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
207
208 if (is_edp(intel_dp) && fixed_mode) {
209 if (mode->hdisplay > fixed_mode->hdisplay)
210 return MODE_PANEL;
211
212 if (mode->vdisplay > fixed_mode->vdisplay)
213 return MODE_PANEL;
214
215 target_clock = fixed_mode->clock;
216 }
217
218 max_link_clock = intel_dp_max_link_rate(intel_dp);
219 max_lanes = intel_dp_max_lane_count(intel_dp);
220
221 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
222 mode_rate = intel_dp_link_required(target_clock, 18);
223
224 if (mode_rate > max_rate || target_clock > max_dotclk)
225 return MODE_CLOCK_HIGH;
226
227 if (mode->clock < 10000)
228 return MODE_CLOCK_LOW;
229
230 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
231 return MODE_H_ILLEGAL;
232
233 return MODE_OK;
234 }
235
236 uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
237 {
238 int i;
239 uint32_t v = 0;
240
241 if (src_bytes > 4)
242 src_bytes = 4;
243 for (i = 0; i < src_bytes; i++)
244 v |= ((uint32_t) src[i]) << ((3-i) * 8);
245 return v;
246 }
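/*
 * Editor's note -- example: packing the bytes { 0x11, 0x22, 0x33, 0x44 }
 * yields 0x11223344; bytes go MSB-first to match the AUX data register
 * layout, and intel_dp_unpack_aux() below performs the inverse.
 */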
247
248 static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
249 {
250 int i;
251 if (dst_bytes > 4)
252 dst_bytes = 4;
253 for (i = 0; i < dst_bytes; i++)
254 dst[i] = src >> ((3-i) * 8);
255 }
256
257 static void
258 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
259 struct intel_dp *intel_dp);
260 static void
261 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
262 struct intel_dp *intel_dp);
263
264 static void pps_lock(struct intel_dp *intel_dp)
265 {
266 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
267 struct intel_encoder *encoder = &intel_dig_port->base;
268 struct drm_device *dev = encoder->base.dev;
269 struct drm_i915_private *dev_priv = dev->dev_private;
270 enum intel_display_power_domain power_domain;
271
272 /*
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
275 */
276 power_domain = intel_display_port_aux_power_domain(encoder);
277 intel_display_power_get(dev_priv, power_domain);
278
279 mutex_lock(&dev_priv->pps_mutex);
280 }
281
282 static void pps_unlock(struct intel_dp *intel_dp)
283 {
284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
285 struct intel_encoder *encoder = &intel_dig_port->base;
286 struct drm_device *dev = encoder->base.dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 enum intel_display_power_domain power_domain;
289
290 mutex_unlock(&dev_priv->pps_mutex);
291
292 power_domain = intel_display_port_aux_power_domain(encoder);
293 intel_display_power_put(dev_priv, power_domain);
294 }
295
296 static void
297 vlv_power_sequencer_kick(struct intel_dp *intel_dp)
298 {
299 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
300 struct drm_device *dev = intel_dig_port->base.base.dev;
301 struct drm_i915_private *dev_priv = dev->dev_private;
302 enum pipe pipe = intel_dp->pps_pipe;
303 bool pll_enabled, release_cl_override = false;
304 enum dpio_phy phy = DPIO_PHY(pipe);
305 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
306 uint32_t DP;
307
308 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
309 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
310 pipe_name(pipe), port_name(intel_dig_port->port)))
311 return;
312
313 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
314 pipe_name(pipe), port_name(intel_dig_port->port));
315
316 /* Preserve the BIOS-computed detected bit. This is
317 * supposed to be read-only.
318 */
319 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
320 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
321 DP |= DP_PORT_WIDTH(1);
322 DP |= DP_LINK_TRAIN_PAT_1;
323
324 if (IS_CHERRYVIEW(dev))
325 DP |= DP_PIPE_SELECT_CHV(pipe);
326 else if (pipe == PIPE_B)
327 DP |= DP_PIPEB_SELECT;
328
329 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
330
331 /*
332 * The DPLL for the pipe must be enabled for this to work.
333 * So enable it temporarily if it's not already enabled.
334 */
335 if (!pll_enabled) {
336 release_cl_override = IS_CHERRYVIEW(dev) &&
337 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
338
339 if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
340 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
341 DRM_ERROR("Failed to force on pll for pipe %c!\n",
342 pipe_name(pipe));
343 return;
344 }
345 }
346
347 /*
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power sequencer lock onto the port.
351 * Otherwise even the VDD force bit won't work.
352 */
353 I915_WRITE(intel_dp->output_reg, DP);
354 POSTING_READ(intel_dp->output_reg);
355
356 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
357 POSTING_READ(intel_dp->output_reg);
358
359 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
360 POSTING_READ(intel_dp->output_reg);
361
362 if (!pll_enabled) {
363 vlv_force_pll_off(dev, pipe);
364
365 if (release_cl_override)
366 chv_phy_powergate_ch(dev_priv, phy, ch, false);
367 }
368 }
369
370 static enum pipe
371 vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
372 {
373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
374 struct drm_device *dev = intel_dig_port->base.base.dev;
375 struct drm_i915_private *dev_priv = dev->dev_private;
376 struct intel_encoder *encoder;
377 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
378 enum pipe pipe;
379
380 lockdep_assert_held(&dev_priv->pps_mutex);
381
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp));
384
385 if (intel_dp->pps_pipe != INVALID_PIPE)
386 return intel_dp->pps_pipe;
387
388 /*
389 * We don't have a power sequencer currently.
390 * Pick one that's not used by other ports.
391 */
392 for_each_intel_encoder(dev, encoder) {
393 struct intel_dp *tmp;
394
395 if (encoder->type != INTEL_OUTPUT_EDP)
396 continue;
397
398 tmp = enc_to_intel_dp(&encoder->base);
399
400 if (tmp->pps_pipe != INVALID_PIPE)
401 pipes &= ~(1 << tmp->pps_pipe);
402 }
403
404 /*
405 * Didn't find one. This should not happen since there
406 * are two power sequencers and up to two eDP ports.
407 */
408 if (WARN_ON(pipes == 0))
409 pipe = PIPE_A;
410 else
411 pipe = ffs(pipes) - 1;
412
413 vlv_steal_power_sequencer(dev, pipe);
414 intel_dp->pps_pipe = pipe;
415
416 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
417 pipe_name(intel_dp->pps_pipe),
418 port_name(intel_dig_port->port));
419
420 /* init power sequencer on this pipe and port */
421 intel_dp_init_panel_power_sequencer(dev, intel_dp);
422 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
423
424 /*
425 * Even vdd force doesn't work until we've made
426 * the power sequencer lock onto the port.
427 */
428 vlv_power_sequencer_kick(intel_dp);
429
430 return intel_dp->pps_pipe;
431 }
432
433 typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
434 enum pipe pipe);
435
436 static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
437 enum pipe pipe)
438 {
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
440 }
441
442 static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
443 enum pipe pipe)
444 {
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
446 }
447
448 static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
449 enum pipe pipe)
450 {
451 return true;
452 }
453
454 static enum pipe
455 vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
456 enum port port,
457 vlv_pipe_check pipe_check)
458 {
459 enum pipe pipe;
460
461 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
462 u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
463 PANEL_PORT_SELECT_MASK;
464
465 if (port_sel != PANEL_PORT_SELECT_VLV(port))
466 continue;
467
468 if (!pipe_check(dev_priv, pipe))
469 continue;
470
471 return pipe;
472 }
473
474 return INVALID_PIPE;
475 }
476
477 static void
478 vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
479 {
480 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
481 struct drm_device *dev = intel_dig_port->base.base.dev;
482 struct drm_i915_private *dev_priv = dev->dev_private;
483 enum port port = intel_dig_port->port;
484
485 lockdep_assert_held(&dev_priv->pps_mutex);
486
487 /* try to find a pipe with this port selected */
488 /* first pick one where the panel is on */
489 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
490 vlv_pipe_has_pp_on);
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp->pps_pipe == INVALID_PIPE)
493 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
494 vlv_pipe_has_vdd_on);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp->pps_pipe == INVALID_PIPE)
497 intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
498 vlv_pipe_any);
499
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp->pps_pipe == INVALID_PIPE) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
503 port_name(port));
504 return;
505 }
506
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port), pipe_name(intel_dp->pps_pipe));
509
510 intel_dp_init_panel_power_sequencer(dev, intel_dp);
511 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
512 }
513
514 void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
515 {
516 struct drm_device *dev = dev_priv->dev;
517 struct intel_encoder *encoder;
518
519 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)))
520 return;
521
522 /*
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so they
529 * should always be used.
530 */
531
532 for_each_intel_encoder(dev, encoder) {
533 struct intel_dp *intel_dp;
534
535 if (encoder->type != INTEL_OUTPUT_EDP)
536 continue;
537
538 intel_dp = enc_to_intel_dp(&encoder->base);
539 intel_dp->pps_pipe = INVALID_PIPE;
540 }
541 }
542
543 static i915_reg_t
544 _pp_ctrl_reg(struct intel_dp *intel_dp)
545 {
546 struct drm_device *dev = intel_dp_to_dev(intel_dp);
547
548 if (IS_BROXTON(dev))
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev))
551 return PCH_PP_CONTROL;
552 else
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
554 }
555
556 static i915_reg_t
557 _pp_stat_reg(struct intel_dp *intel_dp)
558 {
559 struct drm_device *dev = intel_dp_to_dev(intel_dp);
560
561 if (IS_BROXTON(dev))
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev))
564 return PCH_PP_STATUS;
565 else
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
567 }
568
569 /* Reboot notifier handler to shut down panel power, to guarantee T12 timing.
570 This function is only applicable when the panel PM state is not tracked. */
571 static int edp_notify_handler(struct notifier_block *this, unsigned long code,
572 void *unused)
573 {
574 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
575 edp_notifier);
576 struct drm_device *dev = intel_dp_to_dev(intel_dp);
577 struct drm_i915_private *dev_priv = dev->dev_private;
578
579 if (!is_edp(intel_dp) || code != SYS_RESTART)
580 return 0;
581
582 pps_lock(intel_dp);
583
584 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
585 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
586 i915_reg_t pp_ctrl_reg, pp_div_reg;
587 u32 pp_div;
588
589 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
590 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
591 pp_div = I915_READ(pp_div_reg);
592 pp_div &= PP_REFERENCE_DIVIDER_MASK;
593
594 /* 0x1F write to PP_DIV_REG sets max cycle delay */
595 I915_WRITE(pp_div_reg, pp_div | 0x1F);
596 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
597 msleep(intel_dp->panel_power_cycle_delay);
598 }
599
600 pps_unlock(intel_dp);
601
602 return 0;
603 }
604
605 static bool edp_have_panel_power(struct intel_dp *intel_dp)
606 {
607 struct drm_device *dev = intel_dp_to_dev(intel_dp);
608 struct drm_i915_private *dev_priv = dev->dev_private;
609
610 lockdep_assert_held(&dev_priv->pps_mutex);
611
612 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
613 intel_dp->pps_pipe == INVALID_PIPE)
614 return false;
615
616 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
617 }
618
619 static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
620 {
621 struct drm_device *dev = intel_dp_to_dev(intel_dp);
622 struct drm_i915_private *dev_priv = dev->dev_private;
623
624 lockdep_assert_held(&dev_priv->pps_mutex);
625
626 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
627 intel_dp->pps_pipe == INVALID_PIPE)
628 return false;
629
630 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
631 }
632
633 static void
634 intel_dp_check_edp(struct intel_dp *intel_dp)
635 {
636 struct drm_device *dev = intel_dp_to_dev(intel_dp);
637 struct drm_i915_private *dev_priv = dev->dev_private;
638
639 if (!is_edp(intel_dp))
640 return;
641
642 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
645 I915_READ(_pp_stat_reg(intel_dp)),
646 I915_READ(_pp_ctrl_reg(intel_dp)));
647 }
648 }
649
650 static uint32_t
651 intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
652 {
653 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
654 struct drm_device *dev = intel_dig_port->base.base.dev;
655 struct drm_i915_private *dev_priv = dev->dev_private;
656 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
657 uint32_t status;
658 bool done;
659
660 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
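	/*
	 * Editor's note: the C macro re-reads the AUX control register each
	 * time the wait condition is evaluated, so `status` ends up holding
	 * the final register value once SEND_BUSY clears or the wait expires.
	 */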
661 if (has_aux_irq)
662 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
663 msecs_to_jiffies_timeout(10));
664 else
665 done = wait_for_atomic(C, 10) == 0;
666 if (!done)
667 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
668 has_aux_irq);
669 #undef C
670
671 return status;
672 }
673
674 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
675 {
676 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
677 struct drm_device *dev = intel_dig_port->base.base.dev;
678
679 /*
680 * The clock divider is based on the hrawclk, and is meant to run at
681 * 2MHz. So, take the hrawclk value, divide by 2 and use that.
682 */
683 return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2);
684 }
685
686 static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
687 {
688 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
689 struct drm_device *dev = intel_dig_port->base.base.dev;
690 struct drm_i915_private *dev_priv = dev->dev_private;
691
692 if (index)
693 return 0;
694
695 if (intel_dig_port->port == PORT_A) {
696 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
697
698 } else {
699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
700 }
701 }
702
703 static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
704 {
705 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
706 struct drm_device *dev = intel_dig_port->base.base.dev;
707 struct drm_i915_private *dev_priv = dev->dev_private;
708
709 if (intel_dig_port->port == PORT_A) {
710 if (index)
711 return 0;
712 return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
713 } else if (HAS_PCH_LPT_H(dev_priv)) {
714 /* Workaround for non-ULT HSW */
715 switch (index) {
716 case 0: return 63;
717 case 1: return 72;
718 default: return 0;
719 }
720 } else {
721 return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2);
722 }
723 }
724
725 static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
726 {
727 return index ? 0 : 100;
728 }
729
730 static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
731 {
732 /*
733 * SKL doesn't need us to program the AUX clock divider (Hardware will
734 * derive the clock from CDCLK automatically). We still implement the
735 * get_aux_clock_divider vfunc to plug into the existing code.
736 */
737 return index ? 0 : 1;
738 }
739
740 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
741 bool has_aux_irq,
742 int send_bytes,
743 uint32_t aux_clock_divider)
744 {
745 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
746 struct drm_device *dev = intel_dig_port->base.base.dev;
747 uint32_t precharge, timeout;
748
749 if (IS_GEN6(dev))
750 precharge = 3;
751 else
752 precharge = 5;
753
754 if (IS_BROADWELL(dev) && intel_dig_port->port == PORT_A)
755 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
756 else
757 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
758
759 return DP_AUX_CH_CTL_SEND_BUSY |
760 DP_AUX_CH_CTL_DONE |
761 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
762 DP_AUX_CH_CTL_TIME_OUT_ERROR |
763 timeout |
764 DP_AUX_CH_CTL_RECEIVE_ERROR |
765 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
766 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
767 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
768 }
769
770 static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
771 bool has_aux_irq,
772 int send_bytes,
773 uint32_t unused)
774 {
775 return DP_AUX_CH_CTL_SEND_BUSY |
776 DP_AUX_CH_CTL_DONE |
777 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR |
779 DP_AUX_CH_CTL_TIME_OUT_1600us |
780 DP_AUX_CH_CTL_RECEIVE_ERROR |
781 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
783 }
784
785 static int
786 intel_dp_aux_ch(struct intel_dp *intel_dp,
787 const uint8_t *send, int send_bytes,
788 uint8_t *recv, int recv_size)
789 {
790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
791 struct drm_device *dev = intel_dig_port->base.base.dev;
792 struct drm_i915_private *dev_priv = dev->dev_private;
793 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
794 uint32_t aux_clock_divider;
795 int i, ret, recv_bytes;
796 uint32_t status;
797 int try, clock = 0;
798 bool has_aux_irq = HAS_AUX_IRQ(dev);
799 bool vdd;
800
801 pps_lock(intel_dp);
802
803 /*
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for e.g. i2c-dev access we need to turn it on/off
807 * ourselves.
808 */
809 vdd = edp_panel_vdd_on(intel_dp);
810
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
813 * deep sleep states.
814 */
815 pm_qos_update_request(&dev_priv->pm_qos, 0);
816
817 intel_dp_check_edp(intel_dp);
818
819 /* Try to wait for any previous AUX channel activity */
820 for (try = 0; try < 3; try++) {
821 status = I915_READ_NOTRACE(ch_ctl);
822 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
823 break;
824 msleep(1);
825 }
826
827 if (try == 3) {
828 static u32 last_status = -1;
829 const u32 status = I915_READ(ch_ctl);
830
831 if (status != last_status) {
832 WARN(1, "dp_aux_ch not started status 0x%08x\n",
833 status);
834 last_status = status;
835 }
836
837 ret = -EBUSY;
838 goto out;
839 }
840
841 /* Only 5 data registers! */
842 if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
843 ret = -E2BIG;
844 goto out;
845 }
846
847 while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
848 u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
849 has_aux_irq,
850 send_bytes,
851 aux_clock_divider);
852
853 /* Must try at least 3 times according to DP spec */
854 for (try = 0; try < 5; try++) {
855 /* Load the send data into the aux channel data registers */
856 for (i = 0; i < send_bytes; i += 4)
857 I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
858 intel_dp_pack_aux(send + i,
859 send_bytes - i));
860
861 /* Send the command and wait for it to complete */
862 I915_WRITE(ch_ctl, send_ctl);
863
864 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
865
866 /* Clear done status and any errors */
867 I915_WRITE(ch_ctl,
868 status |
869 DP_AUX_CH_CTL_DONE |
870 DP_AUX_CH_CTL_TIME_OUT_ERROR |
871 DP_AUX_CH_CTL_RECEIVE_ERROR);
872
873 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
874 continue;
875
876 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
877 * 400us delay required for errors and timeouts.
878 * Timeout errors from the HW already meet this
879 * requirement, so skip to the next iteration.
880 */
881 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
882 usleep_range(400, 500);
883 continue;
884 }
885 if (status & DP_AUX_CH_CTL_DONE)
886 goto done;
887 }
888 }
889
890 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
892 ret = -EBUSY;
893 goto out;
894 }
895
896 done:
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected
899 */
900 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
902 ret = -EIO;
903 goto out;
904 }
905
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
908 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
910 ret = -ETIMEDOUT;
911 goto out;
912 }
913
914 /* Unload any bytes sent back from the other side */
915 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
917
918 /*
919 * By BSpec: "Message sizes of 0 or >20 are not allowed."
920 * We have no idea what happened, so we return -EBUSY so the
921 * drm layer takes care of the necessary retries.
922 */
923 if (recv_bytes == 0 || recv_bytes > 20) {
924 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
925 recv_bytes);
926 /*
927 * FIXME: This patch was created on top of a series that
928 * organizes the retries at the drm level. There, EBUSY should
929 * also take care of the 1ms wait before retrying.
930 * That aux retry re-org is still needed, and once it is
931 * merged we can remove this sleep from here.
932 */
933 usleep_range(1000, 1500);
934 ret = -EBUSY;
935 goto out;
936 }
937
938 if (recv_bytes > recv_size)
939 recv_bytes = recv_size;
940
941 for (i = 0; i < recv_bytes; i += 4)
942 intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
943 recv + i, recv_bytes - i);
944
945 ret = recv_bytes;
946 out:
947 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
948
949 if (vdd)
950 edp_panel_vdd_off(intel_dp, false);
951
952 pps_unlock(intel_dp);
953
954 return ret;
955 }
956
957 #define BARE_ADDRESS_SIZE 3
958 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
959 static ssize_t
960 intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
961 {
962 struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
963 uint8_t txbuf[20], rxbuf[20];
964 size_t txsize, rxsize;
965 int ret;
966
967 txbuf[0] = (msg->request << 4) |
968 ((msg->address >> 16) & 0xf);
969 txbuf[1] = (msg->address >> 8) & 0xff;
970 txbuf[2] = msg->address & 0xff;
971 txbuf[3] = msg->size - 1;
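	/*
	 * Editor's note -- example: a native read of 16 bytes from DPCD
	 * address 0 packs the header as txbuf[] = { 0x90, 0x00, 0x00, 0x0f }
	 * (request in the high nibble, 20-bit address, then size - 1).
	 */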
972
973 switch (msg->request & ~DP_AUX_I2C_MOT) {
974 case DP_AUX_NATIVE_WRITE:
975 case DP_AUX_I2C_WRITE:
976 case DP_AUX_I2C_WRITE_STATUS_UPDATE:
977 txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
978 rxsize = 2; /* 0 or 1 data bytes */
979
980 if (WARN_ON(txsize > 20))
981 return -E2BIG;
982
983 if (msg->buffer)
984 memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
985 else
986 WARN_ON(msg->size);
987
988 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
989 if (ret > 0) {
990 msg->reply = rxbuf[0] >> 4;
991
992 if (ret > 1) {
993 /* Number of bytes written in a short write. */
994 ret = clamp_t(int, rxbuf[1], 0, msg->size);
995 } else {
996 /* Return payload size. */
997 ret = msg->size;
998 }
999 }
1000 break;
1001
1002 case DP_AUX_NATIVE_READ:
1003 case DP_AUX_I2C_READ:
1004 txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
1005 rxsize = msg->size + 1;
1006
1007 if (WARN_ON(rxsize > 20))
1008 return -E2BIG;
1009
1010 ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
1011 if (ret > 0) {
1012 msg->reply = rxbuf[0] >> 4;
1013 /*
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1016 *
1017 * Return payload size.
1018 */
1019 ret--;
1020 memcpy(msg->buffer, rxbuf + 1, ret);
1021 }
1022 break;
1023
1024 default:
1025 ret = -EINVAL;
1026 break;
1027 }
1028
1029 return ret;
1030 }
1031
1032 static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
1033 enum port port)
1034 {
1035 switch (port) {
1036 case PORT_B:
1037 case PORT_C:
1038 case PORT_D:
1039 return DP_AUX_CH_CTL(port);
1040 default:
1041 MISSING_CASE(port);
1042 return DP_AUX_CH_CTL(PORT_B);
1043 }
1044 }
1045
1046 static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
1047 enum port port, int index)
1048 {
1049 switch (port) {
1050 case PORT_B:
1051 case PORT_C:
1052 case PORT_D:
1053 return DP_AUX_CH_DATA(port, index);
1054 default:
1055 MISSING_CASE(port);
1056 return DP_AUX_CH_DATA(PORT_B, index);
1057 }
1058 }
1059
1060 static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
1061 enum port port)
1062 {
1063 switch (port) {
1064 case PORT_A:
1065 return DP_AUX_CH_CTL(port);
1066 case PORT_B:
1067 case PORT_C:
1068 case PORT_D:
1069 return PCH_DP_AUX_CH_CTL(port);
1070 default:
1071 MISSING_CASE(port);
1072 return DP_AUX_CH_CTL(PORT_A);
1073 }
1074 }
1075
1076 static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
1077 enum port port, int index)
1078 {
1079 switch (port) {
1080 case PORT_A:
1081 return DP_AUX_CH_DATA(port, index);
1082 case PORT_B:
1083 case PORT_C:
1084 case PORT_D:
1085 return PCH_DP_AUX_CH_DATA(port, index);
1086 default:
1087 MISSING_CASE(port);
1088 return DP_AUX_CH_DATA(PORT_A, index);
1089 }
1090 }
1091
1092 /*
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1095 */
1096 static enum port skl_porte_aux_port(struct drm_i915_private *dev_priv)
1097 {
1098 const struct ddi_vbt_port_info *info =
1099 &dev_priv->vbt.ddi_port_info[PORT_E];
1100
1101 switch (info->alternate_aux_channel) {
1102 case DP_AUX_A:
1103 return PORT_A;
1104 case DP_AUX_B:
1105 return PORT_B;
1106 case DP_AUX_C:
1107 return PORT_C;
1108 case DP_AUX_D:
1109 return PORT_D;
1110 default:
1111 MISSING_CASE(info->alternate_aux_channel);
1112 return PORT_A;
1113 }
1114 }
1115
1116 static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
1117 enum port port)
1118 {
1119 if (port == PORT_E)
1120 port = skl_porte_aux_port(dev_priv);
1121
1122 switch (port) {
1123 case PORT_A:
1124 case PORT_B:
1125 case PORT_C:
1126 case PORT_D:
1127 return DP_AUX_CH_CTL(port);
1128 default:
1129 MISSING_CASE(port);
1130 return DP_AUX_CH_CTL(PORT_A);
1131 }
1132 }
1133
1134 static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
1135 enum port port, int index)
1136 {
1137 if (port == PORT_E)
1138 port = skl_porte_aux_port(dev_priv);
1139
1140 switch (port) {
1141 case PORT_A:
1142 case PORT_B:
1143 case PORT_C:
1144 case PORT_D:
1145 return DP_AUX_CH_DATA(port, index);
1146 default:
1147 MISSING_CASE(port);
1148 return DP_AUX_CH_DATA(PORT_A, index);
1149 }
1150 }
1151
1152 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
1153 enum port port)
1154 {
1155 if (INTEL_INFO(dev_priv)->gen >= 9)
1156 return skl_aux_ctl_reg(dev_priv, port);
1157 else if (HAS_PCH_SPLIT(dev_priv))
1158 return ilk_aux_ctl_reg(dev_priv, port);
1159 else
1160 return g4x_aux_ctl_reg(dev_priv, port);
1161 }
1162
1163 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
1164 enum port port, int index)
1165 {
1166 if (INTEL_INFO(dev_priv)->gen >= 9)
1167 return skl_aux_data_reg(dev_priv, port, index);
1168 else if (HAS_PCH_SPLIT(dev_priv))
1169 return ilk_aux_data_reg(dev_priv, port, index);
1170 else
1171 return g4x_aux_data_reg(dev_priv, port, index);
1172 }
1173
1174 static void intel_aux_reg_init(struct intel_dp *intel_dp)
1175 {
1176 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1177 enum port port = dp_to_dig_port(intel_dp)->port;
1178 int i;
1179
1180 intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
1181 for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
1182 intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
1183 }
1184
1185 static void
1186 intel_dp_aux_fini(struct intel_dp *intel_dp)
1187 {
1188 drm_dp_aux_unregister(&intel_dp->aux);
1189 kfree(intel_dp->aux.name);
1190 }
1191
1192 static int
1193 intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1194 {
1195 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1196 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1197 enum port port = intel_dig_port->port;
1198 int ret;
1199
1200 intel_aux_reg_init(intel_dp);
1201
1202 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1203 if (!intel_dp->aux.name)
1204 return -ENOMEM;
1205
1206 intel_dp->aux.dev = dev->dev;
1207 intel_dp->aux.transfer = intel_dp_aux_transfer;
1208
1209 DRM_DEBUG_KMS("registering %s bus for %s\n",
1210 intel_dp->aux.name,
1211 connector->base.kdev->kobj.name);
1212
1213 ret = drm_dp_aux_register(&intel_dp->aux);
1214 if (ret < 0) {
1215 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1216 intel_dp->aux.name, ret);
1217 kfree(intel_dp->aux.name);
1218 return ret;
1219 }
1220
1221 ret = sysfs_create_link(&connector->base.kdev->kobj,
1222 &intel_dp->aux.ddc.dev.kobj,
1223 intel_dp->aux.ddc.dev.kobj.name);
1224 if (ret < 0) {
1225 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1226 intel_dp->aux.name, ret);
1227 intel_dp_aux_fini(intel_dp);
1228 return ret;
1229 }
1230
1231 return 0;
1232 }
1233
1234 static void
1235 intel_dp_connector_unregister(struct intel_connector *intel_connector)
1236 {
1237 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1238
1239 if (!intel_connector->mst_port)
1240 sysfs_remove_link(&intel_connector->base.kdev->kobj,
1241 intel_dp->aux.ddc.dev.kobj.name);
1242 intel_connector_unregister(intel_connector);
1243 }
1244
1245 static void
1246 skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
1247 {
1248 u32 ctrl1;
1249
1250 memset(&pipe_config->dpll_hw_state, 0,
1251 sizeof(pipe_config->dpll_hw_state));
1252
1253 pipe_config->ddi_pll_sel = SKL_DPLL0;
1254 pipe_config->dpll_hw_state.cfgcr1 = 0;
1255 pipe_config->dpll_hw_state.cfgcr2 = 0;
1256
1257 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1258 switch (pipe_config->port_clock / 2) {
1259 case 81000:
1260 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
1261 SKL_DPLL0);
1262 break;
1263 case 135000:
1264 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
1265 SKL_DPLL0);
1266 break;
1267 case 270000:
1268 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
1269 SKL_DPLL0);
1270 break;
1271 case 162000:
1272 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
1273 SKL_DPLL0);
1274 break;
1275 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, the VCO is 8640, which
1276 results in a CDCLK change. Need to handle the change of CDCLK by
1277 disabling pipes and re-enabling them. */
1278 case 108000:
1279 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
1280 SKL_DPLL0);
1281 break;
1282 case 216000:
1283 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
1284 SKL_DPLL0);
1285 break;
1286
1287 }
1288 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1289 }
1290
1291 void
1292 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
1293 {
1294 memset(&pipe_config->dpll_hw_state, 0,
1295 sizeof(pipe_config->dpll_hw_state));
1296
1297 switch (pipe_config->port_clock / 2) {
1298 case 81000:
1299 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
1300 break;
1301 case 135000:
1302 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
1303 break;
1304 case 270000:
1305 pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
1306 break;
1307 }
1308 }
1309
1310 static int
1311 intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
1312 {
1313 if (intel_dp->num_sink_rates) {
1314 *sink_rates = intel_dp->sink_rates;
1315 return intel_dp->num_sink_rates;
1316 }
1317
1318 *sink_rates = default_rates;
1319
1320 return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
1321 }
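/*
 * Editor's note on the shift above: the DPCD link BW codes are 0x06
 * (1.62), 0x0a (2.7) and 0x14 (5.4), so (code >> 3) + 1 evaluates to 1,
 * 2 or 3 -- the number of leading entries of default_rates[] the sink
 * supports.
 */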
1322
1323 bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
1324 {
1325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1326 struct drm_device *dev = dig_port->base.base.dev;
1327
1328 /* WaDisableHBR2:skl */
1329 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0))
1330 return false;
1331
1332 if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
1333 (INTEL_INFO(dev)->gen >= 9))
1334 return true;
1335 else
1336 return false;
1337 }
1338
1339 static int
1340 intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
1341 {
1342 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1343 struct drm_device *dev = dig_port->base.base.dev;
1344 int size;
1345
1346 if (IS_BROXTON(dev)) {
1347 *source_rates = bxt_rates;
1348 size = ARRAY_SIZE(bxt_rates);
1349 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
1350 *source_rates = skl_rates;
1351 size = ARRAY_SIZE(skl_rates);
1352 } else {
1353 *source_rates = default_rates;
1354 size = ARRAY_SIZE(default_rates);
1355 }
1356
1357 /* This depends on the fact that 5.4 is the last value in the array */
1358 if (!intel_dp_source_supports_hbr2(intel_dp))
1359 size--;
1360
1361 return size;
1362 }
1363
1364 static void
1365 intel_dp_set_clock(struct intel_encoder *encoder,
1366 struct intel_crtc_state *pipe_config)
1367 {
1368 struct drm_device *dev = encoder->base.dev;
1369 const struct dp_link_dpll *divisor = NULL;
1370 int i, count = 0;
1371
1372 if (IS_G4X(dev)) {
1373 divisor = gen4_dpll;
1374 count = ARRAY_SIZE(gen4_dpll);
1375 } else if (HAS_PCH_SPLIT(dev)) {
1376 divisor = pch_dpll;
1377 count = ARRAY_SIZE(pch_dpll);
1378 } else if (IS_CHERRYVIEW(dev)) {
1379 divisor = chv_dpll;
1380 count = ARRAY_SIZE(chv_dpll);
1381 } else if (IS_VALLEYVIEW(dev)) {
1382 divisor = vlv_dpll;
1383 count = ARRAY_SIZE(vlv_dpll);
1384 }
1385
1386 if (divisor && count) {
1387 for (i = 0; i < count; i++) {
1388 if (pipe_config->port_clock == divisor[i].clock) {
1389 pipe_config->dpll = divisor[i].dpll;
1390 pipe_config->clock_set = true;
1391 break;
1392 }
1393 }
1394 }
1395 }
1396
1397 static int intersect_rates(const int *source_rates, int source_len,
1398 const int *sink_rates, int sink_len,
1399 int *common_rates)
1400 {
1401 int i = 0, j = 0, k = 0;
1402
1403 while (i < source_len && j < sink_len) {
1404 if (source_rates[i] == sink_rates[j]) {
1405 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
1406 return k;
1407 common_rates[k] = source_rates[i];
1408 ++k;
1409 ++i;
1410 ++j;
1411 } else if (source_rates[i] < sink_rates[j]) {
1412 ++i;
1413 } else {
1414 ++j;
1415 }
1416 }
1417 return k;
1418 }
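/*
 * Editor's note: this is the usual two-pointer intersection of two
 * ascending sorted lists; e.g. source { 162000, 270000, 540000 } and
 * sink { 162000, 216000, 270000 } produce common { 162000, 270000 }.
 */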
1419
1420 static int intel_dp_common_rates(struct intel_dp *intel_dp,
1421 int *common_rates)
1422 {
1423 const int *source_rates, *sink_rates;
1424 int source_len, sink_len;
1425
1426 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1427 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1428
1429 return intersect_rates(source_rates, source_len,
1430 sink_rates, sink_len,
1431 common_rates);
1432 }
1433
1434 static void snprintf_int_array(char *str, size_t len,
1435 const int *array, int nelem)
1436 {
1437 int i;
1438
1439 str[0] = '\0';
1440
1441 for (i = 0; i < nelem; i++) {
1442 int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
1443 if (r >= len)
1444 return;
1445 str += r;
1446 len -= r;
1447 }
1448 }
1449
1450 static void intel_dp_print_rates(struct intel_dp *intel_dp)
1451 {
1452 const int *source_rates, *sink_rates;
1453 int source_len, sink_len, common_len;
1454 int common_rates[DP_MAX_SUPPORTED_RATES];
1455 char str[128]; /* FIXME: too big for stack? */
1456
1457 if ((drm_debug & DRM_UT_KMS) == 0)
1458 return;
1459
1460 source_len = intel_dp_source_rates(intel_dp, &source_rates);
1461 snprintf_int_array(str, sizeof(str), source_rates, source_len);
1462 DRM_DEBUG_KMS("source rates: %s\n", str);
1463
1464 sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
1465 snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
1466 DRM_DEBUG_KMS("sink rates: %s\n", str);
1467
1468 common_len = intel_dp_common_rates(intel_dp, common_rates);
1469 snprintf_int_array(str, sizeof(str), common_rates, common_len);
1470 DRM_DEBUG_KMS("common rates: %s\n", str);
1471 }
1472
1473 static int rate_to_index(int find, const int *rates)
1474 {
1475 int i = 0;
1476
1477 for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
1478 if (find == rates[i])
1479 break;
1480
1481 return i;
1482 }
1483
1484 int
1485 intel_dp_max_link_rate(struct intel_dp *intel_dp)
1486 {
1487 int rates[DP_MAX_SUPPORTED_RATES] = {};
1488 int len;
1489
1490 len = intel_dp_common_rates(intel_dp, rates);
1491 if (WARN_ON(len <= 0))
1492 return 162000;
1493
1494 return rates[rate_to_index(0, rates) - 1];
1495 }
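/*
 * Editor's note: rates[] is zero-initialized and the arrays are sorted
 * ascending, so rate_to_index(0, rates) lands on the first unused slot
 * (or on DP_MAX_SUPPORTED_RATES when the array is full); subtracting
 * one therefore indexes the highest common rate.
 */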
1496
1497 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1498 {
1499 return rate_to_index(rate, intel_dp->sink_rates);
1500 }
1501
1502 void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
1503 uint8_t *link_bw, uint8_t *rate_select)
1504 {
1505 if (intel_dp->num_sink_rates) {
1506 *link_bw = 0;
1507 *rate_select =
1508 intel_dp_rate_select(intel_dp, port_clock);
1509 } else {
1510 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1511 *rate_select = 0;
1512 }
1513 }
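/*
 * Editor's note -- example: on a sink without a rate table,
 * port_clock = 270000 yields *link_bw = DP_LINK_BW_2_7 (0x0a) and
 * *rate_select = 0; with a rate table the roles are reversed.
 */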
1514
1515 bool
1516 intel_dp_compute_config(struct intel_encoder *encoder,
1517 struct intel_crtc_state *pipe_config)
1518 {
1519 struct drm_device *dev = encoder->base.dev;
1520 struct drm_i915_private *dev_priv = dev->dev_private;
1521 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1522 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1523 enum port port = dp_to_dig_port(intel_dp)->port;
1524 struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
1525 struct intel_connector *intel_connector = intel_dp->attached_connector;
1526 int lane_count, clock;
1527 int min_lane_count = 1;
1528 int max_lane_count = intel_dp_max_lane_count(intel_dp);
1529 /* Conveniently, the link BW constants become indices with a shift...*/
1530 int min_clock = 0;
1531 int max_clock;
1532 int bpp, mode_rate;
1533 int link_avail, link_clock;
1534 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
1535 int common_len;
1536 uint8_t link_bw, rate_select;
1537
1538 common_len = intel_dp_common_rates(intel_dp, common_rates);
1539
1540 /* No common link rates between source and sink */
1541 WARN_ON(common_len <= 0);
1542
1543 max_clock = common_len - 1;
1544
1545 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1546 pipe_config->has_pch_encoder = true;
1547
1548 pipe_config->has_dp_encoder = true;
1549 pipe_config->has_drrs = false;
1550 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1551
1552 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
1553 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
1554 adjusted_mode);
1555
1556 if (INTEL_INFO(dev)->gen >= 9) {
1557 int ret;
1558 ret = skl_update_scaler_crtc(pipe_config);
1559 if (ret)
1560 return false; /* bool function: a non-zero errno must not read as success */
1561 }
1562
1563 if (HAS_GMCH_DISPLAY(dev))
1564 intel_gmch_panel_fitting(intel_crtc, pipe_config,
1565 intel_connector->panel.fitting_mode);
1566 else
1567 intel_pch_panel_fitting(intel_crtc, pipe_config,
1568 intel_connector->panel.fitting_mode);
1569 }
1570
1571 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
1572 return false;
1573
1574 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1575 "max bw %d pixel clock %iKHz\n",
1576 max_lane_count, common_rates[max_clock],
1577 adjusted_mode->crtc_clock);
1578
1579 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1580 * bpc in between. */
1581 bpp = pipe_config->pipe_bpp;
1582 if (is_edp(intel_dp)) {
1583
1584 /* Get bpp from vbt only for panels that don't have bpp in edid */
1585 if (intel_connector->base.display_info.bpc == 0 &&
1586 (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
1587 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1588 dev_priv->vbt.edp_bpp);
1589 bpp = dev_priv->vbt.edp_bpp;
1590 }
1591
1592 /*
1593 * Use the maximum clock and number of lanes the eDP panel
1594 * advertises being capable of. The panels are generally
1595 * designed to support only a single clock and lane
1596 * configuration, and typically these values correspond to the
1597 * native resolution of the panel.
1598 */
1599 min_lane_count = max_lane_count;
1600 min_clock = max_clock;
1601 }
1602
1603 for (; bpp >= 6*3; bpp -= 2*3) {
1604 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1605 bpp);
1606
1607 for (clock = min_clock; clock <= max_clock; clock++) {
1608 for (lane_count = min_lane_count;
1609 lane_count <= max_lane_count;
1610 lane_count <<= 1) {
1611
1612 link_clock = common_rates[clock];
1613 link_avail = intel_dp_max_data_rate(link_clock,
1614 lane_count);
1615
1616 if (mode_rate <= link_avail) {
1617 goto found;
1618 }
1619 }
1620 }
1621 }
1622
1623 return false;
1624
1625 found:
1626 if (intel_dp->color_range_auto) {
1627 /*
1628 * See:
1629 * CEA-861-E - 5.1 Default Encoding Parameters
1630 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1631 */
1632 pipe_config->limited_color_range =
1633 bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
1634 } else {
1635 pipe_config->limited_color_range =
1636 intel_dp->limited_color_range;
1637 }
1638
1639 pipe_config->lane_count = lane_count;
1640
1641 pipe_config->pipe_bpp = bpp;
1642 pipe_config->port_clock = common_rates[clock];
1643
1644 intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
1645 &link_bw, &rate_select);
1646
1647 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1648 link_bw, rate_select, pipe_config->lane_count,
1649 pipe_config->port_clock, bpp);
1650 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1651 mode_rate, link_avail);
1652
1653 intel_link_compute_m_n(bpp, lane_count,
1654 adjusted_mode->crtc_clock,
1655 pipe_config->port_clock,
1656 &pipe_config->dp_m_n);
1657
1658 if (intel_connector->panel.downclock_mode != NULL &&
1659 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
1660 pipe_config->has_drrs = true;
1661 intel_link_compute_m_n(bpp, lane_count,
1662 intel_connector->panel.downclock_mode->clock,
1663 pipe_config->port_clock,
1664 &pipe_config->dp_m2_n2);
1665 }
1666
1667 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp))
1668 skl_edp_set_pll_config(pipe_config);
1669 else if (IS_BROXTON(dev))
1670 /* handled in ddi */;
1671 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1672 hsw_dp_set_ddi_pll_sel(pipe_config);
1673 else
1674 intel_dp_set_clock(encoder, pipe_config);
1675
1676 return true;
1677 }
1678
1679 void intel_dp_set_link_params(struct intel_dp *intel_dp,
1680 const struct intel_crtc_state *pipe_config)
1681 {
1682 intel_dp->link_rate = pipe_config->port_clock;
1683 intel_dp->lane_count = pipe_config->lane_count;
1684 }
1685
1686 static void intel_dp_prepare(struct intel_encoder *encoder)
1687 {
1688 struct drm_device *dev = encoder->base.dev;
1689 struct drm_i915_private *dev_priv = dev->dev_private;
1690 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1691 enum port port = dp_to_dig_port(intel_dp)->port;
1692 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1693 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1694
1695 intel_dp_set_link_params(intel_dp, crtc->config);
1696
1697 /*
1698 * There are four kinds of DP registers:
1699 *
1700 * IBX PCH
1701 * SNB CPU
1702 * IVB CPU
1703 * CPT PCH
1704 *
1705 * IBX PCH and CPU are the same for almost everything,
1706 * except that the CPU DP PLL is configured in this
1707 * register
1708 *
1709 * CPT PCH is quite different, having many bits moved
1710 * to the TRANS_DP_CTL register instead. That
1711 * configuration happens (oddly) in ironlake_pch_enable
1712 */
1713
1714 /* Preserve the BIOS-computed detected bit. This is
1715 * supposed to be read-only.
1716 */
1717 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
1718
1719 /* Handle DP bits in common between all three register formats */
1720 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1721 intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);
1722
1723 /* Split out the IBX/CPU vs CPT settings */
1724
1725 if (IS_GEN7(dev) && port == PORT_A) {
1726 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1727 intel_dp->DP |= DP_SYNC_HS_HIGH;
1728 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1729 intel_dp->DP |= DP_SYNC_VS_HIGH;
1730 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1731
1732 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1733 intel_dp->DP |= DP_ENHANCED_FRAMING;
1734
1735 intel_dp->DP |= crtc->pipe << 29;
1736 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
1737 u32 trans_dp;
1738
1739 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
1740
1741 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1742 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1743 trans_dp |= TRANS_DP_ENH_FRAMING;
1744 else
1745 trans_dp &= ~TRANS_DP_ENH_FRAMING;
1746 I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
1747 } else {
1748 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
1749 !IS_CHERRYVIEW(dev) && crtc->config->limited_color_range)
1750 intel_dp->DP |= DP_COLOR_RANGE_16_235;
1751
1752 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
1753 intel_dp->DP |= DP_SYNC_HS_HIGH;
1754 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1755 intel_dp->DP |= DP_SYNC_VS_HIGH;
1756 intel_dp->DP |= DP_LINK_TRAIN_OFF;
1757
1758 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
1759 intel_dp->DP |= DP_ENHANCED_FRAMING;
1760
1761 if (IS_CHERRYVIEW(dev))
1762 intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
1763 else if (crtc->pipe == PIPE_B)
1764 intel_dp->DP |= DP_PIPEB_SELECT;
1765 }
1766 }
1767
1768 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1769 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1770
1771 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1772 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1773
1774 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1775 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1776
1777 static void wait_panel_status(struct intel_dp *intel_dp,
1778 u32 mask,
1779 u32 value)
1780 {
1781 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1782 struct drm_i915_private *dev_priv = dev->dev_private;
1783 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1784
1785 lockdep_assert_held(&dev_priv->pps_mutex);
1786
1787 pp_stat_reg = _pp_stat_reg(intel_dp);
1788 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1789
1790 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1791 mask, value,
1792 I915_READ(pp_stat_reg),
1793 I915_READ(pp_ctrl_reg));
1794
1795 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
1796 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1797 I915_READ(pp_stat_reg),
1798 I915_READ(pp_ctrl_reg));
1799 }
1800
1801 DRM_DEBUG_KMS("Wait complete\n");
1802 }
1803
1804 static void wait_panel_on(struct intel_dp *intel_dp)
1805 {
1806 DRM_DEBUG_KMS("Wait for panel power on\n");
1807 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1808 }
1809
1810 static void wait_panel_off(struct intel_dp *intel_dp)
1811 {
1812 DRM_DEBUG_KMS("Wait for panel power off time\n");
1813 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1814 }
1815
1816 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1817 {
1818 ktime_t panel_power_on_time;
1819 s64 panel_power_off_duration;
1820
1821 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1822
1823 /* take the difference of the current time and the panel power off time
1824 * and then make the panel wait for t11_t12 if needed. */
1825 panel_power_on_time = ktime_get_boottime();
1826 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1827
1828 /* When we disable the VDD override bit last we have to do the manual
1829 * wait. */
1830 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1831 wait_remaining_ms_from_jiffies(jiffies,
1832 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1833
1834 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1835 }
1836
1837 static void wait_backlight_on(struct intel_dp *intel_dp)
1838 {
1839 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1840 intel_dp->backlight_on_delay);
1841 }
1842
1843 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1844 {
1845 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1846 intel_dp->backlight_off_delay);
1847 }
1848
1849 /* Read the current pp_control value, unlocking the register if it
1850 * is locked
1851 */
1852
1853 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1854 {
1855 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1856 struct drm_i915_private *dev_priv = dev->dev_private;
1857 u32 control;
1858
1859 lockdep_assert_held(&dev_priv->pps_mutex);
1860
1861 control = I915_READ(_pp_ctrl_reg(intel_dp));
1862 if (!IS_BROXTON(dev)) {
1863 control &= ~PANEL_UNLOCK_MASK;
1864 control |= PANEL_UNLOCK_REGS;
1865 }
1866 return control;
1867 }
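
/*
 * A note on the unlock, assuming the PANEL_UNLOCK_* definitions from
 * i915_reg.h: the PANEL_UNLOCK_MASK bits of PP_CONTROL act as a
 * write-protect key, so substituting PANEL_UNLOCK_REGS here lets a
 * later I915_WRITE of the returned word actually take effect (BXT has
 * no such key, hence the IS_BROXTON() exception).
 */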
1868
1869 /*
1870 * Must be paired with edp_panel_vdd_off().
1871 * Must hold pps_mutex around the whole on/off sequence.
1872 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1873 */
1874 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1875 {
1876 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1878 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1879 struct drm_i915_private *dev_priv = dev->dev_private;
1880 enum intel_display_power_domain power_domain;
1881 u32 pp;
1882 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1883 bool need_to_disable = !intel_dp->want_panel_vdd;
1884
1885 lockdep_assert_held(&dev_priv->pps_mutex);
1886
1887 if (!is_edp(intel_dp))
1888 return false;
1889
1890 cancel_delayed_work(&intel_dp->panel_vdd_work);
1891 intel_dp->want_panel_vdd = true;
1892
1893 if (edp_have_panel_vdd(intel_dp))
1894 return need_to_disable;
1895
1896 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1897 intel_display_power_get(dev_priv, power_domain);
1898
1899 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1900 port_name(intel_dig_port->port));
1901
1902 if (!edp_have_panel_power(intel_dp))
1903 wait_panel_power_cycle(intel_dp);
1904
1905 pp = ironlake_get_pp_control(intel_dp);
1906 pp |= EDP_FORCE_VDD;
1907
1908 pp_stat_reg = _pp_stat_reg(intel_dp);
1909 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1910
1911 I915_WRITE(pp_ctrl_reg, pp);
1912 POSTING_READ(pp_ctrl_reg);
1913 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1914 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1915 /*
1916 * If the panel wasn't on, delay before accessing aux channel
1917 */
1918 if (!edp_have_panel_power(intel_dp)) {
1919 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1920 port_name(intel_dig_port->port));
1921 msleep(intel_dp->panel_power_up_delay);
1922 }
1923
1924 return need_to_disable;
1925 }
1926
1927 /*
1928 * Must be paired with intel_edp_panel_vdd_off() or
1929 * intel_edp_panel_off().
1930 * Nested calls to these functions are not allowed since
1931 * we drop the lock. Caller must use some higher level
1932 * locking to prevent nested calls from other threads.
1933 */
1934 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1935 {
1936 bool vdd;
1937
1938 if (!is_edp(intel_dp))
1939 return;
1940
1941 pps_lock(intel_dp);
1942 vdd = edp_panel_vdd_on(intel_dp);
1943 pps_unlock(intel_dp);
1944
1945 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
1946 port_name(dp_to_dig_port(intel_dp)->port));
1947 }
1948
1949 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1950 {
1951 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1952 struct drm_i915_private *dev_priv = dev->dev_private;
1953 struct intel_digital_port *intel_dig_port =
1954 dp_to_dig_port(intel_dp);
1955 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1956 enum intel_display_power_domain power_domain;
1957 u32 pp;
1958 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1959
1960 lockdep_assert_held(&dev_priv->pps_mutex);
1961
1962 WARN_ON(intel_dp->want_panel_vdd);
1963
1964 if (!edp_have_panel_vdd(intel_dp))
1965 return;
1966
1967 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1968 port_name(intel_dig_port->port));
1969
1970 pp = ironlake_get_pp_control(intel_dp);
1971 pp &= ~EDP_FORCE_VDD;
1972
1973 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1974 pp_stat_reg = _pp_stat_reg(intel_dp);
1975
1976 I915_WRITE(pp_ctrl_reg, pp);
1977 POSTING_READ(pp_ctrl_reg);
1978
1979 /* Make sure sequencer is idle before allowing subsequent activity */
1980 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1981 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
1982
1983 if ((pp & POWER_TARGET_ON) == 0)
1984 intel_dp->panel_power_off_time = ktime_get_boottime();
1985
1986 power_domain = intel_display_port_aux_power_domain(intel_encoder);
1987 intel_display_power_put(dev_priv, power_domain);
1988 }
1989
1990 static void edp_panel_vdd_work(struct work_struct *__work)
1991 {
1992 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1993 struct intel_dp, panel_vdd_work);
1994
1995 pps_lock(intel_dp);
1996 if (!intel_dp->want_panel_vdd)
1997 edp_panel_vdd_off_sync(intel_dp);
1998 pps_unlock(intel_dp);
1999 }
2000
2001 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2002 {
2003 unsigned long delay;
2004
2005 /*
2006 * Queue the timer to fire a long time from now (relative to the power
2007 * down delay) to keep the panel power up across a sequence of
2008 * operations.
2009 */
2010 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2011 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2012 }
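
/*
 * Worked example (assuming a panel_power_cycle_delay of 500 ms): the
 * VDD-off work is queued ~2.5 s out, so a burst of AUX transactions
 * keeps re-arming the timer via edp_panel_vdd_on() without ever
 * dropping VDD in between.
 */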
2013
2014 /*
2015 * Must be paired with edp_panel_vdd_on().
2016 * Must hold pps_mutex around the whole on/off sequence.
2017 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2018 */
2019 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2020 {
2021 struct drm_i915_private *dev_priv =
2022 intel_dp_to_dev(intel_dp)->dev_private;
2023
2024 lockdep_assert_held(&dev_priv->pps_mutex);
2025
2026 if (!is_edp(intel_dp))
2027 return;
2028
2029 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2030 port_name(dp_to_dig_port(intel_dp)->port));
2031
2032 intel_dp->want_panel_vdd = false;
2033
2034 if (sync)
2035 edp_panel_vdd_off_sync(intel_dp);
2036 else
2037 edp_panel_vdd_schedule_off(intel_dp);
2038 }
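
/*
 * Illustrative pairing under pps_mutex (cf. intel_enable_dp() below):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	... AUX transactions / panel power work ...
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */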
2039
2040 static void edp_panel_on(struct intel_dp *intel_dp)
2041 {
2042 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2043 struct drm_i915_private *dev_priv = dev->dev_private;
2044 u32 pp;
2045 i915_reg_t pp_ctrl_reg;
2046
2047 lockdep_assert_held(&dev_priv->pps_mutex);
2048
2049 if (!is_edp(intel_dp))
2050 return;
2051
2052 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2053 port_name(dp_to_dig_port(intel_dp)->port));
2054
2055 if (WARN(edp_have_panel_power(intel_dp),
2056 "eDP port %c panel power already on\n",
2057 port_name(dp_to_dig_port(intel_dp)->port)))
2058 return;
2059
2060 wait_panel_power_cycle(intel_dp);
2061
2062 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2063 pp = ironlake_get_pp_control(intel_dp);
2064 if (IS_GEN5(dev)) {
2065 /* ILK workaround: disable reset around power sequence */
2066 pp &= ~PANEL_POWER_RESET;
2067 I915_WRITE(pp_ctrl_reg, pp);
2068 POSTING_READ(pp_ctrl_reg);
2069 }
2070
2071 pp |= POWER_TARGET_ON;
2072 if (!IS_GEN5(dev))
2073 pp |= PANEL_POWER_RESET;
2074
2075 I915_WRITE(pp_ctrl_reg, pp);
2076 POSTING_READ(pp_ctrl_reg);
2077
2078 wait_panel_on(intel_dp);
2079 intel_dp->last_power_on = jiffies;
2080
2081 if (IS_GEN5(dev)) {
2082 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2083 I915_WRITE(pp_ctrl_reg, pp);
2084 POSTING_READ(pp_ctrl_reg);
2085 }
2086 }
2087
2088 void intel_edp_panel_on(struct intel_dp *intel_dp)
2089 {
2090 if (!is_edp(intel_dp))
2091 return;
2092
2093 pps_lock(intel_dp);
2094 edp_panel_on(intel_dp);
2095 pps_unlock(intel_dp);
2096 }
2097
2098
2099 static void edp_panel_off(struct intel_dp *intel_dp)
2100 {
2101 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2102 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2103 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 enum intel_display_power_domain power_domain;
2106 u32 pp;
2107 i915_reg_t pp_ctrl_reg;
2108
2109 lockdep_assert_held(&dev_priv->pps_mutex);
2110
2111 if (!is_edp(intel_dp))
2112 return;
2113
2114 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2115 port_name(dp_to_dig_port(intel_dp)->port));
2116
2117 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2118 port_name(dp_to_dig_port(intel_dp)->port));
2119
2120 pp = ironlake_get_pp_control(intel_dp);
2121 /* We need to switch off panel power _and_ force vdd, for otherwise some
2122 * panels get very unhappy and cease to work. */
2123 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2124 EDP_BLC_ENABLE);
2125
2126 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2127
2128 intel_dp->want_panel_vdd = false;
2129
2130 I915_WRITE(pp_ctrl_reg, pp);
2131 POSTING_READ(pp_ctrl_reg);
2132
2133 intel_dp->panel_power_off_time = ktime_get_boottime();
2134 wait_panel_off(intel_dp);
2135
2136 /* We got a reference when we enabled the VDD. */
2137 power_domain = intel_display_port_aux_power_domain(intel_encoder);
2138 intel_display_power_put(dev_priv, power_domain);
2139 }
2140
2141 void intel_edp_panel_off(struct intel_dp *intel_dp)
2142 {
2143 if (!is_edp(intel_dp))
2144 return;
2145
2146 pps_lock(intel_dp);
2147 edp_panel_off(intel_dp);
2148 pps_unlock(intel_dp);
2149 }
2150
2151 /* Enable backlight in the panel power control. */
2152 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2153 {
2154 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2155 struct drm_device *dev = intel_dig_port->base.base.dev;
2156 struct drm_i915_private *dev_priv = dev->dev_private;
2157 u32 pp;
2158 i915_reg_t pp_ctrl_reg;
2159
2160 /*
2161 * If we enable the backlight right away following a panel power
2162 * on, we may see slight flicker as the panel syncs with the eDP
2163 * link. So delay a bit to make sure the image is solid before
2164 * allowing it to appear.
2165 */
2166 wait_backlight_on(intel_dp);
2167
2168 pps_lock(intel_dp);
2169
2170 pp = ironlake_get_pp_control(intel_dp);
2171 pp |= EDP_BLC_ENABLE;
2172
2173 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2174
2175 I915_WRITE(pp_ctrl_reg, pp);
2176 POSTING_READ(pp_ctrl_reg);
2177
2178 pps_unlock(intel_dp);
2179 }
2180
2181 /* Enable backlight PWM and backlight PP control. */
2182 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2183 {
2184 if (!is_edp(intel_dp))
2185 return;
2186
2187 DRM_DEBUG_KMS("\n");
2188
2189 intel_panel_enable_backlight(intel_dp->attached_connector);
2190 _intel_edp_backlight_on(intel_dp);
2191 }
2192
2193 /* Disable backlight in the panel power control. */
2194 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2195 {
2196 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2197 struct drm_i915_private *dev_priv = dev->dev_private;
2198 u32 pp;
2199 i915_reg_t pp_ctrl_reg;
2200
2201 if (!is_edp(intel_dp))
2202 return;
2203
2204 pps_lock(intel_dp);
2205
2206 pp = ironlake_get_pp_control(intel_dp);
2207 pp &= ~EDP_BLC_ENABLE;
2208
2209 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2210
2211 I915_WRITE(pp_ctrl_reg, pp);
2212 POSTING_READ(pp_ctrl_reg);
2213
2214 pps_unlock(intel_dp);
2215
2216 intel_dp->last_backlight_off = jiffies;
2217 edp_wait_backlight_off(intel_dp);
2218 }
2219
2220 /* Disable backlight PP control and backlight PWM. */
2221 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2222 {
2223 if (!is_edp(intel_dp))
2224 return;
2225
2226 DRM_DEBUG_KMS("\n");
2227
2228 _intel_edp_backlight_off(intel_dp);
2229 intel_panel_disable_backlight(intel_dp->attached_connector);
2230 }
2231
2232 /*
2233 * Hook for controlling the panel power control backlight through the bl_power
2234 * sysfs attribute. Take care to handle multiple calls.
2235 */
2236 static void intel_edp_backlight_power(struct intel_connector *connector,
2237 bool enable)
2238 {
2239 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2240 bool is_enabled;
2241
2242 pps_lock(intel_dp);
2243 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2244 pps_unlock(intel_dp);
2245
2246 if (is_enabled == enable)
2247 return;
2248
2249 DRM_DEBUG_KMS("panel power control backlight %s\n",
2250 enable ? "enable" : "disable");
2251
2252 if (enable)
2253 _intel_edp_backlight_on(intel_dp);
2254 else
2255 _intel_edp_backlight_off(intel_dp);
2256 }
2257
2258 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2259 {
2260 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2261 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2262 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2263
2264 I915_STATE_WARN(cur_state != state,
2265 "DP port %c state assertion failure (expected %s, current %s)\n",
2266 port_name(dig_port->port),
2267 onoff(state), onoff(cur_state));
2268 }
2269 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2270
2271 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2272 {
2273 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2274
2275 I915_STATE_WARN(cur_state != state,
2276 "eDP PLL state assertion failure (expected %s, current %s)\n",
2277 onoff(state), onoff(cur_state));
2278 }
2279 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2280 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2281
2282 static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2283 {
2284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2285 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2286 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2287
2288 assert_pipe_disabled(dev_priv, crtc->pipe);
2289 assert_dp_port_disabled(intel_dp);
2290 assert_edp_pll_disabled(dev_priv);
2291
2292 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2293 crtc->config->port_clock);
2294
2295 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2296
2297 if (crtc->config->port_clock == 162000)
2298 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2299 else
2300 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2301
2302 I915_WRITE(DP_A, intel_dp->DP);
2303 POSTING_READ(DP_A);
2304 udelay(500);
2305
2306 intel_dp->DP |= DP_PLL_ENABLE;
2307
2308 I915_WRITE(DP_A, intel_dp->DP);
2309 POSTING_READ(DP_A);
2310 udelay(200);
2311 }
2312
2313 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2314 {
2315 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2316 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2317 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2318
2319 assert_pipe_disabled(dev_priv, crtc->pipe);
2320 assert_dp_port_disabled(intel_dp);
2321 assert_edp_pll_enabled(dev_priv);
2322
2323 DRM_DEBUG_KMS("disabling eDP PLL\n");
2324
2325 intel_dp->DP &= ~DP_PLL_ENABLE;
2326
2327 I915_WRITE(DP_A, intel_dp->DP);
2328 POSTING_READ(DP_A);
2329 udelay(200);
2330 }
2331
2332 /* If the sink supports it, try to set the power state appropriately */
2333 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2334 {
2335 int ret, i;
2336
2337 /* Should have a valid DPCD by this point */
2338 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2339 return;
2340
2341 if (mode != DRM_MODE_DPMS_ON) {
2342 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2343 DP_SET_POWER_D3);
2344 } else {
2345 /*
2346 * When turning on, we need to retry a few times, sleeping 1 ms
2347 * between attempts, to give the sink time to wake up.
2348 */
2349 for (i = 0; i < 3; i++) {
2350 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2351 DP_SET_POWER_D0);
2352 if (ret == 1)
2353 break;
2354 msleep(1);
2355 }
2356 }
2357
2358 if (ret != 1)
2359 DRM_DEBUG_KMS("failed to %s sink power state\n",
2360 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2361 }
2362
2363 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2364 enum pipe *pipe)
2365 {
2366 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2367 enum port port = dp_to_dig_port(intel_dp)->port;
2368 struct drm_device *dev = encoder->base.dev;
2369 struct drm_i915_private *dev_priv = dev->dev_private;
2370 enum intel_display_power_domain power_domain;
2371 u32 tmp;
2372
2373 power_domain = intel_display_port_power_domain(encoder);
2374 if (!intel_display_power_is_enabled(dev_priv, power_domain))
2375 return false;
2376
2377 tmp = I915_READ(intel_dp->output_reg);
2378
2379 if (!(tmp & DP_PORT_EN))
2380 return false;
2381
2382 if (IS_GEN7(dev) && port == PORT_A) {
2383 *pipe = PORT_TO_PIPE_CPT(tmp);
2384 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
2385 enum pipe p;
2386
2387 for_each_pipe(dev_priv, p) {
2388 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2389 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2390 *pipe = p;
2391 return true;
2392 }
2393 }
2394
2395 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2396 i915_mmio_reg_offset(intel_dp->output_reg));
2397 } else if (IS_CHERRYVIEW(dev)) {
2398 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2399 } else {
2400 *pipe = PORT_TO_PIPE(tmp);
2401 }
2402
2403 return true;
2404 }
2405
2406 static void intel_dp_get_config(struct intel_encoder *encoder,
2407 struct intel_crtc_state *pipe_config)
2408 {
2409 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2410 u32 tmp, flags = 0;
2411 struct drm_device *dev = encoder->base.dev;
2412 struct drm_i915_private *dev_priv = dev->dev_private;
2413 enum port port = dp_to_dig_port(intel_dp)->port;
2414 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2415 int dotclock;
2416
2417 tmp = I915_READ(intel_dp->output_reg);
2418
2419 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
2420
2421 if (HAS_PCH_CPT(dev) && port != PORT_A) {
2422 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2423
2424 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2425 flags |= DRM_MODE_FLAG_PHSYNC;
2426 else
2427 flags |= DRM_MODE_FLAG_NHSYNC;
2428
2429 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2430 flags |= DRM_MODE_FLAG_PVSYNC;
2431 else
2432 flags |= DRM_MODE_FLAG_NVSYNC;
2433 } else {
2434 if (tmp & DP_SYNC_HS_HIGH)
2435 flags |= DRM_MODE_FLAG_PHSYNC;
2436 else
2437 flags |= DRM_MODE_FLAG_NHSYNC;
2438
2439 if (tmp & DP_SYNC_VS_HIGH)
2440 flags |= DRM_MODE_FLAG_PVSYNC;
2441 else
2442 flags |= DRM_MODE_FLAG_NVSYNC;
2443 }
2444
2445 pipe_config->base.adjusted_mode.flags |= flags;
2446
2447 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2448 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2449 pipe_config->limited_color_range = true;
2450
2451 pipe_config->has_dp_encoder = true;
2452
2453 pipe_config->lane_count =
2454 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2455
2456 intel_dp_get_m_n(crtc, pipe_config);
2457
2458 if (port == PORT_A) {
2459 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2460 pipe_config->port_clock = 162000;
2461 else
2462 pipe_config->port_clock = 270000;
2463 }
2464
2465 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2466 &pipe_config->dp_m_n);
2467
2468 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2469 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2470
2471 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
2472
2473 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2474 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2475 /*
2476 * This is a big fat ugly hack.
2477 *
2478 * Some machines in UEFI boot mode provide us a VBT that has 18
2479 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2480 * unknown we fail to light up. Yet the same BIOS boots up with
2481 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2482 * max, not what it tells us to use.
2483 *
2484 * Note: This will still be broken if the eDP panel is not lit
2485 * up by the BIOS, and thus we can't get the mode at module
2486 * load.
2487 */
2488 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2489 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2490 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2491 }
2492 }
2493
2494 static void intel_disable_dp(struct intel_encoder *encoder)
2495 {
2496 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2497 struct drm_device *dev = encoder->base.dev;
2498 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2499
2500 if (crtc->config->has_audio)
2501 intel_audio_codec_disable(encoder);
2502
2503 if (HAS_PSR(dev) && !HAS_DDI(dev))
2504 intel_psr_disable(intel_dp);
2505
2506 /* Make sure the panel is off before trying to change the mode. But also
2507 * ensure that we have vdd while we switch off the panel. */
2508 intel_edp_panel_vdd_on(intel_dp);
2509 intel_edp_backlight_off(intel_dp);
2510 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2511 intel_edp_panel_off(intel_dp);
2512
2513 /* disable the port before the pipe on g4x */
2514 if (INTEL_INFO(dev)->gen < 5)
2515 intel_dp_link_down(intel_dp);
2516 }
2517
2518 static void ilk_post_disable_dp(struct intel_encoder *encoder)
2519 {
2520 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2521 enum port port = dp_to_dig_port(intel_dp)->port;
2522
2523 intel_dp_link_down(intel_dp);
2524
2525 /* Only ilk+ has port A */
2526 if (port == PORT_A)
2527 ironlake_edp_pll_off(intel_dp);
2528 }
2529
2530 static void vlv_post_disable_dp(struct intel_encoder *encoder)
2531 {
2532 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2533
2534 intel_dp_link_down(intel_dp);
2535 }
2536
2537 static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2538 bool reset)
2539 {
2540 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2541 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2542 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2543 enum pipe pipe = crtc->pipe;
2544 uint32_t val;
2545
2546 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2547 if (reset)
2548 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2549 else
2550 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2551 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2552
2553 if (crtc->config->lane_count > 2) {
2554 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2555 if (reset)
2556 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2557 else
2558 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2559 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2560 }
2561
2562 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2563 val |= CHV_PCS_REQ_SOFTRESET_EN;
2564 if (reset)
2565 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2566 else
2567 val |= DPIO_PCS_CLK_SOFT_RESET;
2568 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2569
2570 if (crtc->config->lane_count > 2) {
2571 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2572 val |= CHV_PCS_REQ_SOFTRESET_EN;
2573 if (reset)
2574 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2575 else
2576 val |= DPIO_PCS_CLK_SOFT_RESET;
2577 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2578 }
2579 }
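
/*
 * Usage sketch: callers take sb_lock and use this helper in pairs,
 * asserting the reset on the way down (chv_post_disable_dp() below
 * passes reset == true) and deasserting it again once the lanes have
 * been reprogrammed (chv_pre_enable_dp() passes reset == false).
 */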
2580
2581 static void chv_post_disable_dp(struct intel_encoder *encoder)
2582 {
2583 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2584 struct drm_device *dev = encoder->base.dev;
2585 struct drm_i915_private *dev_priv = dev->dev_private;
2586
2587 intel_dp_link_down(intel_dp);
2588
2589 mutex_lock(&dev_priv->sb_lock);
2590
2591 /* Assert data lane reset */
2592 chv_data_lane_soft_reset(encoder, true);
2593
2594 mutex_unlock(&dev_priv->sb_lock);
2595 }
2596
2597 static void
2598 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2599 uint32_t *DP,
2600 uint8_t dp_train_pat)
2601 {
2602 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2603 struct drm_device *dev = intel_dig_port->base.base.dev;
2604 struct drm_i915_private *dev_priv = dev->dev_private;
2605 enum port port = intel_dig_port->port;
2606
2607 if (HAS_DDI(dev)) {
2608 uint32_t temp = I915_READ(DP_TP_CTL(port));
2609
2610 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2611 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2612 else
2613 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2614
2615 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2616 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2617 case DP_TRAINING_PATTERN_DISABLE:
2618 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2619
2620 break;
2621 case DP_TRAINING_PATTERN_1:
2622 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2623 break;
2624 case DP_TRAINING_PATTERN_2:
2625 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2626 break;
2627 case DP_TRAINING_PATTERN_3:
2628 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2629 break;
2630 }
2631 I915_WRITE(DP_TP_CTL(port), temp);
2632
2633 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2634 (HAS_PCH_CPT(dev) && port != PORT_A)) {
2635 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2636
2637 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2638 case DP_TRAINING_PATTERN_DISABLE:
2639 *DP |= DP_LINK_TRAIN_OFF_CPT;
2640 break;
2641 case DP_TRAINING_PATTERN_1:
2642 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2643 break;
2644 case DP_TRAINING_PATTERN_2:
2645 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2646 break;
2647 case DP_TRAINING_PATTERN_3:
2648 DRM_ERROR("DP training pattern 3 not supported\n");
2649 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2650 break;
2651 }
2652
2653 } else {
2654 if (IS_CHERRYVIEW(dev))
2655 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2656 else
2657 *DP &= ~DP_LINK_TRAIN_MASK;
2658
2659 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2660 case DP_TRAINING_PATTERN_DISABLE:
2661 *DP |= DP_LINK_TRAIN_OFF;
2662 break;
2663 case DP_TRAINING_PATTERN_1:
2664 *DP |= DP_LINK_TRAIN_PAT_1;
2665 break;
2666 case DP_TRAINING_PATTERN_2:
2667 *DP |= DP_LINK_TRAIN_PAT_2;
2668 break;
2669 case DP_TRAINING_PATTERN_3:
2670 if (IS_CHERRYVIEW(dev)) {
2671 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2672 } else {
2673 DRM_ERROR("DP training pattern 3 not supported\n");
2674 *DP |= DP_LINK_TRAIN_PAT_2;
2675 }
2676 break;
2677 }
2678 }
2679 }
2680
2681 static void intel_dp_enable_port(struct intel_dp *intel_dp)
2682 {
2683 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2684 struct drm_i915_private *dev_priv = dev->dev_private;
2685 struct intel_crtc *crtc =
2686 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2687
2688 /* enable with pattern 1 (as per spec) */
2689 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2690 DP_TRAINING_PATTERN_1);
2691
2692 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2693 POSTING_READ(intel_dp->output_reg);
2694
2695 /*
2696 * Magic for VLV/CHV. We _must_ first set up the register
2697 * without actually enabling the port, and then do another
2698 * write to enable the port. Otherwise link training will
2699 * fail when the power sequencer is freshly used for this port.
2700 */
2701 intel_dp->DP |= DP_PORT_EN;
2702 if (crtc->config->has_audio)
2703 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2704
2705 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2706 POSTING_READ(intel_dp->output_reg);
2707 }
2708
2709 static void intel_enable_dp(struct intel_encoder *encoder)
2710 {
2711 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2712 struct drm_device *dev = encoder->base.dev;
2713 struct drm_i915_private *dev_priv = dev->dev_private;
2714 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2715 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2716 enum port port = dp_to_dig_port(intel_dp)->port;
2717 enum pipe pipe = crtc->pipe;
2718
2719 if (WARN_ON(dp_reg & DP_PORT_EN))
2720 return;
2721
2722 pps_lock(intel_dp);
2723
2724 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
2725 vlv_init_panel_power_sequencer(intel_dp);
2726
2727 /*
2728 * We get an occasional spurious underrun between the port
2729 * enable and vdd enable, when enabling port A eDP.
2730 *
2731 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2732 */
2733 if (port == PORT_A)
2734 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
2735
2736 intel_dp_enable_port(intel_dp);
2737
2738 if (port == PORT_A && IS_GEN5(dev_priv)) {
2739 /*
2740 * Underrun reporting for the other pipe was disabled in
2741 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2742 * enabled, so it's now safe to re-enable underrun reporting.
2743 */
2744 intel_wait_for_vblank_if_active(dev_priv->dev, !pipe);
2745 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true);
2746 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true);
2747 }
2748
2749 edp_panel_vdd_on(intel_dp);
2750 edp_panel_on(intel_dp);
2751 edp_panel_vdd_off(intel_dp, true);
2752
2753 if (port == PORT_A)
2754 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
2755
2756 pps_unlock(intel_dp);
2757
2758 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
2759 unsigned int lane_mask = 0x0;
2760
2761 if (IS_CHERRYVIEW(dev))
2762 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2763
2764 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2765 lane_mask);
2766 }
2767
2768 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2769 intel_dp_start_link_train(intel_dp);
2770 intel_dp_stop_link_train(intel_dp);
2771
2772 if (crtc->config->has_audio) {
2773 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2774 pipe_name(pipe));
2775 intel_audio_codec_enable(encoder);
2776 }
2777 }
2778
2779 static void g4x_enable_dp(struct intel_encoder *encoder)
2780 {
2781 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2782
2783 intel_enable_dp(encoder);
2784 intel_edp_backlight_on(intel_dp);
2785 }
2786
2787 static void vlv_enable_dp(struct intel_encoder *encoder)
2788 {
2789 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2790
2791 intel_edp_backlight_on(intel_dp);
2792 intel_psr_enable(intel_dp);
2793 }
2794
2795 static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2796 {
2797 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2798 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2799 enum port port = dp_to_dig_port(intel_dp)->port;
2800 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2801
2802 intel_dp_prepare(encoder);
2803
2804 if (port == PORT_A && IS_GEN5(dev_priv)) {
2805 /*
2806 * We get FIFO underruns on the other pipe when
2807 * enabling the CPU eDP PLL, and when enabling CPU
2808 * eDP port. We could potentially avoid the PLL
2809 * underrun with a vblank wait just prior to enabling
2810 * the PLL, but that doesn't appear to help the port
2811 * enable case. Just sweep it all under the rug.
2812 */
2813 intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false);
2814 intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false);
2815 }
2816
2817 /* Only ilk+ has port A */
2818 if (port == PORT_A)
2819 ironlake_edp_pll_on(intel_dp);
2820 }
2821
2822 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2823 {
2824 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2825 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2826 enum pipe pipe = intel_dp->pps_pipe;
2827 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2828
2829 edp_panel_vdd_off_sync(intel_dp);
2830
2831 /*
2832 * VLV seems to get confused when multiple power sequencers
2833 * have the same port selected (even if only one has power/vdd
2834 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2835 * CHV, on the other hand, doesn't seem to mind having the same port
2836 * selected in multiple power sequencers, but let's always clear the
2837 * port select when logically disconnecting a power sequencer
2838 * from a port.
2839 */
2840 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2841 pipe_name(pipe), port_name(intel_dig_port->port));
2842 I915_WRITE(pp_on_reg, 0);
2843 POSTING_READ(pp_on_reg);
2844
2845 intel_dp->pps_pipe = INVALID_PIPE;
2846 }
2847
2848 static void vlv_steal_power_sequencer(struct drm_device *dev,
2849 enum pipe pipe)
2850 {
2851 struct drm_i915_private *dev_priv = dev->dev_private;
2852 struct intel_encoder *encoder;
2853
2854 lockdep_assert_held(&dev_priv->pps_mutex);
2855
2856 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2857 return;
2858
2859 for_each_intel_encoder(dev, encoder) {
2860 struct intel_dp *intel_dp;
2861 enum port port;
2862
2863 if (encoder->type != INTEL_OUTPUT_EDP)
2864 continue;
2865
2866 intel_dp = enc_to_intel_dp(&encoder->base);
2867 port = dp_to_dig_port(intel_dp)->port;
2868
2869 if (intel_dp->pps_pipe != pipe)
2870 continue;
2871
2872 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2873 pipe_name(pipe), port_name(port));
2874
2875 WARN(encoder->base.crtc,
2876 "stealing pipe %c power sequencer from active eDP port %c\n",
2877 pipe_name(pipe), port_name(port));
2878
2879 /* make sure vdd is off before we steal it */
2880 vlv_detach_power_sequencer(intel_dp);
2881 }
2882 }
2883
2884 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2885 {
2886 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2887 struct intel_encoder *encoder = &intel_dig_port->base;
2888 struct drm_device *dev = encoder->base.dev;
2889 struct drm_i915_private *dev_priv = dev->dev_private;
2890 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2891
2892 lockdep_assert_held(&dev_priv->pps_mutex);
2893
2894 if (!is_edp(intel_dp))
2895 return;
2896
2897 if (intel_dp->pps_pipe == crtc->pipe)
2898 return;
2899
2900 /*
2901 * If another power sequencer was being used on this
2902 * port previously, make sure to turn off vdd there while
2903 * we still have control of it.
2904 */
2905 if (intel_dp->pps_pipe != INVALID_PIPE)
2906 vlv_detach_power_sequencer(intel_dp);
2907
2908 /*
2909 * We may be stealing the power
2910 * sequencer from another port.
2911 */
2912 vlv_steal_power_sequencer(dev, crtc->pipe);
2913
2914 /* now it's all ours */
2915 intel_dp->pps_pipe = crtc->pipe;
2916
2917 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2918 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2919
2920 /* init power sequencer on this pipe and port */
2921 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2922 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2923 }
2924
2925 static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2926 {
2927 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2928 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2929 struct drm_device *dev = encoder->base.dev;
2930 struct drm_i915_private *dev_priv = dev->dev_private;
2931 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2932 enum dpio_channel port = vlv_dport_to_channel(dport);
2933 int pipe = intel_crtc->pipe;
2934 u32 val;
2935
2936 mutex_lock(&dev_priv->sb_lock);
2937
2938 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
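	/* Note: val is overwritten with 0 right below, so this DPIO read
	 * appears to serve only as a dummy access. */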
2939 val = 0;
2940 if (pipe)
2941 val |= (1<<21);
2942 else
2943 val &= ~(1<<21);
2944 val |= 0x001000c4;
2945 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2946 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2947 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2948
2949 mutex_unlock(&dev_priv->sb_lock);
2950
2951 intel_enable_dp(encoder);
2952 }
2953
2954 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2955 {
2956 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2957 struct drm_device *dev = encoder->base.dev;
2958 struct drm_i915_private *dev_priv = dev->dev_private;
2959 struct intel_crtc *intel_crtc =
2960 to_intel_crtc(encoder->base.crtc);
2961 enum dpio_channel port = vlv_dport_to_channel(dport);
2962 int pipe = intel_crtc->pipe;
2963
2964 intel_dp_prepare(encoder);
2965
2966 /* Program Tx lane resets to default */
2967 mutex_lock(&dev_priv->sb_lock);
2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2969 DPIO_PCS_TX_LANE2_RESET |
2970 DPIO_PCS_TX_LANE1_RESET);
2971 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2972 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2973 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2974 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2975 DPIO_PCS_CLK_SOFT_RESET);
2976
2977 /* Fix up inter-pair skew failure */
2978 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2979 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2980 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2981 mutex_unlock(&dev_priv->sb_lock);
2982 }
2983
2984 static void chv_pre_enable_dp(struct intel_encoder *encoder)
2985 {
2986 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2987 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2988 struct drm_device *dev = encoder->base.dev;
2989 struct drm_i915_private *dev_priv = dev->dev_private;
2990 struct intel_crtc *intel_crtc =
2991 to_intel_crtc(encoder->base.crtc);
2992 enum dpio_channel ch = vlv_dport_to_channel(dport);
2993 int pipe = intel_crtc->pipe;
2994 int data, i, stagger;
2995 u32 val;
2996
2997 mutex_lock(&dev_priv->sb_lock);
2998
2999 /* allow hardware to manage TX FIFO reset source */
3000 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3001 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3002 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3003
3004 if (intel_crtc->config->lane_count > 2) {
3005 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3006 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
3007 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3008 }
3009
3010 /* Program Tx lane latency optimal setting */
3011 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3012 /* Set the upar bit */
3013 if (intel_crtc->config->lane_count == 1)
3014 data = 0x0;
3015 else
3016 data = (i == 1) ? 0x0 : 0x1;
3017 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
3018 data << DPIO_UPAR_SHIFT);
3019 }
3020
3021 /* Data lane stagger programming */
3022 if (intel_crtc->config->port_clock > 270000)
3023 stagger = 0x18;
3024 else if (intel_crtc->config->port_clock > 135000)
3025 stagger = 0xd;
3026 else if (intel_crtc->config->port_clock > 67500)
3027 stagger = 0x7;
3028 else if (intel_crtc->config->port_clock > 33750)
3029 stagger = 0x4;
3030 else
3031 stagger = 0x2;
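
	/*
	 * e.g. both a 1.62 GHz (port_clock == 162000) and a 2.7 GHz
	 * (270000) link land in the "> 135000" bucket and get stagger
	 * 0xd, while a 5.4 GHz (540000) link gets 0x18.
	 */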
3032
3033 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
3034 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3035 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
3036
3037 if (intel_crtc->config->lane_count > 2) {
3038 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
3039 val |= DPIO_TX2_STAGGER_MASK(0x1f);
3040 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
3041 }
3042
3043 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
3044 DPIO_LANESTAGGER_STRAP(stagger) |
3045 DPIO_LANESTAGGER_STRAP_OVRD |
3046 DPIO_TX1_STAGGER_MASK(0x1f) |
3047 DPIO_TX1_STAGGER_MULT(6) |
3048 DPIO_TX2_STAGGER_MULT(0));
3049
3050 if (intel_crtc->config->lane_count > 2) {
3051 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
3052 DPIO_LANESTAGGER_STRAP(stagger) |
3053 DPIO_LANESTAGGER_STRAP_OVRD |
3054 DPIO_TX1_STAGGER_MASK(0x1f) |
3055 DPIO_TX1_STAGGER_MULT(7) |
3056 DPIO_TX2_STAGGER_MULT(5));
3057 }
3058
3059 /* Deassert data lane reset */
3060 chv_data_lane_soft_reset(encoder, false);
3061
3062 mutex_unlock(&dev_priv->sb_lock);
3063
3064 intel_enable_dp(encoder);
3065
3066 /* Second common lane will stay alive on its own now */
3067 if (dport->release_cl2_override) {
3068 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
3069 dport->release_cl2_override = false;
3070 }
3071 }
3072
3073 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
3074 {
3075 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
3076 struct drm_device *dev = encoder->base.dev;
3077 struct drm_i915_private *dev_priv = dev->dev_private;
3078 struct intel_crtc *intel_crtc =
3079 to_intel_crtc(encoder->base.crtc);
3080 enum dpio_channel ch = vlv_dport_to_channel(dport);
3081 enum pipe pipe = intel_crtc->pipe;
3082 unsigned int lane_mask =
3083 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
3084 u32 val;
3085
3086 intel_dp_prepare(encoder);
3087
3088 /*
3089 * Must trick the second common lane into life.
3090 * Otherwise we can't even access the PLL.
3091 */
3092 if (ch == DPIO_CH0 && pipe == PIPE_B)
3093 dport->release_cl2_override =
3094 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
3095
3096 chv_phy_powergate_lanes(encoder, true, lane_mask);
3097
3098 mutex_lock(&dev_priv->sb_lock);
3099
3100 /* Assert data lane reset */
3101 chv_data_lane_soft_reset(encoder, true);
3102
3103 /* program left/right clock distribution */
3104 if (pipe != PIPE_B) {
3105 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3106 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3107 if (ch == DPIO_CH0)
3108 val |= CHV_BUFLEFTENA1_FORCE;
3109 if (ch == DPIO_CH1)
3110 val |= CHV_BUFRIGHTENA1_FORCE;
3111 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3112 } else {
3113 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3114 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3115 if (ch == DPIO_CH0)
3116 val |= CHV_BUFLEFTENA2_FORCE;
3117 if (ch == DPIO_CH1)
3118 val |= CHV_BUFRIGHTENA2_FORCE;
3119 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3120 }
3121
3122 /* program clock channel usage */
3123 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3124 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3125 if (pipe != PIPE_B)
3126 val &= ~CHV_PCS_USEDCLKCHANNEL;
3127 else
3128 val |= CHV_PCS_USEDCLKCHANNEL;
3129 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3130
3131 if (intel_crtc->config->lane_count > 2) {
3132 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3133 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3134 if (pipe != PIPE_B)
3135 val &= ~CHV_PCS_USEDCLKCHANNEL;
3136 else
3137 val |= CHV_PCS_USEDCLKCHANNEL;
3138 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3139 }
3140
3141 /*
3142 * This is a bit weird since generally the CL
3143 * matches the pipe, but here we need to
3144 * pick the CL based on the port.
3145 */
3146 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3147 if (pipe != PIPE_B)
3148 val &= ~CHV_CMN_USEDCLKCHANNEL;
3149 else
3150 val |= CHV_CMN_USEDCLKCHANNEL;
3151 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3152
3153 mutex_unlock(&dev_priv->sb_lock);
3154 }
3155
3156 static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3157 {
3158 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3159 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3160 u32 val;
3161
3162 mutex_lock(&dev_priv->sb_lock);
3163
3164 /* disable left/right clock distribution */
3165 if (pipe != PIPE_B) {
3166 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3167 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3168 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3169 } else {
3170 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3171 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3172 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3173 }
3174
3175 mutex_unlock(&dev_priv->sb_lock);
3176
3177 /*
3178 * Leave the power down bit cleared for at least one
3179 * lane so that chv_phy_powergate_ch() will power
3180 * on something when the channel is otherwise unused.
3181 * When the port is off and the override is removed
3182 * the lanes power down anyway, so otherwise it doesn't
3183 * really matter what the state of the power down bits is
3184 * after this.
3185 */
3186 chv_phy_powergate_lanes(encoder, false, 0x0);
3187 }
3188
3189 /*
3190 * Native read with retry for link status and receiver capability reads for
3191 * cases where the sink may still be asleep.
3192 *
3193 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3194 * supposed to retry 3 times per the spec.
3195 */
3196 static ssize_t
3197 intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3198 void *buffer, size_t size)
3199 {
3200 ssize_t ret;
3201 int i;
3202
3203 /*
3204 * Sometimes we just get the same incorrect byte repeated
3205 * over the entire buffer. Doing just one throw-away read
3206 * initially seems to "solve" it.
3207 */
3208 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3209
3210 for (i = 0; i < 3; i++) {
3211 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3212 if (ret == size)
3213 return ret;
3214 msleep(1);
3215 }
3216
3217 return ret;
3218 }
3219
3220 /*
3221 * Fetch AUX CH registers 0x202 - 0x207 which contain
3222 * link status information
3223 */
3224 bool
3225 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3226 {
3227 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3228 DP_LANE0_1_STATUS,
3229 link_status,
3230 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3231 }
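
/*
 * For reference, per the DRM DP helpers DP_LANE0_1_STATUS is DPCD
 * address 0x202 and DP_LINK_STATUS_SIZE is 6 bytes, which is exactly
 * the 0x202 - 0x207 window mentioned above.
 */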
3232
3233 /* These are source-specific values. */
3234 uint8_t
3235 intel_dp_voltage_max(struct intel_dp *intel_dp)
3236 {
3237 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3238 struct drm_i915_private *dev_priv = dev->dev_private;
3239 enum port port = dp_to_dig_port(intel_dp)->port;
3240
3241 if (IS_BROXTON(dev))
3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3243 else if (INTEL_INFO(dev)->gen >= 9) {
3244 if (dev_priv->edp_low_vswing && port == PORT_A)
3245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3246 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3247 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
3248 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3249 else if (IS_GEN7(dev) && port == PORT_A)
3250 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3251 else if (HAS_PCH_CPT(dev) && port != PORT_A)
3252 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3253 else
3254 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3255 }
3256
3257 uint8_t
3258 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3259 {
3260 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3261 enum port port = dp_to_dig_port(intel_dp)->port;
3262
3263 if (INTEL_INFO(dev)->gen >= 9) {
3264 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3266 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3268 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3270 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3272 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3273 default:
3274 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3275 }
3276 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3277 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3285 default:
3286 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3287 }
3288 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3289 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3293 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3295 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3297 default:
3298 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3299 }
3300 } else if (IS_GEN7(dev) && port == PORT_A) {
3301 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3306 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3307 default:
3308 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3309 }
3310 } else {
3311 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3313 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3315 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3317 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3319 default:
3320 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3321 }
3322 }
3323 }
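
/*
 * Worked example: on VLV/CHV a requested swing of LEVEL_1 caps
 * pre-emphasis at LEVEL_2, while the maximum swing LEVEL_3 allows no
 * pre-emphasis at all, reflecting the usual trade-off between the two
 * knobs within the lane's voltage budget.
 */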
3324
3325 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3326 {
3327 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3328 struct drm_i915_private *dev_priv = dev->dev_private;
3329 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3330 struct intel_crtc *intel_crtc =
3331 to_intel_crtc(dport->base.base.crtc);
3332 unsigned long demph_reg_value, preemph_reg_value,
3333 uniqtranscale_reg_value;
3334 uint8_t train_set = intel_dp->train_set[0];
3335 enum dpio_channel port = vlv_dport_to_channel(dport);
3336 int pipe = intel_crtc->pipe;
3337
3338 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3339 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3340 preemph_reg_value = 0x0004000;
3341 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3343 demph_reg_value = 0x2B405555;
3344 uniqtranscale_reg_value = 0x552AB83A;
3345 break;
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3347 demph_reg_value = 0x2B404040;
3348 uniqtranscale_reg_value = 0x5548B83A;
3349 break;
3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3351 demph_reg_value = 0x2B245555;
3352 uniqtranscale_reg_value = 0x5560B83A;
3353 break;
3354 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3355 demph_reg_value = 0x2B405555;
3356 uniqtranscale_reg_value = 0x5598DA3A;
3357 break;
3358 default:
3359 return 0;
3360 }
3361 break;
3362 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3363 preemph_reg_value = 0x0002000;
3364 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3366 demph_reg_value = 0x2B404040;
3367 uniqtranscale_reg_value = 0x5552B83A;
3368 break;
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3370 demph_reg_value = 0x2B404848;
3371 uniqtranscale_reg_value = 0x5580B83A;
3372 break;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3374 demph_reg_value = 0x2B404040;
3375 uniqtranscale_reg_value = 0x55ADDA3A;
3376 break;
3377 default:
3378 return 0;
3379 }
3380 break;
3381 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3382 preemph_reg_value = 0x0000000;
3383 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3385 demph_reg_value = 0x2B305555;
3386 uniqtranscale_reg_value = 0x5570B83A;
3387 break;
3388 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3389 demph_reg_value = 0x2B2B4040;
3390 uniqtranscale_reg_value = 0x55ADDA3A;
3391 break;
3392 default:
3393 return 0;
3394 }
3395 break;
3396 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3397 preemph_reg_value = 0x0006000;
3398 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3400 demph_reg_value = 0x1B405555;
3401 uniqtranscale_reg_value = 0x55ADDA3A;
3402 break;
3403 default:
3404 return 0;
3405 }
3406 break;
3407 default:
3408 return 0;
3409 }
3410
3411 mutex_lock(&dev_priv->sb_lock);
3412 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3413 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3414 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3415 uniqtranscale_reg_value);
3416 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3417 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3418 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3419 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3420 mutex_unlock(&dev_priv->sb_lock);
3421
3422 return 0;
3423 }
3424
3425 static bool chv_need_uniq_trans_scale(uint8_t train_set)
3426 {
3427 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3428 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3429 }
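
/*
 * i.e. the unique transition scale is needed only for the one extreme
 * combination of maximum swing with no pre-emphasis; e.g.
 * chv_need_uniq_trans_scale(DP_TRAIN_VOLTAGE_SWING_LEVEL_3 |
 * DP_TRAIN_PRE_EMPH_LEVEL_0) returns true, every other train_set
 * returns false.
 */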
3430
3431 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3432 {
3433 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3434 struct drm_i915_private *dev_priv = dev->dev_private;
3435 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3436 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3437 u32 deemph_reg_value, margin_reg_value, val;
3438 uint8_t train_set = intel_dp->train_set[0];
3439 enum dpio_channel ch = vlv_dport_to_channel(dport);
3440 enum pipe pipe = intel_crtc->pipe;
3441 int i;
3442
3443 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3444 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3445 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3447 deemph_reg_value = 128;
3448 margin_reg_value = 52;
3449 break;
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3451 deemph_reg_value = 128;
3452 margin_reg_value = 77;
3453 break;
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3455 deemph_reg_value = 128;
3456 margin_reg_value = 102;
3457 break;
3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3459 deemph_reg_value = 128;
3460 margin_reg_value = 154;
3461 /* FIXME extra to set for 1200 */
3462 break;
3463 default:
3464 return 0;
3465 }
3466 break;
3467 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3468 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3470 deemph_reg_value = 85;
3471 margin_reg_value = 78;
3472 break;
3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3474 deemph_reg_value = 85;
3475 margin_reg_value = 116;
3476 break;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3478 deemph_reg_value = 85;
3479 margin_reg_value = 154;
3480 break;
3481 default:
3482 return 0;
3483 }
3484 break;
3485 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3486 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3488 deemph_reg_value = 64;
3489 margin_reg_value = 104;
3490 break;
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3492 deemph_reg_value = 64;
3493 margin_reg_value = 154;
3494 break;
3495 default:
3496 return 0;
3497 }
3498 break;
3499 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3500 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3502 deemph_reg_value = 43;
3503 margin_reg_value = 154;
3504 break;
3505 default:
3506 return 0;
3507 }
3508 break;
3509 default:
3510 return 0;
3511 }
3512
3513 mutex_lock(&dev_priv->sb_lock);
3514
3515 /* Clear calc init */
3516 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3517 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3518 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3519 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3520 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3521
3522 if (intel_crtc->config->lane_count > 2) {
3523 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3524 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3525 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3526 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3527 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3528 }
3529
3530 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3531 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3532 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3533 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3534
3535 if (intel_crtc->config->lane_count > 2) {
3536 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3537 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3538 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3539 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3540 }
3541
3542 /* Program swing deemph */
3543 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3544 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3545 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3546 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3547 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3548 }
3549
3550 /* Program swing margin */
3551 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3552 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3553
3554 val &= ~DPIO_SWING_MARGIN000_MASK;
3555 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3556
3557 /*
3558 * Supposedly this value shouldn't matter when unique transition
3559 * scale is disabled, but in fact it does matter. Let's just
3560 * always program the same value and hope it's OK.
3561 */
3562 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3563 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3564
3565 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3566 }
3567
3568 /*
3569 * The document says to set bit 27 for ch0 and bit 26
3570 * for ch1, which might be a typo in the doc.
3571 * For now, for this unique transition scale selection, set bit
3572 * 27 for both ch0 and ch1.
3573 */
3574 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3575 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3576 if (chv_need_uniq_trans_scale(train_set))
3577 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3578 else
3579 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3580 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3581 }
3582
3583 /* Start swing calculation */
3584 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3585 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3586 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3587
3588 if (intel_crtc->config->lane_count > 2) {
3589 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3590 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3591 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3592 }
3593
3594 mutex_unlock(&dev_priv->sb_lock);
3595
3596 return 0;
3597 }
3598
3599 static uint32_t
3600 gen4_signal_levels(uint8_t train_set)
3601 {
3602 uint32_t signal_levels = 0;
3603
3604 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3605 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3606 default:
3607 signal_levels |= DP_VOLTAGE_0_4;
3608 break;
3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3610 signal_levels |= DP_VOLTAGE_0_6;
3611 break;
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3613 signal_levels |= DP_VOLTAGE_0_8;
3614 break;
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3616 signal_levels |= DP_VOLTAGE_1_2;
3617 break;
3618 }
3619 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3620 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3621 default:
3622 signal_levels |= DP_PRE_EMPHASIS_0;
3623 break;
3624 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3625 signal_levels |= DP_PRE_EMPHASIS_3_5;
3626 break;
3627 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3628 signal_levels |= DP_PRE_EMPHASIS_6;
3629 break;
3630 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3631 signal_levels |= DP_PRE_EMPHASIS_9_5;
3632 break;
3633 }
3634 return signal_levels;
3635 }
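/*
 * Example: train_set = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 |
 * DP_TRAIN_PRE_EMPH_LEVEL_2 maps to DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6
 * above, i.e. a 0.6V swing with 6 dB of pre-emphasis in the port register.
 */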
3636
3637 /* Gen6's DP voltage swing and pre-emphasis control */
3638 static uint32_t
3639 gen6_edp_signal_levels(uint8_t train_set)
3640 {
3641 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3642 DP_TRAIN_PRE_EMPHASIS_MASK);
3643 switch (signal_levels) {
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3646 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3648 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3651 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3654 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3657 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3658 default:
3659 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3660 "0x%x\n", signal_levels);
3661 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3662 }
3663 }
3664
3665 /* Gen7's DP voltage swing and pre-emphasis control */
3666 static uint32_t
3667 gen7_edp_signal_levels(uint8_t train_set)
3668 {
3669 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3670 DP_TRAIN_PRE_EMPHASIS_MASK);
3671 switch (signal_levels) {
3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3673 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3675 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3677 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3678
3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3680 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3682 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3683
3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3685 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3686 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3687 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3688
3689 default:
3690 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3691 "0x%x\n", signal_levels);
3692 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3693 }
3694 }
3695
3696 void
3697 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3698 {
3699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3700 enum port port = intel_dig_port->port;
3701 struct drm_device *dev = intel_dig_port->base.base.dev;
3702 struct drm_i915_private *dev_priv = to_i915(dev);
3703 uint32_t signal_levels, mask = 0;
3704 uint8_t train_set = intel_dp->train_set[0];
3705
3706 if (HAS_DDI(dev)) {
3707 signal_levels = ddi_signal_levels(intel_dp);
3708
3709 if (IS_BROXTON(dev))
3710 signal_levels = 0;
3711 else
3712 mask = DDI_BUF_EMP_MASK;
3713 } else if (IS_CHERRYVIEW(dev)) {
3714 signal_levels = chv_signal_levels(intel_dp);
3715 } else if (IS_VALLEYVIEW(dev)) {
3716 signal_levels = vlv_signal_levels(intel_dp);
3717 } else if (IS_GEN7(dev) && port == PORT_A) {
3718 signal_levels = gen7_edp_signal_levels(train_set);
3719 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3720 } else if (IS_GEN6(dev) && port == PORT_A) {
3721 signal_levels = gen6_edp_signal_levels(train_set);
3722 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3723 } else {
3724 signal_levels = gen4_signal_levels(train_set);
3725 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3726 }
3727
3728 if (mask)
3729 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3730
3731 DRM_DEBUG_KMS("Using vswing level %d\n",
3732 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3733 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3734 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3735 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3736
3737 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3738
3739 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3740 POSTING_READ(intel_dp->output_reg);
3741 }
3742
3743 void
3744 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3745 uint8_t dp_train_pat)
3746 {
3747 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3748 struct drm_i915_private *dev_priv =
3749 to_i915(intel_dig_port->base.base.dev);
3750
3751 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3752
3753 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3754 POSTING_READ(intel_dp->output_reg);
3755 }
3756
3757 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3758 {
3759 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3760 struct drm_device *dev = intel_dig_port->base.base.dev;
3761 struct drm_i915_private *dev_priv = dev->dev_private;
3762 enum port port = intel_dig_port->port;
3763 uint32_t val;
3764
3765 if (!HAS_DDI(dev))
3766 return;
3767
3768 val = I915_READ(DP_TP_CTL(port));
3769 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3770 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3771 I915_WRITE(DP_TP_CTL(port), val);
3772
3773 /*
3774 * On PORT_A we can have only eDP in SST mode. There, the only reason
3775 * we need to set idle transmission mode is to work around a HW issue
3776 * where we enable the pipe while not in idle link-training mode.
3777 * In this case there is a requirement to wait for a minimum number of
3778 * idle patterns to be sent.
3779 */
3780 if (port == PORT_A)
3781 return;
3782
3783 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3784 1))
3785 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3786 }
3787
3788 static void
3789 intel_dp_link_down(struct intel_dp *intel_dp)
3790 {
3791 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3792 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3793 enum port port = intel_dig_port->port;
3794 struct drm_device *dev = intel_dig_port->base.base.dev;
3795 struct drm_i915_private *dev_priv = dev->dev_private;
3796 uint32_t DP = intel_dp->DP;
3797
3798 if (WARN_ON(HAS_DDI(dev)))
3799 return;
3800
3801 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3802 return;
3803
3804 DRM_DEBUG_KMS("\n");
3805
3806 if ((IS_GEN7(dev) && port == PORT_A) ||
3807 (HAS_PCH_CPT(dev) && port != PORT_A)) {
3808 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3809 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3810 } else {
3811 if (IS_CHERRYVIEW(dev))
3812 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3813 else
3814 DP &= ~DP_LINK_TRAIN_MASK;
3815 DP |= DP_LINK_TRAIN_PAT_IDLE;
3816 }
3817 I915_WRITE(intel_dp->output_reg, DP);
3818 POSTING_READ(intel_dp->output_reg);
3819
3820 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3821 I915_WRITE(intel_dp->output_reg, DP);
3822 POSTING_READ(intel_dp->output_reg);
3823
3824 /*
3825 * HW workaround for IBX: we need to move the port
3826 * to transcoder A after disabling it to allow the
3827 * matching HDMI port to be enabled on transcoder A.
3828 */
3829 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3830 /*
3831 * We get CPU/PCH FIFO underruns on the other pipe when
3832 * doing the workaround. Sweep them under the rug.
3833 */
3834 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3835 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3836
3837 /* always enable with pattern 1 (as per spec) */
3838 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3839 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3840 I915_WRITE(intel_dp->output_reg, DP);
3841 POSTING_READ(intel_dp->output_reg);
3842
3843 DP &= ~DP_PORT_EN;
3844 I915_WRITE(intel_dp->output_reg, DP);
3845 POSTING_READ(intel_dp->output_reg);
3846
3847 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A);
3848 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3849 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3850 }
3851
3852 msleep(intel_dp->panel_power_down_delay);
3853
3854 intel_dp->DP = DP;
3855 }
3856
3857 static bool
3858 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3859 {
3860 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3861 struct drm_device *dev = dig_port->base.base.dev;
3862 struct drm_i915_private *dev_priv = dev->dev_private;
3863 uint8_t rev;
3864
3865 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3866 sizeof(intel_dp->dpcd)) < 0)
3867 return false; /* aux transfer failed */
3868
3869 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3870
3871 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3872 return false; /* DPCD not present */
3873
3874 /* Check if the panel supports PSR */
3875 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
3876 if (is_edp(intel_dp)) {
3877 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3878 intel_dp->psr_dpcd,
3879 sizeof(intel_dp->psr_dpcd));
3880 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3881 dev_priv->psr.sink_support = true;
3882 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3883 }
3884
3885 if (INTEL_INFO(dev)->gen >= 9 &&
3886 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3887 uint8_t frame_sync_cap;
3888
3889 dev_priv->psr.sink_support = true;
3890 intel_dp_dpcd_read_wake(&intel_dp->aux,
3891 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3892 &frame_sync_cap, 1);
3893 dev_priv->psr.aux_frame_sync = frame_sync_cap != 0;
3894 /* PSR2 needs frame sync as well */
3895 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3896 DRM_DEBUG_KMS("PSR2 %s on sink",
3897 dev_priv->psr.psr2_support ? "supported" : "not supported");
3898 }
3899 }
3900
3901 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3902 yesno(intel_dp_source_supports_hbr2(intel_dp)),
3903 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3904
3905 /* Intermediate frequency support */
3906 if (is_edp(intel_dp) &&
3907 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3908 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3909 (rev >= 0x03)) { /* eDP 1.4 or higher */
3910 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3911 int i;
3912
3913 intel_dp_dpcd_read_wake(&intel_dp->aux,
3914 DP_SUPPORTED_LINK_RATES,
3915 sink_rates,
3916 sizeof(sink_rates));
3917
3918 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3919 int val = le16_to_cpu(sink_rates[i]);
3920
3921 if (val == 0)
3922 break;
3923
3924 /* Value read is in units of 200 kHz, while the drm clock is stored in deca-kHz */
3925 intel_dp->sink_rates[i] = (val * 200) / 10;
3926 }
3927 intel_dp->num_sink_rates = i;
3928 }
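/*
 * Worked example: a DPCD entry of 27000 (27000 * 200 kHz = 5.4 GHz)
 * is stored as 27000 * 200 / 10 = 540000, matching the 10 kHz
 * (deca-kHz) units used for link rates throughout the driver.
 */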
3929
3930 intel_dp_print_rates(intel_dp);
3931
3932 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
3933 DP_DWN_STRM_PORT_PRESENT))
3934 return true; /* native DP sink */
3935
3936 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3937 return true; /* no per-port downstream info */
3938
3939 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3940 intel_dp->downstream_ports,
3941 DP_MAX_DOWNSTREAM_PORTS) < 0)
3942 return false; /* downstream port status fetch failed */
3943
3944 return true;
3945 }
3946
3947 static void
3948 intel_dp_probe_oui(struct intel_dp *intel_dp)
3949 {
3950 u8 buf[3];
3951
3952 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3953 return;
3954
3955 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3956 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3957 buf[0], buf[1], buf[2]);
3958
3959 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3960 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3961 buf[0], buf[1], buf[2]);
3962 }
3963
3964 static bool
3965 intel_dp_probe_mst(struct intel_dp *intel_dp)
3966 {
3967 u8 buf[1];
3968
3969 if (!intel_dp->can_mst)
3970 return false;
3971
3972 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3973 return false;
3974
3975 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3976 if (buf[0] & DP_MST_CAP) {
3977 DRM_DEBUG_KMS("Sink is MST capable\n");
3978 intel_dp->is_mst = true;
3979 } else {
3980 DRM_DEBUG_KMS("Sink is not MST capable\n");
3981 intel_dp->is_mst = false;
3982 }
3983 }
3984
3985 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3986 return intel_dp->is_mst;
3987 }
3988
3989 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3990 {
3991 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3992 struct drm_device *dev = dig_port->base.base.dev;
3993 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3994 u8 buf;
3995 int ret = 0;
3996 int count = 0;
3997 int attempts = 10;
3998
3999 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4000 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4001 ret = -EIO;
4002 goto out;
4003 }
4004
4005 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4006 buf & ~DP_TEST_SINK_START) < 0) {
4007 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4008 ret = -EIO;
4009 goto out;
4010 }
4011
4012 do {
4013 intel_wait_for_vblank(dev, intel_crtc->pipe);
4014
4015 if (drm_dp_dpcd_readb(&intel_dp->aux,
4016 DP_TEST_SINK_MISC, &buf) < 0) {
4017 ret = -EIO;
4018 goto out;
4019 }
4020 count = buf & DP_TEST_COUNT_MASK;
4021 } while (--attempts && count);
4022
4023 if (attempts == 0) {
4024 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
4025 ret = -ETIMEDOUT;
4026 }
4027
4028 out:
4029 hsw_enable_ips(intel_crtc);
4030 return ret;
4031 }
4032
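/*
 * Sink CRC capture follows the DP_TEST_SINK handshake: set
 * DP_TEST_SINK_START, let the sink accumulate CRCs over full frames
 * (DP_TEST_COUNT_MASK in DP_TEST_SINK_MISC tracks progress), read the
 * six CRC bytes from DP_TEST_CRC_R_CR, then clear the start bit again.
 */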
4033 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4034 {
4035 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4036 struct drm_device *dev = dig_port->base.base.dev;
4037 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4038 u8 buf;
4039 int ret;
4040
4041 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4042 return -EIO;
4043
4044 if (!(buf & DP_TEST_CRC_SUPPORTED))
4045 return -ENOTTY;
4046
4047 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4048 return -EIO;
4049
4050 if (buf & DP_TEST_SINK_START) {
4051 ret = intel_dp_sink_crc_stop(intel_dp);
4052 if (ret)
4053 return ret;
4054 }
4055
4056 hsw_disable_ips(intel_crtc);
4057
4058 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
4059 buf | DP_TEST_SINK_START) < 0) {
4060 hsw_enable_ips(intel_crtc);
4061 return -EIO;
4062 }
4063
4064 intel_wait_for_vblank(dev, intel_crtc->pipe);
4065 return 0;
4066 }
4067
4068 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4069 {
4070 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4071 struct drm_device *dev = dig_port->base.base.dev;
4072 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4073 u8 buf;
4074 int count, ret;
4075 int attempts = 6;
4076
4077 ret = intel_dp_sink_crc_start(intel_dp);
4078 if (ret)
4079 return ret;
4080
4081 do {
4082 intel_wait_for_vblank(dev, intel_crtc->pipe);
4083
4084 if (drm_dp_dpcd_readb(&intel_dp->aux,
4085 DP_TEST_SINK_MISC, &buf) < 0) {
4086 ret = -EIO;
4087 goto stop;
4088 }
4089 count = buf & DP_TEST_COUNT_MASK;
4090
4091 } while (--attempts && count == 0);
4092
4093 if (attempts == 0) {
4094 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4095 ret = -ETIMEDOUT;
4096 goto stop;
4097 }
4098
4099 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4100 ret = -EIO;
4101 goto stop;
4102 }
4103
4104 stop:
4105 intel_dp_sink_crc_stop(intel_dp);
4106 return ret;
4107 }
4108
4109 static bool
4110 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4111 {
4112 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4113 DP_DEVICE_SERVICE_IRQ_VECTOR,
4114 sink_irq_vector, 1) == 1;
4115 }
4116
4117 static bool
4118 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4119 {
4120 int ret;
4121
4122 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4123 DP_SINK_COUNT_ESI,
4124 sink_irq_vector, 14);
4125 if (ret != 14)
4126 return false;
4127
4128 return true;
4129 }
4130
4131 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4132 {
4133 uint8_t test_result = DP_TEST_ACK;
4134 return test_result;
4135 }
4136
4137 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4138 {
4139 uint8_t test_result = DP_TEST_NAK;
4140 return test_result;
4141 }
4142
4143 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4144 {
4145 uint8_t test_result = DP_TEST_NAK;
4146 struct intel_connector *intel_connector = intel_dp->attached_connector;
4147 struct drm_connector *connector = &intel_connector->base;
4148
4149 if (intel_connector->detect_edid == NULL ||
4150 connector->edid_corrupt ||
4151 intel_dp->aux.i2c_defer_count > 6) {
4152 /* Check EDID read for NACKs, DEFERs and corruption
4153 * (DP CTS 1.2 Core r1.1)
4154 * 4.2.2.4 : Failed EDID read, I2C_NAK
4155 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4156 * 4.2.2.6 : EDID corruption detected
4157 * Use failsafe mode for all cases
4158 */
4159 if (intel_dp->aux.i2c_nack_count > 0 ||
4160 intel_dp->aux.i2c_defer_count > 0)
4161 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4162 intel_dp->aux.i2c_nack_count,
4163 intel_dp->aux.i2c_defer_count);
4164 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4165 } else {
4166 struct edid *block = intel_connector->detect_edid;
4167
4168 /* We have to write the checksum
4169 * of the last block read
4170 */
4171 block += intel_connector->detect_edid->extensions;
4172
4173 if (!drm_dp_dpcd_write(&intel_dp->aux,
4174 DP_TEST_EDID_CHECKSUM,
4175 &block->checksum,
4176 1))
4177 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4178
4179 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4180 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4181 }
4182
4183 /* Set test active flag here so userspace doesn't interrupt things */
4184 intel_dp->compliance_test_active = 1;
4185
4186 return test_result;
4187 }
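/*
 * Note: "block += extensions" above steps in units of the 128-byte
 * struct edid, so with one extension the checksum written back is that
 * of the extension block, i.e. the last block read.
 */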
4188
4189 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4190 {
4191 uint8_t test_result = DP_TEST_NAK;
4192 return test_result;
4193 }
4194
4195 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4196 {
4197 uint8_t response = DP_TEST_NAK;
4198 uint8_t rxdata = 0;
4199 int status = 0;
4200
4201 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4202 if (status <= 0) {
4203 DRM_DEBUG_KMS("Could not read test request from sink\n");
4204 goto update_status;
4205 }
4206
4207 switch (rxdata) {
4208 case DP_TEST_LINK_TRAINING:
4209 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4210 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4211 response = intel_dp_autotest_link_training(intel_dp);
4212 break;
4213 case DP_TEST_LINK_VIDEO_PATTERN:
4214 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4215 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4216 response = intel_dp_autotest_video_pattern(intel_dp);
4217 break;
4218 case DP_TEST_LINK_EDID_READ:
4219 DRM_DEBUG_KMS("EDID test requested\n");
4220 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4221 response = intel_dp_autotest_edid(intel_dp);
4222 break;
4223 case DP_TEST_LINK_PHY_TEST_PATTERN:
4224 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4225 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4226 response = intel_dp_autotest_phy_pattern(intel_dp);
4227 break;
4228 default:
4229 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4230 break;
4231 }
4232
4233 update_status:
4234 status = drm_dp_dpcd_write(&intel_dp->aux,
4235 DP_TEST_RESPONSE,
4236 &response, 1);
4237 if (status <= 0)
4238 DRM_DEBUG_KMS("Could not write test response to sink\n");
4239 }
4240
4241 static int
4242 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4243 {
4244 bool bret;
4245
4246 if (intel_dp->is_mst) {
4247 u8 esi[16] = { 0 };
4248 int ret = 0;
4249 int retry;
4250 bool handled;
4251 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4252 go_again:
4253 if (bret) {
4254
4255 /* check link status - esi[10] = 0x200c */
4256 if (intel_dp->active_mst_links &&
4257 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4258 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4259 intel_dp_start_link_train(intel_dp);
4260 intel_dp_stop_link_train(intel_dp);
4261 }
4262
4263 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4264 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4265
4266 if (handled) {
4267 for (retry = 0; retry < 3; retry++) {
4268 int wret;
4269 wret = drm_dp_dpcd_write(&intel_dp->aux,
4270 DP_SINK_COUNT_ESI+1,
4271 &esi[1], 3);
4272 if (wret == 3) {
4273 break;
4274 }
4275 }
4276
4277 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4278 if (bret) {
4279 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4280 goto go_again;
4281 }
4282 } else
4283 ret = 0;
4284
4285 return ret;
4286 } else {
4287 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4288 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4289 intel_dp->is_mst = false;
4290 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4291 /* send a hotplug event */
4292 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4293 }
4294 }
4295 return -EINVAL;
4296 }
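/*
 * The 14-byte ESI read in intel_dp_get_sink_irq_esi() starts at
 * DP_SINK_COUNT_ESI (0x2002), so esi[10] above corresponds to DPCD
 * 0x200c, the start of the ESI link status block consumed by
 * drm_dp_channel_eq_ok().
 */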
4297
4298 /*
4299 * According to DP spec
4300 * 5.1.2:
4301 * 1. Read DPCD
4302 * 2. Configure link according to Receiver Capabilities
4303 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4304 * 4. Check link status on receipt of hot-plug interrupt
4305 */
4306 static void
4307 intel_dp_check_link_status(struct intel_dp *intel_dp)
4308 {
4309 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4310 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4311 u8 sink_irq_vector;
4312 u8 link_status[DP_LINK_STATUS_SIZE];
4313
4314 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4315
4316 /*
4317 * Clear the compliance test variables to allow capturing
4318 * the values for the next automated test request.
4319 */
4320 intel_dp->compliance_test_active = 0;
4321 intel_dp->compliance_test_type = 0;
4322 intel_dp->compliance_test_data = 0;
4323
4324 if (!intel_encoder->base.crtc)
4325 return;
4326
4327 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4328 return;
4329
4330 /* Try to read receiver status if the link appears to be up */
4331 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4332 return;
4333 }
4334
4335 /* Now read the DPCD to see if it's actually running */
4336 if (!intel_dp_get_dpcd(intel_dp)) {
4337 return;
4338 }
4339
4340 /* Try to read the source of the interrupt */
4341 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4342 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4343 /* Clear interrupt source */
4344 drm_dp_dpcd_writeb(&intel_dp->aux,
4345 DP_DEVICE_SERVICE_IRQ_VECTOR,
4346 sink_irq_vector);
4347
4348 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4349 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4350 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4351 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4352 }
4353
4354 /* if link training is requested we should always perform it */
4355 if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) ||
4356 (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
4357 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4358 intel_encoder->base.name);
4359 intel_dp_start_link_train(intel_dp);
4360 intel_dp_stop_link_train(intel_dp);
4361 }
4362 }
4363
4364 /* XXX this is probably wrong for multiple downstream ports */
4365 static enum drm_connector_status
4366 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4367 {
4368 uint8_t *dpcd = intel_dp->dpcd;
4369 uint8_t type;
4370
4371 if (!intel_dp_get_dpcd(intel_dp))
4372 return connector_status_disconnected;
4373
4374 /* if there's no downstream port, we're done */
4375 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
4376 return connector_status_connected;
4377
4378 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4379 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4380 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4381 uint8_t reg;
4382
4383 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4384 &reg, 1) < 0)
4385 return connector_status_unknown;
4386
4387 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4388 : connector_status_disconnected;
4389 }
4390
4391 /* If no HPD, poke DDC gently */
4392 if (drm_probe_ddc(&intel_dp->aux.ddc))
4393 return connector_status_connected;
4394
4395 /* Well we tried, say unknown for unreliable port types */
4396 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4397 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4398 if (type == DP_DS_PORT_TYPE_VGA ||
4399 type == DP_DS_PORT_TYPE_NON_EDID)
4400 return connector_status_unknown;
4401 } else {
4402 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4403 DP_DWN_STRM_PORT_TYPE_MASK;
4404 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4405 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4406 return connector_status_unknown;
4407 }
4408
4409 /* Anything else is out of spec, warn and ignore */
4410 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4411 return connector_status_disconnected;
4412 }
4413
4414 static enum drm_connector_status
4415 edp_detect(struct intel_dp *intel_dp)
4416 {
4417 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4418 enum drm_connector_status status;
4419
4420 status = intel_panel_detect(dev);
4421 if (status == connector_status_unknown)
4422 status = connector_status_connected;
4423
4424 return status;
4425 }
4426
4427 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4428 struct intel_digital_port *port)
4429 {
4430 u32 bit;
4431
4432 switch (port->port) {
4433 case PORT_A:
4434 return true;
4435 case PORT_B:
4436 bit = SDE_PORTB_HOTPLUG;
4437 break;
4438 case PORT_C:
4439 bit = SDE_PORTC_HOTPLUG;
4440 break;
4441 case PORT_D:
4442 bit = SDE_PORTD_HOTPLUG;
4443 break;
4444 default:
4445 MISSING_CASE(port->port);
4446 return false;
4447 }
4448
4449 return I915_READ(SDEISR) & bit;
4450 }
4451
4452 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4453 struct intel_digital_port *port)
4454 {
4455 u32 bit;
4456
4457 switch (port->port) {
4458 case PORT_A:
4459 return true;
4460 case PORT_B:
4461 bit = SDE_PORTB_HOTPLUG_CPT;
4462 break;
4463 case PORT_C:
4464 bit = SDE_PORTC_HOTPLUG_CPT;
4465 break;
4466 case PORT_D:
4467 bit = SDE_PORTD_HOTPLUG_CPT;
4468 break;
4469 case PORT_E:
4470 bit = SDE_PORTE_HOTPLUG_SPT;
4471 break;
4472 default:
4473 MISSING_CASE(port->port);
4474 return false;
4475 }
4476
4477 return I915_READ(SDEISR) & bit;
4478 }
4479
4480 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4481 struct intel_digital_port *port)
4482 {
4483 u32 bit;
4484
4485 switch (port->port) {
4486 case PORT_B:
4487 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4488 break;
4489 case PORT_C:
4490 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4491 break;
4492 case PORT_D:
4493 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4494 break;
4495 default:
4496 MISSING_CASE(port->port);
4497 return false;
4498 }
4499
4500 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4501 }
4502
4503 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4504 struct intel_digital_port *port)
4505 {
4506 u32 bit;
4507
4508 switch (port->port) {
4509 case PORT_B:
4510 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4511 break;
4512 case PORT_C:
4513 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4514 break;
4515 case PORT_D:
4516 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4517 break;
4518 default:
4519 MISSING_CASE(port->port);
4520 return false;
4521 }
4522
4523 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4524 }
4525
4526 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4527 struct intel_digital_port *intel_dig_port)
4528 {
4529 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4530 enum port port;
4531 u32 bit;
4532
4533 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4534 switch (port) {
4535 case PORT_A:
4536 bit = BXT_DE_PORT_HP_DDIA;
4537 break;
4538 case PORT_B:
4539 bit = BXT_DE_PORT_HP_DDIB;
4540 break;
4541 case PORT_C:
4542 bit = BXT_DE_PORT_HP_DDIC;
4543 break;
4544 default:
4545 MISSING_CASE(port);
4546 return false;
4547 }
4548
4549 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4550 }
4551
4552 /**
4553 * intel_digital_port_connected - is the specified port connected?
4554 * @dev_priv: i915 private structure
4555 * @port: the port to test
4556 *
4557 * Return %true if @port is connected, %false otherwise.
4558 */
4559 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4560 struct intel_digital_port *port)
4561 {
4562 if (HAS_PCH_IBX(dev_priv))
4563 return ibx_digital_port_connected(dev_priv, port);
4564 else if (HAS_PCH_SPLIT(dev_priv))
4565 return cpt_digital_port_connected(dev_priv, port);
4566 else if (IS_BROXTON(dev_priv))
4567 return bxt_digital_port_connected(dev_priv, port);
4568 else if (IS_GM45(dev_priv))
4569 return gm45_digital_port_connected(dev_priv, port);
4570 else
4571 return g4x_digital_port_connected(dev_priv, port);
4572 }
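/*
 * Note: both the ordering and the else chaining matter here.
 * HAS_PCH_SPLIT() is also true on IBX, so the IBX check must come
 * first, and each later platform check must only be reached when the
 * earlier ones fail.
 */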
4573
4574 static struct edid *
4575 intel_dp_get_edid(struct intel_dp *intel_dp)
4576 {
4577 struct intel_connector *intel_connector = intel_dp->attached_connector;
4578
4579 /* use cached edid if we have one */
4580 if (intel_connector->edid) {
4581 /* invalid edid */
4582 if (IS_ERR(intel_connector->edid))
4583 return NULL;
4584
4585 return drm_edid_duplicate(intel_connector->edid);
4586 } else
4587 return drm_get_edid(&intel_connector->base,
4588 &intel_dp->aux.ddc);
4589 }
4590
4591 static void
4592 intel_dp_set_edid(struct intel_dp *intel_dp)
4593 {
4594 struct intel_connector *intel_connector = intel_dp->attached_connector;
4595 struct edid *edid;
4596
4597 edid = intel_dp_get_edid(intel_dp);
4598 intel_connector->detect_edid = edid;
4599
4600 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4601 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4602 else
4603 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4604 }
4605
4606 static void
4607 intel_dp_unset_edid(struct intel_dp *intel_dp)
4608 {
4609 struct intel_connector *intel_connector = intel_dp->attached_connector;
4610
4611 kfree(intel_connector->detect_edid);
4612 intel_connector->detect_edid = NULL;
4613
4614 intel_dp->has_audio = false;
4615 }
4616
4617 static enum drm_connector_status
4618 intel_dp_detect(struct drm_connector *connector, bool force)
4619 {
4620 struct intel_dp *intel_dp = intel_attached_dp(connector);
4621 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4622 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4623 struct drm_device *dev = connector->dev;
4624 enum drm_connector_status status;
4625 enum intel_display_power_domain power_domain;
4626 bool ret;
4627 u8 sink_irq_vector;
4628
4629 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4630 connector->base.id, connector->name);
4631 intel_dp_unset_edid(intel_dp);
4632
4633 if (intel_dp->is_mst) {
4634 /* MST devices are disconnected from a monitor POV */
4635 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4636 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4637 return connector_status_disconnected;
4638 }
4639
4640 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4641 intel_display_power_get(to_i915(dev), power_domain);
4642
4643 /* Can't disconnect eDP, but you can close the lid... */
4644 if (is_edp(intel_dp))
4645 status = edp_detect(intel_dp);
4646 else if (intel_digital_port_connected(to_i915(dev),
4647 dp_to_dig_port(intel_dp)))
4648 status = intel_dp_detect_dpcd(intel_dp);
4649 else
4650 status = connector_status_disconnected;
4651
4652 if (status != connector_status_connected) {
4653 intel_dp->compliance_test_active = 0;
4654 intel_dp->compliance_test_type = 0;
4655 intel_dp->compliance_test_data = 0;
4656
4657 goto out;
4658 }
4659
4660 intel_dp_probe_oui(intel_dp);
4661
4662 ret = intel_dp_probe_mst(intel_dp);
4663 if (ret) {
4664 /* if we are in MST mode then this connector
4665 * won't appear connected or have anything with EDID on it */
4666 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4667 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4668 status = connector_status_disconnected;
4669 goto out;
4670 }
4671
4672 /*
4673 * Clear the NACK and defer counts so that the exact values seen
4674 * while reading the EDID, which Compliance tests 4.2.2.4 and
4675 * 4.2.2.5 require, can be captured.
4676 */
4677 intel_dp->aux.i2c_nack_count = 0;
4678 intel_dp->aux.i2c_defer_count = 0;
4679
4680 intel_dp_set_edid(intel_dp);
4681
4682 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4683 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4684 status = connector_status_connected;
4685
4686 /* Try to read the source of the interrupt */
4687 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4688 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4689 /* Clear interrupt source */
4690 drm_dp_dpcd_writeb(&intel_dp->aux,
4691 DP_DEVICE_SERVICE_IRQ_VECTOR,
4692 sink_irq_vector);
4693
4694 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4695 intel_dp_handle_test_request(intel_dp);
4696 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4697 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4698 }
4699
4700 out:
4701 intel_display_power_put(to_i915(dev), power_domain);
4702 return status;
4703 }
4704
4705 static void
4706 intel_dp_force(struct drm_connector *connector)
4707 {
4708 struct intel_dp *intel_dp = intel_attached_dp(connector);
4709 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4710 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4711 enum intel_display_power_domain power_domain;
4712
4713 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4714 connector->base.id, connector->name);
4715 intel_dp_unset_edid(intel_dp);
4716
4717 if (connector->status != connector_status_connected)
4718 return;
4719
4720 power_domain = intel_display_port_aux_power_domain(intel_encoder);
4721 intel_display_power_get(dev_priv, power_domain);
4722
4723 intel_dp_set_edid(intel_dp);
4724
4725 intel_display_power_put(dev_priv, power_domain);
4726
4727 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4728 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4729 }
4730
4731 static int intel_dp_get_modes(struct drm_connector *connector)
4732 {
4733 struct intel_connector *intel_connector = to_intel_connector(connector);
4734 struct edid *edid;
4735
4736 edid = intel_connector->detect_edid;
4737 if (edid) {
4738 int ret = intel_connector_update_modes(connector, edid);
4739 if (ret)
4740 return ret;
4741 }
4742
4743 /* if eDP has no EDID, fall back to fixed mode */
4744 if (is_edp(intel_attached_dp(connector)) &&
4745 intel_connector->panel.fixed_mode) {
4746 struct drm_display_mode *mode;
4747
4748 mode = drm_mode_duplicate(connector->dev,
4749 intel_connector->panel.fixed_mode);
4750 if (mode) {
4751 drm_mode_probed_add(connector, mode);
4752 return 1;
4753 }
4754 }
4755
4756 return 0;
4757 }
4758
4759 static bool
4760 intel_dp_detect_audio(struct drm_connector *connector)
4761 {
4762 bool has_audio = false;
4763 struct edid *edid;
4764
4765 edid = to_intel_connector(connector)->detect_edid;
4766 if (edid)
4767 has_audio = drm_detect_monitor_audio(edid);
4768
4769 return has_audio;
4770 }
4771
4772 static int
4773 intel_dp_set_property(struct drm_connector *connector,
4774 struct drm_property *property,
4775 uint64_t val)
4776 {
4777 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4778 struct intel_connector *intel_connector = to_intel_connector(connector);
4779 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4780 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4781 int ret;
4782
4783 ret = drm_object_property_set_value(&connector->base, property, val);
4784 if (ret)
4785 return ret;
4786
4787 if (property == dev_priv->force_audio_property) {
4788 int i = val;
4789 bool has_audio;
4790
4791 if (i == intel_dp->force_audio)
4792 return 0;
4793
4794 intel_dp->force_audio = i;
4795
4796 if (i == HDMI_AUDIO_AUTO)
4797 has_audio = intel_dp_detect_audio(connector);
4798 else
4799 has_audio = (i == HDMI_AUDIO_ON);
4800
4801 if (has_audio == intel_dp->has_audio)
4802 return 0;
4803
4804 intel_dp->has_audio = has_audio;
4805 goto done;
4806 }
4807
4808 if (property == dev_priv->broadcast_rgb_property) {
4809 bool old_auto = intel_dp->color_range_auto;
4810 bool old_range = intel_dp->limited_color_range;
4811
4812 switch (val) {
4813 case INTEL_BROADCAST_RGB_AUTO:
4814 intel_dp->color_range_auto = true;
4815 break;
4816 case INTEL_BROADCAST_RGB_FULL:
4817 intel_dp->color_range_auto = false;
4818 intel_dp->limited_color_range = false;
4819 break;
4820 case INTEL_BROADCAST_RGB_LIMITED:
4821 intel_dp->color_range_auto = false;
4822 intel_dp->limited_color_range = true;
4823 break;
4824 default:
4825 return -EINVAL;
4826 }
4827
4828 if (old_auto == intel_dp->color_range_auto &&
4829 old_range == intel_dp->limited_color_range)
4830 return 0;
4831
4832 goto done;
4833 }
4834
4835 if (is_edp(intel_dp) &&
4836 property == connector->dev->mode_config.scaling_mode_property) {
4837 if (val == DRM_MODE_SCALE_NONE) {
4838 DRM_DEBUG_KMS("no scaling not supported\n");
4839 return -EINVAL;
4840 }
4841
4842 if (intel_connector->panel.fitting_mode == val) {
4843 /* the eDP scaling property is not changed */
4844 return 0;
4845 }
4846 intel_connector->panel.fitting_mode = val;
4847
4848 goto done;
4849 }
4850
4851 return -EINVAL;
4852
4853 done:
4854 if (intel_encoder->base.crtc)
4855 intel_crtc_restore_mode(intel_encoder->base.crtc);
4856
4857 return 0;
4858 }
4859
4860 static void
4861 intel_dp_connector_destroy(struct drm_connector *connector)
4862 {
4863 struct intel_connector *intel_connector = to_intel_connector(connector);
4864
4865 kfree(intel_connector->detect_edid);
4866
4867 if (!IS_ERR_OR_NULL(intel_connector->edid))
4868 kfree(intel_connector->edid);
4869
4870 /* Can't call is_edp() since the encoder may have been destroyed
4871 * already. */
4872 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4873 intel_panel_fini(&intel_connector->panel);
4874
4875 drm_connector_cleanup(connector);
4876 kfree(connector);
4877 }
4878
4879 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4880 {
4881 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4882 struct intel_dp *intel_dp = &intel_dig_port->dp;
4883
4884 intel_dp_aux_fini(intel_dp);
4885 intel_dp_mst_encoder_cleanup(intel_dig_port);
4886 if (is_edp(intel_dp)) {
4887 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4888 /*
4889 * vdd might still be enabled due to the delayed vdd off.
4890 * Make sure vdd is actually turned off here.
4891 */
4892 pps_lock(intel_dp);
4893 edp_panel_vdd_off_sync(intel_dp);
4894 pps_unlock(intel_dp);
4895
4896 if (intel_dp->edp_notifier.notifier_call) {
4897 unregister_reboot_notifier(&intel_dp->edp_notifier);
4898 intel_dp->edp_notifier.notifier_call = NULL;
4899 }
4900 }
4901 drm_encoder_cleanup(encoder);
4902 kfree(intel_dig_port);
4903 }
4904
4905 static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4906 {
4907 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4908
4909 if (!is_edp(intel_dp))
4910 return;
4911
4912 /*
4913 * vdd might still be enabled due to the delayed vdd off.
4914 * Make sure vdd is actually turned off here.
4915 */
4916 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4917 pps_lock(intel_dp);
4918 edp_panel_vdd_off_sync(intel_dp);
4919 pps_unlock(intel_dp);
4920 }
4921
4922 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4923 {
4924 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4925 struct drm_device *dev = intel_dig_port->base.base.dev;
4926 struct drm_i915_private *dev_priv = dev->dev_private;
4927 enum intel_display_power_domain power_domain;
4928
4929 lockdep_assert_held(&dev_priv->pps_mutex);
4930
4931 if (!edp_have_panel_vdd(intel_dp))
4932 return;
4933
4934 /*
4935 * The VDD bit needs a power domain reference, so if the bit is
4936 * already enabled when we boot or resume, grab this reference and
4937 * schedule a vdd off, so we don't hold on to the reference
4938 * indefinitely.
4939 */
4940 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4941 power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
4942 intel_display_power_get(dev_priv, power_domain);
4943
4944 edp_panel_vdd_schedule_off(intel_dp);
4945 }
4946
4947 static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4948 {
4949 struct intel_dp *intel_dp;
4950
4951 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4952 return;
4953
4954 intel_dp = enc_to_intel_dp(encoder);
4955
4956 pps_lock(intel_dp);
4957
4958 /*
4959 * Read out the current power sequencer assignment,
4960 * in case the BIOS did something with it.
4961 */
4962 if (IS_VALLEYVIEW(encoder->dev) || IS_CHERRYVIEW(encoder->dev))
4963 vlv_initial_power_sequencer_setup(intel_dp);
4964
4965 intel_edp_panel_vdd_sanitize(intel_dp);
4966
4967 pps_unlock(intel_dp);
4968 }
4969
4970 static const struct drm_connector_funcs intel_dp_connector_funcs = {
4971 .dpms = drm_atomic_helper_connector_dpms,
4972 .detect = intel_dp_detect,
4973 .force = intel_dp_force,
4974 .fill_modes = drm_helper_probe_single_connector_modes,
4975 .set_property = intel_dp_set_property,
4976 .atomic_get_property = intel_connector_atomic_get_property,
4977 .destroy = intel_dp_connector_destroy,
4978 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4979 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
4980 };
4981
4982 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4983 .get_modes = intel_dp_get_modes,
4984 .mode_valid = intel_dp_mode_valid,
4985 .best_encoder = intel_best_encoder,
4986 };
4987
4988 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
4989 .reset = intel_dp_encoder_reset,
4990 .destroy = intel_dp_encoder_destroy,
4991 };
4992
4993 enum irqreturn
4994 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4995 {
4996 struct intel_dp *intel_dp = &intel_dig_port->dp;
4997 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4998 struct drm_device *dev = intel_dig_port->base.base.dev;
4999 struct drm_i915_private *dev_priv = dev->dev_private;
5000 enum intel_display_power_domain power_domain;
5001 enum irqreturn ret = IRQ_NONE;
5002
5003 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5004 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5005 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
5006
5007 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5008 /*
5009 * vdd off can generate a long pulse on eDP which
5010 * would require vdd on to handle it, and thus we
5011 * would end up in an endless cycle of
5012 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5013 */
5014 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5015 port_name(intel_dig_port->port));
5016 return IRQ_HANDLED;
5017 }
5018
5019 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5020 port_name(intel_dig_port->port),
5021 long_hpd ? "long" : "short");
5022
5023 power_domain = intel_display_port_aux_power_domain(intel_encoder);
5024 intel_display_power_get(dev_priv, power_domain);
5025
5026 if (long_hpd) {
5027 /* indicate that we need to restart link training */
5028 intel_dp->train_set_valid = false;
5029
5030 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5031 goto mst_fail;
5032
5033 if (!intel_dp_get_dpcd(intel_dp)) {
5034 goto mst_fail;
5035 }
5036
5037 intel_dp_probe_oui(intel_dp);
5038
5039 if (!intel_dp_probe_mst(intel_dp)) {
5040 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5041 intel_dp_check_link_status(intel_dp);
5042 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5043 goto mst_fail;
5044 }
5045 } else {
5046 if (intel_dp->is_mst) {
5047 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
5048 goto mst_fail;
5049 }
5050
5051 if (!intel_dp->is_mst) {
5052 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5053 intel_dp_check_link_status(intel_dp);
5054 drm_modeset_unlock(&dev->mode_config.connection_mutex);
5055 }
5056 }
5057
5058 ret = IRQ_HANDLED;
5059
5060 goto put_power;
5061 mst_fail:
5062 /* if we were in MST mode, and the device is no longer there, get out of MST mode */
5063 if (intel_dp->is_mst) {
5064 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5065 intel_dp->is_mst = false;
5066 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5067 }
5068 put_power:
5069 intel_display_power_put(dev_priv, power_domain);
5070
5071 return ret;
5072 }
5073
5074 /* check the VBT to see whether the eDP is on another port */
5075 bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5076 {
5077 struct drm_i915_private *dev_priv = dev->dev_private;
5078 union child_device_config *p_child;
5079 int i;
5080 static const short port_mapping[] = {
5081 [PORT_B] = DVO_PORT_DPB,
5082 [PORT_C] = DVO_PORT_DPC,
5083 [PORT_D] = DVO_PORT_DPD,
5084 [PORT_E] = DVO_PORT_DPE,
5085 };
5086
5087 /*
5088 * eDP is not supported on g4x, so bail out early just
5089 * for a bit of extra safety in case the VBT is bonkers.
5090 */
5091 if (INTEL_INFO(dev)->gen < 5)
5092 return false;
5093
5094 if (port == PORT_A)
5095 return true;
5096
5097 if (!dev_priv->vbt.child_dev_num)
5098 return false;
5099
5100 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5101 p_child = dev_priv->vbt.child_dev + i;
5102
5103 if (p_child->common.dvo_port == port_mapping[port] &&
5104 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5105 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
5106 return true;
5107 }
5108 return false;
5109 }
5110
5111 void
5112 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5113 {
5114 struct intel_connector *intel_connector = to_intel_connector(connector);
5115
5116 intel_attach_force_audio_property(connector);
5117 intel_attach_broadcast_rgb_property(connector);
5118 intel_dp->color_range_auto = true;
5119
5120 if (is_edp(intel_dp)) {
5121 drm_mode_create_scaling_mode_property(connector->dev);
5122 drm_object_attach_property(
5123 &connector->base,
5124 connector->dev->mode_config.scaling_mode_property,
5125 DRM_MODE_SCALE_ASPECT);
5126 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5127 }
5128 }
5129
5130 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5131 {
5132 intel_dp->panel_power_off_time = ktime_get_boottime();
5133 intel_dp->last_power_on = jiffies;
5134 intel_dp->last_backlight_off = jiffies;
5135 }
5136
5137 static void
5138 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5139 struct intel_dp *intel_dp)
5140 {
5141 struct drm_i915_private *dev_priv = dev->dev_private;
5142 struct edp_power_seq cur, vbt, spec,
5143 *final = &intel_dp->pps_delays;
5144 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5145 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
5146
5147 lockdep_assert_held(&dev_priv->pps_mutex);
5148
5149 /* already initialized? */
5150 if (final->t11_t12 != 0)
5151 return;
5152
5153 if (IS_BROXTON(dev)) {
5154 /*
5155 * TODO: BXT has 2 sets of PPS registers.
5156 * The correct register set for Broxton needs to be identified
5157 * using the VBT; hardcoding set 0 for now.
5158 */
5159 pp_ctrl_reg = BXT_PP_CONTROL(0);
5160 pp_on_reg = BXT_PP_ON_DELAYS(0);
5161 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5162 } else if (HAS_PCH_SPLIT(dev)) {
5163 pp_ctrl_reg = PCH_PP_CONTROL;
5164 pp_on_reg = PCH_PP_ON_DELAYS;
5165 pp_off_reg = PCH_PP_OFF_DELAYS;
5166 pp_div_reg = PCH_PP_DIVISOR;
5167 } else {
5168 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5169
5170 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5171 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5172 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5173 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5174 }
5175
5176 /* Workaround: Need to write PP_CONTROL with the unlock key as
5177 * the very first thing. */
5178 pp_ctl = ironlake_get_pp_control(intel_dp);
5179
5180 pp_on = I915_READ(pp_on_reg);
5181 pp_off = I915_READ(pp_off_reg);
5182 if (!IS_BROXTON(dev)) {
5183 I915_WRITE(pp_ctrl_reg, pp_ctl);
5184 pp_div = I915_READ(pp_div_reg);
5185 }
5186
5187 /* Pull timing values out of registers */
5188 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5189 PANEL_POWER_UP_DELAY_SHIFT;
5190
5191 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5192 PANEL_LIGHT_ON_DELAY_SHIFT;
5193
5194 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5195 PANEL_LIGHT_OFF_DELAY_SHIFT;
5196
5197 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5198 PANEL_POWER_DOWN_DELAY_SHIFT;
5199
5200 if (IS_BROXTON(dev)) {
5201 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5202 BXT_POWER_CYCLE_DELAY_SHIFT;
5203 if (tmp > 0)
5204 cur.t11_t12 = (tmp - 1) * 1000;
5205 else
5206 cur.t11_t12 = 0;
5207 } else {
5208 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5209 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5210 }
5211
5212 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5213 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5214
5215 vbt = dev_priv->vbt.edp_pps;
5216
5217 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5218 * our hw here, which are all in 100usec. */
5219 spec.t1_t3 = 210 * 10;
5220 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5221 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5222 spec.t10 = 500 * 10;
5223 /* This one is special and actually in units of 100ms, but zero
5224 * based in the hw (so we need to add 100 ms). But the sw vbt
5225 * table multiplies it by 1000 to make it in units of 100usec,
5226 * too. */
5227 spec.t11_t12 = (510 + 100) * 10;
5228
5229 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5230 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5231
5232 /* Use the max of the register settings and vbt. If both are
5233 * unset, fall back to the spec limits. */
5234 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5235 spec.field : \
5236 max(cur.field, vbt.field))
5237 assign_final(t1_t3);
5238 assign_final(t8);
5239 assign_final(t9);
5240 assign_final(t10);
5241 assign_final(t11_t12);
5242 #undef assign_final
5243
5244 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5245 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5246 intel_dp->backlight_on_delay = get_delay(t8);
5247 intel_dp->backlight_off_delay = get_delay(t9);
5248 intel_dp->panel_power_down_delay = get_delay(t10);
5249 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5250 #undef get_delay
5251
5252 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5253 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5254 intel_dp->panel_power_cycle_delay);
5255
5256 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5257 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5258 }
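/*
 * Worked example: with the spec fallback t1_t3 = 2100 (210 ms in the
 * hw's 100 us units), get_delay(t1_t3) = DIV_ROUND_UP(2100, 10) = 210,
 * i.e. a 210 ms panel_power_up_delay.
 */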
5259
5260 static void
5261 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5262 struct intel_dp *intel_dp)
5263 {
5264 struct drm_i915_private *dev_priv = dev->dev_private;
5265 u32 pp_on, pp_off, pp_div, port_sel = 0;
5266 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
5267 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg;
5268 enum port port = dp_to_dig_port(intel_dp)->port;
5269 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5270
5271 lockdep_assert_held(&dev_priv->pps_mutex);
5272
5273 if (IS_BROXTON(dev)) {
5274 /*
5275 * TODO: BXT has 2 sets of PPS registers.
5276 * The correct register set for Broxton needs to be identified
5277 * using the VBT; hardcoding set 0 for now.
5278 */
5279 pp_ctrl_reg = BXT_PP_CONTROL(0);
5280 pp_on_reg = BXT_PP_ON_DELAYS(0);
5281 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5282
5283 } else if (HAS_PCH_SPLIT(dev)) {
5284 pp_on_reg = PCH_PP_ON_DELAYS;
5285 pp_off_reg = PCH_PP_OFF_DELAYS;
5286 pp_div_reg = PCH_PP_DIVISOR;
5287 } else {
5288 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5289
5290 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5291 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5292 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5293 }
5294
5295 /*
5296 * And finally store the new values in the power sequencer. The
5297 * backlight delays are set to 1 because we do manual waits on them. For
5298 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5299 * we'll end up waiting for the backlight off delay twice: once when we
5300 * do the manual sleep, and once when we disable the panel and wait for
5301 * the PP_STATUS bit to become zero.
5302 */
5303 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5304 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5305 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5306 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5307 /* Compute the divisor for the pp clock, simply match the Bspec
5308 * formula. */
5309 if (IS_BROXTON(dev)) {
5310 pp_div = I915_READ(pp_ctrl_reg);
5311 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5312 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5313 << BXT_POWER_CYCLE_DELAY_SHIFT);
5314 } else {
5315 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5316 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5317 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5318 }
5319
5320 /* Haswell doesn't have any port selection bits for the panel
5321 * power sequencer any more. */
5322 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5323 port_sel = PANEL_PORT_SELECT_VLV(port);
5324 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5325 if (port == PORT_A)
5326 port_sel = PANEL_PORT_SELECT_DPA;
5327 else
5328 port_sel = PANEL_PORT_SELECT_DPD;
5329 }
5330
5331 pp_on |= port_sel;
5332
5333 I915_WRITE(pp_on_reg, pp_on);
5334 I915_WRITE(pp_off_reg, pp_off);
5335 if (IS_BROXTON(dev))
5336 I915_WRITE(pp_ctrl_reg, pp_div);
5337 else
5338 I915_WRITE(pp_div_reg, pp_div);
5339
5340 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5341 I915_READ(pp_on_reg),
5342 I915_READ(pp_off_reg),
5343 IS_BROXTON(dev) ?
5344 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
5345 I915_READ(pp_div_reg));
5346 }
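/*
 * Note the BXT round trip: t11_t12 = 6100 is written as
 * DIV_ROUND_UP(6100 + 1, 1000) = 7, which the readout path decodes as
 * (7 - 1) * 1000 = 6000, so the delay is effectively quantized to
 * whole 100 ms steps.
 */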
5347
5348 /**
5349 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5350 * @dev: DRM device
5351 * @refresh_rate: RR to be programmed
5352 *
5353 * This function gets called when refresh rate (RR) has to be changed from
5354 * one frequency to another. Switches can be between high and low RR
5355 * supported by the panel or to any other RR based on media playback (in
5356 * this case, RR value needs to be passed from user space).
5357 *
5358 * The caller of this function needs to take a lock on dev_priv->drrs.
5359 */
5360 static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5361 {
5362 struct drm_i915_private *dev_priv = dev->dev_private;
5363 struct intel_encoder *encoder;
5364 struct intel_digital_port *dig_port = NULL;
5365 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5366 struct intel_crtc_state *config = NULL;
5367 struct intel_crtc *intel_crtc = NULL;
5368 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5369
5370 if (refresh_rate <= 0) {
5371 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5372 return;
5373 }
5374
5375 if (intel_dp == NULL) {
5376 DRM_DEBUG_KMS("DRRS not supported.\n");
5377 return;
5378 }
5379
5380 /*
5381 * FIXME: This needs proper synchronization with psr state for some
5382 * platforms that cannot have PSR and DRRS enabled at the same time.
5383 */
5384
5385 dig_port = dp_to_dig_port(intel_dp);
5386 encoder = &dig_port->base;
5387 intel_crtc = to_intel_crtc(encoder->base.crtc);
5388
5389 if (!intel_crtc) {
5390 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5391 return;
5392 }
5393
5394 config = intel_crtc->config;
5395
5396 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5397 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5398 return;
5399 }
5400
5401 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5402 refresh_rate)
5403 index = DRRS_LOW_RR;
5404
5405 if (index == dev_priv->drrs.refresh_rate_type) {
5406 DRM_DEBUG_KMS(
5407 "DRRS requested for previously set RR...ignoring\n");
5408 return;
5409 }
5410
5411 if (!intel_crtc->active) {
5412 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5413 return;
5414 }
5415
5416 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
5417 switch (index) {
5418 case DRRS_HIGH_RR:
5419 intel_dp_set_m_n(intel_crtc, M1_N1);
5420 break;
5421 case DRRS_LOW_RR:
5422 intel_dp_set_m_n(intel_crtc, M2_N2);
5423 break;
5424 case DRRS_MAX_RR:
5425 default:
5426 DRM_ERROR("Unsupported refreshrate type\n");
5427 }
5428 } else if (INTEL_INFO(dev)->gen > 6) {
5429 i915_reg_t reg = PIPECONF(intel_crtc->config->cpu_transcoder);
5430 u32 val;
5431
5432 val = I915_READ(reg);
5433 if (index > DRRS_HIGH_RR) {
5434 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5435 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5436 else
5437 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5438 } else {
5439 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5440 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5441 else
5442 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5443 }
5444 I915_WRITE(reg, val);
5445 }
5446
5447 dev_priv->drrs.refresh_rate_type = index;
5448
5449 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5450 }
5451
5452 /**
5453 * intel_edp_drrs_enable - init drrs struct if supported
5454 * @intel_dp: DP struct
5455 *
5456 * Initializes frontbuffer_bits and drrs.dp
5457 */
5458 void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5459 {
5460 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5461 struct drm_i915_private *dev_priv = dev->dev_private;
5462 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5463 struct drm_crtc *crtc = dig_port->base.base.crtc;
5464 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5465
5466 if (!intel_crtc->config->has_drrs) {
5467 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5468 return;
5469 }
5470
5471 mutex_lock(&dev_priv->drrs.mutex);
5472 if (WARN_ON(dev_priv->drrs.dp)) {
5473 DRM_ERROR("DRRS already enabled\n");
5474 goto unlock;
5475 }
5476
5477 dev_priv->drrs.busy_frontbuffer_bits = 0;
5478
5479 dev_priv->drrs.dp = intel_dp;
5480
5481 unlock:
5482 mutex_unlock(&dev_priv->drrs.mutex);
5483 }
5484
5485 /**
5486 * intel_edp_drrs_disable - Disable DRRS
5487 * @intel_dp: DP struct
5488 *
5489 */
5490 void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5491 {
5492 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5493 struct drm_i915_private *dev_priv = dev->dev_private;
5494 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5495 struct drm_crtc *crtc = dig_port->base.base.crtc;
5496 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5497
5498 if (!intel_crtc->config->has_drrs)
5499 return;
5500
5501 mutex_lock(&dev_priv->drrs.mutex);
5502 if (!dev_priv->drrs.dp) {
5503 mutex_unlock(&dev_priv->drrs.mutex);
5504 return;
5505 }
5506
5507 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5508 intel_dp_set_drrs_state(dev_priv->dev,
5509 intel_dp->attached_connector->panel.
5510 fixed_mode->vrefresh);
5511
5512 dev_priv->drrs.dp = NULL;
5513 mutex_unlock(&dev_priv->drrs.mutex);
5514
5515 cancel_delayed_work_sync(&dev_priv->drrs.work);
5516 }
5517
5518 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5519 {
5520 struct drm_i915_private *dev_priv =
5521 container_of(work, typeof(*dev_priv), drrs.work.work);
5522 struct intel_dp *intel_dp;
5523
5524 mutex_lock(&dev_priv->drrs.mutex);
5525
5526 intel_dp = dev_priv->drrs.dp;
5527
5528 if (!intel_dp)
5529 goto unlock;
5530
5531 /*
5532 * The delayed work can race with an invalidate hence we need to
5533 * recheck.
5534 */
5535
5536 if (dev_priv->drrs.busy_frontbuffer_bits)
5537 goto unlock;
5538
5539 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5540 intel_dp_set_drrs_state(dev_priv->dev,
5541 intel_dp->attached_connector->panel.
5542 downclock_mode->vrefresh);
5543
5544 unlock:
5545 mutex_unlock(&dev_priv->drrs.mutex);
5546 }
5547
5548 /**
5549 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5550 * @dev: DRM device
5551 * @frontbuffer_bits: frontbuffer plane tracking bits
5552 *
5553 * This function gets called every time rendering on the given planes starts.
5554 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
5555 *
5556 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
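 *
 * A hypothetical call from the frontbuffer tracking code (illustrative
 * only; the INTEL_FRONTBUFFER_PRIMARY bits are one possible input):
 *
 *	intel_edp_drrs_invalidate(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));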
5557 */
5558 void intel_edp_drrs_invalidate(struct drm_device *dev,
5559 unsigned frontbuffer_bits)
5560 {
5561 struct drm_i915_private *dev_priv = dev->dev_private;
5562 struct drm_crtc *crtc;
5563 enum pipe pipe;
5564
5565 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5566 return;
5567
5568 cancel_delayed_work(&dev_priv->drrs.work);
5569
5570 mutex_lock(&dev_priv->drrs.mutex);
5571 if (!dev_priv->drrs.dp) {
5572 mutex_unlock(&dev_priv->drrs.mutex);
5573 return;
5574 }
5575
5576 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5577 pipe = to_intel_crtc(crtc)->pipe;
5578
5579 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5580 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5581
5582 /* invalidate means busy screen hence upclock */
5583 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5584 intel_dp_set_drrs_state(dev_priv->dev,
5585 dev_priv->drrs.dp->attached_connector->panel.
5586 fixed_mode->vrefresh);
5587
5588 mutex_unlock(&dev_priv->drrs.mutex);
5589 }
5590
5591 /**
5592 * intel_edp_drrs_flush - Restart Idleness DRRS
5593 * @dev: DRM device
5594 * @frontbuffer_bits: frontbuffer plane tracking bits
5595 *
5596 * This function gets called every time rendering on the given planes has
5597 * completed or a flip on a crtc is completed. So DRRS should be upclocked
5598 * (LOW_RR -> HIGH_RR). Idleness detection should also be started again,
5599 * if no other planes are dirty.
5600 *
5601 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
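 *
 * A hypothetical call after rendering completes (illustrative only;
 * the frontbuffer bits are one possible input):
 *
 *	intel_edp_drrs_flush(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));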
5602 */
5603 void intel_edp_drrs_flush(struct drm_device *dev,
5604 unsigned frontbuffer_bits)
5605 {
5606 struct drm_i915_private *dev_priv = dev->dev_private;
5607 struct drm_crtc *crtc;
5608 enum pipe pipe;
5609
5610 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5611 return;
5612
5613 cancel_delayed_work(&dev_priv->drrs.work);
5614
5615 mutex_lock(&dev_priv->drrs.mutex);
5616 if (!dev_priv->drrs.dp) {
5617 mutex_unlock(&dev_priv->drrs.mutex);
5618 return;
5619 }
5620
5621 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5622 pipe = to_intel_crtc(crtc)->pipe;
5623
5624 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5625 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5626
5627 /* flush means busy screen hence upclock */
5628 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5629 intel_dp_set_drrs_state(dev_priv->dev,
5630 dev_priv->drrs.dp->attached_connector->panel.
5631 fixed_mode->vrefresh);
5632
5633 /*
5634 * flush also means no more activity hence schedule downclock, if all
5635 * other fbs are quiescent too
5636 */
5637 if (!dev_priv->drrs.busy_frontbuffer_bits)
5638 schedule_delayed_work(&dev_priv->drrs.work,
5639 msecs_to_jiffies(1000));
5640 mutex_unlock(&dev_priv->drrs.mutex);
5641 }
5642
5643 /**
5644 * DOC: Display Refresh Rate Switching (DRRS)
5645 *
5646 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5647 * which enables switching between low and high refresh rates,
5648 * dynamically, based on the usage scenario. This feature is applicable
5649 * for internal panels.
5650 *
5651 * Indication that the panel supports DRRS is given by the panel EDID, which
5652 * would list multiple refresh rates for one resolution.
5653 *
5654 * DRRS is of two types - static and seamless.
5655 * Static DRRS involves changing the refresh rate (RR) by doing a full modeset
5656 * (may appear as a blink on screen) and is used in the dock-undock scenario.
5657 * Seamless DRRS involves changing RR without any visual effect to the user
5658 * and can be used during normal system usage. This is done by programming
5659 * certain registers.
5660 *
5661 * Support for static/seamless DRRS may be indicated in the VBT based on
5662 * inputs from the panel spec.
5663 *
5664 * DRRS saves power by switching to low RR based on usage scenarios.
5665 *
5666 * eDP DRRS:
5667 * The implementation is based on frontbuffer tracking.
5668 * When there is a disturbance on the screen triggered by user activity or
5669 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5670 * When there is no movement on screen, after a timeout of 1 second, a switch
5671 * to low RR is made.
5672 * For integration with frontbuffer tracking code,
5673 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
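 *
 * A sketch of the expected call ordering (illustrative only):
 *
 *	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
 *	(rendering to the frontbuffer happens here)
 *	intel_edp_drrs_flush(dev, frontbuffer_bits);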
5674 *
5675 * DRRS can be further extended to support other internal panels, and also
5676 * the video playback scenario, wherein the RR is set based on the rate
5677 * requested by userspace.
5678 */
5679
5680 /**
5681 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5682 * @intel_connector: eDP connector
5683 * @fixed_mode: preferred mode of panel
5684 *
5685 * This function is called only once at driver load to initialize basic
5686 * DRRS stuff.
5687 *
5688 * Returns:
5689 * Downclock mode if panel supports it, else return NULL.
5690 * DRRS support is determined by the presence of downclock mode (apart
5691 * from VBT setting).
5692 */
5693 static struct drm_display_mode *
5694 intel_dp_drrs_init(struct intel_connector *intel_connector,
5695 struct drm_display_mode *fixed_mode)
5696 {
5697 struct drm_connector *connector = &intel_connector->base;
5698 struct drm_device *dev = connector->dev;
5699 struct drm_i915_private *dev_priv = dev->dev_private;
5700 struct drm_display_mode *downclock_mode = NULL;
5701
5702 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5703 mutex_init(&dev_priv->drrs.mutex);
5704
5705 if (INTEL_INFO(dev)->gen <= 6) {
5706 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5707 return NULL;
5708 }
5709
5710 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5711 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5712 return NULL;
5713 }
5714
5715 downclock_mode = intel_find_panel_downclock
5716 (dev, fixed_mode, connector);
5717
5718 if (!downclock_mode) {
5719 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5720 return NULL;
5721 }
5722
5723 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5724
5725 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5726 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5727 return downclock_mode;
5728 }
5729
5730 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5731 struct intel_connector *intel_connector)
5732 {
5733 struct drm_connector *connector = &intel_connector->base;
5734 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5735 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5736 struct drm_device *dev = intel_encoder->base.dev;
5737 struct drm_i915_private *dev_priv = dev->dev_private;
5738 struct drm_display_mode *fixed_mode = NULL;
5739 struct drm_display_mode *downclock_mode = NULL;
5740 bool has_dpcd;
5741 struct drm_display_mode *scan;
5742 struct edid *edid;
5743 enum pipe pipe = INVALID_PIPE;
5744
5745 if (!is_edp(intel_dp))
5746 return true;
5747
5748 pps_lock(intel_dp);
5749 intel_edp_panel_vdd_sanitize(intel_dp);
5750 pps_unlock(intel_dp);
5751
5752 /* Cache DPCD and EDID for edp. */
5753 has_dpcd = intel_dp_get_dpcd(intel_dp);
5754
5755 if (has_dpcd) {
5756 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5757 dev_priv->no_aux_handshake =
5758 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5759 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5760 } else {
5761 /* if this fails, presume the device is a ghost */
5762 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5763 return false;
5764 }
5765
5766 /* We now know it's not a ghost, init power sequence regs. */
5767 pps_lock(intel_dp);
5768 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5769 pps_unlock(intel_dp);
5770
5771 mutex_lock(&dev->mode_config.mutex);
5772 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5773 if (edid) {
5774 if (drm_add_edid_modes(connector, edid)) {
5775 drm_mode_connector_update_edid_property(connector,
5776 edid);
5777 drm_edid_to_eld(connector, edid);
5778 } else {
5779 kfree(edid);
5780 edid = ERR_PTR(-EINVAL);
5781 }
5782 } else {
5783 edid = ERR_PTR(-ENOENT);
5784 }
5785 intel_connector->edid = edid;
5786
5787 /* prefer fixed mode from EDID if available */
5788 list_for_each_entry(scan, &connector->probed_modes, head) {
5789 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5790 fixed_mode = drm_mode_duplicate(dev, scan);
5791 downclock_mode = intel_dp_drrs_init(
5792 intel_connector, fixed_mode);
5793 break;
5794 }
5795 }
5796
5797 /* fallback to VBT if available for eDP */
5798 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5799 fixed_mode = drm_mode_duplicate(dev,
5800 dev_priv->vbt.lfp_lvds_vbt_mode);
5801 if (fixed_mode)
5802 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5803 }
5804 mutex_unlock(&dev->mode_config.mutex);
5805
5806 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5807 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5808 register_reboot_notifier(&intel_dp->edp_notifier);
5809
5810 /*
5811 * Figure out the current pipe for the initial backlight setup.
5812 * If the current pipe isn't valid, try the PPS pipe, and if that
5813 * fails just assume pipe A.
5814 */
5815 if (IS_CHERRYVIEW(dev))
5816 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5817 else
5818 pipe = PORT_TO_PIPE(intel_dp->DP);
5819
5820 if (pipe != PIPE_A && pipe != PIPE_B)
5821 pipe = intel_dp->pps_pipe;
5822
5823 if (pipe != PIPE_A && pipe != PIPE_B)
5824 pipe = PIPE_A;
5825
5826 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5827 pipe_name(pipe));
5828 }
5829
5830 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5831 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5832 intel_panel_setup_backlight(connector, pipe);
5833
5834 return true;
5835 }
5836
5837 bool
5838 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5839 struct intel_connector *intel_connector)
5840 {
5841 struct drm_connector *connector = &intel_connector->base;
5842 struct intel_dp *intel_dp = &intel_dig_port->dp;
5843 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5844 struct drm_device *dev = intel_encoder->base.dev;
5845 struct drm_i915_private *dev_priv = dev->dev_private;
5846 enum port port = intel_dig_port->port;
5847 int type, ret;
5848
5849 if (WARN(intel_dig_port->max_lanes < 1,
5850 "Not enough lanes (%d) for DP on port %c\n",
5851 intel_dig_port->max_lanes, port_name(port)))
5852 return false;
5853
5854 intel_dp->pps_pipe = INVALID_PIPE;
5855
5856 /* intel_dp vfuncs */
5857 if (INTEL_INFO(dev)->gen >= 9)
5858 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5859 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5860 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5861 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5862 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5863 else if (HAS_PCH_SPLIT(dev))
5864 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5865 else
5866 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5867
5868 if (INTEL_INFO(dev)->gen >= 9)
5869 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5870 else
5871 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5872
5873 if (HAS_DDI(dev))
5874 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5875
5876 /* Preserve the current hw state. */
5877 intel_dp->DP = I915_READ(intel_dp->output_reg);
5878 intel_dp->attached_connector = intel_connector;
5879
5880 if (intel_dp_is_edp(dev, port))
5881 type = DRM_MODE_CONNECTOR_eDP;
5882 else
5883 type = DRM_MODE_CONNECTOR_DisplayPort;
5884
5885 /*
5886 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5887 * for DP the encoder type can be set by the caller to
5888 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5889 */
5890 if (type == DRM_MODE_CONNECTOR_eDP)
5891 intel_encoder->type = INTEL_OUTPUT_EDP;
5892
5893 /* eDP only on port B and/or C on vlv/chv */
5894 if (WARN_ON((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
5895 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
5896 return false;
5897
5898 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5899 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5900 port_name(port));
5901
5902 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
5903 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
5904
5905 connector->interlace_allowed = true;
5906 connector->doublescan_allowed = 0;
5907
5908 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5909 edp_panel_vdd_work);
5910
5911 intel_connector_attach_encoder(intel_connector, intel_encoder);
5912 drm_connector_register(connector);
5913
5914 if (HAS_DDI(dev))
5915 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5916 else
5917 intel_connector->get_hw_state = intel_connector_get_hw_state;
5918 intel_connector->unregister = intel_dp_connector_unregister;
5919
5920 /* Set up the hotplug pin. */
5921 switch (port) {
5922 case PORT_A:
5923 intel_encoder->hpd_pin = HPD_PORT_A;
5924 break;
5925 case PORT_B:
5926 intel_encoder->hpd_pin = HPD_PORT_B;
5927 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
5928 intel_encoder->hpd_pin = HPD_PORT_A;
5929 break;
5930 case PORT_C:
5931 intel_encoder->hpd_pin = HPD_PORT_C;
5932 break;
5933 case PORT_D:
5934 intel_encoder->hpd_pin = HPD_PORT_D;
5935 break;
5936 case PORT_E:
5937 intel_encoder->hpd_pin = HPD_PORT_E;
5938 break;
5939 default:
5940 BUG();
5941 }
5942
5943 if (is_edp(intel_dp)) {
5944 pps_lock(intel_dp);
5945 intel_dp_init_panel_power_timestamps(intel_dp);
5946 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5947 vlv_initial_power_sequencer_setup(intel_dp);
5948 else
5949 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5950 pps_unlock(intel_dp);
5951 }
5952
5953 ret = intel_dp_aux_init(intel_dp, intel_connector);
5954 if (ret)
5955 goto fail;
5956
5957 /* init MST on ports that can support it */
5958 if (HAS_DP_MST(dev) &&
5959 (port == PORT_B || port == PORT_C || port == PORT_D))
5960 intel_dp_mst_encoder_init(intel_dig_port,
5961 intel_connector->base.base.id);
5962
5963 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5964 intel_dp_aux_fini(intel_dp);
5965 intel_dp_mst_encoder_cleanup(intel_dig_port);
5966 goto fail;
5967 }
5968
5969 intel_dp_add_properties(intel_dp, connector);
5970
5971 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5972 * 0xd. Failure to do so will result in spurious interrupts being
5973 * generated on the port when a cable is not attached.
5974 */
5975 if (IS_G4X(dev) && !IS_GM45(dev)) {
5976 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
5977 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5978 }
5979
5980 i915_debugfs_connector_add(connector);
5981
5982 return true;
5983
5984 fail:
5985 if (is_edp(intel_dp)) {
5986 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5987 /*
5988 * vdd might still be enabled due to the delayed vdd off.
5989 * Make sure vdd is actually turned off here.
5990 */
5991 pps_lock(intel_dp);
5992 edp_panel_vdd_off_sync(intel_dp);
5993 pps_unlock(intel_dp);
5994 }
5995 drm_connector_unregister(connector);
5996 drm_connector_cleanup(connector);
5997
5998 return false;
5999 }
6000
6001 void
6002 intel_dp_init(struct drm_device *dev,
6003 i915_reg_t output_reg, enum port port)
6004 {
6005 struct drm_i915_private *dev_priv = dev->dev_private;
6006 struct intel_digital_port *intel_dig_port;
6007 struct intel_encoder *intel_encoder;
6008 struct drm_encoder *encoder;
6009 struct intel_connector *intel_connector;
6010
6011 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6012 if (!intel_dig_port)
6013 return;
6014
6015 intel_connector = intel_connector_alloc();
6016 if (!intel_connector)
6017 goto err_connector_alloc;
6018
6019 intel_encoder = &intel_dig_port->base;
6020 encoder = &intel_encoder->base;
6021
6022 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6023 DRM_MODE_ENCODER_TMDS, NULL))
6024 goto err_encoder_init;
6025
6026 intel_encoder->compute_config = intel_dp_compute_config;
6027 intel_encoder->disable = intel_disable_dp;
6028 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6029 intel_encoder->get_config = intel_dp_get_config;
6030 intel_encoder->suspend = intel_dp_encoder_suspend;
6031 if (IS_CHERRYVIEW(dev)) {
6032 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6033 intel_encoder->pre_enable = chv_pre_enable_dp;
6034 intel_encoder->enable = vlv_enable_dp;
6035 intel_encoder->post_disable = chv_post_disable_dp;
6036 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6037 } else if (IS_VALLEYVIEW(dev)) {
6038 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6039 intel_encoder->pre_enable = vlv_pre_enable_dp;
6040 intel_encoder->enable = vlv_enable_dp;
6041 intel_encoder->post_disable = vlv_post_disable_dp;
6042 } else {
6043 intel_encoder->pre_enable = g4x_pre_enable_dp;
6044 intel_encoder->enable = g4x_enable_dp;
6045 if (INTEL_INFO(dev)->gen >= 5)
6046 intel_encoder->post_disable = ilk_post_disable_dp;
6047 }
6048
6049 intel_dig_port->port = port;
6050 dev_priv->dig_port_map[port] = intel_encoder;
6051 intel_dig_port->dp.output_reg = output_reg;
6052 intel_dig_port->max_lanes = 4;
6053
6054 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
6055 if (IS_CHERRYVIEW(dev)) {
6056 if (port == PORT_D)
6057 intel_encoder->crtc_mask = 1 << 2;
6058 else
6059 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6060 } else {
6061 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6062 }
6063 intel_encoder->cloneable = 0;
6064
6065 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6066 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6067
6068 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6069 goto err_init_connector;
6070
6071 return;
6072
6073 err_init_connector:
6074 drm_encoder_cleanup(encoder);
6075 err_encoder_init:
6076 kfree(intel_connector);
6077 err_connector_alloc:
6078 kfree(intel_dig_port);
6079
6080 return;
6081 }
6082
6083 void intel_dp_mst_suspend(struct drm_device *dev)
6084 {
6085 struct drm_i915_private *dev_priv = dev->dev_private;
6086 int i;
6087
6088 /* disable MST */
6089 for (i = 0; i < I915_MAX_PORTS; i++) {
6090 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6091 if (!intel_dig_port)
6092 continue;
6093
6094 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6095 if (!intel_dig_port->dp.can_mst)
6096 continue;
6097 if (intel_dig_port->dp.is_mst)
6098 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6099 }
6100 }
6101 }
6102
6103 void intel_dp_mst_resume(struct drm_device *dev)
6104 {
6105 struct drm_i915_private *dev_priv = dev->dev_private;
6106 int i;
6107
6108 for (i = 0; i < I915_MAX_PORTS; i++) {
6109 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6110 if (!intel_dig_port)
6111 continue;
6112 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6113 int ret;
6114
6115 if (!intel_dig_port->dp.can_mst)
6116 continue;
6117
6118 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6119 if (ret != 0) {
6120 intel_dp_check_mst_status(&intel_dig_port->dp);
6121 }
6122 }
6123 }
6124 }