2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Keith Packard <keithp@keithp.com>
28 #include <linux/i2c.h>
29 #include <linux/slab.h>
30 #include <linux/export.h>
31 #include <linux/notifier.h>
32 #include <linux/reboot.h>
34 #include <drm/drm_atomic_helper.h>
35 #include <drm/drm_crtc.h>
36 #include <drm/drm_crtc_helper.h>
37 #include <drm/drm_edid.h>
38 #include "intel_drv.h"
39 #include <drm/i915_drm.h>
42 #define DP_LINK_CHECK_TIMEOUT (10 * 1000)
44 /* Compliance test status bits */
45 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
46 #define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
47 #define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
48 #define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
55 static const struct dp_link_dpll gen4_dpll
[] = {
57 { .p1
= 2, .p2
= 10, .n
= 2, .m1
= 23, .m2
= 8 } },
59 { .p1
= 1, .p2
= 10, .n
= 1, .m1
= 14, .m2
= 2 } }
62 static const struct dp_link_dpll pch_dpll
[] = {
64 { .p1
= 2, .p2
= 10, .n
= 1, .m1
= 12, .m2
= 9 } },
66 { .p1
= 1, .p2
= 10, .n
= 2, .m1
= 14, .m2
= 8 } }
69 static const struct dp_link_dpll vlv_dpll
[] = {
71 { .p1
= 3, .p2
= 2, .n
= 5, .m1
= 3, .m2
= 81 } },
73 { .p1
= 2, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 27 } }
77 * CHV supports eDP 1.4 that have more link rates.
78 * Below only provides the fixed rate but exclude variable rate.
80 static const struct dp_link_dpll chv_dpll
[] = {
82 * CHV requires to program fractional division for m2.
83 * m2 is stored in fixed point format using formula below
84 * (m2_int << 22) | m2_fraction
86 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
87 { .p1
= 4, .p2
= 2, .n
= 1, .m1
= 2, .m2
= 0x819999a } },
88 { 270000, /* m2_int = 27, m2_fraction = 0 */
89 { .p1
= 4, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } },
90 { 540000, /* m2_int = 27, m2_fraction = 0 */
91 { .p1
= 2, .p2
= 1, .n
= 1, .m1
= 2, .m2
= 0x6c00000 } }
/* Per-platform tables of source-supported DP link rates, in kHz. */
static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				 324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				 324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };
101 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
102 * @intel_dp: DP struct
104 * If a CPU or PCH DP output is attached to an eDP panel, this function
105 * will return true, and false otherwise.
107 static bool is_edp(struct intel_dp
*intel_dp
)
109 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
111 return intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
;
114 static struct drm_device
*intel_dp_to_dev(struct intel_dp
*intel_dp
)
116 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
118 return intel_dig_port
->base
.base
.dev
;
121 static struct intel_dp
*intel_attached_dp(struct drm_connector
*connector
)
123 return enc_to_intel_dp(&intel_attached_encoder(connector
)->base
);
126 static void intel_dp_link_down(struct intel_dp
*intel_dp
);
127 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
);
128 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
);
129 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
);
130 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
/*
 * Return a 4-bit mask of the lanes NOT used by a link of @lane_count
 * lanes (bit 0 = lane 0 ... bit 3 = lane 3).
 */
static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	unsigned int used = (1u << lane_count) - 1;

	return ~used & 0xf;
}
139 intel_dp_max_link_bw(struct intel_dp
*intel_dp
)
141 int max_link_bw
= intel_dp
->dpcd
[DP_MAX_LINK_RATE
];
143 switch (max_link_bw
) {
144 case DP_LINK_BW_1_62
:
149 WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
151 max_link_bw
= DP_LINK_BW_1_62
;
157 static u8
intel_dp_max_lane_count(struct intel_dp
*intel_dp
)
159 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
160 u8 source_max
, sink_max
;
162 source_max
= intel_dig_port
->max_lanes
;
163 sink_max
= drm_dp_max_lane_count(intel_dp
->dpcd
);
165 return min(source_max
, sink_max
);
169 * The units on the numbers in the next two are... bizarre. Examples will
170 * make it clearer; this one parallels an example in the eDP spec.
172 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
174 * 270000 * 1 * 8 / 10 == 216000
176 * The actual data capacity of that configuration is 2.16Gbit/s, so the
177 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
178 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
179 * 119000. At 18bpp that's 2142000 kilobits per second.
181 * Thus the strange-looking division by 10 in intel_dp_link_required, to
182 * get the result in decakilobits instead of kilobits.
/*
 * Link bandwidth required by a mode, in decakilobits per second:
 * pixel clock (kHz) times bits per pixel, divided by 10 with the "+9"
 * implementing a round-up (see the unit discussion in the comment above).
 */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	int kilobits = pixel_clock * bpp;

	return (kilobits + 9) / 10;
}
/*
 * Maximum payload the link can carry, in decakilobits per second.
 * The *8/10 accounts for 8b/10b channel coding: only 8 of every 10
 * bits on the wire carry data.
 */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	int link_rate = max_link_clock * max_lanes;

	return link_rate * 8 / 10;
}
197 static enum drm_mode_status
198 intel_dp_mode_valid(struct drm_connector
*connector
,
199 struct drm_display_mode
*mode
)
201 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
202 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
203 struct drm_display_mode
*fixed_mode
= intel_connector
->panel
.fixed_mode
;
204 int target_clock
= mode
->clock
;
205 int max_rate
, mode_rate
, max_lanes
, max_link_clock
;
206 int max_dotclk
= to_i915(connector
->dev
)->max_dotclk_freq
;
208 if (is_edp(intel_dp
) && fixed_mode
) {
209 if (mode
->hdisplay
> fixed_mode
->hdisplay
)
212 if (mode
->vdisplay
> fixed_mode
->vdisplay
)
215 target_clock
= fixed_mode
->clock
;
218 max_link_clock
= intel_dp_max_link_rate(intel_dp
);
219 max_lanes
= intel_dp_max_lane_count(intel_dp
);
221 max_rate
= intel_dp_max_data_rate(max_link_clock
, max_lanes
);
222 mode_rate
= intel_dp_link_required(target_clock
, 18);
224 if (mode_rate
> max_rate
|| target_clock
> max_dotclk
)
225 return MODE_CLOCK_HIGH
;
227 if (mode
->clock
< 10000)
228 return MODE_CLOCK_LOW
;
230 if (mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
231 return MODE_H_ILLEGAL
;
/*
 * Pack up to the first 4 bytes of @src into one big-endian 32-bit AUX
 * data-register word: src[0] lands in the most significant byte.
 *
 * @src: source byte buffer
 * @src_bytes: number of valid bytes in @src; clamped to 4 because only
 *             4 bytes fit in a register — without the clamp the shift
 *             below would go negative for i >= 4, which is undefined
 *             behaviour in C.
 */
uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3 - i) * 8);
	return v;
}
/*
 * Unpack a big-endian 32-bit AUX data-register word into @dst:
 * the most significant byte of @src becomes dst[0].
 *
 * @src: packed register value
 * @dst: destination byte buffer
 * @dst_bytes: bytes wanted; clamped to 4 since @src holds only 4 —
 *             without the clamp (3 - i) goes negative past 4 bytes,
 *             making the shift undefined behaviour.
 */
static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3 - i) * 8);
}
258 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
259 struct intel_dp
*intel_dp
);
261 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
262 struct intel_dp
*intel_dp
);
264 static void pps_lock(struct intel_dp
*intel_dp
)
266 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
267 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
268 struct drm_device
*dev
= encoder
->base
.dev
;
269 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
270 enum intel_display_power_domain power_domain
;
273 * See vlv_power_sequencer_reset() why we need
274 * a power domain reference here.
276 power_domain
= intel_display_port_aux_power_domain(encoder
);
277 intel_display_power_get(dev_priv
, power_domain
);
279 mutex_lock(&dev_priv
->pps_mutex
);
282 static void pps_unlock(struct intel_dp
*intel_dp
)
284 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
285 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
286 struct drm_device
*dev
= encoder
->base
.dev
;
287 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
288 enum intel_display_power_domain power_domain
;
290 mutex_unlock(&dev_priv
->pps_mutex
);
292 power_domain
= intel_display_port_aux_power_domain(encoder
);
293 intel_display_power_put(dev_priv
, power_domain
);
297 vlv_power_sequencer_kick(struct intel_dp
*intel_dp
)
299 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
300 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
301 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
302 enum pipe pipe
= intel_dp
->pps_pipe
;
303 bool pll_enabled
, release_cl_override
= false;
304 enum dpio_phy phy
= DPIO_PHY(pipe
);
305 enum dpio_channel ch
= vlv_pipe_to_channel(pipe
);
308 if (WARN(I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
,
309 "skipping pipe %c power seqeuncer kick due to port %c being active\n",
310 pipe_name(pipe
), port_name(intel_dig_port
->port
)))
313 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
314 pipe_name(pipe
), port_name(intel_dig_port
->port
));
316 /* Preserve the BIOS-computed detected bit. This is
317 * supposed to be read-only.
319 DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
320 DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
321 DP
|= DP_PORT_WIDTH(1);
322 DP
|= DP_LINK_TRAIN_PAT_1
;
324 if (IS_CHERRYVIEW(dev
))
325 DP
|= DP_PIPE_SELECT_CHV(pipe
);
326 else if (pipe
== PIPE_B
)
327 DP
|= DP_PIPEB_SELECT
;
329 pll_enabled
= I915_READ(DPLL(pipe
)) & DPLL_VCO_ENABLE
;
332 * The DPLL for the pipe must be enabled for this to work.
333 * So enable temporarily it if it's not already enabled.
336 release_cl_override
= IS_CHERRYVIEW(dev
) &&
337 !chv_phy_powergate_ch(dev_priv
, phy
, ch
, true);
339 if (vlv_force_pll_on(dev
, pipe
, IS_CHERRYVIEW(dev
) ?
340 &chv_dpll
[0].dpll
: &vlv_dpll
[0].dpll
)) {
341 DRM_ERROR("Failed to force on pll for pipe %c!\n",
348 * Similar magic as in intel_dp_enable_port().
349 * We _must_ do this port enable + disable trick
350 * to make this power seqeuencer lock onto the port.
351 * Otherwise even VDD force bit won't work.
353 I915_WRITE(intel_dp
->output_reg
, DP
);
354 POSTING_READ(intel_dp
->output_reg
);
356 I915_WRITE(intel_dp
->output_reg
, DP
| DP_PORT_EN
);
357 POSTING_READ(intel_dp
->output_reg
);
359 I915_WRITE(intel_dp
->output_reg
, DP
& ~DP_PORT_EN
);
360 POSTING_READ(intel_dp
->output_reg
);
363 vlv_force_pll_off(dev
, pipe
);
365 if (release_cl_override
)
366 chv_phy_powergate_ch(dev_priv
, phy
, ch
, false);
371 vlv_power_sequencer_pipe(struct intel_dp
*intel_dp
)
373 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
374 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
375 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
376 struct intel_encoder
*encoder
;
377 unsigned int pipes
= (1 << PIPE_A
) | (1 << PIPE_B
);
380 lockdep_assert_held(&dev_priv
->pps_mutex
);
382 /* We should never land here with regular DP ports */
383 WARN_ON(!is_edp(intel_dp
));
385 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
386 return intel_dp
->pps_pipe
;
389 * We don't have power sequencer currently.
390 * Pick one that's not used by other ports.
392 for_each_intel_encoder(dev
, encoder
) {
393 struct intel_dp
*tmp
;
395 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
398 tmp
= enc_to_intel_dp(&encoder
->base
);
400 if (tmp
->pps_pipe
!= INVALID_PIPE
)
401 pipes
&= ~(1 << tmp
->pps_pipe
);
405 * Didn't find one. This should not happen since there
406 * are two power sequencers and up to two eDP ports.
408 if (WARN_ON(pipes
== 0))
411 pipe
= ffs(pipes
) - 1;
413 vlv_steal_power_sequencer(dev
, pipe
);
414 intel_dp
->pps_pipe
= pipe
;
416 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
417 pipe_name(intel_dp
->pps_pipe
),
418 port_name(intel_dig_port
->port
));
420 /* init power sequencer on this pipe and port */
421 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
422 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
425 * Even vdd force doesn't work until we've made
426 * the power sequencer lock in on the port.
428 vlv_power_sequencer_kick(intel_dp
);
430 return intel_dp
->pps_pipe
;
433 typedef bool (*vlv_pipe_check
)(struct drm_i915_private
*dev_priv
,
436 static bool vlv_pipe_has_pp_on(struct drm_i915_private
*dev_priv
,
439 return I915_READ(VLV_PIPE_PP_STATUS(pipe
)) & PP_ON
;
442 static bool vlv_pipe_has_vdd_on(struct drm_i915_private
*dev_priv
,
445 return I915_READ(VLV_PIPE_PP_CONTROL(pipe
)) & EDP_FORCE_VDD
;
448 static bool vlv_pipe_any(struct drm_i915_private
*dev_priv
,
455 vlv_initial_pps_pipe(struct drm_i915_private
*dev_priv
,
457 vlv_pipe_check pipe_check
)
461 for (pipe
= PIPE_A
; pipe
<= PIPE_B
; pipe
++) {
462 u32 port_sel
= I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe
)) &
463 PANEL_PORT_SELECT_MASK
;
465 if (port_sel
!= PANEL_PORT_SELECT_VLV(port
))
468 if (!pipe_check(dev_priv
, pipe
))
478 vlv_initial_power_sequencer_setup(struct intel_dp
*intel_dp
)
480 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
481 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
482 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
483 enum port port
= intel_dig_port
->port
;
485 lockdep_assert_held(&dev_priv
->pps_mutex
);
487 /* try to find a pipe with this port selected */
488 /* first pick one where the panel is on */
489 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
491 /* didn't find one? pick one where vdd is on */
492 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
493 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
494 vlv_pipe_has_vdd_on
);
495 /* didn't find one? pick one with just the correct port */
496 if (intel_dp
->pps_pipe
== INVALID_PIPE
)
497 intel_dp
->pps_pipe
= vlv_initial_pps_pipe(dev_priv
, port
,
500 /* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
501 if (intel_dp
->pps_pipe
== INVALID_PIPE
) {
502 DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
507 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
508 port_name(port
), pipe_name(intel_dp
->pps_pipe
));
510 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
511 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
514 void vlv_power_sequencer_reset(struct drm_i915_private
*dev_priv
)
516 struct drm_device
*dev
= dev_priv
->dev
;
517 struct intel_encoder
*encoder
;
519 if (WARN_ON(!IS_VALLEYVIEW(dev
) && !IS_CHERRYVIEW(dev
)))
523 * We can't grab pps_mutex here due to deadlock with power_domain
524 * mutex when power_domain functions are called while holding pps_mutex.
525 * That also means that in order to use pps_pipe the code needs to
526 * hold both a power domain reference and pps_mutex, and the power domain
527 * reference get/put must be done while _not_ holding pps_mutex.
528 * pps_{lock,unlock}() do these steps in the correct order, so one
529 * should use them always.
532 for_each_intel_encoder(dev
, encoder
) {
533 struct intel_dp
*intel_dp
;
535 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
538 intel_dp
= enc_to_intel_dp(&encoder
->base
);
539 intel_dp
->pps_pipe
= INVALID_PIPE
;
544 _pp_ctrl_reg(struct intel_dp
*intel_dp
)
546 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
549 return BXT_PP_CONTROL(0);
550 else if (HAS_PCH_SPLIT(dev
))
551 return PCH_PP_CONTROL
;
553 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp
));
557 _pp_stat_reg(struct intel_dp
*intel_dp
)
559 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
562 return BXT_PP_STATUS(0);
563 else if (HAS_PCH_SPLIT(dev
))
564 return PCH_PP_STATUS
;
566 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp
));
569 /* Reboot notifier handler to shutdown panel power to guarantee T12 timing
570 This function only applicable when panel PM state is not to be tracked */
571 static int edp_notify_handler(struct notifier_block
*this, unsigned long code
,
574 struct intel_dp
*intel_dp
= container_of(this, typeof(* intel_dp
),
576 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
577 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
579 if (!is_edp(intel_dp
) || code
!= SYS_RESTART
)
584 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) {
585 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
586 i915_reg_t pp_ctrl_reg
, pp_div_reg
;
589 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
590 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
591 pp_div
= I915_READ(pp_div_reg
);
592 pp_div
&= PP_REFERENCE_DIVIDER_MASK
;
594 /* 0x1F write to PP_DIV_REG sets max cycle delay */
595 I915_WRITE(pp_div_reg
, pp_div
| 0x1F);
596 I915_WRITE(pp_ctrl_reg
, PANEL_UNLOCK_REGS
| PANEL_POWER_OFF
);
597 msleep(intel_dp
->panel_power_cycle_delay
);
600 pps_unlock(intel_dp
);
605 static bool edp_have_panel_power(struct intel_dp
*intel_dp
)
607 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
608 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
610 lockdep_assert_held(&dev_priv
->pps_mutex
);
612 if ((IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) &&
613 intel_dp
->pps_pipe
== INVALID_PIPE
)
616 return (I915_READ(_pp_stat_reg(intel_dp
)) & PP_ON
) != 0;
619 static bool edp_have_panel_vdd(struct intel_dp
*intel_dp
)
621 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
622 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
624 lockdep_assert_held(&dev_priv
->pps_mutex
);
626 if ((IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) &&
627 intel_dp
->pps_pipe
== INVALID_PIPE
)
630 return I915_READ(_pp_ctrl_reg(intel_dp
)) & EDP_FORCE_VDD
;
634 intel_dp_check_edp(struct intel_dp
*intel_dp
)
636 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
637 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
639 if (!is_edp(intel_dp
))
642 if (!edp_have_panel_power(intel_dp
) && !edp_have_panel_vdd(intel_dp
)) {
643 WARN(1, "eDP powered off while attempting aux channel communication.\n");
644 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
645 I915_READ(_pp_stat_reg(intel_dp
)),
646 I915_READ(_pp_ctrl_reg(intel_dp
)));
651 intel_dp_aux_wait_done(struct intel_dp
*intel_dp
, bool has_aux_irq
)
653 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
654 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
655 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
656 i915_reg_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
660 #define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
662 done
= wait_event_timeout(dev_priv
->gmbus_wait_queue
, C
,
663 msecs_to_jiffies_timeout(10));
665 done
= wait_for_atomic(C
, 10) == 0;
667 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
674 static uint32_t i9xx_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
676 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
677 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
680 * The clock divider is based off the hrawclk, and would like to run at
681 * 2MHz. So, take the hrawclk value and divide by 2 and use that
683 return index
? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev
), 2);
686 static uint32_t ilk_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
688 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
689 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
690 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
695 if (intel_dig_port
->port
== PORT_A
) {
696 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
699 return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev
), 2);
703 static uint32_t hsw_get_aux_clock_divider(struct intel_dp
*intel_dp
, int index
)
705 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
706 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
707 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
709 if (intel_dig_port
->port
== PORT_A
) {
712 return DIV_ROUND_CLOSEST(dev_priv
->cdclk_freq
, 2000);
713 } else if (HAS_PCH_LPT_H(dev_priv
)) {
714 /* Workaround for non-ULT HSW */
721 return index
? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev
), 2);
/*
 * Fixed AUX clock divider for this platform. Only one divider is
 * offered, so any index past 0 returns 0 to terminate the caller's
 * divider loop.
 */
static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	if (index)
		return 0;

	return 100;
}
static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (hardware
	 * derives the clock from CDCLK automatically). We still implement
	 * the get_aux_clock_divider vfunc — returning a dummy divider of
	 * 1 for index 0 — to plug into the existing code.
	 */
	return index == 0 ? 1 : 0;
}
740 static uint32_t i9xx_get_aux_send_ctl(struct intel_dp
*intel_dp
,
743 uint32_t aux_clock_divider
)
745 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
746 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
747 uint32_t precharge
, timeout
;
754 if (IS_BROADWELL(dev
) && intel_dig_port
->port
== PORT_A
)
755 timeout
= DP_AUX_CH_CTL_TIME_OUT_600us
;
757 timeout
= DP_AUX_CH_CTL_TIME_OUT_400us
;
759 return DP_AUX_CH_CTL_SEND_BUSY
|
761 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
762 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
764 DP_AUX_CH_CTL_RECEIVE_ERROR
|
765 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
766 (precharge
<< DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT
) |
767 (aux_clock_divider
<< DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT
);
770 static uint32_t skl_get_aux_send_ctl(struct intel_dp
*intel_dp
,
775 return DP_AUX_CH_CTL_SEND_BUSY
|
777 (has_aux_irq
? DP_AUX_CH_CTL_INTERRUPT
: 0) |
778 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
779 DP_AUX_CH_CTL_TIME_OUT_1600us
|
780 DP_AUX_CH_CTL_RECEIVE_ERROR
|
781 (send_bytes
<< DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
) |
782 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
786 intel_dp_aux_ch(struct intel_dp
*intel_dp
,
787 const uint8_t *send
, int send_bytes
,
788 uint8_t *recv
, int recv_size
)
790 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
791 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
792 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
793 i915_reg_t ch_ctl
= intel_dp
->aux_ch_ctl_reg
;
794 uint32_t aux_clock_divider
;
795 int i
, ret
, recv_bytes
;
798 bool has_aux_irq
= HAS_AUX_IRQ(dev
);
804 * We will be called with VDD already enabled for dpcd/edid/oui reads.
805 * In such cases we want to leave VDD enabled and it's up to upper layers
806 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
809 vdd
= edp_panel_vdd_on(intel_dp
);
811 /* dp aux is extremely sensitive to irq latency, hence request the
812 * lowest possible wakeup latency and so prevent the cpu from going into
815 pm_qos_update_request(&dev_priv
->pm_qos
, 0);
817 intel_dp_check_edp(intel_dp
);
819 /* Try to wait for any previous AUX channel activity */
820 for (try = 0; try < 3; try++) {
821 status
= I915_READ_NOTRACE(ch_ctl
);
822 if ((status
& DP_AUX_CH_CTL_SEND_BUSY
) == 0)
828 static u32 last_status
= -1;
829 const u32 status
= I915_READ(ch_ctl
);
831 if (status
!= last_status
) {
832 WARN(1, "dp_aux_ch not started status 0x%08x\n",
834 last_status
= status
;
841 /* Only 5 data registers! */
842 if (WARN_ON(send_bytes
> 20 || recv_size
> 20)) {
847 while ((aux_clock_divider
= intel_dp
->get_aux_clock_divider(intel_dp
, clock
++))) {
848 u32 send_ctl
= intel_dp
->get_aux_send_ctl(intel_dp
,
853 /* Must try at least 3 times according to DP spec */
854 for (try = 0; try < 5; try++) {
855 /* Load the send data into the aux channel data registers */
856 for (i
= 0; i
< send_bytes
; i
+= 4)
857 I915_WRITE(intel_dp
->aux_ch_data_reg
[i
>> 2],
858 intel_dp_pack_aux(send
+ i
,
861 /* Send the command and wait for it to complete */
862 I915_WRITE(ch_ctl
, send_ctl
);
864 status
= intel_dp_aux_wait_done(intel_dp
, has_aux_irq
);
866 /* Clear done status and any errors */
870 DP_AUX_CH_CTL_TIME_OUT_ERROR
|
871 DP_AUX_CH_CTL_RECEIVE_ERROR
);
873 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
)
876 /* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
877 * 400us delay required for errors and timeouts
878 * Timeout errors from the HW already meet this
879 * requirement so skip to next iteration
881 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
882 usleep_range(400, 500);
885 if (status
& DP_AUX_CH_CTL_DONE
)
890 if ((status
& DP_AUX_CH_CTL_DONE
) == 0) {
891 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status
);
897 /* Check for timeout or receive error.
898 * Timeouts occur when the sink is not connected
900 if (status
& DP_AUX_CH_CTL_RECEIVE_ERROR
) {
901 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status
);
906 /* Timeouts occur when the device isn't connected, so they're
907 * "normal" -- don't fill the kernel log with these */
908 if (status
& DP_AUX_CH_CTL_TIME_OUT_ERROR
) {
909 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status
);
914 /* Unload any bytes sent back from the other side */
915 recv_bytes
= ((status
& DP_AUX_CH_CTL_MESSAGE_SIZE_MASK
) >>
916 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT
);
919 * By BSpec: "Message sizes of 0 or >20 are not allowed."
920 * We have no idea of what happened so we return -EBUSY so
921 * drm layer takes care for the necessary retries.
923 if (recv_bytes
== 0 || recv_bytes
> 20) {
924 DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
927 * FIXME: This patch was created on top of a series that
928 * organize the retries at drm level. There EBUSY should
929 * also take care for 1ms wait before retrying.
930 * That aux retries re-org is still needed and after that is
931 * merged we remove this sleep from here.
933 usleep_range(1000, 1500);
938 if (recv_bytes
> recv_size
)
939 recv_bytes
= recv_size
;
941 for (i
= 0; i
< recv_bytes
; i
+= 4)
942 intel_dp_unpack_aux(I915_READ(intel_dp
->aux_ch_data_reg
[i
>> 2]),
943 recv
+ i
, recv_bytes
- i
);
947 pm_qos_update_request(&dev_priv
->pm_qos
, PM_QOS_DEFAULT_VALUE
);
950 edp_panel_vdd_off(intel_dp
, false);
952 pps_unlock(intel_dp
);
957 #define BARE_ADDRESS_SIZE 3
958 #define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)
960 intel_dp_aux_transfer(struct drm_dp_aux
*aux
, struct drm_dp_aux_msg
*msg
)
962 struct intel_dp
*intel_dp
= container_of(aux
, struct intel_dp
, aux
);
963 uint8_t txbuf
[20], rxbuf
[20];
964 size_t txsize
, rxsize
;
967 txbuf
[0] = (msg
->request
<< 4) |
968 ((msg
->address
>> 16) & 0xf);
969 txbuf
[1] = (msg
->address
>> 8) & 0xff;
970 txbuf
[2] = msg
->address
& 0xff;
971 txbuf
[3] = msg
->size
- 1;
973 switch (msg
->request
& ~DP_AUX_I2C_MOT
) {
974 case DP_AUX_NATIVE_WRITE
:
975 case DP_AUX_I2C_WRITE
:
976 case DP_AUX_I2C_WRITE_STATUS_UPDATE
:
977 txsize
= msg
->size
? HEADER_SIZE
+ msg
->size
: BARE_ADDRESS_SIZE
;
978 rxsize
= 2; /* 0 or 1 data bytes */
980 if (WARN_ON(txsize
> 20))
984 memcpy(txbuf
+ HEADER_SIZE
, msg
->buffer
, msg
->size
);
988 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
990 msg
->reply
= rxbuf
[0] >> 4;
993 /* Number of bytes written in a short write. */
994 ret
= clamp_t(int, rxbuf
[1], 0, msg
->size
);
996 /* Return payload size. */
1002 case DP_AUX_NATIVE_READ
:
1003 case DP_AUX_I2C_READ
:
1004 txsize
= msg
->size
? HEADER_SIZE
: BARE_ADDRESS_SIZE
;
1005 rxsize
= msg
->size
+ 1;
1007 if (WARN_ON(rxsize
> 20))
1010 ret
= intel_dp_aux_ch(intel_dp
, txbuf
, txsize
, rxbuf
, rxsize
);
1012 msg
->reply
= rxbuf
[0] >> 4;
1014 * Assume happy day, and copy the data. The caller is
1015 * expected to check msg->reply before touching it.
1017 * Return payload size.
1020 memcpy(msg
->buffer
, rxbuf
+ 1, ret
);
1032 static i915_reg_t
g4x_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1039 return DP_AUX_CH_CTL(port
);
1042 return DP_AUX_CH_CTL(PORT_B
);
1046 static i915_reg_t
g4x_aux_data_reg(struct drm_i915_private
*dev_priv
,
1047 enum port port
, int index
)
1053 return DP_AUX_CH_DATA(port
, index
);
1056 return DP_AUX_CH_DATA(PORT_B
, index
);
1060 static i915_reg_t
ilk_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1065 return DP_AUX_CH_CTL(port
);
1069 return PCH_DP_AUX_CH_CTL(port
);
1072 return DP_AUX_CH_CTL(PORT_A
);
1076 static i915_reg_t
ilk_aux_data_reg(struct drm_i915_private
*dev_priv
,
1077 enum port port
, int index
)
1081 return DP_AUX_CH_DATA(port
, index
);
1085 return PCH_DP_AUX_CH_DATA(port
, index
);
1088 return DP_AUX_CH_DATA(PORT_A
, index
);
1093 * On SKL we don't have Aux for port E so we rely
1094 * on VBT to set a proper alternate aux channel.
1096 static enum port
skl_porte_aux_port(struct drm_i915_private
*dev_priv
)
1098 const struct ddi_vbt_port_info
*info
=
1099 &dev_priv
->vbt
.ddi_port_info
[PORT_E
];
1101 switch (info
->alternate_aux_channel
) {
1111 MISSING_CASE(info
->alternate_aux_channel
);
1116 static i915_reg_t
skl_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1120 port
= skl_porte_aux_port(dev_priv
);
1127 return DP_AUX_CH_CTL(port
);
1130 return DP_AUX_CH_CTL(PORT_A
);
1134 static i915_reg_t
skl_aux_data_reg(struct drm_i915_private
*dev_priv
,
1135 enum port port
, int index
)
1138 port
= skl_porte_aux_port(dev_priv
);
1145 return DP_AUX_CH_DATA(port
, index
);
1148 return DP_AUX_CH_DATA(PORT_A
, index
);
1152 static i915_reg_t
intel_aux_ctl_reg(struct drm_i915_private
*dev_priv
,
1155 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1156 return skl_aux_ctl_reg(dev_priv
, port
);
1157 else if (HAS_PCH_SPLIT(dev_priv
))
1158 return ilk_aux_ctl_reg(dev_priv
, port
);
1160 return g4x_aux_ctl_reg(dev_priv
, port
);
1163 static i915_reg_t
intel_aux_data_reg(struct drm_i915_private
*dev_priv
,
1164 enum port port
, int index
)
1166 if (INTEL_INFO(dev_priv
)->gen
>= 9)
1167 return skl_aux_data_reg(dev_priv
, port
, index
);
1168 else if (HAS_PCH_SPLIT(dev_priv
))
1169 return ilk_aux_data_reg(dev_priv
, port
, index
);
1171 return g4x_aux_data_reg(dev_priv
, port
, index
);
1174 static void intel_aux_reg_init(struct intel_dp
*intel_dp
)
1176 struct drm_i915_private
*dev_priv
= to_i915(intel_dp_to_dev(intel_dp
));
1177 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1180 intel_dp
->aux_ch_ctl_reg
= intel_aux_ctl_reg(dev_priv
, port
);
1181 for (i
= 0; i
< ARRAY_SIZE(intel_dp
->aux_ch_data_reg
); i
++)
1182 intel_dp
->aux_ch_data_reg
[i
] = intel_aux_data_reg(dev_priv
, port
, i
);
1186 intel_dp_aux_fini(struct intel_dp
*intel_dp
)
1188 drm_dp_aux_unregister(&intel_dp
->aux
);
1189 kfree(intel_dp
->aux
.name
);
1193 intel_dp_aux_init(struct intel_dp
*intel_dp
, struct intel_connector
*connector
)
1195 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1196 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1197 enum port port
= intel_dig_port
->port
;
1200 intel_aux_reg_init(intel_dp
);
1202 intel_dp
->aux
.name
= kasprintf(GFP_KERNEL
, "DPDDC-%c", port_name(port
));
1203 if (!intel_dp
->aux
.name
)
1206 intel_dp
->aux
.dev
= dev
->dev
;
1207 intel_dp
->aux
.transfer
= intel_dp_aux_transfer
;
1209 DRM_DEBUG_KMS("registering %s bus for %s\n",
1211 connector
->base
.kdev
->kobj
.name
);
1213 ret
= drm_dp_aux_register(&intel_dp
->aux
);
1215 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1216 intel_dp
->aux
.name
, ret
);
1217 kfree(intel_dp
->aux
.name
);
1221 ret
= sysfs_create_link(&connector
->base
.kdev
->kobj
,
1222 &intel_dp
->aux
.ddc
.dev
.kobj
,
1223 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1225 DRM_ERROR("sysfs_create_link() for %s failed (%d)\n",
1226 intel_dp
->aux
.name
, ret
);
1227 intel_dp_aux_fini(intel_dp
);
1235 intel_dp_connector_unregister(struct intel_connector
*intel_connector
)
1237 struct intel_dp
*intel_dp
= intel_attached_dp(&intel_connector
->base
);
1239 if (!intel_connector
->mst_port
)
1240 sysfs_remove_link(&intel_connector
->base
.kdev
->kobj
,
1241 intel_dp
->aux
.ddc
.dev
.kobj
.name
);
1242 intel_connector_unregister(intel_connector
);
1246 skl_edp_set_pll_config(struct intel_crtc_state
*pipe_config
)
1250 memset(&pipe_config
->dpll_hw_state
, 0,
1251 sizeof(pipe_config
->dpll_hw_state
));
1253 pipe_config
->ddi_pll_sel
= SKL_DPLL0
;
1254 pipe_config
->dpll_hw_state
.cfgcr1
= 0;
1255 pipe_config
->dpll_hw_state
.cfgcr2
= 0;
1257 ctrl1
= DPLL_CTRL1_OVERRIDE(SKL_DPLL0
);
1258 switch (pipe_config
->port_clock
/ 2) {
1260 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810
,
1264 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350
,
1268 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700
,
1272 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620
,
1275 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1276 results in CDCLK change. Need to handle the change of CDCLK by
1277 disabling pipes and re-enabling them */
1279 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080
,
1283 ctrl1
|= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160
,
1288 pipe_config
->dpll_hw_state
.ctrl1
= ctrl1
;
1292 hsw_dp_set_ddi_pll_sel(struct intel_crtc_state
*pipe_config
)
1294 memset(&pipe_config
->dpll_hw_state
, 0,
1295 sizeof(pipe_config
->dpll_hw_state
));
1297 switch (pipe_config
->port_clock
/ 2) {
1299 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_810
;
1302 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_1350
;
1305 pipe_config
->ddi_pll_sel
= PORT_CLK_SEL_LCPLL_2700
;
1311 intel_dp_sink_rates(struct intel_dp
*intel_dp
, const int **sink_rates
)
1313 if (intel_dp
->num_sink_rates
) {
1314 *sink_rates
= intel_dp
->sink_rates
;
1315 return intel_dp
->num_sink_rates
;
1318 *sink_rates
= default_rates
;
1320 return (intel_dp_max_link_bw(intel_dp
) >> 3) + 1;
1323 bool intel_dp_source_supports_hbr2(struct intel_dp
*intel_dp
)
1325 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1326 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1328 /* WaDisableHBR2:skl */
1329 if (IS_SKL_REVID(dev
, 0, SKL_REVID_B0
))
1332 if ((IS_HASWELL(dev
) && !IS_HSW_ULX(dev
)) || IS_BROADWELL(dev
) ||
1333 (INTEL_INFO(dev
)->gen
>= 9))
1340 intel_dp_source_rates(struct intel_dp
*intel_dp
, const int **source_rates
)
1342 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
1343 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
1346 if (IS_BROXTON(dev
)) {
1347 *source_rates
= bxt_rates
;
1348 size
= ARRAY_SIZE(bxt_rates
);
1349 } else if (IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) {
1350 *source_rates
= skl_rates
;
1351 size
= ARRAY_SIZE(skl_rates
);
1353 *source_rates
= default_rates
;
1354 size
= ARRAY_SIZE(default_rates
);
1357 /* This depends on the fact that 5.4 is last value in the array */
1358 if (!intel_dp_source_supports_hbr2(intel_dp
))
1365 intel_dp_set_clock(struct intel_encoder
*encoder
,
1366 struct intel_crtc_state
*pipe_config
)
1368 struct drm_device
*dev
= encoder
->base
.dev
;
1369 const struct dp_link_dpll
*divisor
= NULL
;
1373 divisor
= gen4_dpll
;
1374 count
= ARRAY_SIZE(gen4_dpll
);
1375 } else if (HAS_PCH_SPLIT(dev
)) {
1377 count
= ARRAY_SIZE(pch_dpll
);
1378 } else if (IS_CHERRYVIEW(dev
)) {
1380 count
= ARRAY_SIZE(chv_dpll
);
1381 } else if (IS_VALLEYVIEW(dev
)) {
1383 count
= ARRAY_SIZE(vlv_dpll
);
1386 if (divisor
&& count
) {
1387 for (i
= 0; i
< count
; i
++) {
1388 if (pipe_config
->port_clock
== divisor
[i
].clock
) {
1389 pipe_config
->dpll
= divisor
[i
].dpll
;
1390 pipe_config
->clock_set
= true;
1397 static int intersect_rates(const int *source_rates
, int source_len
,
1398 const int *sink_rates
, int sink_len
,
1401 int i
= 0, j
= 0, k
= 0;
1403 while (i
< source_len
&& j
< sink_len
) {
1404 if (source_rates
[i
] == sink_rates
[j
]) {
1405 if (WARN_ON(k
>= DP_MAX_SUPPORTED_RATES
))
1407 common_rates
[k
] = source_rates
[i
];
1411 } else if (source_rates
[i
] < sink_rates
[j
]) {
/* Fill common_rates with the rates both source and sink support. */
static int intel_dp_common_rates(struct intel_dp *intel_dp,
                                 int *common_rates)
{
        const int *source_rates, *sink_rates;
        int source_len, sink_len;

        sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
        source_len = intel_dp_source_rates(intel_dp, &source_rates);

        return intersect_rates(source_rates, source_len,
                               sink_rates, sink_len,
                               common_rates);
}
/*
 * Format nelem ints into str as a ", "-separated list, truncating silently
 * when the buffer fills. str is always NUL-terminated (len must be > 0).
 */
static void snprintf_int_array(char *str, size_t len,
                               const int *array, int nelem)
{
        int i;

        str[0] = '\0';

        for (i = 0; i < nelem; i++) {
                int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

                /*
                 * Stop on output error or truncation. The explicit r < 0
                 * check avoids relying on the implicit signed->unsigned
                 * conversion of a negative snprintf return when compared
                 * against the size_t remaining length.
                 */
                if (r < 0 || (size_t)r >= len)
                        return;
                str += r;
                len -= r;
        }
}
1450 static void intel_dp_print_rates(struct intel_dp
*intel_dp
)
1452 const int *source_rates
, *sink_rates
;
1453 int source_len
, sink_len
, common_len
;
1454 int common_rates
[DP_MAX_SUPPORTED_RATES
];
1455 char str
[128]; /* FIXME: too big for stack? */
1457 if ((drm_debug
& DRM_UT_KMS
) == 0)
1460 source_len
= intel_dp_source_rates(intel_dp
, &source_rates
);
1461 snprintf_int_array(str
, sizeof(str
), source_rates
, source_len
);
1462 DRM_DEBUG_KMS("source rates: %s\n", str
);
1464 sink_len
= intel_dp_sink_rates(intel_dp
, &sink_rates
);
1465 snprintf_int_array(str
, sizeof(str
), sink_rates
, sink_len
);
1466 DRM_DEBUG_KMS("sink rates: %s\n", str
);
1468 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1469 snprintf_int_array(str
, sizeof(str
), common_rates
, common_len
);
1470 DRM_DEBUG_KMS("common rates: %s\n", str
);
1473 static int rate_to_index(int find
, const int *rates
)
1477 for (i
= 0; i
< DP_MAX_SUPPORTED_RATES
; ++i
)
1478 if (find
== rates
[i
])
1485 intel_dp_max_link_rate(struct intel_dp
*intel_dp
)
1487 int rates
[DP_MAX_SUPPORTED_RATES
] = {};
1490 len
= intel_dp_common_rates(intel_dp
, rates
);
1491 if (WARN_ON(len
<= 0))
1494 return rates
[rate_to_index(0, rates
) - 1];
1497 int intel_dp_rate_select(struct intel_dp
*intel_dp
, int rate
)
1499 return rate_to_index(rate
, intel_dp
->sink_rates
);
1502 void intel_dp_compute_rate(struct intel_dp
*intel_dp
, int port_clock
,
1503 uint8_t *link_bw
, uint8_t *rate_select
)
1505 if (intel_dp
->num_sink_rates
) {
1508 intel_dp_rate_select(intel_dp
, port_clock
);
1510 *link_bw
= drm_dp_link_rate_to_bw_code(port_clock
);
1516 intel_dp_compute_config(struct intel_encoder
*encoder
,
1517 struct intel_crtc_state
*pipe_config
)
1519 struct drm_device
*dev
= encoder
->base
.dev
;
1520 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1521 struct drm_display_mode
*adjusted_mode
= &pipe_config
->base
.adjusted_mode
;
1522 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1523 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1524 struct intel_crtc
*intel_crtc
= to_intel_crtc(pipe_config
->base
.crtc
);
1525 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
1526 int lane_count
, clock
;
1527 int min_lane_count
= 1;
1528 int max_lane_count
= intel_dp_max_lane_count(intel_dp
);
1529 /* Conveniently, the link BW constants become indices with a shift...*/
1533 int link_avail
, link_clock
;
1534 int common_rates
[DP_MAX_SUPPORTED_RATES
] = {};
1536 uint8_t link_bw
, rate_select
;
1538 common_len
= intel_dp_common_rates(intel_dp
, common_rates
);
1540 /* No common link rates between source and sink */
1541 WARN_ON(common_len
<= 0);
1543 max_clock
= common_len
- 1;
1545 if (HAS_PCH_SPLIT(dev
) && !HAS_DDI(dev
) && port
!= PORT_A
)
1546 pipe_config
->has_pch_encoder
= true;
1548 pipe_config
->has_dp_encoder
= true;
1549 pipe_config
->has_drrs
= false;
1550 pipe_config
->has_audio
= intel_dp
->has_audio
&& port
!= PORT_A
;
1552 if (is_edp(intel_dp
) && intel_connector
->panel
.fixed_mode
) {
1553 intel_fixed_panel_mode(intel_connector
->panel
.fixed_mode
,
1556 if (INTEL_INFO(dev
)->gen
>= 9) {
1558 ret
= skl_update_scaler_crtc(pipe_config
);
1563 if (HAS_GMCH_DISPLAY(dev
))
1564 intel_gmch_panel_fitting(intel_crtc
, pipe_config
,
1565 intel_connector
->panel
.fitting_mode
);
1567 intel_pch_panel_fitting(intel_crtc
, pipe_config
,
1568 intel_connector
->panel
.fitting_mode
);
1571 if (adjusted_mode
->flags
& DRM_MODE_FLAG_DBLCLK
)
1574 DRM_DEBUG_KMS("DP link computation with max lane count %i "
1575 "max bw %d pixel clock %iKHz\n",
1576 max_lane_count
, common_rates
[max_clock
],
1577 adjusted_mode
->crtc_clock
);
1579 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
1580 * bpc in between. */
1581 bpp
= pipe_config
->pipe_bpp
;
1582 if (is_edp(intel_dp
)) {
1584 /* Get bpp from vbt only for panels that dont have bpp in edid */
1585 if (intel_connector
->base
.display_info
.bpc
== 0 &&
1586 (dev_priv
->vbt
.edp_bpp
&& dev_priv
->vbt
.edp_bpp
< bpp
)) {
1587 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1588 dev_priv
->vbt
.edp_bpp
);
1589 bpp
= dev_priv
->vbt
.edp_bpp
;
1593 * Use the maximum clock and number of lanes the eDP panel
1594 * advertizes being capable of. The panels are generally
1595 * designed to support only a single clock and lane
1596 * configuration, and typically these values correspond to the
1597 * native resolution of the panel.
1599 min_lane_count
= max_lane_count
;
1600 min_clock
= max_clock
;
1603 for (; bpp
>= 6*3; bpp
-= 2*3) {
1604 mode_rate
= intel_dp_link_required(adjusted_mode
->crtc_clock
,
1607 for (clock
= min_clock
; clock
<= max_clock
; clock
++) {
1608 for (lane_count
= min_lane_count
;
1609 lane_count
<= max_lane_count
;
1612 link_clock
= common_rates
[clock
];
1613 link_avail
= intel_dp_max_data_rate(link_clock
,
1616 if (mode_rate
<= link_avail
) {
1626 if (intel_dp
->color_range_auto
) {
1629 * CEA-861-E - 5.1 Default Encoding Parameters
1630 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
1632 pipe_config
->limited_color_range
=
1633 bpp
!= 18 && drm_match_cea_mode(adjusted_mode
) > 1;
1635 pipe_config
->limited_color_range
=
1636 intel_dp
->limited_color_range
;
1639 pipe_config
->lane_count
= lane_count
;
1641 pipe_config
->pipe_bpp
= bpp
;
1642 pipe_config
->port_clock
= common_rates
[clock
];
1644 intel_dp_compute_rate(intel_dp
, pipe_config
->port_clock
,
1645 &link_bw
, &rate_select
);
1647 DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
1648 link_bw
, rate_select
, pipe_config
->lane_count
,
1649 pipe_config
->port_clock
, bpp
);
1650 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
1651 mode_rate
, link_avail
);
1653 intel_link_compute_m_n(bpp
, lane_count
,
1654 adjusted_mode
->crtc_clock
,
1655 pipe_config
->port_clock
,
1656 &pipe_config
->dp_m_n
);
1658 if (intel_connector
->panel
.downclock_mode
!= NULL
&&
1659 dev_priv
->drrs
.type
== SEAMLESS_DRRS_SUPPORT
) {
1660 pipe_config
->has_drrs
= true;
1661 intel_link_compute_m_n(bpp
, lane_count
,
1662 intel_connector
->panel
.downclock_mode
->clock
,
1663 pipe_config
->port_clock
,
1664 &pipe_config
->dp_m2_n2
);
1667 if ((IS_SKYLAKE(dev
) || IS_KABYLAKE(dev
)) && is_edp(intel_dp
))
1668 skl_edp_set_pll_config(pipe_config
);
1669 else if (IS_BROXTON(dev
))
1670 /* handled in ddi */;
1671 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
1672 hsw_dp_set_ddi_pll_sel(pipe_config
);
1674 intel_dp_set_clock(encoder
, pipe_config
);
1679 void intel_dp_set_link_params(struct intel_dp
*intel_dp
,
1680 const struct intel_crtc_state
*pipe_config
)
1682 intel_dp
->link_rate
= pipe_config
->port_clock
;
1683 intel_dp
->lane_count
= pipe_config
->lane_count
;
1686 static void intel_dp_prepare(struct intel_encoder
*encoder
)
1688 struct drm_device
*dev
= encoder
->base
.dev
;
1689 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1690 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
1691 enum port port
= dp_to_dig_port(intel_dp
)->port
;
1692 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
1693 const struct drm_display_mode
*adjusted_mode
= &crtc
->config
->base
.adjusted_mode
;
1695 intel_dp_set_link_params(intel_dp
, crtc
->config
);
1698 * There are four kinds of DP registers:
1705 * IBX PCH and CPU are the same for almost everything,
1706 * except that the CPU DP PLL is configured in this
1709 * CPT PCH is quite different, having many bits moved
1710 * to the TRANS_DP_CTL register instead. That
1711 * configuration happens (oddly) in ironlake_pch_enable
1714 /* Preserve the BIOS-computed detected bit. This is
1715 * supposed to be read-only.
1717 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
) & DP_DETECTED
;
1719 /* Handle DP bits in common between all three register formats */
1720 intel_dp
->DP
|= DP_VOLTAGE_0_4
| DP_PRE_EMPHASIS_0
;
1721 intel_dp
->DP
|= DP_PORT_WIDTH(crtc
->config
->lane_count
);
1723 /* Split out the IBX/CPU vs CPT settings */
1725 if (IS_GEN7(dev
) && port
== PORT_A
) {
1726 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1727 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1728 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1729 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1730 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1732 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1733 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1735 intel_dp
->DP
|= crtc
->pipe
<< 29;
1736 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
1739 intel_dp
->DP
|= DP_LINK_TRAIN_OFF_CPT
;
1741 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
1742 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1743 trans_dp
|= TRANS_DP_ENH_FRAMING
;
1745 trans_dp
&= ~TRANS_DP_ENH_FRAMING
;
1746 I915_WRITE(TRANS_DP_CTL(crtc
->pipe
), trans_dp
);
1748 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
1749 !IS_CHERRYVIEW(dev
) && crtc
->config
->limited_color_range
)
1750 intel_dp
->DP
|= DP_COLOR_RANGE_16_235
;
1752 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PHSYNC
)
1753 intel_dp
->DP
|= DP_SYNC_HS_HIGH
;
1754 if (adjusted_mode
->flags
& DRM_MODE_FLAG_PVSYNC
)
1755 intel_dp
->DP
|= DP_SYNC_VS_HIGH
;
1756 intel_dp
->DP
|= DP_LINK_TRAIN_OFF
;
1758 if (drm_dp_enhanced_frame_cap(intel_dp
->dpcd
))
1759 intel_dp
->DP
|= DP_ENHANCED_FRAMING
;
1761 if (IS_CHERRYVIEW(dev
))
1762 intel_dp
->DP
|= DP_PIPE_SELECT_CHV(crtc
->pipe
);
1763 else if (crtc
->pipe
== PIPE_B
)
1764 intel_dp
->DP
|= DP_PIPEB_SELECT
;
/* Panel power sequencer status masks/values used by wait_panel_*(). */
#define IDLE_ON_MASK    (PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   (PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK   (PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE  (0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE        (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1777 static void wait_panel_status(struct intel_dp
*intel_dp
,
1781 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1782 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1783 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1785 lockdep_assert_held(&dev_priv
->pps_mutex
);
1787 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1788 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1790 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1792 I915_READ(pp_stat_reg
),
1793 I915_READ(pp_ctrl_reg
));
1795 if (_wait_for((I915_READ(pp_stat_reg
) & mask
) == value
, 5000, 10)) {
1796 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1797 I915_READ(pp_stat_reg
),
1798 I915_READ(pp_ctrl_reg
));
1801 DRM_DEBUG_KMS("Wait complete\n");
1804 static void wait_panel_on(struct intel_dp
*intel_dp
)
1806 DRM_DEBUG_KMS("Wait for panel power on\n");
1807 wait_panel_status(intel_dp
, IDLE_ON_MASK
, IDLE_ON_VALUE
);
1810 static void wait_panel_off(struct intel_dp
*intel_dp
)
1812 DRM_DEBUG_KMS("Wait for panel power off time\n");
1813 wait_panel_status(intel_dp
, IDLE_OFF_MASK
, IDLE_OFF_VALUE
);
1816 static void wait_panel_power_cycle(struct intel_dp
*intel_dp
)
1818 ktime_t panel_power_on_time
;
1819 s64 panel_power_off_duration
;
1821 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1823 /* take the difference of currrent time and panel power off time
1824 * and then make panel wait for t11_t12 if needed. */
1825 panel_power_on_time
= ktime_get_boottime();
1826 panel_power_off_duration
= ktime_ms_delta(panel_power_on_time
, intel_dp
->panel_power_off_time
);
1828 /* When we disable the VDD override bit last we have to do the manual
1830 if (panel_power_off_duration
< (s64
)intel_dp
->panel_power_cycle_delay
)
1831 wait_remaining_ms_from_jiffies(jiffies
,
1832 intel_dp
->panel_power_cycle_delay
- panel_power_off_duration
);
1834 wait_panel_status(intel_dp
, IDLE_CYCLE_MASK
, IDLE_CYCLE_VALUE
);
1837 static void wait_backlight_on(struct intel_dp
*intel_dp
)
1839 wait_remaining_ms_from_jiffies(intel_dp
->last_power_on
,
1840 intel_dp
->backlight_on_delay
);
1843 static void edp_wait_backlight_off(struct intel_dp
*intel_dp
)
1845 wait_remaining_ms_from_jiffies(intel_dp
->last_backlight_off
,
1846 intel_dp
->backlight_off_delay
);
1849 /* Read the current pp_control value, unlocking the register if it
1853 static u32
ironlake_get_pp_control(struct intel_dp
*intel_dp
)
1855 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1856 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1859 lockdep_assert_held(&dev_priv
->pps_mutex
);
1861 control
= I915_READ(_pp_ctrl_reg(intel_dp
));
1862 if (!IS_BROXTON(dev
)) {
1863 control
&= ~PANEL_UNLOCK_MASK
;
1864 control
|= PANEL_UNLOCK_REGS
;
1870 * Must be paired with edp_panel_vdd_off().
1871 * Must hold pps_mutex around the whole on/off sequence.
1872 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1874 static bool edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1876 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1877 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
1878 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1879 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1880 enum intel_display_power_domain power_domain
;
1882 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1883 bool need_to_disable
= !intel_dp
->want_panel_vdd
;
1885 lockdep_assert_held(&dev_priv
->pps_mutex
);
1887 if (!is_edp(intel_dp
))
1890 cancel_delayed_work(&intel_dp
->panel_vdd_work
);
1891 intel_dp
->want_panel_vdd
= true;
1893 if (edp_have_panel_vdd(intel_dp
))
1894 return need_to_disable
;
1896 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
1897 intel_display_power_get(dev_priv
, power_domain
);
1899 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1900 port_name(intel_dig_port
->port
));
1902 if (!edp_have_panel_power(intel_dp
))
1903 wait_panel_power_cycle(intel_dp
);
1905 pp
= ironlake_get_pp_control(intel_dp
);
1906 pp
|= EDP_FORCE_VDD
;
1908 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1909 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1911 I915_WRITE(pp_ctrl_reg
, pp
);
1912 POSTING_READ(pp_ctrl_reg
);
1913 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1914 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1916 * If the panel wasn't on, delay before accessing aux channel
1918 if (!edp_have_panel_power(intel_dp
)) {
1919 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1920 port_name(intel_dig_port
->port
));
1921 msleep(intel_dp
->panel_power_up_delay
);
1924 return need_to_disable
;
1928 * Must be paired with intel_edp_panel_vdd_off() or
1929 * intel_edp_panel_off().
1930 * Nested calls to these functions are not allowed since
1931 * we drop the lock. Caller must use some higher level
1932 * locking to prevent nested calls from other threads.
1934 void intel_edp_panel_vdd_on(struct intel_dp
*intel_dp
)
1938 if (!is_edp(intel_dp
))
1942 vdd
= edp_panel_vdd_on(intel_dp
);
1943 pps_unlock(intel_dp
);
1945 I915_STATE_WARN(!vdd
, "eDP port %c VDD already requested on\n",
1946 port_name(dp_to_dig_port(intel_dp
)->port
));
1949 static void edp_panel_vdd_off_sync(struct intel_dp
*intel_dp
)
1951 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
1952 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
1953 struct intel_digital_port
*intel_dig_port
=
1954 dp_to_dig_port(intel_dp
);
1955 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
1956 enum intel_display_power_domain power_domain
;
1958 i915_reg_t pp_stat_reg
, pp_ctrl_reg
;
1960 lockdep_assert_held(&dev_priv
->pps_mutex
);
1962 WARN_ON(intel_dp
->want_panel_vdd
);
1964 if (!edp_have_panel_vdd(intel_dp
))
1967 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1968 port_name(intel_dig_port
->port
));
1970 pp
= ironlake_get_pp_control(intel_dp
);
1971 pp
&= ~EDP_FORCE_VDD
;
1973 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
1974 pp_stat_reg
= _pp_stat_reg(intel_dp
);
1976 I915_WRITE(pp_ctrl_reg
, pp
);
1977 POSTING_READ(pp_ctrl_reg
);
1979 /* Make sure sequencer is idle before allowing subsequent activity */
1980 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1981 I915_READ(pp_stat_reg
), I915_READ(pp_ctrl_reg
));
1983 if ((pp
& POWER_TARGET_ON
) == 0)
1984 intel_dp
->panel_power_off_time
= ktime_get_boottime();
1986 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
1987 intel_display_power_put(dev_priv
, power_domain
);
1990 static void edp_panel_vdd_work(struct work_struct
*__work
)
1992 struct intel_dp
*intel_dp
= container_of(to_delayed_work(__work
),
1993 struct intel_dp
, panel_vdd_work
);
1996 if (!intel_dp
->want_panel_vdd
)
1997 edp_panel_vdd_off_sync(intel_dp
);
1998 pps_unlock(intel_dp
);
2001 static void edp_panel_vdd_schedule_off(struct intel_dp
*intel_dp
)
2003 unsigned long delay
;
2006 * Queue the timer to fire a long time from now (relative to the power
2007 * down delay) to keep the panel power up across a sequence of
2010 delay
= msecs_to_jiffies(intel_dp
->panel_power_cycle_delay
* 5);
2011 schedule_delayed_work(&intel_dp
->panel_vdd_work
, delay
);
2015 * Must be paired with edp_panel_vdd_on().
2016 * Must hold pps_mutex around the whole on/off sequence.
2017 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2019 static void edp_panel_vdd_off(struct intel_dp
*intel_dp
, bool sync
)
2021 struct drm_i915_private
*dev_priv
=
2022 intel_dp_to_dev(intel_dp
)->dev_private
;
2024 lockdep_assert_held(&dev_priv
->pps_mutex
);
2026 if (!is_edp(intel_dp
))
2029 I915_STATE_WARN(!intel_dp
->want_panel_vdd
, "eDP port %c VDD not forced on",
2030 port_name(dp_to_dig_port(intel_dp
)->port
));
2032 intel_dp
->want_panel_vdd
= false;
2035 edp_panel_vdd_off_sync(intel_dp
);
2037 edp_panel_vdd_schedule_off(intel_dp
);
2040 static void edp_panel_on(struct intel_dp
*intel_dp
)
2042 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2043 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2045 i915_reg_t pp_ctrl_reg
;
2047 lockdep_assert_held(&dev_priv
->pps_mutex
);
2049 if (!is_edp(intel_dp
))
2052 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2053 port_name(dp_to_dig_port(intel_dp
)->port
));
2055 if (WARN(edp_have_panel_power(intel_dp
),
2056 "eDP port %c panel power already on\n",
2057 port_name(dp_to_dig_port(intel_dp
)->port
)))
2060 wait_panel_power_cycle(intel_dp
);
2062 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2063 pp
= ironlake_get_pp_control(intel_dp
);
2065 /* ILK workaround: disable reset around power sequence */
2066 pp
&= ~PANEL_POWER_RESET
;
2067 I915_WRITE(pp_ctrl_reg
, pp
);
2068 POSTING_READ(pp_ctrl_reg
);
2071 pp
|= POWER_TARGET_ON
;
2073 pp
|= PANEL_POWER_RESET
;
2075 I915_WRITE(pp_ctrl_reg
, pp
);
2076 POSTING_READ(pp_ctrl_reg
);
2078 wait_panel_on(intel_dp
);
2079 intel_dp
->last_power_on
= jiffies
;
2082 pp
|= PANEL_POWER_RESET
; /* restore panel reset bit */
2083 I915_WRITE(pp_ctrl_reg
, pp
);
2084 POSTING_READ(pp_ctrl_reg
);
/* Locked wrapper around edp_panel_on(); no-op on non-eDP. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_on(intel_dp);
        pps_unlock(intel_dp);
}
2099 static void edp_panel_off(struct intel_dp
*intel_dp
)
2101 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2102 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
2103 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2104 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2105 enum intel_display_power_domain power_domain
;
2107 i915_reg_t pp_ctrl_reg
;
2109 lockdep_assert_held(&dev_priv
->pps_mutex
);
2111 if (!is_edp(intel_dp
))
2114 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2115 port_name(dp_to_dig_port(intel_dp
)->port
));
2117 WARN(!intel_dp
->want_panel_vdd
, "Need eDP port %c VDD to turn off panel\n",
2118 port_name(dp_to_dig_port(intel_dp
)->port
));
2120 pp
= ironlake_get_pp_control(intel_dp
);
2121 /* We need to switch off panel power _and_ force vdd, for otherwise some
2122 * panels get very unhappy and cease to work. */
2123 pp
&= ~(POWER_TARGET_ON
| PANEL_POWER_RESET
| EDP_FORCE_VDD
|
2126 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2128 intel_dp
->want_panel_vdd
= false;
2130 I915_WRITE(pp_ctrl_reg
, pp
);
2131 POSTING_READ(pp_ctrl_reg
);
2133 intel_dp
->panel_power_off_time
= ktime_get_boottime();
2134 wait_panel_off(intel_dp
);
2136 /* We got a reference when we enabled the VDD. */
2137 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
2138 intel_display_power_put(dev_priv
, power_domain
);
/* Locked wrapper around edp_panel_off(); no-op on non-eDP. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
        if (!is_edp(intel_dp))
                return;

        pps_lock(intel_dp);
        edp_panel_off(intel_dp);
        pps_unlock(intel_dp);
}
2151 /* Enable backlight in the panel power control. */
2152 static void _intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2154 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2155 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2156 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2158 i915_reg_t pp_ctrl_reg
;
2161 * If we enable the backlight right away following a panel power
2162 * on, we may see slight flicker as the panel syncs with the eDP
2163 * link. So delay a bit to make sure the image is solid before
2164 * allowing it to appear.
2166 wait_backlight_on(intel_dp
);
2170 pp
= ironlake_get_pp_control(intel_dp
);
2171 pp
|= EDP_BLC_ENABLE
;
2173 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2175 I915_WRITE(pp_ctrl_reg
, pp
);
2176 POSTING_READ(pp_ctrl_reg
);
2178 pps_unlock(intel_dp
);
2181 /* Enable backlight PWM and backlight PP control. */
2182 void intel_edp_backlight_on(struct intel_dp
*intel_dp
)
2184 if (!is_edp(intel_dp
))
2187 DRM_DEBUG_KMS("\n");
2189 intel_panel_enable_backlight(intel_dp
->attached_connector
);
2190 _intel_edp_backlight_on(intel_dp
);
2193 /* Disable backlight in the panel power control. */
2194 static void _intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2196 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2197 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2199 i915_reg_t pp_ctrl_reg
;
2201 if (!is_edp(intel_dp
))
2206 pp
= ironlake_get_pp_control(intel_dp
);
2207 pp
&= ~EDP_BLC_ENABLE
;
2209 pp_ctrl_reg
= _pp_ctrl_reg(intel_dp
);
2211 I915_WRITE(pp_ctrl_reg
, pp
);
2212 POSTING_READ(pp_ctrl_reg
);
2214 pps_unlock(intel_dp
);
2216 intel_dp
->last_backlight_off
= jiffies
;
2217 edp_wait_backlight_off(intel_dp
);
2220 /* Disable backlight PP control and backlight PWM. */
2221 void intel_edp_backlight_off(struct intel_dp
*intel_dp
)
2223 if (!is_edp(intel_dp
))
2226 DRM_DEBUG_KMS("\n");
2228 _intel_edp_backlight_off(intel_dp
);
2229 intel_panel_disable_backlight(intel_dp
->attached_connector
);
2233 * Hook for controlling the panel power control backlight through the bl_power
2234 * sysfs attribute. Take care to handle multiple calls.
2236 static void intel_edp_backlight_power(struct intel_connector
*connector
,
2239 struct intel_dp
*intel_dp
= intel_attached_dp(&connector
->base
);
2243 is_enabled
= ironlake_get_pp_control(intel_dp
) & EDP_BLC_ENABLE
;
2244 pps_unlock(intel_dp
);
2246 if (is_enabled
== enable
)
2249 DRM_DEBUG_KMS("panel power control backlight %s\n",
2250 enable
? "enable" : "disable");
2253 _intel_edp_backlight_on(intel_dp
);
2255 _intel_edp_backlight_off(intel_dp
);
2258 static void assert_dp_port(struct intel_dp
*intel_dp
, bool state
)
2260 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
2261 struct drm_i915_private
*dev_priv
= to_i915(dig_port
->base
.base
.dev
);
2262 bool cur_state
= I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
;
2264 I915_STATE_WARN(cur_state
!= state
,
2265 "DP port %c state assertion failure (expected %s, current %s)\n",
2266 port_name(dig_port
->port
),
2267 onoff(state
), onoff(cur_state
));
2269 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2271 static void assert_edp_pll(struct drm_i915_private
*dev_priv
, bool state
)
2273 bool cur_state
= I915_READ(DP_A
) & DP_PLL_ENABLE
;
2275 I915_STATE_WARN(cur_state
!= state
,
2276 "eDP PLL state assertion failure (expected %s, current %s)\n",
2277 onoff(state
), onoff(cur_state
));
2279 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2280 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2282 static void ironlake_edp_pll_on(struct intel_dp
*intel_dp
)
2284 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2285 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2286 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2288 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2289 assert_dp_port_disabled(intel_dp
);
2290 assert_edp_pll_disabled(dev_priv
);
2292 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2293 crtc
->config
->port_clock
);
2295 intel_dp
->DP
&= ~DP_PLL_FREQ_MASK
;
2297 if (crtc
->config
->port_clock
== 162000)
2298 intel_dp
->DP
|= DP_PLL_FREQ_162MHZ
;
2300 intel_dp
->DP
|= DP_PLL_FREQ_270MHZ
;
2302 I915_WRITE(DP_A
, intel_dp
->DP
);
2306 intel_dp
->DP
|= DP_PLL_ENABLE
;
2308 I915_WRITE(DP_A
, intel_dp
->DP
);
2313 static void ironlake_edp_pll_off(struct intel_dp
*intel_dp
)
2315 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2316 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
2317 struct drm_i915_private
*dev_priv
= to_i915(crtc
->base
.dev
);
2319 assert_pipe_disabled(dev_priv
, crtc
->pipe
);
2320 assert_dp_port_disabled(intel_dp
);
2321 assert_edp_pll_enabled(dev_priv
);
2323 DRM_DEBUG_KMS("disabling eDP PLL\n");
2325 intel_dp
->DP
&= ~DP_PLL_ENABLE
;
2327 I915_WRITE(DP_A
, intel_dp
->DP
);
2332 /* If the sink supports it, try to set the power state appropriately */
2333 void intel_dp_sink_dpms(struct intel_dp
*intel_dp
, int mode
)
2337 /* Should have a valid DPCD by this point */
2338 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x11)
2341 if (mode
!= DRM_MODE_DPMS_ON
) {
2342 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2346 * When turning on, we need to retry for 1ms to give the sink
2349 for (i
= 0; i
< 3; i
++) {
2350 ret
= drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_SET_POWER
,
2359 DRM_DEBUG_KMS("failed to %s sink power state\n",
2360 mode
== DRM_MODE_DPMS_ON
? "enable" : "disable");
2363 static bool intel_dp_get_hw_state(struct intel_encoder
*encoder
,
2366 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2367 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2368 struct drm_device
*dev
= encoder
->base
.dev
;
2369 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2370 enum intel_display_power_domain power_domain
;
2373 power_domain
= intel_display_port_power_domain(encoder
);
2374 if (!intel_display_power_is_enabled(dev_priv
, power_domain
))
2377 tmp
= I915_READ(intel_dp
->output_reg
);
2379 if (!(tmp
& DP_PORT_EN
))
2382 if (IS_GEN7(dev
) && port
== PORT_A
) {
2383 *pipe
= PORT_TO_PIPE_CPT(tmp
);
2384 } else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2387 for_each_pipe(dev_priv
, p
) {
2388 u32 trans_dp
= I915_READ(TRANS_DP_CTL(p
));
2389 if (TRANS_DP_PIPE_TO_PORT(trans_dp
) == port
) {
2395 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2396 i915_mmio_reg_offset(intel_dp
->output_reg
));
2397 } else if (IS_CHERRYVIEW(dev
)) {
2398 *pipe
= DP_PORT_TO_PIPE_CHV(tmp
);
2400 *pipe
= PORT_TO_PIPE(tmp
);
2406 static void intel_dp_get_config(struct intel_encoder
*encoder
,
2407 struct intel_crtc_state
*pipe_config
)
2409 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2411 struct drm_device
*dev
= encoder
->base
.dev
;
2412 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2413 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2414 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2417 tmp
= I915_READ(intel_dp
->output_reg
);
2419 pipe_config
->has_audio
= tmp
& DP_AUDIO_OUTPUT_ENABLE
&& port
!= PORT_A
;
2421 if (HAS_PCH_CPT(dev
) && port
!= PORT_A
) {
2422 u32 trans_dp
= I915_READ(TRANS_DP_CTL(crtc
->pipe
));
2424 if (trans_dp
& TRANS_DP_HSYNC_ACTIVE_HIGH
)
2425 flags
|= DRM_MODE_FLAG_PHSYNC
;
2427 flags
|= DRM_MODE_FLAG_NHSYNC
;
2429 if (trans_dp
& TRANS_DP_VSYNC_ACTIVE_HIGH
)
2430 flags
|= DRM_MODE_FLAG_PVSYNC
;
2432 flags
|= DRM_MODE_FLAG_NVSYNC
;
2434 if (tmp
& DP_SYNC_HS_HIGH
)
2435 flags
|= DRM_MODE_FLAG_PHSYNC
;
2437 flags
|= DRM_MODE_FLAG_NHSYNC
;
2439 if (tmp
& DP_SYNC_VS_HIGH
)
2440 flags
|= DRM_MODE_FLAG_PVSYNC
;
2442 flags
|= DRM_MODE_FLAG_NVSYNC
;
2445 pipe_config
->base
.adjusted_mode
.flags
|= flags
;
2447 if (!HAS_PCH_SPLIT(dev
) && !IS_VALLEYVIEW(dev
) &&
2448 !IS_CHERRYVIEW(dev
) && tmp
& DP_COLOR_RANGE_16_235
)
2449 pipe_config
->limited_color_range
= true;
2451 pipe_config
->has_dp_encoder
= true;
2453 pipe_config
->lane_count
=
2454 ((tmp
& DP_PORT_WIDTH_MASK
) >> DP_PORT_WIDTH_SHIFT
) + 1;
2456 intel_dp_get_m_n(crtc
, pipe_config
);
2458 if (port
== PORT_A
) {
2459 if ((I915_READ(DP_A
) & DP_PLL_FREQ_MASK
) == DP_PLL_FREQ_162MHZ
)
2460 pipe_config
->port_clock
= 162000;
2462 pipe_config
->port_clock
= 270000;
2465 dotclock
= intel_dotclock_calculate(pipe_config
->port_clock
,
2466 &pipe_config
->dp_m_n
);
2468 if (HAS_PCH_SPLIT(dev_priv
->dev
) && port
!= PORT_A
)
2469 ironlake_check_encoder_dotclock(pipe_config
, dotclock
);
2471 pipe_config
->base
.adjusted_mode
.crtc_clock
= dotclock
;
2473 if (is_edp(intel_dp
) && dev_priv
->vbt
.edp_bpp
&&
2474 pipe_config
->pipe_bpp
> dev_priv
->vbt
.edp_bpp
) {
2476 * This is a big fat ugly hack.
2478 * Some machines in UEFI boot mode provide us a VBT that has 18
2479 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2480 * unknown we fail to light up. Yet the same BIOS boots up with
2481 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2482 * max, not what it tells us to use.
2484 * Note: This will still be broken if the eDP panel is not lit
2485 * up by the BIOS, and thus we can't get the mode at module
2488 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2489 pipe_config
->pipe_bpp
, dev_priv
->vbt
.edp_bpp
);
2490 dev_priv
->vbt
.edp_bpp
= pipe_config
->pipe_bpp
;
2494 static void intel_disable_dp(struct intel_encoder
*encoder
)
2496 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2497 struct drm_device
*dev
= encoder
->base
.dev
;
2498 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2500 if (crtc
->config
->has_audio
)
2501 intel_audio_codec_disable(encoder
);
2503 if (HAS_PSR(dev
) && !HAS_DDI(dev
))
2504 intel_psr_disable(intel_dp
);
2506 /* Make sure the panel is off before trying to change the mode. But also
2507 * ensure that we have vdd while we switch off the panel. */
2508 intel_edp_panel_vdd_on(intel_dp
);
2509 intel_edp_backlight_off(intel_dp
);
2510 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_OFF
);
2511 intel_edp_panel_off(intel_dp
);
2513 /* disable the port before the pipe on g4x */
2514 if (INTEL_INFO(dev
)->gen
< 5)
2515 intel_dp_link_down(intel_dp
);
2518 static void ilk_post_disable_dp(struct intel_encoder
*encoder
)
2520 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2521 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2523 intel_dp_link_down(intel_dp
);
2525 /* Only ilk+ has port A */
2527 ironlake_edp_pll_off(intel_dp
);
2530 static void vlv_post_disable_dp(struct intel_encoder
*encoder
)
2532 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2534 intel_dp_link_down(intel_dp
);
2537 static void chv_data_lane_soft_reset(struct intel_encoder
*encoder
,
2540 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
2541 enum dpio_channel ch
= vlv_dport_to_channel(enc_to_dig_port(&encoder
->base
));
2542 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2543 enum pipe pipe
= crtc
->pipe
;
2546 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW0(ch
));
2548 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2550 val
|= DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
;
2551 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW0(ch
), val
);
2553 if (crtc
->config
->lane_count
> 2) {
2554 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW0(ch
));
2556 val
&= ~(DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
);
2558 val
|= DPIO_PCS_TX_LANE2_RESET
| DPIO_PCS_TX_LANE1_RESET
;
2559 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW0(ch
), val
);
2562 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW1(ch
));
2563 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2565 val
&= ~DPIO_PCS_CLK_SOFT_RESET
;
2567 val
|= DPIO_PCS_CLK_SOFT_RESET
;
2568 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW1(ch
), val
);
2570 if (crtc
->config
->lane_count
> 2) {
2571 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW1(ch
));
2572 val
|= CHV_PCS_REQ_SOFTRESET_EN
;
2574 val
&= ~DPIO_PCS_CLK_SOFT_RESET
;
2576 val
|= DPIO_PCS_CLK_SOFT_RESET
;
2577 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW1(ch
), val
);
2581 static void chv_post_disable_dp(struct intel_encoder
*encoder
)
2583 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2584 struct drm_device
*dev
= encoder
->base
.dev
;
2585 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2587 intel_dp_link_down(intel_dp
);
2589 mutex_lock(&dev_priv
->sb_lock
);
2591 /* Assert data lane reset */
2592 chv_data_lane_soft_reset(encoder
, true);
2594 mutex_unlock(&dev_priv
->sb_lock
);
2598 _intel_dp_set_link_train(struct intel_dp
*intel_dp
,
2600 uint8_t dp_train_pat
)
2602 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2603 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
2604 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2605 enum port port
= intel_dig_port
->port
;
2608 uint32_t temp
= I915_READ(DP_TP_CTL(port
));
2610 if (dp_train_pat
& DP_LINK_SCRAMBLING_DISABLE
)
2611 temp
|= DP_TP_CTL_SCRAMBLE_DISABLE
;
2613 temp
&= ~DP_TP_CTL_SCRAMBLE_DISABLE
;
2615 temp
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
2616 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2617 case DP_TRAINING_PATTERN_DISABLE
:
2618 temp
|= DP_TP_CTL_LINK_TRAIN_NORMAL
;
2621 case DP_TRAINING_PATTERN_1
:
2622 temp
|= DP_TP_CTL_LINK_TRAIN_PAT1
;
2624 case DP_TRAINING_PATTERN_2
:
2625 temp
|= DP_TP_CTL_LINK_TRAIN_PAT2
;
2627 case DP_TRAINING_PATTERN_3
:
2628 temp
|= DP_TP_CTL_LINK_TRAIN_PAT3
;
2631 I915_WRITE(DP_TP_CTL(port
), temp
);
2633 } else if ((IS_GEN7(dev
) && port
== PORT_A
) ||
2634 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
2635 *DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
2637 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2638 case DP_TRAINING_PATTERN_DISABLE
:
2639 *DP
|= DP_LINK_TRAIN_OFF_CPT
;
2641 case DP_TRAINING_PATTERN_1
:
2642 *DP
|= DP_LINK_TRAIN_PAT_1_CPT
;
2644 case DP_TRAINING_PATTERN_2
:
2645 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2647 case DP_TRAINING_PATTERN_3
:
2648 DRM_ERROR("DP training pattern 3 not supported\n");
2649 *DP
|= DP_LINK_TRAIN_PAT_2_CPT
;
2654 if (IS_CHERRYVIEW(dev
))
2655 *DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
2657 *DP
&= ~DP_LINK_TRAIN_MASK
;
2659 switch (dp_train_pat
& DP_TRAINING_PATTERN_MASK
) {
2660 case DP_TRAINING_PATTERN_DISABLE
:
2661 *DP
|= DP_LINK_TRAIN_OFF
;
2663 case DP_TRAINING_PATTERN_1
:
2664 *DP
|= DP_LINK_TRAIN_PAT_1
;
2666 case DP_TRAINING_PATTERN_2
:
2667 *DP
|= DP_LINK_TRAIN_PAT_2
;
2669 case DP_TRAINING_PATTERN_3
:
2670 if (IS_CHERRYVIEW(dev
)) {
2671 *DP
|= DP_LINK_TRAIN_PAT_3_CHV
;
2673 DRM_ERROR("DP training pattern 3 not supported\n");
2674 *DP
|= DP_LINK_TRAIN_PAT_2
;
2681 static void intel_dp_enable_port(struct intel_dp
*intel_dp
)
2683 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
2684 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2685 struct intel_crtc
*crtc
=
2686 to_intel_crtc(dp_to_dig_port(intel_dp
)->base
.base
.crtc
);
2688 /* enable with pattern 1 (as per spec) */
2689 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
,
2690 DP_TRAINING_PATTERN_1
);
2692 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2693 POSTING_READ(intel_dp
->output_reg
);
2696 * Magic for VLV/CHV. We _must_ first set up the register
2697 * without actually enabling the port, and then do another
2698 * write to enable the port. Otherwise link training will
2699 * fail when the power sequencer is freshly used for this port.
2701 intel_dp
->DP
|= DP_PORT_EN
;
2702 if (crtc
->config
->has_audio
)
2703 intel_dp
->DP
|= DP_AUDIO_OUTPUT_ENABLE
;
2705 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
2706 POSTING_READ(intel_dp
->output_reg
);
2709 static void intel_enable_dp(struct intel_encoder
*encoder
)
2711 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2712 struct drm_device
*dev
= encoder
->base
.dev
;
2713 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2714 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2715 uint32_t dp_reg
= I915_READ(intel_dp
->output_reg
);
2716 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2717 enum pipe pipe
= crtc
->pipe
;
2719 if (WARN_ON(dp_reg
& DP_PORT_EN
))
2724 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
2725 vlv_init_panel_power_sequencer(intel_dp
);
2728 * We get an occasional spurious underrun between the port
2729 * enable and vdd enable, when enabling port A eDP.
2731 * FIXME: Not sure if this applies to (PCH) port D eDP as well
2734 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, false);
2736 intel_dp_enable_port(intel_dp
);
2738 if (port
== PORT_A
&& IS_GEN5(dev_priv
)) {
2740 * Underrun reporting for the other pipe was disabled in
2741 * g4x_pre_enable_dp(). The eDP PLL and port have now been
2742 * enabled, so it's now safe to re-enable underrun reporting.
2744 intel_wait_for_vblank_if_active(dev_priv
->dev
, !pipe
);
2745 intel_set_cpu_fifo_underrun_reporting(dev_priv
, !pipe
, true);
2746 intel_set_pch_fifo_underrun_reporting(dev_priv
, !pipe
, true);
2749 edp_panel_vdd_on(intel_dp
);
2750 edp_panel_on(intel_dp
);
2751 edp_panel_vdd_off(intel_dp
, true);
2754 intel_set_cpu_fifo_underrun_reporting(dev_priv
, pipe
, true);
2756 pps_unlock(intel_dp
);
2758 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) {
2759 unsigned int lane_mask
= 0x0;
2761 if (IS_CHERRYVIEW(dev
))
2762 lane_mask
= intel_dp_unused_lane_mask(crtc
->config
->lane_count
);
2764 vlv_wait_port_ready(dev_priv
, dp_to_dig_port(intel_dp
),
2768 intel_dp_sink_dpms(intel_dp
, DRM_MODE_DPMS_ON
);
2769 intel_dp_start_link_train(intel_dp
);
2770 intel_dp_stop_link_train(intel_dp
);
2772 if (crtc
->config
->has_audio
) {
2773 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2775 intel_audio_codec_enable(encoder
);
2779 static void g4x_enable_dp(struct intel_encoder
*encoder
)
2781 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2783 intel_enable_dp(encoder
);
2784 intel_edp_backlight_on(intel_dp
);
2787 static void vlv_enable_dp(struct intel_encoder
*encoder
)
2789 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2791 intel_edp_backlight_on(intel_dp
);
2792 intel_psr_enable(intel_dp
);
2795 static void g4x_pre_enable_dp(struct intel_encoder
*encoder
)
2797 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
2798 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2799 enum port port
= dp_to_dig_port(intel_dp
)->port
;
2800 enum pipe pipe
= to_intel_crtc(encoder
->base
.crtc
)->pipe
;
2802 intel_dp_prepare(encoder
);
2804 if (port
== PORT_A
&& IS_GEN5(dev_priv
)) {
2806 * We get FIFO underruns on the other pipe when
2807 * enabling the CPU eDP PLL, and when enabling CPU
2808 * eDP port. We could potentially avoid the PLL
2809 * underrun with a vblank wait just prior to enabling
2810 * the PLL, but that doesn't appear to help the port
2811 * enable case. Just sweep it all under the rug.
2813 intel_set_cpu_fifo_underrun_reporting(dev_priv
, !pipe
, false);
2814 intel_set_pch_fifo_underrun_reporting(dev_priv
, !pipe
, false);
2817 /* Only ilk+ has port A */
2819 ironlake_edp_pll_on(intel_dp
);
2822 static void vlv_detach_power_sequencer(struct intel_dp
*intel_dp
)
2824 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2825 struct drm_i915_private
*dev_priv
= intel_dig_port
->base
.base
.dev
->dev_private
;
2826 enum pipe pipe
= intel_dp
->pps_pipe
;
2827 i915_reg_t pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
2829 edp_panel_vdd_off_sync(intel_dp
);
2832 * VLV seems to get confused when multiple power seqeuencers
2833 * have the same port selected (even if only one has power/vdd
2834 * enabled). The failure manifests as vlv_wait_port_ready() failing
2835 * CHV on the other hand doesn't seem to mind having the same port
2836 * selected in multiple power seqeuencers, but let's clear the
2837 * port select always when logically disconnecting a power sequencer
2840 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2841 pipe_name(pipe
), port_name(intel_dig_port
->port
));
2842 I915_WRITE(pp_on_reg
, 0);
2843 POSTING_READ(pp_on_reg
);
2845 intel_dp
->pps_pipe
= INVALID_PIPE
;
2848 static void vlv_steal_power_sequencer(struct drm_device
*dev
,
2851 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2852 struct intel_encoder
*encoder
;
2854 lockdep_assert_held(&dev_priv
->pps_mutex
);
2856 if (WARN_ON(pipe
!= PIPE_A
&& pipe
!= PIPE_B
))
2859 for_each_intel_encoder(dev
, encoder
) {
2860 struct intel_dp
*intel_dp
;
2863 if (encoder
->type
!= INTEL_OUTPUT_EDP
)
2866 intel_dp
= enc_to_intel_dp(&encoder
->base
);
2867 port
= dp_to_dig_port(intel_dp
)->port
;
2869 if (intel_dp
->pps_pipe
!= pipe
)
2872 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2873 pipe_name(pipe
), port_name(port
));
2875 WARN(encoder
->base
.crtc
,
2876 "stealing pipe %c power sequencer from active eDP port %c\n",
2877 pipe_name(pipe
), port_name(port
));
2879 /* make sure vdd is off before we steal it */
2880 vlv_detach_power_sequencer(intel_dp
);
2884 static void vlv_init_panel_power_sequencer(struct intel_dp
*intel_dp
)
2886 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
2887 struct intel_encoder
*encoder
= &intel_dig_port
->base
;
2888 struct drm_device
*dev
= encoder
->base
.dev
;
2889 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2890 struct intel_crtc
*crtc
= to_intel_crtc(encoder
->base
.crtc
);
2892 lockdep_assert_held(&dev_priv
->pps_mutex
);
2894 if (!is_edp(intel_dp
))
2897 if (intel_dp
->pps_pipe
== crtc
->pipe
)
2901 * If another power sequencer was being used on this
2902 * port previously make sure to turn off vdd there while
2903 * we still have control of it.
2905 if (intel_dp
->pps_pipe
!= INVALID_PIPE
)
2906 vlv_detach_power_sequencer(intel_dp
);
2909 * We may be stealing the power
2910 * sequencer from another port.
2912 vlv_steal_power_sequencer(dev
, crtc
->pipe
);
2914 /* now it's all ours */
2915 intel_dp
->pps_pipe
= crtc
->pipe
;
2917 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2918 pipe_name(intel_dp
->pps_pipe
), port_name(intel_dig_port
->port
));
2920 /* init power sequencer on this pipe and port */
2921 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
2922 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
2925 static void vlv_pre_enable_dp(struct intel_encoder
*encoder
)
2927 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2928 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2929 struct drm_device
*dev
= encoder
->base
.dev
;
2930 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2931 struct intel_crtc
*intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
2932 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2933 int pipe
= intel_crtc
->pipe
;
2936 mutex_lock(&dev_priv
->sb_lock
);
2938 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(port
));
2945 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW8(port
), val
);
2946 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW14(port
), 0x00760018);
2947 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW23(port
), 0x00400888);
2949 mutex_unlock(&dev_priv
->sb_lock
);
2951 intel_enable_dp(encoder
);
2954 static void vlv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
2956 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
2957 struct drm_device
*dev
= encoder
->base
.dev
;
2958 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2959 struct intel_crtc
*intel_crtc
=
2960 to_intel_crtc(encoder
->base
.crtc
);
2961 enum dpio_channel port
= vlv_dport_to_channel(dport
);
2962 int pipe
= intel_crtc
->pipe
;
2964 intel_dp_prepare(encoder
);
2966 /* Program Tx lane resets to default */
2967 mutex_lock(&dev_priv
->sb_lock
);
2968 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW0(port
),
2969 DPIO_PCS_TX_LANE2_RESET
|
2970 DPIO_PCS_TX_LANE1_RESET
);
2971 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW1(port
),
2972 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN
|
2973 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN
|
2974 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT
) |
2975 DPIO_PCS_CLK_SOFT_RESET
);
2977 /* Fix up inter-pair skew failure */
2978 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW12(port
), 0x00750f00);
2979 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW11(port
), 0x00001500);
2980 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW14(port
), 0x40400000);
2981 mutex_unlock(&dev_priv
->sb_lock
);
2984 static void chv_pre_enable_dp(struct intel_encoder
*encoder
)
2986 struct intel_dp
*intel_dp
= enc_to_intel_dp(&encoder
->base
);
2987 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
2988 struct drm_device
*dev
= encoder
->base
.dev
;
2989 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
2990 struct intel_crtc
*intel_crtc
=
2991 to_intel_crtc(encoder
->base
.crtc
);
2992 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
2993 int pipe
= intel_crtc
->pipe
;
2994 int data
, i
, stagger
;
2997 mutex_lock(&dev_priv
->sb_lock
);
2999 /* allow hardware to manage TX FIFO reset source */
3000 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
3001 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
3002 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
3004 if (intel_crtc
->config
->lane_count
> 2) {
3005 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
3006 val
&= ~DPIO_LANEDESKEW_STRAP_OVRD
;
3007 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
3010 /* Program Tx lane latency optimal setting*/
3011 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3012 /* Set the upar bit */
3013 if (intel_crtc
->config
->lane_count
== 1)
3016 data
= (i
== 1) ? 0x0 : 0x1;
3017 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW14(ch
, i
),
3018 data
<< DPIO_UPAR_SHIFT
);
3021 /* Data lane stagger programming */
3022 if (intel_crtc
->config
->port_clock
> 270000)
3024 else if (intel_crtc
->config
->port_clock
> 135000)
3026 else if (intel_crtc
->config
->port_clock
> 67500)
3028 else if (intel_crtc
->config
->port_clock
> 33750)
3033 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW11(ch
));
3034 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
3035 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW11(ch
), val
);
3037 if (intel_crtc
->config
->lane_count
> 2) {
3038 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW11(ch
));
3039 val
|= DPIO_TX2_STAGGER_MASK(0x1f);
3040 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW11(ch
), val
);
3043 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW12(ch
),
3044 DPIO_LANESTAGGER_STRAP(stagger
) |
3045 DPIO_LANESTAGGER_STRAP_OVRD
|
3046 DPIO_TX1_STAGGER_MASK(0x1f) |
3047 DPIO_TX1_STAGGER_MULT(6) |
3048 DPIO_TX2_STAGGER_MULT(0));
3050 if (intel_crtc
->config
->lane_count
> 2) {
3051 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW12(ch
),
3052 DPIO_LANESTAGGER_STRAP(stagger
) |
3053 DPIO_LANESTAGGER_STRAP_OVRD
|
3054 DPIO_TX1_STAGGER_MASK(0x1f) |
3055 DPIO_TX1_STAGGER_MULT(7) |
3056 DPIO_TX2_STAGGER_MULT(5));
3059 /* Deassert data lane reset */
3060 chv_data_lane_soft_reset(encoder
, false);
3062 mutex_unlock(&dev_priv
->sb_lock
);
3064 intel_enable_dp(encoder
);
3066 /* Second common lane will stay alive on its own now */
3067 if (dport
->release_cl2_override
) {
3068 chv_phy_powergate_ch(dev_priv
, DPIO_PHY0
, DPIO_CH1
, false);
3069 dport
->release_cl2_override
= false;
3073 static void chv_dp_pre_pll_enable(struct intel_encoder
*encoder
)
3075 struct intel_digital_port
*dport
= enc_to_dig_port(&encoder
->base
);
3076 struct drm_device
*dev
= encoder
->base
.dev
;
3077 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3078 struct intel_crtc
*intel_crtc
=
3079 to_intel_crtc(encoder
->base
.crtc
);
3080 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3081 enum pipe pipe
= intel_crtc
->pipe
;
3082 unsigned int lane_mask
=
3083 intel_dp_unused_lane_mask(intel_crtc
->config
->lane_count
);
3086 intel_dp_prepare(encoder
);
3089 * Must trick the second common lane into life.
3090 * Otherwise we can't even access the PLL.
3092 if (ch
== DPIO_CH0
&& pipe
== PIPE_B
)
3093 dport
->release_cl2_override
=
3094 !chv_phy_powergate_ch(dev_priv
, DPIO_PHY0
, DPIO_CH1
, true);
3096 chv_phy_powergate_lanes(encoder
, true, lane_mask
);
3098 mutex_lock(&dev_priv
->sb_lock
);
3100 /* Assert data lane reset */
3101 chv_data_lane_soft_reset(encoder
, true);
3103 /* program left/right clock distribution */
3104 if (pipe
!= PIPE_B
) {
3105 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
3106 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
3108 val
|= CHV_BUFLEFTENA1_FORCE
;
3110 val
|= CHV_BUFRIGHTENA1_FORCE
;
3111 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
3113 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
3114 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
3116 val
|= CHV_BUFLEFTENA2_FORCE
;
3118 val
|= CHV_BUFRIGHTENA2_FORCE
;
3119 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
3122 /* program clock channel usage */
3123 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW8(ch
));
3124 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
3126 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
3128 val
|= CHV_PCS_USEDCLKCHANNEL
;
3129 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW8(ch
), val
);
3131 if (intel_crtc
->config
->lane_count
> 2) {
3132 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW8(ch
));
3133 val
|= CHV_PCS_USEDCLKCHANNEL_OVRRIDE
;
3135 val
&= ~CHV_PCS_USEDCLKCHANNEL
;
3137 val
|= CHV_PCS_USEDCLKCHANNEL
;
3138 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW8(ch
), val
);
3142 * This a a bit weird since generally CL
3143 * matches the pipe, but here we need to
3144 * pick the CL based on the port.
3146 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_CMN_DW19(ch
));
3148 val
&= ~CHV_CMN_USEDCLKCHANNEL
;
3150 val
|= CHV_CMN_USEDCLKCHANNEL
;
3151 vlv_dpio_write(dev_priv
, pipe
, CHV_CMN_DW19(ch
), val
);
3153 mutex_unlock(&dev_priv
->sb_lock
);
3156 static void chv_dp_post_pll_disable(struct intel_encoder
*encoder
)
3158 struct drm_i915_private
*dev_priv
= to_i915(encoder
->base
.dev
);
3159 enum pipe pipe
= to_intel_crtc(encoder
->base
.crtc
)->pipe
;
3162 mutex_lock(&dev_priv
->sb_lock
);
3164 /* disable left/right clock distribution */
3165 if (pipe
!= PIPE_B
) {
3166 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
);
3167 val
&= ~(CHV_BUFLEFTENA1_MASK
| CHV_BUFRIGHTENA1_MASK
);
3168 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW5_CH0
, val
);
3170 val
= vlv_dpio_read(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
);
3171 val
&= ~(CHV_BUFLEFTENA2_MASK
| CHV_BUFRIGHTENA2_MASK
);
3172 vlv_dpio_write(dev_priv
, pipe
, _CHV_CMN_DW1_CH1
, val
);
3175 mutex_unlock(&dev_priv
->sb_lock
);
3178 * Leave the power down bit cleared for at least one
3179 * lane so that chv_powergate_phy_ch() will power
3180 * on something when the channel is otherwise unused.
3181 * When the port is off and the override is removed
3182 * the lanes power down anyway, so otherwise it doesn't
3183 * really matter what the state of power down bits is
3186 chv_phy_powergate_lanes(encoder
, false, 0x0);
3190 * Native read with retry for link status and receiver capability reads for
3191 * cases where the sink may still be asleep.
3193 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3194 * supposed to retry 3 times per the spec.
3197 intel_dp_dpcd_read_wake(struct drm_dp_aux
*aux
, unsigned int offset
,
3198 void *buffer
, size_t size
)
3204 * Sometime we just get the same incorrect byte repeated
3205 * over the entire buffer. Doing just one throw away read
3206 * initially seems to "solve" it.
3208 drm_dp_dpcd_read(aux
, DP_DPCD_REV
, buffer
, 1);
3210 for (i
= 0; i
< 3; i
++) {
3211 ret
= drm_dp_dpcd_read(aux
, offset
, buffer
, size
);
3221 * Fetch AUX CH registers 0x202 - 0x207 which contain
3222 * link status information
3225 intel_dp_get_link_status(struct intel_dp
*intel_dp
, uint8_t link_status
[DP_LINK_STATUS_SIZE
])
3227 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3230 DP_LINK_STATUS_SIZE
) == DP_LINK_STATUS_SIZE
;
3233 /* These are source-specific values. */
3235 intel_dp_voltage_max(struct intel_dp
*intel_dp
)
3237 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3238 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3239 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3241 if (IS_BROXTON(dev
))
3242 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3243 else if (INTEL_INFO(dev
)->gen
>= 9) {
3244 if (dev_priv
->edp_low_vswing
&& port
== PORT_A
)
3245 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3246 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3247 } else if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
3248 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3249 else if (IS_GEN7(dev
) && port
== PORT_A
)
3250 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3251 else if (HAS_PCH_CPT(dev
) && port
!= PORT_A
)
3252 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3254 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2
;
3258 intel_dp_pre_emphasis_max(struct intel_dp
*intel_dp
, uint8_t voltage_swing
)
3260 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3261 enum port port
= dp_to_dig_port(intel_dp
)->port
;
3263 if (INTEL_INFO(dev
)->gen
>= 9) {
3264 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3265 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3266 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3267 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3268 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3269 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3270 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3272 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3274 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3276 } else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
)) {
3277 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3278 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3279 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3280 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3281 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3282 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3283 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3284 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3286 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3288 } else if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) {
3289 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3290 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3291 return DP_TRAIN_PRE_EMPH_LEVEL_3
;
3292 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3293 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3295 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3296 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3298 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3300 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3301 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3303 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3305 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3306 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3308 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3311 switch (voltage_swing
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3313 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3314 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3315 return DP_TRAIN_PRE_EMPH_LEVEL_2
;
3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3317 return DP_TRAIN_PRE_EMPH_LEVEL_1
;
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3320 return DP_TRAIN_PRE_EMPH_LEVEL_0
;
3325 static uint32_t vlv_signal_levels(struct intel_dp
*intel_dp
)
3327 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3328 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3329 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3330 struct intel_crtc
*intel_crtc
=
3331 to_intel_crtc(dport
->base
.base
.crtc
);
3332 unsigned long demph_reg_value
, preemph_reg_value
,
3333 uniqtranscale_reg_value
;
3334 uint8_t train_set
= intel_dp
->train_set
[0];
3335 enum dpio_channel port
= vlv_dport_to_channel(dport
);
3336 int pipe
= intel_crtc
->pipe
;
3338 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3339 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3340 preemph_reg_value
= 0x0004000;
3341 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3342 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3343 demph_reg_value
= 0x2B405555;
3344 uniqtranscale_reg_value
= 0x552AB83A;
3346 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3347 demph_reg_value
= 0x2B404040;
3348 uniqtranscale_reg_value
= 0x5548B83A;
3350 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3351 demph_reg_value
= 0x2B245555;
3352 uniqtranscale_reg_value
= 0x5560B83A;
3354 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3355 demph_reg_value
= 0x2B405555;
3356 uniqtranscale_reg_value
= 0x5598DA3A;
3362 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3363 preemph_reg_value
= 0x0002000;
3364 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3365 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3366 demph_reg_value
= 0x2B404040;
3367 uniqtranscale_reg_value
= 0x5552B83A;
3369 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3370 demph_reg_value
= 0x2B404848;
3371 uniqtranscale_reg_value
= 0x5580B83A;
3373 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3374 demph_reg_value
= 0x2B404040;
3375 uniqtranscale_reg_value
= 0x55ADDA3A;
3381 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3382 preemph_reg_value
= 0x0000000;
3383 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3384 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3385 demph_reg_value
= 0x2B305555;
3386 uniqtranscale_reg_value
= 0x5570B83A;
3388 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3389 demph_reg_value
= 0x2B2B4040;
3390 uniqtranscale_reg_value
= 0x55ADDA3A;
3396 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3397 preemph_reg_value
= 0x0006000;
3398 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3399 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3400 demph_reg_value
= 0x1B405555;
3401 uniqtranscale_reg_value
= 0x55ADDA3A;
3411 mutex_lock(&dev_priv
->sb_lock
);
3412 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x00000000);
3413 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW4(port
), demph_reg_value
);
3414 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW2(port
),
3415 uniqtranscale_reg_value
);
3416 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW3(port
), 0x0C782040);
3417 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW11(port
), 0x00030000);
3418 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS_DW9(port
), preemph_reg_value
);
3419 vlv_dpio_write(dev_priv
, pipe
, VLV_TX_DW5(port
), 0x80000000);
3420 mutex_unlock(&dev_priv
->sb_lock
);
3425 static bool chv_need_uniq_trans_scale(uint8_t train_set
)
3427 return (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) == DP_TRAIN_PRE_EMPH_LEVEL_0
&&
3428 (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3
;
3431 static uint32_t chv_signal_levels(struct intel_dp
*intel_dp
)
3433 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
3434 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3435 struct intel_digital_port
*dport
= dp_to_dig_port(intel_dp
);
3436 struct intel_crtc
*intel_crtc
= to_intel_crtc(dport
->base
.base
.crtc
);
3437 u32 deemph_reg_value
, margin_reg_value
, val
;
3438 uint8_t train_set
= intel_dp
->train_set
[0];
3439 enum dpio_channel ch
= vlv_dport_to_channel(dport
);
3440 enum pipe pipe
= intel_crtc
->pipe
;
3443 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3444 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3445 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3446 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3447 deemph_reg_value
= 128;
3448 margin_reg_value
= 52;
3450 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3451 deemph_reg_value
= 128;
3452 margin_reg_value
= 77;
3454 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3455 deemph_reg_value
= 128;
3456 margin_reg_value
= 102;
3458 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3459 deemph_reg_value
= 128;
3460 margin_reg_value
= 154;
3461 /* FIXME extra to set for 1200 */
3467 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3468 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3469 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3470 deemph_reg_value
= 85;
3471 margin_reg_value
= 78;
3473 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3474 deemph_reg_value
= 85;
3475 margin_reg_value
= 116;
3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3478 deemph_reg_value
= 85;
3479 margin_reg_value
= 154;
3485 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3486 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3487 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3488 deemph_reg_value
= 64;
3489 margin_reg_value
= 104;
3491 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3492 deemph_reg_value
= 64;
3493 margin_reg_value
= 154;
3499 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3500 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3501 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3502 deemph_reg_value
= 43;
3503 margin_reg_value
= 154;
3513 mutex_lock(&dev_priv
->sb_lock
);
3515 /* Clear calc init */
3516 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3517 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3518 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3519 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3520 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3522 if (intel_crtc
->config
->lane_count
> 2) {
3523 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3524 val
&= ~(DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
);
3525 val
&= ~(DPIO_PCS_TX1DEEMP_MASK
| DPIO_PCS_TX2DEEMP_MASK
);
3526 val
|= DPIO_PCS_TX1DEEMP_9P5
| DPIO_PCS_TX2DEEMP_9P5
;
3527 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3530 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW9(ch
));
3531 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3532 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3533 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW9(ch
), val
);
3535 if (intel_crtc
->config
->lane_count
> 2) {
3536 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW9(ch
));
3537 val
&= ~(DPIO_PCS_TX1MARGIN_MASK
| DPIO_PCS_TX2MARGIN_MASK
);
3538 val
|= DPIO_PCS_TX1MARGIN_000
| DPIO_PCS_TX2MARGIN_000
;
3539 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW9(ch
), val
);
3542 /* Program swing deemph */
3543 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3544 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
));
3545 val
&= ~DPIO_SWING_DEEMPH9P5_MASK
;
3546 val
|= deemph_reg_value
<< DPIO_SWING_DEEMPH9P5_SHIFT
;
3547 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW4(ch
, i
), val
);
3550 /* Program swing margin */
3551 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3552 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
));
3554 val
&= ~DPIO_SWING_MARGIN000_MASK
;
3555 val
|= margin_reg_value
<< DPIO_SWING_MARGIN000_SHIFT
;
3558 * Supposedly this value shouldn't matter when unique transition
3559 * scale is disabled, but in fact it does matter. Let's just
3560 * always program the same value and hope it's OK.
3562 val
&= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT
);
3563 val
|= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT
;
3565 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW2(ch
, i
), val
);
3569 * The document said it needs to set bit 27 for ch0 and bit 26
3570 * for ch1. Might be a typo in the doc.
3571 * For now, for this unique transition scale selection, set bit
3572 * 27 for ch0 and ch1.
3574 for (i
= 0; i
< intel_crtc
->config
->lane_count
; i
++) {
3575 val
= vlv_dpio_read(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
));
3576 if (chv_need_uniq_trans_scale(train_set
))
3577 val
|= DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3579 val
&= ~DPIO_TX_UNIQ_TRANS_SCALE_EN
;
3580 vlv_dpio_write(dev_priv
, pipe
, CHV_TX_DW3(ch
, i
), val
);
3583 /* Start swing calculation */
3584 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS01_DW10(ch
));
3585 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3586 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS01_DW10(ch
), val
);
3588 if (intel_crtc
->config
->lane_count
> 2) {
3589 val
= vlv_dpio_read(dev_priv
, pipe
, VLV_PCS23_DW10(ch
));
3590 val
|= DPIO_PCS_SWING_CALC_TX0_TX2
| DPIO_PCS_SWING_CALC_TX1_TX3
;
3591 vlv_dpio_write(dev_priv
, pipe
, VLV_PCS23_DW10(ch
), val
);
3594 mutex_unlock(&dev_priv
->sb_lock
);
3600 gen4_signal_levels(uint8_t train_set
)
3602 uint32_t signal_levels
= 0;
3604 switch (train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
) {
3605 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
:
3607 signal_levels
|= DP_VOLTAGE_0_4
;
3609 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
:
3610 signal_levels
|= DP_VOLTAGE_0_6
;
3612 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
:
3613 signal_levels
|= DP_VOLTAGE_0_8
;
3615 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
:
3616 signal_levels
|= DP_VOLTAGE_1_2
;
3619 switch (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) {
3620 case DP_TRAIN_PRE_EMPH_LEVEL_0
:
3622 signal_levels
|= DP_PRE_EMPHASIS_0
;
3624 case DP_TRAIN_PRE_EMPH_LEVEL_1
:
3625 signal_levels
|= DP_PRE_EMPHASIS_3_5
;
3627 case DP_TRAIN_PRE_EMPH_LEVEL_2
:
3628 signal_levels
|= DP_PRE_EMPHASIS_6
;
3630 case DP_TRAIN_PRE_EMPH_LEVEL_3
:
3631 signal_levels
|= DP_PRE_EMPHASIS_9_5
;
3634 return signal_levels
;
3637 /* Gen6's DP voltage swing and pre-emphasis control */
3639 gen6_edp_signal_levels(uint8_t train_set
)
3641 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3642 DP_TRAIN_PRE_EMPHASIS_MASK
);
3643 switch (signal_levels
) {
3644 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3645 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3646 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3647 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3648 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B
;
3649 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3650 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3651 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B
;
3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3653 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3654 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B
;
3655 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3657 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B
;
3659 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3660 "0x%x\n", signal_levels
);
3661 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B
;
3665 /* Gen7's DP voltage swing and pre-emphasis control */
3667 gen7_edp_signal_levels(uint8_t train_set
)
3669 int signal_levels
= train_set
& (DP_TRAIN_VOLTAGE_SWING_MASK
|
3670 DP_TRAIN_PRE_EMPHASIS_MASK
);
3671 switch (signal_levels
) {
3672 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3673 return EDP_LINK_TRAIN_400MV_0DB_IVB
;
3674 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3675 return EDP_LINK_TRAIN_400MV_3_5DB_IVB
;
3676 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0
| DP_TRAIN_PRE_EMPH_LEVEL_2
:
3677 return EDP_LINK_TRAIN_400MV_6DB_IVB
;
3679 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3680 return EDP_LINK_TRAIN_600MV_0DB_IVB
;
3681 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3682 return EDP_LINK_TRAIN_600MV_3_5DB_IVB
;
3684 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_0
:
3685 return EDP_LINK_TRAIN_800MV_0DB_IVB
;
3686 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2
| DP_TRAIN_PRE_EMPH_LEVEL_1
:
3687 return EDP_LINK_TRAIN_800MV_3_5DB_IVB
;
3690 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3691 "0x%x\n", signal_levels
);
3692 return EDP_LINK_TRAIN_500MV_0DB_IVB
;
3697 intel_dp_set_signal_levels(struct intel_dp
*intel_dp
)
3699 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3700 enum port port
= intel_dig_port
->port
;
3701 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3702 struct drm_i915_private
*dev_priv
= to_i915(dev
);
3703 uint32_t signal_levels
, mask
= 0;
3704 uint8_t train_set
= intel_dp
->train_set
[0];
3707 signal_levels
= ddi_signal_levels(intel_dp
);
3709 if (IS_BROXTON(dev
))
3712 mask
= DDI_BUF_EMP_MASK
;
3713 } else if (IS_CHERRYVIEW(dev
)) {
3714 signal_levels
= chv_signal_levels(intel_dp
);
3715 } else if (IS_VALLEYVIEW(dev
)) {
3716 signal_levels
= vlv_signal_levels(intel_dp
);
3717 } else if (IS_GEN7(dev
) && port
== PORT_A
) {
3718 signal_levels
= gen7_edp_signal_levels(train_set
);
3719 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_IVB
;
3720 } else if (IS_GEN6(dev
) && port
== PORT_A
) {
3721 signal_levels
= gen6_edp_signal_levels(train_set
);
3722 mask
= EDP_LINK_TRAIN_VOL_EMP_MASK_SNB
;
3724 signal_levels
= gen4_signal_levels(train_set
);
3725 mask
= DP_VOLTAGE_MASK
| DP_PRE_EMPHASIS_MASK
;
3729 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels
);
3731 DRM_DEBUG_KMS("Using vswing level %d\n",
3732 train_set
& DP_TRAIN_VOLTAGE_SWING_MASK
);
3733 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3734 (train_set
& DP_TRAIN_PRE_EMPHASIS_MASK
) >>
3735 DP_TRAIN_PRE_EMPHASIS_SHIFT
);
3737 intel_dp
->DP
= (intel_dp
->DP
& ~mask
) | signal_levels
;
3739 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3740 POSTING_READ(intel_dp
->output_reg
);
3744 intel_dp_program_link_training_pattern(struct intel_dp
*intel_dp
,
3745 uint8_t dp_train_pat
)
3747 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3748 struct drm_i915_private
*dev_priv
=
3749 to_i915(intel_dig_port
->base
.base
.dev
);
3751 _intel_dp_set_link_train(intel_dp
, &intel_dp
->DP
, dp_train_pat
);
3753 I915_WRITE(intel_dp
->output_reg
, intel_dp
->DP
);
3754 POSTING_READ(intel_dp
->output_reg
);
3757 void intel_dp_set_idle_link_train(struct intel_dp
*intel_dp
)
3759 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3760 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3761 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3762 enum port port
= intel_dig_port
->port
;
3768 val
= I915_READ(DP_TP_CTL(port
));
3769 val
&= ~DP_TP_CTL_LINK_TRAIN_MASK
;
3770 val
|= DP_TP_CTL_LINK_TRAIN_IDLE
;
3771 I915_WRITE(DP_TP_CTL(port
), val
);
3774 * On PORT_A we can have only eDP in SST mode. There the only reason
3775 * we need to set idle transmission mode is to work around a HW issue
3776 * where we enable the pipe while not in idle link-training mode.
3777 * In this case there is requirement to wait for a minimum number of
3778 * idle patterns to be sent.
3783 if (wait_for((I915_READ(DP_TP_STATUS(port
)) & DP_TP_STATUS_IDLE_DONE
),
3785 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3789 intel_dp_link_down(struct intel_dp
*intel_dp
)
3791 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
3792 struct intel_crtc
*crtc
= to_intel_crtc(intel_dig_port
->base
.base
.crtc
);
3793 enum port port
= intel_dig_port
->port
;
3794 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
3795 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3796 uint32_t DP
= intel_dp
->DP
;
3798 if (WARN_ON(HAS_DDI(dev
)))
3801 if (WARN_ON((I915_READ(intel_dp
->output_reg
) & DP_PORT_EN
) == 0))
3804 DRM_DEBUG_KMS("\n");
3806 if ((IS_GEN7(dev
) && port
== PORT_A
) ||
3807 (HAS_PCH_CPT(dev
) && port
!= PORT_A
)) {
3808 DP
&= ~DP_LINK_TRAIN_MASK_CPT
;
3809 DP
|= DP_LINK_TRAIN_PAT_IDLE_CPT
;
3811 if (IS_CHERRYVIEW(dev
))
3812 DP
&= ~DP_LINK_TRAIN_MASK_CHV
;
3814 DP
&= ~DP_LINK_TRAIN_MASK
;
3815 DP
|= DP_LINK_TRAIN_PAT_IDLE
;
3817 I915_WRITE(intel_dp
->output_reg
, DP
);
3818 POSTING_READ(intel_dp
->output_reg
);
3820 DP
&= ~(DP_PORT_EN
| DP_AUDIO_OUTPUT_ENABLE
);
3821 I915_WRITE(intel_dp
->output_reg
, DP
);
3822 POSTING_READ(intel_dp
->output_reg
);
3825 * HW workaround for IBX, we need to move the port
3826 * to transcoder A after disabling it to allow the
3827 * matching HDMI port to be enabled on transcoder A.
3829 if (HAS_PCH_IBX(dev
) && crtc
->pipe
== PIPE_B
&& port
!= PORT_A
) {
3831 * We get CPU/PCH FIFO underruns on the other pipe when
3832 * doing the workaround. Sweep them under the rug.
3834 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3835 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, false);
3837 /* always enable with pattern 1 (as per spec) */
3838 DP
&= ~(DP_PIPEB_SELECT
| DP_LINK_TRAIN_MASK
);
3839 DP
|= DP_PORT_EN
| DP_LINK_TRAIN_PAT_1
;
3840 I915_WRITE(intel_dp
->output_reg
, DP
);
3841 POSTING_READ(intel_dp
->output_reg
);
3844 I915_WRITE(intel_dp
->output_reg
, DP
);
3845 POSTING_READ(intel_dp
->output_reg
);
3847 intel_wait_for_vblank_if_active(dev_priv
->dev
, PIPE_A
);
3848 intel_set_cpu_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3849 intel_set_pch_fifo_underrun_reporting(dev_priv
, PIPE_A
, true);
3852 msleep(intel_dp
->panel_power_down_delay
);
3858 intel_dp_get_dpcd(struct intel_dp
*intel_dp
)
3860 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3861 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3862 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3865 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, 0x000, intel_dp
->dpcd
,
3866 sizeof(intel_dp
->dpcd
)) < 0)
3867 return false; /* aux transfer failed */
3869 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp
->dpcd
), intel_dp
->dpcd
);
3871 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0)
3872 return false; /* DPCD not present */
3874 /* Check if the panel supports PSR */
3875 memset(intel_dp
->psr_dpcd
, 0, sizeof(intel_dp
->psr_dpcd
));
3876 if (is_edp(intel_dp
)) {
3877 intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_PSR_SUPPORT
,
3879 sizeof(intel_dp
->psr_dpcd
));
3880 if (intel_dp
->psr_dpcd
[0] & DP_PSR_IS_SUPPORTED
) {
3881 dev_priv
->psr
.sink_support
= true;
3882 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3885 if (INTEL_INFO(dev
)->gen
>= 9 &&
3886 (intel_dp
->psr_dpcd
[0] & DP_PSR2_IS_SUPPORTED
)) {
3887 uint8_t frame_sync_cap
;
3889 dev_priv
->psr
.sink_support
= true;
3890 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3891 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP
,
3892 &frame_sync_cap
, 1);
3893 dev_priv
->psr
.aux_frame_sync
= frame_sync_cap
? true : false;
3894 /* PSR2 needs frame sync as well */
3895 dev_priv
->psr
.psr2_support
= dev_priv
->psr
.aux_frame_sync
;
3896 DRM_DEBUG_KMS("PSR2 %s on sink",
3897 dev_priv
->psr
.psr2_support
? "supported" : "not supported");
3901 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
3902 yesno(intel_dp_source_supports_hbr2(intel_dp
)),
3903 yesno(drm_dp_tps3_supported(intel_dp
->dpcd
)));
3905 /* Intermediate frequency support */
3906 if (is_edp(intel_dp
) &&
3907 (intel_dp
->dpcd
[DP_EDP_CONFIGURATION_CAP
] & DP_DPCD_DISPLAY_CONTROL_CAPABLE
) &&
3908 (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_EDP_DPCD_REV
, &rev
, 1) == 1) &&
3909 (rev
>= 0x03)) { /* eDp v1.4 or higher */
3910 __le16 sink_rates
[DP_MAX_SUPPORTED_RATES
];
3913 intel_dp_dpcd_read_wake(&intel_dp
->aux
,
3914 DP_SUPPORTED_LINK_RATES
,
3916 sizeof(sink_rates
));
3918 for (i
= 0; i
< ARRAY_SIZE(sink_rates
); i
++) {
3919 int val
= le16_to_cpu(sink_rates
[i
]);
3924 /* Value read is in kHz while drm clock is saved in deca-kHz */
3925 intel_dp
->sink_rates
[i
] = (val
* 200) / 10;
3927 intel_dp
->num_sink_rates
= i
;
3930 intel_dp_print_rates(intel_dp
);
3932 if (!(intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
3933 DP_DWN_STRM_PORT_PRESENT
))
3934 return true; /* native DP sink */
3936 if (intel_dp
->dpcd
[DP_DPCD_REV
] == 0x10)
3937 return true; /* no per-port downstream info */
3939 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_DOWNSTREAM_PORT_0
,
3940 intel_dp
->downstream_ports
,
3941 DP_MAX_DOWNSTREAM_PORTS
) < 0)
3942 return false; /* downstream port status fetch failed */
3948 intel_dp_probe_oui(struct intel_dp
*intel_dp
)
3952 if (!(intel_dp
->dpcd
[DP_DOWN_STREAM_PORT_COUNT
] & DP_OUI_SUPPORT
))
3955 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_OUI
, buf
, 3) == 3)
3956 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3957 buf
[0], buf
[1], buf
[2]);
3959 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_BRANCH_OUI
, buf
, 3) == 3)
3960 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3961 buf
[0], buf
[1], buf
[2]);
3965 intel_dp_probe_mst(struct intel_dp
*intel_dp
)
3969 if (!intel_dp
->can_mst
)
3972 if (intel_dp
->dpcd
[DP_DPCD_REV
] < 0x12)
3975 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_MSTM_CAP
, buf
, 1)) {
3976 if (buf
[0] & DP_MST_CAP
) {
3977 DRM_DEBUG_KMS("Sink is MST capable\n");
3978 intel_dp
->is_mst
= true;
3980 DRM_DEBUG_KMS("Sink is not MST capable\n");
3981 intel_dp
->is_mst
= false;
3985 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
3986 return intel_dp
->is_mst
;
3989 static int intel_dp_sink_crc_stop(struct intel_dp
*intel_dp
)
3991 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
3992 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
3993 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
3999 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0) {
4000 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4005 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4006 buf
& ~DP_TEST_SINK_START
) < 0) {
4007 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
4013 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4015 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4016 DP_TEST_SINK_MISC
, &buf
) < 0) {
4020 count
= buf
& DP_TEST_COUNT_MASK
;
4021 } while (--attempts
&& count
);
4023 if (attempts
== 0) {
4024 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
4029 hsw_enable_ips(intel_crtc
);
4033 static int intel_dp_sink_crc_start(struct intel_dp
*intel_dp
)
4035 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4036 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4037 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4041 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK_MISC
, &buf
) < 0)
4044 if (!(buf
& DP_TEST_CRC_SUPPORTED
))
4047 if (drm_dp_dpcd_readb(&intel_dp
->aux
, DP_TEST_SINK
, &buf
) < 0)
4050 if (buf
& DP_TEST_SINK_START
) {
4051 ret
= intel_dp_sink_crc_stop(intel_dp
);
4056 hsw_disable_ips(intel_crtc
);
4058 if (drm_dp_dpcd_writeb(&intel_dp
->aux
, DP_TEST_SINK
,
4059 buf
| DP_TEST_SINK_START
) < 0) {
4060 hsw_enable_ips(intel_crtc
);
4064 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4068 int intel_dp_sink_crc(struct intel_dp
*intel_dp
, u8
*crc
)
4070 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
4071 struct drm_device
*dev
= dig_port
->base
.base
.dev
;
4072 struct intel_crtc
*intel_crtc
= to_intel_crtc(dig_port
->base
.base
.crtc
);
4077 ret
= intel_dp_sink_crc_start(intel_dp
);
4082 intel_wait_for_vblank(dev
, intel_crtc
->pipe
);
4084 if (drm_dp_dpcd_readb(&intel_dp
->aux
,
4085 DP_TEST_SINK_MISC
, &buf
) < 0) {
4089 count
= buf
& DP_TEST_COUNT_MASK
;
4091 } while (--attempts
&& count
== 0);
4093 if (attempts
== 0) {
4094 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4099 if (drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_CRC_R_CR
, crc
, 6) < 0) {
4105 intel_dp_sink_crc_stop(intel_dp
);
4110 intel_dp_get_sink_irq(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4112 return intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4113 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4114 sink_irq_vector
, 1) == 1;
4118 intel_dp_get_sink_irq_esi(struct intel_dp
*intel_dp
, u8
*sink_irq_vector
)
4122 ret
= intel_dp_dpcd_read_wake(&intel_dp
->aux
,
4124 sink_irq_vector
, 14);
4131 static uint8_t intel_dp_autotest_link_training(struct intel_dp
*intel_dp
)
4133 uint8_t test_result
= DP_TEST_ACK
;
4137 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp
*intel_dp
)
4139 uint8_t test_result
= DP_TEST_NAK
;
4143 static uint8_t intel_dp_autotest_edid(struct intel_dp
*intel_dp
)
4145 uint8_t test_result
= DP_TEST_NAK
;
4146 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4147 struct drm_connector
*connector
= &intel_connector
->base
;
4149 if (intel_connector
->detect_edid
== NULL
||
4150 connector
->edid_corrupt
||
4151 intel_dp
->aux
.i2c_defer_count
> 6) {
4152 /* Check EDID read for NACKs, DEFERs and corruption
4153 * (DP CTS 1.2 Core r1.1)
4154 * 4.2.2.4 : Failed EDID read, I2C_NAK
4155 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4156 * 4.2.2.6 : EDID corruption detected
4157 * Use failsafe mode for all cases
4159 if (intel_dp
->aux
.i2c_nack_count
> 0 ||
4160 intel_dp
->aux
.i2c_defer_count
> 0)
4161 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4162 intel_dp
->aux
.i2c_nack_count
,
4163 intel_dp
->aux
.i2c_defer_count
);
4164 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_FAILSAFE
;
4166 struct edid
*block
= intel_connector
->detect_edid
;
4168 /* We have to write the checksum
4169 * of the last block read
4171 block
+= intel_connector
->detect_edid
->extensions
;
4173 if (!drm_dp_dpcd_write(&intel_dp
->aux
,
4174 DP_TEST_EDID_CHECKSUM
,
4177 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4179 test_result
= DP_TEST_ACK
| DP_TEST_EDID_CHECKSUM_WRITE
;
4180 intel_dp
->compliance_test_data
= INTEL_DP_RESOLUTION_STANDARD
;
4183 /* Set test active flag here so userspace doesn't interrupt things */
4184 intel_dp
->compliance_test_active
= 1;
4189 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp
*intel_dp
)
4191 uint8_t test_result
= DP_TEST_NAK
;
4195 static void intel_dp_handle_test_request(struct intel_dp
*intel_dp
)
4197 uint8_t response
= DP_TEST_NAK
;
4201 status
= drm_dp_dpcd_read(&intel_dp
->aux
, DP_TEST_REQUEST
, &rxdata
, 1);
4203 DRM_DEBUG_KMS("Could not read test request from sink\n");
4208 case DP_TEST_LINK_TRAINING
:
4209 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4210 intel_dp
->compliance_test_type
= DP_TEST_LINK_TRAINING
;
4211 response
= intel_dp_autotest_link_training(intel_dp
);
4213 case DP_TEST_LINK_VIDEO_PATTERN
:
4214 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4215 intel_dp
->compliance_test_type
= DP_TEST_LINK_VIDEO_PATTERN
;
4216 response
= intel_dp_autotest_video_pattern(intel_dp
);
4218 case DP_TEST_LINK_EDID_READ
:
4219 DRM_DEBUG_KMS("EDID test requested\n");
4220 intel_dp
->compliance_test_type
= DP_TEST_LINK_EDID_READ
;
4221 response
= intel_dp_autotest_edid(intel_dp
);
4223 case DP_TEST_LINK_PHY_TEST_PATTERN
:
4224 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4225 intel_dp
->compliance_test_type
= DP_TEST_LINK_PHY_TEST_PATTERN
;
4226 response
= intel_dp_autotest_phy_pattern(intel_dp
);
4229 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata
);
4234 status
= drm_dp_dpcd_write(&intel_dp
->aux
,
4238 DRM_DEBUG_KMS("Could not write test response to sink\n");
4242 intel_dp_check_mst_status(struct intel_dp
*intel_dp
)
4246 if (intel_dp
->is_mst
) {
4251 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4255 /* check link status - esi[10] = 0x200c */
4256 if (intel_dp
->active_mst_links
&&
4257 !drm_dp_channel_eq_ok(&esi
[10], intel_dp
->lane_count
)) {
4258 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4259 intel_dp_start_link_train(intel_dp
);
4260 intel_dp_stop_link_train(intel_dp
);
4263 DRM_DEBUG_KMS("got esi %3ph\n", esi
);
4264 ret
= drm_dp_mst_hpd_irq(&intel_dp
->mst_mgr
, esi
, &handled
);
4267 for (retry
= 0; retry
< 3; retry
++) {
4269 wret
= drm_dp_dpcd_write(&intel_dp
->aux
,
4270 DP_SINK_COUNT_ESI
+1,
4277 bret
= intel_dp_get_sink_irq_esi(intel_dp
, esi
);
4279 DRM_DEBUG_KMS("got esi2 %3ph\n", esi
);
4287 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4288 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4289 intel_dp
->is_mst
= false;
4290 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
4291 /* send a hotplug event */
4292 drm_kms_helper_hotplug_event(intel_dig_port
->base
.base
.dev
);
4299 * According to DP spec
4302 * 2. Configure link according to Receiver Capabilities
4303 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4304 * 4. Check link status on receipt of hot-plug interrupt
4307 intel_dp_check_link_status(struct intel_dp
*intel_dp
)
4309 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4310 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4312 u8 link_status
[DP_LINK_STATUS_SIZE
];
4314 WARN_ON(!drm_modeset_is_locked(&dev
->mode_config
.connection_mutex
));
4317 * Clearing compliance test variables to allow capturing
4318 * of values for next automated test request.
4320 intel_dp
->compliance_test_active
= 0;
4321 intel_dp
->compliance_test_type
= 0;
4322 intel_dp
->compliance_test_data
= 0;
4324 if (!intel_encoder
->base
.crtc
)
4327 if (!to_intel_crtc(intel_encoder
->base
.crtc
)->active
)
4330 /* Try to read receiver status if the link appears to be up */
4331 if (!intel_dp_get_link_status(intel_dp
, link_status
)) {
4335 /* Now read the DPCD to see if it's actually running */
4336 if (!intel_dp_get_dpcd(intel_dp
)) {
4340 /* Try to read the source of the interrupt */
4341 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4342 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4343 /* Clear interrupt source */
4344 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4345 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4348 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4349 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
4350 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4351 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4354 /* if link training is requested we should perform it always */
4355 if ((intel_dp
->compliance_test_type
== DP_TEST_LINK_TRAINING
) ||
4356 (!drm_dp_channel_eq_ok(link_status
, intel_dp
->lane_count
))) {
4357 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4358 intel_encoder
->base
.name
);
4359 intel_dp_start_link_train(intel_dp
);
4360 intel_dp_stop_link_train(intel_dp
);
4364 /* XXX this is probably wrong for multiple downstream ports */
4365 static enum drm_connector_status
4366 intel_dp_detect_dpcd(struct intel_dp
*intel_dp
)
4368 uint8_t *dpcd
= intel_dp
->dpcd
;
4371 if (!intel_dp_get_dpcd(intel_dp
))
4372 return connector_status_disconnected
;
4374 /* if there's no downstream port, we're done */
4375 if (!(dpcd
[DP_DOWNSTREAMPORT_PRESENT
] & DP_DWN_STRM_PORT_PRESENT
))
4376 return connector_status_connected
;
4378 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4379 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4380 intel_dp
->downstream_ports
[0] & DP_DS_PORT_HPD
) {
4383 if (intel_dp_dpcd_read_wake(&intel_dp
->aux
, DP_SINK_COUNT
,
4385 return connector_status_unknown
;
4387 return DP_GET_SINK_COUNT(reg
) ? connector_status_connected
4388 : connector_status_disconnected
;
4391 /* If no HPD, poke DDC gently */
4392 if (drm_probe_ddc(&intel_dp
->aux
.ddc
))
4393 return connector_status_connected
;
4395 /* Well we tried, say unknown for unreliable port types */
4396 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11) {
4397 type
= intel_dp
->downstream_ports
[0] & DP_DS_PORT_TYPE_MASK
;
4398 if (type
== DP_DS_PORT_TYPE_VGA
||
4399 type
== DP_DS_PORT_TYPE_NON_EDID
)
4400 return connector_status_unknown
;
4402 type
= intel_dp
->dpcd
[DP_DOWNSTREAMPORT_PRESENT
] &
4403 DP_DWN_STRM_PORT_TYPE_MASK
;
4404 if (type
== DP_DWN_STRM_PORT_TYPE_ANALOG
||
4405 type
== DP_DWN_STRM_PORT_TYPE_OTHER
)
4406 return connector_status_unknown
;
4409 /* Anything else is out of spec, warn and ignore */
4410 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4411 return connector_status_disconnected
;
4414 static enum drm_connector_status
4415 edp_detect(struct intel_dp
*intel_dp
)
4417 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
4418 enum drm_connector_status status
;
4420 status
= intel_panel_detect(dev
);
4421 if (status
== connector_status_unknown
)
4422 status
= connector_status_connected
;
4427 static bool ibx_digital_port_connected(struct drm_i915_private
*dev_priv
,
4428 struct intel_digital_port
*port
)
4432 switch (port
->port
) {
4436 bit
= SDE_PORTB_HOTPLUG
;
4439 bit
= SDE_PORTC_HOTPLUG
;
4442 bit
= SDE_PORTD_HOTPLUG
;
4445 MISSING_CASE(port
->port
);
4449 return I915_READ(SDEISR
) & bit
;
4452 static bool cpt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4453 struct intel_digital_port
*port
)
4457 switch (port
->port
) {
4461 bit
= SDE_PORTB_HOTPLUG_CPT
;
4464 bit
= SDE_PORTC_HOTPLUG_CPT
;
4467 bit
= SDE_PORTD_HOTPLUG_CPT
;
4470 bit
= SDE_PORTE_HOTPLUG_SPT
;
4473 MISSING_CASE(port
->port
);
4477 return I915_READ(SDEISR
) & bit
;
4480 static bool g4x_digital_port_connected(struct drm_i915_private
*dev_priv
,
4481 struct intel_digital_port
*port
)
4485 switch (port
->port
) {
4487 bit
= PORTB_HOTPLUG_LIVE_STATUS_G4X
;
4490 bit
= PORTC_HOTPLUG_LIVE_STATUS_G4X
;
4493 bit
= PORTD_HOTPLUG_LIVE_STATUS_G4X
;
4496 MISSING_CASE(port
->port
);
4500 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4503 static bool gm45_digital_port_connected(struct drm_i915_private
*dev_priv
,
4504 struct intel_digital_port
*port
)
4508 switch (port
->port
) {
4510 bit
= PORTB_HOTPLUG_LIVE_STATUS_GM45
;
4513 bit
= PORTC_HOTPLUG_LIVE_STATUS_GM45
;
4516 bit
= PORTD_HOTPLUG_LIVE_STATUS_GM45
;
4519 MISSING_CASE(port
->port
);
4523 return I915_READ(PORT_HOTPLUG_STAT
) & bit
;
4526 static bool bxt_digital_port_connected(struct drm_i915_private
*dev_priv
,
4527 struct intel_digital_port
*intel_dig_port
)
4529 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4533 intel_hpd_pin_to_port(intel_encoder
->hpd_pin
, &port
);
4536 bit
= BXT_DE_PORT_HP_DDIA
;
4539 bit
= BXT_DE_PORT_HP_DDIB
;
4542 bit
= BXT_DE_PORT_HP_DDIC
;
4549 return I915_READ(GEN8_DE_PORT_ISR
) & bit
;
4553 * intel_digital_port_connected - is the specified port connected?
4554 * @dev_priv: i915 private structure
4555 * @port: the port to test
4557 * Return %true if @port is connected, %false otherwise.
4559 bool intel_digital_port_connected(struct drm_i915_private
*dev_priv
,
4560 struct intel_digital_port
*port
)
4562 if (HAS_PCH_IBX(dev_priv
))
4563 return ibx_digital_port_connected(dev_priv
, port
);
4564 else if (HAS_PCH_SPLIT(dev_priv
))
4565 return cpt_digital_port_connected(dev_priv
, port
);
4566 else if (IS_BROXTON(dev_priv
))
4567 return bxt_digital_port_connected(dev_priv
, port
);
4568 else if (IS_GM45(dev_priv
))
4569 return gm45_digital_port_connected(dev_priv
, port
);
4571 return g4x_digital_port_connected(dev_priv
, port
);
4574 static struct edid
*
4575 intel_dp_get_edid(struct intel_dp
*intel_dp
)
4577 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4579 /* use cached edid if we have one */
4580 if (intel_connector
->edid
) {
4582 if (IS_ERR(intel_connector
->edid
))
4585 return drm_edid_duplicate(intel_connector
->edid
);
4587 return drm_get_edid(&intel_connector
->base
,
4588 &intel_dp
->aux
.ddc
);
4592 intel_dp_set_edid(struct intel_dp
*intel_dp
)
4594 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4597 edid
= intel_dp_get_edid(intel_dp
);
4598 intel_connector
->detect_edid
= edid
;
4600 if (intel_dp
->force_audio
!= HDMI_AUDIO_AUTO
)
4601 intel_dp
->has_audio
= intel_dp
->force_audio
== HDMI_AUDIO_ON
;
4603 intel_dp
->has_audio
= drm_detect_monitor_audio(edid
);
4607 intel_dp_unset_edid(struct intel_dp
*intel_dp
)
4609 struct intel_connector
*intel_connector
= intel_dp
->attached_connector
;
4611 kfree(intel_connector
->detect_edid
);
4612 intel_connector
->detect_edid
= NULL
;
4614 intel_dp
->has_audio
= false;
4617 static enum drm_connector_status
4618 intel_dp_detect(struct drm_connector
*connector
, bool force
)
4620 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4621 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4622 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4623 struct drm_device
*dev
= connector
->dev
;
4624 enum drm_connector_status status
;
4625 enum intel_display_power_domain power_domain
;
4629 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4630 connector
->base
.id
, connector
->name
);
4631 intel_dp_unset_edid(intel_dp
);
4633 if (intel_dp
->is_mst
) {
4634 /* MST devices are disconnected from a monitor POV */
4635 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4636 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4637 return connector_status_disconnected
;
4640 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
4641 intel_display_power_get(to_i915(dev
), power_domain
);
4643 /* Can't disconnect eDP, but you can close the lid... */
4644 if (is_edp(intel_dp
))
4645 status
= edp_detect(intel_dp
);
4646 else if (intel_digital_port_connected(to_i915(dev
),
4647 dp_to_dig_port(intel_dp
)))
4648 status
= intel_dp_detect_dpcd(intel_dp
);
4650 status
= connector_status_disconnected
;
4652 if (status
!= connector_status_connected
) {
4653 intel_dp
->compliance_test_active
= 0;
4654 intel_dp
->compliance_test_type
= 0;
4655 intel_dp
->compliance_test_data
= 0;
4660 intel_dp_probe_oui(intel_dp
);
4662 ret
= intel_dp_probe_mst(intel_dp
);
4664 /* if we are in MST mode then this connector
4665 won't appear connected or have anything with EDID on it */
4666 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4667 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4668 status
= connector_status_disconnected
;
4673 * Clearing NACK and defer counts to get their exact values
4674 * while reading EDID which are required by Compliance tests
4675 * 4.2.2.4 and 4.2.2.5
4677 intel_dp
->aux
.i2c_nack_count
= 0;
4678 intel_dp
->aux
.i2c_defer_count
= 0;
4680 intel_dp_set_edid(intel_dp
);
4682 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4683 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4684 status
= connector_status_connected
;
4686 /* Try to read the source of the interrupt */
4687 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11 &&
4688 intel_dp_get_sink_irq(intel_dp
, &sink_irq_vector
)) {
4689 /* Clear interrupt source */
4690 drm_dp_dpcd_writeb(&intel_dp
->aux
,
4691 DP_DEVICE_SERVICE_IRQ_VECTOR
,
4694 if (sink_irq_vector
& DP_AUTOMATED_TEST_REQUEST
)
4695 intel_dp_handle_test_request(intel_dp
);
4696 if (sink_irq_vector
& (DP_CP_IRQ
| DP_SINK_SPECIFIC_IRQ
))
4697 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4701 intel_display_power_put(to_i915(dev
), power_domain
);
4706 intel_dp_force(struct drm_connector
*connector
)
4708 struct intel_dp
*intel_dp
= intel_attached_dp(connector
);
4709 struct intel_encoder
*intel_encoder
= &dp_to_dig_port(intel_dp
)->base
;
4710 struct drm_i915_private
*dev_priv
= to_i915(intel_encoder
->base
.dev
);
4711 enum intel_display_power_domain power_domain
;
4713 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4714 connector
->base
.id
, connector
->name
);
4715 intel_dp_unset_edid(intel_dp
);
4717 if (connector
->status
!= connector_status_connected
)
4720 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
4721 intel_display_power_get(dev_priv
, power_domain
);
4723 intel_dp_set_edid(intel_dp
);
4725 intel_display_power_put(dev_priv
, power_domain
);
4727 if (intel_encoder
->type
!= INTEL_OUTPUT_EDP
)
4728 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
4731 static int intel_dp_get_modes(struct drm_connector
*connector
)
4733 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4736 edid
= intel_connector
->detect_edid
;
4738 int ret
= intel_connector_update_modes(connector
, edid
);
4743 /* if eDP has no EDID, fall back to fixed mode */
4744 if (is_edp(intel_attached_dp(connector
)) &&
4745 intel_connector
->panel
.fixed_mode
) {
4746 struct drm_display_mode
*mode
;
4748 mode
= drm_mode_duplicate(connector
->dev
,
4749 intel_connector
->panel
.fixed_mode
);
4751 drm_mode_probed_add(connector
, mode
);
4760 intel_dp_detect_audio(struct drm_connector
*connector
)
4762 bool has_audio
= false;
4765 edid
= to_intel_connector(connector
)->detect_edid
;
4767 has_audio
= drm_detect_monitor_audio(edid
);
4773 intel_dp_set_property(struct drm_connector
*connector
,
4774 struct drm_property
*property
,
4777 struct drm_i915_private
*dev_priv
= connector
->dev
->dev_private
;
4778 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4779 struct intel_encoder
*intel_encoder
= intel_attached_encoder(connector
);
4780 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4783 ret
= drm_object_property_set_value(&connector
->base
, property
, val
);
4787 if (property
== dev_priv
->force_audio_property
) {
4791 if (i
== intel_dp
->force_audio
)
4794 intel_dp
->force_audio
= i
;
4796 if (i
== HDMI_AUDIO_AUTO
)
4797 has_audio
= intel_dp_detect_audio(connector
);
4799 has_audio
= (i
== HDMI_AUDIO_ON
);
4801 if (has_audio
== intel_dp
->has_audio
)
4804 intel_dp
->has_audio
= has_audio
;
4808 if (property
== dev_priv
->broadcast_rgb_property
) {
4809 bool old_auto
= intel_dp
->color_range_auto
;
4810 bool old_range
= intel_dp
->limited_color_range
;
4813 case INTEL_BROADCAST_RGB_AUTO
:
4814 intel_dp
->color_range_auto
= true;
4816 case INTEL_BROADCAST_RGB_FULL
:
4817 intel_dp
->color_range_auto
= false;
4818 intel_dp
->limited_color_range
= false;
4820 case INTEL_BROADCAST_RGB_LIMITED
:
4821 intel_dp
->color_range_auto
= false;
4822 intel_dp
->limited_color_range
= true;
4828 if (old_auto
== intel_dp
->color_range_auto
&&
4829 old_range
== intel_dp
->limited_color_range
)
4835 if (is_edp(intel_dp
) &&
4836 property
== connector
->dev
->mode_config
.scaling_mode_property
) {
4837 if (val
== DRM_MODE_SCALE_NONE
) {
4838 DRM_DEBUG_KMS("no scaling not supported\n");
4842 if (intel_connector
->panel
.fitting_mode
== val
) {
4843 /* the eDP scaling property is not changed */
4846 intel_connector
->panel
.fitting_mode
= val
;
4854 if (intel_encoder
->base
.crtc
)
4855 intel_crtc_restore_mode(intel_encoder
->base
.crtc
);
4861 intel_dp_connector_destroy(struct drm_connector
*connector
)
4863 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
4865 kfree(intel_connector
->detect_edid
);
4867 if (!IS_ERR_OR_NULL(intel_connector
->edid
))
4868 kfree(intel_connector
->edid
);
4870 /* Can't call is_edp() since the encoder may have been destroyed
4872 if (connector
->connector_type
== DRM_MODE_CONNECTOR_eDP
)
4873 intel_panel_fini(&intel_connector
->panel
);
4875 drm_connector_cleanup(connector
);
4879 void intel_dp_encoder_destroy(struct drm_encoder
*encoder
)
4881 struct intel_digital_port
*intel_dig_port
= enc_to_dig_port(encoder
);
4882 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4884 intel_dp_aux_fini(intel_dp
);
4885 intel_dp_mst_encoder_cleanup(intel_dig_port
);
4886 if (is_edp(intel_dp
)) {
4887 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4889 * vdd might still be enabled do to the delayed vdd off.
4890 * Make sure vdd is actually turned off here.
4893 edp_panel_vdd_off_sync(intel_dp
);
4894 pps_unlock(intel_dp
);
4896 if (intel_dp
->edp_notifier
.notifier_call
) {
4897 unregister_reboot_notifier(&intel_dp
->edp_notifier
);
4898 intel_dp
->edp_notifier
.notifier_call
= NULL
;
4901 drm_encoder_cleanup(encoder
);
4902 kfree(intel_dig_port
);
4905 static void intel_dp_encoder_suspend(struct intel_encoder
*intel_encoder
)
4907 struct intel_dp
*intel_dp
= enc_to_intel_dp(&intel_encoder
->base
);
4909 if (!is_edp(intel_dp
))
4913 * vdd might still be enabled do to the delayed vdd off.
4914 * Make sure vdd is actually turned off here.
4916 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
4918 edp_panel_vdd_off_sync(intel_dp
);
4919 pps_unlock(intel_dp
);
4922 static void intel_edp_panel_vdd_sanitize(struct intel_dp
*intel_dp
)
4924 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
4925 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4926 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
4927 enum intel_display_power_domain power_domain
;
4929 lockdep_assert_held(&dev_priv
->pps_mutex
);
4931 if (!edp_have_panel_vdd(intel_dp
))
4935 * The VDD bit needs a power domain reference, so if the bit is
4936 * already enabled when we boot or resume, grab this reference and
4937 * schedule a vdd off, so we don't hold on to the reference
4940 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4941 power_domain
= intel_display_port_aux_power_domain(&intel_dig_port
->base
);
4942 intel_display_power_get(dev_priv
, power_domain
);
4944 edp_panel_vdd_schedule_off(intel_dp
);
4947 static void intel_dp_encoder_reset(struct drm_encoder
*encoder
)
4949 struct intel_dp
*intel_dp
;
4951 if (to_intel_encoder(encoder
)->type
!= INTEL_OUTPUT_EDP
)
4954 intel_dp
= enc_to_intel_dp(encoder
);
4959 * Read out the current power sequencer assignment,
4960 * in case the BIOS did something with it.
4962 if (IS_VALLEYVIEW(encoder
->dev
) || IS_CHERRYVIEW(encoder
->dev
))
4963 vlv_initial_power_sequencer_setup(intel_dp
);
4965 intel_edp_panel_vdd_sanitize(intel_dp
);
4967 pps_unlock(intel_dp
);
4970 static const struct drm_connector_funcs intel_dp_connector_funcs
= {
4971 .dpms
= drm_atomic_helper_connector_dpms
,
4972 .detect
= intel_dp_detect
,
4973 .force
= intel_dp_force
,
4974 .fill_modes
= drm_helper_probe_single_connector_modes
,
4975 .set_property
= intel_dp_set_property
,
4976 .atomic_get_property
= intel_connector_atomic_get_property
,
4977 .destroy
= intel_dp_connector_destroy
,
4978 .atomic_destroy_state
= drm_atomic_helper_connector_destroy_state
,
4979 .atomic_duplicate_state
= drm_atomic_helper_connector_duplicate_state
,
4982 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs
= {
4983 .get_modes
= intel_dp_get_modes
,
4984 .mode_valid
= intel_dp_mode_valid
,
4985 .best_encoder
= intel_best_encoder
,
4988 static const struct drm_encoder_funcs intel_dp_enc_funcs
= {
4989 .reset
= intel_dp_encoder_reset
,
4990 .destroy
= intel_dp_encoder_destroy
,
4994 intel_dp_hpd_pulse(struct intel_digital_port
*intel_dig_port
, bool long_hpd
)
4996 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
4997 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
4998 struct drm_device
*dev
= intel_dig_port
->base
.base
.dev
;
4999 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5000 enum intel_display_power_domain power_domain
;
5001 enum irqreturn ret
= IRQ_NONE
;
5003 if (intel_dig_port
->base
.type
!= INTEL_OUTPUT_EDP
&&
5004 intel_dig_port
->base
.type
!= INTEL_OUTPUT_HDMI
)
5005 intel_dig_port
->base
.type
= INTEL_OUTPUT_DISPLAYPORT
;
5007 if (long_hpd
&& intel_dig_port
->base
.type
== INTEL_OUTPUT_EDP
) {
5009 * vdd off can generate a long pulse on eDP which
5010 * would require vdd on to handle it, and thus we
5011 * would end up in an endless cycle of
5012 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5014 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5015 port_name(intel_dig_port
->port
));
5019 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5020 port_name(intel_dig_port
->port
),
5021 long_hpd
? "long" : "short");
5023 power_domain
= intel_display_port_aux_power_domain(intel_encoder
);
5024 intel_display_power_get(dev_priv
, power_domain
);
5027 /* indicate that we need to restart link training */
5028 intel_dp
->train_set_valid
= false;
5030 if (!intel_digital_port_connected(dev_priv
, intel_dig_port
))
5033 if (!intel_dp_get_dpcd(intel_dp
)) {
5037 intel_dp_probe_oui(intel_dp
);
5039 if (!intel_dp_probe_mst(intel_dp
)) {
5040 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5041 intel_dp_check_link_status(intel_dp
);
5042 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5046 if (intel_dp
->is_mst
) {
5047 if (intel_dp_check_mst_status(intel_dp
) == -EINVAL
)
5051 if (!intel_dp
->is_mst
) {
5052 drm_modeset_lock(&dev
->mode_config
.connection_mutex
, NULL
);
5053 intel_dp_check_link_status(intel_dp
);
5054 drm_modeset_unlock(&dev
->mode_config
.connection_mutex
);
5062 /* if we were in MST mode, and device is not there get out of MST mode */
5063 if (intel_dp
->is_mst
) {
5064 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp
->is_mst
, intel_dp
->mst_mgr
.mst_state
);
5065 intel_dp
->is_mst
= false;
5066 drm_dp_mst_topology_mgr_set_mst(&intel_dp
->mst_mgr
, intel_dp
->is_mst
);
5069 intel_display_power_put(dev_priv
, power_domain
);
5074 /* check the VBT to see whether the eDP is on another port */
5075 bool intel_dp_is_edp(struct drm_device
*dev
, enum port port
)
5077 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5078 union child_device_config
*p_child
;
5080 static const short port_mapping
[] = {
5081 [PORT_B
] = DVO_PORT_DPB
,
5082 [PORT_C
] = DVO_PORT_DPC
,
5083 [PORT_D
] = DVO_PORT_DPD
,
5084 [PORT_E
] = DVO_PORT_DPE
,
5088 * eDP not supported on g4x. so bail out early just
5089 * for a bit extra safety in case the VBT is bonkers.
5091 if (INTEL_INFO(dev
)->gen
< 5)
5097 if (!dev_priv
->vbt
.child_dev_num
)
5100 for (i
= 0; i
< dev_priv
->vbt
.child_dev_num
; i
++) {
5101 p_child
= dev_priv
->vbt
.child_dev
+ i
;
5103 if (p_child
->common
.dvo_port
== port_mapping
[port
] &&
5104 (p_child
->common
.device_type
& DEVICE_TYPE_eDP_BITS
) ==
5105 (DEVICE_TYPE_eDP
& DEVICE_TYPE_eDP_BITS
))
5112 intel_dp_add_properties(struct intel_dp
*intel_dp
, struct drm_connector
*connector
)
5114 struct intel_connector
*intel_connector
= to_intel_connector(connector
);
5116 intel_attach_force_audio_property(connector
);
5117 intel_attach_broadcast_rgb_property(connector
);
5118 intel_dp
->color_range_auto
= true;
5120 if (is_edp(intel_dp
)) {
5121 drm_mode_create_scaling_mode_property(connector
->dev
);
5122 drm_object_attach_property(
5124 connector
->dev
->mode_config
.scaling_mode_property
,
5125 DRM_MODE_SCALE_ASPECT
);
5126 intel_connector
->panel
.fitting_mode
= DRM_MODE_SCALE_ASPECT
;
5130 static void intel_dp_init_panel_power_timestamps(struct intel_dp
*intel_dp
)
5132 intel_dp
->panel_power_off_time
= ktime_get_boottime();
5133 intel_dp
->last_power_on
= jiffies
;
5134 intel_dp
->last_backlight_off
= jiffies
;
5138 intel_dp_init_panel_power_sequencer(struct drm_device
*dev
,
5139 struct intel_dp
*intel_dp
)
5141 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5142 struct edp_power_seq cur
, vbt
, spec
,
5143 *final
= &intel_dp
->pps_delays
;
5144 u32 pp_on
, pp_off
, pp_div
= 0, pp_ctl
= 0;
5145 i915_reg_t pp_ctrl_reg
, pp_on_reg
, pp_off_reg
, pp_div_reg
;
5147 lockdep_assert_held(&dev_priv
->pps_mutex
);
5149 /* already initialized? */
5150 if (final
->t11_t12
!= 0)
5153 if (IS_BROXTON(dev
)) {
5155 * TODO: BXT has 2 sets of PPS registers.
5156 * Correct Register for Broxton need to be identified
5157 * using VBT. hardcoding for now
5159 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5160 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5161 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5162 } else if (HAS_PCH_SPLIT(dev
)) {
5163 pp_ctrl_reg
= PCH_PP_CONTROL
;
5164 pp_on_reg
= PCH_PP_ON_DELAYS
;
5165 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5166 pp_div_reg
= PCH_PP_DIVISOR
;
5168 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5170 pp_ctrl_reg
= VLV_PIPE_PP_CONTROL(pipe
);
5171 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5172 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5173 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5176 /* Workaround: Need to write PP_CONTROL with the unlock key as
5177 * the very first thing. */
5178 pp_ctl
= ironlake_get_pp_control(intel_dp
);
5180 pp_on
= I915_READ(pp_on_reg
);
5181 pp_off
= I915_READ(pp_off_reg
);
5182 if (!IS_BROXTON(dev
)) {
5183 I915_WRITE(pp_ctrl_reg
, pp_ctl
);
5184 pp_div
= I915_READ(pp_div_reg
);
5187 /* Pull timing values out of registers */
5188 cur
.t1_t3
= (pp_on
& PANEL_POWER_UP_DELAY_MASK
) >>
5189 PANEL_POWER_UP_DELAY_SHIFT
;
5191 cur
.t8
= (pp_on
& PANEL_LIGHT_ON_DELAY_MASK
) >>
5192 PANEL_LIGHT_ON_DELAY_SHIFT
;
5194 cur
.t9
= (pp_off
& PANEL_LIGHT_OFF_DELAY_MASK
) >>
5195 PANEL_LIGHT_OFF_DELAY_SHIFT
;
5197 cur
.t10
= (pp_off
& PANEL_POWER_DOWN_DELAY_MASK
) >>
5198 PANEL_POWER_DOWN_DELAY_SHIFT
;
5200 if (IS_BROXTON(dev
)) {
5201 u16 tmp
= (pp_ctl
& BXT_POWER_CYCLE_DELAY_MASK
) >>
5202 BXT_POWER_CYCLE_DELAY_SHIFT
;
5204 cur
.t11_t12
= (tmp
- 1) * 1000;
5208 cur
.t11_t12
= ((pp_div
& PANEL_POWER_CYCLE_DELAY_MASK
) >>
5209 PANEL_POWER_CYCLE_DELAY_SHIFT
) * 1000;
5212 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5213 cur
.t1_t3
, cur
.t8
, cur
.t9
, cur
.t10
, cur
.t11_t12
);
5215 vbt
= dev_priv
->vbt
.edp_pps
;
5217 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5218 * our hw here, which are all in 100usec. */
5219 spec
.t1_t3
= 210 * 10;
5220 spec
.t8
= 50 * 10; /* no limit for t8, use t7 instead */
5221 spec
.t9
= 50 * 10; /* no limit for t9, make it symmetric with t8 */
5222 spec
.t10
= 500 * 10;
5223 /* This one is special and actually in units of 100ms, but zero
5224 * based in the hw (so we need to add 100 ms). But the sw vbt
5225 * table multiplies it with 1000 to make it in units of 100usec,
5227 spec
.t11_t12
= (510 + 100) * 10;
5229 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5230 vbt
.t1_t3
, vbt
.t8
, vbt
.t9
, vbt
.t10
, vbt
.t11_t12
);
5232 /* Use the max of the register settings and vbt. If both are
5233 * unset, fall back to the spec limits. */
5234 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5236 max(cur.field, vbt.field))
5237 assign_final(t1_t3
);
5241 assign_final(t11_t12
);
5244 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5245 intel_dp
->panel_power_up_delay
= get_delay(t1_t3
);
5246 intel_dp
->backlight_on_delay
= get_delay(t8
);
5247 intel_dp
->backlight_off_delay
= get_delay(t9
);
5248 intel_dp
->panel_power_down_delay
= get_delay(t10
);
5249 intel_dp
->panel_power_cycle_delay
= get_delay(t11_t12
);
5252 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5253 intel_dp
->panel_power_up_delay
, intel_dp
->panel_power_down_delay
,
5254 intel_dp
->panel_power_cycle_delay
);
5256 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5257 intel_dp
->backlight_on_delay
, intel_dp
->backlight_off_delay
);
5261 intel_dp_init_panel_power_sequencer_registers(struct drm_device
*dev
,
5262 struct intel_dp
*intel_dp
)
5264 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5265 u32 pp_on
, pp_off
, pp_div
, port_sel
= 0;
5266 int div
= HAS_PCH_SPLIT(dev
) ? intel_pch_rawclk(dev
) : intel_hrawclk(dev
);
5267 i915_reg_t pp_on_reg
, pp_off_reg
, pp_div_reg
, pp_ctrl_reg
;
5268 enum port port
= dp_to_dig_port(intel_dp
)->port
;
5269 const struct edp_power_seq
*seq
= &intel_dp
->pps_delays
;
5271 lockdep_assert_held(&dev_priv
->pps_mutex
);
5273 if (IS_BROXTON(dev
)) {
5275 * TODO: BXT has 2 sets of PPS registers.
5276 * Correct Register for Broxton need to be identified
5277 * using VBT. hardcoding for now
5279 pp_ctrl_reg
= BXT_PP_CONTROL(0);
5280 pp_on_reg
= BXT_PP_ON_DELAYS(0);
5281 pp_off_reg
= BXT_PP_OFF_DELAYS(0);
5283 } else if (HAS_PCH_SPLIT(dev
)) {
5284 pp_on_reg
= PCH_PP_ON_DELAYS
;
5285 pp_off_reg
= PCH_PP_OFF_DELAYS
;
5286 pp_div_reg
= PCH_PP_DIVISOR
;
5288 enum pipe pipe
= vlv_power_sequencer_pipe(intel_dp
);
5290 pp_on_reg
= VLV_PIPE_PP_ON_DELAYS(pipe
);
5291 pp_off_reg
= VLV_PIPE_PP_OFF_DELAYS(pipe
);
5292 pp_div_reg
= VLV_PIPE_PP_DIVISOR(pipe
);
5296 * And finally store the new values in the power sequencer. The
5297 * backlight delays are set to 1 because we do manual waits on them. For
5298 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5299 * we'll end up waiting for the backlight off delay twice: once when we
5300 * do the manual sleep, and once when we disable the panel and wait for
5301 * the PP_STATUS bit to become zero.
5303 pp_on
= (seq
->t1_t3
<< PANEL_POWER_UP_DELAY_SHIFT
) |
5304 (1 << PANEL_LIGHT_ON_DELAY_SHIFT
);
5305 pp_off
= (1 << PANEL_LIGHT_OFF_DELAY_SHIFT
) |
5306 (seq
->t10
<< PANEL_POWER_DOWN_DELAY_SHIFT
);
5307 /* Compute the divisor for the pp clock, simply match the Bspec
5309 if (IS_BROXTON(dev
)) {
5310 pp_div
= I915_READ(pp_ctrl_reg
);
5311 pp_div
&= ~BXT_POWER_CYCLE_DELAY_MASK
;
5312 pp_div
|= (DIV_ROUND_UP((seq
->t11_t12
+ 1), 1000)
5313 << BXT_POWER_CYCLE_DELAY_SHIFT
);
5315 pp_div
= ((100 * div
)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT
;
5316 pp_div
|= (DIV_ROUND_UP(seq
->t11_t12
, 1000)
5317 << PANEL_POWER_CYCLE_DELAY_SHIFT
);
5320 /* Haswell doesn't have any port selection bits for the panel
5321 * power sequencer any more. */
5322 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) {
5323 port_sel
= PANEL_PORT_SELECT_VLV(port
);
5324 } else if (HAS_PCH_IBX(dev
) || HAS_PCH_CPT(dev
)) {
5326 port_sel
= PANEL_PORT_SELECT_DPA
;
5328 port_sel
= PANEL_PORT_SELECT_DPD
;
5333 I915_WRITE(pp_on_reg
, pp_on
);
5334 I915_WRITE(pp_off_reg
, pp_off
);
5335 if (IS_BROXTON(dev
))
5336 I915_WRITE(pp_ctrl_reg
, pp_div
);
5338 I915_WRITE(pp_div_reg
, pp_div
);
5340 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5341 I915_READ(pp_on_reg
),
5342 I915_READ(pp_off_reg
),
5344 (I915_READ(pp_ctrl_reg
) & BXT_POWER_CYCLE_DELAY_MASK
) :
5345 I915_READ(pp_div_reg
));
5349 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5351 * @refresh_rate: RR to be programmed
5353 * This function gets called when refresh rate (RR) has to be changed from
5354 * one frequency to another. Switches can be between high and low RR
5355 * supported by the panel or to any other RR based on media playback (in
5356 * this case, RR value needs to be passed from user space).
5358 * The caller of this function needs to take a lock on dev_priv->drrs.
5360 static void intel_dp_set_drrs_state(struct drm_device
*dev
, int refresh_rate
)
5362 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5363 struct intel_encoder
*encoder
;
5364 struct intel_digital_port
*dig_port
= NULL
;
5365 struct intel_dp
*intel_dp
= dev_priv
->drrs
.dp
;
5366 struct intel_crtc_state
*config
= NULL
;
5367 struct intel_crtc
*intel_crtc
= NULL
;
5368 enum drrs_refresh_rate_type index
= DRRS_HIGH_RR
;
5370 if (refresh_rate
<= 0) {
5371 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5375 if (intel_dp
== NULL
) {
5376 DRM_DEBUG_KMS("DRRS not supported.\n");
5381 * FIXME: This needs proper synchronization with psr state for some
5382 * platforms that cannot have PSR and DRRS enabled at the same time.
5385 dig_port
= dp_to_dig_port(intel_dp
);
5386 encoder
= &dig_port
->base
;
5387 intel_crtc
= to_intel_crtc(encoder
->base
.crtc
);
5390 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5394 config
= intel_crtc
->config
;
5396 if (dev_priv
->drrs
.type
< SEAMLESS_DRRS_SUPPORT
) {
5397 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5401 if (intel_dp
->attached_connector
->panel
.downclock_mode
->vrefresh
==
5403 index
= DRRS_LOW_RR
;
5405 if (index
== dev_priv
->drrs
.refresh_rate_type
) {
5407 "DRRS requested for previously set RR...ignoring\n");
5411 if (!intel_crtc
->active
) {
5412 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5416 if (INTEL_INFO(dev
)->gen
>= 8 && !IS_CHERRYVIEW(dev
)) {
5419 intel_dp_set_m_n(intel_crtc
, M1_N1
);
5422 intel_dp_set_m_n(intel_crtc
, M2_N2
);
5426 DRM_ERROR("Unsupported refreshrate type\n");
5428 } else if (INTEL_INFO(dev
)->gen
> 6) {
5429 i915_reg_t reg
= PIPECONF(intel_crtc
->config
->cpu_transcoder
);
5432 val
= I915_READ(reg
);
5433 if (index
> DRRS_HIGH_RR
) {
5434 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
5435 val
|= PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5437 val
|= PIPECONF_EDP_RR_MODE_SWITCH
;
5439 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
5440 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV
;
5442 val
&= ~PIPECONF_EDP_RR_MODE_SWITCH
;
5444 I915_WRITE(reg
, val
);
5447 dev_priv
->drrs
.refresh_rate_type
= index
;
5449 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate
);
5453 * intel_edp_drrs_enable - init drrs struct if supported
5454 * @intel_dp: DP struct
5456 * Initializes frontbuffer_bits and drrs.dp
5458 void intel_edp_drrs_enable(struct intel_dp
*intel_dp
)
5460 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5461 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5462 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5463 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5464 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5466 if (!intel_crtc
->config
->has_drrs
) {
5467 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5471 mutex_lock(&dev_priv
->drrs
.mutex
);
5472 if (WARN_ON(dev_priv
->drrs
.dp
)) {
5473 DRM_ERROR("DRRS already enabled\n");
5477 dev_priv
->drrs
.busy_frontbuffer_bits
= 0;
5479 dev_priv
->drrs
.dp
= intel_dp
;
5482 mutex_unlock(&dev_priv
->drrs
.mutex
);
5486 * intel_edp_drrs_disable - Disable DRRS
5487 * @intel_dp: DP struct
5490 void intel_edp_drrs_disable(struct intel_dp
*intel_dp
)
5492 struct drm_device
*dev
= intel_dp_to_dev(intel_dp
);
5493 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5494 struct intel_digital_port
*dig_port
= dp_to_dig_port(intel_dp
);
5495 struct drm_crtc
*crtc
= dig_port
->base
.base
.crtc
;
5496 struct intel_crtc
*intel_crtc
= to_intel_crtc(crtc
);
5498 if (!intel_crtc
->config
->has_drrs
)
5501 mutex_lock(&dev_priv
->drrs
.mutex
);
5502 if (!dev_priv
->drrs
.dp
) {
5503 mutex_unlock(&dev_priv
->drrs
.mutex
);
5507 if (dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5508 intel_dp_set_drrs_state(dev_priv
->dev
,
5509 intel_dp
->attached_connector
->panel
.
5510 fixed_mode
->vrefresh
);
5512 dev_priv
->drrs
.dp
= NULL
;
5513 mutex_unlock(&dev_priv
->drrs
.mutex
);
5515 cancel_delayed_work_sync(&dev_priv
->drrs
.work
);
5518 static void intel_edp_drrs_downclock_work(struct work_struct
*work
)
5520 struct drm_i915_private
*dev_priv
=
5521 container_of(work
, typeof(*dev_priv
), drrs
.work
.work
);
5522 struct intel_dp
*intel_dp
;
5524 mutex_lock(&dev_priv
->drrs
.mutex
);
5526 intel_dp
= dev_priv
->drrs
.dp
;
5532 * The delayed work can race with an invalidate hence we need to
5536 if (dev_priv
->drrs
.busy_frontbuffer_bits
)
5539 if (dev_priv
->drrs
.refresh_rate_type
!= DRRS_LOW_RR
)
5540 intel_dp_set_drrs_state(dev_priv
->dev
,
5541 intel_dp
->attached_connector
->panel
.
5542 downclock_mode
->vrefresh
);
5545 mutex_unlock(&dev_priv
->drrs
.mutex
);
5549 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5551 * @frontbuffer_bits: frontbuffer plane tracking bits
5553 * This function gets called every time rendering on the given planes starts.
5554 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
5556 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5558 void intel_edp_drrs_invalidate(struct drm_device
*dev
,
5559 unsigned frontbuffer_bits
)
5561 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5562 struct drm_crtc
*crtc
;
5565 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5568 cancel_delayed_work(&dev_priv
->drrs
.work
);
5570 mutex_lock(&dev_priv
->drrs
.mutex
);
5571 if (!dev_priv
->drrs
.dp
) {
5572 mutex_unlock(&dev_priv
->drrs
.mutex
);
5576 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5577 pipe
= to_intel_crtc(crtc
)->pipe
;
5579 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5580 dev_priv
->drrs
.busy_frontbuffer_bits
|= frontbuffer_bits
;
5582 /* invalidate means busy screen hence upclock */
5583 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5584 intel_dp_set_drrs_state(dev_priv
->dev
,
5585 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5586 fixed_mode
->vrefresh
);
5588 mutex_unlock(&dev_priv
->drrs
.mutex
);
5592 * intel_edp_drrs_flush - Restart Idleness DRRS
5594 * @frontbuffer_bits: frontbuffer plane tracking bits
5596 * This function gets called every time rendering on the given planes has
5597 * completed or flip on a crtc is completed. So DRRS should be upclocked
5598 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
5599 * if no other planes are dirty.
5601 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5603 void intel_edp_drrs_flush(struct drm_device
*dev
,
5604 unsigned frontbuffer_bits
)
5606 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5607 struct drm_crtc
*crtc
;
5610 if (dev_priv
->drrs
.type
== DRRS_NOT_SUPPORTED
)
5613 cancel_delayed_work(&dev_priv
->drrs
.work
);
5615 mutex_lock(&dev_priv
->drrs
.mutex
);
5616 if (!dev_priv
->drrs
.dp
) {
5617 mutex_unlock(&dev_priv
->drrs
.mutex
);
5621 crtc
= dp_to_dig_port(dev_priv
->drrs
.dp
)->base
.base
.crtc
;
5622 pipe
= to_intel_crtc(crtc
)->pipe
;
5624 frontbuffer_bits
&= INTEL_FRONTBUFFER_ALL_MASK(pipe
);
5625 dev_priv
->drrs
.busy_frontbuffer_bits
&= ~frontbuffer_bits
;
5627 /* flush means busy screen hence upclock */
5628 if (frontbuffer_bits
&& dev_priv
->drrs
.refresh_rate_type
== DRRS_LOW_RR
)
5629 intel_dp_set_drrs_state(dev_priv
->dev
,
5630 dev_priv
->drrs
.dp
->attached_connector
->panel
.
5631 fixed_mode
->vrefresh
);
5634 * flush also means no more activity hence schedule downclock, if all
5635 * other fbs are quiescent too
5637 if (!dev_priv
->drrs
.busy_frontbuffer_bits
)
5638 schedule_delayed_work(&dev_priv
->drrs
.work
,
5639 msecs_to_jiffies(1000));
5640 mutex_unlock(&dev_priv
->drrs
.mutex
);
5644 * DOC: Display Refresh Rate Switching (DRRS)
5646 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5647 * which enables switching between low and high refresh rates,
5648 * dynamically, based on the usage scenario. This feature is applicable
5649 * for internal panels.
5651 * Indication that the panel supports DRRS is given by the panel EDID, which
5652 * would list multiple refresh rates for one resolution.
5654 * DRRS is of 2 types - static and seamless.
5655 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5656 * (may appear as a blink on screen) and is used in dock-undock scenario.
5657 * Seamless DRRS involves changing RR without any visual effect to the user
5658 * and can be used during normal system usage. This is done by programming
5659 * certain registers.
5661 * Support for static/seamless DRRS may be indicated in the VBT based on
5662 * inputs from the panel spec.
5664 * DRRS saves power by switching to low RR based on usage scenarios.
5667 * The implementation is based on frontbuffer tracking implementation.
5668 * When there is a disturbance on the screen triggered by user activity or a
5669 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5670 * When there is no movement on screen, after a timeout of 1 second, a switch
5671 * to low RR is made.
5672 * For integration with frontbuffer tracking code,
5673 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5675 * DRRS can be further extended to support other internal panels and also
5676 * the scenario of video playback wherein RR is set based on the rate
5677 * requested by userspace.
5681 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5682 * @intel_connector: eDP connector
5683 * @fixed_mode: preferred mode of panel
5685 * This function is called only once at driver load to initialize basic
5689 * Downclock mode if panel supports it, else return NULL.
5690 * DRRS support is determined by the presence of downclock mode (apart
5691 * from VBT setting).
5693 static struct drm_display_mode
*
5694 intel_dp_drrs_init(struct intel_connector
*intel_connector
,
5695 struct drm_display_mode
*fixed_mode
)
5697 struct drm_connector
*connector
= &intel_connector
->base
;
5698 struct drm_device
*dev
= connector
->dev
;
5699 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5700 struct drm_display_mode
*downclock_mode
= NULL
;
5702 INIT_DELAYED_WORK(&dev_priv
->drrs
.work
, intel_edp_drrs_downclock_work
);
5703 mutex_init(&dev_priv
->drrs
.mutex
);
5705 if (INTEL_INFO(dev
)->gen
<= 6) {
5706 DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
5710 if (dev_priv
->vbt
.drrs_type
!= SEAMLESS_DRRS_SUPPORT
) {
5711 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5715 downclock_mode
= intel_find_panel_downclock
5716 (dev
, fixed_mode
, connector
);
5718 if (!downclock_mode
) {
5719 DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
5723 dev_priv
->drrs
.type
= dev_priv
->vbt
.drrs_type
;
5725 dev_priv
->drrs
.refresh_rate_type
= DRRS_HIGH_RR
;
5726 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5727 return downclock_mode
;
5730 static bool intel_edp_init_connector(struct intel_dp
*intel_dp
,
5731 struct intel_connector
*intel_connector
)
5733 struct drm_connector
*connector
= &intel_connector
->base
;
5734 struct intel_digital_port
*intel_dig_port
= dp_to_dig_port(intel_dp
);
5735 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5736 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5737 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5738 struct drm_display_mode
*fixed_mode
= NULL
;
5739 struct drm_display_mode
*downclock_mode
= NULL
;
5741 struct drm_display_mode
*scan
;
5743 enum pipe pipe
= INVALID_PIPE
;
5745 if (!is_edp(intel_dp
))
5749 intel_edp_panel_vdd_sanitize(intel_dp
);
5750 pps_unlock(intel_dp
);
5752 /* Cache DPCD and EDID for edp. */
5753 has_dpcd
= intel_dp_get_dpcd(intel_dp
);
5756 if (intel_dp
->dpcd
[DP_DPCD_REV
] >= 0x11)
5757 dev_priv
->no_aux_handshake
=
5758 intel_dp
->dpcd
[DP_MAX_DOWNSPREAD
] &
5759 DP_NO_AUX_HANDSHAKE_LINK_TRAINING
;
5761 /* if this fails, presume the device is a ghost */
5762 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5766 /* We now know it's not a ghost, init power sequence regs. */
5768 intel_dp_init_panel_power_sequencer_registers(dev
, intel_dp
);
5769 pps_unlock(intel_dp
);
5771 mutex_lock(&dev
->mode_config
.mutex
);
5772 edid
= drm_get_edid(connector
, &intel_dp
->aux
.ddc
);
5774 if (drm_add_edid_modes(connector
, edid
)) {
5775 drm_mode_connector_update_edid_property(connector
,
5777 drm_edid_to_eld(connector
, edid
);
5780 edid
= ERR_PTR(-EINVAL
);
5783 edid
= ERR_PTR(-ENOENT
);
5785 intel_connector
->edid
= edid
;
5787 /* prefer fixed mode from EDID if available */
5788 list_for_each_entry(scan
, &connector
->probed_modes
, head
) {
5789 if ((scan
->type
& DRM_MODE_TYPE_PREFERRED
)) {
5790 fixed_mode
= drm_mode_duplicate(dev
, scan
);
5791 downclock_mode
= intel_dp_drrs_init(
5792 intel_connector
, fixed_mode
);
5797 /* fallback to VBT if available for eDP */
5798 if (!fixed_mode
&& dev_priv
->vbt
.lfp_lvds_vbt_mode
) {
5799 fixed_mode
= drm_mode_duplicate(dev
,
5800 dev_priv
->vbt
.lfp_lvds_vbt_mode
);
5802 fixed_mode
->type
|= DRM_MODE_TYPE_PREFERRED
;
5804 mutex_unlock(&dev
->mode_config
.mutex
);
5806 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) {
5807 intel_dp
->edp_notifier
.notifier_call
= edp_notify_handler
;
5808 register_reboot_notifier(&intel_dp
->edp_notifier
);
5811 * Figure out the current pipe for the initial backlight setup.
5812 * If the current pipe isn't valid, try the PPS pipe, and if that
5813 * fails just assume pipe A.
5815 if (IS_CHERRYVIEW(dev
))
5816 pipe
= DP_PORT_TO_PIPE_CHV(intel_dp
->DP
);
5818 pipe
= PORT_TO_PIPE(intel_dp
->DP
);
5820 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5821 pipe
= intel_dp
->pps_pipe
;
5823 if (pipe
!= PIPE_A
&& pipe
!= PIPE_B
)
5826 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5830 intel_panel_init(&intel_connector
->panel
, fixed_mode
, downclock_mode
);
5831 intel_connector
->panel
.backlight
.power
= intel_edp_backlight_power
;
5832 intel_panel_setup_backlight(connector
, pipe
);
5838 intel_dp_init_connector(struct intel_digital_port
*intel_dig_port
,
5839 struct intel_connector
*intel_connector
)
5841 struct drm_connector
*connector
= &intel_connector
->base
;
5842 struct intel_dp
*intel_dp
= &intel_dig_port
->dp
;
5843 struct intel_encoder
*intel_encoder
= &intel_dig_port
->base
;
5844 struct drm_device
*dev
= intel_encoder
->base
.dev
;
5845 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
5846 enum port port
= intel_dig_port
->port
;
5849 if (WARN(intel_dig_port
->max_lanes
< 1,
5850 "Not enough lanes (%d) for DP on port %c\n",
5851 intel_dig_port
->max_lanes
, port_name(port
)))
5854 intel_dp
->pps_pipe
= INVALID_PIPE
;
5856 /* intel_dp vfuncs */
5857 if (INTEL_INFO(dev
)->gen
>= 9)
5858 intel_dp
->get_aux_clock_divider
= skl_get_aux_clock_divider
;
5859 else if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
5860 intel_dp
->get_aux_clock_divider
= vlv_get_aux_clock_divider
;
5861 else if (IS_HASWELL(dev
) || IS_BROADWELL(dev
))
5862 intel_dp
->get_aux_clock_divider
= hsw_get_aux_clock_divider
;
5863 else if (HAS_PCH_SPLIT(dev
))
5864 intel_dp
->get_aux_clock_divider
= ilk_get_aux_clock_divider
;
5866 intel_dp
->get_aux_clock_divider
= i9xx_get_aux_clock_divider
;
5868 if (INTEL_INFO(dev
)->gen
>= 9)
5869 intel_dp
->get_aux_send_ctl
= skl_get_aux_send_ctl
;
5871 intel_dp
->get_aux_send_ctl
= i9xx_get_aux_send_ctl
;
5874 intel_dp
->prepare_link_retrain
= intel_ddi_prepare_link_retrain
;
5876 /* Preserve the current hw state. */
5877 intel_dp
->DP
= I915_READ(intel_dp
->output_reg
);
5878 intel_dp
->attached_connector
= intel_connector
;
5880 if (intel_dp_is_edp(dev
, port
))
5881 type
= DRM_MODE_CONNECTOR_eDP
;
5883 type
= DRM_MODE_CONNECTOR_DisplayPort
;
5886 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
5887 * for DP the encoder type can be set by the caller to
5888 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
5890 if (type
== DRM_MODE_CONNECTOR_eDP
)
5891 intel_encoder
->type
= INTEL_OUTPUT_EDP
;
5893 /* eDP only on port B and/or C on vlv/chv */
5894 if (WARN_ON((IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
)) &&
5895 is_edp(intel_dp
) && port
!= PORT_B
&& port
!= PORT_C
))
5898 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5899 type
== DRM_MODE_CONNECTOR_eDP
? "eDP" : "DP",
5902 drm_connector_init(dev
, connector
, &intel_dp_connector_funcs
, type
);
5903 drm_connector_helper_add(connector
, &intel_dp_connector_helper_funcs
);
5905 connector
->interlace_allowed
= true;
5906 connector
->doublescan_allowed
= 0;
5908 INIT_DELAYED_WORK(&intel_dp
->panel_vdd_work
,
5909 edp_panel_vdd_work
);
5911 intel_connector_attach_encoder(intel_connector
, intel_encoder
);
5912 drm_connector_register(connector
);
5915 intel_connector
->get_hw_state
= intel_ddi_connector_get_hw_state
;
5917 intel_connector
->get_hw_state
= intel_connector_get_hw_state
;
5918 intel_connector
->unregister
= intel_dp_connector_unregister
;
5920 /* Set up the hotplug pin. */
5923 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5926 intel_encoder
->hpd_pin
= HPD_PORT_B
;
5927 if (IS_BXT_REVID(dev
, 0, BXT_REVID_A1
))
5928 intel_encoder
->hpd_pin
= HPD_PORT_A
;
5931 intel_encoder
->hpd_pin
= HPD_PORT_C
;
5934 intel_encoder
->hpd_pin
= HPD_PORT_D
;
5937 intel_encoder
->hpd_pin
= HPD_PORT_E
;
5943 if (is_edp(intel_dp
)) {
5945 intel_dp_init_panel_power_timestamps(intel_dp
);
5946 if (IS_VALLEYVIEW(dev
) || IS_CHERRYVIEW(dev
))
5947 vlv_initial_power_sequencer_setup(intel_dp
);
5949 intel_dp_init_panel_power_sequencer(dev
, intel_dp
);
5950 pps_unlock(intel_dp
);
5953 ret
= intel_dp_aux_init(intel_dp
, intel_connector
);
5957 /* init MST on ports that can support it */
5958 if (HAS_DP_MST(dev
) &&
5959 (port
== PORT_B
|| port
== PORT_C
|| port
== PORT_D
))
5960 intel_dp_mst_encoder_init(intel_dig_port
,
5961 intel_connector
->base
.base
.id
);
5963 if (!intel_edp_init_connector(intel_dp
, intel_connector
)) {
5964 intel_dp_aux_fini(intel_dp
);
5965 intel_dp_mst_encoder_cleanup(intel_dig_port
);
5969 intel_dp_add_properties(intel_dp
, connector
);
5971 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
5972 * 0xd. Failure to do so will result in spurious interrupts being
5973 * generated on the port when a cable is not attached.
5975 if (IS_G4X(dev
) && !IS_GM45(dev
)) {
5976 u32 temp
= I915_READ(PEG_BAND_GAP_DATA
);
5977 I915_WRITE(PEG_BAND_GAP_DATA
, (temp
& ~0xf) | 0xd);
5980 i915_debugfs_connector_add(connector
);
5985 if (is_edp(intel_dp
)) {
5986 cancel_delayed_work_sync(&intel_dp
->panel_vdd_work
);
5988 * vdd might still be enabled do to the delayed vdd off.
5989 * Make sure vdd is actually turned off here.
5992 edp_panel_vdd_off_sync(intel_dp
);
5993 pps_unlock(intel_dp
);
5995 drm_connector_unregister(connector
);
5996 drm_connector_cleanup(connector
);
6002 intel_dp_init(struct drm_device
*dev
,
6003 i915_reg_t output_reg
, enum port port
)
6005 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6006 struct intel_digital_port
*intel_dig_port
;
6007 struct intel_encoder
*intel_encoder
;
6008 struct drm_encoder
*encoder
;
6009 struct intel_connector
*intel_connector
;
6011 intel_dig_port
= kzalloc(sizeof(*intel_dig_port
), GFP_KERNEL
);
6012 if (!intel_dig_port
)
6015 intel_connector
= intel_connector_alloc();
6016 if (!intel_connector
)
6017 goto err_connector_alloc
;
6019 intel_encoder
= &intel_dig_port
->base
;
6020 encoder
= &intel_encoder
->base
;
6022 if (drm_encoder_init(dev
, &intel_encoder
->base
, &intel_dp_enc_funcs
,
6023 DRM_MODE_ENCODER_TMDS
, NULL
))
6024 goto err_encoder_init
;
6026 intel_encoder
->compute_config
= intel_dp_compute_config
;
6027 intel_encoder
->disable
= intel_disable_dp
;
6028 intel_encoder
->get_hw_state
= intel_dp_get_hw_state
;
6029 intel_encoder
->get_config
= intel_dp_get_config
;
6030 intel_encoder
->suspend
= intel_dp_encoder_suspend
;
6031 if (IS_CHERRYVIEW(dev
)) {
6032 intel_encoder
->pre_pll_enable
= chv_dp_pre_pll_enable
;
6033 intel_encoder
->pre_enable
= chv_pre_enable_dp
;
6034 intel_encoder
->enable
= vlv_enable_dp
;
6035 intel_encoder
->post_disable
= chv_post_disable_dp
;
6036 intel_encoder
->post_pll_disable
= chv_dp_post_pll_disable
;
6037 } else if (IS_VALLEYVIEW(dev
)) {
6038 intel_encoder
->pre_pll_enable
= vlv_dp_pre_pll_enable
;
6039 intel_encoder
->pre_enable
= vlv_pre_enable_dp
;
6040 intel_encoder
->enable
= vlv_enable_dp
;
6041 intel_encoder
->post_disable
= vlv_post_disable_dp
;
6043 intel_encoder
->pre_enable
= g4x_pre_enable_dp
;
6044 intel_encoder
->enable
= g4x_enable_dp
;
6045 if (INTEL_INFO(dev
)->gen
>= 5)
6046 intel_encoder
->post_disable
= ilk_post_disable_dp
;
6049 intel_dig_port
->port
= port
;
6050 dev_priv
->dig_port_map
[port
] = intel_encoder
;
6051 intel_dig_port
->dp
.output_reg
= output_reg
;
6052 intel_dig_port
->max_lanes
= 4;
6054 intel_encoder
->type
= INTEL_OUTPUT_DISPLAYPORT
;
6055 if (IS_CHERRYVIEW(dev
)) {
6057 intel_encoder
->crtc_mask
= 1 << 2;
6059 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1);
6061 intel_encoder
->crtc_mask
= (1 << 0) | (1 << 1) | (1 << 2);
6063 intel_encoder
->cloneable
= 0;
6065 intel_dig_port
->hpd_pulse
= intel_dp_hpd_pulse
;
6066 dev_priv
->hotplug
.irq_port
[port
] = intel_dig_port
;
6068 if (!intel_dp_init_connector(intel_dig_port
, intel_connector
))
6069 goto err_init_connector
;
6074 drm_encoder_cleanup(encoder
);
6076 kfree(intel_connector
);
6077 err_connector_alloc
:
6078 kfree(intel_dig_port
);
6083 void intel_dp_mst_suspend(struct drm_device
*dev
)
6085 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6089 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6090 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6091 if (!intel_dig_port
)
6094 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6095 if (!intel_dig_port
->dp
.can_mst
)
6097 if (intel_dig_port
->dp
.is_mst
)
6098 drm_dp_mst_topology_mgr_suspend(&intel_dig_port
->dp
.mst_mgr
);
6103 void intel_dp_mst_resume(struct drm_device
*dev
)
6105 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
6108 for (i
= 0; i
< I915_MAX_PORTS
; i
++) {
6109 struct intel_digital_port
*intel_dig_port
= dev_priv
->hotplug
.irq_port
[i
];
6110 if (!intel_dig_port
)
6112 if (intel_dig_port
->base
.type
== INTEL_OUTPUT_DISPLAYPORT
) {
6115 if (!intel_dig_port
->dp
.can_mst
)
6118 ret
= drm_dp_mst_topology_mgr_resume(&intel_dig_port
->dp
.mst_mgr
);
6120 intel_dp_check_mst_status(&intel_dig_port
->dp
);