/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);

	*lane_num = intel_dp->lane_count;
	*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}

int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
		       struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	if (intel_connector->panel.fixed_mode)
		return intel_connector->panel.fixed_mode->clock;
	else
		return mode->clock;
}

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

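/*
 * Worked example for the two helpers above, using the same decakilobit
 * units as the comment: a 1920x1080 mode with a 148500 kHz pixel clock at
 * 24bpp needs intel_dp_link_required(148500, 24) == 356400, while two
 * 2.7GHz lanes provide intel_dp_max_data_rate(270000, 2) == 432000, so
 * that mode fits within the link bandwidth.
 */
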
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}

static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

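/*
 * Byte-ordering note for the two helpers above: pack_aux() places the first
 * source byte in the most-significant byte of the 32-bit AUX data register,
 * e.g. pack_aux((uint8_t []){ 0x12, 0x34 }, 2) yields 0x12340000, and
 * unpack_aux(0x12340000, dst, 2) recovers { 0x12, 0x34 }.
 */
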
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = output_reg + 0x10;
	uint32_t ch_data = ch_ctl + 4;
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	if (IS_HASWELL(dev)) {
		switch (intel_dp->port) {
		case PORT_A:
			ch_ctl = DPA_AUX_CH_CTL;
			ch_data = DPA_AUX_CH_DATA1;
			break;
		case PORT_B:
			ch_ctl = PCH_DPB_AUX_CH_CTL;
			ch_data = PCH_DPB_AUX_CH_DATA1;
			break;
		case PORT_C:
			ch_ctl = PCH_DPC_AUX_CH_CTL;
			ch_data = PCH_DPC_AUX_CH_DATA1;
			break;
		case PORT_D:
			ch_ctl = PCH_DPD_AUX_CH_CTL;
			ch_data = PCH_DPD_AUX_CH_DATA1;
			break;
		default:
			BUG();
		}
	}

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_HASWELL(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

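/*
 * Request layout used by the native read above: msg[0] carries the
 * AUX_NATIVE_READ command in its high nibble, msg[1]/msg[2] the 16-bit DPCD
 * address, and msg[3] the requested length minus one.  The reply holds one
 * status byte followed by the data, hence reply_bytes = recv_bytes + 1.
 */
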
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		intel_pch_panel_fitting(dev,
					intel_connector->panel.fitting_mode,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
	mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

	for (clock = 0; clock <= max_clock; clock++) {
		for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

			if (mode_rate <= link_avail) {
				intel_dp->link_bw = bws[clock];
				intel_dp->lane_count = lane_count;
				adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
				DRM_DEBUG_KMS("DP link bw %02x lane "
					      "count %d clock %d bpp %d\n",
					      intel_dp->link_bw, intel_dp->lane_count,
					      adjusted_mode->clock, bpp);
				DRM_DEBUG_KMS("DP link bw required %i available %i\n",
					      mode_rate, link_avail);
				return true;
			}
		}
	}

	return false;
}

struct intel_dp_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > 0xffffff || *den > 0xffffff) {
		*num >>= 1;
		*den >>= 1;
	}
}

static void
intel_dp_compute_m_n(int bpp,
		     int nlanes,
		     int pixel_clock,
		     int link_clock,
		     struct intel_dp_m_n *m_n)
{
	m_n->tu = 64;
	m_n->gmch_m = (pixel_clock * bpp) >> 3;
	m_n->gmch_n = link_clock * nlanes;
	intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
	m_n->link_m = pixel_clock;
	m_n->link_n = link_clock;
	intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}

void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
		 struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_dp *intel_dp;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int lane_count = 4;
	struct intel_dp_m_n m_n;
	int pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;

	/*
	 * Find the lane count in the intel_encoder private
	 */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
		intel_dp = enc_to_intel_dp(&intel_encoder->base);

		if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    intel_encoder->type == INTEL_OUTPUT_EDP)
		{
			lane_count = intel_dp->lane_count;
			break;
		}
	}

	/*
	 * Compute the GMCH and Link ratios. The '3' here is
	 * the number of bytes_per_pixel post-LUT, which we always
	 * set up for 8-bits of R/G/B, or 3 bytes total.
	 */
	intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
			     mode->clock, adjusted_mode->clock, &m_n);

	if (IS_HASWELL(dev)) {
		I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
	} else if (HAS_PCH_SPLIT(dev)) {
		I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
		I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
	} else if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
		I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
	} else {
		I915_WRITE(PIPE_GMCH_DATA_M(pipe),
			   TU_SIZE(m_n.tu) | m_n.gmch_m);
		I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
		I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
		I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
	}
}

void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;

	switch (intel_dp->lane_count) {
	case 1:
		intel_dp->DP |= DP_PORT_WIDTH_1;
		break;
	case 2:
		intel_dp->DP |= DP_PORT_WIDTH_2;
		break;
	case 4:
		intel_dp->DP |= DP_PORT_WIDTH_4;
		break;
	}
	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}
}

#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32 control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_panel_enable_backlight(dev, pipe);
}

void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that
	 * (otherwise link re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* cpu edp may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!is_cpu_edp(intel_dp))
		intel_dp_link_down(intel_dp);
}

static void intel_post_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp)) {
		intel_dp_link_down(intel_dp);
		ironlake_edp_pll_off(intel_dp);
	}
}

static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	ironlake_edp_panel_on(intel_dp);
	ironlake_edp_panel_vdd_off(intel_dp, true);
	intel_dp_complete_link_train(intel_dp);
	ironlake_edp_backlight_on(intel_dp);
}

static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	if (is_cpu_edp(intel_dp))
		ironlake_edp_pll_on(intel_dp);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_HASWELL(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_9_5;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

1544static uint32_t
93f62dad 1545intel_dp_signal_levels(uint8_t train_set)
a4fc5ed6 1546{
3cf2efb1 1547 uint32_t signal_levels = 0;
a4fc5ed6 1548
3cf2efb1 1549 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
a4fc5ed6
KP
1550 case DP_TRAIN_VOLTAGE_SWING_400:
1551 default:
1552 signal_levels |= DP_VOLTAGE_0_4;
1553 break;
1554 case DP_TRAIN_VOLTAGE_SWING_600:
1555 signal_levels |= DP_VOLTAGE_0_6;
1556 break;
1557 case DP_TRAIN_VOLTAGE_SWING_800:
1558 signal_levels |= DP_VOLTAGE_0_8;
1559 break;
1560 case DP_TRAIN_VOLTAGE_SWING_1200:
1561 signal_levels |= DP_VOLTAGE_1_2;
1562 break;
1563 }
3cf2efb1 1564 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
a4fc5ed6
KP
1565 case DP_TRAIN_PRE_EMPHASIS_0:
1566 default:
1567 signal_levels |= DP_PRE_EMPHASIS_0;
1568 break;
1569 case DP_TRAIN_PRE_EMPHASIS_3_5:
1570 signal_levels |= DP_PRE_EMPHASIS_3_5;
1571 break;
1572 case DP_TRAIN_PRE_EMPHASIS_6:
1573 signal_levels |= DP_PRE_EMPHASIS_6;
1574 break;
1575 case DP_TRAIN_PRE_EMPHASIS_9_5:
1576 signal_levels |= DP_PRE_EMPHASIS_9_5;
1577 break;
1578 }
1579 return signal_levels;
1580}
1581
e3421a18
ZW
1582/* Gen6's DP voltage swing and pre-emphasis control */
1583static uint32_t
1584intel_gen6_edp_signal_levels(uint8_t train_set)
1585{
3c5a62b5
YL
1586 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1587 DP_TRAIN_PRE_EMPHASIS_MASK);
1588 switch (signal_levels) {
e3421a18 1589 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
1590 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1591 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1592 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1593 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
e3421a18 1594 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
3c5a62b5
YL
1595 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1596 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
e3421a18 1597 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
3c5a62b5
YL
1598 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1599 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
e3421a18 1600 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
1601 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1602 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 1603 default:
3c5a62b5
YL
1604 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1605 "0x%x\n", signal_levels);
1606 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
1607 }
1608}
1609
1a2eb460
KP
1610/* Gen7's DP voltage swing and pre-emphasis control */
1611static uint32_t
1612intel_gen7_edp_signal_levels(uint8_t train_set)
1613{
1614 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1615 DP_TRAIN_PRE_EMPHASIS_MASK);
1616 switch (signal_levels) {
1617 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1618 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1619 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1620 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1621 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1622 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1623
1624 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1625 return EDP_LINK_TRAIN_600MV_0DB_IVB;
1626 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1627 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1628
1629 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1630 return EDP_LINK_TRAIN_800MV_0DB_IVB;
1631 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1632 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1633
1634 default:
1635 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1636 "0x%x\n", signal_levels);
1637 return EDP_LINK_TRAIN_500MV_0DB_IVB;
1638 }
1639}
1640
d6c0d722
PZ
1641/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1642static uint32_t
1643intel_dp_signal_levels_hsw(uint8_t train_set)
1644{
1645 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1646 DP_TRAIN_PRE_EMPHASIS_MASK);
1647 switch (signal_levels) {
1648 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1649 return DDI_BUF_EMP_400MV_0DB_HSW;
1650 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1651 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1652 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1653 return DDI_BUF_EMP_400MV_6DB_HSW;
1654 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1655 return DDI_BUF_EMP_400MV_9_5DB_HSW;
1656
1657 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1658 return DDI_BUF_EMP_600MV_0DB_HSW;
1659 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1660 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1661 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1662 return DDI_BUF_EMP_600MV_6DB_HSW;
1663
1664 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1665 return DDI_BUF_EMP_800MV_0DB_HSW;
1666 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1667 return DDI_BUF_EMP_800MV_3_5DB_HSW;
1668 default:
1669 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1670 "0x%x\n", signal_levels);
1671 return DDI_BUF_EMP_400MV_0DB_HSW;
1672 }
1673}
1674
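/*
 * Illustrative decode (an added note, assuming the standard DPCD training
 * field layout: voltage swing in bits 1:0, pre-emphasis in bits 4:3):
 * a train_set byte of 0x0a is DP_TRAIN_VOLTAGE_SWING_800 |
 * DP_TRAIN_PRE_EMPHASIS_3_5, which the helpers above translate to
 * EDP_LINK_TRAIN_800MV_3_5DB_IVB on gen7 eDP and
 * DDI_BUF_EMP_800MV_3_5DB_HSW on Haswell.
 */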
a4fc5ed6 1675static bool
ea5b213a 1676intel_dp_set_link_train(struct intel_dp *intel_dp,
a4fc5ed6 1677 uint32_t dp_reg_value,
58e10eb9 1678 uint8_t dp_train_pat)
a4fc5ed6 1679{
30add22d 1680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 1681 struct drm_i915_private *dev_priv = dev->dev_private;
a4fc5ed6 1682 int ret;
d6c0d722 1683 uint32_t temp;
a4fc5ed6 1684
d6c0d722
PZ
1685 if (IS_HASWELL(dev)) {
1686 temp = I915_READ(DP_TP_CTL(intel_dp->port));
1687
1688 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1689 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
1690 else
1691 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
1692
1693 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1694 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1695 case DP_TRAINING_PATTERN_DISABLE:
1696 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1697 I915_WRITE(DP_TP_CTL(intel_dp->port), temp);
1698
1699 if (wait_for((I915_READ(DP_TP_STATUS(intel_dp->port)) &
1700 DP_TP_STATUS_IDLE_DONE), 1))
1701 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1702
1703 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1704 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1705
1706 break;
1707 case DP_TRAINING_PATTERN_1:
1708 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
1709 break;
1710 case DP_TRAINING_PATTERN_2:
1711 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
1712 break;
1713 case DP_TRAINING_PATTERN_3:
1714 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
1715 break;
1716 }
1717 I915_WRITE(DP_TP_CTL(intel_dp->port), temp);
1718
1719 } else if (HAS_PCH_CPT(dev) &&
1720 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
47ea7542
PZ
1721 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1722
1723 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1724 case DP_TRAINING_PATTERN_DISABLE:
1725 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1726 break;
1727 case DP_TRAINING_PATTERN_1:
1728 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1729 break;
1730 case DP_TRAINING_PATTERN_2:
1731 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1732 break;
1733 case DP_TRAINING_PATTERN_3:
1734 DRM_ERROR("DP training pattern 3 not supported\n");
1735 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1736 break;
1737 }
1738
1739 } else {
1740 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1741
1742 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1743 case DP_TRAINING_PATTERN_DISABLE:
1744 dp_reg_value |= DP_LINK_TRAIN_OFF;
1745 break;
1746 case DP_TRAINING_PATTERN_1:
1747 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1748 break;
1749 case DP_TRAINING_PATTERN_2:
1750 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1751 break;
1752 case DP_TRAINING_PATTERN_3:
1753 DRM_ERROR("DP training pattern 3 not supported\n");
1754 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1755 break;
1756 }
1757 }
1758
ea5b213a
CW
1759 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1760 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 1761
ea5b213a 1762 intel_dp_aux_native_write_1(intel_dp,
a4fc5ed6
KP
1763 DP_TRAINING_PATTERN_SET,
1764 dp_train_pat);
1765
47ea7542
PZ
1766 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1767 DP_TRAINING_PATTERN_DISABLE) {
1768 ret = intel_dp_aux_native_write(intel_dp,
1769 DP_TRAINING_LANE0_SET,
1770 intel_dp->train_set,
1771 intel_dp->lane_count);
1772 if (ret != intel_dp->lane_count)
1773 return false;
1774 }
a4fc5ed6
KP
1775
1776 return true;
1777}
1778
33a34e4e 1779/* Enable corresponding port and start training pattern 1 */
c19b0669 1780void
33a34e4e 1781intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 1782{
da63a9f2 1783 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 1784 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
1785 int i;
1786 uint8_t voltage;
1787 bool clock_recovery = false;
cdb0e95b 1788 int voltage_tries, loop_tries;
ea5b213a 1789 uint32_t DP = intel_dp->DP;
a4fc5ed6 1790
c19b0669
PZ
1791 if (IS_HASWELL(dev))
1792 intel_ddi_prepare_link_retrain(encoder);
1793
3cf2efb1
CW
1794 /* Write the link configuration data */
1795 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1796 intel_dp->link_configuration,
1797 DP_LINK_CONFIGURATION_SIZE);
a4fc5ed6
KP
1798
1799 DP |= DP_PORT_EN;
1a2eb460 1800
33a34e4e 1801 memset(intel_dp->train_set, 0, 4);
a4fc5ed6 1802 voltage = 0xff;
cdb0e95b
KP
1803 voltage_tries = 0;
1804 loop_tries = 0;
a4fc5ed6
KP
1805 clock_recovery = false;
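	/*
	 * Added summary of the retry policy in the loop below: the sink's
	 * requested swing/pre-emphasis is applied on every pass; once all
	 * lanes report max swing and the same voltage has been tried five
	 * times, train_set is reset for a full retry, and after five full
	 * retries clock recovery is abandoned.
	 */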
1806 for (;;) {
33a34e4e 1807 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
93f62dad 1808 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 1809 uint32_t signal_levels;
417e822d 1810
d6c0d722
PZ
1811 if (IS_HASWELL(dev)) {
1812 signal_levels = intel_dp_signal_levels_hsw(
1813 intel_dp->train_set[0]);
1814 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1815 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1816 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1817 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1818 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
33a34e4e 1819 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
e3421a18
ZW
1820 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1821 } else {
93f62dad 1822 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
e3421a18
ZW
1823 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1824 }
d6c0d722
PZ
1825 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
1826 signal_levels);
a4fc5ed6 1827
a7c9655f 1828 /* Set training pattern 1 */
47ea7542 1829 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
1830 DP_TRAINING_PATTERN_1 |
1831 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6 1832 break;
a4fc5ed6 1833
a7c9655f 1834 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
1835 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1836 DRM_ERROR("failed to get link status\n");
a4fc5ed6 1837 break;
93f62dad 1838 }
a4fc5ed6 1839
01916270 1840 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 1841 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
1842 clock_recovery = true;
1843 break;
1844 }
1845
1846 /* Check to see if we've tried the max voltage */
1847 for (i = 0; i < intel_dp->lane_count; i++)
1848 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 1849 break;
0d710688 1850 if (i == intel_dp->lane_count && voltage_tries == 5) {
24773670 1851 if (++loop_tries == 5) {
cdb0e95b
KP
1852 DRM_DEBUG_KMS("too many full retries, give up\n");
1853 break;
1854 }
1855 memset(intel_dp->train_set, 0, 4);
1856 voltage_tries = 0;
1857 continue;
1858 }
a4fc5ed6 1859
3cf2efb1 1860 /* Check to see if we've tried the same voltage 5 times */
24773670
CW
1861 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) != voltage) {
1862 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
cdb0e95b 1863 voltage_tries = 0;
24773670
CW
1864 } else
1865 ++voltage_tries;
a4fc5ed6 1866
3cf2efb1 1867 /* Compute new intel_dp->train_set as requested by target */
93f62dad 1868 intel_get_adjust_train(intel_dp, link_status);
a4fc5ed6
KP
1869 }
1870
33a34e4e
JB
1871 intel_dp->DP = DP;
1872}
1873
c19b0669 1874void
33a34e4e
JB
1875intel_dp_complete_link_train(struct intel_dp *intel_dp)
1876{
30add22d 1877 struct drm_device *dev = intel_dp_to_dev(intel_dp);
33a34e4e 1878 bool channel_eq = false;
37f80975 1879 int tries, cr_tries;
33a34e4e
JB
1880 uint32_t DP = intel_dp->DP;
1881
a4fc5ed6
KP
1882 /* channel equalization */
1883 tries = 0;
37f80975 1884 cr_tries = 0;
a4fc5ed6
KP
1885 channel_eq = false;
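	/*
	 * Added summary of the loop below: channel equalization is retried up
	 * to five times; if it keeps failing, or clock recovery is lost,
	 * training restarts from pattern 1, and after more than five such
	 * restarts the link is taken down.
	 */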
1886 for (;;) {
33a34e4e 1887 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
e3421a18 1888 uint32_t signal_levels;
93f62dad 1889 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 1890
37f80975
JB
1891 if (cr_tries > 5) {
1892 DRM_ERROR("failed to train DP, aborting\n");
1893 intel_dp_link_down(intel_dp);
1894 break;
1895 }
1896
d6c0d722
PZ
1897 if (IS_HASWELL(dev)) {
1898 signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
1899 DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
1900 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
1901 signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
1902 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
1903 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
33a34e4e 1904 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
e3421a18
ZW
1905 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1906 } else {
93f62dad 1907 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
e3421a18
ZW
1908 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1909 }
1910
a4fc5ed6 1911 /* channel eq pattern */
47ea7542 1912 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
1913 DP_TRAINING_PATTERN_2 |
1914 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6
KP
1915 break;
1916
a7c9655f 1917 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
93f62dad 1918 if (!intel_dp_get_link_status(intel_dp, link_status))
a4fc5ed6 1919 break;
a4fc5ed6 1920
37f80975 1921 /* Make sure clock is still ok */
01916270 1922 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975
JB
1923 intel_dp_start_link_train(intel_dp);
1924 cr_tries++;
1925 continue;
1926 }
1927
1ffdff13 1928 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
1929 channel_eq = true;
1930 break;
1931 }
a4fc5ed6 1932
37f80975
JB
1933 /* Try 5 times, then try clock recovery if that fails */
1934 if (tries > 5) {
1935 intel_dp_link_down(intel_dp);
1936 intel_dp_start_link_train(intel_dp);
1937 tries = 0;
1938 cr_tries++;
1939 continue;
1940 }
a4fc5ed6 1941
3cf2efb1 1942 /* Compute new intel_dp->train_set as requested by target */
93f62dad 1943 intel_get_adjust_train(intel_dp, link_status);
3cf2efb1 1944 ++tries;
869184a6 1945 }
3cf2efb1 1946
d6c0d722
PZ
1947 if (channel_eq)
1948 DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
1949
47ea7542 1950 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
1951}
1952
1953static void
ea5b213a 1954intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 1955{
da63a9f2
PZ
1956 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1957 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 1958 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 1959 uint32_t DP = intel_dp->DP;
a4fc5ed6 1960
c19b0669
PZ
1961 /*
1962 * DDI code has a strict mode set sequence and we should try to respect
1963 * it, otherwise we might hang the machine in many different ways. So we
1964 * really should be disabling the port only on a complete crtc_disable
1966	 * sequence. This function is only called under two conditions in the
1966	 * DDI code:
1967 * - Link train failed while doing crtc_enable, and on this case we
1968 * really should respect the mode set sequence and wait for a
1969 * crtc_disable.
1970 * - Someone turned the monitor off and intel_dp_check_link_status
1971	 * called us. We don't need to disable the whole port in this case, so
1972 * when someone turns the monitor on again,
1973 * intel_ddi_prepare_link_retrain will take care of redoing the link
1974 * train.
1975 */
1976 if (IS_HASWELL(dev))
1977 return;
1978
0c33d8d7 1979 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
1980 return;
1981
28c97730 1982 DRM_DEBUG_KMS("\n");
32f9d658 1983
1a2eb460 1984 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
e3421a18 1985 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 1986 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18
ZW
1987 } else {
1988 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 1989 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 1990 }
fe255d00 1991 POSTING_READ(intel_dp->output_reg);
5eb08b69 1992
fe255d00 1993 msleep(17);
5eb08b69 1994
493a7081 1995 if (HAS_PCH_IBX(dev) &&
1b39d6f3 1996 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
da63a9f2 1997 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
31acbcc4 1998
5bddd17f
EA
1999 /* Hardware workaround: leaving our transcoder select
2000 * set to transcoder B while it's off will prevent the
2001 * corresponding HDMI output on transcoder A.
2002 *
2003 * Combine this with another hardware workaround:
2004 * transcoder select bit can only be cleared while the
2005 * port is enabled.
2006 */
2007 DP &= ~DP_PIPEB_SELECT;
2008 I915_WRITE(intel_dp->output_reg, DP);
2009
2010 /* Changes to enable or select take place the vblank
2011 * after being written.
2012 */
31acbcc4
CW
2013 if (crtc == NULL) {
2014 /* We can arrive here never having been attached
2015 * to a CRTC, for instance, due to inheriting
2016 * random state from the BIOS.
2017 *
2018 * If the pipe is not running, play safe and
2019 * wait for the clocks to stabilise before
2020 * continuing.
2021 */
2022 POSTING_READ(intel_dp->output_reg);
2023 msleep(50);
2024 } else
2025 intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
5bddd17f
EA
2026 }
2027
832afda6 2028 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
2029 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2030 POSTING_READ(intel_dp->output_reg);
f01eca2e 2031 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
2032}
2033
26d61aad
KP
2034static bool
2035intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 2036{
92fd8fd1 2037 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
b091cd92
AJ
2038 sizeof(intel_dp->dpcd)) == 0)
2039 return false; /* aux transfer failed */
92fd8fd1 2040
b091cd92
AJ
2041 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2042 return false; /* DPCD not present */
2043
2044 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2045 DP_DWN_STRM_PORT_PRESENT))
2046 return true; /* native DP sink */
2047
2048 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2049 return true; /* no per-port downstream info */
2050
2051 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2052 intel_dp->downstream_ports,
2053 DP_MAX_DOWNSTREAM_PORTS) == 0)
2054 return false; /* downstream port status fetch failed */
2055
2056 return true;
92fd8fd1
KP
2057}
2058
0d198328
AJ
2059static void
2060intel_dp_probe_oui(struct intel_dp *intel_dp)
2061{
2062 u8 buf[3];
2063
2064 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2065 return;
2066
351cfc34
DV
2067 ironlake_edp_panel_vdd_on(intel_dp);
2068
0d198328
AJ
2069 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2070 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2071 buf[0], buf[1], buf[2]);
2072
2073 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2074 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2075 buf[0], buf[1], buf[2]);
351cfc34
DV
2076
2077 ironlake_edp_panel_vdd_off(intel_dp, false);
0d198328
AJ
2078}
2079
a60f0e38
JB
2080static bool
2081intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2082{
2083 int ret;
2084
2085 ret = intel_dp_aux_native_read_retry(intel_dp,
2086 DP_DEVICE_SERVICE_IRQ_VECTOR,
2087 sink_irq_vector, 1);
2088 if (!ret)
2089 return false;
2090
2091 return true;
2092}
2093
2094static void
2095intel_dp_handle_test_request(struct intel_dp *intel_dp)
2096{
2097 /* NAK by default */
9324cf7f 2098 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
2099}
2100
a4fc5ed6
KP
2101/*
2102 * According to DP spec
2103 * 5.1.2:
2104 * 1. Read DPCD
2105 * 2. Configure link according to Receiver Capabilities
2106 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2107 * 4. Check link status on receipt of hot-plug interrupt
2108 */
2109
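/*
 * A rough sketch (added note, not an extra code path) of how the helpers in
 * this file map onto those steps:
 *
 *	intel_dp_get_dpcd(intel_dp);             1. read DPCD
 *	intel_dp_start_link_train(intel_dp);     2./3. configure link, clock recovery
 *	intel_dp_complete_link_train(intel_dp);  3. channel equalization
 *	intel_dp_check_link_status(intel_dp);    4. re-check (and retrain) on hot-plug
 */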
2110static void
ea5b213a 2111intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 2112{
da63a9f2 2113 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 2114 u8 sink_irq_vector;
93f62dad 2115 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 2116
da63a9f2 2117 if (!intel_encoder->connectors_active)
d2b996ac 2118 return;
59cd09e1 2119
da63a9f2 2120 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
2121 return;
2122
92fd8fd1 2123 /* Try to read receiver status if the link appears to be up */
93f62dad 2124 if (!intel_dp_get_link_status(intel_dp, link_status)) {
ea5b213a 2125 intel_dp_link_down(intel_dp);
a4fc5ed6
KP
2126 return;
2127 }
2128
92fd8fd1 2129 /* Now read the DPCD to see if it's actually running */
26d61aad 2130 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
2131 intel_dp_link_down(intel_dp);
2132 return;
2133 }
2134
a60f0e38
JB
2135 /* Try to read the source of the interrupt */
2136 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2137 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2138 /* Clear interrupt source */
2139 intel_dp_aux_native_write_1(intel_dp,
2140 DP_DEVICE_SERVICE_IRQ_VECTOR,
2141 sink_irq_vector);
2142
2143 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2144 intel_dp_handle_test_request(intel_dp);
2145 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2146 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2147 }
2148
1ffdff13 2149 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 2150 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
da63a9f2 2151 drm_get_encoder_name(&intel_encoder->base));
33a34e4e
JB
2152 intel_dp_start_link_train(intel_dp);
2153 intel_dp_complete_link_train(intel_dp);
2154 }
a4fc5ed6 2155}
a4fc5ed6 2156
07d3dc18 2157/* XXX this is probably wrong for multiple downstream ports */
71ba9000 2158static enum drm_connector_status
26d61aad 2159intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 2160{
07d3dc18
AJ
2161 uint8_t *dpcd = intel_dp->dpcd;
2162 bool hpd;
2163 uint8_t type;
2164
2165 if (!intel_dp_get_dpcd(intel_dp))
2166 return connector_status_disconnected;
2167
2168 /* if there's no downstream port, we're done */
2169 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2170 return connector_status_connected;
2171
2172 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2173 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2174 if (hpd) {
da131a46 2175 uint8_t reg;
07d3dc18 2176 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
da131a46 2177 &reg, 1))
07d3dc18 2178 return connector_status_unknown;
da131a46
AJ
2179 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2180 : connector_status_disconnected;
07d3dc18
AJ
2181 }
2182
2183 /* If no HPD, poke DDC gently */
2184 if (drm_probe_ddc(&intel_dp->adapter))
26d61aad 2185 return connector_status_connected;
07d3dc18
AJ
2186
2187 /* Well we tried, say unknown for unreliable port types */
2188 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2189 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2190 return connector_status_unknown;
2191
2192 /* Anything else is out of spec, warn and ignore */
2193 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 2194 return connector_status_disconnected;
71ba9000
AJ
2195}
2196
5eb08b69 2197static enum drm_connector_status
a9756bb5 2198ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 2199{
30add22d 2200 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5eb08b69
ZW
2201 enum drm_connector_status status;
2202
fe16d949
CW
2203 /* Can't disconnect eDP, but you can close the lid... */
2204 if (is_edp(intel_dp)) {
30add22d 2205 status = intel_panel_detect(dev);
fe16d949
CW
2206 if (status == connector_status_unknown)
2207 status = connector_status_connected;
2208 return status;
2209 }
01cb9ea6 2210
26d61aad 2211 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
2212}
2213
a4fc5ed6 2214static enum drm_connector_status
a9756bb5 2215g4x_dp_detect(struct intel_dp *intel_dp)
a4fc5ed6 2216{
30add22d 2217 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 2218 struct drm_i915_private *dev_priv = dev->dev_private;
10f76a38 2219 uint32_t bit;
5eb08b69 2220
ea5b213a 2221 switch (intel_dp->output_reg) {
a4fc5ed6 2222 case DP_B:
10f76a38 2223 bit = DPB_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2224 break;
2225 case DP_C:
10f76a38 2226 bit = DPC_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2227 break;
2228 case DP_D:
10f76a38 2229 bit = DPD_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2230 break;
2231 default:
2232 return connector_status_unknown;
2233 }
2234
10f76a38 2235 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
a4fc5ed6
KP
2236 return connector_status_disconnected;
2237
26d61aad 2238 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
2239}
2240
8c241fef
KP
2241static struct edid *
2242intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2243{
9cd300e0 2244 struct intel_connector *intel_connector = to_intel_connector(connector);
d6f24d0f 2245
9cd300e0
JN
2246 /* use cached edid if we have one */
2247 if (intel_connector->edid) {
2248 struct edid *edid;
2249 int size;
2250
2251 /* invalid edid */
2252 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
2253 return NULL;
2254
9cd300e0 2255 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
d6f24d0f
JB
2256 edid = kmalloc(size, GFP_KERNEL);
2257 if (!edid)
2258 return NULL;
2259
9cd300e0 2260 memcpy(edid, intel_connector->edid, size);
d6f24d0f
JB
2261 return edid;
2262 }
8c241fef 2263
9cd300e0 2264 return drm_get_edid(connector, adapter);
8c241fef
KP
2265}
2266
2267static int
2268intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2269{
9cd300e0 2270 struct intel_connector *intel_connector = to_intel_connector(connector);
8c241fef 2271
9cd300e0
JN
2272 /* use cached edid if we have one */
2273 if (intel_connector->edid) {
2274 /* invalid edid */
2275 if (IS_ERR(intel_connector->edid))
2276 return 0;
2277
2278 return intel_connector_update_modes(connector,
2279 intel_connector->edid);
d6f24d0f
JB
2280 }
2281
9cd300e0 2282 return intel_ddc_get_modes(connector, adapter);
8c241fef
KP
2283}
2284
2285
a9756bb5
ZW
2286/**
2287 * Uses the port's hot-plug live-status bits (g4x) or the PCH detection
2288 * path to detect a DP connection.
2289 *
2290 * \return the drm_connector_status of the DP port.
2291 */
2292static enum drm_connector_status
2293intel_dp_detect(struct drm_connector *connector, bool force)
2294{
2295 struct intel_dp *intel_dp = intel_attached_dp(connector);
fa90ecef 2296 struct drm_device *dev = connector->dev;
a9756bb5
ZW
2297 enum drm_connector_status status;
2298 struct edid *edid = NULL;
898076ed 2299 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
a9756bb5
ZW
2300
2301 intel_dp->has_audio = false;
2302
2303 if (HAS_PCH_SPLIT(dev))
2304 status = ironlake_dp_detect(intel_dp);
2305 else
2306 status = g4x_dp_detect(intel_dp);
1b9be9d0 2307
898076ed
JN
2308 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2309 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2310 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
1b9be9d0 2311
a9756bb5
ZW
2312 if (status != connector_status_connected)
2313 return status;
2314
0d198328
AJ
2315 intel_dp_probe_oui(intel_dp);
2316
c3e5f67b
DV
2317 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2318 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
f684960e 2319 } else {
8c241fef 2320 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
f684960e
CW
2321 if (edid) {
2322 intel_dp->has_audio = drm_detect_monitor_audio(edid);
f684960e
CW
2323 kfree(edid);
2324 }
a9756bb5
ZW
2325 }
2326
2327 return connector_status_connected;
a4fc5ed6
KP
2328}
2329
2330static int intel_dp_get_modes(struct drm_connector *connector)
2331{
df0e9248 2332 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e 2333 struct intel_connector *intel_connector = to_intel_connector(connector);
fa90ecef 2334 struct drm_device *dev = connector->dev;
32f9d658 2335 int ret;
a4fc5ed6
KP
2336
2337 /* We should parse the EDID data and find out if it has an audio sink
2338 */
2339
8c241fef 2340 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
f8779fda 2341 if (ret)
32f9d658
ZW
2342 return ret;
2343
f8779fda 2344 /* if eDP has no EDID, fall back to fixed mode */
dd06f90e 2345 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
f8779fda 2346 struct drm_display_mode *mode;
dd06f90e
JN
2347 mode = drm_mode_duplicate(dev,
2348 intel_connector->panel.fixed_mode);
f8779fda 2349 if (mode) {
32f9d658
ZW
2350 drm_mode_probed_add(connector, mode);
2351 return 1;
2352 }
2353 }
2354 return 0;
a4fc5ed6
KP
2355}
2356
1aad7ac0
CW
2357static bool
2358intel_dp_detect_audio(struct drm_connector *connector)
2359{
2360 struct intel_dp *intel_dp = intel_attached_dp(connector);
2361 struct edid *edid;
2362 bool has_audio = false;
2363
8c241fef 2364 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1aad7ac0
CW
2365 if (edid) {
2366 has_audio = drm_detect_monitor_audio(edid);
1aad7ac0
CW
2367 kfree(edid);
2368 }
2369
2370 return has_audio;
2371}
2372
f684960e
CW
2373static int
2374intel_dp_set_property(struct drm_connector *connector,
2375 struct drm_property *property,
2376 uint64_t val)
2377{
e953fd7b 2378 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 2379 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
2380 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2381 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
2382 int ret;
2383
2384 ret = drm_connector_property_set_value(connector, property, val);
2385 if (ret)
2386 return ret;
2387
3f43c48d 2388 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
2389 int i = val;
2390 bool has_audio;
2391
2392 if (i == intel_dp->force_audio)
f684960e
CW
2393 return 0;
2394
1aad7ac0 2395 intel_dp->force_audio = i;
f684960e 2396
c3e5f67b 2397 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
2398 has_audio = intel_dp_detect_audio(connector);
2399 else
c3e5f67b 2400 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
2401
2402 if (has_audio == intel_dp->has_audio)
f684960e
CW
2403 return 0;
2404
1aad7ac0 2405 intel_dp->has_audio = has_audio;
f684960e
CW
2406 goto done;
2407 }
2408
e953fd7b
CW
2409 if (property == dev_priv->broadcast_rgb_property) {
2410 if (val == !!intel_dp->color_range)
2411 return 0;
2412
2413 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
2414 goto done;
2415 }
2416
53b41837
YN
2417 if (is_edp(intel_dp) &&
2418 property == connector->dev->mode_config.scaling_mode_property) {
2419 if (val == DRM_MODE_SCALE_NONE) {
2420 DRM_DEBUG_KMS("no scaling not supported\n");
2421 return -EINVAL;
2422 }
2423
2424 if (intel_connector->panel.fitting_mode == val) {
2425 /* the eDP scaling property is not changed */
2426 return 0;
2427 }
2428 intel_connector->panel.fitting_mode = val;
2429
2430 goto done;
2431 }
2432
f684960e
CW
2433 return -EINVAL;
2434
2435done:
da63a9f2
PZ
2436 if (intel_encoder->base.crtc) {
2437 struct drm_crtc *crtc = intel_encoder->base.crtc;
a6778b3c
DV
2438 intel_set_mode(crtc, &crtc->mode,
2439 crtc->x, crtc->y, crtc->fb);
f684960e
CW
2440 }
2441
2442 return 0;
2443}
2444
a4fc5ed6 2445static void
0206e353 2446intel_dp_destroy(struct drm_connector *connector)
a4fc5ed6 2447{
aaa6fd2a 2448 struct drm_device *dev = connector->dev;
be3cd5e3 2449 struct intel_dp *intel_dp = intel_attached_dp(connector);
1d508706 2450 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 2451
9cd300e0
JN
2452 if (!IS_ERR_OR_NULL(intel_connector->edid))
2453 kfree(intel_connector->edid);
2454
1d508706 2455 if (is_edp(intel_dp)) {
aaa6fd2a 2456 intel_panel_destroy_backlight(dev);
1d508706
JN
2457 intel_panel_fini(&intel_connector->panel);
2458 }
aaa6fd2a 2459
a4fc5ed6
KP
2460 drm_sysfs_connector_remove(connector);
2461 drm_connector_cleanup(connector);
55f78c43 2462 kfree(connector);
a4fc5ed6
KP
2463}
2464
24d05927
DV
2465static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2466{
da63a9f2
PZ
2467 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2468 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927
DV
2469
2470 i2c_del_adapter(&intel_dp->adapter);
2471 drm_encoder_cleanup(encoder);
bd943159
KP
2472 if (is_edp(intel_dp)) {
2473 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2474 ironlake_panel_vdd_off_sync(intel_dp);
2475 }
da63a9f2 2476 kfree(intel_dig_port);
24d05927
DV
2477}
2478
a4fc5ed6 2479static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
a4fc5ed6 2480 .mode_fixup = intel_dp_mode_fixup,
a4fc5ed6 2481 .mode_set = intel_dp_mode_set,
1f703855 2482 .disable = intel_encoder_noop,
a4fc5ed6
KP
2483};
2484
a7902ac5
PZ
2485static const struct drm_encoder_helper_funcs intel_dp_helper_funcs_hsw = {
2486 .mode_fixup = intel_dp_mode_fixup,
2487 .mode_set = intel_ddi_mode_set,
2488 .disable = intel_encoder_noop,
2489};
2490
a4fc5ed6 2491static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 2492 .dpms = intel_connector_dpms,
a4fc5ed6
KP
2493 .detect = intel_dp_detect,
2494 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 2495 .set_property = intel_dp_set_property,
a4fc5ed6
KP
2496 .destroy = intel_dp_destroy,
2497};
2498
2499static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2500 .get_modes = intel_dp_get_modes,
2501 .mode_valid = intel_dp_mode_valid,
df0e9248 2502 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
2503};
2504
a4fc5ed6 2505static const struct drm_encoder_funcs intel_dp_enc_funcs = {
24d05927 2506 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
2507};
2508
995b6762 2509static void
21d40d37 2510intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 2511{
fa90ecef 2512 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
c8110e52 2513
885a5014 2514 intel_dp_check_link_status(intel_dp);
c8110e52 2515}
6207937d 2516
e3421a18
ZW
2517/* Return which DP Port should be selected for Transcoder DP control */
2518int
0206e353 2519intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
2520{
2521 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
2522 struct intel_encoder *intel_encoder;
2523 struct intel_dp *intel_dp;
e3421a18 2524
fa90ecef
PZ
2525 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2526 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 2527
fa90ecef
PZ
2528 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2529 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 2530 return intel_dp->output_reg;
e3421a18 2531 }
ea5b213a 2532
e3421a18
ZW
2533 return -1;
2534}
2535
36e83a18 2536/* check the VBT to see whether the eDP is on DP-D port */
cb0953d7 2537bool intel_dpd_is_edp(struct drm_device *dev)
36e83a18
ZY
2538{
2539 struct drm_i915_private *dev_priv = dev->dev_private;
2540 struct child_device_config *p_child;
2541 int i;
2542
2543 if (!dev_priv->child_dev_num)
2544 return false;
2545
2546 for (i = 0; i < dev_priv->child_dev_num; i++) {
2547 p_child = dev_priv->child_dev + i;
2548
2549 if (p_child->dvo_port == PORT_IDPD &&
2550 p_child->device_type == DEVICE_TYPE_eDP)
2551 return true;
2552 }
2553 return false;
2554}
2555
f684960e
CW
2556static void
2557intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2558{
53b41837
YN
2559 struct intel_connector *intel_connector = to_intel_connector(connector);
2560
3f43c48d 2561 intel_attach_force_audio_property(connector);
e953fd7b 2562 intel_attach_broadcast_rgb_property(connector);
53b41837
YN
2563
2564 if (is_edp(intel_dp)) {
2565 drm_mode_create_scaling_mode_property(connector->dev);
2566 drm_connector_attach_property(
2567 connector,
2568 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
2569 DRM_MODE_SCALE_ASPECT);
2570 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 2571 }
f684960e
CW
2572}
2573
67a54566
DV
2574static void
2575intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2576 struct intel_dp *intel_dp)
2577{
2578 struct drm_i915_private *dev_priv = dev->dev_private;
2579 struct edp_power_seq cur, vbt, spec, final;
2580 u32 pp_on, pp_off, pp_div, pp;
2581
2582 /* Workaround: Need to write PP_CONTROL with the unlock key as
2583 * the very first thing. */
2584 pp = ironlake_get_pp_control(dev_priv);
2585 I915_WRITE(PCH_PP_CONTROL, pp);
2586
2587 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2588 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
2589 pp_div = I915_READ(PCH_PP_DIVISOR);
2590
2591 /* Pull timing values out of registers */
2592 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2593 PANEL_POWER_UP_DELAY_SHIFT;
2594
2595 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2596 PANEL_LIGHT_ON_DELAY_SHIFT;
2597
2598 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2599 PANEL_LIGHT_OFF_DELAY_SHIFT;
2600
2601 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2602 PANEL_POWER_DOWN_DELAY_SHIFT;
2603
2604 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2605 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2606
2607 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2608 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2609
2610 vbt = dev_priv->edp.pps;
2611
2612 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
2613 * our hw here, which are all in 100usec. */
2614 spec.t1_t3 = 210 * 10;
2615 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
2616 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
2617 spec.t10 = 500 * 10;
2618 /* This one is special and actually in units of 100ms, but zero
2619 * based in the hw (so we need to add 100 ms). But the sw vbt
2620 * table multiplies it with 1000 to make it in units of 100usec,
2621 * too. */
2622 spec.t11_t12 = (510 + 100) * 10;
2623
2624 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2625 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2626
2627 /* Use the max of the register settings and vbt. If both are
2628 * unset, fall back to the spec limits. */
2629#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
2630 spec.field : \
2631 max(cur.field, vbt.field))
2632 assign_final(t1_t3);
2633 assign_final(t8);
2634 assign_final(t9);
2635 assign_final(t10);
2636 assign_final(t11_t12);
2637#undef assign_final
2638
2639#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
2640 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2641 intel_dp->backlight_on_delay = get_delay(t8);
2642 intel_dp->backlight_off_delay = get_delay(t9);
2643 intel_dp->panel_power_down_delay = get_delay(t10);
2644 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2645#undef get_delay
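/*
 * Added worked example: the spec limit t1_t3 above is 210 * 10 = 2100
 * hardware units of 100 usec; get_delay() turns that into
 * DIV_ROUND_UP(2100, 10) = 210, i.e. a 210 ms panel_power_up_delay.
 */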
2646
2647 /* And finally store the new values in the power sequencer. */
2648 pp_on = (final.t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2649 (final.t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2650 pp_off = (final.t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2651 (final.t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2652 /* Compute the divisor for the pp clock, simply match the Bspec
2653 * formula. */
2654 pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
2655 << PP_REFERENCE_DIVIDER_SHIFT;
2656 pp_div |= (DIV_ROUND_UP(final.t11_t12, 1000)
2657 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2658
2659 /* Haswell doesn't have any port selection bits for the panel
2660 * power sequencer any more. */
2661 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2662 if (is_cpu_edp(intel_dp))
2663 pp_on |= PANEL_POWER_PORT_DP_A;
2664 else
2665 pp_on |= PANEL_POWER_PORT_DP_D;
2666 }
2667
2668 I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
2669 I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
2670 I915_WRITE(PCH_PP_DIVISOR, pp_div);
2671
2672
2673 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2674 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2675 intel_dp->panel_power_cycle_delay);
2676
2677 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2678 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2679
2680 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2681 I915_READ(PCH_PP_ON_DELAYS),
2682 I915_READ(PCH_PP_OFF_DELAYS),
2683 I915_READ(PCH_PP_DIVISOR));
2684}
2685
a4fc5ed6 2686void
ab9d7c30 2687intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
a4fc5ed6
KP
2688{
2689 struct drm_i915_private *dev_priv = dev->dev_private;
2690 struct drm_connector *connector;
ea5b213a 2691 struct intel_dp *intel_dp;
21d40d37 2692 struct intel_encoder *intel_encoder;
55f78c43 2693 struct intel_connector *intel_connector;
da63a9f2 2694 struct intel_digital_port *intel_dig_port;
f8779fda 2695 struct drm_display_mode *fixed_mode = NULL;
5eb08b69 2696 const char *name = NULL;
b329530c 2697 int type;
a4fc5ed6 2698
da63a9f2
PZ
2699 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
2700 if (!intel_dig_port)
a4fc5ed6
KP
2701 return;
2702
da63a9f2 2703 intel_dp = &intel_dig_port->dp;
3d3dc149 2704 intel_dp->output_reg = output_reg;
ab9d7c30 2705 intel_dp->port = port;
0767935e
DV
2706 /* Preserve the current hw state. */
2707 intel_dp->DP = I915_READ(intel_dp->output_reg);
3d3dc149 2708
55f78c43
ZW
2709 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
2710 if (!intel_connector) {
da63a9f2 2711 kfree(intel_dig_port);
55f78c43
ZW
2712 return;
2713 }
da63a9f2 2714 intel_encoder = &intel_dig_port->base;
dd06f90e 2715 intel_dp->attached_connector = intel_connector;
55f78c43 2716
ea5b213a 2717 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
b329530c 2718 if (intel_dpd_is_edp(dev))
ea5b213a 2719 intel_dp->is_pch_edp = true;
b329530c 2720
19c03924
GB
2721 /*
2722 * FIXME : We need to initialize built-in panels before external panels.
2723 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
2724 */
2725 if (IS_VALLEYVIEW(dev) && output_reg == DP_C) {
2726 type = DRM_MODE_CONNECTOR_eDP;
2727 intel_encoder->type = INTEL_OUTPUT_EDP;
2728 } else if (output_reg == DP_A || is_pch_edp(intel_dp)) {
b329530c
AJ
2729 type = DRM_MODE_CONNECTOR_eDP;
2730 intel_encoder->type = INTEL_OUTPUT_EDP;
2731 } else {
2732 type = DRM_MODE_CONNECTOR_DisplayPort;
2733 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2734 }
2735
55f78c43 2736 connector = &intel_connector->base;
b329530c 2737 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
2738 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
2739
eb1f8e4f
DA
2740 connector->polled = DRM_CONNECTOR_POLL_HPD;
2741
66a9278e 2742 intel_encoder->cloneable = false;
f8aed700 2743
66a9278e
DV
2744 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
2745 ironlake_panel_vdd_work);
6251ec0a 2746
27f8227b 2747 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
ee7b9f93 2748
a4fc5ed6
KP
2749 connector->interlace_allowed = true;
2750 connector->doublescan_allowed = 0;
2751
4ef69c7a 2752 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
a4fc5ed6 2753 DRM_MODE_ENCODER_TMDS);
a7902ac5
PZ
2754
2755 if (IS_HASWELL(dev))
2756 drm_encoder_helper_add(&intel_encoder->base,
2757 &intel_dp_helper_funcs_hsw);
2758 else
2759 drm_encoder_helper_add(&intel_encoder->base,
2760 &intel_dp_helper_funcs);
a4fc5ed6 2761
df0e9248 2762 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6
KP
2763 drm_sysfs_connector_add(connector);
2764
a7902ac5
PZ
2765 if (IS_HASWELL(dev)) {
2766 intel_encoder->enable = intel_enable_ddi;
2767 intel_encoder->pre_enable = intel_ddi_pre_enable;
2768 intel_encoder->disable = intel_disable_ddi;
2769 intel_encoder->post_disable = intel_ddi_post_disable;
2770 intel_encoder->get_hw_state = intel_ddi_get_hw_state;
2771 } else {
2772 intel_encoder->enable = intel_enable_dp;
2773 intel_encoder->pre_enable = intel_pre_enable_dp;
2774 intel_encoder->disable = intel_disable_dp;
2775 intel_encoder->post_disable = intel_post_disable_dp;
2776 intel_encoder->get_hw_state = intel_dp_get_hw_state;
2777 }
19d8fe15 2778 intel_connector->get_hw_state = intel_connector_get_hw_state;
e8cb4558 2779
a4fc5ed6 2780 /* Set up the DDC bus. */
ab9d7c30
PZ
2781 switch (port) {
2782 case PORT_A:
2783 name = "DPDDC-A";
2784 break;
2785 case PORT_B:
2786 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
2787 name = "DPDDC-B";
2788 break;
2789 case PORT_C:
2790 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
2791 name = "DPDDC-C";
2792 break;
2793 case PORT_D:
2794 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
2795 name = "DPDDC-D";
2796 break;
2797 default:
2798 WARN(1, "Invalid port %c\n", port_name(port));
2799 break;
5eb08b69
ZW
2800 }
2801
67a54566
DV
2802 if (is_edp(intel_dp))
2803 intel_dp_init_panel_power_sequencer(dev, intel_dp);
c1f05264
DA
2804
2805 intel_dp_i2c_init(intel_dp, intel_connector, name);
2806
67a54566 2807 /* Cache DPCD and EDID for edp. */
c1f05264
DA
2808 if (is_edp(intel_dp)) {
2809 bool ret;
f8779fda 2810 struct drm_display_mode *scan;
c1f05264 2811 struct edid *edid;
5d613501
JB
2812
2813 ironlake_edp_panel_vdd_on(intel_dp);
59f3e272 2814 ret = intel_dp_get_dpcd(intel_dp);
bd943159 2815 ironlake_edp_panel_vdd_off(intel_dp, false);
99ea7127 2816
59f3e272 2817 if (ret) {
7183dc29
JB
2818 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2819 dev_priv->no_aux_handshake =
2820 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
89667383
JB
2821 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
2822 } else {
3d3dc149 2823 /* if this fails, presume the device is a ghost */
48898b03 2824 DRM_INFO("failed to retrieve link info, disabling eDP\n");
fa90ecef
PZ
2825 intel_dp_encoder_destroy(&intel_encoder->base);
2826 intel_dp_destroy(connector);
3d3dc149 2827 return;
89667383 2828 }
89667383 2829
d6f24d0f
JB
2830 ironlake_edp_panel_vdd_on(intel_dp);
2831 edid = drm_get_edid(connector, &intel_dp->adapter);
2832 if (edid) {
9cd300e0
JN
2833 if (drm_add_edid_modes(connector, edid)) {
2834 drm_mode_connector_update_edid_property(connector, edid);
2835 drm_edid_to_eld(connector, edid);
2836 } else {
2837 kfree(edid);
2838 edid = ERR_PTR(-EINVAL);
2839 }
2840 } else {
2841 edid = ERR_PTR(-ENOENT);
d6f24d0f 2842 }
9cd300e0 2843 intel_connector->edid = edid;
f8779fda
JN
2844
2845 /* prefer fixed mode from EDID if available */
2846 list_for_each_entry(scan, &connector->probed_modes, head) {
2847 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
2848 fixed_mode = drm_mode_duplicate(dev, scan);
2849 break;
2850 }
2851 }
2852
2853 /* fallback to VBT if available for eDP */
2854 if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
2855 fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
2856 if (fixed_mode)
2857 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
2858 }
f8779fda 2859
d6f24d0f
JB
2860 ironlake_edp_panel_vdd_off(intel_dp, false);
2861 }
552fb0b7 2862
21d40d37 2863 intel_encoder->hot_plug = intel_dp_hot_plug;
a4fc5ed6 2864
1d508706 2865 if (is_edp(intel_dp)) {
dd06f90e 2866 intel_panel_init(&intel_connector->panel, fixed_mode);
0657b6b1 2867 intel_panel_setup_backlight(connector);
1d508706 2868 }
32f9d658 2869
f684960e
CW
2870 intel_dp_add_properties(intel_dp, connector);
2871
a4fc5ed6
KP
2872 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
2873 * 0xd. Failure to do so will result in spurious interrupts being
2874 * generated on the port when a cable is not attached.
2875 */
2876 if (IS_G4X(dev) && !IS_GM45(dev)) {
2877 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
2878 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
2879 }
2880}