/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;

	return is_edp(intel_dp) &&
		(port == PORT_A || (port == PORT_C && IS_VALLEYVIEW(dev)));
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/*
 * The units on the numbers in the next two are... bizarre. Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000. At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

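/*
 * Illustrative example (editor's addition, not in the original source): for
 * the 1680x1050R case described above, intel_dp_link_required(119000, 18)
 * evaluates to 214200 decakilobits, which fits within
 * intel_dp_max_data_rate(270000, 1) == 216000, so a single 2.7GHz lane can
 * carry that mode at 18bpp.
 */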
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

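/*
 * Illustrative example (editor's addition, not in the original source): the
 * AUX data registers take the message bytes MSB-first, so
 * pack_aux((uint8_t []){ 0x10, 0x20, 0x30 }, 3) returns 0x10203000, and
 * unpack_aux(0x10203000, dst, 3) recovers { 0x10, 0x20, 0x30 }.
 */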
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(pp_stat_reg),
			      I915_READ(pp_ctrl_reg));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz. So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (HAS_DDI(dev))
			aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
		else if (IS_VALLEYVIEW(dev))
			aux_clock_divider = 100;
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		aux_clock_divider = 74;
	} else if (HAS_PCH_SPLIT(dev)) {
		aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		aux_clock_divider = intel_hrawclk(dev) / 2;
	}

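	/*
	 * Illustrative example (editor's addition, not in the original
	 * source): on a non-PCH platform with a 200MHz hrawclk, the fallback
	 * above gives aux_clock_divider = 200 / 2 = 100, dividing hrawclk
	 * back down to the ~2MHz rate the comment above aims for.
	 */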
	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t msg[20];
	int msg_bytes;
	uint8_t ack;

	intel_dp_check_edp(intel_dp);
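	/*
	 * Illustrative example (editor's addition, not in the original
	 * source): the header assembled below follows the native AUX write
	 * layout, so a one-byte write to DP_SET_POWER (address 0x600) becomes
	 * msg[] = { AUX_NATIVE_WRITE << 4, 0x06, 0x00, 0x00, <data byte> }.
	 */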
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						 struct intel_dp,
						 adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

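	/*
	 * Illustrative example (editor's addition, not in the original
	 * source): a one-byte EDID read (MODE_I2C_READ, address 0x50, no
	 * STOP) is sent as
	 * msg[] = { (AUX_I2C_READ | AUX_I2C_MOT) << 4, 0x00, 0x50, 0x00 },
	 * and on AUX_I2C_REPLY_ACK the data byte comes back in reply[1].
	 */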
	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_config *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_G4X(dev)) {
		if (link_bw == DP_LINK_BW_1_62) {
			pipe_config->dpll.p1 = 2;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.n = 2;
			pipe_config->dpll.m1 = 23;
			pipe_config->dpll.m2 = 8;
		} else {
			pipe_config->dpll.p1 = 1;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.n = 1;
			pipe_config->dpll.m1 = 14;
			pipe_config->dpll.m2 = 2;
		}
		pipe_config->clock_set = true;
	} else if (IS_HASWELL(dev)) {
		/* Haswell has special-purpose DP DDI clocks. */
	} else if (HAS_PCH_SPLIT(dev)) {
		if (link_bw == DP_LINK_BW_1_62) {
			pipe_config->dpll.n = 1;
			pipe_config->dpll.p1 = 2;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.m1 = 12;
			pipe_config->dpll.m2 = 9;
		} else {
			pipe_config->dpll.n = 2;
			pipe_config->dpll.p1 = 1;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.m1 = 14;
			pipe_config->dpll.m2 = 8;
		}
		pipe_config->clock_set = true;
	} else if (IS_VALLEYVIEW(dev)) {
		/* FIXME: Need to figure out optimized DP clocks for vlv. */
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int target_clock, link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}
	/* We need to take the panel's fixed mode into account. */
	target_clock = adjusted_mode->clock;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);

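	/*
	 * Illustrative example (editor's addition, not in the original
	 * source): a 148500kHz mode at 24bpp needs
	 * intel_dp_link_required(148500, 24) == 356400, which is first
	 * satisfied by four lanes of DP_LINK_BW_1_62
	 * (intel_dp_max_data_rate(162000, 4) == 518400), so the walk below
	 * picks that configuration before ever trying 2.7GHz.
	 */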
	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(target_clock, bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
	pipe_config->pipe_bpp = bpp;
	pipe_config->pixel_target_clock = target_clock;

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      adjusted_mode->clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       target_clock, adjusted_mode->clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

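/*
 * Illustrative example (editor's addition, not in the original source): for a
 * 2.7GHz x 4 lane link on a DPCD 1.1+ sink that advertises
 * DP_ENHANCED_FRAME_CAP, intel_dp_init_link_config() below leaves
 * link_configuration[0] == DP_LINK_BW_2_7,
 * link_configuration[1] == (4 | DP_LANE_COUNT_ENHANCED_FRAME_EN) and
 * link_configuration[8] == DP_SET_ANSI_8B10B.
 */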
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (clock < 200000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(intel_crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= intel_crtc->pipe << 29;

		/* don't miss out required setting for eDP */
		if (adjusted_mode->clock < 200000)
			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
		else
			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (intel_crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;

		if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
			/* don't miss out required setting for eDP */
			if (adjusted_mode->clock < 200000)
				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
			else
				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
		}
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

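/*
 * Descriptive note (editor's addition, not in the original source): the
 * mask/value pairs below are consumed by ironlake_wait_panel_status() to poll
 * PP_STATUS until the panel power sequencer reaches the expected state (on
 * and idle, off and idle, or ready for another power cycle).
 */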
#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	control = I915_READ(pp_ctrl_reg);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}

void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
PZ
1221 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1222 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1223 struct drm_device *dev = crtc->dev;
d240f20f
JB
1224 struct drm_i915_private *dev_priv = dev->dev_private;
1225 u32 dpa_ctl;
1226
2bd2ad64
DV
1227 assert_pipe_disabled(dev_priv,
1228 to_intel_crtc(crtc)->pipe);
1229
d240f20f
JB
1230 DRM_DEBUG_KMS("\n");
1231 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1232 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1233 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1234
1235 /* We don't adjust intel_dp->DP while tearing down the link, to
1236 * facilitate link retraining (e.g. after hotplug). Hence clear all
1237 * enable bits here to ensure that we don't enable too much. */
1238 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1239 intel_dp->DP |= DP_PLL_ENABLE;
1240 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
1241 POSTING_READ(DP_A);
1242 udelay(200);
d240f20f
JB
1243}
1244
2bd2ad64 1245static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 1246{
da63a9f2
PZ
1247 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1248 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1249 struct drm_device *dev = crtc->dev;
d240f20f
JB
1250 struct drm_i915_private *dev_priv = dev->dev_private;
1251 u32 dpa_ctl;
1252
2bd2ad64
DV
1253 assert_pipe_disabled(dev_priv,
1254 to_intel_crtc(crtc)->pipe);
1255
d240f20f 1256 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1257 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1258 "dp pll off, should be on\n");
1259 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1260
1261 /* We can't rely on the value tracked for the DP register in
1262 * intel_dp->DP because link_down must not change that (otherwise link
1263 * re-training will fail. */
298b0b39 1264 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 1265 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 1266 POSTING_READ(DP_A);
d240f20f
JB
1267 udelay(200);
1268}
1269
c7ad3810 1270/* If the sink supports it, try to set the power state appropriately */
c19b0669 1271void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
1272{
1273 int ret, i;
1274
1275 /* Should have a valid DPCD by this point */
1276 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1277 return;
1278
1279 if (mode != DRM_MODE_DPMS_ON) {
1280 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1281 DP_SET_POWER_D3);
1282 if (ret != 1)
1283 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1284 } else {
1285 /*
1286 * When turning on, we need to retry for 1ms to give the sink
1287 * time to wake up.
1288 */
1289 for (i = 0; i < 3; i++) {
1290 ret = intel_dp_aux_native_write_1(intel_dp,
1291 DP_SET_POWER,
1292 DP_SET_POWER_D0);
1293 if (ret == 1)
1294 break;
1295 msleep(1);
1296 }
1297 }
1298}
1299
19d8fe15
DV
1300static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1301 enum pipe *pipe)
d240f20f 1302{
19d8fe15
DV
1303 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1304 struct drm_device *dev = encoder->base.dev;
1305 struct drm_i915_private *dev_priv = dev->dev_private;
1306 u32 tmp = I915_READ(intel_dp->output_reg);
1307
1308 if (!(tmp & DP_PORT_EN))
1309 return false;
1310
5d66d5b6 1311 if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15
DV
1312 *pipe = PORT_TO_PIPE_CPT(tmp);
1313 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
1314 *pipe = PORT_TO_PIPE(tmp);
1315 } else {
1316 u32 trans_sel;
1317 u32 trans_dp;
1318 int i;
1319
1320 switch (intel_dp->output_reg) {
1321 case PCH_DP_B:
1322 trans_sel = TRANS_DP_PORT_SEL_B;
1323 break;
1324 case PCH_DP_C:
1325 trans_sel = TRANS_DP_PORT_SEL_C;
1326 break;
1327 case PCH_DP_D:
1328 trans_sel = TRANS_DP_PORT_SEL_D;
1329 break;
1330 default:
1331 return true;
1332 }
1333
1334 for_each_pipe(i) {
1335 trans_dp = I915_READ(TRANS_DP_CTL(i));
1336 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1337 *pipe = i;
1338 return true;
1339 }
1340 }
19d8fe15 1341
4a0833ec
DV
1342 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1343 intel_dp->output_reg);
1344 }
d240f20f 1345
2af8898b 1346 return true;
19d8fe15 1347}
d240f20f 1348
e8cb4558 1349static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 1350{
e8cb4558 1351 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
6cb49835
DV
1352
1353 /* Make sure the panel is off before trying to change the mode. But also
1354 * ensure that we have vdd while we switch off the panel. */
1355 ironlake_edp_panel_vdd_on(intel_dp);
21264c63 1356 ironlake_edp_backlight_off(intel_dp);
c7ad3810 1357 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
35a38556 1358 ironlake_edp_panel_off(intel_dp);
3739850b
DV
1359
1360 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
1361 if (!is_cpu_edp(intel_dp))
1362 intel_dp_link_down(intel_dp);
d240f20f
JB
1363}
1364
2bd2ad64 1365static void intel_post_disable_dp(struct intel_encoder *encoder)
d240f20f 1366{
2bd2ad64 1367 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
b2634017 1368 struct drm_device *dev = encoder->base.dev;
2bd2ad64 1369
3739850b
DV
1370 if (is_cpu_edp(intel_dp)) {
1371 intel_dp_link_down(intel_dp);
b2634017
JB
1372 if (!IS_VALLEYVIEW(dev))
1373 ironlake_edp_pll_off(intel_dp);
3739850b 1374 }
2bd2ad64
DV
1375}
1376
e8cb4558 1377static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 1378{
e8cb4558
DV
1379 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1380 struct drm_device *dev = encoder->base.dev;
1381 struct drm_i915_private *dev_priv = dev->dev_private;
1382 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 1383
0c33d8d7
DV
1384 if (WARN_ON(dp_reg & DP_PORT_EN))
1385 return;
5d613501 1386
97af61f5 1387 ironlake_edp_panel_vdd_on(intel_dp);
f01eca2e 1388 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 1389 intel_dp_start_link_train(intel_dp);
97af61f5 1390 ironlake_edp_panel_on(intel_dp);
bd943159 1391 ironlake_edp_panel_vdd_off(intel_dp, true);
33a34e4e 1392 intel_dp_complete_link_train(intel_dp);
3ab9c637 1393 intel_dp_stop_link_train(intel_dp);
f01eca2e 1394 ironlake_edp_backlight_on(intel_dp);
89b667f8
JB
1395
1396 if (IS_VALLEYVIEW(dev)) {
1397 struct intel_digital_port *dport =
1398 enc_to_dig_port(&encoder->base);
1399 int channel = vlv_dport_to_channel(dport);
1400
1401 vlv_wait_port_ready(dev_priv, channel);
1402 }
d240f20f
JB
1403}
1404
2bd2ad64 1405static void intel_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 1406{
2bd2ad64 1407 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
b2634017 1408 struct drm_device *dev = encoder->base.dev;
89b667f8 1409 struct drm_i915_private *dev_priv = dev->dev_private;
a4fc5ed6 1410
b2634017 1411 if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
2bd2ad64 1412 ironlake_edp_pll_on(intel_dp);
89b667f8
JB
1413
1414 if (IS_VALLEYVIEW(dev)) {
1415 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1416 struct intel_crtc *intel_crtc =
1417 to_intel_crtc(encoder->base.crtc);
1418 int port = vlv_dport_to_channel(dport);
1419 int pipe = intel_crtc->pipe;
1420 u32 val;
1421
1422 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1423
1424 val = intel_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
1425 val = 0;
1426 if (pipe)
1427 val |= (1<<21);
1428 else
1429 val &= ~(1<<21);
1430 val |= 0x001000c4;
1431 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
1432
1433 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
1434 0x00760018);
1435 intel_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
1436 0x00400888);
1437 }
1438}
1439
1440static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1441{
1442 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1443 struct drm_device *dev = encoder->base.dev;
1444 struct drm_i915_private *dev_priv = dev->dev_private;
1445 int port = vlv_dport_to_channel(dport);
1446
1447 if (!IS_VALLEYVIEW(dev))
1448 return;
1449
1450 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1451
1452 /* Program Tx lane resets to default */
1453 intel_dpio_write(dev_priv, DPIO_PCS_TX(port),
1454 DPIO_PCS_TX_LANE2_RESET |
1455 DPIO_PCS_TX_LANE1_RESET);
1456 intel_dpio_write(dev_priv, DPIO_PCS_CLK(port),
1457 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1458 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1459 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1460 DPIO_PCS_CLK_SOFT_RESET);
1461
1462 /* Fix up inter-pair skew failure */
1463 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1464 intel_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1465 intel_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
a4fc5ed6
KP
1466}
1467
1468/*
df0c237d
JB
1469 * Native read with retry for link status and receiver capability reads for
1470 * cases where the sink may still be asleep.
a4fc5ed6
KP
1471 */
1472static bool
df0c237d
JB
1473intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1474 uint8_t *recv, int recv_bytes)
a4fc5ed6 1475{
61da5fab
JB
1476 int ret, i;
1477
df0c237d
JB
1478 /*
1479 * Sinks are *supposed* to come up within 1ms from an off state,
1480 * but we're also supposed to retry 3 times per the spec.
1481 */
61da5fab 1482 for (i = 0; i < 3; i++) {
df0c237d
JB
1483 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1484 recv_bytes);
1485 if (ret == recv_bytes)
61da5fab
JB
1486 return true;
1487 msleep(1);
1488 }
a4fc5ed6 1489
61da5fab 1490 return false;
a4fc5ed6
KP
1491}
1492
1493/*
1494 * Fetch AUX CH registers 0x202 - 0x207 which contain
1495 * link status information
1496 */
1497static bool
93f62dad 1498intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 1499{
df0c237d
JB
1500 return intel_dp_aux_native_read_retry(intel_dp,
1501 DP_LANE0_1_STATUS,
93f62dad 1502 link_status,
df0c237d 1503 DP_LINK_STATUS_SIZE);
a4fc5ed6
KP
1504}
1505
a4fc5ed6
KP
1506#if 0
1507static char *voltage_names[] = {
1508 "0.4V", "0.6V", "0.8V", "1.2V"
1509};
1510static char *pre_emph_names[] = {
1511 "0dB", "3.5dB", "6dB", "9.5dB"
1512};
1513static char *link_train_names[] = {
1514 "pattern 1", "pattern 2", "idle", "off"
1515};
1516#endif
1517
1518/*
1519 * These are source-specific values; current Intel hardware supports
1520 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1521 */
a4fc5ed6
KP
1522
1523static uint8_t
1a2eb460 1524intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 1525{
30add22d 1526 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1a2eb460 1527
e2fa6fba
P
1528 if (IS_VALLEYVIEW(dev))
1529 return DP_TRAIN_VOLTAGE_SWING_1200;
1530 else if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
1a2eb460
KP
1531 return DP_TRAIN_VOLTAGE_SWING_800;
1532 else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1533 return DP_TRAIN_VOLTAGE_SWING_1200;
1534 else
1535 return DP_TRAIN_VOLTAGE_SWING_800;
1536}
1537
1538static uint8_t
1539intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1540{
30add22d 1541 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1a2eb460 1542
22b8bf17 1543 if (HAS_DDI(dev)) {
d6c0d722
PZ
1544 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1545 case DP_TRAIN_VOLTAGE_SWING_400:
1546 return DP_TRAIN_PRE_EMPHASIS_9_5;
1547 case DP_TRAIN_VOLTAGE_SWING_600:
1548 return DP_TRAIN_PRE_EMPHASIS_6;
1549 case DP_TRAIN_VOLTAGE_SWING_800:
1550 return DP_TRAIN_PRE_EMPHASIS_3_5;
1551 case DP_TRAIN_VOLTAGE_SWING_1200:
1552 default:
1553 return DP_TRAIN_PRE_EMPHASIS_0;
1554 }
e2fa6fba
P
1555 } else if (IS_VALLEYVIEW(dev)) {
1556 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1557 case DP_TRAIN_VOLTAGE_SWING_400:
1558 return DP_TRAIN_PRE_EMPHASIS_9_5;
1559 case DP_TRAIN_VOLTAGE_SWING_600:
1560 return DP_TRAIN_PRE_EMPHASIS_6;
1561 case DP_TRAIN_VOLTAGE_SWING_800:
1562 return DP_TRAIN_PRE_EMPHASIS_3_5;
1563 case DP_TRAIN_VOLTAGE_SWING_1200:
1564 default:
1565 return DP_TRAIN_PRE_EMPHASIS_0;
1566 }
1567 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1a2eb460
KP
1568 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1569 case DP_TRAIN_VOLTAGE_SWING_400:
1570 return DP_TRAIN_PRE_EMPHASIS_6;
1571 case DP_TRAIN_VOLTAGE_SWING_600:
1572 case DP_TRAIN_VOLTAGE_SWING_800:
1573 return DP_TRAIN_PRE_EMPHASIS_3_5;
1574 default:
1575 return DP_TRAIN_PRE_EMPHASIS_0;
1576 }
1577 } else {
1578 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1579 case DP_TRAIN_VOLTAGE_SWING_400:
1580 return DP_TRAIN_PRE_EMPHASIS_6;
1581 case DP_TRAIN_VOLTAGE_SWING_600:
1582 return DP_TRAIN_PRE_EMPHASIS_6;
1583 case DP_TRAIN_VOLTAGE_SWING_800:
1584 return DP_TRAIN_PRE_EMPHASIS_3_5;
1585 case DP_TRAIN_VOLTAGE_SWING_1200:
1586 default:
1587 return DP_TRAIN_PRE_EMPHASIS_0;
1588 }
a4fc5ed6
KP
1589 }
1590}
1591
e2fa6fba
P
1592static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1593{
1594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1595 struct drm_i915_private *dev_priv = dev->dev_private;
1596 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1597 unsigned long demph_reg_value, preemph_reg_value,
1598 uniqtranscale_reg_value;
1599 uint8_t train_set = intel_dp->train_set[0];
cece5d58 1600 int port = vlv_dport_to_channel(dport);
e2fa6fba 1601
89b667f8
JB
1602 WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
1603
e2fa6fba
P
1604 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1605 case DP_TRAIN_PRE_EMPHASIS_0:
1606 preemph_reg_value = 0x0004000;
1607 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1608 case DP_TRAIN_VOLTAGE_SWING_400:
1609 demph_reg_value = 0x2B405555;
1610 uniqtranscale_reg_value = 0x552AB83A;
1611 break;
1612 case DP_TRAIN_VOLTAGE_SWING_600:
1613 demph_reg_value = 0x2B404040;
1614 uniqtranscale_reg_value = 0x5548B83A;
1615 break;
1616 case DP_TRAIN_VOLTAGE_SWING_800:
1617 demph_reg_value = 0x2B245555;
1618 uniqtranscale_reg_value = 0x5560B83A;
1619 break;
1620 case DP_TRAIN_VOLTAGE_SWING_1200:
1621 demph_reg_value = 0x2B405555;
1622 uniqtranscale_reg_value = 0x5598DA3A;
1623 break;
1624 default:
1625 return 0;
1626 }
1627 break;
1628 case DP_TRAIN_PRE_EMPHASIS_3_5:
1629 preemph_reg_value = 0x0002000;
1630 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1631 case DP_TRAIN_VOLTAGE_SWING_400:
1632 demph_reg_value = 0x2B404040;
1633 uniqtranscale_reg_value = 0x5552B83A;
1634 break;
1635 case DP_TRAIN_VOLTAGE_SWING_600:
1636 demph_reg_value = 0x2B404848;
1637 uniqtranscale_reg_value = 0x5580B83A;
1638 break;
1639 case DP_TRAIN_VOLTAGE_SWING_800:
1640 demph_reg_value = 0x2B404040;
1641 uniqtranscale_reg_value = 0x55ADDA3A;
1642 break;
1643 default:
1644 return 0;
1645 }
1646 break;
1647 case DP_TRAIN_PRE_EMPHASIS_6:
1648 preemph_reg_value = 0x0000000;
1649 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1650 case DP_TRAIN_VOLTAGE_SWING_400:
1651 demph_reg_value = 0x2B305555;
1652 uniqtranscale_reg_value = 0x5570B83A;
1653 break;
1654 case DP_TRAIN_VOLTAGE_SWING_600:
1655 demph_reg_value = 0x2B2B4040;
1656 uniqtranscale_reg_value = 0x55ADDA3A;
1657 break;
1658 default:
1659 return 0;
1660 }
1661 break;
1662 case DP_TRAIN_PRE_EMPHASIS_9_5:
1663 preemph_reg_value = 0x0006000;
1664 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1665 case DP_TRAIN_VOLTAGE_SWING_400:
1666 demph_reg_value = 0x1B405555;
1667 uniqtranscale_reg_value = 0x55ADDA3A;
1668 break;
1669 default:
1670 return 0;
1671 }
1672 break;
1673 default:
1674 return 0;
1675 }
1676
e2fa6fba
P
1677 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1678 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1679 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
1680 uniqtranscale_reg_value);
1681 intel_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
1682 intel_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1683 intel_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1684 intel_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
e2fa6fba
P
1685
1686 return 0;
1687}
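/*
 * A worked example of the table lookup above (the training request is
 * hypothetical; the register values come straight from the switch):
 * train_set = DP_TRAIN_PRE_EMPHASIS_3_5 | DP_TRAIN_VOLTAGE_SWING_600
 * selects preemph_reg_value = 0x0002000, demph_reg_value = 0x2B404848
 * and uniqtranscale_reg_value = 0x5580B83A, which are then written to
 * the TX swing / PCS override registers of the channel returned by
 * vlv_dport_to_channel(), bracketed by the two DPIO_TX_OCALINIT writes.
 */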
1688
a4fc5ed6 1689static void
93f62dad 1690intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
1691{
1692 uint8_t v = 0;
1693 uint8_t p = 0;
1694 int lane;
1a2eb460
KP
1695 uint8_t voltage_max;
1696 uint8_t preemph_max;
a4fc5ed6 1697
33a34e4e 1698 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
1699 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1700 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
1701
1702 if (this_v > v)
1703 v = this_v;
1704 if (this_p > p)
1705 p = this_p;
1706 }
1707
1a2eb460 1708 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
1709 if (v >= voltage_max)
1710 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 1711
1a2eb460
KP
1712 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1713 if (p >= preemph_max)
1714 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
1715
1716 for (lane = 0; lane < 4; lane++)
33a34e4e 1717 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
1718}
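/*
 * A worked example of the folding above, with hypothetical per-lane
 * requests decoded from link_status: if lane 0 asks for
 * DP_TRAIN_VOLTAGE_SWING_600 / DP_TRAIN_PRE_EMPHASIS_3_5 and lane 1
 * asks for DP_TRAIN_VOLTAGE_SWING_800 / DP_TRAIN_PRE_EMPHASIS_0, the
 * loop keeps the highest request per field, so v ends up at the 800mV
 * level and p at 3.5dB. When v reaches intel_dp_voltage_max() it is
 * tagged with DP_TRAIN_MAX_SWING_REACHED, and p likewise gets
 * DP_TRAIN_MAX_PRE_EMPHASIS_REACHED at intel_dp_pre_emphasis_max(v).
 * The same v | p byte is then written into all four train_set entries,
 * including lanes that asked for less.
 */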
1719
1720static uint32_t
f0a3424e 1721intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 1722{
3cf2efb1 1723 uint32_t signal_levels = 0;
a4fc5ed6 1724
3cf2efb1 1725 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
a4fc5ed6
KP
1726 case DP_TRAIN_VOLTAGE_SWING_400:
1727 default:
1728 signal_levels |= DP_VOLTAGE_0_4;
1729 break;
1730 case DP_TRAIN_VOLTAGE_SWING_600:
1731 signal_levels |= DP_VOLTAGE_0_6;
1732 break;
1733 case DP_TRAIN_VOLTAGE_SWING_800:
1734 signal_levels |= DP_VOLTAGE_0_8;
1735 break;
1736 case DP_TRAIN_VOLTAGE_SWING_1200:
1737 signal_levels |= DP_VOLTAGE_1_2;
1738 break;
1739 }
3cf2efb1 1740 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
a4fc5ed6
KP
1741 case DP_TRAIN_PRE_EMPHASIS_0:
1742 default:
1743 signal_levels |= DP_PRE_EMPHASIS_0;
1744 break;
1745 case DP_TRAIN_PRE_EMPHASIS_3_5:
1746 signal_levels |= DP_PRE_EMPHASIS_3_5;
1747 break;
1748 case DP_TRAIN_PRE_EMPHASIS_6:
1749 signal_levels |= DP_PRE_EMPHASIS_6;
1750 break;
1751 case DP_TRAIN_PRE_EMPHASIS_9_5:
1752 signal_levels |= DP_PRE_EMPHASIS_9_5;
1753 break;
1754 }
1755 return signal_levels;
1756}
1757
e3421a18
ZW
1758/* Gen6's DP voltage swing and pre-emphasis control */
1759static uint32_t
1760intel_gen6_edp_signal_levels(uint8_t train_set)
1761{
3c5a62b5
YL
1762 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1763 DP_TRAIN_PRE_EMPHASIS_MASK);
1764 switch (signal_levels) {
e3421a18 1765 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
1766 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1767 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1768 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1769 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
e3421a18 1770 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
3c5a62b5
YL
1771 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1772 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
e3421a18 1773 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
3c5a62b5
YL
1774 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1775 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
e3421a18 1776 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
1777 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1778 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 1779 default:
3c5a62b5
YL
1780 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1781 "0x%x\n", signal_levels);
1782 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
1783 }
1784}
1785
1a2eb460
KP
1786/* Gen7's DP voltage swing and pre-emphasis control */
1787static uint32_t
1788intel_gen7_edp_signal_levels(uint8_t train_set)
1789{
1790 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1791 DP_TRAIN_PRE_EMPHASIS_MASK);
1792 switch (signal_levels) {
1793 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1794 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1795 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1796 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1797 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1798 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1799
1800 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1801 return EDP_LINK_TRAIN_600MV_0DB_IVB;
1802 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1803 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1804
1805 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1806 return EDP_LINK_TRAIN_800MV_0DB_IVB;
1807 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1808 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1809
1810 default:
1811 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1812 "0x%x\n", signal_levels);
1813 return EDP_LINK_TRAIN_500MV_0DB_IVB;
1814 }
1815}
1816
d6c0d722
PZ
1817/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1818static uint32_t
f0a3424e 1819intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 1820{
d6c0d722
PZ
1821 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1822 DP_TRAIN_PRE_EMPHASIS_MASK);
1823 switch (signal_levels) {
1824 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1825 return DDI_BUF_EMP_400MV_0DB_HSW;
1826 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1827 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1828 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1829 return DDI_BUF_EMP_400MV_6DB_HSW;
1830 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1831 return DDI_BUF_EMP_400MV_9_5DB_HSW;
a4fc5ed6 1832
d6c0d722
PZ
1833 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1834 return DDI_BUF_EMP_600MV_0DB_HSW;
1835 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1836 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1837 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1838 return DDI_BUF_EMP_600MV_6DB_HSW;
a4fc5ed6 1839
d6c0d722
PZ
1840 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1841 return DDI_BUF_EMP_800MV_0DB_HSW;
1842 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1843 return DDI_BUF_EMP_800MV_3_5DB_HSW;
1844 default:
1845 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1846 "0x%x\n", signal_levels);
1847 return DDI_BUF_EMP_400MV_0DB_HSW;
a4fc5ed6 1848 }
a4fc5ed6
KP
1849}
1850
f0a3424e
PZ
1851/* Properly updates "DP" with the correct signal levels. */
1852static void
1853intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1854{
1855 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1856 struct drm_device *dev = intel_dig_port->base.base.dev;
1857 uint32_t signal_levels, mask;
1858 uint8_t train_set = intel_dp->train_set[0];
1859
22b8bf17 1860 if (HAS_DDI(dev)) {
f0a3424e
PZ
1861 signal_levels = intel_hsw_signal_levels(train_set);
1862 mask = DDI_BUF_EMP_MASK;
e2fa6fba
P
1863 } else if (IS_VALLEYVIEW(dev)) {
1864 signal_levels = intel_vlv_signal_levels(intel_dp);
1865 mask = 0;
1866 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
f0a3424e
PZ
1867 signal_levels = intel_gen7_edp_signal_levels(train_set);
1868 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
1869 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1870 signal_levels = intel_gen6_edp_signal_levels(train_set);
1871 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
1872 } else {
1873 signal_levels = intel_gen4_signal_levels(train_set);
1874 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
1875 }
1876
1877 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
1878
1879 *DP = (*DP & ~mask) | signal_levels;
1880}
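/*
 * Example of the register update above for a gen6 CPU eDP port (the
 * train_set value is hypothetical; the mapping follows
 * intel_gen6_edp_signal_levels()): with train_set =
 * DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5 the function
 * picks signal_levels = EDP_LINK_TRAIN_400MV_3_5DB_SNB_B and mask =
 * EDP_LINK_TRAIN_VOL_EMP_MASK_SNB, so (*DP & ~mask) | signal_levels
 * replaces only the swing/pre-emphasis bits and leaves the rest of the
 * port register value untouched.
 */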
1881
a4fc5ed6 1882static bool
ea5b213a 1883intel_dp_set_link_train(struct intel_dp *intel_dp,
a4fc5ed6 1884 uint32_t dp_reg_value,
58e10eb9 1885 uint8_t dp_train_pat)
a4fc5ed6 1886{
174edf1f
PZ
1887 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1888 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 1889 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 1890 enum port port = intel_dig_port->port;
a4fc5ed6
KP
1891 int ret;
1892
22b8bf17 1893 if (HAS_DDI(dev)) {
3ab9c637 1894 uint32_t temp = I915_READ(DP_TP_CTL(port));
d6c0d722
PZ
1895
1896 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1897 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
1898 else
1899 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
1900
1901 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1902 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1903 case DP_TRAINING_PATTERN_DISABLE:
d6c0d722
PZ
1904 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1905
1906 break;
1907 case DP_TRAINING_PATTERN_1:
1908 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
1909 break;
1910 case DP_TRAINING_PATTERN_2:
1911 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
1912 break;
1913 case DP_TRAINING_PATTERN_3:
1914 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
1915 break;
1916 }
174edf1f 1917 I915_WRITE(DP_TP_CTL(port), temp);
d6c0d722
PZ
1918
1919 } else if (HAS_PCH_CPT(dev) &&
1920 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
47ea7542
PZ
1921 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1922
1923 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1924 case DP_TRAINING_PATTERN_DISABLE:
1925 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1926 break;
1927 case DP_TRAINING_PATTERN_1:
1928 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1929 break;
1930 case DP_TRAINING_PATTERN_2:
1931 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1932 break;
1933 case DP_TRAINING_PATTERN_3:
1934 DRM_ERROR("DP training pattern 3 not supported\n");
1935 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1936 break;
1937 }
1938
1939 } else {
1940 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1941
1942 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1943 case DP_TRAINING_PATTERN_DISABLE:
1944 dp_reg_value |= DP_LINK_TRAIN_OFF;
1945 break;
1946 case DP_TRAINING_PATTERN_1:
1947 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1948 break;
1949 case DP_TRAINING_PATTERN_2:
1950 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1951 break;
1952 case DP_TRAINING_PATTERN_3:
1953 DRM_ERROR("DP training pattern 3 not supported\n");
1954 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1955 break;
1956 }
1957 }
1958
ea5b213a
CW
1959 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1960 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 1961
ea5b213a 1962 intel_dp_aux_native_write_1(intel_dp,
a4fc5ed6
KP
1963 DP_TRAINING_PATTERN_SET,
1964 dp_train_pat);
1965
47ea7542
PZ
1966 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1967 DP_TRAINING_PATTERN_DISABLE) {
1968 ret = intel_dp_aux_native_write(intel_dp,
1969 DP_TRAINING_LANE0_SET,
1970 intel_dp->train_set,
1971 intel_dp->lane_count);
1972 if (ret != intel_dp->lane_count)
1973 return false;
1974 }
a4fc5ed6
KP
1975
1976 return true;
1977}
1978
3ab9c637
ID
1979static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
1980{
1981 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1982 struct drm_device *dev = intel_dig_port->base.base.dev;
1983 struct drm_i915_private *dev_priv = dev->dev_private;
1984 enum port port = intel_dig_port->port;
1985 uint32_t val;
1986
1987 if (!HAS_DDI(dev))
1988 return;
1989
1990 val = I915_READ(DP_TP_CTL(port));
1991 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1992 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
1993 I915_WRITE(DP_TP_CTL(port), val);
1994
1995 /*
 1996 * On PORT_A we can only have eDP in SST mode. There, the only reason
 1997 * we need to set idle transmission mode is to work around a HW issue
 1998 * where we enable the pipe while not in idle link-training mode.
 1999 * In this case there is a requirement to wait for a minimum number of
2000 * idle patterns to be sent.
2001 */
2002 if (port == PORT_A)
2003 return;
2004
2005 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2006 1))
2007 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2008}
2009
33a34e4e 2010/* Enable corresponding port and start training pattern 1 */
c19b0669 2011void
33a34e4e 2012intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 2013{
da63a9f2 2014 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 2015 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
2016 int i;
2017 uint8_t voltage;
2018 bool clock_recovery = false;
cdb0e95b 2019 int voltage_tries, loop_tries;
ea5b213a 2020 uint32_t DP = intel_dp->DP;
a4fc5ed6 2021
affa9354 2022 if (HAS_DDI(dev))
c19b0669
PZ
2023 intel_ddi_prepare_link_retrain(encoder);
2024
3cf2efb1
CW
2025 /* Write the link configuration data */
2026 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
2027 intel_dp->link_configuration,
2028 DP_LINK_CONFIGURATION_SIZE);
a4fc5ed6
KP
2029
2030 DP |= DP_PORT_EN;
1a2eb460 2031
33a34e4e 2032 memset(intel_dp->train_set, 0, 4);
a4fc5ed6 2033 voltage = 0xff;
cdb0e95b
KP
2034 voltage_tries = 0;
2035 loop_tries = 0;
a4fc5ed6
KP
2036 clock_recovery = false;
2037 for (;;) {
33a34e4e 2038 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
93f62dad 2039 uint8_t link_status[DP_LINK_STATUS_SIZE];
f0a3424e
PZ
2040
2041 intel_dp_set_signal_levels(intel_dp, &DP);
a4fc5ed6 2042
a7c9655f 2043 /* Set training pattern 1 */
47ea7542 2044 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2045 DP_TRAINING_PATTERN_1 |
2046 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6 2047 break;
a4fc5ed6 2048
a7c9655f 2049 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
2050 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2051 DRM_ERROR("failed to get link status\n");
a4fc5ed6 2052 break;
93f62dad 2053 }
a4fc5ed6 2054
01916270 2055 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 2056 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
2057 clock_recovery = true;
2058 break;
2059 }
2060
2061 /* Check to see if we've tried the max voltage */
2062 for (i = 0; i < intel_dp->lane_count; i++)
2063 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 2064 break;
3b4f819d 2065 if (i == intel_dp->lane_count) {
b06fbda3
DV
2066 ++loop_tries;
2067 if (loop_tries == 5) {
cdb0e95b
KP
2068 DRM_DEBUG_KMS("too many full retries, give up\n");
2069 break;
2070 }
2071 memset(intel_dp->train_set, 0, 4);
2072 voltage_tries = 0;
2073 continue;
2074 }
a4fc5ed6 2075
3cf2efb1 2076 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 2077 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 2078 ++voltage_tries;
b06fbda3
DV
2079 if (voltage_tries == 5) {
2080 DRM_DEBUG_KMS("too many voltage retries, give up\n");
2081 break;
2082 }
2083 } else
2084 voltage_tries = 0;
2085 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 2086
3cf2efb1 2087 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2088 intel_get_adjust_train(intel_dp, link_status);
a4fc5ed6
KP
2089 }
2090
33a34e4e
JB
2091 intel_dp->DP = DP;
2092}
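/*
 * The clock-recovery loop above exits early if an AUX write or link
 * status read fails, and otherwise keeps iterating until one of:
 *  - drm_dp_clock_recovery_ok() reports success,
 *  - all lanes report DP_TRAIN_MAX_SWING_REACHED for the fifth time
 *    (loop_tries == 5), or
 *  - the same voltage swing has been retried five times
 *    (voltage_tries == 5).
 * Between iterations intel_get_adjust_train() folds the sink's adjust
 * requests back into train_set before the levels are reprogrammed.
 */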
2093
c19b0669 2094void
33a34e4e
JB
2095intel_dp_complete_link_train(struct intel_dp *intel_dp)
2096{
33a34e4e 2097 bool channel_eq = false;
37f80975 2098 int tries, cr_tries;
33a34e4e
JB
2099 uint32_t DP = intel_dp->DP;
2100
a4fc5ed6
KP
2101 /* channel equalization */
2102 tries = 0;
37f80975 2103 cr_tries = 0;
a4fc5ed6
KP
2104 channel_eq = false;
2105 for (;;) {
93f62dad 2106 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 2107
37f80975
JB
2108 if (cr_tries > 5) {
2109 DRM_ERROR("failed to train DP, aborting\n");
2110 intel_dp_link_down(intel_dp);
2111 break;
2112 }
2113
f0a3424e 2114 intel_dp_set_signal_levels(intel_dp, &DP);
e3421a18 2115
a4fc5ed6 2116 /* channel eq pattern */
47ea7542 2117 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2118 DP_TRAINING_PATTERN_2 |
2119 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6
KP
2120 break;
2121
a7c9655f 2122 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
93f62dad 2123 if (!intel_dp_get_link_status(intel_dp, link_status))
a4fc5ed6 2124 break;
a4fc5ed6 2125
37f80975 2126 /* Make sure clock is still ok */
01916270 2127 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975
JB
2128 intel_dp_start_link_train(intel_dp);
2129 cr_tries++;
2130 continue;
2131 }
2132
1ffdff13 2133 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
2134 channel_eq = true;
2135 break;
2136 }
a4fc5ed6 2137
37f80975
JB
2138 /* Try 5 times, then try clock recovery if that fails */
2139 if (tries > 5) {
2140 intel_dp_link_down(intel_dp);
2141 intel_dp_start_link_train(intel_dp);
2142 tries = 0;
2143 cr_tries++;
2144 continue;
2145 }
a4fc5ed6 2146
3cf2efb1 2147 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2148 intel_get_adjust_train(intel_dp, link_status);
3cf2efb1 2149 ++tries;
869184a6 2150 }
3cf2efb1 2151
3ab9c637
ID
2152 intel_dp_set_idle_link_train(intel_dp);
2153
2154 intel_dp->DP = DP;
2155
d6c0d722 2156 if (channel_eq)
07f42258 2157 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 2158
3ab9c637
ID
2159}
2160
2161void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2162{
2163 intel_dp_set_link_train(intel_dp, intel_dp->DP,
2164 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
2165}
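/*
 * Typical retrain sequence, as used by intel_dp_check_link_status()
 * below when channel equalization is lost:
 *
 *	intel_dp_start_link_train(intel_dp);
 *	intel_dp_complete_link_train(intel_dp);
 *	intel_dp_stop_link_train(intel_dp);
 *
 * complete_link_train leaves a DDI port in idle transmission mode via
 * intel_dp_set_idle_link_train(); the final stop call then writes
 * DP_TRAINING_PATTERN_DISABLE, which on DDI selects
 * DP_TP_CTL_LINK_TRAIN_NORMAL and tells the sink training is over.
 */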
2166
2167static void
ea5b213a 2168intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 2169{
da63a9f2
PZ
2170 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2171 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2172 struct drm_i915_private *dev_priv = dev->dev_private;
ab527efc
DV
2173 struct intel_crtc *intel_crtc =
2174 to_intel_crtc(intel_dig_port->base.base.crtc);
ea5b213a 2175 uint32_t DP = intel_dp->DP;
a4fc5ed6 2176
c19b0669
PZ
2177 /*
2178 * DDI code has a strict mode set sequence and we should try to respect
2179 * it, otherwise we might hang the machine in many different ways. So we
2180 * really should be disabling the port only on a complete crtc_disable
 2181 * sequence. On DDI, this function is only called under two
 2182 * conditions:
2183 * - Link train failed while doing crtc_enable, and on this case we
2184 * really should respect the mode set sequence and wait for a
2185 * crtc_disable.
2186 * - Someone turned the monitor off and intel_dp_check_link_status
 2187 * called us. We don't need to disable the whole port in this case, so
2188 * when someone turns the monitor on again,
2189 * intel_ddi_prepare_link_retrain will take care of redoing the link
2190 * train.
2191 */
affa9354 2192 if (HAS_DDI(dev))
c19b0669
PZ
2193 return;
2194
0c33d8d7 2195 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
2196 return;
2197
28c97730 2198 DRM_DEBUG_KMS("\n");
32f9d658 2199
1a2eb460 2200 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
e3421a18 2201 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 2202 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18
ZW
2203 } else {
2204 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 2205 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 2206 }
fe255d00 2207 POSTING_READ(intel_dp->output_reg);
5eb08b69 2208
ab527efc
DV
2209 /* We don't really know why we're doing this */
2210 intel_wait_for_vblank(dev, intel_crtc->pipe);
5eb08b69 2211
493a7081 2212 if (HAS_PCH_IBX(dev) &&
1b39d6f3 2213 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
da63a9f2 2214 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
31acbcc4 2215
5bddd17f
EA
2216 /* Hardware workaround: leaving our transcoder select
2217 * set to transcoder B while it's off will prevent the
2218 * corresponding HDMI output on transcoder A.
2219 *
2220 * Combine this with another hardware workaround:
2221 * transcoder select bit can only be cleared while the
2222 * port is enabled.
2223 */
2224 DP &= ~DP_PIPEB_SELECT;
2225 I915_WRITE(intel_dp->output_reg, DP);
2226
2227 /* Changes to enable or select take place the vblank
2228 * after being written.
2229 */
ff50afe9
DV
2230 if (WARN_ON(crtc == NULL)) {
2231 /* We should never try to disable a port without a crtc
2232 * attached. For paranoia keep the code around for a
2233 * bit. */
31acbcc4
CW
2234 POSTING_READ(intel_dp->output_reg);
2235 msleep(50);
2236 } else
ab527efc 2237 intel_wait_for_vblank(dev, intel_crtc->pipe);
5bddd17f
EA
2238 }
2239
832afda6 2240 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
2241 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2242 POSTING_READ(intel_dp->output_reg);
f01eca2e 2243 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
2244}
2245
26d61aad
KP
2246static bool
2247intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 2248{
577c7a50
DL
2249 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2250
92fd8fd1 2251 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
edb39244
AJ
2252 sizeof(intel_dp->dpcd)) == 0)
2253 return false; /* aux transfer failed */
92fd8fd1 2254
577c7a50
DL
2255 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2256 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2257 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2258
edb39244
AJ
2259 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2260 return false; /* DPCD not present */
2261
2262 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2263 DP_DWN_STRM_PORT_PRESENT))
2264 return true; /* native DP sink */
2265
2266 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2267 return true; /* no per-port downstream info */
2268
2269 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2270 intel_dp->downstream_ports,
2271 DP_MAX_DOWNSTREAM_PORTS) == 0)
2272 return false; /* downstream port status fetch failed */
2273
2274 return true;
92fd8fd1
KP
2275}
2276
0d198328
AJ
2277static void
2278intel_dp_probe_oui(struct intel_dp *intel_dp)
2279{
2280 u8 buf[3];
2281
2282 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2283 return;
2284
351cfc34
DV
2285 ironlake_edp_panel_vdd_on(intel_dp);
2286
0d198328
AJ
2287 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2288 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2289 buf[0], buf[1], buf[2]);
2290
2291 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2292 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2293 buf[0], buf[1], buf[2]);
351cfc34
DV
2294
2295 ironlake_edp_panel_vdd_off(intel_dp, false);
0d198328
AJ
2296}
2297
a60f0e38
JB
2298static bool
2299intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2300{
2301 int ret;
2302
2303 ret = intel_dp_aux_native_read_retry(intel_dp,
2304 DP_DEVICE_SERVICE_IRQ_VECTOR,
2305 sink_irq_vector, 1);
2306 if (!ret)
2307 return false;
2308
2309 return true;
2310}
2311
2312static void
2313intel_dp_handle_test_request(struct intel_dp *intel_dp)
2314{
2315 /* NAK by default */
9324cf7f 2316 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
2317}
2318
a4fc5ed6
KP
2319/*
2320 * According to DP spec
2321 * 5.1.2:
2322 * 1. Read DPCD
2323 * 2. Configure link according to Receiver Capabilities
2324 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2325 * 4. Check link status on receipt of hot-plug interrupt
2326 */
2327
00c09d70 2328void
ea5b213a 2329intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 2330{
da63a9f2 2331 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 2332 u8 sink_irq_vector;
93f62dad 2333 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 2334
da63a9f2 2335 if (!intel_encoder->connectors_active)
d2b996ac 2336 return;
59cd09e1 2337
da63a9f2 2338 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
2339 return;
2340
92fd8fd1 2341 /* Try to read receiver status if the link appears to be up */
93f62dad 2342 if (!intel_dp_get_link_status(intel_dp, link_status)) {
ea5b213a 2343 intel_dp_link_down(intel_dp);
a4fc5ed6
KP
2344 return;
2345 }
2346
92fd8fd1 2347 /* Now read the DPCD to see if it's actually running */
26d61aad 2348 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
2349 intel_dp_link_down(intel_dp);
2350 return;
2351 }
2352
a60f0e38
JB
2353 /* Try to read the source of the interrupt */
2354 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2355 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2356 /* Clear interrupt source */
2357 intel_dp_aux_native_write_1(intel_dp,
2358 DP_DEVICE_SERVICE_IRQ_VECTOR,
2359 sink_irq_vector);
2360
2361 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2362 intel_dp_handle_test_request(intel_dp);
2363 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2364 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2365 }
2366
1ffdff13 2367 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 2368 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
da63a9f2 2369 drm_get_encoder_name(&intel_encoder->base));
33a34e4e
JB
2370 intel_dp_start_link_train(intel_dp);
2371 intel_dp_complete_link_train(intel_dp);
3ab9c637 2372 intel_dp_stop_link_train(intel_dp);
33a34e4e 2373 }
a4fc5ed6 2374}
a4fc5ed6 2375
caf9ab24 2376/* XXX this is probably wrong for multiple downstream ports */
71ba9000 2377static enum drm_connector_status
26d61aad 2378intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 2379{
caf9ab24
AJ
2380 uint8_t *dpcd = intel_dp->dpcd;
2381 bool hpd;
2382 uint8_t type;
2383
2384 if (!intel_dp_get_dpcd(intel_dp))
2385 return connector_status_disconnected;
2386
2387 /* if there's no downstream port, we're done */
2388 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 2389 return connector_status_connected;
caf9ab24
AJ
2390
2391 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2392 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2393 if (hpd) {
23235177 2394 uint8_t reg;
caf9ab24 2395 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
23235177 2396 &reg, 1))
caf9ab24 2397 return connector_status_unknown;
23235177
AJ
2398 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2399 : connector_status_disconnected;
caf9ab24
AJ
2400 }
2401
2402 /* If no HPD, poke DDC gently */
2403 if (drm_probe_ddc(&intel_dp->adapter))
26d61aad 2404 return connector_status_connected;
caf9ab24
AJ
2405
2406 /* Well we tried, say unknown for unreliable port types */
2407 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2408 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2409 return connector_status_unknown;
2410
2411 /* Anything else is out of spec, warn and ignore */
2412 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 2413 return connector_status_disconnected;
71ba9000
AJ
2414}
2415
5eb08b69 2416static enum drm_connector_status
a9756bb5 2417ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 2418{
30add22d 2419 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
2420 struct drm_i915_private *dev_priv = dev->dev_private;
2421 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5eb08b69
ZW
2422 enum drm_connector_status status;
2423
fe16d949
CW
2424 /* Can't disconnect eDP, but you can close the lid... */
2425 if (is_edp(intel_dp)) {
30add22d 2426 status = intel_panel_detect(dev);
fe16d949
CW
2427 if (status == connector_status_unknown)
2428 status = connector_status_connected;
2429 return status;
2430 }
01cb9ea6 2431
1b469639
DL
2432 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2433 return connector_status_disconnected;
2434
26d61aad 2435 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
2436}
2437
a4fc5ed6 2438static enum drm_connector_status
a9756bb5 2439g4x_dp_detect(struct intel_dp *intel_dp)
a4fc5ed6 2440{
30add22d 2441 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 2442 struct drm_i915_private *dev_priv = dev->dev_private;
34f2be46 2443 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
10f76a38 2444 uint32_t bit;
5eb08b69 2445
35aad75f
JB
2446 /* Can't disconnect eDP, but you can close the lid... */
2447 if (is_edp(intel_dp)) {
2448 enum drm_connector_status status;
2449
2450 status = intel_panel_detect(dev);
2451 if (status == connector_status_unknown)
2452 status = connector_status_connected;
2453 return status;
2454 }
2455
34f2be46
VS
2456 switch (intel_dig_port->port) {
2457 case PORT_B:
26739f12 2458 bit = PORTB_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2459 break;
34f2be46 2460 case PORT_C:
26739f12 2461 bit = PORTC_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2462 break;
34f2be46 2463 case PORT_D:
26739f12 2464 bit = PORTD_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2465 break;
2466 default:
2467 return connector_status_unknown;
2468 }
2469
10f76a38 2470 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
a4fc5ed6
KP
2471 return connector_status_disconnected;
2472
26d61aad 2473 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
2474}
2475
8c241fef
KP
2476static struct edid *
2477intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2478{
9cd300e0 2479 struct intel_connector *intel_connector = to_intel_connector(connector);
d6f24d0f 2480
9cd300e0
JN
2481 /* use cached edid if we have one */
2482 if (intel_connector->edid) {
2483 struct edid *edid;
2484 int size;
2485
2486 /* invalid edid */
2487 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
2488 return NULL;
2489
9cd300e0 2490 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
d6f24d0f
JB
2491 edid = kmalloc(size, GFP_KERNEL);
2492 if (!edid)
2493 return NULL;
2494
9cd300e0 2495 memcpy(edid, intel_connector->edid, size);
d6f24d0f
JB
2496 return edid;
2497 }
8c241fef 2498
9cd300e0 2499 return drm_get_edid(connector, adapter);
8c241fef
KP
2500}
2501
2502static int
2503intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2504{
9cd300e0 2505 struct intel_connector *intel_connector = to_intel_connector(connector);
8c241fef 2506
9cd300e0
JN
2507 /* use cached edid if we have one */
2508 if (intel_connector->edid) {
2509 /* invalid edid */
2510 if (IS_ERR(intel_connector->edid))
2511 return 0;
2512
2513 return intel_connector_update_modes(connector,
2514 intel_connector->edid);
d6f24d0f
JB
2515 }
2516
9cd300e0 2517 return intel_ddc_get_modes(connector, adapter);
8c241fef
KP
2518}
2519
a9756bb5
ZW
2520static enum drm_connector_status
2521intel_dp_detect(struct drm_connector *connector, bool force)
2522{
2523 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
2524 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2525 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 2526 struct drm_device *dev = connector->dev;
a9756bb5
ZW
2527 enum drm_connector_status status;
2528 struct edid *edid = NULL;
2529
2530 intel_dp->has_audio = false;
2531
2532 if (HAS_PCH_SPLIT(dev))
2533 status = ironlake_dp_detect(intel_dp);
2534 else
2535 status = g4x_dp_detect(intel_dp);
1b9be9d0 2536
a9756bb5
ZW
2537 if (status != connector_status_connected)
2538 return status;
2539
0d198328
AJ
2540 intel_dp_probe_oui(intel_dp);
2541
c3e5f67b
DV
2542 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2543 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
f684960e 2544 } else {
8c241fef 2545 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
f684960e
CW
2546 if (edid) {
2547 intel_dp->has_audio = drm_detect_monitor_audio(edid);
f684960e
CW
2548 kfree(edid);
2549 }
a9756bb5
ZW
2550 }
2551
d63885da
PZ
2552 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2553 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
a9756bb5 2554 return connector_status_connected;
a4fc5ed6
KP
2555}
2556
2557static int intel_dp_get_modes(struct drm_connector *connector)
2558{
df0e9248 2559 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e 2560 struct intel_connector *intel_connector = to_intel_connector(connector);
fa90ecef 2561 struct drm_device *dev = connector->dev;
32f9d658 2562 int ret;
a4fc5ed6
KP
2563
2564 /* We should parse the EDID data and find out if it has an audio sink
2565 */
2566
8c241fef 2567 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
f8779fda 2568 if (ret)
32f9d658
ZW
2569 return ret;
2570
f8779fda 2571 /* if eDP has no EDID, fall back to fixed mode */
dd06f90e 2572 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
f8779fda 2573 struct drm_display_mode *mode;
dd06f90e
JN
2574 mode = drm_mode_duplicate(dev,
2575 intel_connector->panel.fixed_mode);
f8779fda 2576 if (mode) {
32f9d658
ZW
2577 drm_mode_probed_add(connector, mode);
2578 return 1;
2579 }
2580 }
2581 return 0;
a4fc5ed6
KP
2582}
2583
1aad7ac0
CW
2584static bool
2585intel_dp_detect_audio(struct drm_connector *connector)
2586{
2587 struct intel_dp *intel_dp = intel_attached_dp(connector);
2588 struct edid *edid;
2589 bool has_audio = false;
2590
8c241fef 2591 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1aad7ac0
CW
2592 if (edid) {
2593 has_audio = drm_detect_monitor_audio(edid);
1aad7ac0
CW
2594 kfree(edid);
2595 }
2596
2597 return has_audio;
2598}
2599
f684960e
CW
2600static int
2601intel_dp_set_property(struct drm_connector *connector,
2602 struct drm_property *property,
2603 uint64_t val)
2604{
e953fd7b 2605 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 2606 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
2607 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2608 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
2609 int ret;
2610
662595df 2611 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
2612 if (ret)
2613 return ret;
2614
3f43c48d 2615 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
2616 int i = val;
2617 bool has_audio;
2618
2619 if (i == intel_dp->force_audio)
f684960e
CW
2620 return 0;
2621
1aad7ac0 2622 intel_dp->force_audio = i;
f684960e 2623
c3e5f67b 2624 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
2625 has_audio = intel_dp_detect_audio(connector);
2626 else
c3e5f67b 2627 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
2628
2629 if (has_audio == intel_dp->has_audio)
f684960e
CW
2630 return 0;
2631
1aad7ac0 2632 intel_dp->has_audio = has_audio;
f684960e
CW
2633 goto done;
2634 }
2635
e953fd7b 2636 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
2637 bool old_auto = intel_dp->color_range_auto;
2638 uint32_t old_range = intel_dp->color_range;
2639
55bc60db
VS
2640 switch (val) {
2641 case INTEL_BROADCAST_RGB_AUTO:
2642 intel_dp->color_range_auto = true;
2643 break;
2644 case INTEL_BROADCAST_RGB_FULL:
2645 intel_dp->color_range_auto = false;
2646 intel_dp->color_range = 0;
2647 break;
2648 case INTEL_BROADCAST_RGB_LIMITED:
2649 intel_dp->color_range_auto = false;
2650 intel_dp->color_range = DP_COLOR_RANGE_16_235;
2651 break;
2652 default:
2653 return -EINVAL;
2654 }
ae4edb80
DV
2655
2656 if (old_auto == intel_dp->color_range_auto &&
2657 old_range == intel_dp->color_range)
2658 return 0;
2659
e953fd7b
CW
2660 goto done;
2661 }
2662
53b41837
YN
2663 if (is_edp(intel_dp) &&
2664 property == connector->dev->mode_config.scaling_mode_property) {
2665 if (val == DRM_MODE_SCALE_NONE) {
2666 DRM_DEBUG_KMS("no scaling not supported\n");
2667 return -EINVAL;
2668 }
2669
2670 if (intel_connector->panel.fitting_mode == val) {
2671 /* the eDP scaling property is not changed */
2672 return 0;
2673 }
2674 intel_connector->panel.fitting_mode = val;
2675
2676 goto done;
2677 }
2678
f684960e
CW
2679 return -EINVAL;
2680
2681done:
c0c36b94
CW
2682 if (intel_encoder->base.crtc)
2683 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
2684
2685 return 0;
2686}
2687
a4fc5ed6 2688static void
0206e353 2689intel_dp_destroy(struct drm_connector *connector)
a4fc5ed6 2690{
be3cd5e3 2691 struct intel_dp *intel_dp = intel_attached_dp(connector);
1d508706 2692 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 2693
9cd300e0
JN
2694 if (!IS_ERR_OR_NULL(intel_connector->edid))
2695 kfree(intel_connector->edid);
2696
dc652f90 2697 if (is_edp(intel_dp))
1d508706 2698 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 2699
a4fc5ed6
KP
2700 drm_sysfs_connector_remove(connector);
2701 drm_connector_cleanup(connector);
55f78c43 2702 kfree(connector);
a4fc5ed6
KP
2703}
2704
00c09d70 2705void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 2706{
da63a9f2
PZ
2707 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2708 struct intel_dp *intel_dp = &intel_dig_port->dp;
bd173813 2709 struct drm_device *dev = intel_dp_to_dev(intel_dp);
24d05927
DV
2710
2711 i2c_del_adapter(&intel_dp->adapter);
2712 drm_encoder_cleanup(encoder);
bd943159
KP
2713 if (is_edp(intel_dp)) {
2714 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
bd173813 2715 mutex_lock(&dev->mode_config.mutex);
bd943159 2716 ironlake_panel_vdd_off_sync(intel_dp);
bd173813 2717 mutex_unlock(&dev->mode_config.mutex);
bd943159 2718 }
da63a9f2 2719 kfree(intel_dig_port);
24d05927
DV
2720}
2721
a4fc5ed6 2722static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
a4fc5ed6 2723 .mode_set = intel_dp_mode_set,
a4fc5ed6
KP
2724};
2725
2726static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 2727 .dpms = intel_connector_dpms,
a4fc5ed6
KP
2728 .detect = intel_dp_detect,
2729 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 2730 .set_property = intel_dp_set_property,
a4fc5ed6
KP
2731 .destroy = intel_dp_destroy,
2732};
2733
2734static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2735 .get_modes = intel_dp_get_modes,
2736 .mode_valid = intel_dp_mode_valid,
df0e9248 2737 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
2738};
2739
a4fc5ed6 2740static const struct drm_encoder_funcs intel_dp_enc_funcs = {
24d05927 2741 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
2742};
2743
995b6762 2744static void
21d40d37 2745intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 2746{
fa90ecef 2747 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
c8110e52 2748
885a5014 2749 intel_dp_check_link_status(intel_dp);
c8110e52 2750}
6207937d 2751
e3421a18
ZW
2752/* Return which DP Port should be selected for Transcoder DP control */
2753int
0206e353 2754intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
2755{
2756 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
2757 struct intel_encoder *intel_encoder;
2758 struct intel_dp *intel_dp;
e3421a18 2759
fa90ecef
PZ
2760 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2761 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 2762
fa90ecef
PZ
2763 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2764 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 2765 return intel_dp->output_reg;
e3421a18 2766 }
ea5b213a 2767
e3421a18
ZW
2768 return -1;
2769}
2770
36e83a18 2771/* check the VBT to see whether the eDP is on DP-D port */
cb0953d7 2772bool intel_dpd_is_edp(struct drm_device *dev)
36e83a18
ZY
2773{
2774 struct drm_i915_private *dev_priv = dev->dev_private;
2775 struct child_device_config *p_child;
2776 int i;
2777
41aa3448 2778 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
2779 return false;
2780
41aa3448
RV
2781 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
2782 p_child = dev_priv->vbt.child_dev + i;
36e83a18
ZY
2783
2784 if (p_child->dvo_port == PORT_IDPD &&
2785 p_child->device_type == DEVICE_TYPE_eDP)
2786 return true;
2787 }
2788 return false;
2789}
2790
f684960e
CW
2791static void
2792intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2793{
53b41837
YN
2794 struct intel_connector *intel_connector = to_intel_connector(connector);
2795
3f43c48d 2796 intel_attach_force_audio_property(connector);
e953fd7b 2797 intel_attach_broadcast_rgb_property(connector);
55bc60db 2798 intel_dp->color_range_auto = true;
53b41837
YN
2799
2800 if (is_edp(intel_dp)) {
2801 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
2802 drm_object_attach_property(
2803 &connector->base,
53b41837 2804 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
2805 DRM_MODE_SCALE_ASPECT);
2806 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 2807 }
f684960e
CW
2808}
2809
67a54566
DV
2810static void
2811intel_dp_init_panel_power_sequencer(struct drm_device *dev,
f30d26e4
JN
2812 struct intel_dp *intel_dp,
2813 struct edp_power_seq *out)
67a54566
DV
2814{
2815 struct drm_i915_private *dev_priv = dev->dev_private;
2816 struct edp_power_seq cur, vbt, spec, final;
2817 u32 pp_on, pp_off, pp_div, pp;
453c5420
JB
2818 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
2819
2820 if (HAS_PCH_SPLIT(dev)) {
2821 pp_control_reg = PCH_PP_CONTROL;
2822 pp_on_reg = PCH_PP_ON_DELAYS;
2823 pp_off_reg = PCH_PP_OFF_DELAYS;
2824 pp_div_reg = PCH_PP_DIVISOR;
2825 } else {
2826 pp_control_reg = PIPEA_PP_CONTROL;
2827 pp_on_reg = PIPEA_PP_ON_DELAYS;
2828 pp_off_reg = PIPEA_PP_OFF_DELAYS;
2829 pp_div_reg = PIPEA_PP_DIVISOR;
2830 }
67a54566
DV
2831
2832 /* Workaround: Need to write PP_CONTROL with the unlock key as
2833 * the very first thing. */
453c5420
JB
2834 pp = ironlake_get_pp_control(intel_dp);
2835 I915_WRITE(pp_control_reg, pp);
67a54566 2836
453c5420
JB
2837 pp_on = I915_READ(pp_on_reg);
2838 pp_off = I915_READ(pp_off_reg);
2839 pp_div = I915_READ(pp_div_reg);
67a54566
DV
2840
2841 /* Pull timing values out of registers */
2842 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2843 PANEL_POWER_UP_DELAY_SHIFT;
2844
2845 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2846 PANEL_LIGHT_ON_DELAY_SHIFT;
2847
2848 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2849 PANEL_LIGHT_OFF_DELAY_SHIFT;
2850
2851 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2852 PANEL_POWER_DOWN_DELAY_SHIFT;
2853
2854 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2855 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2856
2857 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2858 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2859
41aa3448 2860 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
2861
2862 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
2863 * our hw here, which are all in 100usec. */
2864 spec.t1_t3 = 210 * 10;
2865 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
2866 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
2867 spec.t10 = 500 * 10;
2868 /* This one is special and actually in units of 100ms, but zero
2869 * based in the hw (so we need to add 100 ms). But the sw vbt
 2870 * table multiplies it by 1000 to make it in units of 100usec,
2871 * too. */
2872 spec.t11_t12 = (510 + 100) * 10;
2873
2874 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2875 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2876
2877 /* Use the max of the register settings and vbt. If both are
2878 * unset, fall back to the spec limits. */
2879#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
2880 spec.field : \
2881 max(cur.field, vbt.field))
2882 assign_final(t1_t3);
2883 assign_final(t8);
2884 assign_final(t9);
2885 assign_final(t10);
2886 assign_final(t11_t12);
2887#undef assign_final
2888
2889#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
2890 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2891 intel_dp->backlight_on_delay = get_delay(t8);
2892 intel_dp->backlight_off_delay = get_delay(t9);
2893 intel_dp->panel_power_down_delay = get_delay(t10);
2894 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2895#undef get_delay
2896
f30d26e4
JN
2897 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2898 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2899 intel_dp->panel_power_cycle_delay);
2900
2901 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2902 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2903
2904 if (out)
2905 *out = final;
2906}
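/*
 * Worked example of the selection above (register and VBT contents are
 * hypothetical): if both cur.t1_t3 and vbt.t1_t3 read back as zero,
 * assign_final() falls back to spec.t1_t3 = 2100 (210ms in the
 * hardware's 100us units), and get_delay() turns that into
 * panel_power_up_delay = DIV_ROUND_UP(2100, 10) = 210 ms. Any non-zero
 * register or VBT value wins through max() instead.
 */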
2907
2908static void
2909intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2910 struct intel_dp *intel_dp,
2911 struct edp_power_seq *seq)
2912{
2913 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
2914 u32 pp_on, pp_off, pp_div, port_sel = 0;
2915 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
2916 int pp_on_reg, pp_off_reg, pp_div_reg;
2917
2918 if (HAS_PCH_SPLIT(dev)) {
2919 pp_on_reg = PCH_PP_ON_DELAYS;
2920 pp_off_reg = PCH_PP_OFF_DELAYS;
2921 pp_div_reg = PCH_PP_DIVISOR;
2922 } else {
2923 pp_on_reg = PIPEA_PP_ON_DELAYS;
2924 pp_off_reg = PIPEA_PP_OFF_DELAYS;
2925 pp_div_reg = PIPEA_PP_DIVISOR;
2926 }
2927
2928 if (IS_VALLEYVIEW(dev))
2929 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
f30d26e4 2930
67a54566 2931 /* And finally store the new values in the power sequencer. */
f30d26e4
JN
2932 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2933 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2934 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2935 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
2936 /* Compute the divisor for the pp clock, simply match the Bspec
2937 * formula. */
453c5420 2938 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 2939 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
2940 << PANEL_POWER_CYCLE_DELAY_SHIFT);
2941
2942 /* Haswell doesn't have any port selection bits for the panel
2943 * power sequencer any more. */
2944 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2945 if (is_cpu_edp(intel_dp))
453c5420 2946 port_sel = PANEL_POWER_PORT_DP_A;
67a54566 2947 else
453c5420 2948 port_sel = PANEL_POWER_PORT_DP_D;
67a54566
DV
2949 }
2950
453c5420
JB
2951 pp_on |= port_sel;
2952
2953 I915_WRITE(pp_on_reg, pp_on);
2954 I915_WRITE(pp_off_reg, pp_off);
2955 I915_WRITE(pp_div_reg, pp_div);
67a54566 2956
67a54566 2957 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
2958 I915_READ(pp_on_reg),
2959 I915_READ(pp_off_reg),
2960 I915_READ(pp_div_reg));
f684960e
CW
2961}
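/*
 * Worked example of the packing above (the rawclk figure is a
 * hypothetical input): with div = 125 from intel_pch_rawclk(), the
 * reference divider field is (100 * 125) / 2 - 1 = 6249, and a
 * spec-default t11_t12 of 6100 (100us units) becomes
 * DIV_ROUND_UP(6100, 1000) = 7 in the power cycle delay field.
 */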
2962
a4fc5ed6 2963void
f0fec3f2
PZ
2964intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2965 struct intel_connector *intel_connector)
a4fc5ed6 2966{
f0fec3f2
PZ
2967 struct drm_connector *connector = &intel_connector->base;
2968 struct intel_dp *intel_dp = &intel_dig_port->dp;
2969 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2970 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 2971 struct drm_i915_private *dev_priv = dev->dev_private;
f8779fda 2972 struct drm_display_mode *fixed_mode = NULL;
f30d26e4 2973 struct edp_power_seq power_seq = { 0 };
174edf1f 2974 enum port port = intel_dig_port->port;
5eb08b69 2975 const char *name = NULL;
b329530c 2976 int type;
a4fc5ed6 2977
0767935e
DV
2978 /* Preserve the current hw state. */
2979 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 2980 intel_dp->attached_connector = intel_connector;
3d3dc149 2981
f7d24902 2982 type = DRM_MODE_CONNECTOR_DisplayPort;
19c03924
GB
2983 /*
 2984 * FIXME: We need to initialize built-in panels before external panels.
2985 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
2986 */
f7d24902
ID
2987 switch (port) {
2988 case PORT_A:
19c03924 2989 type = DRM_MODE_CONNECTOR_eDP;
f7d24902
ID
2990 break;
2991 case PORT_C:
2992 if (IS_VALLEYVIEW(dev))
2993 type = DRM_MODE_CONNECTOR_eDP;
2994 break;
2995 case PORT_D:
2996 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
2997 type = DRM_MODE_CONNECTOR_eDP;
2998 break;
2999 default: /* silence GCC warning */
3000 break;
b329530c
AJ
3001 }
3002
f7d24902
ID
3003 /*
3004 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3005 * for DP the encoder type can be set by the caller to
3006 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
3007 */
3008 if (type == DRM_MODE_CONNECTOR_eDP)
3009 intel_encoder->type = INTEL_OUTPUT_EDP;
3010
e7281eab
ID
3011 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
3012 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
3013 port_name(port));
3014
b329530c 3015 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
3016 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
3017
a4fc5ed6
KP
3018 connector->interlace_allowed = true;
3019 connector->doublescan_allowed = 0;
3020
f0fec3f2
PZ
3021 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3022 ironlake_panel_vdd_work);
a4fc5ed6 3023
df0e9248 3024 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6
KP
3025 drm_sysfs_connector_add(connector);
3026
affa9354 3027 if (HAS_DDI(dev))
bcbc889b
PZ
3028 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3029 else
3030 intel_connector->get_hw_state = intel_connector_get_hw_state;
3031
9ed35ab1
PZ
3032 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
3033 if (HAS_DDI(dev)) {
3034 switch (intel_dig_port->port) {
3035 case PORT_A:
3036 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3037 break;
3038 case PORT_B:
3039 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3040 break;
3041 case PORT_C:
3042 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3043 break;
3044 case PORT_D:
3045 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3046 break;
3047 default:
3048 BUG();
3049 }
3050 }
e8cb4558 3051
a4fc5ed6 3052 /* Set up the DDC bus. */
ab9d7c30
PZ
3053 switch (port) {
3054 case PORT_A:
1d843f9d 3055 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
3056 name = "DPDDC-A";
3057 break;
3058 case PORT_B:
1d843f9d 3059 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
3060 name = "DPDDC-B";
3061 break;
3062 case PORT_C:
1d843f9d 3063 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
3064 name = "DPDDC-C";
3065 break;
3066 case PORT_D:
1d843f9d 3067 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
3068 name = "DPDDC-D";
3069 break;
3070 default:
ad1c0b19 3071 BUG();
5eb08b69
ZW
3072 }
3073
67a54566 3074 if (is_edp(intel_dp))
f30d26e4 3075 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
c1f05264
DA
3076
3077 intel_dp_i2c_init(intel_dp, intel_connector, name);
3078
67a54566 3079 /* Cache DPCD and EDID for edp. */
c1f05264
DA
3080 if (is_edp(intel_dp)) {
3081 bool ret;
f8779fda 3082 struct drm_display_mode *scan;
c1f05264 3083 struct edid *edid;
5d613501
JB
3084
3085 ironlake_edp_panel_vdd_on(intel_dp);
59f3e272 3086 ret = intel_dp_get_dpcd(intel_dp);
bd943159 3087 ironlake_edp_panel_vdd_off(intel_dp, false);
99ea7127 3088
59f3e272 3089 if (ret) {
7183dc29
JB
3090 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3091 dev_priv->no_aux_handshake =
3092 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
89667383
JB
3093 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3094 } else {
3d3dc149 3095 /* if this fails, presume the device is a ghost */
48898b03 3096 DRM_INFO("failed to retrieve link info, disabling eDP\n");
fa90ecef
PZ
3097 intel_dp_encoder_destroy(&intel_encoder->base);
3098 intel_dp_destroy(connector);
3d3dc149 3099 return;
89667383 3100 }
89667383 3101
f30d26e4
JN
3102 /* We now know it's not a ghost, init power sequence regs. */
3103 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3104 &power_seq);
3105
d6f24d0f
JB
3106 ironlake_edp_panel_vdd_on(intel_dp);
3107 edid = drm_get_edid(connector, &intel_dp->adapter);
3108 if (edid) {
9cd300e0
JN
3109 if (drm_add_edid_modes(connector, edid)) {
3110 drm_mode_connector_update_edid_property(connector, edid);
3111 drm_edid_to_eld(connector, edid);
3112 } else {
3113 kfree(edid);
3114 edid = ERR_PTR(-EINVAL);
3115 }
3116 } else {
3117 edid = ERR_PTR(-ENOENT);
d6f24d0f 3118 }
9cd300e0 3119 intel_connector->edid = edid;
f8779fda
JN
3120
3121 /* prefer fixed mode from EDID if available */
3122 list_for_each_entry(scan, &connector->probed_modes, head) {
3123 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3124 fixed_mode = drm_mode_duplicate(dev, scan);
3125 break;
3126 }
d6f24d0f 3127 }
f8779fda
JN
3128
3129 /* fallback to VBT if available for eDP */
41aa3448
RV
3130 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3131 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
f8779fda
JN
3132 if (fixed_mode)
3133 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3134 }
f8779fda 3135
d6f24d0f
JB
3136 ironlake_edp_panel_vdd_off(intel_dp, false);
3137 }
552fb0b7 3138
4d926461 3139 if (is_edp(intel_dp)) {
dd06f90e 3140 intel_panel_init(&intel_connector->panel, fixed_mode);
0657b6b1 3141 intel_panel_setup_backlight(connector);
32f9d658
ZW
3142 }
3143
f684960e
CW
3144 intel_dp_add_properties(intel_dp, connector);
3145
a4fc5ed6
KP
3146 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
3147 * 0xd. Failure to do so will result in spurious interrupts being
3148 * generated on the port when a cable is not attached.
3149 */
3150 if (IS_G4X(dev) && !IS_GM45(dev)) {
3151 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3152 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3153 }
3154}
f0fec3f2
PZ
3155
3156void
3157intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3158{
3159 struct intel_digital_port *intel_dig_port;
3160 struct intel_encoder *intel_encoder;
3161 struct drm_encoder *encoder;
3162 struct intel_connector *intel_connector;
3163
3164 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3165 if (!intel_dig_port)
3166 return;
3167
3168 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3169 if (!intel_connector) {
3170 kfree(intel_dig_port);
3171 return;
3172 }
3173
3174 intel_encoder = &intel_dig_port->base;
3175 encoder = &intel_encoder->base;
3176
3177 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3178 DRM_MODE_ENCODER_TMDS);
00c09d70 3179 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
f0fec3f2 3180
5bfe2ac0 3181 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70
PZ
3182 intel_encoder->enable = intel_enable_dp;
3183 intel_encoder->pre_enable = intel_pre_enable_dp;
3184 intel_encoder->disable = intel_disable_dp;
3185 intel_encoder->post_disable = intel_post_disable_dp;
3186 intel_encoder->get_hw_state = intel_dp_get_hw_state;
89b667f8
JB
3187 if (IS_VALLEYVIEW(dev))
3188 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
f0fec3f2 3189
174edf1f 3190 intel_dig_port->port = port;
f0fec3f2
PZ
3191 intel_dig_port->dp.output_reg = output_reg;
3192
00c09d70 3193 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
f0fec3f2
PZ
3194 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3195 intel_encoder->cloneable = false;
3196 intel_encoder->hot_plug = intel_dp_hot_plug;
3197
3198 intel_dp_init_connector(intel_dig_port, intel_connector);
3199}