drm/i915: merge VLV eDP and DP AUX clock divider calculation
drivers/gpu/drm/i915/intel_dp.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

        return intel_dig_port->base.base.dev;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        enum port port = intel_dig_port->port;

        return is_edp(intel_dp) &&
                (port == PORT_A || (port == PORT_C && IS_VALLEYVIEW(dev)));
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
        return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
        int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

        switch (max_link_bw) {
        case DP_LINK_BW_1_62:
        case DP_LINK_BW_2_7:
                break;
        default:
                max_link_bw = DP_LINK_BW_1_62;
                break;
        }
        return max_link_bw;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */
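/*
 * Putting those numbers together: intel_dp_link_required(119000, 18)
 * == 214200 decakilobits, which just fits in the 216000 available on a
 * single 2.7GHz lane, so 1680x1050R at 18bpp is achievable on one lane.
 */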

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
        return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
        return (max_link_clock * max_lanes * 8) / 10;
}

static int
intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
{
        struct intel_dp *intel_dp = intel_attached_dp(connector);
        struct intel_connector *intel_connector = to_intel_connector(connector);
        struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
        int target_clock = mode->clock;
        int max_rate, mode_rate, max_lanes, max_link_clock;

        if (is_edp(intel_dp) && fixed_mode) {
                if (mode->hdisplay > fixed_mode->hdisplay)
                        return MODE_PANEL;

                if (mode->vdisplay > fixed_mode->vdisplay)
                        return MODE_PANEL;

                target_clock = fixed_mode->clock;
        }

        max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
        max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

        max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
        mode_rate = intel_dp_link_required(target_clock, 18);

        if (mode_rate > max_rate)
                return MODE_CLOCK_HIGH;

        if (mode->clock < 10000)
                return MODE_CLOCK_LOW;

        if (mode->flags & DRM_MODE_FLAG_DBLCLK)
                return MODE_H_ILLEGAL;

        return MODE_OK;
}

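/*
 * Pack up to four message bytes into a single 32-bit AUX channel data
 * register, most significant byte first; unpack_aux below is the inverse.
 */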
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
        int i;
        uint32_t v = 0;

        if (src_bytes > 4)
                src_bytes = 4;
        for (i = 0; i < src_bytes; i++)
                v |= ((uint32_t) src[i]) << ((3-i) * 8);
        return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
        int i;
        if (dst_bytes > 4)
                dst_bytes = 4;
        for (i = 0; i < dst_bytes; i++)
                dst[i] = src >> ((3-i) * 8);
}

/* hrawclk is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t clkcfg;

        /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
        if (IS_VALLEYVIEW(dev))
                return 200;

        clkcfg = I915_READ(CLKCFG);
        switch (clkcfg & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_400:
                return 100;
        case CLKCFG_FSB_533:
                return 133;
        case CLKCFG_FSB_667:
                return 166;
        case CLKCFG_FSB_800:
                return 200;
        case CLKCFG_FSB_1067:
                return 266;
        case CLKCFG_FSB_1333:
                return 333;
        /* these two are just a guess; one of them might be right */
        case CLKCFG_FSB_1600:
        case CLKCFG_FSB_1600_ALT:
                return 400;
        default:
                return 133;
        }
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg;

        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
        return (I915_READ(pp_stat_reg) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_ctrl_reg;

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
        return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
                WARN(1, "eDP powered off while attempting aux channel communication.\n");
                DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
                              I915_READ(pp_stat_reg),
                              I915_READ(pp_ctrl_reg));
        }
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t status;
        bool done;

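/* C evaluates to true once the AUX channel's SEND_BUSY bit has cleared;
 * as a side effect it latches the most recent status read for the caller. */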
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
        if (has_aux_irq)
                done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
                                          msecs_to_jiffies(10));
        else
                done = wait_for_atomic(C, 10) == 0;
        if (!done)
                DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
                          has_aux_irq);
#undef C

        return status;
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
                uint8_t *send, int send_bytes,
                uint8_t *recv, int recv_size)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
        uint32_t ch_data = ch_ctl + 4;
        int i, ret, recv_bytes;
        uint32_t status;
        uint32_t aux_clock_divider;
        int try, precharge;
        bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

        /* dp aux is extremely sensitive to irq latency, hence request the
         * lowest possible wakeup latency and so prevent the cpu from going into
         * deep sleep states.
         */
        pm_qos_update_request(&dev_priv->pm_qos, 0);

        intel_dp_check_edp(intel_dp);
        /* The clock divider is based off the hrawclk, and is targeting a
         * 2MHz AUX clock, so take the hrawclk value and divide by 2 and
         * use that.
         *
         * Note that PCH attached eDP panels should use a 125MHz input
         * clock divider.
         */
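        /* Note how the hard-coded VLV divider of 100 below is this same
         * rule applied to its constant 200MHz hrawclk. */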
        if (IS_VALLEYVIEW(dev)) {
                aux_clock_divider = 100;
        } else if (intel_dig_port->port == PORT_A) {
                if (HAS_DDI(dev))
                        aux_clock_divider = DIV_ROUND_CLOSEST(
                                intel_ddi_get_cdclk_freq(dev_priv), 2000);
                else if (IS_GEN6(dev) || IS_GEN7(dev))
                        aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
                else
                        aux_clock_divider = 225; /* eDP input clock at 450Mhz */
        } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
                /* Workaround for non-ULT HSW */
                aux_clock_divider = 74;
        } else if (HAS_PCH_SPLIT(dev)) {
                aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
        } else {
                aux_clock_divider = intel_hrawclk(dev) / 2;
        }

        if (IS_GEN6(dev))
                precharge = 3;
        else
                precharge = 5;

        /* Try to wait for any previous AUX channel activity */
        for (try = 0; try < 3; try++) {
                status = I915_READ_NOTRACE(ch_ctl);
                if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
                        break;
                msleep(1);
        }

        if (try == 3) {
                WARN(1, "dp_aux_ch not started status 0x%08x\n",
                     I915_READ(ch_ctl));
                ret = -EBUSY;
                goto out;
        }

        /* Must try at least 3 times according to DP spec */
        for (try = 0; try < 5; try++) {
                /* Load the send data into the aux channel data registers */
                for (i = 0; i < send_bytes; i += 4)
                        I915_WRITE(ch_data + i,
                                   pack_aux(send + i, send_bytes - i));

                /* Send the command and wait for it to complete */
                I915_WRITE(ch_ctl,
                           DP_AUX_CH_CTL_SEND_BUSY |
                           (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
                           DP_AUX_CH_CTL_TIME_OUT_400us |
                           (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
                           (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
                           (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

                /* Clear done status and any errors */
                I915_WRITE(ch_ctl,
                           status |
                           DP_AUX_CH_CTL_DONE |
                           DP_AUX_CH_CTL_TIME_OUT_ERROR |
                           DP_AUX_CH_CTL_RECEIVE_ERROR);

                if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
                              DP_AUX_CH_CTL_RECEIVE_ERROR))
                        continue;
                if (status & DP_AUX_CH_CTL_DONE)
                        break;
        }

        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
                DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
                ret = -EBUSY;
                goto out;
        }

        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
        if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
                DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
                ret = -EIO;
                goto out;
        }

        /* Timeouts occur when the device isn't connected, so they're
         * "normal" -- don't fill the kernel log with these */
        if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
                DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
                ret = -ETIMEDOUT;
                goto out;
        }

        /* Unload any bytes sent back from the other side */
        recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
                      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
        if (recv_bytes > recv_size)
                recv_bytes = recv_size;

        for (i = 0; i < recv_bytes; i += 4)
                unpack_aux(I915_READ(ch_data + i),
                           recv + i, recv_bytes - i);

        ret = recv_bytes;
out:
        pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

        return ret;
}

/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
                          uint16_t address, uint8_t *send, int send_bytes)
{
        int ret;
        uint8_t msg[20];
        int msg_bytes;
        uint8_t ack;

        intel_dp_check_edp(intel_dp);
        if (send_bytes > 16)
                return -1;
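        /* Native AUX header: the 4-bit command sits in the high nibble of
         * byte 0 (the low nibble would carry address bits 19:16, which stay
         * zero for these 16-bit DPCD addresses), bytes 1-2 hold the rest of
         * the address, and byte 3 holds the transfer length minus one. */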
        msg[0] = AUX_NATIVE_WRITE << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = send_bytes - 1;
        memcpy(&msg[4], send, send_bytes);
        msg_bytes = send_bytes + 4;
        for (;;) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
                if (ret < 0)
                        return ret;
                if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
                        break;
                else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
        }
        return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
                            uint16_t address, uint8_t byte)
{
        return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
                         uint16_t address, uint8_t *recv, int recv_bytes)
{
        uint8_t msg[4];
        int msg_bytes;
        uint8_t reply[20];
        int reply_bytes;
        uint8_t ack;
        int ret;

        intel_dp_check_edp(intel_dp);
        msg[0] = AUX_NATIVE_READ << 4;
        msg[1] = address >> 8;
        msg[2] = address & 0xff;
        msg[3] = recv_bytes - 1;

        msg_bytes = 4;
        reply_bytes = recv_bytes + 1;

        for (;;) {
                ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
                                      reply, reply_bytes);
                if (ret == 0)
                        return -EPROTO;
                if (ret < 0)
                        return ret;
                ack = reply[0];
                if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
                        memcpy(recv, reply + 1, ret - 1);
                        return ret - 1;
                }
                else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
                        udelay(100);
                else
                        return -EIO;
        }
}

static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
                    uint8_t write_byte, uint8_t *read_byte)
{
        struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
        struct intel_dp *intel_dp = container_of(adapter,
                                                 struct intel_dp,
                                                 adapter);
        uint16_t address = algo_data->address;
        uint8_t msg[5];
        uint8_t reply[2];
        unsigned retry;
        int msg_bytes;
        int reply_bytes;
        int ret;

        intel_dp_check_edp(intel_dp);
        /* Set up the command byte */
        if (mode & MODE_I2C_READ)
                msg[0] = AUX_I2C_READ << 4;
        else
                msg[0] = AUX_I2C_WRITE << 4;

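        /* MOT (middle-of-transaction) keeps the I2C transaction open across
         * successive AUX messages; it is only dropped on the final stop. */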
        if (!(mode & MODE_I2C_STOP))
                msg[0] |= AUX_I2C_MOT << 4;

        msg[1] = address >> 8;
        msg[2] = address;

        switch (mode) {
        case MODE_I2C_WRITE:
                msg[3] = 0;
                msg[4] = write_byte;
                msg_bytes = 5;
                reply_bytes = 1;
                break;
        case MODE_I2C_READ:
                msg[3] = 0;
                msg_bytes = 4;
                reply_bytes = 2;
                break;
        default:
                msg_bytes = 3;
                reply_bytes = 1;
                break;
        }

        for (retry = 0; retry < 5; retry++) {
                ret = intel_dp_aux_ch(intel_dp,
                                      msg, msg_bytes,
                                      reply, reply_bytes);
                if (ret < 0) {
                        DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
                        return ret;
                }

                switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
                case AUX_NATIVE_REPLY_ACK:
                        /* I2C-over-AUX Reply field is only valid
                         * when paired with AUX ACK.
                         */
                        break;
                case AUX_NATIVE_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_ch native nack\n");
                        return -EREMOTEIO;
                case AUX_NATIVE_REPLY_DEFER:
                        udelay(100);
                        continue;
                default:
                        DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
                                  reply[0]);
                        return -EREMOTEIO;
                }

                switch (reply[0] & AUX_I2C_REPLY_MASK) {
                case AUX_I2C_REPLY_ACK:
                        if (mode == MODE_I2C_READ) {
                                *read_byte = reply[1];
                        }
                        return reply_bytes - 1;
                case AUX_I2C_REPLY_NACK:
                        DRM_DEBUG_KMS("aux_i2c nack\n");
                        return -EREMOTEIO;
                case AUX_I2C_REPLY_DEFER:
                        DRM_DEBUG_KMS("aux_i2c defer\n");
                        udelay(100);
                        break;
                default:
                        DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
                        return -EREMOTEIO;
                }
        }

        DRM_ERROR("too many retries, giving up\n");
        return -EREMOTEIO;
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
                  struct intel_connector *intel_connector, const char *name)
{
        int ret;

        DRM_DEBUG_KMS("i2c_init %s\n", name);
        intel_dp->algo.running = false;
        intel_dp->algo.address = 0;
        intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

        memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
        intel_dp->adapter.owner = THIS_MODULE;
        intel_dp->adapter.class = I2C_CLASS_DDC;
        strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
        intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
        intel_dp->adapter.algo_data = &intel_dp->algo;
        intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

        ironlake_edp_panel_vdd_on(intel_dp);
        ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
        ironlake_edp_panel_vdd_off(intel_dp, false);
        return ret;
}

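/*
 * Pre-computed DPLL divider values for the two fixed DP link rates; since
 * the link frequency must match the sink exactly, these are looked up here
 * rather than derived through the normal clock computation.
 */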
static void
intel_dp_set_clock(struct intel_encoder *encoder,
                   struct intel_crtc_config *pipe_config, int link_bw)
{
        struct drm_device *dev = encoder->base.dev;

        if (IS_G4X(dev)) {
                if (link_bw == DP_LINK_BW_1_62) {
                        pipe_config->dpll.p1 = 2;
                        pipe_config->dpll.p2 = 10;
                        pipe_config->dpll.n = 2;
                        pipe_config->dpll.m1 = 23;
                        pipe_config->dpll.m2 = 8;
                } else {
                        pipe_config->dpll.p1 = 1;
                        pipe_config->dpll.p2 = 10;
                        pipe_config->dpll.n = 1;
                        pipe_config->dpll.m1 = 14;
                        pipe_config->dpll.m2 = 2;
                }
                pipe_config->clock_set = true;
        } else if (IS_HASWELL(dev)) {
                /* Haswell has special-purpose DP DDI clocks. */
        } else if (HAS_PCH_SPLIT(dev)) {
                if (link_bw == DP_LINK_BW_1_62) {
                        pipe_config->dpll.n = 1;
                        pipe_config->dpll.p1 = 2;
                        pipe_config->dpll.p2 = 10;
                        pipe_config->dpll.m1 = 12;
                        pipe_config->dpll.m2 = 9;
                } else {
                        pipe_config->dpll.n = 2;
                        pipe_config->dpll.p1 = 1;
                        pipe_config->dpll.p2 = 10;
                        pipe_config->dpll.m1 = 14;
                        pipe_config->dpll.m2 = 8;
                }
                pipe_config->clock_set = true;
        } else if (IS_VALLEYVIEW(dev)) {
                /* FIXME: Need to figure out optimized DP clocks for vlv. */
        }
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
                        struct intel_crtc_config *pipe_config)
{
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct intel_crtc *intel_crtc = encoder->new_crtc;
        struct intel_connector *intel_connector = intel_dp->attached_connector;
        int lane_count, clock;
        int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
        int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
        int bpp, mode_rate;
        static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
        int target_clock, link_avail, link_clock;

        if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && !is_cpu_edp(intel_dp))
                pipe_config->has_pch_encoder = true;

        pipe_config->has_dp_encoder = true;

        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
                                       adjusted_mode);
                if (!HAS_PCH_SPLIT(dev))
                        intel_gmch_panel_fitting(intel_crtc, pipe_config,
                                                 intel_connector->panel.fitting_mode);
                else
                        intel_pch_panel_fitting(intel_crtc, pipe_config,
                                                intel_connector->panel.fitting_mode);
        }
        /* We need to take the panel's fixed mode into account. */
        target_clock = adjusted_mode->clock;

        if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
                return false;

        DRM_DEBUG_KMS("DP link computation with max lane count %i "
                      "max bw %02x pixel clock %iKHz\n",
                      max_lane_count, bws[max_clock], adjusted_mode->clock);

        /* Walk through all bpp values. Luckily they're all nicely spaced with 2
         * bpc in between. */
        bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
        if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
                bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);

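        /* The first fit below wins: the deepest bpp that any link
         * configuration can carry, driven with the lowest link rate and
         * lane count that suffice for it. */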
        for (; bpp >= 6*3; bpp -= 2*3) {
                mode_rate = intel_dp_link_required(target_clock, bpp);

                for (clock = 0; clock <= max_clock; clock++) {
                        for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);

                                if (mode_rate <= link_avail) {
                                        goto found;
                                }
                        }
                }
        }

        return false;

found:
        if (intel_dp->color_range_auto) {
                /*
                 * See:
                 * CEA-861-E - 5.1 Default Encoding Parameters
                 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
                 */
                if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
                        intel_dp->color_range = DP_COLOR_RANGE_16_235;
                else
                        intel_dp->color_range = 0;
        }

        if (intel_dp->color_range)
                pipe_config->limited_color_range = true;

        intel_dp->link_bw = bws[clock];
        intel_dp->lane_count = lane_count;
        adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
        pipe_config->pipe_bpp = bpp;
        pipe_config->pixel_target_clock = target_clock;

        DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
                      intel_dp->link_bw, intel_dp->lane_count,
                      adjusted_mode->clock, bpp);
        DRM_DEBUG_KMS("DP link bw required %i available %i\n",
                      mode_rate, link_avail);

        intel_link_compute_m_n(bpp, lane_count,
                               target_clock, adjusted_mode->clock,
                               &pipe_config->dp_m_n);

        intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

        return true;
}

void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
        memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
        intel_dp->link_configuration[0] = intel_dp->link_bw;
        intel_dp->link_configuration[1] = intel_dp->lane_count;
        intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
        /*
         * Check for DPCD version > 1.1 and enhanced framing support
         */
        if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
            (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
                intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
        }
}

static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
        dpa_ctl = I915_READ(DP_A);
        dpa_ctl &= ~DP_PLL_FREQ_MASK;

        if (clock < 200000) {
                /* For a long time we've carried around an ILK-DevA w/a for the
                 * 160MHz clock. If we're really unlucky, it's still required.
                 */
                DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
                dpa_ctl |= DP_PLL_FREQ_160MHZ;
        } else {
                dpa_ctl |= DP_PLL_FREQ_270MHZ;
        }

        I915_WRITE(DP_A, dpa_ctl);

        POSTING_READ(DP_A);
        udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
{
        struct drm_device *dev = encoder->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
        struct drm_crtc *crtc = encoder->crtc;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

        /*
         * There are four kinds of DP registers:
         *
         *      IBX PCH
         *      SNB CPU
         *      IVB CPU
         *      CPT PCH
         *
         * IBX PCH and CPU are the same for almost everything,
         * except that the CPU DP PLL is configured in this
         * register
         *
         * CPT PCH is quite different, having many bits moved
         * to the TRANS_DP_CTL register instead. That
         * configuration happens (oddly) in ironlake_pch_enable
         */

        /* Preserve the BIOS-computed detected bit. This is
         * supposed to be read-only.
         */
        intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

        /* Handle DP bits in common between all three register formats */
        intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
        intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

        if (intel_dp->has_audio) {
                DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
                                 pipe_name(intel_crtc->pipe));
                intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
                intel_write_eld(encoder, adjusted_mode);
        }

        intel_dp_init_link_config(intel_dp);

        /* Split out the IBX/CPU vs CPT settings */

        if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

                if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                intel_dp->DP |= intel_crtc->pipe << 29;

                /* don't miss the required PLL frequency setting for eDP */
                if (adjusted_mode->clock < 200000)
                        intel_dp->DP |= DP_PLL_FREQ_160MHZ;
                else
                        intel_dp->DP |= DP_PLL_FREQ_270MHZ;
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
                        intel_dp->DP |= intel_dp->color_range;

                if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                        intel_dp->DP |= DP_SYNC_HS_HIGH;
                if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
                        intel_dp->DP |= DP_SYNC_VS_HIGH;
                intel_dp->DP |= DP_LINK_TRAIN_OFF;

                if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
                        intel_dp->DP |= DP_ENHANCED_FRAMING;

                if (intel_crtc->pipe == 1)
                        intel_dp->DP |= DP_PIPEB_SELECT;

                if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
                        /* don't miss the required PLL frequency setting for eDP */
                        if (adjusted_mode->clock < 200000)
                                intel_dp->DP |= DP_PLL_FREQ_160MHZ;
                        else
                                intel_dp->DP |= DP_PLL_FREQ_270MHZ;
                }
        } else {
                intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
        }

        if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}

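/*
 * Mask/value pairs for the panel power sequencer states waited on below:
 * fully on and idle, fully off and idle, and off with the power-cycle
 * delay no longer active.
 */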
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
                                       u32 mask,
                                       u32 value)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp_stat_reg, pp_ctrl_reg;

        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
                      mask, value,
                      I915_READ(pp_stat_reg),
                      I915_READ(pp_ctrl_reg));

        if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
                DRM_ERROR("Panel status timeout: status %08x control %08x\n",
                          I915_READ(pp_stat_reg),
                          I915_READ(pp_ctrl_reg));
        }
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power on\n");
        ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power off time\n");
        ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
        DRM_DEBUG_KMS("Wait for panel power cycle\n");
        ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 control;
        u32 pp_ctrl_reg;

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
        control = I915_READ(pp_ctrl_reg);

        control &= ~PANEL_UNLOCK_MASK;
        control |= PANEL_UNLOCK_REGS;
        return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;
        DRM_DEBUG_KMS("Turn eDP VDD on\n");

        WARN(intel_dp->want_panel_vdd,
             "eDP VDD already requested on\n");

        intel_dp->want_panel_vdd = true;

        if (ironlake_edp_have_panel_vdd(intel_dp)) {
                DRM_DEBUG_KMS("eDP VDD already on\n");
                return;
        }

        if (!ironlake_edp_have_panel_power(intel_dp))
                ironlake_wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_FORCE_VDD;

        pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
        /*
         * If the panel wasn't on, delay before accessing aux channel
         */
        if (!ironlake_edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP was not running\n");
                msleep(intel_dp->panel_power_up_delay);
        }
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_stat_reg, pp_ctrl_reg;

        WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

        if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
                pp = ironlake_get_pp_control(intel_dp);
                pp &= ~EDP_FORCE_VDD;

                pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
                pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

                I915_WRITE(pp_ctrl_reg, pp);
                POSTING_READ(pp_ctrl_reg);

                /* Make sure sequencer is idle before allowing subsequent activity */
                DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
                              I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
                msleep(intel_dp->panel_power_down_delay);
        }
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
        struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
                                                 struct intel_dp, panel_vdd_work);
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        mutex_lock(&dev->mode_config.mutex);
        ironlake_panel_vdd_off_sync(intel_dp);
        mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
        WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

        intel_dp->want_panel_vdd = false;

        if (sync) {
                ironlake_panel_vdd_off_sync(intel_dp);
        } else {
                /*
                 * Queue the timer to fire a long
                 * time from now (relative to the power down delay)
                 * to keep the panel power up across a sequence of operations
                 */
                schedule_delayed_work(&intel_dp->panel_vdd_work,
                                      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
        }
}

void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power on\n");

        if (ironlake_edp_have_panel_power(intel_dp)) {
                DRM_DEBUG_KMS("eDP power already on\n");
                return;
        }

        ironlake_wait_panel_power_cycle(intel_dp);

        pp = ironlake_get_pp_control(intel_dp);
        if (IS_GEN5(dev)) {
                /* ILK workaround: disable reset around power sequence */
                pp &= ~PANEL_POWER_RESET;
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);
        }

        pp |= POWER_TARGET_ON;
        if (!IS_GEN5(dev))
                pp |= PANEL_POWER_RESET;

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        ironlake_wait_panel_on(intel_dp);

        if (IS_GEN5(dev)) {
                pp |= PANEL_POWER_RESET; /* restore panel reset bit */
                I915_WRITE(PCH_PP_CONTROL, pp);
                POSTING_READ(PCH_PP_CONTROL);
        }
}

void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("Turn eDP power off\n");

        WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

        pp = ironlake_get_pp_control(intel_dp);
        /* We need to switch off panel power _and_ force vdd, since otherwise
         * some panels get very unhappy and cease to work. */
        pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        intel_dp->want_panel_vdd = false;

        ironlake_wait_panel_off(intel_dp);
}

void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        DRM_DEBUG_KMS("\n");
        /*
         * If we enable the backlight right away following a panel power
         * on, we may see slight flicker as the panel syncs with the eDP
         * link.  So delay a bit to make sure the image is solid before
         * allowing it to appear.
         */
        msleep(intel_dp->backlight_on_delay);
        pp = ironlake_get_pp_control(intel_dp);
        pp |= EDP_BLC_ENABLE;

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);

        intel_panel_enable_backlight(dev, pipe);
}

void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 pp;
        u32 pp_ctrl_reg;

        if (!is_edp(intel_dp))
                return;

        intel_panel_disable_backlight(dev);

        DRM_DEBUG_KMS("\n");
        pp = ironlake_get_pp_control(intel_dp);
        pp &= ~EDP_BLC_ENABLE;

        pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

        I915_WRITE(pp_ctrl_reg, pp);
        POSTING_READ(pp_ctrl_reg);
        msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        DRM_DEBUG_KMS("\n");
        dpa_ctl = I915_READ(DP_A);
        WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

        /* We don't adjust intel_dp->DP while tearing down the link, to
         * facilitate link retraining (e.g. after hotplug). Hence clear all
         * enable bits here to ensure that we don't enable too much. */
        intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
        intel_dp->DP |= DP_PLL_ENABLE;
        I915_WRITE(DP_A, intel_dp->DP);
        POSTING_READ(DP_A);
        udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
        struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 dpa_ctl;

        assert_pipe_disabled(dev_priv,
                             to_intel_crtc(crtc)->pipe);

        dpa_ctl = I915_READ(DP_A);
        WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
             "dp pll off, should be on\n");
        WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
        /* We can't rely on the value tracked for the DP register in
         * intel_dp->DP because link_down must not change that (otherwise link
         * re-training will fail). */
        dpa_ctl &= ~DP_PLL_ENABLE;
        I915_WRITE(DP_A, dpa_ctl);
        POSTING_READ(DP_A);
        udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
        int ret, i;

        /* Should have a valid DPCD by this point */
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;

        if (mode != DRM_MODE_DPMS_ON) {
                ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
                                                  DP_SET_POWER_D3);
                if (ret != 1)
                        DRM_DEBUG_DRIVER("failed to write sink power state\n");
        } else {
                /*
                 * When turning on, we need to retry for 1ms to give the sink
                 * time to wake up.
                 */
                for (i = 0; i < 3; i++) {
                        ret = intel_dp_aux_native_write_1(intel_dp,
                                                          DP_SET_POWER,
                                                          DP_SET_POWER_D0);
                        if (ret == 1)
                                break;
                        msleep(1);
                }
        }
}

static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
                                  enum pipe *pipe)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 tmp = I915_READ(intel_dp->output_reg);

        if (!(tmp & DP_PORT_EN))
                return false;

        if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
                *pipe = PORT_TO_PIPE_CPT(tmp);
        } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
                *pipe = PORT_TO_PIPE(tmp);
        } else {
                u32 trans_sel;
                u32 trans_dp;
                int i;

                switch (intel_dp->output_reg) {
                case PCH_DP_B:
                        trans_sel = TRANS_DP_PORT_SEL_B;
                        break;
                case PCH_DP_C:
                        trans_sel = TRANS_DP_PORT_SEL_C;
                        break;
                case PCH_DP_D:
                        trans_sel = TRANS_DP_PORT_SEL_D;
                        break;
                default:
                        return true;
                }

                for_each_pipe(i) {
                        trans_dp = I915_READ(TRANS_DP_CTL(i));
                        if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
                                *pipe = i;
                                return true;
                        }
                }

                DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
                              intel_dp->output_reg);
        }

        return true;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_config *pipe_config)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        u32 tmp, flags = 0;

        tmp = I915_READ(intel_dp->output_reg);

        if (tmp & DP_SYNC_HS_HIGH)
                flags |= DRM_MODE_FLAG_PHSYNC;
        else
                flags |= DRM_MODE_FLAG_NHSYNC;

        if (tmp & DP_SYNC_VS_HIGH)
                flags |= DRM_MODE_FLAG_PVSYNC;
        else
                flags |= DRM_MODE_FLAG_NVSYNC;

        pipe_config->adjusted_mode.flags |= flags;
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;

        /* Make sure the panel is off before trying to change the mode. But also
         * ensure that we have vdd while we switch off the panel. */
        ironlake_edp_panel_vdd_on(intel_dp);
        ironlake_edp_backlight_off(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        ironlake_edp_panel_off(intel_dp);

        /* CPU eDP may only be disabled _after_ the CPU pipe/plane is disabled. */
        if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
                intel_dp_link_down(intel_dp);
}

static void intel_post_disable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = dp_to_dig_port(intel_dp)->port;
        struct drm_device *dev = encoder->base.dev;

        if (port == PORT_A || IS_VALLEYVIEW(dev)) {
                intel_dp_link_down(intel_dp);
                if (!IS_VALLEYVIEW(dev))
                        ironlake_edp_pll_off(intel_dp);
        }
}

static void intel_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t dp_reg = I915_READ(intel_dp->output_reg);

        if (WARN_ON(dp_reg & DP_PORT_EN))
                return;

        ironlake_edp_panel_vdd_on(intel_dp);
        intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
        intel_dp_start_link_train(intel_dp);
        ironlake_edp_panel_on(intel_dp);
        ironlake_edp_panel_vdd_off(intel_dp, true);
        intel_dp_complete_link_train(intel_dp);
        intel_dp_stop_link_train(intel_dp);
        ironlake_edp_backlight_on(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                struct intel_digital_port *dport =
                        enc_to_dig_port(&encoder->base);
                int channel = vlv_dport_to_channel(dport);

                vlv_wait_port_ready(dev_priv, channel);
        }
}

static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        if (is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev))
                ironlake_edp_pll_on(intel_dp);

        if (IS_VALLEYVIEW(dev)) {
                struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
                struct intel_crtc *intel_crtc =
                        to_intel_crtc(encoder->base.crtc);
                int port = vlv_dport_to_channel(dport);
                int pipe = intel_crtc->pipe;
                u32 val;

                val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
                val = 0;
                if (pipe)
                        val |= (1<<21);
                else
                        val &= ~(1<<21);
                val |= 0x001000c4;
                vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);

                vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
                               0x00760018);
                vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
                               0x00400888);
        }
}

static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
{
        struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int port = vlv_dport_to_channel(dport);

        if (!IS_VALLEYVIEW(dev))
                return;

        /* Program Tx lane resets to default */
        vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
                       DPIO_PCS_TX_LANE2_RESET |
                       DPIO_PCS_TX_LANE1_RESET);
        vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
                       DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
                       DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
                       (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
                       DPIO_PCS_CLK_SOFT_RESET);

        /* Fix up inter-pair skew failure */
        vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
        vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
        vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
                               uint8_t *recv, int recv_bytes)
{
        int ret, i;

        /*
         * Sinks are *supposed* to come up within 1ms from an off state,
         * but we're also supposed to retry 3 times per the spec.
         */
        for (i = 0; i < 3; i++) {
                ret = intel_dp_aux_native_read(intel_dp, address, recv,
                                               recv_bytes);
                if (ret == recv_bytes)
                        return true;
                msleep(1);
        }

        return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
        return intel_dp_aux_native_read_retry(intel_dp,
                                              DP_LANE0_1_STATUS,
                                              link_status,
                                              DP_LINK_STATUS_SIZE);
}

#if 0
static char *voltage_names[] = {
        "0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
        "0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
        "pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (IS_VALLEYVIEW(dev))
                return DP_TRAIN_VOLTAGE_SWING_1200;
        else if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
                return DP_TRAIN_VOLTAGE_SWING_800;
        else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
                return DP_TRAIN_VOLTAGE_SWING_1200;
        else
                return DP_TRAIN_VOLTAGE_SWING_800;
}

static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);

        if (HAS_DDI(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_9_5;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        return DP_TRAIN_PRE_EMPHASIS_6;
                case DP_TRAIN_VOLTAGE_SWING_800:
                        return DP_TRAIN_PRE_EMPHASIS_3_5;
                case DP_TRAIN_VOLTAGE_SWING_1200:
                default:
                        return DP_TRAIN_PRE_EMPHASIS_0;
                }
        } else if (IS_VALLEYVIEW(dev)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_9_5;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        return DP_TRAIN_PRE_EMPHASIS_6;
                case DP_TRAIN_VOLTAGE_SWING_800:
                        return DP_TRAIN_PRE_EMPHASIS_3_5;
                case DP_TRAIN_VOLTAGE_SWING_1200:
                default:
                        return DP_TRAIN_PRE_EMPHASIS_0;
                }
        } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_6;
                case DP_TRAIN_VOLTAGE_SWING_600:
                case DP_TRAIN_VOLTAGE_SWING_800:
                        return DP_TRAIN_PRE_EMPHASIS_3_5;
                default:
                        return DP_TRAIN_PRE_EMPHASIS_0;
                }
        } else {
                switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        return DP_TRAIN_PRE_EMPHASIS_6;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        return DP_TRAIN_PRE_EMPHASIS_6;
                case DP_TRAIN_VOLTAGE_SWING_800:
                        return DP_TRAIN_PRE_EMPHASIS_3_5;
                case DP_TRAIN_VOLTAGE_SWING_1200:
                default:
                        return DP_TRAIN_PRE_EMPHASIS_0;
                }
        }
}

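/*
 * The demph/uniqtranscale values below are opaque, VLV-specific DPIO
 * coefficients for each requested swing/pre-emphasis combination; they
 * are programmed into the PHY rather than the DP port register.
 */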
static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
{
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
        unsigned long demph_reg_value, preemph_reg_value,
                uniqtranscale_reg_value;
        uint8_t train_set = intel_dp->train_set[0];
        int port = vlv_dport_to_channel(dport);

        switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
        case DP_TRAIN_PRE_EMPHASIS_0:
                preemph_reg_value = 0x0004000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x552AB83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5548B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_800:
                        demph_reg_value = 0x2B245555;
                        uniqtranscale_reg_value = 0x5560B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_1200:
                        demph_reg_value = 0x2B405555;
                        uniqtranscale_reg_value = 0x5598DA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPHASIS_3_5:
                preemph_reg_value = 0x0002000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x5552B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        demph_reg_value = 0x2B404848;
                        uniqtranscale_reg_value = 0x5580B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_800:
                        demph_reg_value = 0x2B404040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPHASIS_6:
                preemph_reg_value = 0x0000000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        demph_reg_value = 0x2B305555;
                        uniqtranscale_reg_value = 0x5570B83A;
                        break;
                case DP_TRAIN_VOLTAGE_SWING_600:
                        demph_reg_value = 0x2B2B4040;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        case DP_TRAIN_PRE_EMPHASIS_9_5:
                preemph_reg_value = 0x0006000;
                switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
                case DP_TRAIN_VOLTAGE_SWING_400:
                        demph_reg_value = 0x1B405555;
                        uniqtranscale_reg_value = 0x55ADDA3A;
                        break;
                default:
                        return 0;
                }
                break;
        default:
                return 0;
        }

        vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
        vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
        vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
                       uniqtranscale_reg_value);
        vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
        vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
        vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
        vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);

        return 0;
}

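/*
 * Take the worst-case voltage swing and pre-emphasis the sink requested
 * across all active lanes, clamp both to what this source can drive, and
 * apply the result to every lane's train_set entry.
 */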
1709 static void
1710 intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1711 {
1712 uint8_t v = 0;
1713 uint8_t p = 0;
1714 int lane;
1715 uint8_t voltage_max;
1716 uint8_t preemph_max;
1717
1718 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1719 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1720 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
1721
1722 if (this_v > v)
1723 v = this_v;
1724 if (this_p > p)
1725 p = this_p;
1726 }
1727
1728 voltage_max = intel_dp_voltage_max(intel_dp);
1729 if (v >= voltage_max)
1730 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
1731
1732 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1733 if (p >= preemph_max)
1734 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
1735
1736 for (lane = 0; lane < 4; lane++)
1737 intel_dp->train_set[lane] = v | p;
1738 }
1739
1740 static uint32_t
1741 intel_gen4_signal_levels(uint8_t train_set)
1742 {
1743 uint32_t signal_levels = 0;
1744
1745 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1746 case DP_TRAIN_VOLTAGE_SWING_400:
1747 default:
1748 signal_levels |= DP_VOLTAGE_0_4;
1749 break;
1750 case DP_TRAIN_VOLTAGE_SWING_600:
1751 signal_levels |= DP_VOLTAGE_0_6;
1752 break;
1753 case DP_TRAIN_VOLTAGE_SWING_800:
1754 signal_levels |= DP_VOLTAGE_0_8;
1755 break;
1756 case DP_TRAIN_VOLTAGE_SWING_1200:
1757 signal_levels |= DP_VOLTAGE_1_2;
1758 break;
1759 }
1760 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1761 case DP_TRAIN_PRE_EMPHASIS_0:
1762 default:
1763 signal_levels |= DP_PRE_EMPHASIS_0;
1764 break;
1765 case DP_TRAIN_PRE_EMPHASIS_3_5:
1766 signal_levels |= DP_PRE_EMPHASIS_3_5;
1767 break;
1768 case DP_TRAIN_PRE_EMPHASIS_6:
1769 signal_levels |= DP_PRE_EMPHASIS_6;
1770 break;
1771 case DP_TRAIN_PRE_EMPHASIS_9_5:
1772 signal_levels |= DP_PRE_EMPHASIS_9_5;
1773 break;
1774 }
1775 return signal_levels;
1776 }
1777
1778 /* Gen6's DP voltage swing and pre-emphasis control */
1779 static uint32_t
1780 intel_gen6_edp_signal_levels(uint8_t train_set)
1781 {
1782 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1783 DP_TRAIN_PRE_EMPHASIS_MASK);
1784 switch (signal_levels) {
1785 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1786 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1787 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1788 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1789 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
1790 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1791 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1792 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
1793 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1794 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1795 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
1796 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1797 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
1798 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
1799 default:
1800 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1801 "0x%x\n", signal_levels);
1802 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
1803 }
1804 }
1805
1806 /* Gen7's DP voltage swing and pre-emphasis control */
1807 static uint32_t
1808 intel_gen7_edp_signal_levels(uint8_t train_set)
1809 {
1810 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1811 DP_TRAIN_PRE_EMPHASIS_MASK);
1812 switch (signal_levels) {
1813 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1814 return EDP_LINK_TRAIN_400MV_0DB_IVB;
1815 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1816 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
1817 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1818 return EDP_LINK_TRAIN_400MV_6DB_IVB;
1819
1820 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1821 return EDP_LINK_TRAIN_600MV_0DB_IVB;
1822 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1823 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
1824
1825 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1826 return EDP_LINK_TRAIN_800MV_0DB_IVB;
1827 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1828 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
1829
1830 default:
1831 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1832 "0x%x\n", signal_levels);
1833 return EDP_LINK_TRAIN_500MV_0DB_IVB;
1834 }
1835 }
1836
1837 /* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
1838 static uint32_t
1839 intel_hsw_signal_levels(uint8_t train_set)
1840 {
1841 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
1842 DP_TRAIN_PRE_EMPHASIS_MASK);
1843 switch (signal_levels) {
1844 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
1845 return DDI_BUF_EMP_400MV_0DB_HSW;
1846 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
1847 return DDI_BUF_EMP_400MV_3_5DB_HSW;
1848 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
1849 return DDI_BUF_EMP_400MV_6DB_HSW;
1850 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
1851 return DDI_BUF_EMP_400MV_9_5DB_HSW;
1852
1853 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
1854 return DDI_BUF_EMP_600MV_0DB_HSW;
1855 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
1856 return DDI_BUF_EMP_600MV_3_5DB_HSW;
1857 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
1858 return DDI_BUF_EMP_600MV_6DB_HSW;
1859
1860 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
1861 return DDI_BUF_EMP_800MV_0DB_HSW;
1862 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
1863 return DDI_BUF_EMP_800MV_3_5DB_HSW;
1864 default:
1865 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
1866 "0x%x\n", signal_levels);
1867 return DDI_BUF_EMP_400MV_0DB_HSW;
1868 }
1869 }
1870
1871 /* Properly updates "DP" with the correct signal levels. */
1872 static void
1873 intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
1874 {
1875 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1876 struct drm_device *dev = intel_dig_port->base.base.dev;
1877 uint32_t signal_levels, mask;
1878 uint8_t train_set = intel_dp->train_set[0];
1879
1880 if (HAS_DDI(dev)) {
1881 signal_levels = intel_hsw_signal_levels(train_set);
1882 mask = DDI_BUF_EMP_MASK;
1883 } else if (IS_VALLEYVIEW(dev)) {
1884 signal_levels = intel_vlv_signal_levels(intel_dp);
1885 mask = 0;
1886 } else if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
1887 signal_levels = intel_gen7_edp_signal_levels(train_set);
1888 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
1889 } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1890 signal_levels = intel_gen6_edp_signal_levels(train_set);
1891 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
1892 } else {
1893 signal_levels = intel_gen4_signal_levels(train_set);
1894 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
1895 }
1896
1897 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
1898
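/*
 * Read-modify-write: mask selects only the swing/emphasis bits of the
 * register value, so e.g. on HSW this is effectively
 * (*DP & ~DDI_BUF_EMP_MASK) | DDI_BUF_EMP_400MV_0DB_HSW, leaving the
 * rest of the register intact. VLV uses mask = 0, presumably because
 * its levels are programmed through PHY-side writes inside
 * intel_vlv_signal_levels() rather than through this register.
 */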
1899 *DP = (*DP & ~mask) | signal_levels;
1900 }
1901
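/*
 * Note on the flow below: the source-side pattern select (DP_TP_CTL on
 * DDI, otherwise the DP_LINK_TRAIN_* bits of the port register) must
 * match the sink-side DP_TRAINING_PATTERN_SET AUX write that follows,
 * and the per-lane drive settings in train_set are only pushed while a
 * training pattern is active.
 */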
1902 static bool
1903 intel_dp_set_link_train(struct intel_dp *intel_dp,
1904 uint32_t dp_reg_value,
1905 uint8_t dp_train_pat)
1906 {
1907 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1908 struct drm_device *dev = intel_dig_port->base.base.dev;
1909 struct drm_i915_private *dev_priv = dev->dev_private;
1910 enum port port = intel_dig_port->port;
1911 int ret;
1912
1913 if (HAS_DDI(dev)) {
1914 uint32_t temp = I915_READ(DP_TP_CTL(port));
1915
1916 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1917 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
1918 else
1919 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
1920
1921 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1922 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1923 case DP_TRAINING_PATTERN_DISABLE:
1924 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1925
1926 break;
1927 case DP_TRAINING_PATTERN_1:
1928 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
1929 break;
1930 case DP_TRAINING_PATTERN_2:
1931 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
1932 break;
1933 case DP_TRAINING_PATTERN_3:
1934 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
1935 break;
1936 }
1937 I915_WRITE(DP_TP_CTL(port), temp);
1938
1939 } else if (HAS_PCH_CPT(dev) &&
1940 (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
1941 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
1942
1943 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1944 case DP_TRAINING_PATTERN_DISABLE:
1945 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
1946 break;
1947 case DP_TRAINING_PATTERN_1:
1948 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
1949 break;
1950 case DP_TRAINING_PATTERN_2:
1951 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1952 break;
1953 case DP_TRAINING_PATTERN_3:
1954 DRM_ERROR("DP training pattern 3 not supported\n");
1955 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
1956 break;
1957 }
1958
1959 } else {
1960 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
1961
1962 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1963 case DP_TRAINING_PATTERN_DISABLE:
1964 dp_reg_value |= DP_LINK_TRAIN_OFF;
1965 break;
1966 case DP_TRAINING_PATTERN_1:
1967 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
1968 break;
1969 case DP_TRAINING_PATTERN_2:
1970 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1971 break;
1972 case DP_TRAINING_PATTERN_3:
1973 DRM_ERROR("DP training pattern 3 not supported\n");
1974 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
1975 break;
1976 }
1977 }
1978
1979 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1980 POSTING_READ(intel_dp->output_reg);
1981
1982 intel_dp_aux_native_write_1(intel_dp,
1983 DP_TRAINING_PATTERN_SET,
1984 dp_train_pat);
1985
1986 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
1987 DP_TRAINING_PATTERN_DISABLE) {
1988 ret = intel_dp_aux_native_write(intel_dp,
1989 DP_TRAINING_LANE0_SET,
1990 intel_dp->train_set,
1991 intel_dp->lane_count);
1992 if (ret != intel_dp->lane_count)
1993 return false;
1994 }
1995
1996 return true;
1997 }
1998
1999 static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2000 {
2001 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2002 struct drm_device *dev = intel_dig_port->base.base.dev;
2003 struct drm_i915_private *dev_priv = dev->dev_private;
2004 enum port port = intel_dig_port->port;
2005 uint32_t val;
2006
2007 if (!HAS_DDI(dev))
2008 return;
2009
2010 val = I915_READ(DP_TP_CTL(port));
2011 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2012 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
2013 I915_WRITE(DP_TP_CTL(port), val);
2014
2015 /*
2016 * On PORT_A we can have only eDP in SST mode. There, the only reason
2017 * we need to set idle transmission mode is to work around a HW issue
2018 * where we enable the pipe while not in idle link-training mode.
2019 * In this case there is a requirement to wait for a minimum number of
2020 * idle patterns to be sent.
2021 */
2022 if (port == PORT_A)
2023 return;
2024
2025 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2026 1))
2027 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2028 }
2029
2030 /* Enable corresponding port and start training pattern 1 */
2031 void
2032 intel_dp_start_link_train(struct intel_dp *intel_dp)
2033 {
2034 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
2035 struct drm_device *dev = encoder->dev;
2036 int i;
2037 uint8_t voltage;
2038 bool clock_recovery = false;
2039 int voltage_tries, loop_tries;
2040 uint32_t DP = intel_dp->DP;
2041
2042 if (HAS_DDI(dev))
2043 intel_ddi_prepare_link_retrain(encoder);
2044
2045 /* Write the link configuration data */
2046 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
2047 intel_dp->link_configuration,
2048 DP_LINK_CONFIGURATION_SIZE);
2049
2050 DP |= DP_PORT_EN;
2051
2052 memset(intel_dp->train_set, 0, 4);
2053 voltage = 0xff;
2054 voltage_tries = 0;
2055 loop_tries = 0;
2056 clock_recovery = false;
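/*
 * This loop exits in one of four ways: clock recovery succeeds, an AUX
 * access fails, the same voltage swing gets retried 5 times, or all
 * lanes hit max swing 5 full times (each full retry restarting from a
 * zeroed train_set).
 */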
2057 for (;;) {
2058 /* Use intel_dp->train_set[0] to set the voltage and pre-emphasis values */
2059 uint8_t link_status[DP_LINK_STATUS_SIZE];
2060
2061 intel_dp_set_signal_levels(intel_dp, &DP);
2062
2063 /* Set training pattern 1 */
2064 if (!intel_dp_set_link_train(intel_dp, DP,
2065 DP_TRAINING_PATTERN_1 |
2066 DP_LINK_SCRAMBLING_DISABLE))
2067 break;
2068
2069 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
2070 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2071 DRM_ERROR("failed to get link status\n");
2072 break;
2073 }
2074
2075 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2076 DRM_DEBUG_KMS("clock recovery OK\n");
2077 clock_recovery = true;
2078 break;
2079 }
2080
2081 /* Check to see if we've tried the max voltage */
2082 for (i = 0; i < intel_dp->lane_count; i++)
2083 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
2084 break;
2085 if (i == intel_dp->lane_count) {
2086 ++loop_tries;
2087 if (loop_tries == 5) {
2088 DRM_DEBUG_KMS("too many full retries, give up\n");
2089 break;
2090 }
2091 memset(intel_dp->train_set, 0, 4);
2092 voltage_tries = 0;
2093 continue;
2094 }
2095
2096 /* Check to see if we've tried the same voltage 5 times */
2097 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
2098 ++voltage_tries;
2099 if (voltage_tries == 5) {
2100 DRM_DEBUG_KMS("too many voltage retries, give up\n");
2101 break;
2102 }
2103 } else
2104 voltage_tries = 0;
2105 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
2106
2107 /* Compute new intel_dp->train_set as requested by target */
2108 intel_get_adjust_train(intel_dp, link_status);
2109 }
2110
2111 intel_dp->DP = DP;
2112 }
2113
2114 void
2115 intel_dp_complete_link_train(struct intel_dp *intel_dp)
2116 {
2117 bool channel_eq = false;
2118 int tries, cr_tries;
2119 uint32_t DP = intel_dp->DP;
2120
2121 /* channel equalization */
2122 tries = 0;
2123 cr_tries = 0;
2124 channel_eq = false;
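/*
 * Same shape for channel EQ: up to 5 EQ attempts per clock recovery
 * pass, a fresh intel_dp_start_link_train() whenever CR is lost or EQ
 * keeps failing, and a hard abort after more than 5 CR restarts.
 */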
2125 for (;;) {
2126 uint8_t link_status[DP_LINK_STATUS_SIZE];
2127
2128 if (cr_tries > 5) {
2129 DRM_ERROR("failed to train DP, aborting\n");
2130 intel_dp_link_down(intel_dp);
2131 break;
2132 }
2133
2134 intel_dp_set_signal_levels(intel_dp, &DP);
2135
2136 /* channel eq pattern */
2137 if (!intel_dp_set_link_train(intel_dp, DP,
2138 DP_TRAINING_PATTERN_2 |
2139 DP_LINK_SCRAMBLING_DISABLE))
2140 break;
2141
2142 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
2143 if (!intel_dp_get_link_status(intel_dp, link_status))
2144 break;
2145
2146 /* Make sure clock is still ok */
2147 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
2148 intel_dp_start_link_train(intel_dp);
2149 cr_tries++;
2150 continue;
2151 }
2152
2153 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2154 channel_eq = true;
2155 break;
2156 }
2157
2158 /* Try 5 times, then try clock recovery if that fails */
2159 if (tries > 5) {
2160 intel_dp_link_down(intel_dp);
2161 intel_dp_start_link_train(intel_dp);
2162 tries = 0;
2163 cr_tries++;
2164 continue;
2165 }
2166
2167 /* Compute new intel_dp->train_set as requested by target */
2168 intel_get_adjust_train(intel_dp, link_status);
2169 ++tries;
2170 }
2171
2172 intel_dp_set_idle_link_train(intel_dp);
2173
2174 intel_dp->DP = DP;
2175
2176 if (channel_eq)
2177 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
2178
2179 }
2180
2181 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2182 {
2183 intel_dp_set_link_train(intel_dp, intel_dp->DP,
2184 DP_TRAINING_PATTERN_DISABLE);
2185 }
2186
2187 static void
2188 intel_dp_link_down(struct intel_dp *intel_dp)
2189 {
2190 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2191 struct drm_device *dev = intel_dig_port->base.base.dev;
2192 struct drm_i915_private *dev_priv = dev->dev_private;
2193 struct intel_crtc *intel_crtc =
2194 to_intel_crtc(intel_dig_port->base.base.crtc);
2195 uint32_t DP = intel_dp->DP;
2196
2197 /*
2198 * DDI code has a strict mode set sequence and we should try to respect
2199 * it, otherwise we might hang the machine in many different ways. So we
2200 * really should be disabling the port only on a complete crtc_disable
2201 * sequence. This function is just called under two conditions on DDI
2202 * code:
2203 * - Link train failed while doing crtc_enable, and on this case we
2204 * really should respect the mode set sequence and wait for a
2205 * crtc_disable.
2206 * - Someone turned the monitor off and intel_dp_check_link_status
2207 * called us. We don't need to disable the whole port on this case, so
2208 * when someone turns the monitor on again,
2209 * intel_ddi_prepare_link_retrain will take care of redoing the link
2210 * train.
2211 */
2212 if (HAS_DDI(dev))
2213 return;
2214
2215 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
2216 return;
2217
2218 DRM_DEBUG_KMS("\n");
2219
2220 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
2221 DP &= ~DP_LINK_TRAIN_MASK_CPT;
2222 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
2223 } else {
2224 DP &= ~DP_LINK_TRAIN_MASK;
2225 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
2226 }
2227 POSTING_READ(intel_dp->output_reg);
2228
2229 /* We don't really know why we're doing this */
2230 intel_wait_for_vblank(dev, intel_crtc->pipe);
2231
2232 if (HAS_PCH_IBX(dev) &&
2233 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
2234 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2235
2236 /* Hardware workaround: leaving our transcoder select
2237 * set to transcoder B while it's off will prevent the
2238 * corresponding HDMI output on transcoder A.
2239 *
2240 * Combine this with another hardware workaround:
2241 * transcoder select bit can only be cleared while the
2242 * port is enabled.
2243 */
2244 DP &= ~DP_PIPEB_SELECT;
2245 I915_WRITE(intel_dp->output_reg, DP);
2246
2247 /* Changes to enable or select take place the vblank
2248 * after being written.
2249 */
2250 if (WARN_ON(crtc == NULL)) {
2251 /* We should never try to disable a port without a crtc
2252 * attached. For paranoia keep the code around for a
2253 * bit. */
2254 POSTING_READ(intel_dp->output_reg);
2255 msleep(50);
2256 } else
2257 intel_wait_for_vblank(dev, intel_crtc->pipe);
2258 }
2259
2260 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
2261 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2262 POSTING_READ(intel_dp->output_reg);
2263 msleep(intel_dp->panel_power_down_delay);
2264 }
2265
2266 static bool
2267 intel_dp_get_dpcd(struct intel_dp *intel_dp)
2268 {
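/* 3 chars per byte: two hex digits plus a separator (the last slot
 * holds the NUL), matching hex_dump_to_buffer()'s "xx xx ..." output
 * with groupsize 1. */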
2269 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2270
2271 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
2272 sizeof(intel_dp->dpcd)) == 0)
2273 return false; /* aux transfer failed */
2274
2275 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2276 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2277 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2278
2279 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2280 return false; /* DPCD not present */
2281
2282 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2283 DP_DWN_STRM_PORT_PRESENT))
2284 return true; /* native DP sink */
2285
2286 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2287 return true; /* no per-port downstream info */
2288
2289 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2290 intel_dp->downstream_ports,
2291 DP_MAX_DOWNSTREAM_PORTS) == 0)
2292 return false; /* downstream port status fetch failed */
2293
2294 return true;
2295 }
2296
2297 static void
2298 intel_dp_probe_oui(struct intel_dp *intel_dp)
2299 {
2300 u8 buf[3];
2301
2302 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2303 return;
2304
2305 ironlake_edp_panel_vdd_on(intel_dp);
2306
2307 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2308 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2309 buf[0], buf[1], buf[2]);
2310
2311 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2312 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2313 buf[0], buf[1], buf[2]);
2314
2315 ironlake_edp_panel_vdd_off(intel_dp, false);
2316 }
2317
2318 static bool
2319 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2320 {
2321 int ret;
2322
2323 ret = intel_dp_aux_native_read_retry(intel_dp,
2324 DP_DEVICE_SERVICE_IRQ_VECTOR,
2325 sink_irq_vector, 1);
2326 if (!ret)
2327 return false;
2328
2329 return true;
2330 }
2331
2332 static void
2333 intel_dp_handle_test_request(struct intel_dp *intel_dp)
2334 {
2335 /* NAK by default */
2336 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
2337 }
2338
2339 /*
2340 * According to DP spec
2341 * 5.1.2:
2342 * 1. Read DPCD
2343 * 2. Configure link according to Receiver Capabilities
2344 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2345 * 4. Check link status on receipt of hot-plug interrupt
2346 */
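/*
 * Roughly how those steps map onto this file: step 1 is
 * intel_dp_get_dpcd(), steps 2-3 are intel_dp_start_link_train() and
 * intel_dp_complete_link_train(), and step 4 is the function below,
 * invoked e.g. from intel_dp_hot_plug().
 */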
2347
2348 void
2349 intel_dp_check_link_status(struct intel_dp *intel_dp)
2350 {
2351 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
2352 u8 sink_irq_vector;
2353 u8 link_status[DP_LINK_STATUS_SIZE];
2354
2355 if (!intel_encoder->connectors_active)
2356 return;
2357
2358 if (WARN_ON(!intel_encoder->base.crtc))
2359 return;
2360
2361 /* Try to read receiver status if the link appears to be up */
2362 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2363 intel_dp_link_down(intel_dp);
2364 return;
2365 }
2366
2367 /* Now read the DPCD to see if it's actually running */
2368 if (!intel_dp_get_dpcd(intel_dp)) {
2369 intel_dp_link_down(intel_dp);
2370 return;
2371 }
2372
2373 /* Try to read the source of the interrupt */
2374 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2375 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2376 /* Clear interrupt source */
2377 intel_dp_aux_native_write_1(intel_dp,
2378 DP_DEVICE_SERVICE_IRQ_VECTOR,
2379 sink_irq_vector);
2380
2381 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2382 intel_dp_handle_test_request(intel_dp);
2383 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2384 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2385 }
2386
2387 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
2388 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
2389 drm_get_encoder_name(&intel_encoder->base));
2390 intel_dp_start_link_train(intel_dp);
2391 intel_dp_complete_link_train(intel_dp);
2392 intel_dp_stop_link_train(intel_dp);
2393 }
2394 }
2395
2396 /* XXX this is probably wrong for multiple downstream ports */
2397 static enum drm_connector_status
2398 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
2399 {
2400 uint8_t *dpcd = intel_dp->dpcd;
2401 bool hpd;
2402 uint8_t type;
2403
2404 if (!intel_dp_get_dpcd(intel_dp))
2405 return connector_status_disconnected;
2406
2407 /* if there's no downstream port, we're done */
2408 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
2409 return connector_status_connected;
2410
2411 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2412 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2413 if (hpd) {
2414 uint8_t reg;
2415 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
2416 &reg, 1))
2417 return connector_status_unknown;
2418 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2419 : connector_status_disconnected;
2420 }
2421
2422 /* If no HPD, poke DDC gently */
2423 if (drm_probe_ddc(&intel_dp->adapter))
2424 return connector_status_connected;
2425
2426 /* Well we tried, say unknown for unreliable port types */
2427 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2428 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2429 return connector_status_unknown;
2430
2431 /* Anything else is out of spec, warn and ignore */
2432 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
2433 return connector_status_disconnected;
2434 }
2435
2436 static enum drm_connector_status
2437 ironlake_dp_detect(struct intel_dp *intel_dp)
2438 {
2439 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2440 struct drm_i915_private *dev_priv = dev->dev_private;
2441 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2442 enum drm_connector_status status;
2443
2444 /* Can't disconnect eDP, but you can close the lid... */
2445 if (is_edp(intel_dp)) {
2446 status = intel_panel_detect(dev);
2447 if (status == connector_status_unknown)
2448 status = connector_status_connected;
2449 return status;
2450 }
2451
2452 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2453 return connector_status_disconnected;
2454
2455 return intel_dp_detect_dpcd(intel_dp);
2456 }
2457
2458 static enum drm_connector_status
2459 g4x_dp_detect(struct intel_dp *intel_dp)
2460 {
2461 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2462 struct drm_i915_private *dev_priv = dev->dev_private;
2463 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2464 uint32_t bit;
2465
2466 /* Can't disconnect eDP, but you can close the lid... */
2467 if (is_edp(intel_dp)) {
2468 enum drm_connector_status status;
2469
2470 status = intel_panel_detect(dev);
2471 if (status == connector_status_unknown)
2472 status = connector_status_connected;
2473 return status;
2474 }
2475
2476 switch (intel_dig_port->port) {
2477 case PORT_B:
2478 bit = PORTB_HOTPLUG_LIVE_STATUS;
2479 break;
2480 case PORT_C:
2481 bit = PORTC_HOTPLUG_LIVE_STATUS;
2482 break;
2483 case PORT_D:
2484 bit = PORTD_HOTPLUG_LIVE_STATUS;
2485 break;
2486 default:
2487 return connector_status_unknown;
2488 }
2489
2490 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
2491 return connector_status_disconnected;
2492
2493 return intel_dp_detect_dpcd(intel_dp);
2494 }
2495
2496 static struct edid *
2497 intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2498 {
2499 struct intel_connector *intel_connector = to_intel_connector(connector);
2500
2501 /* use cached edid if we have one */
2502 if (intel_connector->edid) {
2503 struct edid *edid;
2504 int size;
2505
2506 /* invalid edid */
2507 if (IS_ERR(intel_connector->edid))
2508 return NULL;
2509
2510 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
2511 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
2512 if (!edid)
2513 return NULL;
2514
2515 return edid;
2516 }
2517
2518 return drm_get_edid(connector, adapter);
2519 }
2520
2521 static int
2522 intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2523 {
2524 struct intel_connector *intel_connector = to_intel_connector(connector);
2525
2526 /* use cached edid if we have one */
2527 if (intel_connector->edid) {
2528 /* invalid edid */
2529 if (IS_ERR(intel_connector->edid))
2530 return 0;
2531
2532 return intel_connector_update_modes(connector,
2533 intel_connector->edid);
2534 }
2535
2536 return intel_ddc_get_modes(connector, adapter);
2537 }
2538
2539 static enum drm_connector_status
2540 intel_dp_detect(struct drm_connector *connector, bool force)
2541 {
2542 struct intel_dp *intel_dp = intel_attached_dp(connector);
2543 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2544 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2545 struct drm_device *dev = connector->dev;
2546 enum drm_connector_status status;
2547 struct edid *edid = NULL;
2548
2549 intel_dp->has_audio = false;
2550
2551 if (HAS_PCH_SPLIT(dev))
2552 status = ironlake_dp_detect(intel_dp);
2553 else
2554 status = g4x_dp_detect(intel_dp);
2555
2556 if (status != connector_status_connected)
2557 return status;
2558
2559 intel_dp_probe_oui(intel_dp);
2560
2561 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2562 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
2563 } else {
2564 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2565 if (edid) {
2566 intel_dp->has_audio = drm_detect_monitor_audio(edid);
2567 kfree(edid);
2568 }
2569 }
2570
2571 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2572 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
2573 return connector_status_connected;
2574 }
2575
2576 static int intel_dp_get_modes(struct drm_connector *connector)
2577 {
2578 struct intel_dp *intel_dp = intel_attached_dp(connector);
2579 struct intel_connector *intel_connector = to_intel_connector(connector);
2580 struct drm_device *dev = connector->dev;
2581 int ret;
2582
2583 /* We should parse the EDID data and find out if it has an audio
2584 * sink. */
2585
2586 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
2587 if (ret)
2588 return ret;
2589
2590 /* if eDP has no EDID, fall back to fixed mode */
2591 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
2592 struct drm_display_mode *mode;
2593 mode = drm_mode_duplicate(dev,
2594 intel_connector->panel.fixed_mode);
2595 if (mode) {
2596 drm_mode_probed_add(connector, mode);
2597 return 1;
2598 }
2599 }
2600 return 0;
2601 }
2602
2603 static bool
2604 intel_dp_detect_audio(struct drm_connector *connector)
2605 {
2606 struct intel_dp *intel_dp = intel_attached_dp(connector);
2607 struct edid *edid;
2608 bool has_audio = false;
2609
2610 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
2611 if (edid) {
2612 has_audio = drm_detect_monitor_audio(edid);
2613 kfree(edid);
2614 }
2615
2616 return has_audio;
2617 }
2618
2619 static int
2620 intel_dp_set_property(struct drm_connector *connector,
2621 struct drm_property *property,
2622 uint64_t val)
2623 {
2624 struct drm_i915_private *dev_priv = connector->dev->dev_private;
2625 struct intel_connector *intel_connector = to_intel_connector(connector);
2626 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2627 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2628 int ret;
2629
2630 ret = drm_object_property_set_value(&connector->base, property, val);
2631 if (ret)
2632 return ret;
2633
2634 if (property == dev_priv->force_audio_property) {
2635 int i = val;
2636 bool has_audio;
2637
2638 if (i == intel_dp->force_audio)
2639 return 0;
2640
2641 intel_dp->force_audio = i;
2642
2643 if (i == HDMI_AUDIO_AUTO)
2644 has_audio = intel_dp_detect_audio(connector);
2645 else
2646 has_audio = (i == HDMI_AUDIO_ON);
2647
2648 if (has_audio == intel_dp->has_audio)
2649 return 0;
2650
2651 intel_dp->has_audio = has_audio;
2652 goto done;
2653 }
2654
2655 if (property == dev_priv->broadcast_rgb_property) {
2656 bool old_auto = intel_dp->color_range_auto;
2657 uint32_t old_range = intel_dp->color_range;
2658
2659 switch (val) {
2660 case INTEL_BROADCAST_RGB_AUTO:
2661 intel_dp->color_range_auto = true;
2662 break;
2663 case INTEL_BROADCAST_RGB_FULL:
2664 intel_dp->color_range_auto = false;
2665 intel_dp->color_range = 0;
2666 break;
2667 case INTEL_BROADCAST_RGB_LIMITED:
2668 intel_dp->color_range_auto = false;
2669 intel_dp->color_range = DP_COLOR_RANGE_16_235;
2670 break;
2671 default:
2672 return -EINVAL;
2673 }
2674
2675 if (old_auto == intel_dp->color_range_auto &&
2676 old_range == intel_dp->color_range)
2677 return 0;
2678
2679 goto done;
2680 }
2681
2682 if (is_edp(intel_dp) &&
2683 property == connector->dev->mode_config.scaling_mode_property) {
2684 if (val == DRM_MODE_SCALE_NONE) {
2685 DRM_DEBUG_KMS("no scaling not supported\n");
2686 return -EINVAL;
2687 }
2688
2689 if (intel_connector->panel.fitting_mode == val) {
2690 /* the eDP scaling property is not changed */
2691 return 0;
2692 }
2693 intel_connector->panel.fitting_mode = val;
2694
2695 goto done;
2696 }
2697
2698 return -EINVAL;
2699
2700 done:
2701 if (intel_encoder->base.crtc)
2702 intel_crtc_restore_mode(intel_encoder->base.crtc);
2703
2704 return 0;
2705 }
2706
2707 static void
2708 intel_dp_destroy(struct drm_connector *connector)
2709 {
2710 struct intel_dp *intel_dp = intel_attached_dp(connector);
2711 struct intel_connector *intel_connector = to_intel_connector(connector);
2712
2713 if (!IS_ERR_OR_NULL(intel_connector->edid))
2714 kfree(intel_connector->edid);
2715
2716 if (is_edp(intel_dp))
2717 intel_panel_fini(&intel_connector->panel);
2718
2719 drm_sysfs_connector_remove(connector);
2720 drm_connector_cleanup(connector);
2721 kfree(connector);
2722 }
2723
2724 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2725 {
2726 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2727 struct intel_dp *intel_dp = &intel_dig_port->dp;
2728 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2729
2730 i2c_del_adapter(&intel_dp->adapter);
2731 drm_encoder_cleanup(encoder);
2732 if (is_edp(intel_dp)) {
2733 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2734 mutex_lock(&dev->mode_config.mutex);
2735 ironlake_panel_vdd_off_sync(intel_dp);
2736 mutex_unlock(&dev->mode_config.mutex);
2737 }
2738 kfree(intel_dig_port);
2739 }
2740
2741 static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2742 .mode_set = intel_dp_mode_set,
2743 };
2744
2745 static const struct drm_connector_funcs intel_dp_connector_funcs = {
2746 .dpms = intel_connector_dpms,
2747 .detect = intel_dp_detect,
2748 .fill_modes = drm_helper_probe_single_connector_modes,
2749 .set_property = intel_dp_set_property,
2750 .destroy = intel_dp_destroy,
2751 };
2752
2753 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2754 .get_modes = intel_dp_get_modes,
2755 .mode_valid = intel_dp_mode_valid,
2756 .best_encoder = intel_best_encoder,
2757 };
2758
2759 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
2760 .destroy = intel_dp_encoder_destroy,
2761 };
2762
2763 static void
2764 intel_dp_hot_plug(struct intel_encoder *intel_encoder)
2765 {
2766 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
2767
2768 intel_dp_check_link_status(intel_dp);
2769 }
2770
2771 /* Return which DP Port should be selected for Transcoder DP control */
2772 int
2773 intel_trans_dp_port_sel(struct drm_crtc *crtc)
2774 {
2775 struct drm_device *dev = crtc->dev;
2776 struct intel_encoder *intel_encoder;
2777 struct intel_dp *intel_dp;
2778
2779 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
2780 intel_dp = enc_to_intel_dp(&intel_encoder->base);
2781
2782 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
2783 intel_encoder->type == INTEL_OUTPUT_EDP)
2784 return intel_dp->output_reg;
2785 }
2786
2787 return -1;
2788 }
2789
2790 /* check the VBT to see whether the eDP is on DP-D port */
2791 bool intel_dpd_is_edp(struct drm_device *dev)
2792 {
2793 struct drm_i915_private *dev_priv = dev->dev_private;
2794 struct child_device_config *p_child;
2795 int i;
2796
2797 if (!dev_priv->vbt.child_dev_num)
2798 return false;
2799
2800 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
2801 p_child = dev_priv->vbt.child_dev + i;
2802
2803 if (p_child->dvo_port == PORT_IDPD &&
2804 p_child->device_type == DEVICE_TYPE_eDP)
2805 return true;
2806 }
2807 return false;
2808 }
2809
2810 static void
2811 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
2812 {
2813 struct intel_connector *intel_connector = to_intel_connector(connector);
2814
2815 intel_attach_force_audio_property(connector);
2816 intel_attach_broadcast_rgb_property(connector);
2817 intel_dp->color_range_auto = true;
2818
2819 if (is_edp(intel_dp)) {
2820 drm_mode_create_scaling_mode_property(connector->dev);
2821 drm_object_attach_property(
2822 &connector->base,
2823 connector->dev->mode_config.scaling_mode_property,
2824 DRM_MODE_SCALE_ASPECT);
2825 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
2826 }
2827 }
2828
2829 static void
2830 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
2831 struct intel_dp *intel_dp,
2832 struct edp_power_seq *out)
2833 {
2834 struct drm_i915_private *dev_priv = dev->dev_private;
2835 struct edp_power_seq cur, vbt, spec, final;
2836 u32 pp_on, pp_off, pp_div, pp;
2837 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
2838
2839 if (HAS_PCH_SPLIT(dev)) {
2840 pp_control_reg = PCH_PP_CONTROL;
2841 pp_on_reg = PCH_PP_ON_DELAYS;
2842 pp_off_reg = PCH_PP_OFF_DELAYS;
2843 pp_div_reg = PCH_PP_DIVISOR;
2844 } else {
2845 pp_control_reg = PIPEA_PP_CONTROL;
2846 pp_on_reg = PIPEA_PP_ON_DELAYS;
2847 pp_off_reg = PIPEA_PP_OFF_DELAYS;
2848 pp_div_reg = PIPEA_PP_DIVISOR;
2849 }
2850
2851 /* Workaround: Need to write PP_CONTROL with the unlock key as
2852 * the very first thing. */
2853 pp = ironlake_get_pp_control(intel_dp);
2854 I915_WRITE(pp_control_reg, pp);
2855
2856 pp_on = I915_READ(pp_on_reg);
2857 pp_off = I915_READ(pp_off_reg);
2858 pp_div = I915_READ(pp_div_reg);
2859
2860 /* Pull timing values out of registers */
2861 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
2862 PANEL_POWER_UP_DELAY_SHIFT;
2863
2864 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2865 PANEL_LIGHT_ON_DELAY_SHIFT;
2866
2867 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2868 PANEL_LIGHT_OFF_DELAY_SHIFT;
2869
2870 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
2871 PANEL_POWER_DOWN_DELAY_SHIFT;
2872
2873 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
2874 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
2875
2876 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2877 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
2878
2879 vbt = dev_priv->vbt.edp_pps;
2880
2881 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
2882 * our hw here, which are all in 100usec. */
2883 spec.t1_t3 = 210 * 10;
2884 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
2885 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
2886 spec.t10 = 500 * 10;
2887 /* This one is special and actually in units of 100ms, but zero
2888 * based in the hw (so we need to add 100 ms). But the sw vbt
2889 * table multiplies it by 1000 to make it in units of 100usec,
2890 * too. */
2891 spec.t11_t12 = (510 + 100) * 10;
2892
2893 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
2894 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
2895
2896 /* Use the max of the register settings and vbt. If both are
2897 * unset, fall back to the spec limits. */
2898 #define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
2899 spec.field : \
2900 max(cur.field, vbt.field))
2901 assign_final(t1_t3);
2902 assign_final(t8);
2903 assign_final(t9);
2904 assign_final(t10);
2905 assign_final(t11_t12);
2906 #undef assign_final
2907
2908 #define get_delay(field) (DIV_ROUND_UP(final.field, 10))
2909 intel_dp->panel_power_up_delay = get_delay(t1_t3);
2910 intel_dp->backlight_on_delay = get_delay(t8);
2911 intel_dp->backlight_off_delay = get_delay(t9);
2912 intel_dp->panel_power_down_delay = get_delay(t10);
2913 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
2914 #undef get_delay
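/* Worked example, assuming both the register and VBT values read back
 * as zero: final.t1_t3 = spec.t1_t3 = 2100 (100us units), so
 * get_delay() yields DIV_ROUND_UP(2100, 10) = 210 ms. */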
2915
2916 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
2917 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
2918 intel_dp->panel_power_cycle_delay);
2919
2920 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2921 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2922
2923 if (out)
2924 *out = final;
2925 }
2926
2927 static void
2928 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
2929 struct intel_dp *intel_dp,
2930 struct edp_power_seq *seq)
2931 {
2932 struct drm_i915_private *dev_priv = dev->dev_private;
2933 u32 pp_on, pp_off, pp_div, port_sel = 0;
2934 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
2935 int pp_on_reg, pp_off_reg, pp_div_reg;
2936
2937 if (HAS_PCH_SPLIT(dev)) {
2938 pp_on_reg = PCH_PP_ON_DELAYS;
2939 pp_off_reg = PCH_PP_OFF_DELAYS;
2940 pp_div_reg = PCH_PP_DIVISOR;
2941 } else {
2942 pp_on_reg = PIPEA_PP_ON_DELAYS;
2943 pp_off_reg = PIPEA_PP_OFF_DELAYS;
2944 pp_div_reg = PIPEA_PP_DIVISOR;
2945 }
2946
2947 if (IS_VALLEYVIEW(dev))
2948 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
2949
2950 /* And finally store the new values in the power sequencer. */
2951 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
2952 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
2953 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
2954 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
2955 /* Compute the divisor for the pp clock, simply match the Bspec
2956 * formula. */
2957 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
2958 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
2959 << PANEL_POWER_CYCLE_DELAY_SHIFT);
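/* E.g. with a hypothetical raw clock of div = 125, the reference
 * divider field becomes (100 * 125) / 2 - 1 = 6249, and a t11_t12 of
 * 6100 (100us units) stores a cycle delay of
 * DIV_ROUND_UP(6100, 1000) = 7. */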
2960
2961 /* Haswell doesn't have any port selection bits for the panel
2962 * power sequencer any more. */
2963 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
2964 if (is_cpu_edp(intel_dp))
2965 port_sel = PANEL_POWER_PORT_DP_A;
2966 else
2967 port_sel = PANEL_POWER_PORT_DP_D;
2968 }
2969
2970 pp_on |= port_sel;
2971
2972 I915_WRITE(pp_on_reg, pp_on);
2973 I915_WRITE(pp_off_reg, pp_off);
2974 I915_WRITE(pp_div_reg, pp_div);
2975
2976 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
2977 I915_READ(pp_on_reg),
2978 I915_READ(pp_off_reg),
2979 I915_READ(pp_div_reg));
2980 }
2981
2982 void
2983 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
2984 struct intel_connector *intel_connector)
2985 {
2986 struct drm_connector *connector = &intel_connector->base;
2987 struct intel_dp *intel_dp = &intel_dig_port->dp;
2988 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2989 struct drm_device *dev = intel_encoder->base.dev;
2990 struct drm_i915_private *dev_priv = dev->dev_private;
2991 struct drm_display_mode *fixed_mode = NULL;
2992 struct edp_power_seq power_seq = { 0 };
2993 enum port port = intel_dig_port->port;
2994 const char *name = NULL;
2995 int type;
2996
2997 /* Preserve the current hw state. */
2998 intel_dp->DP = I915_READ(intel_dp->output_reg);
2999 intel_dp->attached_connector = intel_connector;
3000
3001 type = DRM_MODE_CONNECTOR_DisplayPort;
3002 /*
3003 * FIXME: We need to initialize built-in panels before external panels.
3004 * For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
3005 */
3006 switch (port) {
3007 case PORT_A:
3008 type = DRM_MODE_CONNECTOR_eDP;
3009 break;
3010 case PORT_C:
3011 if (IS_VALLEYVIEW(dev))
3012 type = DRM_MODE_CONNECTOR_eDP;
3013 break;
3014 case PORT_D:
3015 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3016 type = DRM_MODE_CONNECTOR_eDP;
3017 break;
3018 default: /* silence GCC warning */
3019 break;
3020 }
3021
3022 /*
3023 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3024 * for DP the encoder type can be set by the caller to
3025 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
3026 */
3027 if (type == DRM_MODE_CONNECTOR_eDP)
3028 intel_encoder->type = INTEL_OUTPUT_EDP;
3029
3030 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
3031 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
3032 port_name(port));
3033
3034 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
3035 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
3036
3037 connector->interlace_allowed = true;
3038 connector->doublescan_allowed = false;
3039
3040 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3041 ironlake_panel_vdd_work);
3042
3043 intel_connector_attach_encoder(intel_connector, intel_encoder);
3044 drm_sysfs_connector_add(connector);
3045
3046 if (HAS_DDI(dev))
3047 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3048 else
3049 intel_connector->get_hw_state = intel_connector_get_hw_state;
3050
3051 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
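/* On pre-DDI platforms the AUX channel control register sits at a fixed
 * +0x10 offset from the DP port register; DDI parts override this with
 * the dedicated per-port registers below. */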
3052 if (HAS_DDI(dev)) {
3053 switch (intel_dig_port->port) {
3054 case PORT_A:
3055 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3056 break;
3057 case PORT_B:
3058 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3059 break;
3060 case PORT_C:
3061 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3062 break;
3063 case PORT_D:
3064 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3065 break;
3066 default:
3067 BUG();
3068 }
3069 }
3070
3071 /* Set up the DDC bus. */
3072 switch (port) {
3073 case PORT_A:
3074 intel_encoder->hpd_pin = HPD_PORT_A;
3075 name = "DPDDC-A";
3076 break;
3077 case PORT_B:
3078 intel_encoder->hpd_pin = HPD_PORT_B;
3079 name = "DPDDC-B";
3080 break;
3081 case PORT_C:
3082 intel_encoder->hpd_pin = HPD_PORT_C;
3083 name = "DPDDC-C";
3084 break;
3085 case PORT_D:
3086 intel_encoder->hpd_pin = HPD_PORT_D;
3087 name = "DPDDC-D";
3088 break;
3089 default:
3090 BUG();
3091 }
3092
3093 if (is_edp(intel_dp))
3094 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3095
3096 intel_dp_i2c_init(intel_dp, intel_connector, name);
3097
3098 /* Cache DPCD and EDID for edp. */
3099 if (is_edp(intel_dp)) {
3100 bool ret;
3101 struct drm_display_mode *scan;
3102 struct edid *edid;
3103
3104 ironlake_edp_panel_vdd_on(intel_dp);
3105 ret = intel_dp_get_dpcd(intel_dp);
3106 ironlake_edp_panel_vdd_off(intel_dp, false);
3107
3108 if (ret) {
3109 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3110 dev_priv->no_aux_handshake =
3111 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3112 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3113 } else {
3114 /* if this fails, presume the device is a ghost */
3115 DRM_INFO("failed to retrieve link info, disabling eDP\n");
3116 intel_dp_encoder_destroy(&intel_encoder->base);
3117 intel_dp_destroy(connector);
3118 return;
3119 }
3120
3121 /* We now know it's not a ghost, init power sequence regs. */
3122 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3123 &power_seq);
3124
3125 ironlake_edp_panel_vdd_on(intel_dp);
3126 edid = drm_get_edid(connector, &intel_dp->adapter);
3127 if (edid) {
3128 if (drm_add_edid_modes(connector, edid)) {
3129 drm_mode_connector_update_edid_property(connector, edid);
3130 drm_edid_to_eld(connector, edid);
3131 } else {
3132 kfree(edid);
3133 edid = ERR_PTR(-EINVAL);
3134 }
3135 } else {
3136 edid = ERR_PTR(-ENOENT);
3137 }
3138 intel_connector->edid = edid;
3139
3140 /* prefer fixed mode from EDID if available */
3141 list_for_each_entry(scan, &connector->probed_modes, head) {
3142 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3143 fixed_mode = drm_mode_duplicate(dev, scan);
3144 break;
3145 }
3146 }
3147
3148 /* fallback to VBT if available for eDP */
3149 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3150 fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
3151 if (fixed_mode)
3152 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3153 }
3154
3155 ironlake_edp_panel_vdd_off(intel_dp, false);
3156 }
3157
3158 if (is_edp(intel_dp)) {
3159 intel_panel_init(&intel_connector->panel, fixed_mode);
3160 intel_panel_setup_backlight(connector);
3161 }
3162
3163 intel_dp_add_properties(intel_dp, connector);
3164
3165 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
3166 * 0xd. Failure to do so will result in spurious interrupts being
3167 * generated on the port when a cable is not attached.
3168 */
3169 if (IS_G4X(dev) && !IS_GM45(dev)) {
3170 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3171 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3172 }
3173 }
3174
3175 void
3176 intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3177 {
3178 struct intel_digital_port *intel_dig_port;
3179 struct intel_encoder *intel_encoder;
3180 struct drm_encoder *encoder;
3181 struct intel_connector *intel_connector;
3182
3183 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3184 if (!intel_dig_port)
3185 return;
3186
3187 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3188 if (!intel_connector) {
3189 kfree(intel_dig_port);
3190 return;
3191 }
3192
3193 intel_encoder = &intel_dig_port->base;
3194 encoder = &intel_encoder->base;
3195
3196 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3197 DRM_MODE_ENCODER_TMDS);
3198 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
3199
3200 intel_encoder->compute_config = intel_dp_compute_config;
3201 intel_encoder->enable = intel_enable_dp;
3202 intel_encoder->pre_enable = intel_pre_enable_dp;
3203 intel_encoder->disable = intel_disable_dp;
3204 intel_encoder->post_disable = intel_post_disable_dp;
3205 intel_encoder->get_hw_state = intel_dp_get_hw_state;
3206 intel_encoder->get_config = intel_dp_get_config;
3207 if (IS_VALLEYVIEW(dev))
3208 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
3209
3210 intel_dig_port->port = port;
3211 intel_dig_port->dp.output_reg = output_reg;
3212
3213 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3214 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3215 intel_encoder->cloneable = false;
3216 intel_encoder->hot_plug = intel_dp_hot_plug;
3217
3218 intel_dp_init_connector(intel_dig_port, intel_connector);
3219 }