/* drivers/gpu/drm/i915/intel_display.c */
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include "drmP.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (*find_pll)(const intel_limit_t *, struct drm_crtc *,
			 int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

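/*
 * Illustrative note on intel_fdi_link_freq() above (descriptive only): the
 * return value is in units of 100 MHz, so the non-GEN5 default of 27
 * corresponds to a 2.7 GHz FDI link rate, matching IRONLAKE_FDI_FREQ
 * (2,700,000 kHz) above.  On GEN5 the value read from FDI_PLL_BIOS_0, plus
 * 2, encodes the same quantity.
 */
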
static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100 MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	unsigned long flags;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		goto out_unlock;
	}
	val = I915_READ(DPIO_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return val;
}

static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}

static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id intel_dual_link_lvds[] = {
	{
		.callback = intel_dual_link_lvds_callback,
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
		},
	},
	{ }	/* terminating entry */
};

static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
			      unsigned int reg)
{
	unsigned int val;

	/* use the module option value if specified */
	if (i915_lvds_channel_mode > 0)
		return i915_lvds_channel_mode == 2;

	if (dmi_check_system(intel_dual_link_lvds))
		return true;

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;
	else {
		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~LVDS_DETECTED))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;
	}
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

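/*
 * Illustrative usage sketch of intel_limit() (not called from here): the
 * mode set path is expected to pick a limit for the crtc and then let its
 * ->find_pll hook search for divisors, e.g.
 *
 *	const intel_limit_t *limit = intel_limit(crtc, refclk);
 *	ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
 *			     NULL, &clock);
 *
 * "adjusted_mode", "ok" and "clock" are placeholder names for this sketch.
 */
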
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

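/*
 * Worked example for the non-Pineview formula above (illustrative divisor
 * values only, not taken from any particular platform table): with a
 * 96000 kHz reference clock and m1 = 12, m2 = 7, n = 2, p1 = 2, p2 = 10:
 *
 *	m   = 5 * (12 + 2) + (7 + 2) = 79
 *	vco = 96000 * 79 / (2 + 2)   = 1896000 kHz
 *	p   = 2 * 10                 = 20
 *	dot = 1896000 / 20           = 94800 kHz
 *
 * i.e. roughly a 94.8 MHz pixel clock from a 1.896 GHz VCO.
 */
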
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->base.crtc == crtc && encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

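/*
 * Note on the search above (descriptive only): the loops exhaustively walk
 * the m1/m2/n/p1 ranges from the limit table, keep the candidate whose dot
 * clock is closest to the requested target, and return true only if at
 * least one candidate passed intel_PLL_is_valid() and improved on the
 * initial error value, which starts out equal to the target itself.
 */
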
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

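/*
 * Worked example for the fixed g4x DP divisors above (illustrative):
 *
 *   target < 200000 (162 MHz link):
 *	m   = 5 * (23 + 2) + (8 + 2) = 135
 *	dot = 96000 * 135 / (2 + 2) / (2 * 10) = 162000 kHz
 *
 *   target >= 200000 (270 MHz link):
 *	m   = 5 * (14 + 2) + (2 + 2) = 84
 *	dot = 96000 * 84 / (1 + 2) / (1 * 10) = 268800 kHz (~270 MHz)
 */
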
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			DRM_DEBUG_KMS("pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_crtc *intel_crtc, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (!intel_crtc->pch_pll) {
		WARN(1, "asserting PCH PLL enabled with no PLL\n");
		return;
	}

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);

		/* Make sure the selected PLL is enabled to the transcoder */
		WARN(!((pch_dpll >> (4 * intel_crtc->pipe)) & 8),
		     "transcoder %d PLL not enabled\n", intel_crtc->pipe);
	}

	reg = intel_crtc->pch_pll->pll_reg;
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true)
#define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (IS_HASWELL(dev_priv->dev)) {
		/* On Haswell, DDI is used instead of FDI_TX_CTL */
		reg = DDI_FUNC_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
		return;
	} else {
		reg = FDI_RX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_RX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (IS_HASWELL(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
		return;
	}
	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT does not have PCH refclk, skipping check\n");
		return;
	}

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		goto out_unlock;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_DATA,
		   value);
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRWR);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}

static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
	unsigned long flags;
	u32 value;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_READY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		goto out_unlock;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRRD);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_READY | SBI_RESPONSE_SUCCESS)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
		goto out_unlock;
	}

	value = I915_READ(SBI_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return value;
}

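/*
 * Illustrative read-modify-write usage of the SBI helpers above (a sketch,
 * not called from here; SBI_SSCCTL6 and SBI_SSCCTL_DISABLE are assumed to
 * be defined in i915_reg.h):
 *
 *	u32 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
 *	intel_sbi_write(dev_priv, SBI_SSCCTL6, tmp | SBI_SSCCTL_DISABLE);
 */
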
/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	BUG_ON(pll == NULL);
	BUG_ON(pll->refcount == 0);

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, intel_crtc);
		return;
	}

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = true;
}

static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	if (pll == NULL)
		return;

	BUG_ON(pll->refcount == 0);

	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	BUG_ON(pll->active == 0);
	if (--pll->active) {
		assert_pch_pll_enabled(dev_priv, intel_crtc);
		return;
	}

	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = false;
}

static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	int reg;
	u32 val, pipeconf_val;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_pch_pll_enabled(dev_priv, to_intel_crtc(crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
		return;
	}
	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPE_BPC_MASK;
		val |= pipeconf_val & PIPE_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %d\n", pipe);
}

static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	int reg;
	u32 val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %d\n", pipe);
}

b24e7179 1549/**
309cfea8 1550 * intel_enable_pipe - enable a pipe, asserting requirements
b24e7179
JB
1551 * @dev_priv: i915 private structure
1552 * @pipe: pipe to enable
040484af 1553 * @pch_port: on ILK+, is this pipe driving a PCH port or not
b24e7179
JB
1554 *
1555 * Enable @pipe, making sure that various hardware specific requirements
1556 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1557 *
1558 * @pipe should be %PIPE_A or %PIPE_B.
1559 *
1560 * Will wait until the pipe is actually running (i.e. first vblank) before
1561 * returning.
1562 */
040484af
JB
1563static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1564 bool pch_port)
b24e7179
JB
1565{
1566 int reg;
1567 u32 val;
1568
1569 /*
1570 * A pipe without a PLL won't actually be able to drive bits from
1571 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1572 * need the check.
1573 */
1574 if (!HAS_PCH_SPLIT(dev_priv->dev))
1575 assert_pll_enabled(dev_priv, pipe);
040484af
JB
1576 else {
1577 if (pch_port) {
1578 /* if driving the PCH, we need FDI enabled */
1579 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1580 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1581 }
1582 /* FIXME: assert CPU port conditions for SNB+ */
1583 }
b24e7179
JB
1584
1585 reg = PIPECONF(pipe);
1586 val = I915_READ(reg);
00d70b15
CW
1587 if (val & PIPECONF_ENABLE)
1588 return;
1589
1590 I915_WRITE(reg, val | PIPECONF_ENABLE);
b24e7179
JB
1591 intel_wait_for_vblank(dev_priv->dev, pipe);
1592}
1593
1594/**
309cfea8 1595 * intel_disable_pipe - disable a pipe, asserting requirements
b24e7179
JB
1596 * @dev_priv: i915 private structure
1597 * @pipe: pipe to disable
1598 *
1599 * Disable @pipe, making sure that various hardware specific requirements
1600 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1601 *
1602 * @pipe should be %PIPE_A or %PIPE_B.
1603 *
1604 * Will wait until the pipe has shut down before returning.
1605 */
1606static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1607 enum pipe pipe)
1608{
1609 int reg;
1610 u32 val;
1611
1612 /*
1613 * Make sure planes won't keep trying to pump pixels to us,
1614 * or we might hang the display.
1615 */
1616 assert_planes_disabled(dev_priv, pipe);
1617
1618 /* Don't disable pipe A or pipe A PLLs if needed */
1619 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1620 return;
1621
1622 reg = PIPECONF(pipe);
1623 val = I915_READ(reg);
00d70b15
CW
1624 if ((val & PIPECONF_ENABLE) == 0)
1625 return;
1626
1627 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
b24e7179
JB
1628 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1629}
1630
d74362c9
KP
1631/*
1632 * Plane regs are double buffered, going from enabled->disabled needs a
1633 * trigger in order to latch. The display address reg provides this.
1634 */
6f1d69b0 1635void intel_flush_display_plane(struct drm_i915_private *dev_priv,
d74362c9
KP
1636 enum plane plane)
1637{
1638 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1639 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1640}
1641
b24e7179
JB
1642/**
1643 * intel_enable_plane - enable a display plane on a given pipe
1644 * @dev_priv: i915 private structure
1645 * @plane: plane to enable
1646 * @pipe: pipe being fed
1647 *
1648 * Enable @plane on @pipe, making sure that @pipe is running first.
1649 */
1650static void intel_enable_plane(struct drm_i915_private *dev_priv,
1651 enum plane plane, enum pipe pipe)
1652{
1653 int reg;
1654 u32 val;
1655
1656 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1657 assert_pipe_enabled(dev_priv, pipe);
1658
1659 reg = DSPCNTR(plane);
1660 val = I915_READ(reg);
00d70b15
CW
1661 if (val & DISPLAY_PLANE_ENABLE)
1662 return;
1663
1664 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
d74362c9 1665 intel_flush_display_plane(dev_priv, plane);
b24e7179
JB
1666 intel_wait_for_vblank(dev_priv->dev, pipe);
1667}
1668
b24e7179
JB
1669/**
1670 * intel_disable_plane - disable a display plane
1671 * @dev_priv: i915 private structure
1672 * @plane: plane to disable
1673 * @pipe: pipe consuming the data
1674 *
1675 * Disable @plane; should be an independent operation.
1676 */
1677static void intel_disable_plane(struct drm_i915_private *dev_priv,
1678 enum plane plane, enum pipe pipe)
1679{
1680 int reg;
1681 u32 val;
1682
1683 reg = DSPCNTR(plane);
1684 val = I915_READ(reg);
00d70b15
CW
1685 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1686 return;
1687
1688 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
b24e7179
JB
1689 intel_flush_display_plane(dev_priv, plane);
1690 intel_wait_for_vblank(dev_priv->dev, pipe);
1691}
1692
47a05eca 1693static void disable_pch_dp(struct drm_i915_private *dev_priv,
f0575e92 1694 enum pipe pipe, int reg, u32 port_sel)
47a05eca
JB
1695{
1696 u32 val = I915_READ(reg);
4e634389 1697 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
f0575e92 1698 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
47a05eca 1699 I915_WRITE(reg, val & ~DP_PORT_EN);
f0575e92 1700 }
47a05eca
JB
1701}
1702
1703static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1704 enum pipe pipe, int reg)
1705{
1706 u32 val = I915_READ(reg);
1519b995 1707 if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
f0575e92
KP
1708 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1709 reg, pipe);
47a05eca 1710 I915_WRITE(reg, val & ~PORT_ENABLE);
f0575e92 1711 }
47a05eca
JB
1712}
1713
1714/* Disable any ports connected to this transcoder */
1715static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1716 enum pipe pipe)
1717{
1718 u32 reg, val;
1719
1720 val = I915_READ(PCH_PP_CONTROL);
1721 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1722
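	/* PANEL_UNLOCK_REGS is presumably the write-protect key for the PCH
	 * panel power sequencer; unlock it so the port disables below stick. */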
f0575e92
KP
1723 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1724 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1725 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
47a05eca
JB
1726
1727 reg = PCH_ADPA;
1728 val = I915_READ(reg);
1519b995 1729 if (adpa_pipe_enabled(dev_priv, val, pipe))
47a05eca
JB
1730 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1731
1732 reg = PCH_LVDS;
1733 val = I915_READ(reg);
1519b995
KP
1734 if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1735 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
47a05eca
JB
1736 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1737 POSTING_READ(reg);
1738 udelay(100);
1739 }
1740
1741 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1742 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1743 disable_pch_hdmi(dev_priv, pipe, HDMID);
1744}
1745
127bd2ac 1746int
48b956c5 1747intel_pin_and_fence_fb_obj(struct drm_device *dev,
05394f39 1748 struct drm_i915_gem_object *obj,
919926ae 1749 struct intel_ring_buffer *pipelined)
6b95a207 1750{
ce453d81 1751 struct drm_i915_private *dev_priv = dev->dev_private;
6b95a207
KH
1752 u32 alignment;
1753 int ret;
1754
05394f39 1755 switch (obj->tiling_mode) {
6b95a207 1756 case I915_TILING_NONE:
534843da
CW
1757 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1758 alignment = 128 * 1024;
a6c45cf0 1759 else if (INTEL_INFO(dev)->gen >= 4)
534843da
CW
1760 alignment = 4 * 1024;
1761 else
1762 alignment = 64 * 1024;
6b95a207
KH
1763 break;
1764 case I915_TILING_X:
1765 /* pin() will align the object as required by fence */
1766 alignment = 0;
1767 break;
1768 case I915_TILING_Y:
1769 /* FIXME: Is this true? */
1770 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1771 return -EINVAL;
1772 default:
1773 BUG();
1774 }
1775
ce453d81 1776 dev_priv->mm.interruptible = false;
2da3b9b9 1777 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
48b956c5 1778 if (ret)
ce453d81 1779 goto err_interruptible;
6b95a207
KH
1780
1781 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1782 * fence, whereas 965+ only requires a fence if using
1783 * framebuffer compression. For simplicity, we always install
1784 * a fence as the cost is not that onerous.
1785 */
06d98131 1786 ret = i915_gem_object_get_fence(obj);
9a5a53b3
CW
1787 if (ret)
1788 goto err_unpin;
1690e1eb 1789
9a5a53b3 1790 i915_gem_object_pin_fence(obj);
6b95a207 1791
ce453d81 1792 dev_priv->mm.interruptible = true;
6b95a207 1793 return 0;
48b956c5
CW
1794
1795err_unpin:
1796 i915_gem_object_unpin(obj);
ce453d81
CW
1797err_interruptible:
1798 dev_priv->mm.interruptible = true;
48b956c5 1799 return ret;
6b95a207
KH
1800}
1801
1690e1eb
CW
1802void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1803{
1804 i915_gem_object_unpin_fence(obj);
1805 i915_gem_object_unpin(obj);
1806}
1807
17638cd6
JB
1808static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1809 int x, int y)
81255565
JB
1810{
1811 struct drm_device *dev = crtc->dev;
1812 struct drm_i915_private *dev_priv = dev->dev_private;
1813 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1814 struct intel_framebuffer *intel_fb;
05394f39 1815 struct drm_i915_gem_object *obj;
81255565
JB
1816 int plane = intel_crtc->plane;
1817 unsigned long Start, Offset;
81255565 1818 u32 dspcntr;
5eddb70b 1819 u32 reg;
81255565
JB
1820
1821 switch (plane) {
1822 case 0:
1823 case 1:
1824 break;
1825 default:
1826 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1827 return -EINVAL;
1828 }
1829
1830 intel_fb = to_intel_framebuffer(fb);
1831 obj = intel_fb->obj;
81255565 1832
5eddb70b
CW
1833 reg = DSPCNTR(plane);
1834 dspcntr = I915_READ(reg);
81255565
JB
1835 /* Mask out pixel format bits in case we change it */
1836 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1837 switch (fb->bits_per_pixel) {
1838 case 8:
1839 dspcntr |= DISPPLANE_8BPP;
1840 break;
1841 case 16:
1842 if (fb->depth == 15)
1843 dspcntr |= DISPPLANE_15_16BPP;
1844 else
1845 dspcntr |= DISPPLANE_16BPP;
1846 break;
1847 case 24:
1848 case 32:
1849 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1850 break;
1851 default:
17638cd6 1852 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
81255565
JB
1853 return -EINVAL;
1854 }
a6c45cf0 1855 if (INTEL_INFO(dev)->gen >= 4) {
05394f39 1856 if (obj->tiling_mode != I915_TILING_NONE)
81255565
JB
1857 dspcntr |= DISPPLANE_TILED;
1858 else
1859 dspcntr &= ~DISPPLANE_TILED;
1860 }
1861
5eddb70b 1862 I915_WRITE(reg, dspcntr);
81255565 1863
05394f39 1864 Start = obj->gtt_offset;
01f2c773 1865 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
81255565 1866
4e6cfefc 1867 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
01f2c773
VS
1868 Start, Offset, x, y, fb->pitches[0]);
1869 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
a6c45cf0 1870 if (INTEL_INFO(dev)->gen >= 4) {
446f2545 1871 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
5eddb70b
CW
1872 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1873 I915_WRITE(DSPADDR(plane), Offset);
1874 } else
1875 I915_WRITE(DSPADDR(plane), Start + Offset);
1876 POSTING_READ(reg);
81255565 1877
17638cd6
JB
1878 return 0;
1879}
1880
1881static int ironlake_update_plane(struct drm_crtc *crtc,
1882 struct drm_framebuffer *fb, int x, int y)
1883{
1884 struct drm_device *dev = crtc->dev;
1885 struct drm_i915_private *dev_priv = dev->dev_private;
1886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1887 struct intel_framebuffer *intel_fb;
1888 struct drm_i915_gem_object *obj;
1889 int plane = intel_crtc->plane;
1890 unsigned long Start, Offset;
1891 u32 dspcntr;
1892 u32 reg;
1893
1894 switch (plane) {
1895 case 0:
1896 case 1:
27f8227b 1897 case 2:
17638cd6
JB
1898 break;
1899 default:
1900 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1901 return -EINVAL;
1902 }
1903
1904 intel_fb = to_intel_framebuffer(fb);
1905 obj = intel_fb->obj;
1906
1907 reg = DSPCNTR(plane);
1908 dspcntr = I915_READ(reg);
1909 /* Mask out pixel format bits in case we change it */
1910 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1911 switch (fb->bits_per_pixel) {
1912 case 8:
1913 dspcntr |= DISPPLANE_8BPP;
1914 break;
1915 case 16:
1916 if (fb->depth != 16)
1917 return -EINVAL;
1918
1919 dspcntr |= DISPPLANE_16BPP;
1920 break;
1921 case 24:
1922 case 32:
1923 if (fb->depth == 24)
1924 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1925 else if (fb->depth == 30)
1926 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
1927 else
1928 return -EINVAL;
1929 break;
1930 default:
1931 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
1932 return -EINVAL;
1933 }
1934
1935 if (obj->tiling_mode != I915_TILING_NONE)
1936 dspcntr |= DISPPLANE_TILED;
1937 else
1938 dspcntr &= ~DISPPLANE_TILED;
1939
 1940	/* trickle feed must be disabled for scanout planes on ILK+ */
1941 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
1942
1943 I915_WRITE(reg, dspcntr);
1944
1945 Start = obj->gtt_offset;
01f2c773 1946 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
17638cd6
JB
1947
1948 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
01f2c773
VS
1949 Start, Offset, x, y, fb->pitches[0]);
1950 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
446f2545 1951 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
17638cd6
JB
1952 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
1953 I915_WRITE(DSPADDR(plane), Offset);
1954 POSTING_READ(reg);
1955
1956 return 0;
1957}
1958
1959/* Assume fb object is pinned & idle & fenced and just update base pointers */
1960static int
1961intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1962 int x, int y, enum mode_set_atomic state)
1963{
1964 struct drm_device *dev = crtc->dev;
1965 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 1966
6b8e6ed0
CW
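	/* This path may be entered from atomic contexts (e.g. the kgdb fb
	 * helper), so keep it minimal: drop FBC and restore the full pipe
	 * clock before rewriting the plane base registers. */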
1967 if (dev_priv->display.disable_fbc)
1968 dev_priv->display.disable_fbc(dev);
3dec0095 1969 intel_increase_pllclock(crtc);
81255565 1970
6b8e6ed0 1971 return dev_priv->display.update_plane(crtc, fb, x, y);
81255565
JB
1972}
1973
14667a4b
CW
1974static int
1975intel_finish_fb(struct drm_framebuffer *old_fb)
1976{
1977 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1978 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1979 bool was_interruptible = dev_priv->mm.interruptible;
1980 int ret;
1981
1982 wait_event(dev_priv->pending_flip_queue,
1983 atomic_read(&dev_priv->mm.wedged) ||
1984 atomic_read(&obj->pending_flip) == 0);
1985
1986 /* Big Hammer, we also need to ensure that any pending
1987 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1988 * current scanout is retired before unpinning the old
1989 * framebuffer.
1990 *
1991 * This should only fail upon a hung GPU, in which case we
1992 * can safely continue.
1993 */
1994 dev_priv->mm.interruptible = false;
1995 ret = i915_gem_object_finish_gpu(obj);
1996 dev_priv->mm.interruptible = was_interruptible;
1997
1998 return ret;
1999}
2000
5c3b82e2 2001static int
3c4fdcfb
KH
2002intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2003 struct drm_framebuffer *old_fb)
79e53945
JB
2004{
2005 struct drm_device *dev = crtc->dev;
6b8e6ed0 2006 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
2007 struct drm_i915_master_private *master_priv;
2008 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5c3b82e2 2009 int ret;
79e53945
JB
2010
2011 /* no fb bound */
2012 if (!crtc->fb) {
a5071c2f 2013 DRM_ERROR("No FB bound\n");
5c3b82e2
CW
2014 return 0;
2015 }
2016
5826eca5
ED
 2017	if (intel_crtc->plane > dev_priv->num_pipe) {
2018 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2019 intel_crtc->plane,
2020 dev_priv->num_pipe);
5c3b82e2 2021 return -EINVAL;
79e53945
JB
2022 }
2023
5c3b82e2 2024 mutex_lock(&dev->struct_mutex);
265db958
CW
2025 ret = intel_pin_and_fence_fb_obj(dev,
2026 to_intel_framebuffer(crtc->fb)->obj,
919926ae 2027 NULL);
5c3b82e2
CW
2028 if (ret != 0) {
2029 mutex_unlock(&dev->struct_mutex);
a5071c2f 2030 DRM_ERROR("pin & fence failed\n");
5c3b82e2
CW
2031 return ret;
2032 }
79e53945 2033
14667a4b
CW
2034 if (old_fb)
2035 intel_finish_fb(old_fb);
265db958 2036
6b8e6ed0 2037 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
4e6cfefc 2038 if (ret) {
1690e1eb 2039 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
5c3b82e2 2040 mutex_unlock(&dev->struct_mutex);
a5071c2f 2041 DRM_ERROR("failed to update base address\n");
4e6cfefc 2042 return ret;
79e53945 2043 }
3c4fdcfb 2044
b7f1de28
CW
2045 if (old_fb) {
2046 intel_wait_for_vblank(dev, intel_crtc->pipe);
1690e1eb 2047 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
b7f1de28 2048 }
652c393a 2049
6b8e6ed0 2050 intel_update_fbc(dev);
5c3b82e2 2051 mutex_unlock(&dev->struct_mutex);
79e53945
JB
2052
2053 if (!dev->primary->master)
5c3b82e2 2054 return 0;
79e53945
JB
2055
2056 master_priv = dev->primary->master->driver_priv;
2057 if (!master_priv->sarea_priv)
5c3b82e2 2058 return 0;
79e53945 2059
265db958 2060 if (intel_crtc->pipe) {
79e53945
JB
2061 master_priv->sarea_priv->pipeB_x = x;
2062 master_priv->sarea_priv->pipeB_y = y;
5c3b82e2
CW
2063 } else {
2064 master_priv->sarea_priv->pipeA_x = x;
2065 master_priv->sarea_priv->pipeA_y = y;
79e53945 2066 }
5c3b82e2
CW
2067
2068 return 0;
79e53945
JB
2069}
2070
5eddb70b 2071static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
32f9d658
ZW
2072{
2073 struct drm_device *dev = crtc->dev;
2074 struct drm_i915_private *dev_priv = dev->dev_private;
2075 u32 dpa_ctl;
2076
28c97730 2077 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
32f9d658
ZW
2078 dpa_ctl = I915_READ(DP_A);
2079 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2080
2081 if (clock < 200000) {
2082 u32 temp;
2083 dpa_ctl |= DP_PLL_FREQ_160MHZ;
 2084	/* workaround for 160MHz:
2085 1) program 0x4600c bits 15:0 = 0x8124
2086 2) program 0x46010 bit 0 = 1
2087 3) program 0x46034 bit 24 = 1
2088 4) program 0x64000 bit 14 = 1
2089 */
2090 temp = I915_READ(0x4600c);
2091 temp &= 0xffff0000;
2092 I915_WRITE(0x4600c, temp | 0x8124);
2093
2094 temp = I915_READ(0x46010);
2095 I915_WRITE(0x46010, temp | 1);
2096
2097 temp = I915_READ(0x46034);
2098 I915_WRITE(0x46034, temp | (1 << 24));
2099 } else {
2100 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2101 }
2102 I915_WRITE(DP_A, dpa_ctl);
2103
5eddb70b 2104 POSTING_READ(DP_A);
32f9d658
ZW
2105 udelay(500);
2106}
2107
5e84e1a4
ZW
2108static void intel_fdi_normal_train(struct drm_crtc *crtc)
2109{
2110 struct drm_device *dev = crtc->dev;
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2113 int pipe = intel_crtc->pipe;
2114 u32 reg, temp;
2115
2116 /* enable normal train */
2117 reg = FDI_TX_CTL(pipe);
2118 temp = I915_READ(reg);
61e499bf 2119 if (IS_IVYBRIDGE(dev)) {
357555c0
JB
2120 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2121 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
2122 } else {
2123 temp &= ~FDI_LINK_TRAIN_NONE;
2124 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 2125 }
5e84e1a4
ZW
2126 I915_WRITE(reg, temp);
2127
2128 reg = FDI_RX_CTL(pipe);
2129 temp = I915_READ(reg);
2130 if (HAS_PCH_CPT(dev)) {
2131 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2132 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2133 } else {
2134 temp &= ~FDI_LINK_TRAIN_NONE;
2135 temp |= FDI_LINK_TRAIN_NONE;
2136 }
2137 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2138
2139 /* wait one idle pattern time */
2140 POSTING_READ(reg);
2141 udelay(1000);
357555c0
JB
2142
2143 /* IVB wants error correction enabled */
2144 if (IS_IVYBRIDGE(dev))
2145 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2146 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
2147}
2148
291427f5
JB
2149static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2150{
2151 struct drm_i915_private *dev_priv = dev->dev_private;
2152 u32 flags = I915_READ(SOUTH_CHICKEN1);
2153
2154 flags |= FDI_PHASE_SYNC_OVR(pipe);
2155 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2156 flags |= FDI_PHASE_SYNC_EN(pipe);
2157 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2158 POSTING_READ(SOUTH_CHICKEN1);
2159}
2160
8db9d77b
ZW
2161/* The FDI link training functions for ILK/Ibexpeak. */
2162static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2163{
2164 struct drm_device *dev = crtc->dev;
2165 struct drm_i915_private *dev_priv = dev->dev_private;
2166 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2167 int pipe = intel_crtc->pipe;
0fc932b8 2168 int plane = intel_crtc->plane;
5eddb70b 2169 u32 reg, temp, tries;
8db9d77b 2170
0fc932b8
JB
2171 /* FDI needs bits from pipe & plane first */
2172 assert_pipe_enabled(dev_priv, pipe);
2173 assert_plane_enabled(dev_priv, plane);
2174
e1a44743
AJ
 2175	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2176 for train result */
5eddb70b
CW
2177 reg = FDI_RX_IMR(pipe);
2178 temp = I915_READ(reg);
e1a44743
AJ
2179 temp &= ~FDI_RX_SYMBOL_LOCK;
2180 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2181 I915_WRITE(reg, temp);
2182 I915_READ(reg);
e1a44743
AJ
2183 udelay(150);
2184
8db9d77b 2185 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2186 reg = FDI_TX_CTL(pipe);
2187 temp = I915_READ(reg);
77ffb597
AJ
2188 temp &= ~(7 << 19);
2189 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2190 temp &= ~FDI_LINK_TRAIN_NONE;
2191 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 2192 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2193
5eddb70b
CW
2194 reg = FDI_RX_CTL(pipe);
2195 temp = I915_READ(reg);
8db9d77b
ZW
2196 temp &= ~FDI_LINK_TRAIN_NONE;
2197 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
2198 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2199
2200 POSTING_READ(reg);
8db9d77b
ZW
2201 udelay(150);
2202
5b2adf89 2204	/* Ironlake workaround, enable clock pointer after FDI enable */
6f06ce18
JB
2204 if (HAS_PCH_IBX(dev)) {
2205 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2206 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2207 FDI_RX_PHASE_SYNC_POINTER_EN);
2208 }
5b2adf89 2209
5eddb70b 2210 reg = FDI_RX_IIR(pipe);
e1a44743 2211 for (tries = 0; tries < 5; tries++) {
5eddb70b 2212 temp = I915_READ(reg);
8db9d77b
ZW
2213 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2214
2215 if ((temp & FDI_RX_BIT_LOCK)) {
2216 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 2217 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
2218 break;
2219 }
8db9d77b 2220 }
e1a44743 2221 if (tries == 5)
5eddb70b 2222 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2223
2224 /* Train 2 */
5eddb70b
CW
2225 reg = FDI_TX_CTL(pipe);
2226 temp = I915_READ(reg);
8db9d77b
ZW
2227 temp &= ~FDI_LINK_TRAIN_NONE;
2228 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2229 I915_WRITE(reg, temp);
8db9d77b 2230
5eddb70b
CW
2231 reg = FDI_RX_CTL(pipe);
2232 temp = I915_READ(reg);
8db9d77b
ZW
2233 temp &= ~FDI_LINK_TRAIN_NONE;
2234 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2235 I915_WRITE(reg, temp);
8db9d77b 2236
5eddb70b
CW
2237 POSTING_READ(reg);
2238 udelay(150);
8db9d77b 2239
5eddb70b 2240 reg = FDI_RX_IIR(pipe);
e1a44743 2241 for (tries = 0; tries < 5; tries++) {
5eddb70b 2242 temp = I915_READ(reg);
8db9d77b
ZW
2243 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2244
2245 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 2246 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
2247 DRM_DEBUG_KMS("FDI train 2 done.\n");
2248 break;
2249 }
8db9d77b 2250 }
e1a44743 2251 if (tries == 5)
5eddb70b 2252 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2253
2254 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 2255
8db9d77b
ZW
2256}
2257
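/* Voltage swing / pre-emphasis combinations stepped through while
 * retrying FDI link training on SNB-B (also reused for IVB below). */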
0206e353 2258static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
2259 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2260 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2261 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2262 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2263};
2264
2265/* The FDI link training functions for SNB/Cougarpoint. */
2266static void gen6_fdi_link_train(struct drm_crtc *crtc)
2267{
2268 struct drm_device *dev = crtc->dev;
2269 struct drm_i915_private *dev_priv = dev->dev_private;
2270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2271 int pipe = intel_crtc->pipe;
fa37d39e 2272 u32 reg, temp, i, retry;
8db9d77b 2273
e1a44743
AJ
 2274	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2275 for train result */
5eddb70b
CW
2276 reg = FDI_RX_IMR(pipe);
2277 temp = I915_READ(reg);
e1a44743
AJ
2278 temp &= ~FDI_RX_SYMBOL_LOCK;
2279 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2280 I915_WRITE(reg, temp);
2281
2282 POSTING_READ(reg);
e1a44743
AJ
2283 udelay(150);
2284
8db9d77b 2285 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2286 reg = FDI_TX_CTL(pipe);
2287 temp = I915_READ(reg);
77ffb597
AJ
2288 temp &= ~(7 << 19);
2289 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2290 temp &= ~FDI_LINK_TRAIN_NONE;
2291 temp |= FDI_LINK_TRAIN_PATTERN_1;
2292 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2293 /* SNB-B */
2294 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 2295 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2296
5eddb70b
CW
2297 reg = FDI_RX_CTL(pipe);
2298 temp = I915_READ(reg);
8db9d77b
ZW
2299 if (HAS_PCH_CPT(dev)) {
2300 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2301 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2302 } else {
2303 temp &= ~FDI_LINK_TRAIN_NONE;
2304 temp |= FDI_LINK_TRAIN_PATTERN_1;
2305 }
5eddb70b
CW
2306 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2307
2308 POSTING_READ(reg);
8db9d77b
ZW
2309 udelay(150);
2310
291427f5
JB
2311 if (HAS_PCH_CPT(dev))
2312 cpt_phase_pointer_enable(dev, pipe);
2313
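	/* Step through the vswing/pre-emphasis table until bit lock is reported */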
0206e353 2314 for (i = 0; i < 4; i++) {
5eddb70b
CW
2315 reg = FDI_TX_CTL(pipe);
2316 temp = I915_READ(reg);
8db9d77b
ZW
2317 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2318 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2319 I915_WRITE(reg, temp);
2320
2321 POSTING_READ(reg);
8db9d77b
ZW
2322 udelay(500);
2323
fa37d39e
SP
2324 for (retry = 0; retry < 5; retry++) {
2325 reg = FDI_RX_IIR(pipe);
2326 temp = I915_READ(reg);
2327 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2328 if (temp & FDI_RX_BIT_LOCK) {
2329 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2330 DRM_DEBUG_KMS("FDI train 1 done.\n");
2331 break;
2332 }
2333 udelay(50);
8db9d77b 2334 }
fa37d39e
SP
2335 if (retry < 5)
2336 break;
8db9d77b
ZW
2337 }
2338 if (i == 4)
5eddb70b 2339 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2340
2341 /* Train 2 */
5eddb70b
CW
2342 reg = FDI_TX_CTL(pipe);
2343 temp = I915_READ(reg);
8db9d77b
ZW
2344 temp &= ~FDI_LINK_TRAIN_NONE;
2345 temp |= FDI_LINK_TRAIN_PATTERN_2;
2346 if (IS_GEN6(dev)) {
2347 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2348 /* SNB-B */
2349 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2350 }
5eddb70b 2351 I915_WRITE(reg, temp);
8db9d77b 2352
5eddb70b
CW
2353 reg = FDI_RX_CTL(pipe);
2354 temp = I915_READ(reg);
8db9d77b
ZW
2355 if (HAS_PCH_CPT(dev)) {
2356 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2357 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2358 } else {
2359 temp &= ~FDI_LINK_TRAIN_NONE;
2360 temp |= FDI_LINK_TRAIN_PATTERN_2;
2361 }
5eddb70b
CW
2362 I915_WRITE(reg, temp);
2363
2364 POSTING_READ(reg);
8db9d77b
ZW
2365 udelay(150);
2366
0206e353 2367 for (i = 0; i < 4; i++) {
5eddb70b
CW
2368 reg = FDI_TX_CTL(pipe);
2369 temp = I915_READ(reg);
8db9d77b
ZW
2370 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2371 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2372 I915_WRITE(reg, temp);
2373
2374 POSTING_READ(reg);
8db9d77b
ZW
2375 udelay(500);
2376
fa37d39e
SP
2377 for (retry = 0; retry < 5; retry++) {
2378 reg = FDI_RX_IIR(pipe);
2379 temp = I915_READ(reg);
2380 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2381 if (temp & FDI_RX_SYMBOL_LOCK) {
2382 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2383 DRM_DEBUG_KMS("FDI train 2 done.\n");
2384 break;
2385 }
2386 udelay(50);
8db9d77b 2387 }
fa37d39e
SP
2388 if (retry < 5)
2389 break;
8db9d77b
ZW
2390 }
2391 if (i == 4)
5eddb70b 2392 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2393
2394 DRM_DEBUG_KMS("FDI train done.\n");
2395}
2396
357555c0
JB
2397/* Manual link training for Ivy Bridge A0 parts */
2398static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2399{
2400 struct drm_device *dev = crtc->dev;
2401 struct drm_i915_private *dev_priv = dev->dev_private;
2402 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2403 int pipe = intel_crtc->pipe;
2404 u32 reg, temp, i;
2405
 2406	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2407 for train result */
2408 reg = FDI_RX_IMR(pipe);
2409 temp = I915_READ(reg);
2410 temp &= ~FDI_RX_SYMBOL_LOCK;
2411 temp &= ~FDI_RX_BIT_LOCK;
2412 I915_WRITE(reg, temp);
2413
2414 POSTING_READ(reg);
2415 udelay(150);
2416
2417 /* enable CPU FDI TX and PCH FDI RX */
2418 reg = FDI_TX_CTL(pipe);
2419 temp = I915_READ(reg);
2420 temp &= ~(7 << 19);
2421 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2422 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2423 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2424 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2425 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
c4f9c4c2 2426 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2427 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2428
2429 reg = FDI_RX_CTL(pipe);
2430 temp = I915_READ(reg);
2431 temp &= ~FDI_LINK_TRAIN_AUTO;
2432 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2433 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
c4f9c4c2 2434 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2435 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2436
2437 POSTING_READ(reg);
2438 udelay(150);
2439
291427f5
JB
2440 if (HAS_PCH_CPT(dev))
2441 cpt_phase_pointer_enable(dev, pipe);
2442
0206e353 2443 for (i = 0; i < 4; i++) {
357555c0
JB
2444 reg = FDI_TX_CTL(pipe);
2445 temp = I915_READ(reg);
2446 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2447 temp |= snb_b_fdi_train_param[i];
2448 I915_WRITE(reg, temp);
2449
2450 POSTING_READ(reg);
2451 udelay(500);
2452
2453 reg = FDI_RX_IIR(pipe);
2454 temp = I915_READ(reg);
2455 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2456
2457 if (temp & FDI_RX_BIT_LOCK ||
2458 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2459 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2460 DRM_DEBUG_KMS("FDI train 1 done.\n");
2461 break;
2462 }
2463 }
2464 if (i == 4)
2465 DRM_ERROR("FDI train 1 fail!\n");
2466
2467 /* Train 2 */
2468 reg = FDI_TX_CTL(pipe);
2469 temp = I915_READ(reg);
2470 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2471 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2472 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2473 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2474 I915_WRITE(reg, temp);
2475
2476 reg = FDI_RX_CTL(pipe);
2477 temp = I915_READ(reg);
2478 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2479 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2480 I915_WRITE(reg, temp);
2481
2482 POSTING_READ(reg);
2483 udelay(150);
2484
0206e353 2485 for (i = 0; i < 4; i++) {
357555c0
JB
2486 reg = FDI_TX_CTL(pipe);
2487 temp = I915_READ(reg);
2488 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2489 temp |= snb_b_fdi_train_param[i];
2490 I915_WRITE(reg, temp);
2491
2492 POSTING_READ(reg);
2493 udelay(500);
2494
2495 reg = FDI_RX_IIR(pipe);
2496 temp = I915_READ(reg);
2497 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2498
2499 if (temp & FDI_RX_SYMBOL_LOCK) {
2500 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2501 DRM_DEBUG_KMS("FDI train 2 done.\n");
2502 break;
2503 }
2504 }
2505 if (i == 4)
2506 DRM_ERROR("FDI train 2 fail!\n");
2507
2508 DRM_DEBUG_KMS("FDI train done.\n");
2509}
2510
2511static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2c07245f
ZW
2512{
2513 struct drm_device *dev = crtc->dev;
2514 struct drm_i915_private *dev_priv = dev->dev_private;
2515 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2516 int pipe = intel_crtc->pipe;
5eddb70b 2517 u32 reg, temp;
79e53945 2518
c64e311e 2519 /* Write the TU size bits so error detection works */
5eddb70b
CW
2520 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2521 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
c64e311e 2522
c98e9dcf 2523 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
2524 reg = FDI_RX_CTL(pipe);
2525 temp = I915_READ(reg);
2526 temp &= ~((0x7 << 19) | (0x7 << 16));
c98e9dcf 2527 temp |= (intel_crtc->fdi_lanes - 1) << 19;
5eddb70b
CW
2528 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2529 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2530
2531 POSTING_READ(reg);
c98e9dcf
JB
2532 udelay(200);
2533
2534 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
2535 temp = I915_READ(reg);
2536 I915_WRITE(reg, temp | FDI_PCDCLK);
2537
2538 POSTING_READ(reg);
c98e9dcf
JB
2539 udelay(200);
2540
bf507ef7
ED
2541 /* On Haswell, the PLL configuration for ports and pipes is handled
2542 * separately, as part of DDI setup */
2543 if (!IS_HASWELL(dev)) {
2544 /* Enable CPU FDI TX PLL, always on for Ironlake */
2545 reg = FDI_TX_CTL(pipe);
2546 temp = I915_READ(reg);
2547 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2548 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 2549
bf507ef7
ED
2550 POSTING_READ(reg);
2551 udelay(100);
2552 }
6be4a607 2553 }
0e23b99d
JB
2554}
2555
291427f5
JB
2556static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2557{
2558 struct drm_i915_private *dev_priv = dev->dev_private;
2559 u32 flags = I915_READ(SOUTH_CHICKEN1);
2560
2561 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2562 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2563 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2564 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2565 POSTING_READ(SOUTH_CHICKEN1);
2566}
0fc932b8
JB
2567static void ironlake_fdi_disable(struct drm_crtc *crtc)
2568{
2569 struct drm_device *dev = crtc->dev;
2570 struct drm_i915_private *dev_priv = dev->dev_private;
2571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2572 int pipe = intel_crtc->pipe;
2573 u32 reg, temp;
2574
2575 /* disable CPU FDI tx and PCH FDI rx */
2576 reg = FDI_TX_CTL(pipe);
2577 temp = I915_READ(reg);
2578 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2579 POSTING_READ(reg);
2580
2581 reg = FDI_RX_CTL(pipe);
2582 temp = I915_READ(reg);
2583 temp &= ~(0x7 << 16);
2584 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2585 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2586
2587 POSTING_READ(reg);
2588 udelay(100);
2589
2590 /* Ironlake workaround, disable clock pointer after downing FDI */
6f06ce18
JB
2591 if (HAS_PCH_IBX(dev)) {
2592 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
0fc932b8
JB
2593 I915_WRITE(FDI_RX_CHICKEN(pipe),
 2594			   I915_READ(FDI_RX_CHICKEN(pipe)) &
6f06ce18 2595			   ~FDI_RX_PHASE_SYNC_POINTER_EN);
291427f5
JB
2596 } else if (HAS_PCH_CPT(dev)) {
2597 cpt_phase_pointer_disable(dev, pipe);
6f06ce18 2598 }
0fc932b8
JB
2599
2600 /* still set train pattern 1 */
2601 reg = FDI_TX_CTL(pipe);
2602 temp = I915_READ(reg);
2603 temp &= ~FDI_LINK_TRAIN_NONE;
2604 temp |= FDI_LINK_TRAIN_PATTERN_1;
2605 I915_WRITE(reg, temp);
2606
2607 reg = FDI_RX_CTL(pipe);
2608 temp = I915_READ(reg);
2609 if (HAS_PCH_CPT(dev)) {
2610 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2611 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2612 } else {
2613 temp &= ~FDI_LINK_TRAIN_NONE;
2614 temp |= FDI_LINK_TRAIN_PATTERN_1;
2615 }
2616 /* BPC in FDI rx is consistent with that in PIPECONF */
2617 temp &= ~(0x07 << 16);
2618 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2619 I915_WRITE(reg, temp);
2620
2621 POSTING_READ(reg);
2622 udelay(100);
2623}
2624
e6c3a2a6
CW
2625static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2626{
0f91128d 2627 struct drm_device *dev = crtc->dev;
e6c3a2a6
CW
2628
2629 if (crtc->fb == NULL)
2630 return;
2631
0f91128d
CW
2632 mutex_lock(&dev->struct_mutex);
2633 intel_finish_fb(crtc->fb);
2634 mutex_unlock(&dev->struct_mutex);
e6c3a2a6
CW
2635}
2636
040484af
JB
2637static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2638{
2639 struct drm_device *dev = crtc->dev;
2640 struct drm_mode_config *mode_config = &dev->mode_config;
2641 struct intel_encoder *encoder;
2642
2643 /*
2644 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2645 * must be driven by its own crtc; no sharing is possible.
2646 */
2647 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2648 if (encoder->base.crtc != crtc)
2649 continue;
2650
6ee8bab0
ED
2651 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2652 * CPU handles all others */
2653 if (IS_HASWELL(dev)) {
2654 /* It is still unclear how this will work on PPT, so throw up a warning */
2655 WARN_ON(!HAS_PCH_LPT(dev));
2656
2657 if (encoder->type == DRM_MODE_ENCODER_DAC) {
2658 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2659 return true;
2660 } else {
2661 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2662 encoder->type);
2663 return false;
2664 }
2665 }
2666
040484af
JB
2667 switch (encoder->type) {
2668 case INTEL_OUTPUT_EDP:
2669 if (!intel_encoder_is_pch_edp(&encoder->base))
2670 return false;
2671 continue;
2672 }
2673 }
2674
2675 return true;
2676}
2677
f67a559d
JB
2678/*
2679 * Enable PCH resources required for PCH ports:
2680 * - PCH PLLs
2681 * - FDI training & RX/TX
2682 * - update transcoder timings
2683 * - DP transcoding bits
2684 * - transcoder
2685 */
2686static void ironlake_pch_enable(struct drm_crtc *crtc)
0e23b99d
JB
2687{
2688 struct drm_device *dev = crtc->dev;
2689 struct drm_i915_private *dev_priv = dev->dev_private;
2690 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2691 int pipe = intel_crtc->pipe;
ee7b9f93 2692 u32 reg, temp;
2c07245f 2693
c98e9dcf 2694 /* For PCH output, training FDI link */
674cf967 2695 dev_priv->display.fdi_link_train(crtc);
2c07245f 2696
ee7b9f93 2697 intel_enable_pch_pll(intel_crtc);
8db9d77b 2698
c98e9dcf 2699 if (HAS_PCH_CPT(dev)) {
ee7b9f93 2700 u32 sel;
4b645f14 2701
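		/* On CPT the transcoder-to-PLL routing is explicit: enable this
		 * transcoder's DPLL and select DPLL A or B to match the CRTC's PLL. */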
c98e9dcf 2702 temp = I915_READ(PCH_DPLL_SEL);
ee7b9f93
JB
2703 switch (pipe) {
2704 default:
2705 case 0:
2706 temp |= TRANSA_DPLL_ENABLE;
2707 sel = TRANSA_DPLLB_SEL;
2708 break;
2709 case 1:
2710 temp |= TRANSB_DPLL_ENABLE;
2711 sel = TRANSB_DPLLB_SEL;
2712 break;
2713 case 2:
2714 temp |= TRANSC_DPLL_ENABLE;
2715 sel = TRANSC_DPLLB_SEL;
2716 break;
d64311ab 2717 }
ee7b9f93
JB
2718 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2719 temp |= sel;
2720 else
2721 temp &= ~sel;
c98e9dcf 2722 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 2723 }
5eddb70b 2724
d9b6cb56
JB
2725 /* set transcoder timing, panel must allow it */
2726 assert_panel_unlocked(dev_priv, pipe);
5eddb70b
CW
2727 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2728 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2729 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
8db9d77b 2730
5eddb70b
CW
2731 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2732 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2733 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
0529a0d9 2734 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
8db9d77b 2735
f57e1e3a
ED
2736 if (!IS_HASWELL(dev))
2737 intel_fdi_normal_train(crtc);
5e84e1a4 2738
c98e9dcf
JB
2739 /* For PCH DP, enable TRANS_DP_CTL */
2740 if (HAS_PCH_CPT(dev) &&
417e822d
KP
2741 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2742 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
9325c9f0 2743 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
5eddb70b
CW
2744 reg = TRANS_DP_CTL(pipe);
2745 temp = I915_READ(reg);
2746 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
2747 TRANS_DP_SYNC_MASK |
2748 TRANS_DP_BPC_MASK);
5eddb70b
CW
2749 temp |= (TRANS_DP_OUTPUT_ENABLE |
2750 TRANS_DP_ENH_FRAMING);
9325c9f0 2751 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf
JB
2752
2753 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 2754 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
c98e9dcf 2755 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 2756 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf
JB
2757
2758 switch (intel_trans_dp_port_sel(crtc)) {
2759 case PCH_DP_B:
5eddb70b 2760 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf
JB
2761 break;
2762 case PCH_DP_C:
5eddb70b 2763 temp |= TRANS_DP_PORT_SEL_C;
c98e9dcf
JB
2764 break;
2765 case PCH_DP_D:
5eddb70b 2766 temp |= TRANS_DP_PORT_SEL_D;
c98e9dcf
JB
2767 break;
2768 default:
2769 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
5eddb70b 2770 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf 2771 break;
32f9d658 2772 }
2c07245f 2773
5eddb70b 2774 I915_WRITE(reg, temp);
6be4a607 2775 }
b52eb4dc 2776
040484af 2777 intel_enable_transcoder(dev_priv, pipe);
f67a559d
JB
2778}
2779
ee7b9f93
JB
2780static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
2781{
2782 struct intel_pch_pll *pll = intel_crtc->pch_pll;
2783
2784 if (pll == NULL)
2785 return;
2786
2787 if (pll->refcount == 0) {
2788 WARN(1, "bad PCH PLL refcount\n");
2789 return;
2790 }
2791
2792 --pll->refcount;
2793 intel_crtc->pch_pll = NULL;
2794}
2795
2796static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
2797{
2798 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
2799 struct intel_pch_pll *pll;
2800 int i;
2801
2802 pll = intel_crtc->pch_pll;
2803 if (pll) {
2804 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
2805 intel_crtc->base.base.id, pll->pll_reg);
2806 goto prepare;
2807 }
2808
2809 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2810 pll = &dev_priv->pch_plls[i];
2811
 2812		/* First pass: only consider PLLs that are already in use and look for matching timings */
2813 if (pll->refcount == 0)
2814 continue;
2815
2816 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
2817 fp == I915_READ(pll->fp0_reg)) {
2818 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
2819 intel_crtc->base.base.id,
2820 pll->pll_reg, pll->refcount, pll->active);
2821
2822 goto found;
2823 }
2824 }
2825
2826 /* Ok no matching timings, maybe there's a free one? */
2827 for (i = 0; i < dev_priv->num_pch_pll; i++) {
2828 pll = &dev_priv->pch_plls[i];
2829 if (pll->refcount == 0) {
2830 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
2831 intel_crtc->base.base.id, pll->pll_reg);
2832 goto found;
2833 }
2834 }
2835
2836 return NULL;
2837
2838found:
2839 intel_crtc->pch_pll = pll;
2840 pll->refcount++;
2841 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
2842prepare: /* separate function? */
2843 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
ee7b9f93 2844
e04c7350
CW
2845 /* Wait for the clocks to stabilize before rewriting the regs */
2846 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
2847 POSTING_READ(pll->pll_reg);
2848 udelay(150);
e04c7350
CW
2849
2850 I915_WRITE(pll->fp0_reg, fp);
2851 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
2852 pll->on = false;
2853 return pll;
2854}
2855
d4270e57
JB
2856void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
2857{
2858 struct drm_i915_private *dev_priv = dev->dev_private;
2859 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
2860 u32 temp;
2861
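	/* PIPEDSL is the pipe's current scanline; if it has not advanced after
	 * the delay, retry with FDI autotrain generation stalling disabled. */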
2862 temp = I915_READ(dslreg);
2863 udelay(500);
2864 if (wait_for(I915_READ(dslreg) != temp, 5)) {
2865 /* Without this, mode sets may fail silently on FDI */
2866 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
2867 udelay(250);
2868 I915_WRITE(tc2reg, 0);
2869 if (wait_for(I915_READ(dslreg) != temp, 5))
2870 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
2871 }
2872}
2873
f67a559d
JB
2874static void ironlake_crtc_enable(struct drm_crtc *crtc)
2875{
2876 struct drm_device *dev = crtc->dev;
2877 struct drm_i915_private *dev_priv = dev->dev_private;
2878 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2879 int pipe = intel_crtc->pipe;
2880 int plane = intel_crtc->plane;
2881 u32 temp;
2882 bool is_pch_port;
2883
2884 if (intel_crtc->active)
2885 return;
2886
2887 intel_crtc->active = true;
2888 intel_update_watermarks(dev);
2889
2890 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
2891 temp = I915_READ(PCH_LVDS);
2892 if ((temp & LVDS_PORT_EN) == 0)
2893 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2894 }
2895
2896 is_pch_port = intel_crtc_driving_pch(crtc);
2897
2898 if (is_pch_port)
357555c0 2899 ironlake_fdi_pll_enable(crtc);
f67a559d
JB
2900 else
2901 ironlake_fdi_disable(crtc);
2902
2903 /* Enable panel fitting for LVDS */
2904 if (dev_priv->pch_pf_size &&
2905 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
2906 /* Force use of hard-coded filter coefficients
2907 * as some pre-programmed values are broken,
2908 * e.g. x201.
2909 */
9db4a9c7
JB
2910 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
2911 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
2912 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
f67a559d
JB
2913 }
2914
9c54c0dd
JB
2915 /*
2916 * On ILK+ LUT must be loaded before the pipe is running but with
2917 * clocks enabled
2918 */
2919 intel_crtc_load_lut(crtc);
2920
f67a559d
JB
2921 intel_enable_pipe(dev_priv, pipe, is_pch_port);
2922 intel_enable_plane(dev_priv, plane, pipe);
2923
2924 if (is_pch_port)
2925 ironlake_pch_enable(crtc);
c98e9dcf 2926
d1ebd816 2927 mutex_lock(&dev->struct_mutex);
bed4a673 2928 intel_update_fbc(dev);
d1ebd816
BW
2929 mutex_unlock(&dev->struct_mutex);
2930
6b383a7f 2931 intel_crtc_update_cursor(crtc, true);
6be4a607
JB
2932}
2933
2934static void ironlake_crtc_disable(struct drm_crtc *crtc)
2935{
2936 struct drm_device *dev = crtc->dev;
2937 struct drm_i915_private *dev_priv = dev->dev_private;
2938 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2939 int pipe = intel_crtc->pipe;
2940 int plane = intel_crtc->plane;
5eddb70b 2941 u32 reg, temp;
b52eb4dc 2942
f7abfe8b
CW
2943 if (!intel_crtc->active)
2944 return;
2945
e6c3a2a6 2946 intel_crtc_wait_for_pending_flips(crtc);
6be4a607 2947 drm_vblank_off(dev, pipe);
6b383a7f 2948 intel_crtc_update_cursor(crtc, false);
5eddb70b 2949
b24e7179 2950 intel_disable_plane(dev_priv, plane, pipe);
913d8d11 2951
973d04f9
CW
2952 if (dev_priv->cfb_plane == plane)
2953 intel_disable_fbc(dev);
2c07245f 2954
b24e7179 2955 intel_disable_pipe(dev_priv, pipe);
32f9d658 2956
6be4a607 2957 /* Disable PF */
9db4a9c7
JB
2958 I915_WRITE(PF_CTL(pipe), 0);
2959 I915_WRITE(PF_WIN_SZ(pipe), 0);
2c07245f 2960
0fc932b8 2961 ironlake_fdi_disable(crtc);
2c07245f 2962
47a05eca
JB
2963 /* This is a horrible layering violation; we should be doing this in
2964 * the connector/encoder ->prepare instead, but we don't always have
2965 * enough information there about the config to know whether it will
2966 * actually be necessary or just cause undesired flicker.
2967 */
2968 intel_disable_pch_ports(dev_priv, pipe);
249c0e64 2969
040484af 2970 intel_disable_transcoder(dev_priv, pipe);
913d8d11 2971
6be4a607
JB
2972 if (HAS_PCH_CPT(dev)) {
2973 /* disable TRANS_DP_CTL */
5eddb70b
CW
2974 reg = TRANS_DP_CTL(pipe);
2975 temp = I915_READ(reg);
2976 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
cb3543c6 2977 temp |= TRANS_DP_PORT_SEL_NONE;
5eddb70b 2978 I915_WRITE(reg, temp);
6be4a607
JB
2979
2980 /* disable DPLL_SEL */
2981 temp = I915_READ(PCH_DPLL_SEL);
9db4a9c7
JB
2982 switch (pipe) {
2983 case 0:
d64311ab 2984 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
9db4a9c7
JB
2985 break;
2986 case 1:
6be4a607 2987 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
9db4a9c7
JB
2988 break;
2989 case 2:
4b645f14 2990 /* C shares PLL A or B */
d64311ab 2991 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
9db4a9c7
JB
2992 break;
2993 default:
2994 BUG(); /* wtf */
2995 }
6be4a607 2996 I915_WRITE(PCH_DPLL_SEL, temp);
6be4a607 2997 }
e3421a18 2998
6be4a607 2999 /* disable PCH DPLL */
ee7b9f93 3000 intel_disable_pch_pll(intel_crtc);
8db9d77b 3001
6be4a607 3002 /* Switch from PCDclk to Rawclk */
5eddb70b
CW
3003 reg = FDI_RX_CTL(pipe);
3004 temp = I915_READ(reg);
3005 I915_WRITE(reg, temp & ~FDI_PCDCLK);
8db9d77b 3006
6be4a607 3007 /* Disable CPU FDI TX PLL */
5eddb70b
CW
3008 reg = FDI_TX_CTL(pipe);
3009 temp = I915_READ(reg);
3010 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3011
3012 POSTING_READ(reg);
6be4a607 3013 udelay(100);
8db9d77b 3014
5eddb70b
CW
3015 reg = FDI_RX_CTL(pipe);
3016 temp = I915_READ(reg);
3017 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2c07245f 3018
6be4a607 3019 /* Wait for the clocks to turn off. */
5eddb70b 3020 POSTING_READ(reg);
6be4a607 3021 udelay(100);
6b383a7f 3022
f7abfe8b 3023 intel_crtc->active = false;
6b383a7f 3024 intel_update_watermarks(dev);
d1ebd816
BW
3025
3026 mutex_lock(&dev->struct_mutex);
6b383a7f 3027 intel_update_fbc(dev);
d1ebd816 3028 mutex_unlock(&dev->struct_mutex);
6be4a607 3029}
1b3c7a47 3030
6be4a607
JB
3031static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3032{
3033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3034 int pipe = intel_crtc->pipe;
3035 int plane = intel_crtc->plane;
8db9d77b 3036
6be4a607
JB
3037 /* XXX: When our outputs are all unaware of DPMS modes other than off
3038 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3039 */
3040 switch (mode) {
3041 case DRM_MODE_DPMS_ON:
3042 case DRM_MODE_DPMS_STANDBY:
3043 case DRM_MODE_DPMS_SUSPEND:
3044 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3045 ironlake_crtc_enable(crtc);
3046 break;
1b3c7a47 3047
6be4a607
JB
3048 case DRM_MODE_DPMS_OFF:
3049 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3050 ironlake_crtc_disable(crtc);
2c07245f
ZW
3051 break;
3052 }
3053}
3054
ee7b9f93
JB
3055static void ironlake_crtc_off(struct drm_crtc *crtc)
3056{
3057 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3058 intel_put_pch_pll(intel_crtc);
3059}
3060
02e792fb
DV
3061static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3062{
02e792fb 3063 if (!enable && intel_crtc->overlay) {
23f09ce3 3064 struct drm_device *dev = intel_crtc->base.dev;
ce453d81 3065 struct drm_i915_private *dev_priv = dev->dev_private;
03f77ea5 3066
23f09ce3 3067 mutex_lock(&dev->struct_mutex);
ce453d81
CW
3068 dev_priv->mm.interruptible = false;
3069 (void) intel_overlay_switch_off(intel_crtc->overlay);
3070 dev_priv->mm.interruptible = true;
23f09ce3 3071 mutex_unlock(&dev->struct_mutex);
02e792fb 3072 }
02e792fb 3073
5dcdbcb0
CW
3074 /* Let userspace switch the overlay on again. In most cases userspace
3075 * has to recompute where to put it anyway.
3076 */
02e792fb
DV
3077}
3078
0b8765c6 3079static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
3080{
3081 struct drm_device *dev = crtc->dev;
79e53945
JB
3082 struct drm_i915_private *dev_priv = dev->dev_private;
3083 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3084 int pipe = intel_crtc->pipe;
80824003 3085 int plane = intel_crtc->plane;
79e53945 3086
f7abfe8b
CW
3087 if (intel_crtc->active)
3088 return;
3089
3090 intel_crtc->active = true;
6b383a7f
CW
3091 intel_update_watermarks(dev);
3092
63d7bbe9 3093 intel_enable_pll(dev_priv, pipe);
040484af 3094 intel_enable_pipe(dev_priv, pipe, false);
b24e7179 3095 intel_enable_plane(dev_priv, plane, pipe);
79e53945 3096
0b8765c6 3097 intel_crtc_load_lut(crtc);
bed4a673 3098 intel_update_fbc(dev);
79e53945 3099
0b8765c6
JB
3100 /* Give the overlay scaler a chance to enable if it's on this pipe */
3101 intel_crtc_dpms_overlay(intel_crtc, true);
6b383a7f 3102 intel_crtc_update_cursor(crtc, true);
0b8765c6 3103}
79e53945 3104
0b8765c6
JB
3105static void i9xx_crtc_disable(struct drm_crtc *crtc)
3106{
3107 struct drm_device *dev = crtc->dev;
3108 struct drm_i915_private *dev_priv = dev->dev_private;
3109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3110 int pipe = intel_crtc->pipe;
3111 int plane = intel_crtc->plane;
b690e96c 3112
f7abfe8b
CW
3113 if (!intel_crtc->active)
3114 return;
3115
0b8765c6 3116 /* Give the overlay scaler a chance to disable if it's on this pipe */
e6c3a2a6
CW
3117 intel_crtc_wait_for_pending_flips(crtc);
3118 drm_vblank_off(dev, pipe);
0b8765c6 3119 intel_crtc_dpms_overlay(intel_crtc, false);
6b383a7f 3120 intel_crtc_update_cursor(crtc, false);
0b8765c6 3121
973d04f9
CW
3122 if (dev_priv->cfb_plane == plane)
3123 intel_disable_fbc(dev);
79e53945 3124
b24e7179 3125 intel_disable_plane(dev_priv, plane, pipe);
b24e7179 3126 intel_disable_pipe(dev_priv, pipe);
63d7bbe9 3127 intel_disable_pll(dev_priv, pipe);
0b8765c6 3128
f7abfe8b 3129 intel_crtc->active = false;
6b383a7f
CW
3130 intel_update_fbc(dev);
3131 intel_update_watermarks(dev);
0b8765c6
JB
3132}
3133
3134static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3135{
3136 /* XXX: When our outputs are all unaware of DPMS modes other than off
3137 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3138 */
3139 switch (mode) {
3140 case DRM_MODE_DPMS_ON:
3141 case DRM_MODE_DPMS_STANDBY:
3142 case DRM_MODE_DPMS_SUSPEND:
3143 i9xx_crtc_enable(crtc);
3144 break;
3145 case DRM_MODE_DPMS_OFF:
3146 i9xx_crtc_disable(crtc);
79e53945
JB
3147 break;
3148 }
2c07245f
ZW
3149}
3150
ee7b9f93
JB
3151static void i9xx_crtc_off(struct drm_crtc *crtc)
3152{
3153}
3154
2c07245f
ZW
3155/**
3156 * Sets the power management mode of the pipe and plane.
2c07245f
ZW
3157 */
3158static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3159{
3160 struct drm_device *dev = crtc->dev;
e70236a8 3161 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f
ZW
3162 struct drm_i915_master_private *master_priv;
3163 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3164 int pipe = intel_crtc->pipe;
3165 bool enabled;
3166
032d2a0d
CW
3167 if (intel_crtc->dpms_mode == mode)
3168 return;
3169
65655d4a 3170 intel_crtc->dpms_mode = mode;
debcaddc 3171
e70236a8 3172 dev_priv->display.dpms(crtc, mode);
79e53945
JB
3173
3174 if (!dev->primary->master)
3175 return;
3176
3177 master_priv = dev->primary->master->driver_priv;
3178 if (!master_priv->sarea_priv)
3179 return;
3180
3181 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3182
3183 switch (pipe) {
3184 case 0:
3185 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3186 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3187 break;
3188 case 1:
3189 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3190 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3191 break;
3192 default:
9db4a9c7 3193 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
79e53945
JB
3194 break;
3195 }
79e53945
JB
3196}
3197
cdd59983
CW
3198static void intel_crtc_disable(struct drm_crtc *crtc)
3199{
3200 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3201 struct drm_device *dev = crtc->dev;
ee7b9f93 3202 struct drm_i915_private *dev_priv = dev->dev_private;
cdd59983
CW
3203
3204 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
ee7b9f93
JB
3205 dev_priv->display.off(crtc);
3206
931872fc
CW
3207 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3208 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
cdd59983
CW
3209
3210 if (crtc->fb) {
3211 mutex_lock(&dev->struct_mutex);
1690e1eb 3212 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
cdd59983
CW
3213 mutex_unlock(&dev->struct_mutex);
3214 }
3215}
3216
7e7d76c3
JB
3217/* Prepare for a mode set.
3218 *
3219 * Note we could be a lot smarter here. We need to figure out which outputs
3220 * will be enabled, which disabled (in short, how the config will changes)
3221 * and perform the minimum necessary steps to accomplish that, e.g. updating
3222 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3223 * panel fitting is in the proper state, etc.
3224 */
3225static void i9xx_crtc_prepare(struct drm_crtc *crtc)
79e53945 3226{
7e7d76c3 3227 i9xx_crtc_disable(crtc);
79e53945
JB
3228}
3229
7e7d76c3 3230static void i9xx_crtc_commit(struct drm_crtc *crtc)
79e53945 3231{
7e7d76c3 3232 i9xx_crtc_enable(crtc);
7e7d76c3
JB
3233}
3234
3235static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3236{
7e7d76c3 3237 ironlake_crtc_disable(crtc);
7e7d76c3
JB
3238}
3239
3240static void ironlake_crtc_commit(struct drm_crtc *crtc)
3241{
7e7d76c3 3242 ironlake_crtc_enable(crtc);
79e53945
JB
3243}
3244
0206e353 3245void intel_encoder_prepare(struct drm_encoder *encoder)
79e53945
JB
3246{
3247 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3248 /* lvds has its own version of prepare see intel_lvds_prepare */
3249 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3250}
3251
0206e353 3252void intel_encoder_commit(struct drm_encoder *encoder)
79e53945
JB
3253{
3254 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
d4270e57 3255 struct drm_device *dev = encoder->dev;
d47d7cb8 3256 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
d4270e57 3257
79e53945
JB
3258 /* lvds has its own version of commit see intel_lvds_commit */
3259 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
d4270e57
JB
3260
3261 if (HAS_PCH_CPT(dev))
3262 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
79e53945
JB
3263}
3264
ea5b213a
CW
3265void intel_encoder_destroy(struct drm_encoder *encoder)
3266{
4ef69c7a 3267 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 3268
ea5b213a
CW
3269 drm_encoder_cleanup(encoder);
3270 kfree(intel_encoder);
3271}
3272
79e53945
JB
3273static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3274 struct drm_display_mode *mode,
3275 struct drm_display_mode *adjusted_mode)
3276{
2c07245f 3277 struct drm_device *dev = crtc->dev;
89749350 3278
bad720ff 3279 if (HAS_PCH_SPLIT(dev)) {
2c07245f 3280 /* FDI link clock is fixed at 2.7G */
2377b741
JB
3281 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3282 return false;
2c07245f 3283 }
89749350 3284
f9bef081
DV
3285 /* All interlaced capable intel hw wants timings in frames. Note though
3286 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3287 * timings, so we need to be careful not to clobber these.*/
3288 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3289 drm_mode_set_crtcinfo(adjusted_mode, 0);
89749350 3290
79e53945
JB
3291 return true;
3292}
3293
25eb05fc
JB
3294static int valleyview_get_display_clock_speed(struct drm_device *dev)
3295{
3296 return 400000; /* FIXME */
3297}
3298
e70236a8
JB
3299static int i945_get_display_clock_speed(struct drm_device *dev)
3300{
3301 return 400000;
3302}
79e53945 3303
e70236a8 3304static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 3305{
e70236a8
JB
3306 return 333000;
3307}
79e53945 3308
e70236a8
JB
3309static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3310{
3311 return 200000;
3312}
79e53945 3313
e70236a8
JB
3314static int i915gm_get_display_clock_speed(struct drm_device *dev)
3315{
3316 u16 gcfgc = 0;
79e53945 3317
e70236a8
JB
3318 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3319
3320 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3321 return 133000;
3322 else {
3323 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3324 case GC_DISPLAY_CLOCK_333_MHZ:
3325 return 333000;
3326 default:
3327 case GC_DISPLAY_CLOCK_190_200_MHZ:
3328 return 190000;
79e53945 3329 }
e70236a8
JB
3330 }
3331}
3332
3333static int i865_get_display_clock_speed(struct drm_device *dev)
3334{
3335 return 266000;
3336}
3337
3338static int i855_get_display_clock_speed(struct drm_device *dev)
3339{
3340 u16 hpllcc = 0;
3341 /* Assume that the hardware is in the high speed state. This
3342 * should be the default.
3343 */
3344 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3345 case GC_CLOCK_133_200:
3346 case GC_CLOCK_100_200:
3347 return 200000;
3348 case GC_CLOCK_166_250:
3349 return 250000;
3350 case GC_CLOCK_100_133:
79e53945 3351 return 133000;
e70236a8 3352 }
79e53945 3353
e70236a8
JB
3354 /* Shouldn't happen */
3355 return 0;
3356}
79e53945 3357
e70236a8
JB
3358static int i830_get_display_clock_speed(struct drm_device *dev)
3359{
3360 return 133000;
79e53945
JB
3361}
3362
2c07245f
ZW
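/* Data (gmch) and link M/N ratios plus transfer unit size, as
 * programmed into the pipe's DATA_M/N and LINK_M/N registers. */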
3363struct fdi_m_n {
3364 u32 tu;
3365 u32 gmch_m;
3366 u32 gmch_n;
3367 u32 link_m;
3368 u32 link_n;
3369};
3370
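/* The M/N register fields are 24 bits wide; halve both terms until
 * they fit, keeping the ratio approximately intact. */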
3371static void
3372fdi_reduce_ratio(u32 *num, u32 *den)
3373{
3374 while (*num > 0xffffff || *den > 0xffffff) {
3375 *num >>= 1;
3376 *den >>= 1;
3377 }
3378}
3379
2c07245f 3380static void
f2b115e6
AJ
3381ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3382 int link_clock, struct fdi_m_n *m_n)
2c07245f 3383{
2c07245f
ZW
3384 m_n->tu = 64; /* default size */
3385
22ed1113
CW
3386 /* BUG_ON(pixel_clock > INT_MAX / 36); */
3387 m_n->gmch_m = bits_per_pixel * pixel_clock;
3388 m_n->gmch_n = link_clock * nlanes * 8;
2c07245f
ZW
3389 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3390
22ed1113
CW
3391 m_n->link_m = pixel_clock;
3392 m_n->link_n = link_clock;
2c07245f
ZW
3393 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3394}
3395
a7615030
CW
3396static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3397{
72bbe58c
KP
3398 if (i915_panel_use_ssc >= 0)
3399 return i915_panel_use_ssc != 0;
3400 return dev_priv->lvds_use_ssc
435793df 3401 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
3402}
3403
5a354204
JB
3404/**
3405 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3406 * @crtc: CRTC structure
3b5c78a3 3407 * @mode: requested mode
5a354204
JB
3408 *
3409 * A pipe may be connected to one or more outputs. Based on the depth of the
3410 * attached framebuffer, choose a good color depth to use on the pipe.
3411 *
3412 * If possible, match the pipe depth to the fb depth. In some cases, this
3413 * isn't ideal, because the connected output supports a lesser or restricted
3414 * set of depths. Resolve that here:
3415 * LVDS typically supports only 6bpc, so clamp down in that case
3416 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3417 * Displays may support a restricted set as well, check EDID and clamp as
3418 * appropriate.
3b5c78a3 3419 * DP may want to dither down to 6bpc to fit larger modes
5a354204
JB
3420 *
3421 * RETURNS:
3422 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3423 * true if they don't match).
3424 */
3425static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3b5c78a3
AJ
3426 unsigned int *pipe_bpp,
3427 struct drm_display_mode *mode)
5a354204
JB
3428{
3429 struct drm_device *dev = crtc->dev;
3430 struct drm_i915_private *dev_priv = dev->dev_private;
3431 struct drm_encoder *encoder;
3432 struct drm_connector *connector;
3433 unsigned int display_bpc = UINT_MAX, bpc;
3434
3435 /* Walk the encoders & connectors on this crtc, get min bpc */
3436 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3437 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3438
3439 if (encoder->crtc != crtc)
3440 continue;
3441
3442 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3443 unsigned int lvds_bpc;
3444
3445 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3446 LVDS_A3_POWER_UP)
3447 lvds_bpc = 8;
3448 else
3449 lvds_bpc = 6;
3450
3451 if (lvds_bpc < display_bpc) {
82820490 3452 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5a354204
JB
3453 display_bpc = lvds_bpc;
3454 }
3455 continue;
3456 }
3457
3458 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
3459 /* Use VBT settings if we have an eDP panel */
3460 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
3461
3462 if (edp_bpc < display_bpc) {
82820490 3463 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5a354204
JB
3464 display_bpc = edp_bpc;
3465 }
3466 continue;
3467 }
3468
3469 /* Not one of the known troublemakers, check the EDID */
3470 list_for_each_entry(connector, &dev->mode_config.connector_list,
3471 head) {
3472 if (connector->encoder != encoder)
3473 continue;
3474
62ac41a6
JB
3475 /* Don't use an invalid EDID bpc value */
3476 if (connector->display_info.bpc &&
3477 connector->display_info.bpc < display_bpc) {
82820490 3478 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5a354204
JB
3479 display_bpc = connector->display_info.bpc;
3480 }
3481 }
3482
3483 /*
3484 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3485 * through, clamp it down. (Note: >12bpc will be caught below.)
3486 */
3487 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3488 if (display_bpc > 8 && display_bpc < 12) {
82820490 3489 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5a354204
JB
3490 display_bpc = 12;
3491 } else {
82820490 3492 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5a354204
JB
3493 display_bpc = 8;
3494 }
3495 }
3496 }
3497
3b5c78a3
AJ
3498 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3499 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3500 display_bpc = 6;
3501 }
3502
5a354204
JB
3503 /*
3504 * We could just drive the pipe at the highest bpc all the time and
3505 * enable dithering as needed, but that costs bandwidth. So choose
3506 * the minimum value that expresses the full color range of the fb but
3507 * also stays within the max display bpc discovered above.
3508 */
3509
3510 switch (crtc->fb->depth) {
3511 case 8:
3512 bpc = 8; /* since we go through a colormap */
3513 break;
3514 case 15:
3515 case 16:
3516 bpc = 6; /* min is 18bpp */
3517 break;
3518 case 24:
578393cd 3519 bpc = 8;
5a354204
JB
3520 break;
3521 case 30:
578393cd 3522 bpc = 10;
5a354204
JB
3523 break;
3524 case 48:
578393cd 3525 bpc = 12;
5a354204
JB
3526 break;
3527 default:
3528 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
3529 bpc = min((unsigned int)8, display_bpc);
3530 break;
3531 }
3532
578393cd
KP
3533 display_bpc = min(display_bpc, bpc);
3534
82820490
AJ
3535 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3536 bpc, display_bpc);
5a354204 3537
578393cd 3538 *pipe_bpp = display_bpc * 3;
5a354204
JB
3539
3540 return display_bpc != bpc;
3541}
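/*
 * Editor's note (hypothetical walk-through, not part of the original
 * source): an 18-bit LVDS panel (LVDS_A3_POWER_UP not set, so
 * lvds_bpc = 6) behind a 24-bit framebuffer (depth 24 -> bpc = 8) ends up
 * with display_bpc = min(6, 8) = 6; *pipe_bpp becomes 18 and the function
 * returns true, telling the caller to enable dithering.  With an 8bpc
 * panel the two values match and no dithering is requested.
 */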
3542
c65d77d8
JB
3543static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3544{
3545 struct drm_device *dev = crtc->dev;
3546 struct drm_i915_private *dev_priv = dev->dev_private;
3547 int refclk;
3548
3549 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3550 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3551 refclk = dev_priv->lvds_ssc_freq * 1000;
3552 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3553 refclk / 1000);
3554 } else if (!IS_GEN2(dev)) {
3555 refclk = 96000;
3556 } else {
3557 refclk = 48000;
3558 }
3559
3560 return refclk;
3561}
3562
3563static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3564 intel_clock_t *clock)
3565{
 3566 /* SDVO TV has fixed PLL values depending on its clock range;
 3567 this mirrors the VBIOS setting. */
3568 if (adjusted_mode->clock >= 100000
3569 && adjusted_mode->clock < 140500) {
3570 clock->p1 = 2;
3571 clock->p2 = 10;
3572 clock->n = 3;
3573 clock->m1 = 16;
3574 clock->m2 = 8;
3575 } else if (adjusted_mode->clock >= 140500
3576 && adjusted_mode->clock <= 200000) {
3577 clock->p1 = 1;
3578 clock->p2 = 10;
3579 clock->n = 6;
3580 clock->m1 = 12;
3581 clock->m2 = 8;
3582 }
3583}
3584
a7516a05
JB
3585static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
3586 intel_clock_t *clock,
3587 intel_clock_t *reduced_clock)
3588{
3589 struct drm_device *dev = crtc->dev;
3590 struct drm_i915_private *dev_priv = dev->dev_private;
3591 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3592 int pipe = intel_crtc->pipe;
3593 u32 fp, fp2 = 0;
3594
3595 if (IS_PINEVIEW(dev)) {
3596 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
3597 if (reduced_clock)
3598 fp2 = (1 << reduced_clock->n) << 16 |
3599 reduced_clock->m1 << 8 | reduced_clock->m2;
3600 } else {
3601 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
3602 if (reduced_clock)
3603 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
3604 reduced_clock->m2;
3605 }
3606
3607 I915_WRITE(FP0(pipe), fp);
3608
3609 intel_crtc->lowfreq_avail = false;
3610 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3611 reduced_clock && i915_powersave) {
3612 I915_WRITE(FP1(pipe), fp2);
3613 intel_crtc->lowfreq_avail = true;
3614 } else {
3615 I915_WRITE(FP1(pipe), fp);
3616 }
3617}
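/*
 * Editor's note (hypothetical divisors, not part of the original source):
 * on a non-Pineview part, n = 4, m1 = 10, m2 = 8 packs into
 * FP0 = (4 << 16) | (10 << 8) | 8 = 0x00040a08; Pineview encodes N as a
 * shifted bit instead, giving (1 << 4) << 16 | (10 << 8) | 8 = 0x00100a08.
 */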
3618
93e537a1
DV
3619static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3620 struct drm_display_mode *adjusted_mode)
3621{
3622 struct drm_device *dev = crtc->dev;
3623 struct drm_i915_private *dev_priv = dev->dev_private;
3624 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3625 int pipe = intel_crtc->pipe;
284d5df5 3626 u32 temp;
93e537a1
DV
3627
3628 temp = I915_READ(LVDS);
3629 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3630 if (pipe == 1) {
3631 temp |= LVDS_PIPEB_SELECT;
3632 } else {
3633 temp &= ~LVDS_PIPEB_SELECT;
3634 }
 3635 /* set the corresponding LVDS_BORDER bit */
3636 temp |= dev_priv->lvds_border_bits;
3637 /* Set the B0-B3 data pairs corresponding to whether we're going to
3638 * set the DPLLs for dual-channel mode or not.
3639 */
3640 if (clock->p2 == 7)
3641 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3642 else
3643 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3644
3645 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3646 * appropriately here, but we need to look more thoroughly into how
3647 * panels behave in the two modes.
3648 */
3649 /* set the dithering flag on LVDS as needed */
3650 if (INTEL_INFO(dev)->gen >= 4) {
3651 if (dev_priv->lvds_dither)
3652 temp |= LVDS_ENABLE_DITHER;
3653 else
3654 temp &= ~LVDS_ENABLE_DITHER;
3655 }
284d5df5 3656 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
93e537a1 3657 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 3658 temp |= LVDS_HSYNC_POLARITY;
93e537a1 3659 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 3660 temp |= LVDS_VSYNC_POLARITY;
93e537a1
DV
3661 I915_WRITE(LVDS, temp);
3662}
3663
eb1cbe48
DV
3664static void i9xx_update_pll(struct drm_crtc *crtc,
3665 struct drm_display_mode *mode,
3666 struct drm_display_mode *adjusted_mode,
3667 intel_clock_t *clock, intel_clock_t *reduced_clock,
3668 int num_connectors)
3669{
3670 struct drm_device *dev = crtc->dev;
3671 struct drm_i915_private *dev_priv = dev->dev_private;
3672 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3673 int pipe = intel_crtc->pipe;
3674 u32 dpll;
3675 bool is_sdvo;
3676
3677 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
3678 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
3679
3680 dpll = DPLL_VGA_MODE_DIS;
3681
3682 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3683 dpll |= DPLLB_MODE_LVDS;
3684 else
3685 dpll |= DPLLB_MODE_DAC_SERIAL;
3686 if (is_sdvo) {
3687 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3688 if (pixel_multiplier > 1) {
3689 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3690 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
3691 }
3692 dpll |= DPLL_DVO_HIGH_SPEED;
3693 }
3694 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3695 dpll |= DPLL_DVO_HIGH_SPEED;
3696
3697 /* compute bitmask from p1 value */
3698 if (IS_PINEVIEW(dev))
3699 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
3700 else {
3701 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3702 if (IS_G4X(dev) && reduced_clock)
3703 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
3704 }
3705 switch (clock->p2) {
3706 case 5:
3707 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
3708 break;
3709 case 7:
3710 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
3711 break;
3712 case 10:
3713 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
3714 break;
3715 case 14:
3716 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
3717 break;
3718 }
3719 if (INTEL_INFO(dev)->gen >= 4)
3720 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
3721
3722 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3723 dpll |= PLL_REF_INPUT_TVCLKINBC;
3724 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3725 /* XXX: just matching BIOS for now */
3726 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3727 dpll |= 3;
3728 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3729 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3730 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3731 else
3732 dpll |= PLL_REF_INPUT_DREFCLK;
3733
3734 dpll |= DPLL_VCO_ENABLE;
3735 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3736 POSTING_READ(DPLL(pipe));
3737 udelay(150);
3738
3739 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3740 * This is an exception to the general rule that mode_set doesn't turn
3741 * things on.
3742 */
3743 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3744 intel_update_lvds(crtc, clock, adjusted_mode);
3745
3746 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
3747 intel_dp_set_m_n(crtc, mode, adjusted_mode);
3748
3749 I915_WRITE(DPLL(pipe), dpll);
3750
3751 /* Wait for the clocks to stabilize. */
3752 POSTING_READ(DPLL(pipe));
3753 udelay(150);
3754
3755 if (INTEL_INFO(dev)->gen >= 4) {
3756 u32 temp = 0;
3757 if (is_sdvo) {
3758 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
3759 if (temp > 1)
3760 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
3761 else
3762 temp = 0;
3763 }
3764 I915_WRITE(DPLL_MD(pipe), temp);
3765 } else {
3766 /* The pixel multiplier can only be updated once the
3767 * DPLL is enabled and the clocks are stable.
3768 *
3769 * So write it again.
3770 */
3771 I915_WRITE(DPLL(pipe), dpll);
3772 }
3773}
3774
3775static void i8xx_update_pll(struct drm_crtc *crtc,
3776 struct drm_display_mode *adjusted_mode,
3777 intel_clock_t *clock,
3778 int num_connectors)
3779{
3780 struct drm_device *dev = crtc->dev;
3781 struct drm_i915_private *dev_priv = dev->dev_private;
3782 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3783 int pipe = intel_crtc->pipe;
3784 u32 dpll;
3785
3786 dpll = DPLL_VGA_MODE_DIS;
3787
3788 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3789 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3790 } else {
3791 if (clock->p1 == 2)
3792 dpll |= PLL_P1_DIVIDE_BY_TWO;
3793 else
3794 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
3795 if (clock->p2 == 4)
3796 dpll |= PLL_P2_DIVIDE_BY_4;
3797 }
3798
3799 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
3800 /* XXX: just matching BIOS for now */
3801 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
3802 dpll |= 3;
3803 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3804 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
3805 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
3806 else
3807 dpll |= PLL_REF_INPUT_DREFCLK;
3808
3809 dpll |= DPLL_VCO_ENABLE;
3810 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
3811 POSTING_READ(DPLL(pipe));
3812 udelay(150);
3813
3814 I915_WRITE(DPLL(pipe), dpll);
3815
3816 /* Wait for the clocks to stabilize. */
3817 POSTING_READ(DPLL(pipe));
3818 udelay(150);
3819
3820 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
3821 * This is an exception to the general rule that mode_set doesn't turn
3822 * things on.
3823 */
3824 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
3825 intel_update_lvds(crtc, clock, adjusted_mode);
3826
3827 /* The pixel multiplier can only be updated once the
3828 * DPLL is enabled and the clocks are stable.
3829 *
3830 * So write it again.
3831 */
3832 I915_WRITE(DPLL(pipe), dpll);
3833}
3834
f564048e
EA
3835static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
3836 struct drm_display_mode *mode,
3837 struct drm_display_mode *adjusted_mode,
3838 int x, int y,
3839 struct drm_framebuffer *old_fb)
79e53945
JB
3840{
3841 struct drm_device *dev = crtc->dev;
3842 struct drm_i915_private *dev_priv = dev->dev_private;
3843 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3844 int pipe = intel_crtc->pipe;
80824003 3845 int plane = intel_crtc->plane;
c751ce4f 3846 int refclk, num_connectors = 0;
652c393a 3847 intel_clock_t clock, reduced_clock;
eb1cbe48
DV
3848 u32 dspcntr, pipeconf, vsyncshift;
3849 bool ok, has_reduced_clock = false, is_sdvo = false;
3850 bool is_lvds = false, is_tv = false, is_dp = false;
79e53945 3851 struct drm_mode_config *mode_config = &dev->mode_config;
5eddb70b 3852 struct intel_encoder *encoder;
d4906093 3853 const intel_limit_t *limit;
5c3b82e2 3854 int ret;
79e53945 3855
5eddb70b
CW
3856 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
3857 if (encoder->base.crtc != crtc)
79e53945
JB
3858 continue;
3859
5eddb70b 3860 switch (encoder->type) {
79e53945
JB
3861 case INTEL_OUTPUT_LVDS:
3862 is_lvds = true;
3863 break;
3864 case INTEL_OUTPUT_SDVO:
7d57382e 3865 case INTEL_OUTPUT_HDMI:
79e53945 3866 is_sdvo = true;
5eddb70b 3867 if (encoder->needs_tv_clock)
e2f0ba97 3868 is_tv = true;
79e53945 3869 break;
79e53945
JB
3870 case INTEL_OUTPUT_TVOUT:
3871 is_tv = true;
3872 break;
a4fc5ed6
KP
3873 case INTEL_OUTPUT_DISPLAYPORT:
3874 is_dp = true;
3875 break;
79e53945 3876 }
43565a06 3877
c751ce4f 3878 num_connectors++;
79e53945
JB
3879 }
3880
c65d77d8 3881 refclk = i9xx_get_refclk(crtc, num_connectors);
79e53945 3882
d4906093
ML
3883 /*
3884 * Returns a set of divisors for the desired target clock with the given
3885 * refclk, or FALSE. The returned values represent the clock equation:
 3886 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
3887 */
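	/*
	 * Editor's note (hypothetical divisors): with refclk = 96000 kHz,
	 * n = 4, m1 = 10, m2 = 8, p1 = 2 and p2 = 10 the equation above
	 * evaluates to 96000 * (5 * 12 + 10) / 6 / 2 / 10 = 56000 kHz, so
	 * find_pll() is effectively searching the limit table for the
	 * divisor combination whose result lands closest to
	 * adjusted_mode->clock.
	 */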
1b894b59 3888 limit = intel_limit(crtc, refclk);
cec2f356
SP
3889 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
3890 &clock);
79e53945
JB
3891 if (!ok) {
3892 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 3893 return -EINVAL;
79e53945
JB
3894 }
3895
cda4b7d3 3896 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 3897 intel_crtc_update_cursor(crtc, true);
cda4b7d3 3898
ddc9003c 3899 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
3900 /*
3901 * Ensure we match the reduced clock's P to the target clock.
3902 * If the clocks don't match, we can't switch the display clock
3903 * by using the FP0/FP1. In such case we will disable the LVDS
3904 * downclock feature.
3905 */
ddc9003c 3906 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
3907 dev_priv->lvds_downclock,
3908 refclk,
cec2f356 3909 &clock,
5eddb70b 3910 &reduced_clock);
7026d4ac
ZW
3911 }
3912
c65d77d8
JB
3913 if (is_sdvo && is_tv)
3914 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
7026d4ac 3915
a7516a05
JB
3916 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
3917 &reduced_clock : NULL);
79e53945 3918
eb1cbe48
DV
3919 if (IS_GEN2(dev))
3920 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
79e53945 3921 else
eb1cbe48
DV
3922 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
3923 has_reduced_clock ? &reduced_clock : NULL,
3924 num_connectors);
79e53945
JB
3925
3926 /* setup pipeconf */
5eddb70b 3927 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
3928
3929 /* Set up the display plane register */
3930 dspcntr = DISPPLANE_GAMMA_ENABLE;
3931
929c77fb
EA
3932 if (pipe == 0)
3933 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
3934 else
3935 dspcntr |= DISPPLANE_SEL_PIPE_B;
79e53945 3936
a6c45cf0 3937 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
79e53945
JB
3938 /* Enable pixel doubling when the dot clock is > 90% of the (display)
3939 * core speed.
3940 *
3941 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
3942 * pipe == 0 check?
3943 */
e70236a8
JB
3944 if (mode->clock >
3945 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5eddb70b 3946 pipeconf |= PIPECONF_DOUBLE_WIDE;
79e53945 3947 else
5eddb70b 3948 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
79e53945
JB
3949 }
3950
3b5c78a3
AJ
3951 /* default to 8bpc */
3952 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
3953 if (is_dp) {
3954 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3955 pipeconf |= PIPECONF_BPP_6 |
3956 PIPECONF_DITHER_EN |
3957 PIPECONF_DITHER_TYPE_SP;
3958 }
3959 }
3960
28c97730 3961 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
79e53945
JB
3962 drm_mode_debug_printmodeline(mode);
3963
a7516a05
JB
3964 if (HAS_PIPE_CXSR(dev)) {
3965 if (intel_crtc->lowfreq_avail) {
28c97730 3966 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
652c393a 3967 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
a7516a05 3968 } else {
28c97730 3969 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
652c393a
JB
3970 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
3971 }
3972 }
3973
617cf884 3974 pipeconf &= ~PIPECONF_INTERLACE_MASK;
dbb02575
DV
3975 if (!IS_GEN2(dev) &&
3976 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
734b4157
KH
3977 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
3978 /* the chip adds 2 halflines automatically */
734b4157 3979 adjusted_mode->crtc_vtotal -= 1;
734b4157 3980 adjusted_mode->crtc_vblank_end -= 1;
0529a0d9
DV
3981 vsyncshift = adjusted_mode->crtc_hsync_start
3982 - adjusted_mode->crtc_htotal/2;
3983 } else {
617cf884 3984 pipeconf |= PIPECONF_PROGRESSIVE;
0529a0d9
DV
3985 vsyncshift = 0;
3986 }
3987
3988 if (!IS_GEN3(dev))
3989 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
734b4157 3990
5eddb70b
CW
3991 I915_WRITE(HTOTAL(pipe),
3992 (adjusted_mode->crtc_hdisplay - 1) |
79e53945 3993 ((adjusted_mode->crtc_htotal - 1) << 16));
5eddb70b
CW
3994 I915_WRITE(HBLANK(pipe),
3995 (adjusted_mode->crtc_hblank_start - 1) |
79e53945 3996 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5eddb70b
CW
3997 I915_WRITE(HSYNC(pipe),
3998 (adjusted_mode->crtc_hsync_start - 1) |
79e53945 3999 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5eddb70b
CW
4000
4001 I915_WRITE(VTOTAL(pipe),
4002 (adjusted_mode->crtc_vdisplay - 1) |
79e53945 4003 ((adjusted_mode->crtc_vtotal - 1) << 16));
5eddb70b
CW
4004 I915_WRITE(VBLANK(pipe),
4005 (adjusted_mode->crtc_vblank_start - 1) |
79e53945 4006 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5eddb70b
CW
4007 I915_WRITE(VSYNC(pipe),
4008 (adjusted_mode->crtc_vsync_start - 1) |
79e53945 4009 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5eddb70b
CW
4010
4011 /* pipesrc and dspsize control the size that is scaled from,
4012 * which should always be the user's requested size.
79e53945 4013 */
929c77fb
EA
4014 I915_WRITE(DSPSIZE(plane),
4015 ((mode->vdisplay - 1) << 16) |
4016 (mode->hdisplay - 1));
4017 I915_WRITE(DSPPOS(plane), 0);
5eddb70b
CW
4018 I915_WRITE(PIPESRC(pipe),
4019 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
2c07245f 4020
f564048e
EA
4021 I915_WRITE(PIPECONF(pipe), pipeconf);
4022 POSTING_READ(PIPECONF(pipe));
929c77fb 4023 intel_enable_pipe(dev_priv, pipe, false);
f564048e
EA
4024
4025 intel_wait_for_vblank(dev, pipe);
4026
f564048e
EA
4027 I915_WRITE(DSPCNTR(plane), dspcntr);
4028 POSTING_READ(DSPCNTR(plane));
4029
4030 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4031
4032 intel_update_watermarks(dev);
4033
f564048e
EA
4034 return ret;
4035}
4036
9fb526db
KP
4037/*
4038 * Initialize reference clocks when the driver loads
4039 */
4040void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
4041{
4042 struct drm_i915_private *dev_priv = dev->dev_private;
4043 struct drm_mode_config *mode_config = &dev->mode_config;
13d83a67 4044 struct intel_encoder *encoder;
13d83a67
JB
4045 u32 temp;
4046 bool has_lvds = false;
199e5d79
KP
4047 bool has_cpu_edp = false;
4048 bool has_pch_edp = false;
4049 bool has_panel = false;
99eb6a01
KP
4050 bool has_ck505 = false;
4051 bool can_ssc = false;
13d83a67
JB
4052
4053 /* We need to take the global config into account */
199e5d79
KP
4054 list_for_each_entry(encoder, &mode_config->encoder_list,
4055 base.head) {
4056 switch (encoder->type) {
4057 case INTEL_OUTPUT_LVDS:
4058 has_panel = true;
4059 has_lvds = true;
4060 break;
4061 case INTEL_OUTPUT_EDP:
4062 has_panel = true;
4063 if (intel_encoder_is_pch_edp(&encoder->base))
4064 has_pch_edp = true;
4065 else
4066 has_cpu_edp = true;
4067 break;
13d83a67
JB
4068 }
4069 }
4070
99eb6a01
KP
4071 if (HAS_PCH_IBX(dev)) {
4072 has_ck505 = dev_priv->display_clock_mode;
4073 can_ssc = has_ck505;
4074 } else {
4075 has_ck505 = false;
4076 can_ssc = true;
4077 }
4078
4079 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4080 has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4081 has_ck505);
13d83a67
JB
4082
4083 /* Ironlake: try to setup display ref clock before DPLL
4084 * enabling. This is only under driver's control after
4085 * PCH B stepping, previous chipset stepping should be
4086 * ignoring this setting.
4087 */
4088 temp = I915_READ(PCH_DREF_CONTROL);
4089 /* Always enable nonspread source */
4090 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 4091
99eb6a01
KP
4092 if (has_ck505)
4093 temp |= DREF_NONSPREAD_CK505_ENABLE;
4094 else
4095 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 4096
199e5d79
KP
4097 if (has_panel) {
4098 temp &= ~DREF_SSC_SOURCE_MASK;
4099 temp |= DREF_SSC_SOURCE_ENABLE;
13d83a67 4100
199e5d79 4101 /* SSC must be turned on before enabling the CPU output */
99eb6a01 4102 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4103 DRM_DEBUG_KMS("Using SSC on panel\n");
13d83a67 4104 temp |= DREF_SSC1_ENABLE;
e77166b5
DV
4105 } else
4106 temp &= ~DREF_SSC1_ENABLE;
199e5d79
KP
4107
4108 /* Get SSC going before enabling the outputs */
4109 I915_WRITE(PCH_DREF_CONTROL, temp);
4110 POSTING_READ(PCH_DREF_CONTROL);
4111 udelay(200);
4112
13d83a67
JB
4113 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4114
4115 /* Enable CPU source on CPU attached eDP */
199e5d79 4116 if (has_cpu_edp) {
99eb6a01 4117 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4118 DRM_DEBUG_KMS("Using SSC on eDP\n");
13d83a67 4119 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
199e5d79 4120 }
13d83a67
JB
4121 else
4122 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79
KP
4123 } else
4124 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4125
4126 I915_WRITE(PCH_DREF_CONTROL, temp);
4127 POSTING_READ(PCH_DREF_CONTROL);
4128 udelay(200);
4129 } else {
4130 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4131
4132 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4133
4134 /* Turn off CPU output */
4135 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4136
4137 I915_WRITE(PCH_DREF_CONTROL, temp);
4138 POSTING_READ(PCH_DREF_CONTROL);
4139 udelay(200);
4140
4141 /* Turn off the SSC source */
4142 temp &= ~DREF_SSC_SOURCE_MASK;
4143 temp |= DREF_SSC_SOURCE_DISABLE;
4144
4145 /* Turn off SSC1 */
4146 temp &= ~ DREF_SSC1_ENABLE;
4147
13d83a67
JB
4148 I915_WRITE(PCH_DREF_CONTROL, temp);
4149 POSTING_READ(PCH_DREF_CONTROL);
4150 udelay(200);
4151 }
4152}
4153
d9d444cb
JB
4154static int ironlake_get_refclk(struct drm_crtc *crtc)
4155{
4156 struct drm_device *dev = crtc->dev;
4157 struct drm_i915_private *dev_priv = dev->dev_private;
4158 struct intel_encoder *encoder;
4159 struct drm_mode_config *mode_config = &dev->mode_config;
4160 struct intel_encoder *edp_encoder = NULL;
4161 int num_connectors = 0;
4162 bool is_lvds = false;
4163
4164 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4165 if (encoder->base.crtc != crtc)
4166 continue;
4167
4168 switch (encoder->type) {
4169 case INTEL_OUTPUT_LVDS:
4170 is_lvds = true;
4171 break;
4172 case INTEL_OUTPUT_EDP:
4173 edp_encoder = encoder;
4174 break;
4175 }
4176 num_connectors++;
4177 }
4178
4179 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4180 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4181 dev_priv->lvds_ssc_freq);
4182 return dev_priv->lvds_ssc_freq * 1000;
4183 }
4184
4185 return 120000;
4186}
4187
f564048e
EA
4188static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4189 struct drm_display_mode *mode,
4190 struct drm_display_mode *adjusted_mode,
4191 int x, int y,
4192 struct drm_framebuffer *old_fb)
79e53945
JB
4193{
4194 struct drm_device *dev = crtc->dev;
4195 struct drm_i915_private *dev_priv = dev->dev_private;
4196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4197 int pipe = intel_crtc->pipe;
80824003 4198 int plane = intel_crtc->plane;
c751ce4f 4199 int refclk, num_connectors = 0;
652c393a 4200 intel_clock_t clock, reduced_clock;
5eddb70b 4201 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
a07d6787 4202 bool ok, has_reduced_clock = false, is_sdvo = false;
a4fc5ed6 4203 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
79e53945 4204 struct drm_mode_config *mode_config = &dev->mode_config;
e3aef172 4205 struct intel_encoder *encoder, *edp_encoder = NULL;
d4906093 4206 const intel_limit_t *limit;
5c3b82e2 4207 int ret;
2c07245f 4208 struct fdi_m_n m_n = {0};
fae14981 4209 u32 temp;
5a354204
JB
4210 int target_clock, pixel_multiplier, lane, link_bw, factor;
4211 unsigned int pipe_bpp;
4212 bool dither;
e3aef172 4213 bool is_cpu_edp = false, is_pch_edp = false;
79e53945 4214
5eddb70b
CW
4215 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4216 if (encoder->base.crtc != crtc)
79e53945
JB
4217 continue;
4218
5eddb70b 4219 switch (encoder->type) {
79e53945
JB
4220 case INTEL_OUTPUT_LVDS:
4221 is_lvds = true;
4222 break;
4223 case INTEL_OUTPUT_SDVO:
7d57382e 4224 case INTEL_OUTPUT_HDMI:
79e53945 4225 is_sdvo = true;
5eddb70b 4226 if (encoder->needs_tv_clock)
e2f0ba97 4227 is_tv = true;
79e53945 4228 break;
79e53945
JB
4229 case INTEL_OUTPUT_TVOUT:
4230 is_tv = true;
4231 break;
4232 case INTEL_OUTPUT_ANALOG:
4233 is_crt = true;
4234 break;
a4fc5ed6
KP
4235 case INTEL_OUTPUT_DISPLAYPORT:
4236 is_dp = true;
4237 break;
32f9d658 4238 case INTEL_OUTPUT_EDP:
e3aef172
JB
4239 is_dp = true;
4240 if (intel_encoder_is_pch_edp(&encoder->base))
4241 is_pch_edp = true;
4242 else
4243 is_cpu_edp = true;
4244 edp_encoder = encoder;
32f9d658 4245 break;
79e53945 4246 }
43565a06 4247
c751ce4f 4248 num_connectors++;
79e53945
JB
4249 }
4250
d9d444cb 4251 refclk = ironlake_get_refclk(crtc);
79e53945 4252
d4906093
ML
4253 /*
4254 * Returns a set of divisors for the desired target clock with the given
4255 * refclk, or FALSE. The returned values represent the clock equation:
 4256 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4257 */
1b894b59 4258 limit = intel_limit(crtc, refclk);
cec2f356
SP
4259 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4260 &clock);
79e53945
JB
4261 if (!ok) {
4262 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 4263 return -EINVAL;
79e53945
JB
4264 }
4265
cda4b7d3 4266 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 4267 intel_crtc_update_cursor(crtc, true);
cda4b7d3 4268
ddc9003c 4269 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4270 /*
4271 * Ensure we match the reduced clock's P to the target clock.
4272 * If the clocks don't match, we can't switch the display clock
4273 * by using the FP0/FP1. In such case we will disable the LVDS
4274 * downclock feature.
4275 */
ddc9003c 4276 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
4277 dev_priv->lvds_downclock,
4278 refclk,
cec2f356 4279 &clock,
5eddb70b 4280 &reduced_clock);
652c393a 4281 }
7026d4ac
ZW
 4282 /* SDVO TV has fixed PLL values depending on its clock range;
 4283 this mirrors the VBIOS setting. */
4284 if (is_sdvo && is_tv) {
4285 if (adjusted_mode->clock >= 100000
5eddb70b 4286 && adjusted_mode->clock < 140500) {
7026d4ac
ZW
4287 clock.p1 = 2;
4288 clock.p2 = 10;
4289 clock.n = 3;
4290 clock.m1 = 16;
4291 clock.m2 = 8;
4292 } else if (adjusted_mode->clock >= 140500
5eddb70b 4293 && adjusted_mode->clock <= 200000) {
7026d4ac
ZW
4294 clock.p1 = 1;
4295 clock.p2 = 10;
4296 clock.n = 6;
4297 clock.m1 = 12;
4298 clock.m2 = 8;
4299 }
4300 }
4301
2c07245f 4302 /* FDI link */
8febb297
EA
4303 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4304 lane = 0;
4305 /* CPU eDP doesn't require FDI link, so just set DP M/N
4306 according to current link config */
e3aef172 4307 if (is_cpu_edp) {
8febb297 4308 target_clock = mode->clock;
e3aef172 4309 intel_edp_link_config(edp_encoder, &lane, &link_bw);
8febb297
EA
4310 } else {
4311 /* [e]DP over FDI requires target mode clock
4312 instead of link clock */
e3aef172 4313 if (is_dp)
5eb08b69 4314 target_clock = mode->clock;
8febb297
EA
4315 else
4316 target_clock = adjusted_mode->clock;
4317
4318 /* FDI is a binary signal running at ~2.7GHz, encoding
4319 * each output octet as 10 bits. The actual frequency
4320 * is stored as a divider into a 100MHz clock, and the
4321 * mode pixel clock is stored in units of 1KHz.
 4322 * Hence the bandwidth of each lane in terms of the mode signal
4323 * is:
4324 */
4325 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4326 }
58a27471 4327
8febb297
EA
4328 /* determine panel color depth */
4329 temp = I915_READ(PIPECONF(pipe));
4330 temp &= ~PIPE_BPC_MASK;
3b5c78a3 4331 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5a354204
JB
4332 switch (pipe_bpp) {
4333 case 18:
4334 temp |= PIPE_6BPC;
8febb297 4335 break;
5a354204
JB
4336 case 24:
4337 temp |= PIPE_8BPC;
8febb297 4338 break;
5a354204
JB
4339 case 30:
4340 temp |= PIPE_10BPC;
8febb297 4341 break;
5a354204
JB
4342 case 36:
4343 temp |= PIPE_12BPC;
8febb297
EA
4344 break;
4345 default:
62ac41a6
JB
4346 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4347 pipe_bpp);
5a354204
JB
4348 temp |= PIPE_8BPC;
4349 pipe_bpp = 24;
4350 break;
8febb297 4351 }
77ffb597 4352
5a354204
JB
4353 intel_crtc->bpp = pipe_bpp;
4354 I915_WRITE(PIPECONF(pipe), temp);
4355
8febb297
EA
4356 if (!lane) {
4357 /*
4358 * Account for spread spectrum to avoid
4359 * oversubscribing the link. Max center spread
4360 * is 2.5%; use 5% for safety's sake.
4361 */
5a354204 4362 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
8febb297 4363 lane = bps / (link_bw * 8) + 1;
5eb08b69 4364 }
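	/*
	 * Editor's note (hypothetical numbers): for a 148500 kHz target
	 * clock at 24 bpp, bps = 148500 * 24 * 21 / 20 = 3742200 kbit/s.
	 * Assuming intel_fdi_link_freq() reports 2.7 GHz so that link_bw
	 * above works out to 270000, each lane carries 270000 * 8 =
	 * 2160000 kbit/s, and lane = 3742200 / 2160000 + 1 = 2 FDI lanes.
	 */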
2c07245f 4365
8febb297
EA
4366 intel_crtc->fdi_lanes = lane;
4367
4368 if (pixel_multiplier > 1)
4369 link_bw *= pixel_multiplier;
5a354204
JB
4370 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4371 &m_n);
8febb297 4372
a07d6787
EA
4373 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4374 if (has_reduced_clock)
4375 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4376 reduced_clock.m2;
79e53945 4377
c1858123 4378 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
4379 factor = 21;
4380 if (is_lvds) {
4381 if ((intel_panel_use_ssc(dev_priv) &&
4382 dev_priv->lvds_ssc_freq == 100) ||
4383 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4384 factor = 25;
4385 } else if (is_sdvo && is_tv)
4386 factor = 20;
c1858123 4387
cb0e0931 4388 if (clock.m < factor * clock.n)
8febb297 4389 fp |= FP_CB_TUNE;
2c07245f 4390
5eddb70b 4391 dpll = 0;
2c07245f 4392
a07d6787
EA
4393 if (is_lvds)
4394 dpll |= DPLLB_MODE_LVDS;
4395 else
4396 dpll |= DPLLB_MODE_DAC_SERIAL;
4397 if (is_sdvo) {
4398 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4399 if (pixel_multiplier > 1) {
4400 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
79e53945 4401 }
a07d6787
EA
4402 dpll |= DPLL_DVO_HIGH_SPEED;
4403 }
e3aef172 4404 if (is_dp && !is_cpu_edp)
a07d6787 4405 dpll |= DPLL_DVO_HIGH_SPEED;
79e53945 4406
a07d6787
EA
4407 /* compute bitmask from p1 value */
4408 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4409 /* also FPA1 */
4410 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4411
4412 switch (clock.p2) {
4413 case 5:
4414 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4415 break;
4416 case 7:
4417 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4418 break;
4419 case 10:
4420 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4421 break;
4422 case 14:
4423 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4424 break;
79e53945
JB
4425 }
4426
43565a06
KH
4427 if (is_sdvo && is_tv)
4428 dpll |= PLL_REF_INPUT_TVCLKINBC;
4429 else if (is_tv)
79e53945 4430 /* XXX: just matching BIOS for now */
43565a06 4431 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
79e53945 4432 dpll |= 3;
a7615030 4433 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 4434 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
4435 else
4436 dpll |= PLL_REF_INPUT_DREFCLK;
4437
4438 /* setup pipeconf */
5eddb70b 4439 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
4440
4441 /* Set up the display plane register */
4442 dspcntr = DISPPLANE_GAMMA_ENABLE;
4443
f7cb34d4 4444 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
79e53945
JB
4445 drm_mode_debug_printmodeline(mode);
4446
9d82aa17
ED
4447 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
 4448 * pre-Haswell/LPT generations */
4449 if (HAS_PCH_LPT(dev)) {
4450 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4451 pipe);
4452 } else if (!is_cpu_edp) {
ee7b9f93 4453 struct intel_pch_pll *pll;
4b645f14 4454
ee7b9f93
JB
4455 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4456 if (pll == NULL) {
4457 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4458 pipe);
4b645f14
JB
4459 return -EINVAL;
4460 }
ee7b9f93
JB
4461 } else
4462 intel_put_pch_pll(intel_crtc);
79e53945
JB
4463
4464 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4465 * This is an exception to the general rule that mode_set doesn't turn
4466 * things on.
4467 */
4468 if (is_lvds) {
fae14981 4469 temp = I915_READ(PCH_LVDS);
5eddb70b 4470 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
7885d205
JB
4471 if (HAS_PCH_CPT(dev)) {
4472 temp &= ~PORT_TRANS_SEL_MASK;
4b645f14 4473 temp |= PORT_TRANS_SEL_CPT(pipe);
7885d205
JB
4474 } else {
4475 if (pipe == 1)
4476 temp |= LVDS_PIPEB_SELECT;
4477 else
4478 temp &= ~LVDS_PIPEB_SELECT;
4479 }
4b645f14 4480
 a3e17eb8 4481 /* set the corresponding LVDS_BORDER bit */
5eddb70b 4482 temp |= dev_priv->lvds_border_bits;
79e53945
JB
4483 /* Set the B0-B3 data pairs corresponding to whether we're going to
4484 * set the DPLLs for dual-channel mode or not.
4485 */
4486 if (clock.p2 == 7)
5eddb70b 4487 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
79e53945 4488 else
5eddb70b 4489 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
79e53945
JB
4490
4491 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4492 * appropriately here, but we need to look more thoroughly into how
4493 * panels behave in the two modes.
4494 */
284d5df5 4495 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
aa9b500d 4496 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 4497 temp |= LVDS_HSYNC_POLARITY;
aa9b500d 4498 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 4499 temp |= LVDS_VSYNC_POLARITY;
fae14981 4500 I915_WRITE(PCH_LVDS, temp);
79e53945 4501 }
434ed097 4502
8febb297
EA
4503 pipeconf &= ~PIPECONF_DITHER_EN;
4504 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5a354204 4505 if ((is_lvds && dev_priv->lvds_dither) || dither) {
8febb297 4506 pipeconf |= PIPECONF_DITHER_EN;
f74974c7 4507 pipeconf |= PIPECONF_DITHER_TYPE_SP;
434ed097 4508 }
e3aef172 4509 if (is_dp && !is_cpu_edp) {
a4fc5ed6 4510 intel_dp_set_m_n(crtc, mode, adjusted_mode);
8febb297 4511 } else {
8db9d77b 4512 /* For non-DP output, clear any trans DP clock recovery setting.*/
9db4a9c7
JB
4513 I915_WRITE(TRANSDATA_M1(pipe), 0);
4514 I915_WRITE(TRANSDATA_N1(pipe), 0);
4515 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4516 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
8db9d77b 4517 }
79e53945 4518
ee7b9f93
JB
4519 if (intel_crtc->pch_pll) {
4520 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5eddb70b 4521
32f9d658 4522 /* Wait for the clocks to stabilize. */
ee7b9f93 4523 POSTING_READ(intel_crtc->pch_pll->pll_reg);
32f9d658
ZW
4524 udelay(150);
4525
8febb297
EA
4526 /* The pixel multiplier can only be updated once the
4527 * DPLL is enabled and the clocks are stable.
4528 *
4529 * So write it again.
4530 */
ee7b9f93 4531 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
79e53945 4532 }
79e53945 4533
5eddb70b 4534 intel_crtc->lowfreq_avail = false;
ee7b9f93 4535 if (intel_crtc->pch_pll) {
4b645f14 4536 if (is_lvds && has_reduced_clock && i915_powersave) {
ee7b9f93 4537 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4b645f14
JB
4538 intel_crtc->lowfreq_avail = true;
4539 if (HAS_PIPE_CXSR(dev)) {
4540 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4541 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4542 }
4543 } else {
ee7b9f93 4544 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
4b645f14
JB
4545 if (HAS_PIPE_CXSR(dev)) {
4546 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
4547 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4548 }
652c393a
JB
4549 }
4550 }
4551
617cf884 4552 pipeconf &= ~PIPECONF_INTERLACE_MASK;
734b4157 4553 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5def474e 4554 pipeconf |= PIPECONF_INTERLACED_ILK;
734b4157 4555 /* the chip adds 2 halflines automatically */
734b4157 4556 adjusted_mode->crtc_vtotal -= 1;
734b4157 4557 adjusted_mode->crtc_vblank_end -= 1;
0529a0d9
DV
4558 I915_WRITE(VSYNCSHIFT(pipe),
4559 adjusted_mode->crtc_hsync_start
4560 - adjusted_mode->crtc_htotal/2);
4561 } else {
617cf884 4562 pipeconf |= PIPECONF_PROGRESSIVE;
0529a0d9
DV
4563 I915_WRITE(VSYNCSHIFT(pipe), 0);
4564 }
734b4157 4565
5eddb70b
CW
4566 I915_WRITE(HTOTAL(pipe),
4567 (adjusted_mode->crtc_hdisplay - 1) |
79e53945 4568 ((adjusted_mode->crtc_htotal - 1) << 16));
5eddb70b
CW
4569 I915_WRITE(HBLANK(pipe),
4570 (adjusted_mode->crtc_hblank_start - 1) |
79e53945 4571 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5eddb70b
CW
4572 I915_WRITE(HSYNC(pipe),
4573 (adjusted_mode->crtc_hsync_start - 1) |
79e53945 4574 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5eddb70b
CW
4575
4576 I915_WRITE(VTOTAL(pipe),
4577 (adjusted_mode->crtc_vdisplay - 1) |
79e53945 4578 ((adjusted_mode->crtc_vtotal - 1) << 16));
5eddb70b
CW
4579 I915_WRITE(VBLANK(pipe),
4580 (adjusted_mode->crtc_vblank_start - 1) |
79e53945 4581 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5eddb70b
CW
4582 I915_WRITE(VSYNC(pipe),
4583 (adjusted_mode->crtc_vsync_start - 1) |
79e53945 4584 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5eddb70b 4585
8febb297
EA
4586 /* pipesrc controls the size that is scaled from, which should
4587 * always be the user's requested size.
79e53945 4588 */
5eddb70b
CW
4589 I915_WRITE(PIPESRC(pipe),
4590 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
2c07245f 4591
8febb297
EA
4592 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4593 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4594 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4595 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
2c07245f 4596
e3aef172 4597 if (is_cpu_edp)
8febb297 4598 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
2c07245f 4599
5eddb70b
CW
4600 I915_WRITE(PIPECONF(pipe), pipeconf);
4601 POSTING_READ(PIPECONF(pipe));
79e53945 4602
9d0498a2 4603 intel_wait_for_vblank(dev, pipe);
79e53945 4604
5eddb70b 4605 I915_WRITE(DSPCNTR(plane), dspcntr);
b24e7179 4606 POSTING_READ(DSPCNTR(plane));
79e53945 4607
5c3b82e2 4608 ret = intel_pipe_set_base(crtc, x, y, old_fb);
7662c8bd
SL
4609
4610 intel_update_watermarks(dev);
4611
1f803ee5 4612 return ret;
79e53945
JB
4613}
4614
f564048e
EA
4615static int intel_crtc_mode_set(struct drm_crtc *crtc,
4616 struct drm_display_mode *mode,
4617 struct drm_display_mode *adjusted_mode,
4618 int x, int y,
4619 struct drm_framebuffer *old_fb)
4620{
4621 struct drm_device *dev = crtc->dev;
4622 struct drm_i915_private *dev_priv = dev->dev_private;
0b701d27
EA
4623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4624 int pipe = intel_crtc->pipe;
f564048e
EA
4625 int ret;
4626
0b701d27 4627 drm_vblank_pre_modeset(dev, pipe);
7662c8bd 4628
f564048e
EA
4629 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4630 x, y, old_fb);
79e53945 4631 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 4632
d8e70a25
JB
4633 if (ret)
4634 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4635 else
4636 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
120eced9 4637
1f803ee5 4638 return ret;
79e53945
JB
4639}
4640
3a9627f4
WF
4641static bool intel_eld_uptodate(struct drm_connector *connector,
4642 int reg_eldv, uint32_t bits_eldv,
4643 int reg_elda, uint32_t bits_elda,
4644 int reg_edid)
4645{
4646 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4647 uint8_t *eld = connector->eld;
4648 uint32_t i;
4649
4650 i = I915_READ(reg_eldv);
4651 i &= bits_eldv;
4652
4653 if (!eld[0])
4654 return !i;
4655
4656 if (!i)
4657 return false;
4658
4659 i = I915_READ(reg_elda);
4660 i &= ~bits_elda;
4661 I915_WRITE(reg_elda, i);
4662
4663 for (i = 0; i < eld[2]; i++)
4664 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
4665 return false;
4666
4667 return true;
4668}
4669
e0dac65e
WF
4670static void g4x_write_eld(struct drm_connector *connector,
4671 struct drm_crtc *crtc)
4672{
4673 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4674 uint8_t *eld = connector->eld;
4675 uint32_t eldv;
4676 uint32_t len;
4677 uint32_t i;
4678
4679 i = I915_READ(G4X_AUD_VID_DID);
4680
4681 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
4682 eldv = G4X_ELDV_DEVCL_DEVBLC;
4683 else
4684 eldv = G4X_ELDV_DEVCTG;
4685
3a9627f4
WF
4686 if (intel_eld_uptodate(connector,
4687 G4X_AUD_CNTL_ST, eldv,
4688 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
4689 G4X_HDMIW_HDMIEDID))
4690 return;
4691
e0dac65e
WF
4692 i = I915_READ(G4X_AUD_CNTL_ST);
4693 i &= ~(eldv | G4X_ELD_ADDR);
4694 len = (i >> 9) & 0x1f; /* ELD buffer size */
4695 I915_WRITE(G4X_AUD_CNTL_ST, i);
4696
4697 if (!eld[0])
4698 return;
4699
4700 len = min_t(uint8_t, eld[2], len);
4701 DRM_DEBUG_DRIVER("ELD size %d\n", len);
4702 for (i = 0; i < len; i++)
4703 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
4704
4705 i = I915_READ(G4X_AUD_CNTL_ST);
4706 i |= eldv;
4707 I915_WRITE(G4X_AUD_CNTL_ST, i);
4708}
4709
4710static void ironlake_write_eld(struct drm_connector *connector,
4711 struct drm_crtc *crtc)
4712{
4713 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4714 uint8_t *eld = connector->eld;
4715 uint32_t eldv;
4716 uint32_t i;
4717 int len;
4718 int hdmiw_hdmiedid;
b6daa025 4719 int aud_config;
e0dac65e
WF
4720 int aud_cntl_st;
4721 int aud_cntrl_st2;
4722
b3f33cbf 4723 if (HAS_PCH_IBX(connector->dev)) {
1202b4c6 4724 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
b6daa025 4725 aud_config = IBX_AUD_CONFIG_A;
1202b4c6
WF
4726 aud_cntl_st = IBX_AUD_CNTL_ST_A;
4727 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
e0dac65e 4728 } else {
1202b4c6 4729 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
b6daa025 4730 aud_config = CPT_AUD_CONFIG_A;
1202b4c6
WF
4731 aud_cntl_st = CPT_AUD_CNTL_ST_A;
4732 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
e0dac65e
WF
4733 }
4734
4735 i = to_intel_crtc(crtc)->pipe;
4736 hdmiw_hdmiedid += i * 0x100;
4737 aud_cntl_st += i * 0x100;
b6daa025 4738 aud_config += i * 0x100;
e0dac65e
WF
4739
4740 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
4741
4742 i = I915_READ(aud_cntl_st);
4743 i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
4744 if (!i) {
4745 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
4746 /* operate blindly on all ports */
1202b4c6
WF
4747 eldv = IBX_ELD_VALIDB;
4748 eldv |= IBX_ELD_VALIDB << 4;
4749 eldv |= IBX_ELD_VALIDB << 8;
e0dac65e
WF
4750 } else {
4751 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
1202b4c6 4752 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
e0dac65e
WF
4753 }
4754
3a9627f4
WF
4755 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
4756 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
4757 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
b6daa025
WF
4758 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
4759 } else
4760 I915_WRITE(aud_config, 0);
e0dac65e 4761
3a9627f4
WF
4762 if (intel_eld_uptodate(connector,
4763 aud_cntrl_st2, eldv,
4764 aud_cntl_st, IBX_ELD_ADDRESS,
4765 hdmiw_hdmiedid))
4766 return;
4767
e0dac65e
WF
4768 i = I915_READ(aud_cntrl_st2);
4769 i &= ~eldv;
4770 I915_WRITE(aud_cntrl_st2, i);
4771
4772 if (!eld[0])
4773 return;
4774
e0dac65e 4775 i = I915_READ(aud_cntl_st);
1202b4c6 4776 i &= ~IBX_ELD_ADDRESS;
e0dac65e
WF
4777 I915_WRITE(aud_cntl_st, i);
4778
4779 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
4780 DRM_DEBUG_DRIVER("ELD size %d\n", len);
4781 for (i = 0; i < len; i++)
4782 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
4783
4784 i = I915_READ(aud_cntrl_st2);
4785 i |= eldv;
4786 I915_WRITE(aud_cntrl_st2, i);
4787}
4788
4789void intel_write_eld(struct drm_encoder *encoder,
4790 struct drm_display_mode *mode)
4791{
4792 struct drm_crtc *crtc = encoder->crtc;
4793 struct drm_connector *connector;
4794 struct drm_device *dev = encoder->dev;
4795 struct drm_i915_private *dev_priv = dev->dev_private;
4796
4797 connector = drm_select_eld(encoder, mode);
4798 if (!connector)
4799 return;
4800
4801 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
4802 connector->base.id,
4803 drm_get_connector_name(connector),
4804 connector->encoder->base.id,
4805 drm_get_encoder_name(connector->encoder));
4806
4807 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
4808
4809 if (dev_priv->display.write_eld)
4810 dev_priv->display.write_eld(connector, crtc);
4811}
4812
79e53945
JB
4813/** Loads the palette/gamma unit for the CRTC with the prepared values */
4814void intel_crtc_load_lut(struct drm_crtc *crtc)
4815{
4816 struct drm_device *dev = crtc->dev;
4817 struct drm_i915_private *dev_priv = dev->dev_private;
4818 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9db4a9c7 4819 int palreg = PALETTE(intel_crtc->pipe);
79e53945
JB
4820 int i;
4821
4822 /* The clocks have to be on to load the palette. */
aed3f09d 4823 if (!crtc->enabled || !intel_crtc->active)
79e53945
JB
4824 return;
4825
f2b115e6 4826 /* use legacy palette for Ironlake */
bad720ff 4827 if (HAS_PCH_SPLIT(dev))
9db4a9c7 4828 palreg = LGC_PALETTE(intel_crtc->pipe);
2c07245f 4829
79e53945
JB
4830 for (i = 0; i < 256; i++) {
4831 I915_WRITE(palreg + 4 * i,
4832 (intel_crtc->lut_r[i] << 16) |
4833 (intel_crtc->lut_g[i] << 8) |
4834 intel_crtc->lut_b[i]);
4835 }
4836}
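/*
 * Editor's note (illustrative values): each palette entry packs the three
 * 8-bit LUT components into one register word, e.g. r = 0x40, g = 0x80,
 * b = 0xc0 is written as (0x40 << 16) | (0x80 << 8) | 0xc0 = 0x004080c0.
 */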
4837
560b85bb
CW
4838static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
4839{
4840 struct drm_device *dev = crtc->dev;
4841 struct drm_i915_private *dev_priv = dev->dev_private;
4842 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4843 bool visible = base != 0;
4844 u32 cntl;
4845
4846 if (intel_crtc->cursor_visible == visible)
4847 return;
4848
9db4a9c7 4849 cntl = I915_READ(_CURACNTR);
560b85bb
CW
4850 if (visible) {
4851 /* On these chipsets we can only modify the base whilst
4852 * the cursor is disabled.
4853 */
9db4a9c7 4854 I915_WRITE(_CURABASE, base);
560b85bb
CW
4855
4856 cntl &= ~(CURSOR_FORMAT_MASK);
4857 /* XXX width must be 64, stride 256 => 0x00 << 28 */
4858 cntl |= CURSOR_ENABLE |
4859 CURSOR_GAMMA_ENABLE |
4860 CURSOR_FORMAT_ARGB;
4861 } else
4862 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
9db4a9c7 4863 I915_WRITE(_CURACNTR, cntl);
560b85bb
CW
4864
4865 intel_crtc->cursor_visible = visible;
4866}
4867
4868static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
4869{
4870 struct drm_device *dev = crtc->dev;
4871 struct drm_i915_private *dev_priv = dev->dev_private;
4872 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4873 int pipe = intel_crtc->pipe;
4874 bool visible = base != 0;
4875
4876 if (intel_crtc->cursor_visible != visible) {
548f245b 4877 uint32_t cntl = I915_READ(CURCNTR(pipe));
560b85bb
CW
4878 if (base) {
4879 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
4880 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4881 cntl |= pipe << 28; /* Connect to correct pipe */
4882 } else {
4883 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4884 cntl |= CURSOR_MODE_DISABLE;
4885 }
9db4a9c7 4886 I915_WRITE(CURCNTR(pipe), cntl);
560b85bb
CW
4887
4888 intel_crtc->cursor_visible = visible;
4889 }
4890 /* and commit changes on next vblank */
9db4a9c7 4891 I915_WRITE(CURBASE(pipe), base);
560b85bb
CW
4892}
4893
65a21cd6
JB
4894static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
4895{
4896 struct drm_device *dev = crtc->dev;
4897 struct drm_i915_private *dev_priv = dev->dev_private;
4898 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4899 int pipe = intel_crtc->pipe;
4900 bool visible = base != 0;
4901
4902 if (intel_crtc->cursor_visible != visible) {
4903 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
4904 if (base) {
4905 cntl &= ~CURSOR_MODE;
4906 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
4907 } else {
4908 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
4909 cntl |= CURSOR_MODE_DISABLE;
4910 }
4911 I915_WRITE(CURCNTR_IVB(pipe), cntl);
4912
4913 intel_crtc->cursor_visible = visible;
4914 }
4915 /* and commit changes on next vblank */
4916 I915_WRITE(CURBASE_IVB(pipe), base);
4917}
4918
 cda4b7d3 4919/* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
6b383a7f
CW
4920static void intel_crtc_update_cursor(struct drm_crtc *crtc,
4921 bool on)
cda4b7d3
CW
4922{
4923 struct drm_device *dev = crtc->dev;
4924 struct drm_i915_private *dev_priv = dev->dev_private;
4925 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4926 int pipe = intel_crtc->pipe;
4927 int x = intel_crtc->cursor_x;
4928 int y = intel_crtc->cursor_y;
560b85bb 4929 u32 base, pos;
cda4b7d3
CW
4930 bool visible;
4931
4932 pos = 0;
4933
6b383a7f 4934 if (on && crtc->enabled && crtc->fb) {
cda4b7d3
CW
4935 base = intel_crtc->cursor_addr;
4936 if (x > (int) crtc->fb->width)
4937 base = 0;
4938
4939 if (y > (int) crtc->fb->height)
4940 base = 0;
4941 } else
4942 base = 0;
4943
4944 if (x < 0) {
4945 if (x + intel_crtc->cursor_width < 0)
4946 base = 0;
4947
4948 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
4949 x = -x;
4950 }
4951 pos |= x << CURSOR_X_SHIFT;
4952
4953 if (y < 0) {
4954 if (y + intel_crtc->cursor_height < 0)
4955 base = 0;
4956
4957 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
4958 y = -y;
4959 }
4960 pos |= y << CURSOR_Y_SHIFT;
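	/*
	 * Editor's note (hypothetical coordinates): CURPOS uses
	 * sign/magnitude encoding, so a cursor at (-8, 10) is programmed as
	 * (CURSOR_POS_SIGN << CURSOR_X_SHIFT) | (8 << CURSOR_X_SHIFT) |
	 * (10 << CURSOR_Y_SHIFT), while base is forced to 0 above only once
	 * the cursor lies entirely off the framebuffer.
	 */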
4961
4962 visible = base != 0;
560b85bb 4963 if (!visible && !intel_crtc->cursor_visible)
cda4b7d3
CW
4964 return;
4965
0cd83aa9 4966 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
65a21cd6
JB
4967 I915_WRITE(CURPOS_IVB(pipe), pos);
4968 ivb_update_cursor(crtc, base);
4969 } else {
4970 I915_WRITE(CURPOS(pipe), pos);
4971 if (IS_845G(dev) || IS_I865G(dev))
4972 i845_update_cursor(crtc, base);
4973 else
4974 i9xx_update_cursor(crtc, base);
4975 }
cda4b7d3
CW
4976}
4977
79e53945 4978static int intel_crtc_cursor_set(struct drm_crtc *crtc,
05394f39 4979 struct drm_file *file,
79e53945
JB
4980 uint32_t handle,
4981 uint32_t width, uint32_t height)
4982{
4983 struct drm_device *dev = crtc->dev;
4984 struct drm_i915_private *dev_priv = dev->dev_private;
4985 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 4986 struct drm_i915_gem_object *obj;
cda4b7d3 4987 uint32_t addr;
3f8bc370 4988 int ret;
79e53945 4989
28c97730 4990 DRM_DEBUG_KMS("\n");
79e53945
JB
4991
4992 /* if we want to turn off the cursor ignore width and height */
4993 if (!handle) {
28c97730 4994 DRM_DEBUG_KMS("cursor off\n");
3f8bc370 4995 addr = 0;
05394f39 4996 obj = NULL;
5004417d 4997 mutex_lock(&dev->struct_mutex);
3f8bc370 4998 goto finish;
79e53945
JB
4999 }
5000
5001 /* Currently we only support 64x64 cursors */
5002 if (width != 64 || height != 64) {
5003 DRM_ERROR("we currently only support 64x64 cursors\n");
5004 return -EINVAL;
5005 }
5006
05394f39 5007 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 5008 if (&obj->base == NULL)
79e53945
JB
5009 return -ENOENT;
5010
05394f39 5011 if (obj->base.size < width * height * 4) {
 79e53945 5012 DRM_ERROR("buffer is too small\n");
34b8686e
DA
5013 ret = -ENOMEM;
5014 goto fail;
79e53945
JB
5015 }
5016
71acb5eb 5017 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 5018 mutex_lock(&dev->struct_mutex);
b295d1b6 5019 if (!dev_priv->info->cursor_needs_physical) {
d9e86c0e
CW
5020 if (obj->tiling_mode) {
5021 DRM_ERROR("cursor cannot be tiled\n");
5022 ret = -EINVAL;
5023 goto fail_locked;
5024 }
5025
2da3b9b9 5026 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
e7b526bb
CW
5027 if (ret) {
5028 DRM_ERROR("failed to move cursor bo into the GTT\n");
2da3b9b9 5029 goto fail_locked;
e7b526bb
CW
5030 }
5031
d9e86c0e
CW
5032 ret = i915_gem_object_put_fence(obj);
5033 if (ret) {
2da3b9b9 5034 DRM_ERROR("failed to release fence for cursor");
d9e86c0e
CW
5035 goto fail_unpin;
5036 }
5037
05394f39 5038 addr = obj->gtt_offset;
71acb5eb 5039 } else {
6eeefaf3 5040 int align = IS_I830(dev) ? 16 * 1024 : 256;
05394f39 5041 ret = i915_gem_attach_phys_object(dev, obj,
6eeefaf3
CW
5042 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5043 align);
71acb5eb
DA
5044 if (ret) {
5045 DRM_ERROR("failed to attach phys object\n");
7f9872e0 5046 goto fail_locked;
71acb5eb 5047 }
05394f39 5048 addr = obj->phys_obj->handle->busaddr;
3f8bc370
KH
5049 }
5050
a6c45cf0 5051 if (IS_GEN2(dev))
14b60391
JB
5052 I915_WRITE(CURSIZE, (height << 12) | width);
5053
3f8bc370 5054 finish:
3f8bc370 5055 if (intel_crtc->cursor_bo) {
b295d1b6 5056 if (dev_priv->info->cursor_needs_physical) {
05394f39 5057 if (intel_crtc->cursor_bo != obj)
71acb5eb
DA
5058 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5059 } else
5060 i915_gem_object_unpin(intel_crtc->cursor_bo);
05394f39 5061 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
3f8bc370 5062 }
80824003 5063
7f9872e0 5064 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
5065
5066 intel_crtc->cursor_addr = addr;
05394f39 5067 intel_crtc->cursor_bo = obj;
cda4b7d3
CW
5068 intel_crtc->cursor_width = width;
5069 intel_crtc->cursor_height = height;
5070
6b383a7f 5071 intel_crtc_update_cursor(crtc, true);
3f8bc370 5072
79e53945 5073 return 0;
e7b526bb 5074fail_unpin:
05394f39 5075 i915_gem_object_unpin(obj);
7f9872e0 5076fail_locked:
34b8686e 5077 mutex_unlock(&dev->struct_mutex);
bc9025bd 5078fail:
05394f39 5079 drm_gem_object_unreference_unlocked(&obj->base);
34b8686e 5080 return ret;
79e53945
JB
5081}
5082
5083static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5084{
79e53945 5085 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 5086
cda4b7d3
CW
5087 intel_crtc->cursor_x = x;
5088 intel_crtc->cursor_y = y;
652c393a 5089
6b383a7f 5090 intel_crtc_update_cursor(crtc, true);
79e53945
JB
5091
5092 return 0;
5093}
5094
5095/** Sets the color ramps on behalf of RandR */
5096void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5097 u16 blue, int regno)
5098{
5099 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5100
5101 intel_crtc->lut_r[regno] = red >> 8;
5102 intel_crtc->lut_g[regno] = green >> 8;
5103 intel_crtc->lut_b[regno] = blue >> 8;
5104}
5105
b8c00ac5
DA
5106void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5107 u16 *blue, int regno)
5108{
5109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5110
5111 *red = intel_crtc->lut_r[regno] << 8;
5112 *green = intel_crtc->lut_g[regno] << 8;
5113 *blue = intel_crtc->lut_b[regno] << 8;
5114}
5115
79e53945 5116static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 5117 u16 *blue, uint32_t start, uint32_t size)
79e53945 5118{
7203425a 5119 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 5120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 5121
7203425a 5122 for (i = start; i < end; i++) {
79e53945
JB
5123 intel_crtc->lut_r[i] = red[i] >> 8;
5124 intel_crtc->lut_g[i] = green[i] >> 8;
5125 intel_crtc->lut_b[i] = blue[i] >> 8;
5126 }
5127
5128 intel_crtc_load_lut(crtc);
5129}
5130
5131/**
5132 * Get a pipe with a simple mode set on it for doing load-based monitor
5133 * detection.
5134 *
5135 * It will be up to the load-detect code to adjust the pipe as appropriate for
c751ce4f 5136 * its requirements. The pipe will be connected to no other encoders.
79e53945 5137 *
c751ce4f 5138 * Currently this code will only succeed if there is a pipe with no encoders
79e53945
JB
5139 * configured for it. In the future, it could choose to temporarily disable
5140 * some outputs to free up a pipe for its use.
5141 *
5142 * \return crtc, or NULL if no pipes are available.
5143 */
5144
5145/* VESA 640x480x72Hz mode to set on the pipe */
5146static struct drm_display_mode load_detect_mode = {
5147 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
5148 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5149};
5150
d2dff872
CW
5151static struct drm_framebuffer *
5152intel_framebuffer_create(struct drm_device *dev,
308e5bcb 5153 struct drm_mode_fb_cmd2 *mode_cmd,
d2dff872
CW
5154 struct drm_i915_gem_object *obj)
5155{
5156 struct intel_framebuffer *intel_fb;
5157 int ret;
5158
5159 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5160 if (!intel_fb) {
5161 drm_gem_object_unreference_unlocked(&obj->base);
5162 return ERR_PTR(-ENOMEM);
5163 }
5164
5165 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5166 if (ret) {
5167 drm_gem_object_unreference_unlocked(&obj->base);
5168 kfree(intel_fb);
5169 return ERR_PTR(ret);
5170 }
5171
5172 return &intel_fb->base;
5173}
5174
5175static u32
5176intel_framebuffer_pitch_for_width(int width, int bpp)
5177{
5178 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5179 return ALIGN(pitch, 64);
5180}
5181
5182static u32
5183intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5184{
5185 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5186 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5187}
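For concreteness, a worked example (assuming the 640x480 load_detect_mode above and the 24-bit depth/32 bpp that intel_get_load_detect_pipe() requests for its fallback framebuffer, with a 4 KiB PAGE_SIZE):

	pitch = ALIGN(DIV_ROUND_UP(640 * 32, 8), 64)
	      = ALIGN(2560, 64)              = 2560 bytes
	size  = ALIGN(2560 * 480, PAGE_SIZE)
	      = ALIGN(1228800, 4096)         = 1228800 bytes (300 pages)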
5188
5189static struct drm_framebuffer *
5190intel_framebuffer_create_for_mode(struct drm_device *dev,
5191 struct drm_display_mode *mode,
5192 int depth, int bpp)
5193{
5194 struct drm_i915_gem_object *obj;
308e5bcb 5195 struct drm_mode_fb_cmd2 mode_cmd;
d2dff872
CW
5196
5197 obj = i915_gem_alloc_object(dev,
5198 intel_framebuffer_size_for_mode(mode, bpp));
5199 if (obj == NULL)
5200 return ERR_PTR(-ENOMEM);
5201
5202 mode_cmd.width = mode->hdisplay;
5203 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
5204 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
5205 bpp);
5ca0c34a 5206 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872
CW
5207
5208 return intel_framebuffer_create(dev, &mode_cmd, obj);
5209}
5210
5211static struct drm_framebuffer *
5212mode_fits_in_fbdev(struct drm_device *dev,
5213 struct drm_display_mode *mode)
5214{
5215 struct drm_i915_private *dev_priv = dev->dev_private;
5216 struct drm_i915_gem_object *obj;
5217 struct drm_framebuffer *fb;
5218
5219 if (dev_priv->fbdev == NULL)
5220 return NULL;
5221
5222 obj = dev_priv->fbdev->ifb.obj;
5223 if (obj == NULL)
5224 return NULL;
5225
5226 fb = &dev_priv->fbdev->ifb.base;
01f2c773
VS
5227 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
5228 fb->bits_per_pixel))
d2dff872
CW
5229 return NULL;
5230
01f2c773 5231 if (obj->base.size < mode->vdisplay * fb->pitches[0])
d2dff872
CW
5232 return NULL;
5233
5234 return fb;
5235}
5236
7173188d
CW
5237bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5238 struct drm_connector *connector,
5239 struct drm_display_mode *mode,
8261b191 5240 struct intel_load_detect_pipe *old)
79e53945
JB
5241{
5242 struct intel_crtc *intel_crtc;
5243 struct drm_crtc *possible_crtc;
4ef69c7a 5244 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
5245 struct drm_crtc *crtc = NULL;
5246 struct drm_device *dev = encoder->dev;
d2dff872 5247 struct drm_framebuffer *old_fb;
79e53945
JB
5248 int i = -1;
5249
d2dff872
CW
5250 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5251 connector->base.id, drm_get_connector_name(connector),
5252 encoder->base.id, drm_get_encoder_name(encoder));
5253
79e53945
JB
5254 /*
5255 * Algorithm gets a little messy:
7a5e4805 5256 *
79e53945
JB
5257 * - if the connector already has an assigned crtc, use it (but make
5258 * sure it's on first)
7a5e4805 5259 *
79e53945
JB
5260 * - try to find the first unused crtc that can drive this connector,
5261 * and use that if we find one
79e53945
JB
5262 */
5263
5264 /* See if we already have a CRTC for this connector */
5265 if (encoder->crtc) {
5266 crtc = encoder->crtc;
8261b191 5267
79e53945 5268 intel_crtc = to_intel_crtc(crtc);
8261b191
CW
5269 old->dpms_mode = intel_crtc->dpms_mode;
5270 old->load_detect_temp = false;
5271
5272 /* Make sure the crtc and connector are running */
79e53945 5273 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6492711d
CW
5274 struct drm_encoder_helper_funcs *encoder_funcs;
5275 struct drm_crtc_helper_funcs *crtc_funcs;
5276
79e53945
JB
5277 crtc_funcs = crtc->helper_private;
5278 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6492711d
CW
5279
5280 encoder_funcs = encoder->helper_private;
79e53945
JB
5281 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5282 }
8261b191 5283
7173188d 5284 return true;
79e53945
JB
5285 }
5286
5287 /* Find an unused one (if possible) */
5288 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
5289 i++;
5290 if (!(encoder->possible_crtcs & (1 << i)))
5291 continue;
5292 if (!possible_crtc->enabled) {
5293 crtc = possible_crtc;
5294 break;
5295 }
79e53945
JB
5296 }
5297
5298 /*
5299 * If we didn't find an unused CRTC, don't use any.
5300 */
5301 if (!crtc) {
7173188d
CW
5302 DRM_DEBUG_KMS("no pipe available for load-detect\n");
5303 return false;
79e53945
JB
5304 }
5305
5306 encoder->crtc = crtc;
c1c43977 5307 connector->encoder = encoder;
79e53945
JB
5308
5309 intel_crtc = to_intel_crtc(crtc);
8261b191
CW
5310 old->dpms_mode = intel_crtc->dpms_mode;
5311 old->load_detect_temp = true;
d2dff872 5312 old->release_fb = NULL;
79e53945 5313
6492711d
CW
5314 if (!mode)
5315 mode = &load_detect_mode;
79e53945 5316
d2dff872
CW
5317 old_fb = crtc->fb;
5318
5319 /* We need a framebuffer large enough to accommodate all accesses
5320 * that the plane may generate whilst we perform load detection.
5321 * We cannot rely on the fbcon either being present (we get called
5322 * during its initialisation to detect all boot displays, or it may
5323 * not even exist) or that it is large enough to satisfy the
5324 * requested mode.
5325 */
5326 crtc->fb = mode_fits_in_fbdev(dev, mode);
5327 if (crtc->fb == NULL) {
5328 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5329 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
5330 old->release_fb = crtc->fb;
5331 } else
5332 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5333 if (IS_ERR(crtc->fb)) {
5334 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5335 crtc->fb = old_fb;
5336 return false;
79e53945 5337 }
79e53945 5338
d2dff872 5339 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6492711d 5340 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
d2dff872
CW
5341 if (old->release_fb)
5342 old->release_fb->funcs->destroy(old->release_fb);
5343 crtc->fb = old_fb;
6492711d 5344 return false;
79e53945 5345 }
7173188d 5346
79e53945 5347 /* let the connector get through one full cycle before testing */
9d0498a2 5348 intel_wait_for_vblank(dev, intel_crtc->pipe);
79e53945 5349
7173188d 5350 return true;
79e53945
JB
5351}
5352
c1c43977 5353void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
8261b191
CW
5354 struct drm_connector *connector,
5355 struct intel_load_detect_pipe *old)
79e53945 5356{
4ef69c7a 5357 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
5358 struct drm_device *dev = encoder->dev;
5359 struct drm_crtc *crtc = encoder->crtc;
5360 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5361 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5362
d2dff872
CW
5363 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5364 connector->base.id, drm_get_connector_name(connector),
5365 encoder->base.id, drm_get_encoder_name(encoder));
5366
8261b191 5367 if (old->load_detect_temp) {
c1c43977 5368 connector->encoder = NULL;
79e53945 5369 drm_helper_disable_unused_functions(dev);
d2dff872
CW
5370
5371 if (old->release_fb)
5372 old->release_fb->funcs->destroy(old->release_fb);
5373
0622a53c 5374 return;
79e53945
JB
5375 }
5376
c751ce4f 5377 /* Switch crtc and encoder back off if necessary */
0622a53c
CW
5378 if (old->dpms_mode != DRM_MODE_DPMS_ON) {
5379 encoder_funcs->dpms(encoder, old->dpms_mode);
8261b191 5380 crtc_funcs->dpms(crtc, old->dpms_mode);
79e53945
JB
5381 }
5382}
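A minimal sketch of how these two helpers pair up in a connector's detect path (hypothetical function name; the real callers live in the CRT and TV output code):

	static enum drm_connector_status
	example_load_detect(struct intel_encoder *intel_encoder,
			    struct drm_connector *connector)
	{
		struct intel_load_detect_pipe tmp;
		enum drm_connector_status status = connector_status_unknown;

		if (intel_get_load_detect_pipe(intel_encoder, connector, NULL, &tmp)) {
			/* the pipe is now driving load_detect_mode; run the
			 * hardware load-sensing sequence and set status here */
			intel_release_load_detect_pipe(intel_encoder, connector, &tmp);
		}

		return status;
	}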
5383
5384/* Returns the clock of the currently programmed mode of the given pipe. */
5385static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5386{
5387 struct drm_i915_private *dev_priv = dev->dev_private;
5388 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5389 int pipe = intel_crtc->pipe;
548f245b 5390 u32 dpll = I915_READ(DPLL(pipe));
79e53945
JB
5391 u32 fp;
5392 intel_clock_t clock;
5393
5394 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
39adb7a5 5395 fp = I915_READ(FP0(pipe));
79e53945 5396 else
39adb7a5 5397 fp = I915_READ(FP1(pipe));
79e53945
JB
5398
5399 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
5400 if (IS_PINEVIEW(dev)) {
5401 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
5402 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
5403 } else {
5404 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
5405 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
5406 }
5407
a6c45cf0 5408 if (!IS_GEN2(dev)) {
f2b115e6
AJ
5409 if (IS_PINEVIEW(dev))
5410 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
5411 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
5412 else
5413 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
5414 DPLL_FPA01_P1_POST_DIV_SHIFT);
5415
5416 switch (dpll & DPLL_MODE_MASK) {
5417 case DPLLB_MODE_DAC_SERIAL:
5418 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5419 5 : 10;
5420 break;
5421 case DPLLB_MODE_LVDS:
5422 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
5423 7 : 14;
5424 break;
5425 default:
28c97730 5426 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945
JB
5427 "mode\n", (int)(dpll & DPLL_MODE_MASK));
5428 return 0;
5429 }
5430
5431 /* XXX: Handle the 100 MHz refclk */
2177832f 5432 intel_clock(dev, 96000, &clock);
79e53945
JB
5433 } else {
5434 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
5435
5436 if (is_lvds) {
5437 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
5438 DPLL_FPA01_P1_POST_DIV_SHIFT);
5439 clock.p2 = 14;
5440
5441 if ((dpll & PLL_REF_INPUT_MASK) ==
5442 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5443 /* XXX: might not be 66MHz */
2177832f 5444 intel_clock(dev, 66000, &clock);
79e53945 5445 } else
2177832f 5446 intel_clock(dev, 48000, &clock);
79e53945
JB
5447 } else {
5448 if (dpll & PLL_P1_DIVIDE_BY_TWO)
5449 clock.p1 = 2;
5450 else {
5451 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
5452 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
5453 }
5454 if (dpll & PLL_P2_DIVIDE_BY_4)
5455 clock.p2 = 4;
5456 else
5457 clock.p2 = 2;
5458
2177832f 5459 intel_clock(dev, 48000, &clock);
79e53945
JB
5460 }
5461 }
5462
5463 /* XXX: It would be nice to validate the clocks, but we can't reuse
5464 * i830PllIsValid() because it relies on the xf86_config connector
5465 * configuration being accurate, which it isn't necessarily.
5466 */
5467
5468 return clock.dot;
5469}
5470
5471/** Returns the currently programmed mode of the given pipe. */
5472struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5473 struct drm_crtc *crtc)
5474{
548f245b 5475 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
5476 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5477 int pipe = intel_crtc->pipe;
5478 struct drm_display_mode *mode;
548f245b
JB
5479 int htot = I915_READ(HTOTAL(pipe));
5480 int hsync = I915_READ(HSYNC(pipe));
5481 int vtot = I915_READ(VTOTAL(pipe));
5482 int vsync = I915_READ(VSYNC(pipe));
79e53945
JB
5483
5484 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
5485 if (!mode)
5486 return NULL;
5487
5488 mode->clock = intel_crtc_clock_get(dev, crtc);
5489 mode->hdisplay = (htot & 0xffff) + 1;
5490 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5491 mode->hsync_start = (hsync & 0xffff) + 1;
5492 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5493 mode->vdisplay = (vtot & 0xffff) + 1;
5494 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5495 mode->vsync_start = (vsync & 0xffff) + 1;
5496 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5497
5498 drm_mode_set_name(mode);
79e53945
JB
5499
5500 return mode;
5501}
5502
652c393a
JB
5503#define GPU_IDLE_TIMEOUT 500 /* ms */
5504
5505 /* When this timer fires, we've been idle for a while */
5506static void intel_gpu_idle_timer(unsigned long arg)
5507{
5508 struct drm_device *dev = (struct drm_device *)arg;
5509 drm_i915_private_t *dev_priv = dev->dev_private;
5510
ff7ea4c0
CW
5511 if (!list_empty(&dev_priv->mm.active_list)) {
5512 /* Still processing requests, so just re-arm the timer. */
5513 mod_timer(&dev_priv->idle_timer, jiffies +
5514 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5515 return;
5516 }
652c393a 5517
ff7ea4c0 5518 dev_priv->busy = false;
01dfba93 5519 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
5520}
5521
652c393a
JB
5522#define CRTC_IDLE_TIMEOUT 1000 /* ms */
5523
5524static void intel_crtc_idle_timer(unsigned long arg)
5525{
5526 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5527 struct drm_crtc *crtc = &intel_crtc->base;
5528 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
ff7ea4c0 5529 struct intel_framebuffer *intel_fb;
652c393a 5530
ff7ea4c0
CW
5531 intel_fb = to_intel_framebuffer(crtc->fb);
5532 if (intel_fb && intel_fb->obj->active) {
5533 /* The framebuffer is still being accessed by the GPU. */
5534 mod_timer(&intel_crtc->idle_timer, jiffies +
5535 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5536 return;
5537 }
652c393a 5538
ff7ea4c0 5539 intel_crtc->busy = false;
01dfba93 5540 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
5541}
5542
3dec0095 5543static void intel_increase_pllclock(struct drm_crtc *crtc)
652c393a
JB
5544{
5545 struct drm_device *dev = crtc->dev;
5546 drm_i915_private_t *dev_priv = dev->dev_private;
5547 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5548 int pipe = intel_crtc->pipe;
dbdc6479
JB
5549 int dpll_reg = DPLL(pipe);
5550 int dpll;
652c393a 5551
bad720ff 5552 if (HAS_PCH_SPLIT(dev))
652c393a
JB
5553 return;
5554
5555 if (!dev_priv->lvds_downclock_avail)
5556 return;
5557
dbdc6479 5558 dpll = I915_READ(dpll_reg);
652c393a 5559 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 5560 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a 5561
8ac5a6d5 5562 assert_panel_unlocked(dev_priv, pipe);
652c393a
JB
5563
5564 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5565 I915_WRITE(dpll_reg, dpll);
9d0498a2 5566 intel_wait_for_vblank(dev, pipe);
dbdc6479 5567
652c393a
JB
5568 dpll = I915_READ(dpll_reg);
5569 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 5570 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a
JB
5571 }
5572
5573 /* Schedule downclock */
3dec0095
DV
5574 mod_timer(&intel_crtc->idle_timer, jiffies +
5575 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
652c393a
JB
5576}
5577
5578static void intel_decrease_pllclock(struct drm_crtc *crtc)
5579{
5580 struct drm_device *dev = crtc->dev;
5581 drm_i915_private_t *dev_priv = dev->dev_private;
5582 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 5583
bad720ff 5584 if (HAS_PCH_SPLIT(dev))
652c393a
JB
5585 return;
5586
5587 if (!dev_priv->lvds_downclock_avail)
5588 return;
5589
5590 /*
5591 * Since this is called by a timer, we should never get here in
5592 * the manual case.
5593 */
5594 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
dc257cf1
DV
5595 int pipe = intel_crtc->pipe;
5596 int dpll_reg = DPLL(pipe);
5597 int dpll;
f6e5b160 5598
44d98a61 5599 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a 5600
8ac5a6d5 5601 assert_panel_unlocked(dev_priv, pipe);
652c393a 5602
dc257cf1 5603 dpll = I915_READ(dpll_reg);
652c393a
JB
5604 dpll |= DISPLAY_RATE_SELECT_FPA1;
5605 I915_WRITE(dpll_reg, dpll);
9d0498a2 5606 intel_wait_for_vblank(dev, pipe);
652c393a
JB
5607 dpll = I915_READ(dpll_reg);
5608 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 5609 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
5610 }
5611
5612}
5613
5614/**
5615 * intel_idle_update - adjust clocks for idleness
5616 * @work: work struct
5617 *
5618 * Either the GPU or display (or both) went idle. Check the busy status
5619 * here and adjust the CRTC and GPU clocks as necessary.
5620 */
5621static void intel_idle_update(struct work_struct *work)
5622{
5623 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
5624 idle_work);
5625 struct drm_device *dev = dev_priv->dev;
5626 struct drm_crtc *crtc;
5627 struct intel_crtc *intel_crtc;
5628
5629 if (!i915_powersave)
5630 return;
5631
5632 mutex_lock(&dev->struct_mutex);
5633
7648fa99
JB
5634 i915_update_gfx_val(dev_priv);
5635
652c393a
JB
5636 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5637 /* Skip inactive CRTCs */
5638 if (!crtc->fb)
5639 continue;
5640
5641 intel_crtc = to_intel_crtc(crtc);
5642 if (!intel_crtc->busy)
5643 intel_decrease_pllclock(crtc);
5644 }
5645
45ac22c8 5646
652c393a
JB
5647 mutex_unlock(&dev->struct_mutex);
5648}
5649
5650/**
5651 * intel_mark_busy - mark the GPU and possibly the display busy
5652 * @dev: drm device
5653 * @obj: object we're operating on
5654 *
5655 * Callers can use this function to indicate that the GPU is busy processing
5656 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
5657 * buffer), we'll also mark the display as busy, so we know to increase its
5658 * clock frequency.
5659 */
05394f39 5660void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
652c393a
JB
5661{
5662 drm_i915_private_t *dev_priv = dev->dev_private;
5663 struct drm_crtc *crtc = NULL;
5664 struct intel_framebuffer *intel_fb;
5665 struct intel_crtc *intel_crtc;
5666
5e17ee74
ZW
5667 if (!drm_core_check_feature(dev, DRIVER_MODESET))
5668 return;
5669
9104183d
CW
5670 if (!dev_priv->busy) {
5671 intel_sanitize_pm(dev);
28cf798f 5672 dev_priv->busy = true;
9104183d 5673 } else
28cf798f
CW
5674 mod_timer(&dev_priv->idle_timer, jiffies +
5675 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
652c393a 5676
acb87dfb
CW
5677 if (obj == NULL)
5678 return;
5679
652c393a
JB
5680 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5681 if (!crtc->fb)
5682 continue;
5683
5684 intel_crtc = to_intel_crtc(crtc);
5685 intel_fb = to_intel_framebuffer(crtc->fb);
5686 if (intel_fb->obj == obj) {
5687 if (!intel_crtc->busy) {
5688 /* Non-busy -> busy, upclock */
3dec0095 5689 intel_increase_pllclock(crtc);
652c393a
JB
5690 intel_crtc->busy = true;
5691 } else {
5692 /* Busy -> busy, put off timer */
5693 mod_timer(&intel_crtc->idle_timer, jiffies +
5694 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5695 }
5696 }
5697 }
5698}
5699
79e53945
JB
5700static void intel_crtc_destroy(struct drm_crtc *crtc)
5701{
5702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a
DV
5703 struct drm_device *dev = crtc->dev;
5704 struct intel_unpin_work *work;
5705 unsigned long flags;
5706
5707 spin_lock_irqsave(&dev->event_lock, flags);
5708 work = intel_crtc->unpin_work;
5709 intel_crtc->unpin_work = NULL;
5710 spin_unlock_irqrestore(&dev->event_lock, flags);
5711
5712 if (work) {
5713 cancel_work_sync(&work->work);
5714 kfree(work);
5715 }
79e53945
JB
5716
5717 drm_crtc_cleanup(crtc);
67e77c5a 5718
79e53945
JB
5719 kfree(intel_crtc);
5720}
5721
6b95a207
KH
5722static void intel_unpin_work_fn(struct work_struct *__work)
5723{
5724 struct intel_unpin_work *work =
5725 container_of(__work, struct intel_unpin_work, work);
5726
5727 mutex_lock(&work->dev->struct_mutex);
1690e1eb 5728 intel_unpin_fb_obj(work->old_fb_obj);
05394f39
CW
5729 drm_gem_object_unreference(&work->pending_flip_obj->base);
5730 drm_gem_object_unreference(&work->old_fb_obj->base);
d9e86c0e 5731
7782de3b 5732 intel_update_fbc(work->dev);
6b95a207
KH
5733 mutex_unlock(&work->dev->struct_mutex);
5734 kfree(work);
5735}
5736
1afe3e9d 5737static void do_intel_finish_page_flip(struct drm_device *dev,
49b14a5c 5738 struct drm_crtc *crtc)
6b95a207
KH
5739{
5740 drm_i915_private_t *dev_priv = dev->dev_private;
6b95a207
KH
5741 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5742 struct intel_unpin_work *work;
05394f39 5743 struct drm_i915_gem_object *obj;
6b95a207 5744 struct drm_pending_vblank_event *e;
49b14a5c 5745 struct timeval tnow, tvbl;
6b95a207
KH
5746 unsigned long flags;
5747
5748 /* Ignore early vblank irqs */
5749 if (intel_crtc == NULL)
5750 return;
5751
49b14a5c
MK
5752 do_gettimeofday(&tnow);
5753
6b95a207
KH
5754 spin_lock_irqsave(&dev->event_lock, flags);
5755 work = intel_crtc->unpin_work;
5756 if (work == NULL || !work->pending) {
5757 spin_unlock_irqrestore(&dev->event_lock, flags);
5758 return;
5759 }
5760
5761 intel_crtc->unpin_work = NULL;
6b95a207
KH
5762
5763 if (work->event) {
5764 e = work->event;
49b14a5c 5765 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
0af7e4df
MK
5766
5767 /* Called before vblank count and timestamps have
5768 * been updated for the vblank interval of flip
5769 * completion? Need to increment vblank count and
5770 * add one videorefresh duration to returned timestamp
49b14a5c
MK
5771 * to account for this. We assume this happened if we
5772 * get called over 0.9 frame durations after the last
5773 * timestamped vblank.
5774 *
5775 * This calculation cannot be used with vrefresh rates
5776 * below 5 Hz (10 Hz to be on the safe side) without
5777 * promoting to 64-bit integers.
0af7e4df 5778 */
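		/*
		 * Example numbers (assuming a 60 Hz mode, so framedur_ns is
		 * roughly 16666667 ns): the check below is equivalent to
		 * (tnow - tvbl) > 0.9 * framedur_ns, i.e. it fires once we
		 * are more than ~15 ms past the last sampled vblank, and the
		 * fixup then bumps the sequence by one and pushes tvbl
		 * forward by one full frame duration.
		 */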
49b14a5c
MK
5779 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
5780 9 * crtc->framedur_ns) {
0af7e4df 5781 e->event.sequence++;
49b14a5c
MK
5782 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
5783 crtc->framedur_ns);
0af7e4df
MK
5784 }
5785
49b14a5c
MK
5786 e->event.tv_sec = tvbl.tv_sec;
5787 e->event.tv_usec = tvbl.tv_usec;
0af7e4df 5788
6b95a207
KH
5789 list_add_tail(&e->base.link,
5790 &e->base.file_priv->event_list);
5791 wake_up_interruptible(&e->base.file_priv->event_wait);
5792 }
5793
0af7e4df
MK
5794 drm_vblank_put(dev, intel_crtc->pipe);
5795
6b95a207
KH
5796 spin_unlock_irqrestore(&dev->event_lock, flags);
5797
05394f39 5798 obj = work->old_fb_obj;
d9e86c0e 5799
e59f2bac 5800 atomic_clear_mask(1 << intel_crtc->plane,
05394f39
CW
5801 &obj->pending_flip.counter);
5802 if (atomic_read(&obj->pending_flip) == 0)
f787a5f5 5803 wake_up(&dev_priv->pending_flip_queue);
d9e86c0e 5804
6b95a207 5805 schedule_work(&work->work);
e5510fac
JB
5806
5807 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6b95a207
KH
5808}
5809
1afe3e9d
JB
5810void intel_finish_page_flip(struct drm_device *dev, int pipe)
5811{
5812 drm_i915_private_t *dev_priv = dev->dev_private;
5813 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
5814
49b14a5c 5815 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
5816}
5817
5818void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
5819{
5820 drm_i915_private_t *dev_priv = dev->dev_private;
5821 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
5822
49b14a5c 5823 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
5824}
5825
6b95a207
KH
5826void intel_prepare_page_flip(struct drm_device *dev, int plane)
5827{
5828 drm_i915_private_t *dev_priv = dev->dev_private;
5829 struct intel_crtc *intel_crtc =
5830 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
5831 unsigned long flags;
5832
5833 spin_lock_irqsave(&dev->event_lock, flags);
de3f440f 5834 if (intel_crtc->unpin_work) {
4e5359cd
SF
5835 if ((++intel_crtc->unpin_work->pending) > 1)
5836 DRM_ERROR("Prepared flip multiple times\n");
de3f440f
JB
5837 } else {
5838 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
5839 }
6b95a207
KH
5840 spin_unlock_irqrestore(&dev->event_lock, flags);
5841}
5842
8c9f3aaf
JB
5843static int intel_gen2_queue_flip(struct drm_device *dev,
5844 struct drm_crtc *crtc,
5845 struct drm_framebuffer *fb,
5846 struct drm_i915_gem_object *obj)
5847{
5848 struct drm_i915_private *dev_priv = dev->dev_private;
5849 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5850 unsigned long offset;
5851 u32 flip_mask;
6d90c952 5852 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
5853 int ret;
5854
6d90c952 5855 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 5856 if (ret)
83d4092b 5857 goto err;
8c9f3aaf
JB
5858
5859 /* Offset into the new buffer for cases of shared fbs between CRTCs */
01f2c773 5860 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
8c9f3aaf 5861
6d90c952 5862 ret = intel_ring_begin(ring, 6);
8c9f3aaf 5863 if (ret)
83d4092b 5864 goto err_unpin;
8c9f3aaf
JB
5865
5866 /* Can't queue multiple flips, so wait for the previous
5867 * one to finish before executing the next.
5868 */
5869 if (intel_crtc->plane)
5870 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5871 else
5872 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
5873 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
5874 intel_ring_emit(ring, MI_NOOP);
5875 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5876 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5877 intel_ring_emit(ring, fb->pitches[0]);
5878 intel_ring_emit(ring, obj->gtt_offset + offset);
5879 intel_ring_emit(ring, 0); /* aux display base address, unused */
5880 intel_ring_advance(ring);
83d4092b
CW
5881 return 0;
5882
5883err_unpin:
5884 intel_unpin_fb_obj(obj);
5885err:
8c9f3aaf
JB
5886 return ret;
5887}
5888
5889static int intel_gen3_queue_flip(struct drm_device *dev,
5890 struct drm_crtc *crtc,
5891 struct drm_framebuffer *fb,
5892 struct drm_i915_gem_object *obj)
5893{
5894 struct drm_i915_private *dev_priv = dev->dev_private;
5895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5896 unsigned long offset;
5897 u32 flip_mask;
6d90c952 5898 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
5899 int ret;
5900
6d90c952 5901 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 5902 if (ret)
83d4092b 5903 goto err;
8c9f3aaf
JB
5904
5905 /* Offset into the new buffer for cases of shared fbs between CRTCs */
01f2c773 5906 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
8c9f3aaf 5907
6d90c952 5908 ret = intel_ring_begin(ring, 6);
8c9f3aaf 5909 if (ret)
83d4092b 5910 goto err_unpin;
8c9f3aaf
JB
5911
5912 if (intel_crtc->plane)
5913 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
5914 else
5915 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
5916 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
5917 intel_ring_emit(ring, MI_NOOP);
5918 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
5919 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5920 intel_ring_emit(ring, fb->pitches[0]);
5921 intel_ring_emit(ring, obj->gtt_offset + offset);
5922 intel_ring_emit(ring, MI_NOOP);
5923
5924 intel_ring_advance(ring);
83d4092b
CW
5925 return 0;
5926
5927err_unpin:
5928 intel_unpin_fb_obj(obj);
5929err:
8c9f3aaf
JB
5930 return ret;
5931}
5932
5933static int intel_gen4_queue_flip(struct drm_device *dev,
5934 struct drm_crtc *crtc,
5935 struct drm_framebuffer *fb,
5936 struct drm_i915_gem_object *obj)
5937{
5938 struct drm_i915_private *dev_priv = dev->dev_private;
5939 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5940 uint32_t pf, pipesrc;
6d90c952 5941 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
5942 int ret;
5943
6d90c952 5944 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 5945 if (ret)
83d4092b 5946 goto err;
8c9f3aaf 5947
6d90c952 5948 ret = intel_ring_begin(ring, 4);
8c9f3aaf 5949 if (ret)
83d4092b 5950 goto err_unpin;
8c9f3aaf
JB
5951
5952 /* i965+ uses the linear or tiled offsets from the
5953 * Display Registers (which do not change across a page-flip)
5954 * so we need only reprogram the base address.
5955 */
6d90c952
DV
5956 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5957 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5958 intel_ring_emit(ring, fb->pitches[0]);
5959 intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
8c9f3aaf
JB
5960
5961 /* XXX Enabling the panel-fitter across page-flip is so far
5962 * untested on non-native modes, so ignore it for now.
5963 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
5964 */
5965 pf = 0;
5966 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
5967 intel_ring_emit(ring, pf | pipesrc);
5968 intel_ring_advance(ring);
83d4092b
CW
5969 return 0;
5970
5971err_unpin:
5972 intel_unpin_fb_obj(obj);
5973err:
8c9f3aaf
JB
5974 return ret;
5975}
5976
5977static int intel_gen6_queue_flip(struct drm_device *dev,
5978 struct drm_crtc *crtc,
5979 struct drm_framebuffer *fb,
5980 struct drm_i915_gem_object *obj)
5981{
5982 struct drm_i915_private *dev_priv = dev->dev_private;
5983 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6d90c952 5984 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
5985 uint32_t pf, pipesrc;
5986 int ret;
5987
6d90c952 5988 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 5989 if (ret)
83d4092b 5990 goto err;
8c9f3aaf 5991
6d90c952 5992 ret = intel_ring_begin(ring, 4);
8c9f3aaf 5993 if (ret)
83d4092b 5994 goto err_unpin;
8c9f3aaf 5995
6d90c952
DV
5996 intel_ring_emit(ring, MI_DISPLAY_FLIP |
5997 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
5998 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
5999 intel_ring_emit(ring, obj->gtt_offset);
8c9f3aaf 6000
dc257cf1
DV
6001 /* Contrary to the suggestions in the documentation,
6002 * "Enable Panel Fitter" does not seem to be required when page
6003 * flipping with a non-native mode, and worse, causes a normal
6004 * modeset to fail.
6005 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6006 */
6007 pf = 0;
8c9f3aaf 6008 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6009 intel_ring_emit(ring, pf | pipesrc);
6010 intel_ring_advance(ring);
83d4092b
CW
6011 return 0;
6012
6013err_unpin:
6014 intel_unpin_fb_obj(obj);
6015err:
8c9f3aaf
JB
6016 return ret;
6017}
6018
7c9017e5
JB
6019/*
6020 * On gen7 we currently use the blit ring because (in early silicon at least)
6021 * the render ring doesn't give us interrupts for page flip completion, which
6022 * means clients will hang after the first flip is queued. Fortunately the
6023 * blit ring generates interrupts properly, so use it instead.
6024 */
6025static int intel_gen7_queue_flip(struct drm_device *dev,
6026 struct drm_crtc *crtc,
6027 struct drm_framebuffer *fb,
6028 struct drm_i915_gem_object *obj)
6029{
6030 struct drm_i915_private *dev_priv = dev->dev_private;
6031 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6032 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6033 int ret;
6034
6035 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6036 if (ret)
83d4092b 6037 goto err;
7c9017e5
JB
6038
6039 ret = intel_ring_begin(ring, 4);
6040 if (ret)
83d4092b 6041 goto err_unpin;
7c9017e5
JB
6042
6043 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
01f2c773 6044 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7c9017e5
JB
6045 intel_ring_emit(ring, (obj->gtt_offset));
6046 intel_ring_emit(ring, (MI_NOOP));
6047 intel_ring_advance(ring);
83d4092b
CW
6048 return 0;
6049
6050err_unpin:
6051 intel_unpin_fb_obj(obj);
6052err:
7c9017e5
JB
6053 return ret;
6054}
6055
8c9f3aaf
JB
6056static int intel_default_queue_flip(struct drm_device *dev,
6057 struct drm_crtc *crtc,
6058 struct drm_framebuffer *fb,
6059 struct drm_i915_gem_object *obj)
6060{
6061 return -ENODEV;
6062}
6063
6b95a207
KH
6064static int intel_crtc_page_flip(struct drm_crtc *crtc,
6065 struct drm_framebuffer *fb,
6066 struct drm_pending_vblank_event *event)
6067{
6068 struct drm_device *dev = crtc->dev;
6069 struct drm_i915_private *dev_priv = dev->dev_private;
6070 struct intel_framebuffer *intel_fb;
05394f39 6071 struct drm_i915_gem_object *obj;
6b95a207
KH
6072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6073 struct intel_unpin_work *work;
8c9f3aaf 6074 unsigned long flags;
52e68630 6075 int ret;
6b95a207
KH
6076
6077 work = kzalloc(sizeof *work, GFP_KERNEL);
6078 if (work == NULL)
6079 return -ENOMEM;
6080
6b95a207
KH
6081 work->event = event;
6082 work->dev = crtc->dev;
6083 intel_fb = to_intel_framebuffer(crtc->fb);
b1b87f6b 6084 work->old_fb_obj = intel_fb->obj;
6b95a207
KH
6085 INIT_WORK(&work->work, intel_unpin_work_fn);
6086
7317c75e
JB
6087 ret = drm_vblank_get(dev, intel_crtc->pipe);
6088 if (ret)
6089 goto free_work;
6090
6b95a207
KH
6091 /* We borrow the event spin lock for protecting unpin_work */
6092 spin_lock_irqsave(&dev->event_lock, flags);
6093 if (intel_crtc->unpin_work) {
6094 spin_unlock_irqrestore(&dev->event_lock, flags);
6095 kfree(work);
7317c75e 6096 drm_vblank_put(dev, intel_crtc->pipe);
468f0b44
CW
6097
6098 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
6099 return -EBUSY;
6100 }
6101 intel_crtc->unpin_work = work;
6102 spin_unlock_irqrestore(&dev->event_lock, flags);
6103
6104 intel_fb = to_intel_framebuffer(fb);
6105 obj = intel_fb->obj;
6106
468f0b44 6107 mutex_lock(&dev->struct_mutex);
6b95a207 6108
75dfca80 6109 /* Reference the objects for the scheduled work. */
05394f39
CW
6110 drm_gem_object_reference(&work->old_fb_obj->base);
6111 drm_gem_object_reference(&obj->base);
6b95a207
KH
6112
6113 crtc->fb = fb;
96b099fd 6114
e1f99ce6 6115 work->pending_flip_obj = obj;
e1f99ce6 6116
4e5359cd
SF
6117 work->enable_stall_check = true;
6118
e1f99ce6
CW
6119 /* Block clients from rendering to the new back buffer until
6120 * the flip occurs and the object is no longer visible.
6121 */
05394f39 6122 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
e1f99ce6 6123
8c9f3aaf
JB
6124 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6125 if (ret)
6126 goto cleanup_pending;
6b95a207 6127
7782de3b 6128 intel_disable_fbc(dev);
acb87dfb 6129 intel_mark_busy(dev, obj);
6b95a207
KH
6130 mutex_unlock(&dev->struct_mutex);
6131
e5510fac
JB
6132 trace_i915_flip_request(intel_crtc->plane, obj);
6133
6b95a207 6134 return 0;
96b099fd 6135
8c9f3aaf
JB
6136cleanup_pending:
6137 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
05394f39
CW
6138 drm_gem_object_unreference(&work->old_fb_obj->base);
6139 drm_gem_object_unreference(&obj->base);
96b099fd
CW
6140 mutex_unlock(&dev->struct_mutex);
6141
6142 spin_lock_irqsave(&dev->event_lock, flags);
6143 intel_crtc->unpin_work = NULL;
6144 spin_unlock_irqrestore(&dev->event_lock, flags);
6145
7317c75e
JB
6146 drm_vblank_put(dev, intel_crtc->pipe);
6147free_work:
96b099fd
CW
6148 kfree(work);
6149
6150 return ret;
6b95a207
KH
6151}
6152
47f1c6c9
CW
6153static void intel_sanitize_modesetting(struct drm_device *dev,
6154 int pipe, int plane)
6155{
6156 struct drm_i915_private *dev_priv = dev->dev_private;
6157 u32 reg, val;
6158
f47166d2
CW
6159 /* Clear any frame start delays used for debugging left by the BIOS */
6160 for_each_pipe(pipe) {
6161 reg = PIPECONF(pipe);
6162 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6163 }
6164
47f1c6c9
CW
6165 if (HAS_PCH_SPLIT(dev))
6166 return;
6167
6168 /* Who knows what state these registers were left in by the BIOS or
6169 * grub?
6170 *
6171 * If we leave the registers in a conflicting state (e.g. with the
6172 * display plane reading from a pipe other than the one we intend
6173 * to use), then when we attempt to tear down the active mode, we will
6174 * not disable the pipes and planes in the correct order -- leaving
6175 * a plane reading from a disabled pipe and possibly leading to
6176 * undefined behaviour.
6177 */
6178
6179 reg = DSPCNTR(plane);
6180 val = I915_READ(reg);
6181
6182 if ((val & DISPLAY_PLANE_ENABLE) == 0)
6183 return;
6184 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
6185 return;
6186
6187 /* This display plane is active and attached to the other CPU pipe. */
6188 pipe = !pipe;
6189
6190 /* Disable the plane and wait for it to stop reading from the pipe. */
b24e7179
JB
6191 intel_disable_plane(dev_priv, plane, pipe);
6192 intel_disable_pipe(dev_priv, pipe);
47f1c6c9 6193}
79e53945 6194
f6e5b160
CW
6195static void intel_crtc_reset(struct drm_crtc *crtc)
6196{
6197 struct drm_device *dev = crtc->dev;
6198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6199
6200 /* Reset flags back to the 'unknown' status so that they
6201 * will be correctly set on the initial modeset.
6202 */
6203 intel_crtc->dpms_mode = -1;
6204
6205 /* We need to fix up any BIOS configuration that conflicts with
6206 * our expectations.
6207 */
6208 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6209}
6210
6211static struct drm_crtc_helper_funcs intel_helper_funcs = {
6212 .dpms = intel_crtc_dpms,
6213 .mode_fixup = intel_crtc_mode_fixup,
6214 .mode_set = intel_crtc_mode_set,
6215 .mode_set_base = intel_pipe_set_base,
6216 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6217 .load_lut = intel_crtc_load_lut,
6218 .disable = intel_crtc_disable,
6219};
6220
6221static const struct drm_crtc_funcs intel_crtc_funcs = {
6222 .reset = intel_crtc_reset,
6223 .cursor_set = intel_crtc_cursor_set,
6224 .cursor_move = intel_crtc_cursor_move,
6225 .gamma_set = intel_crtc_gamma_set,
6226 .set_config = drm_crtc_helper_set_config,
6227 .destroy = intel_crtc_destroy,
6228 .page_flip = intel_crtc_page_flip,
6229};
6230
ee7b9f93
JB
6231static void intel_pch_pll_init(struct drm_device *dev)
6232{
6233 drm_i915_private_t *dev_priv = dev->dev_private;
6234 int i;
6235
6236 if (dev_priv->num_pch_pll == 0) {
6237 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6238 return;
6239 }
6240
6241 for (i = 0; i < dev_priv->num_pch_pll; i++) {
6242 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6243 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6244 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6245 }
6246}
6247
b358d0a6 6248static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 6249{
22fd0fab 6250 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
6251 struct intel_crtc *intel_crtc;
6252 int i;
6253
6254 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
6255 if (intel_crtc == NULL)
6256 return;
6257
6258 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
6259
6260 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
6261 for (i = 0; i < 256; i++) {
6262 intel_crtc->lut_r[i] = i;
6263 intel_crtc->lut_g[i] = i;
6264 intel_crtc->lut_b[i] = i;
6265 }
6266
80824003
JB
6267 /* Swap pipes & planes for FBC on pre-965 */
6268 intel_crtc->pipe = pipe;
6269 intel_crtc->plane = pipe;
e2e767ab 6270 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
28c97730 6271 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 6272 intel_crtc->plane = !pipe;
80824003
JB
6273 }
6274
22fd0fab
JB
6275 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
6276 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
6277 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6278 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6279
5d1d0cc8 6280 intel_crtc_reset(&intel_crtc->base);
04dbff52 6281 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5a354204 6282 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7e7d76c3
JB
6283
6284 if (HAS_PCH_SPLIT(dev)) {
6285 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6286 intel_helper_funcs.commit = ironlake_crtc_commit;
6287 } else {
6288 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6289 intel_helper_funcs.commit = i9xx_crtc_commit;
6290 }
6291
79e53945
JB
6292 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6293
652c393a
JB
6294 intel_crtc->busy = false;
6295
6296 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6297 (unsigned long)intel_crtc);
79e53945
JB
6298}
6299
08d7b3d1 6300int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 6301 struct drm_file *file)
08d7b3d1 6302{
08d7b3d1 6303 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
6304 struct drm_mode_object *drmmode_obj;
6305 struct intel_crtc *crtc;
08d7b3d1 6306
1cff8f6b
DV
6307 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6308 return -ENODEV;
08d7b3d1 6309
c05422d5
DV
6310 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6311 DRM_MODE_OBJECT_CRTC);
08d7b3d1 6312
c05422d5 6313 if (!drmmode_obj) {
08d7b3d1
CW
6314 DRM_ERROR("no such CRTC id\n");
6315 return -EINVAL;
6316 }
6317
c05422d5
DV
6318 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6319 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 6320
c05422d5 6321 return 0;
08d7b3d1
CW
6322}
6323
c5e4df33 6324static int intel_encoder_clones(struct drm_device *dev, int type_mask)
79e53945 6325{
4ef69c7a 6326 struct intel_encoder *encoder;
79e53945 6327 int index_mask = 0;
79e53945
JB
6328 int entry = 0;
6329
4ef69c7a
CW
6330 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6331 if (type_mask & encoder->clone_mask)
79e53945
JB
6332 index_mask |= (1 << entry);
6333 entry++;
6334 }
4ef69c7a 6335
79e53945
JB
6336 return index_mask;
6337}
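As a small worked illustration (hypothetical ordering): the mask is indexed by position in dev->mode_config.encoder_list, not by encoder type, so if the list holds three encoders and type_mask intersects the clone_mask of the first and third, the function returns

	index_mask = (1 << 0) | (1 << 2) = 0x5

which intel_setup_outputs() below installs as that encoder's possible_clones.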
6338
4d302442
CW
6339static bool has_edp_a(struct drm_device *dev)
6340{
6341 struct drm_i915_private *dev_priv = dev->dev_private;
6342
6343 if (!IS_MOBILE(dev))
6344 return false;
6345
6346 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6347 return false;
6348
6349 if (IS_GEN5(dev) &&
6350 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6351 return false;
6352
6353 return true;
6354}
6355
79e53945
JB
6356static void intel_setup_outputs(struct drm_device *dev)
6357{
725e30ad 6358 struct drm_i915_private *dev_priv = dev->dev_private;
4ef69c7a 6359 struct intel_encoder *encoder;
cb0953d7 6360 bool dpd_is_edp = false;
f3cfcba6 6361 bool has_lvds;
79e53945 6362
f3cfcba6 6363 has_lvds = intel_lvds_init(dev);
c5d1b51d
CW
6364 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
6365 /* disable the panel fitter on everything but LVDS */
6366 I915_WRITE(PFIT_CONTROL, 0);
6367 }
79e53945 6368
bad720ff 6369 if (HAS_PCH_SPLIT(dev)) {
cb0953d7 6370 dpd_is_edp = intel_dpd_is_edp(dev);
30ad48b7 6371
4d302442 6372 if (has_edp_a(dev))
32f9d658
ZW
6373 intel_dp_init(dev, DP_A);
6374
cb0953d7
AJ
6375 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6376 intel_dp_init(dev, PCH_DP_D);
6377 }
6378
6379 intel_crt_init(dev);
6380
6381 if (HAS_PCH_SPLIT(dev)) {
6382 int found;
6383
30ad48b7 6384 if (I915_READ(HDMIB) & PORT_DETECTED) {
461ed3ca 6385 /* PCH SDVOB multiplex with HDMIB */
eef4eacb 6386 found = intel_sdvo_init(dev, PCH_SDVOB, true);
30ad48b7
ZW
6387 if (!found)
6388 intel_hdmi_init(dev, HDMIB);
5eb08b69
ZW
6389 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6390 intel_dp_init(dev, PCH_DP_B);
30ad48b7
ZW
6391 }
6392
6393 if (I915_READ(HDMIC) & PORT_DETECTED)
6394 intel_hdmi_init(dev, HDMIC);
6395
6396 if (I915_READ(HDMID) & PORT_DETECTED)
6397 intel_hdmi_init(dev, HDMID);
6398
5eb08b69
ZW
6399 if (I915_READ(PCH_DP_C) & DP_DETECTED)
6400 intel_dp_init(dev, PCH_DP_C);
6401
cb0953d7 6402 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
5eb08b69
ZW
6403 intel_dp_init(dev, PCH_DP_D);
6404
103a196f 6405 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 6406 bool found = false;
7d57382e 6407
725e30ad 6408 if (I915_READ(SDVOB) & SDVO_DETECTED) {
b01f2c3a 6409 DRM_DEBUG_KMS("probing SDVOB\n");
eef4eacb 6410 found = intel_sdvo_init(dev, SDVOB, true);
b01f2c3a
JB
6411 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6412 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
725e30ad 6413 intel_hdmi_init(dev, SDVOB);
b01f2c3a 6414 }
27185ae1 6415
b01f2c3a
JB
6416 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6417 DRM_DEBUG_KMS("probing DP_B\n");
a4fc5ed6 6418 intel_dp_init(dev, DP_B);
b01f2c3a 6419 }
725e30ad 6420 }
13520b05
KH
6421
6422 /* Before G4X SDVOC doesn't have its own detect register */
13520b05 6423
b01f2c3a
JB
6424 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6425 DRM_DEBUG_KMS("probing SDVOC\n");
eef4eacb 6426 found = intel_sdvo_init(dev, SDVOC, false);
b01f2c3a 6427 }
27185ae1
ML
6428
6429 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6430
b01f2c3a
JB
6431 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6432 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
725e30ad 6433 intel_hdmi_init(dev, SDVOC);
b01f2c3a
JB
6434 }
6435 if (SUPPORTS_INTEGRATED_DP(dev)) {
6436 DRM_DEBUG_KMS("probing DP_C\n");
a4fc5ed6 6437 intel_dp_init(dev, DP_C);
b01f2c3a 6438 }
725e30ad 6439 }
27185ae1 6440
b01f2c3a
JB
6441 if (SUPPORTS_INTEGRATED_DP(dev) &&
6442 (I915_READ(DP_D) & DP_DETECTED)) {
6443 DRM_DEBUG_KMS("probing DP_D\n");
a4fc5ed6 6444 intel_dp_init(dev, DP_D);
b01f2c3a 6445 }
bad720ff 6446 } else if (IS_GEN2(dev))
79e53945
JB
6447 intel_dvo_init(dev);
6448
103a196f 6449 if (SUPPORTS_TV(dev))
79e53945
JB
6450 intel_tv_init(dev);
6451
4ef69c7a
CW
6452 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6453 encoder->base.possible_crtcs = encoder->crtc_mask;
6454 encoder->base.possible_clones =
6455 intel_encoder_clones(dev, encoder->clone_mask);
79e53945 6456 }
47356eb6 6457
2c7111db
CW
6458 /* disable all the possible outputs/crtcs before entering KMS mode */
6459 drm_helper_disable_unused_functions(dev);
9fb526db
KP
6460
6461 if (HAS_PCH_SPLIT(dev))
6462 ironlake_init_pch_refclk(dev);
79e53945
JB
6463}
6464
6465static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6466{
6467 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945
JB
6468
6469 drm_framebuffer_cleanup(fb);
05394f39 6470 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
79e53945
JB
6471
6472 kfree(intel_fb);
6473}
6474
6475static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 6476 struct drm_file *file,
79e53945
JB
6477 unsigned int *handle)
6478{
6479 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 6480 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 6481
05394f39 6482 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
6483}
6484
6485static const struct drm_framebuffer_funcs intel_fb_funcs = {
6486 .destroy = intel_user_framebuffer_destroy,
6487 .create_handle = intel_user_framebuffer_create_handle,
6488};
6489
38651674
DA
6490int intel_framebuffer_init(struct drm_device *dev,
6491 struct intel_framebuffer *intel_fb,
308e5bcb 6492 struct drm_mode_fb_cmd2 *mode_cmd,
05394f39 6493 struct drm_i915_gem_object *obj)
79e53945 6494{
79e53945
JB
6495 int ret;
6496
05394f39 6497 if (obj->tiling_mode == I915_TILING_Y)
57cd6508
CW
6498 return -EINVAL;
6499
308e5bcb 6500 if (mode_cmd->pitches[0] & 63)
57cd6508
CW
6501 return -EINVAL;
6502
308e5bcb 6503 switch (mode_cmd->pixel_format) {
04b3924d
VS
6504 case DRM_FORMAT_RGB332:
6505 case DRM_FORMAT_RGB565:
6506 case DRM_FORMAT_XRGB8888:
b250da79 6507 case DRM_FORMAT_XBGR8888:
04b3924d
VS
6508 case DRM_FORMAT_ARGB8888:
6509 case DRM_FORMAT_XRGB2101010:
6510 case DRM_FORMAT_ARGB2101010:
308e5bcb 6511 /* RGB formats are common across chipsets */
b5626747 6512 break;
04b3924d
VS
6513 case DRM_FORMAT_YUYV:
6514 case DRM_FORMAT_UYVY:
6515 case DRM_FORMAT_YVYU:
6516 case DRM_FORMAT_VYUY:
57cd6508
CW
6517 break;
6518 default:
aca25848
ED
6519 DRM_DEBUG_KMS("unsupported pixel format %u\n",
6520 mode_cmd->pixel_format);
57cd6508
CW
6521 return -EINVAL;
6522 }
6523
79e53945
JB
6524 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6525 if (ret) {
6526 DRM_ERROR("framebuffer init failed %d\n", ret);
6527 return ret;
6528 }
6529
6530 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
79e53945 6531 intel_fb->obj = obj;
79e53945
JB
6532 return 0;
6533}
6534
79e53945
JB
6535static struct drm_framebuffer *
6536intel_user_framebuffer_create(struct drm_device *dev,
6537 struct drm_file *filp,
308e5bcb 6538 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 6539{
05394f39 6540 struct drm_i915_gem_object *obj;
79e53945 6541
308e5bcb
JB
6542 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6543 mode_cmd->handles[0]));
c8725226 6544 if (&obj->base == NULL)
cce13ff7 6545 return ERR_PTR(-ENOENT);
79e53945 6546
d2dff872 6547 return intel_framebuffer_create(dev, mode_cmd, obj);
79e53945
JB
6548}
6549
79e53945 6550static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 6551 .fb_create = intel_user_framebuffer_create,
eb1f8e4f 6552 .output_poll_changed = intel_fb_output_poll_changed,
79e53945
JB
6553};
6554
e70236a8
JB
6555/* Set up chip specific display functions */
6556static void intel_init_display(struct drm_device *dev)
6557{
6558 struct drm_i915_private *dev_priv = dev->dev_private;
6559
6560 /* We always want a DPMS function */
f564048e 6561 if (HAS_PCH_SPLIT(dev)) {
f2b115e6 6562 dev_priv->display.dpms = ironlake_crtc_dpms;
f564048e 6563 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
ee7b9f93 6564 dev_priv->display.off = ironlake_crtc_off;
17638cd6 6565 dev_priv->display.update_plane = ironlake_update_plane;
f564048e 6566 } else {
e70236a8 6567 dev_priv->display.dpms = i9xx_crtc_dpms;
f564048e 6568 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
ee7b9f93 6569 dev_priv->display.off = i9xx_crtc_off;
17638cd6 6570 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 6571 }
e70236a8 6572
e70236a8 6573 /* Returns the core display clock speed */
25eb05fc
JB
6574 if (IS_VALLEYVIEW(dev))
6575 dev_priv->display.get_display_clock_speed =
6576 valleyview_get_display_clock_speed;
6577 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
e70236a8
JB
6578 dev_priv->display.get_display_clock_speed =
6579 i945_get_display_clock_speed;
6580 else if (IS_I915G(dev))
6581 dev_priv->display.get_display_clock_speed =
6582 i915_get_display_clock_speed;
f2b115e6 6583 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
e70236a8
JB
6584 dev_priv->display.get_display_clock_speed =
6585 i9xx_misc_get_display_clock_speed;
6586 else if (IS_I915GM(dev))
6587 dev_priv->display.get_display_clock_speed =
6588 i915gm_get_display_clock_speed;
6589 else if (IS_I865G(dev))
6590 dev_priv->display.get_display_clock_speed =
6591 i865_get_display_clock_speed;
f0f8a9ce 6592 else if (IS_I85X(dev))
e70236a8
JB
6593 dev_priv->display.get_display_clock_speed =
6594 i855_get_display_clock_speed;
6595 else /* 852, 830 */
6596 dev_priv->display.get_display_clock_speed =
6597 i830_get_display_clock_speed;
6598
7f8a8569 6599 if (HAS_PCH_SPLIT(dev)) {
f00a3ddf 6600 if (IS_GEN5(dev)) {
674cf967 6601 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
e0dac65e 6602 dev_priv->display.write_eld = ironlake_write_eld;
1398261a 6603 } else if (IS_GEN6(dev)) {
674cf967 6604 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
e0dac65e 6605 dev_priv->display.write_eld = ironlake_write_eld;
357555c0
JB
6606 } else if (IS_IVYBRIDGE(dev)) {
6607 /* FIXME: detect B0+ stepping and use auto training */
6608 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
e0dac65e 6609 dev_priv->display.write_eld = ironlake_write_eld;
7f8a8569
ZW
6610 } else
6611 dev_priv->display.update_wm = NULL;
ceb04246 6612 } else if (IS_VALLEYVIEW(dev)) {
575155a9
JB
6613 dev_priv->display.force_wake_get = vlv_force_wake_get;
6614 dev_priv->display.force_wake_put = vlv_force_wake_put;
6067aaea 6615 } else if (IS_G4X(dev)) {
e0dac65e 6616 dev_priv->display.write_eld = g4x_write_eld;
e70236a8 6617 }
8c9f3aaf
JB
6618
6619 /* Default just returns -ENODEV to indicate unsupported */
6620 dev_priv->display.queue_flip = intel_default_queue_flip;
6621
6622 switch (INTEL_INFO(dev)->gen) {
6623 case 2:
6624 dev_priv->display.queue_flip = intel_gen2_queue_flip;
6625 break;
6626
6627 case 3:
6628 dev_priv->display.queue_flip = intel_gen3_queue_flip;
6629 break;
6630
6631 case 4:
6632 case 5:
6633 dev_priv->display.queue_flip = intel_gen4_queue_flip;
6634 break;
6635
6636 case 6:
6637 dev_priv->display.queue_flip = intel_gen6_queue_flip;
6638 break;
7c9017e5
JB
6639 case 7:
6640 dev_priv->display.queue_flip = intel_gen7_queue_flip;
6641 break;
8c9f3aaf 6642 }
e70236a8
JB
6643}
6644
b690e96c
JB
6645/*
6646 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
6647 * resume, or other times. This quirk makes sure that's the case for
6648 * affected systems.
6649 */
0206e353 6650static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
6651{
6652 struct drm_i915_private *dev_priv = dev->dev_private;
6653
6654 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 6655 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
6656}
6657
435793df
KP
6658/*
6659 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
6660 */
6661static void quirk_ssc_force_disable(struct drm_device *dev)
6662{
6663 struct drm_i915_private *dev_priv = dev->dev_private;
6664 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 6665 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
6666}
6667
4dca20ef 6668/*
5a15ab5b
CE
6669 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
6670 * brightness value
4dca20ef
CE
6671 */
6672static void quirk_invert_brightness(struct drm_device *dev)
6673{
6674 struct drm_i915_private *dev_priv = dev->dev_private;
6675 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 6676 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
6677}
6678
b690e96c
JB
6679struct intel_quirk {
6680 int device;
6681 int subsystem_vendor;
6682 int subsystem_device;
6683 void (*hook)(struct drm_device *dev);
6684};
6685
c43b5634 6686static struct intel_quirk intel_quirks[] = {
b690e96c 6687 /* HP Mini needs pipe A force quirk (LP: #322104) */
0206e353 6688 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
b690e96c
JB
6689
6690 /* Thinkpad R31 needs pipe A force quirk */
6691 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
6692 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
6693 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
6694
6695 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
6696 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
6697 /* ThinkPad X40 needs pipe A force quirk */
6698
6699 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
6700 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
6701
6702 /* 855 & before need to leave pipe A & dpll A up */
6703 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
6704 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
435793df
KP
6705
6706 /* Lenovo U160 cannot use SSC on LVDS */
6707 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
070d329a
MAS
6708
6709 /* Sony Vaio Y cannot use SSC on LVDS */
6710 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
5a15ab5b
CE
6711
6712 /* Acer Aspire 5734Z must invert backlight brightness */
6713 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
b690e96c
JB
6714};
6715
6716static void intel_init_quirks(struct drm_device *dev)
6717{
6718 struct pci_dev *d = dev->pdev;
6719 int i;
6720
6721 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
6722 struct intel_quirk *q = &intel_quirks[i];
6723
6724 if (d->device == q->device &&
6725 (d->subsystem_vendor == q->subsystem_vendor ||
6726 q->subsystem_vendor == PCI_ANY_ID) &&
6727 (d->subsystem_device == q->subsystem_device ||
6728 q->subsystem_device == PCI_ANY_ID))
6729 q->hook(dev);
6730 }
6731}
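For illustration, a hypothetical extra entry (made-up subsystem IDs, not a real machine) would slot into intel_quirks[] the same way; PCI_ANY_ID in either subsystem field acts as a wildcard in the match above:

	/* Hypothetical example only: force pipe A on an imaginary 0x2782 variant */
	{ 0x2782, 0x1234, 0x5678, quirk_pipea_force },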
6732
9cce37f4
JB
6733/* Disable the VGA plane that we never use */
6734static void i915_disable_vga(struct drm_device *dev)
6735{
6736 struct drm_i915_private *dev_priv = dev->dev_private;
6737 u8 sr1;
6738 u32 vga_reg;
6739
6740 if (HAS_PCH_SPLIT(dev))
6741 vga_reg = CPU_VGACNTRL;
6742 else
6743 vga_reg = VGACNTRL;
6744
6745 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 6746 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
6747 sr1 = inb(VGA_SR_DATA);
6748 outb(sr1 | 1<<5, VGA_SR_DATA);
6749 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6750 udelay(300);
6751
6752 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
6753 POSTING_READ(vga_reg);
6754}
6755
f82cfb6b
JB
6756static void ivb_pch_pwm_override(struct drm_device *dev)
6757{
6758 struct drm_i915_private *dev_priv = dev->dev_private;
6759
6760 /*
6761 * IVB has CPU eDP backlight regs too, set things up to let the
6762 * PCH regs control the backlight
6763 */
6764 I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
6765 I915_WRITE(BLC_PWM_CPU_CTL, 0);
6766 I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
6767}
6768
f817586c
DV
6769void intel_modeset_init_hw(struct drm_device *dev)
6770{
6771 struct drm_i915_private *dev_priv = dev->dev_private;
6772
6773 intel_init_clock_gating(dev);
6774
6775 if (IS_IRONLAKE_M(dev)) {
6776 ironlake_enable_drps(dev);
1833b134 6777 ironlake_enable_rc6(dev);
f817586c
DV
6778 intel_init_emon(dev);
6779 }
6780
b6834bd6 6781 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
f817586c
DV
6782 gen6_enable_rps(dev_priv);
6783 gen6_update_ring_freq(dev_priv);
6784 }
f82cfb6b
JB
6785
6786 if (IS_IVYBRIDGE(dev))
6787 ivb_pch_pwm_override(dev);
f817586c
DV
6788}
6789
 6790 void intel_modeset_init(struct drm_device *dev)
 6791 {
 6792 	struct drm_i915_private *dev_priv = dev->dev_private;
 6793 	int i, ret;
 6794 
 6795 	drm_mode_config_init(dev);
 6796 
 6797 	dev->mode_config.min_width = 0;
 6798 	dev->mode_config.min_height = 0;
 6799 
 6800 	dev->mode_config.preferred_depth = 24;
 6801 	dev->mode_config.prefer_shadow = 1;
 6802 
 6803 	dev->mode_config.funcs = (void *)&intel_mode_funcs;
 6804 
 6805 	intel_init_quirks(dev);
 6806 
 6807 	intel_init_pm(dev);
 6808 
 6809 	intel_prepare_ddi(dev);
 6810 
 6811 	intel_init_display(dev);
 6812 
 6813 	if (IS_GEN2(dev)) {
 6814 		dev->mode_config.max_width = 2048;
 6815 		dev->mode_config.max_height = 2048;
 6816 	} else if (IS_GEN3(dev)) {
 6817 		dev->mode_config.max_width = 4096;
 6818 		dev->mode_config.max_height = 4096;
 6819 	} else {
 6820 		dev->mode_config.max_width = 8192;
 6821 		dev->mode_config.max_height = 8192;
 6822 	}
 6823 	dev->mode_config.fb_base = dev->agp->base;
 6824 
 6825 	DRM_DEBUG_KMS("%d display pipe%s available.\n",
 6826 		      dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
 6827 
 6828 	for (i = 0; i < dev_priv->num_pipe; i++) {
 6829 		intel_crtc_init(dev, i);
 6830 		ret = intel_plane_init(dev, i);
 6831 		if (ret)
 6832 			DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
 6833 	}
 6834 
 6835 	intel_pch_pll_init(dev);
 6836 
 6837 	/* Just disable it once at startup */
 6838 	i915_disable_vga(dev);
 6839 	intel_setup_outputs(dev);
 6840 
 6841 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 6842 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 6843 		    (unsigned long)dev);
6844}
6845
6846void intel_modeset_gem_init(struct drm_device *dev)
6847{
 6848 	intel_modeset_init_hw(dev);
 6849 
 6850 	intel_setup_overlay(dev);
6851}
6852
6853void intel_modeset_cleanup(struct drm_device *dev)
6854{
 6855 	struct drm_i915_private *dev_priv = dev->dev_private;
 6856 	struct drm_crtc *crtc;
 6857 	struct intel_crtc *intel_crtc;
 6858 
 6859 	drm_kms_helper_poll_fini(dev);
 6860 	mutex_lock(&dev->struct_mutex);
 6861 
 6862 	intel_unregister_dsm_handler();
 6863 
 6864 
 6865 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 6866 		/* Skip inactive CRTCs */
 6867 		if (!crtc->fb)
 6868 			continue;
 6869 
 6870 		intel_crtc = to_intel_crtc(crtc);
 6871 		intel_increase_pllclock(crtc);
 6872 	}
 6873 
 6874 	intel_disable_fbc(dev);
 6875 
 6876 	if (IS_IRONLAKE_M(dev))
 6877 		ironlake_disable_drps(dev);
 6878 	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
 6879 		gen6_disable_rps(dev);
 6880 
 6881 	if (IS_IRONLAKE_M(dev))
 6882 		ironlake_disable_rc6(dev);
 6883 
 6884 	if (IS_VALLEYVIEW(dev))
 6885 		vlv_init_dpio(dev);
 6886 
 6887 	mutex_unlock(&dev->struct_mutex);
 6888 
 6889 	/* Disable the irq before mode object teardown, since the irq might
 6890 	 * enqueue unpin/hotplug work. */
 6891 	drm_irq_uninstall(dev);
 6892 	cancel_work_sync(&dev_priv->hotplug_work);
 6893 	cancel_work_sync(&dev_priv->rps_work);
 6894 
 6895 	/* flush any delayed tasks or pending work */
 6896 	flush_scheduled_work();
 6897 
 6898 	/* Shut off idle work before the crtcs get freed. */
 6899 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 6900 		intel_crtc = to_intel_crtc(crtc);
 6901 		del_timer_sync(&intel_crtc->idle_timer);
 6902 	}
 6903 	del_timer_sync(&dev_priv->idle_timer);
 6904 	cancel_work_sync(&dev_priv->idle_work);
 6905 
6906 drm_mode_config_cleanup(dev);
6907}
6908
 6909 /*
 6910  * Return the encoder currently attached to the connector.
 6911  */
 6912 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
 6913 {
 6914 	return &intel_attached_encoder(connector)->base;
 6915 }
 6916 
 6917 void intel_connector_attach_encoder(struct intel_connector *connector,
 6918 				    struct intel_encoder *encoder)
 6919 {
 6920 	connector->encoder = encoder;
 6921 	drm_mode_connector_attach_encoder(&connector->base,
 6922 					  &encoder->base);
 6923 }
6924
6925/*
6926 * set vga decode state - true == enable VGA decode
6927 */
6928int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
6929{
6930 struct drm_i915_private *dev_priv = dev->dev_private;
6931 u16 gmch_ctrl;
6932
6933 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
6934 if (state)
6935 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
6936 else
6937 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
6938 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
6939 return 0;
6940}
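A typical caller is the VGA arbiter decode callback registered elsewhere in the driver; a sketch of that shape is shown below, with the function name and the exact return-value policy assumed for illustration rather than quoted from the source.

static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	/* Report which VGA resources this device still decodes. */
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}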
6941
6942#ifdef CONFIG_DEBUG_FS
6943#include <linux/seq_file.h>
6944
6945struct intel_display_error_state {
6946 struct intel_cursor_error_state {
6947 u32 control;
6948 u32 position;
6949 u32 base;
6950 u32 size;
6951 } cursor[2];
6952
6953 struct intel_pipe_error_state {
6954 u32 conf;
6955 u32 source;
6956
6957 u32 htotal;
6958 u32 hblank;
6959 u32 hsync;
6960 u32 vtotal;
6961 u32 vblank;
6962 u32 vsync;
6963 } pipe[2];
6964
6965 struct intel_plane_error_state {
6966 u32 control;
6967 u32 stride;
6968 u32 size;
6969 u32 pos;
6970 u32 addr;
6971 u32 surface;
6972 u32 tile_offset;
6973 } plane[2];
6974};
6975
6976struct intel_display_error_state *
6977intel_display_capture_error_state(struct drm_device *dev)
6978{
 6979 	drm_i915_private_t *dev_priv = dev->dev_private;
6980 struct intel_display_error_state *error;
6981 int i;
6982
6983 error = kmalloc(sizeof(*error), GFP_ATOMIC);
6984 if (error == NULL)
6985 return NULL;
6986
6987 for (i = 0; i < 2; i++) {
6988 error->cursor[i].control = I915_READ(CURCNTR(i));
6989 error->cursor[i].position = I915_READ(CURPOS(i));
6990 error->cursor[i].base = I915_READ(CURBASE(i));
6991
6992 error->plane[i].control = I915_READ(DSPCNTR(i));
6993 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
6994 error->plane[i].size = I915_READ(DSPSIZE(i));
 6995 		error->plane[i].pos = I915_READ(DSPPOS(i));
6996 error->plane[i].addr = I915_READ(DSPADDR(i));
6997 if (INTEL_INFO(dev)->gen >= 4) {
6998 error->plane[i].surface = I915_READ(DSPSURF(i));
6999 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
7000 }
7001
7002 error->pipe[i].conf = I915_READ(PIPECONF(i));
7003 error->pipe[i].source = I915_READ(PIPESRC(i));
7004 error->pipe[i].htotal = I915_READ(HTOTAL(i));
7005 error->pipe[i].hblank = I915_READ(HBLANK(i));
7006 error->pipe[i].hsync = I915_READ(HSYNC(i));
7007 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
7008 error->pipe[i].vblank = I915_READ(VBLANK(i));
7009 error->pipe[i].vsync = I915_READ(VSYNC(i));
7010 }
7011
7012 return error;
7013}
7014
7015void
7016intel_display_print_error_state(struct seq_file *m,
7017 struct drm_device *dev,
7018 struct intel_display_error_state *error)
7019{
7020 int i;
7021
7022 for (i = 0; i < 2; i++) {
7023 seq_printf(m, "Pipe [%d]:\n", i);
7024 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
7025 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
7026 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
7027 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
7028 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
7029 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
7030 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
7031 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
7032
7033 seq_printf(m, "Plane [%d]:\n", i);
7034 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
7035 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
7036 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
7037 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
7038 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
7039 if (INTEL_INFO(dev)->gen >= 4) {
7040 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
7041 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
7042 }
7043
7044 seq_printf(m, "Cursor [%d]:\n", i);
7045 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
7046 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
7047 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
7048 }
7049}
7050#endif
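The two functions above are meant to be used as a pair: capture from atomic context when a hang is noticed (hence the GFP_ATOMIC allocation), then print later from a debugfs seq_file handler. A hypothetical helper combining both steps, assuming CONFIG_DEBUG_FS and a seq_file already set up by the caller:

static void intel_display_show_error(struct seq_file *m, struct drm_device *dev)
{
	struct intel_display_error_state *display;

	display = intel_display_capture_error_state(dev);
	if (display == NULL)
		return;

	intel_display_print_error_state(m, dev, display);
	kfree(display);	/* the capture kmalloc'ed the error state */
}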