drm/i915: simplify intel_crtc_driving_pch
drivers/gpu/drm/i915/intel_display.c
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

typedef struct {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
} intel_clock_t;

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
	bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
			  int, int, intel_clock_t *, intel_clock_t *);
};

/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */

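/* Read the raw PCH reference clock frequency field; only meaningful on
 * PCH-split platforms. */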
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock);
static bool
intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock);

static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock);

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 10, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
	.find_pll = intel_find_best_PLL,
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_g4x_display_port = {
	.dot = { .min = 161670, .max = 227000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 97, .max = 108 },
	.m1 = { .min = 0x10, .max = 0x12 },
	.m2 = { .min = 0x05, .max = 0x06 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_g4x_dp,
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_find_best_PLL,
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_find_best_PLL,
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
	.find_pll = intel_g4x_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_display_port = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000},
	.n = { .min = 1, .max = 2 },
	.m = { .min = 81, .max = 90 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 10, .max = 20 },
	.p1 = { .min = 1, .max = 2},
	.p2 = { .dot_limit = 0,
		.p2_slow = 10, .p2_fast = 10 },
	.find_pll = intel_find_pll_ironlake_dp,
};

static const intel_limit_t intel_limits_vlv_dac = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_hdmi = {
	.dot = { .min = 20000, .max = 165000 },
	.vco = { .min = 4000000, .max = 5994000},
	.n = { .min = 1, .max = 7 },
	.m = { .min = 60, .max = 300 }, /* guess */
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

static const intel_limit_t intel_limits_vlv_dp = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m = { .min = 22, .max = 450 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 2, .p2_fast = 20 },
	.find_pll = intel_vlv_find_best_pll,
};

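/* DPIO sideband access: read/write display PHY registers through the
 * DPIO_PKT mailbox, serialized by dpio_lock. */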
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
	unsigned long flags;
	u32 val = 0;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO read wait timed out\n");
		goto out_unlock;
	}
	val = I915_READ(DPIO_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return val;
}

static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
			     u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
		DRM_ERROR("DPIO idle wait timed out\n");
		goto out_unlock;
	}

	I915_WRITE(DPIO_DATA, val);
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
		   DPIO_BYTE);
	if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
		DRM_ERROR("DPIO write wait timed out\n");

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}

static void vlv_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Reset the DPIO config */
	I915_WRITE(DPIO_CTL, 0);
	POSTING_READ(DPIO_CTL);
	I915_WRITE(DPIO_CTL, 1);
	POSTING_READ(DPIO_CTL);
}

static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
	DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
	return 1;
}

static const struct dmi_system_id intel_dual_link_lvds[] = {
	{
		.callback = intel_dual_link_lvds_callback,
		.ident = "Apple MacBook Pro (Core i5/i7 Series)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
		},
	},
	{ }	/* terminating entry */
};

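/* Decide whether the LVDS port is wired for dual-channel (dual-link)
 * operation, based on the module option, a DMI quirk, or the LVDS
 * register/VBT value. */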
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
			      unsigned int reg)
{
	unsigned int val;

	/* use the module option value if specified */
	if (i915_lvds_channel_mode > 0)
		return i915_lvds_channel_mode == 2;

	if (dmi_check_system(intel_dual_link_lvds))
		return true;

	if (dev_priv->lvds_val)
		val = dev_priv->lvds_val;
	else {
		/* BIOS should set the proper LVDS register value at boot, but
		 * in reality, it doesn't set the value when the lid is closed;
		 * we need to check "the value to be set" in VBT when LVDS
		 * register is uninitialized.
		 */
		val = I915_READ(reg);
		if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
			val = dev_priv->bios_lvds_val;
		dev_priv->lvds_val = val;
	}
	return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
			/* LVDS dual channel */
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
		   HAS_eDP)
		limit = &intel_limits_ironlake_display_port;
	else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (is_dual_link_lvds(dev_priv, LVDS))
			/* LVDS with dual channel */
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			/* LVDS with single channel */
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
		limit = &intel_limits_g4x_display_port;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

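/* Select the PLL limit table for this CRTC based on the platform generation
 * and the type of output it is driving. */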
static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
			limit = &intel_limits_vlv_dac;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
			limit = &intel_limits_vlv_hdmi;
		else
			limit = &intel_limits_vlv_dp;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else
			limit = &intel_limits_i8xx_dvo;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
	if (IS_PINEVIEW(dev)) {
		pineview_clock(refclk, clock);
		return;
	}
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

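/* Brute-force the m1/m2/n/p1 divisor space and keep the combination whose
 * resulting dot clock is closest to the requested target. */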
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)

{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	    (I915_READ(LVDS)) != 0) {
		/*
		 * For LVDS, if the panel is on, just rely on its current
		 * settings for dual-channel.  We haven't figured out how to
		 * reliably set up different single/dual channel state, if we
		 * even can.
		 */
		if (is_dual_link_lvds(dev_priv, LVDS))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 is always 0 in Pineview */
			if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

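/* g4x variant: prefers smaller n and larger m1/m2 (per hardware
 * requirements) and accepts the best match within roughly 0.585% of the
 * target dot clock. */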
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		int lvds_reg;

		if (HAS_PCH_SPLIT(dev))
			lvds_reg = PCH_LVDS;
		else
			lvds_reg = LVDS;
		if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					intel_clock(dev, refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

static bool
intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
			   int target, int refclk, intel_clock_t *match_clock,
			   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;

	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
		clock.p2 = 10;
		clock.m1 = 12;
		clock.m2 = 9;
	} else {
		clock.n = 2;
		clock.p1 = 1;
		clock.p2 = 10;
		clock.m1 = 14;
		clock.m2 = 8;
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}

/* DisplayPort has only two frequencies, 162MHz and 270MHz */
static bool
intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
		      int target, int refclk, intel_clock_t *match_clock,
		      intel_clock_t *best_clock)
{
	intel_clock_t clock;
	if (target < 200000) {
		clock.p1 = 2;
		clock.p2 = 10;
		clock.n = 2;
		clock.m1 = 23;
		clock.m2 = 8;
	} else {
		clock.p1 = 1;
		clock.p2 = 10;
		clock.n = 1;
		clock.m1 = 14;
		clock.m2 = 2;
	}
	clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
	clock.p = (clock.p1 * clock.p2);
	clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
	clock.vco = 0;
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
	return true;
}
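
/* Valleyview: walk n/p1/p2/m1, derive m2 from the target clock, and keep the
 * divisor set with the smallest ppm error versus the requested frequency. */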
static bool
intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *match_clock,
			intel_clock_t *best_clock)
{
	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
	u32 m, n, fastclk;
	u32 updrate, minupdate, fracbits, p;
	unsigned long bestppm, ppm, absppm;
	int dotclk, flag;

	flag = 0;
	dotclk = target * 1000;
	bestppm = 1000000;
	ppm = absppm = 0;
	fastclk = dotclk / (2*100);
	updrate = 0;
	minupdate = 19200;
	fracbits = 1;
	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
	bestm1 = bestm2 = bestp1 = bestp2 = 0;

	/* based on hardware requirement, prefer smaller n to precision */
	for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
		updrate = refclk / n;
		for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
			for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
				if (p2 > 10)
					p2 = p2 - 1;
				p = p1 * p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
					m2 = (((2*(fastclk * p * n / m1 )) +
					       refclk) / (2*refclk));
					m = m1 * m2;
					vco = updrate * m;
					if (vco >= limit->vco.min && vco < limit->vco.max) {
						ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
						absppm = (ppm > 0) ? ppm : (-ppm);
						if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
							bestppm = 0;
							flag = 1;
						}
						if (absppm < bestppm - 10) {
							bestppm = absppm;
							flag = 1;
						}
						if (flag) {
							bestn = n;
							bestm1 = m1;
							bestm2 = m2;
							bestp1 = p1;
							bestp2 = p2;
							flag = 0;
						}
					}
				}
			}
		}
	}
	best_clock->n = bestn;
	best_clock->m1 = bestm1;
	best_clock->m2 = bestm2;
	best_clock->p1 = bestp1;
	best_clock->p2 = bestp2;

	return true;
}

static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe.  Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event.  Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout.  Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(pipe);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
static void assert_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_pll_enabled(d, p) assert_pll(d, p, true)
#define assert_pll_disabled(d, p) assert_pll(d, p, false)

/* For ILK+ */
static void assert_pch_pll(struct drm_i915_private *dev_priv,
			   struct intel_pch_pll *pll,
			   struct intel_crtc *crtc,
			   bool state)
{
	u32 val;
	bool cur_state;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (WARN (!pll,
		  "asserting PCH PLL %s with no PLL\n", state_string(state)))
		return;

	val = I915_READ(pll->pll_reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
	     pll->pll_reg, state_string(state), state_string(cur_state), val);

	/* Make sure the selected PLL is correctly attached to the transcoder */
	if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
		u32 pch_dpll;

		pch_dpll = I915_READ(PCH_DPLL_SEL);
		cur_state = pll->pll_reg == _PCH_DPLL_B;
		if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
			  "PLL[%d] not attached to this transcoder %d: %08x\n",
			  cur_state, crtc->pipe, pch_dpll)) {
			cur_state = !!(val >> (4*crtc->pipe + 3));
			WARN(cur_state != state,
			     "PLL[%d] not %s on this transcoder %d: %08x\n",
			     pll->pll_reg == _PCH_DPLL_B,
			     state_string(state),
			     crtc->pipe,
			     val);
		}
	}
}
#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (IS_HASWELL(dev_priv->dev)) {
		/* On Haswell, DDI is used instead of FDI_TX_CTL */
		reg = DDI_FUNC_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
		return;
	} else {
		reg = FDI_RX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_RX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (IS_HASWELL(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
		DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
		return;
	}
	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
}

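/* Warn if the panel power sequencer still has this pipe's registers
 * write-protected (panel on and unlock key not written). */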
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	reg = PIPECONF(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & PIPECONF_ENABLE);
	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Planes are fixed to pipes on ILK+ */
	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for (i = 0; i < 2; i++) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
		return;
	}

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
				       enum pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

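/* Helpers that decode whether a DP/HDMI/LVDS/VGA port register value means
 * the port is currently driving the given pipe: CPT uses the transcoder
 * select field, older PCHs encode the pipe in the port register itself. */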
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & PORT_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
}

/**
 * intel_enable_pll - enable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * Enable @pipe's PLL so we can start pumping pixels from a plane.  Check to
 * make sure the PLL reg is writable first though, since the panel write
 * protect mechanism may be enabled.
 *
 * Note!  This is for pre-ILK only.
 *
 * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
 */
static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;

	/* We do this three times for luck */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * intel_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	int reg;
	u32 val;

	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	reg = DPLL(pipe);
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
}

/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		goto out_unlock;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_DATA,
		   value);
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRWR);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
		goto out_unlock;
	}

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}

static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
{
	unsigned long flags;
	u32 value = 0;

	spin_lock_irqsave(&dev_priv->dpio_lock, flags);
	if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to become ready\n");
		goto out_unlock;
	}

	I915_WRITE(SBI_ADDR,
		   (reg << 16));
	I915_WRITE(SBI_CTL_STAT,
		   SBI_BUSY |
		   SBI_CTL_OP_CRRD);

	if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
		     100)) {
		DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
		goto out_unlock;
	}

	value = I915_READ(SBI_DATA);

out_unlock:
	spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
	return value;
}

/**
 * intel_enable_pch_pll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll;
	int reg;
	u32 val;

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	pll = intel_crtc->pch_pll;
	if (pll == NULL)
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	/* PCH refclock must be enabled first */
	assert_pch_refclk_enabled(dev_priv);

	if (pll->active++ && pll->on) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val |= DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = true;
}

static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
	struct intel_pch_pll *pll = intel_crtc->pch_pll;
	int reg;
	u32 val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	if (pll == NULL)
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
		      pll->pll_reg, pll->active, pll->on,
		      intel_crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_pch_pll_disabled(dev_priv, pll, NULL);
		return;
	}

	if (--pll->active) {
		assert_pch_pll_enabled(dev_priv, pll, NULL);
		return;
	}

	DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);

	/* Make sure transcoder isn't still depending on us */
	assert_transcoder_disabled(dev_priv, intel_crtc->pipe);

	reg = pll->pll_reg;
	val = I915_READ(reg);
	val &= ~DPLL_VCO_ENABLE;
	I915_WRITE(reg, val);
	POSTING_READ(reg);
	udelay(200);

	pll->on = false;
}

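/* Enable the PCH transcoder feeding this pipe; requires the PCH PLL to be
 * enabled and FDI TX/RX already running. */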
040484af
JB
1658static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1659 enum pipe pipe)
1660{
1661 int reg;
5f7f726d 1662 u32 val, pipeconf_val;
7c26e5c6 1663 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
040484af
JB
1664
1665 /* PCH only available on ILK+ */
1666 BUG_ON(dev_priv->info->gen < 5);
1667
1668 /* Make sure PCH DPLL is enabled */
92b27b08
CW
1669 assert_pch_pll_enabled(dev_priv,
1670 to_intel_crtc(crtc)->pch_pll,
1671 to_intel_crtc(crtc));
040484af
JB
1672
1673 /* FDI must be feeding us bits for PCH ports */
1674 assert_fdi_tx_enabled(dev_priv, pipe);
1675 assert_fdi_rx_enabled(dev_priv, pipe);
1676
59c859d6
ED
1677 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1678 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1679 return;
1680 }
040484af
JB
1681 reg = TRANSCONF(pipe);
1682 val = I915_READ(reg);
5f7f726d 1683 pipeconf_val = I915_READ(PIPECONF(pipe));
e9bcff5c
JB
1684
1685 if (HAS_PCH_IBX(dev_priv->dev)) {
1686 /*
1687 * make the BPC in transcoder be consistent with
1688 * that in pipeconf reg.
1689 */
1690 val &= ~PIPE_BPC_MASK;
5f7f726d 1691 val |= pipeconf_val & PIPE_BPC_MASK;
e9bcff5c 1692 }
5f7f726d
PZ
1693
1694 val &= ~TRANS_INTERLACE_MASK;
1695 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
7c26e5c6
PZ
1696 if (HAS_PCH_IBX(dev_priv->dev) &&
1697 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1698 val |= TRANS_LEGACY_INTERLACED_ILK;
1699 else
1700 val |= TRANS_INTERLACED;
5f7f726d
PZ
1701 else
1702 val |= TRANS_PROGRESSIVE;
1703
040484af
JB
1704 I915_WRITE(reg, val | TRANS_ENABLE);
1705 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1706 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1707}
1708
1709static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1710 enum pipe pipe)
1711{
1712 int reg;
1713 u32 val;
1714
1715 /* FDI relies on the transcoder */
1716 assert_fdi_tx_disabled(dev_priv, pipe);
1717 assert_fdi_rx_disabled(dev_priv, pipe);
1718
291906f1
JB
1719 /* Ports must be off as well */
1720 assert_pch_ports_disabled(dev_priv, pipe);
1721
040484af
JB
1722 reg = TRANSCONF(pipe);
1723 val = I915_READ(reg);
1724 val &= ~TRANS_ENABLE;
1725 I915_WRITE(reg, val);
1726 /* wait for PCH transcoder off, transcoder state */
1727 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4c9c18c2 1728 DRM_ERROR("failed to disable transcoder %d\n", pipe);
040484af
JB
1729}
1730
b24e7179 1731/**
309cfea8 1732 * intel_enable_pipe - enable a pipe, asserting requirements
b24e7179
JB
1733 * @dev_priv: i915 private structure
1734 * @pipe: pipe to enable
040484af 1735 * @pch_port: on ILK+, is this pipe driving a PCH port or not
b24e7179
JB
1736 *
1737 * Enable @pipe, making sure that various hardware specific requirements
1738 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1739 *
1740 * @pipe should be %PIPE_A or %PIPE_B.
1741 *
1742 * Will wait until the pipe is actually running (i.e. first vblank) before
1743 * returning.
1744 */
040484af
JB
1745static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1746 bool pch_port)
b24e7179
JB
1747{
1748 int reg;
1749 u32 val;
1750
1751 /*
1752 * A pipe without a PLL won't actually be able to drive bits from
1753 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1754 * need the check.
1755 */
1756 if (!HAS_PCH_SPLIT(dev_priv->dev))
1757 assert_pll_enabled(dev_priv, pipe);
040484af
JB
1758 else {
1759 if (pch_port) {
1760 /* if driving the PCH, we need FDI enabled */
1761 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1762 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1763 }
1764 /* FIXME: assert CPU port conditions for SNB+ */
1765 }
b24e7179
JB
1766
1767 reg = PIPECONF(pipe);
1768 val = I915_READ(reg);
00d70b15
CW
1769 if (val & PIPECONF_ENABLE)
1770 return;
1771
1772 I915_WRITE(reg, val | PIPECONF_ENABLE);
b24e7179
JB
1773 intel_wait_for_vblank(dev_priv->dev, pipe);
1774}
1775
1776/**
309cfea8 1777 * intel_disable_pipe - disable a pipe, asserting requirements
b24e7179
JB
1778 * @dev_priv: i915 private structure
1779 * @pipe: pipe to disable
1780 *
1781 * Disable @pipe, making sure that various hardware specific requirements
1782 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1783 *
1784 * @pipe should be %PIPE_A or %PIPE_B.
1785 *
1786 * Will wait until the pipe has shut down before returning.
1787 */
1788static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1789 enum pipe pipe)
1790{
1791 int reg;
1792 u32 val;
1793
1794 /*
1795 * Make sure planes won't keep trying to pump pixels to us,
1796 * or we might hang the display.
1797 */
1798 assert_planes_disabled(dev_priv, pipe);
1799
1800 /* Don't disable pipe A or pipe A PLLs if the PIPEA_FORCE quirk requires them to stay on */
1801 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1802 return;
1803
1804 reg = PIPECONF(pipe);
1805 val = I915_READ(reg);
00d70b15
CW
1806 if ((val & PIPECONF_ENABLE) == 0)
1807 return;
1808
1809 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
b24e7179
JB
1810 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1811}
1812
d74362c9
KP
1813/*
1814 * Plane regs are double buffered, going from enabled->disabled needs a
1815 * trigger in order to latch. The display address reg provides this.
1816 */
6f1d69b0 1817void intel_flush_display_plane(struct drm_i915_private *dev_priv,
d74362c9
KP
1818 enum plane plane)
1819{
1820 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1821 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1822}
1823
b24e7179
JB
1824/**
1825 * intel_enable_plane - enable a display plane on a given pipe
1826 * @dev_priv: i915 private structure
1827 * @plane: plane to enable
1828 * @pipe: pipe being fed
1829 *
1830 * Enable @plane on @pipe, making sure that @pipe is running first.
1831 */
1832static void intel_enable_plane(struct drm_i915_private *dev_priv,
1833 enum plane plane, enum pipe pipe)
1834{
1835 int reg;
1836 u32 val;
1837
1838 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1839 assert_pipe_enabled(dev_priv, pipe);
1840
1841 reg = DSPCNTR(plane);
1842 val = I915_READ(reg);
00d70b15
CW
1843 if (val & DISPLAY_PLANE_ENABLE)
1844 return;
1845
1846 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
d74362c9 1847 intel_flush_display_plane(dev_priv, plane);
b24e7179
JB
1848 intel_wait_for_vblank(dev_priv->dev, pipe);
1849}
1850
b24e7179
JB
1851/**
1852 * intel_disable_plane - disable a display plane
1853 * @dev_priv: i915 private structure
1854 * @plane: plane to disable
1855 * @pipe: pipe consuming the data
1856 *
1857 * Disable @plane; should be an independent operation.
1858 */
1859static void intel_disable_plane(struct drm_i915_private *dev_priv,
1860 enum plane plane, enum pipe pipe)
1861{
1862 int reg;
1863 u32 val;
1864
1865 reg = DSPCNTR(plane);
1866 val = I915_READ(reg);
00d70b15
CW
1867 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1868 return;
1869
1870 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
b24e7179
JB
1871 intel_flush_display_plane(dev_priv, plane);
1872 intel_wait_for_vblank(dev_priv->dev, pipe);
1873}
1874
127bd2ac 1875int
48b956c5 1876intel_pin_and_fence_fb_obj(struct drm_device *dev,
05394f39 1877 struct drm_i915_gem_object *obj,
919926ae 1878 struct intel_ring_buffer *pipelined)
6b95a207 1879{
ce453d81 1880 struct drm_i915_private *dev_priv = dev->dev_private;
6b95a207
KH
1881 u32 alignment;
1882 int ret;
1883
05394f39 1884 switch (obj->tiling_mode) {
6b95a207 1885 case I915_TILING_NONE:
534843da
CW
1886 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1887 alignment = 128 * 1024;
a6c45cf0 1888 else if (INTEL_INFO(dev)->gen >= 4)
534843da
CW
1889 alignment = 4 * 1024;
1890 else
1891 alignment = 64 * 1024;
6b95a207
KH
1892 break;
1893 case I915_TILING_X:
1894 /* pin() will align the object as required by fence */
1895 alignment = 0;
1896 break;
1897 case I915_TILING_Y:
1898 /* FIXME: Is this true? */
1899 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1900 return -EINVAL;
1901 default:
1902 BUG();
1903 }
1904
ce453d81 1905 dev_priv->mm.interruptible = false;
2da3b9b9 1906 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
48b956c5 1907 if (ret)
ce453d81 1908 goto err_interruptible;
6b95a207
KH
1909
1910 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1911 * fence, whereas 965+ only requires a fence if using
1912 * framebuffer compression. For simplicity, we always install
1913 * a fence as the cost is not that onerous.
1914 */
06d98131 1915 ret = i915_gem_object_get_fence(obj);
9a5a53b3
CW
1916 if (ret)
1917 goto err_unpin;
1690e1eb 1918
9a5a53b3 1919 i915_gem_object_pin_fence(obj);
6b95a207 1920
ce453d81 1921 dev_priv->mm.interruptible = true;
6b95a207 1922 return 0;
48b956c5
CW
1923
1924err_unpin:
1925 i915_gem_object_unpin(obj);
ce453d81
CW
1926err_interruptible:
1927 dev_priv->mm.interruptible = true;
48b956c5 1928 return ret;
6b95a207
KH
1929}
1930
1690e1eb
CW
1931void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1932{
1933 i915_gem_object_unpin_fence(obj);
1934 i915_gem_object_unpin(obj);
1935}
1936
c2c75131
DV
1937 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
1938 * is assumed to be a power of two. */
1939static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
1940 unsigned int bpp,
1941 unsigned int pitch)
1942{
1943 int tile_rows, tiles;
1944
1945 tile_rows = *y / 8;
1946 *y %= 8;
1947 tiles = *x / (512/bpp);
1948 *x %= 512/bpp;
1949
1950 return tile_rows * pitch * 8 + tiles * 4096;
1951}
1952
17638cd6
JB
1953static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1954 int x, int y)
81255565
JB
1955{
1956 struct drm_device *dev = crtc->dev;
1957 struct drm_i915_private *dev_priv = dev->dev_private;
1958 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1959 struct intel_framebuffer *intel_fb;
05394f39 1960 struct drm_i915_gem_object *obj;
81255565 1961 int plane = intel_crtc->plane;
e506a0c6 1962 unsigned long linear_offset;
81255565 1963 u32 dspcntr;
5eddb70b 1964 u32 reg;
81255565
JB
1965
1966 switch (plane) {
1967 case 0:
1968 case 1:
1969 break;
1970 default:
1971 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1972 return -EINVAL;
1973 }
1974
1975 intel_fb = to_intel_framebuffer(fb);
1976 obj = intel_fb->obj;
81255565 1977
5eddb70b
CW
1978 reg = DSPCNTR(plane);
1979 dspcntr = I915_READ(reg);
81255565
JB
1980 /* Mask out pixel format bits in case we change it */
1981 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
1982 switch (fb->bits_per_pixel) {
1983 case 8:
1984 dspcntr |= DISPPLANE_8BPP;
1985 break;
1986 case 16:
1987 if (fb->depth == 15)
1988 dspcntr |= DISPPLANE_15_16BPP;
1989 else
1990 dspcntr |= DISPPLANE_16BPP;
1991 break;
1992 case 24:
1993 case 32:
1994 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
1995 break;
1996 default:
17638cd6 1997 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
81255565
JB
1998 return -EINVAL;
1999 }
a6c45cf0 2000 if (INTEL_INFO(dev)->gen >= 4) {
05394f39 2001 if (obj->tiling_mode != I915_TILING_NONE)
81255565
JB
2002 dspcntr |= DISPPLANE_TILED;
2003 else
2004 dspcntr &= ~DISPPLANE_TILED;
2005 }
2006
5eddb70b 2007 I915_WRITE(reg, dspcntr);
81255565 2008
e506a0c6 2009 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
81255565 2010
c2c75131
DV
2011 if (INTEL_INFO(dev)->gen >= 4) {
2012 intel_crtc->dspaddr_offset =
2013 gen4_compute_dspaddr_offset_xtiled(&x, &y,
2014 fb->bits_per_pixel / 8,
2015 fb->pitches[0]);
2016 linear_offset -= intel_crtc->dspaddr_offset;
2017 } else {
e506a0c6 2018 intel_crtc->dspaddr_offset = linear_offset;
c2c75131 2019 }
e506a0c6
DV
2020
2021 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2022 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
01f2c773 2023 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
a6c45cf0 2024 if (INTEL_INFO(dev)->gen >= 4) {
c2c75131
DV
2025 I915_MODIFY_DISPBASE(DSPSURF(plane),
2026 obj->gtt_offset + intel_crtc->dspaddr_offset);
5eddb70b 2027 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
e506a0c6 2028 I915_WRITE(DSPLINOFF(plane), linear_offset);
5eddb70b 2029 } else
e506a0c6 2030 I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
5eddb70b 2031 POSTING_READ(reg);
81255565 2032
17638cd6
JB
2033 return 0;
2034}
2035
2036static int ironlake_update_plane(struct drm_crtc *crtc,
2037 struct drm_framebuffer *fb, int x, int y)
2038{
2039 struct drm_device *dev = crtc->dev;
2040 struct drm_i915_private *dev_priv = dev->dev_private;
2041 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2042 struct intel_framebuffer *intel_fb;
2043 struct drm_i915_gem_object *obj;
2044 int plane = intel_crtc->plane;
e506a0c6 2045 unsigned long linear_offset;
17638cd6
JB
2046 u32 dspcntr;
2047 u32 reg;
2048
2049 switch (plane) {
2050 case 0:
2051 case 1:
27f8227b 2052 case 2:
17638cd6
JB
2053 break;
2054 default:
2055 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2056 return -EINVAL;
2057 }
2058
2059 intel_fb = to_intel_framebuffer(fb);
2060 obj = intel_fb->obj;
2061
2062 reg = DSPCNTR(plane);
2063 dspcntr = I915_READ(reg);
2064 /* Mask out pixel format bits in case we change it */
2065 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2066 switch (fb->bits_per_pixel) {
2067 case 8:
2068 dspcntr |= DISPPLANE_8BPP;
2069 break;
2070 case 16:
2071 if (fb->depth != 16)
2072 return -EINVAL;
2073
2074 dspcntr |= DISPPLANE_16BPP;
2075 break;
2076 case 24:
2077 case 32:
2078 if (fb->depth == 24)
2079 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2080 else if (fb->depth == 30)
2081 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2082 else
2083 return -EINVAL;
2084 break;
2085 default:
2086 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2087 return -EINVAL;
2088 }
2089
2090 if (obj->tiling_mode != I915_TILING_NONE)
2091 dspcntr |= DISPPLANE_TILED;
2092 else
2093 dspcntr &= ~DISPPLANE_TILED;
2094
2095 /* Trickle feed must be disabled for Ironlake+ scanout planes */
2096 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2097
2098 I915_WRITE(reg, dspcntr);
2099
e506a0c6 2100 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
c2c75131
DV
2101 intel_crtc->dspaddr_offset =
2102 gen4_compute_dspaddr_offset_xtiled(&x, &y,
2103 fb->bits_per_pixel / 8,
2104 fb->pitches[0]);
2105 linear_offset -= intel_crtc->dspaddr_offset;
17638cd6 2106
e506a0c6
DV
2107 DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
2108 obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
01f2c773 2109 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
c2c75131
DV
2110 I915_MODIFY_DISPBASE(DSPSURF(plane),
2111 obj->gtt_offset + intel_crtc->dspaddr_offset);
17638cd6 2112 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
e506a0c6 2113 I915_WRITE(DSPLINOFF(plane), linear_offset);
17638cd6
JB
2114 POSTING_READ(reg);
2115
2116 return 0;
2117}
2118
2119/* Assume fb object is pinned & idle & fenced and just update base pointers */
2120static int
2121intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2122 int x, int y, enum mode_set_atomic state)
2123{
2124 struct drm_device *dev = crtc->dev;
2125 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 2126
6b8e6ed0
CW
2127 if (dev_priv->display.disable_fbc)
2128 dev_priv->display.disable_fbc(dev);
3dec0095 2129 intel_increase_pllclock(crtc);
81255565 2130
6b8e6ed0 2131 return dev_priv->display.update_plane(crtc, fb, x, y);
81255565
JB
2132}
2133
14667a4b
CW
2134static int
2135intel_finish_fb(struct drm_framebuffer *old_fb)
2136{
2137 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2138 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2139 bool was_interruptible = dev_priv->mm.interruptible;
2140 int ret;
2141
2142 wait_event(dev_priv->pending_flip_queue,
2143 atomic_read(&dev_priv->mm.wedged) ||
2144 atomic_read(&obj->pending_flip) == 0);
2145
2146 /* Big Hammer, we also need to ensure that any pending
2147 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2148 * current scanout is retired before unpinning the old
2149 * framebuffer.
2150 *
2151 * This should only fail upon a hung GPU, in which case we
2152 * can safely continue.
2153 */
2154 dev_priv->mm.interruptible = false;
2155 ret = i915_gem_object_finish_gpu(obj);
2156 dev_priv->mm.interruptible = was_interruptible;
2157
2158 return ret;
2159}
2160
5c3b82e2 2161static int
3c4fdcfb 2162intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
94352cf9 2163 struct drm_framebuffer *fb)
79e53945
JB
2164{
2165 struct drm_device *dev = crtc->dev;
6b8e6ed0 2166 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
2167 struct drm_i915_master_private *master_priv;
2168 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
94352cf9 2169 struct drm_framebuffer *old_fb;
5c3b82e2 2170 int ret;
79e53945
JB
2171
2172 /* no fb bound */
94352cf9 2173 if (!fb) {
a5071c2f 2174 DRM_ERROR("No FB bound\n");
5c3b82e2
CW
2175 return 0;
2176 }
2177
5826eca5
ED
2178 if (intel_crtc->plane > dev_priv->num_pipe) {
2179 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2180 intel_crtc->plane,
2181 dev_priv->num_pipe);
5c3b82e2 2182 return -EINVAL;
79e53945
JB
2183 }
2184
5c3b82e2 2185 mutex_lock(&dev->struct_mutex);
265db958 2186 ret = intel_pin_and_fence_fb_obj(dev,
94352cf9 2187 to_intel_framebuffer(fb)->obj,
919926ae 2188 NULL);
5c3b82e2
CW
2189 if (ret != 0) {
2190 mutex_unlock(&dev->struct_mutex);
a5071c2f 2191 DRM_ERROR("pin & fence failed\n");
5c3b82e2
CW
2192 return ret;
2193 }
79e53945 2194
94352cf9
DV
2195 if (crtc->fb)
2196 intel_finish_fb(crtc->fb);
265db958 2197
94352cf9 2198 ret = dev_priv->display.update_plane(crtc, fb, x, y);
4e6cfefc 2199 if (ret) {
94352cf9 2200 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
5c3b82e2 2201 mutex_unlock(&dev->struct_mutex);
a5071c2f 2202 DRM_ERROR("failed to update base address\n");
4e6cfefc 2203 return ret;
79e53945 2204 }
3c4fdcfb 2205
94352cf9
DV
2206 old_fb = crtc->fb;
2207 crtc->fb = fb;
6c4c86f5
DV
2208 crtc->x = x;
2209 crtc->y = y;
94352cf9 2210
b7f1de28
CW
2211 if (old_fb) {
2212 intel_wait_for_vblank(dev, intel_crtc->pipe);
1690e1eb 2213 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
b7f1de28 2214 }
652c393a 2215
6b8e6ed0 2216 intel_update_fbc(dev);
5c3b82e2 2217 mutex_unlock(&dev->struct_mutex);
79e53945
JB
2218
2219 if (!dev->primary->master)
5c3b82e2 2220 return 0;
79e53945
JB
2221
2222 master_priv = dev->primary->master->driver_priv;
2223 if (!master_priv->sarea_priv)
5c3b82e2 2224 return 0;
79e53945 2225
265db958 2226 if (intel_crtc->pipe) {
79e53945
JB
2227 master_priv->sarea_priv->pipeB_x = x;
2228 master_priv->sarea_priv->pipeB_y = y;
5c3b82e2
CW
2229 } else {
2230 master_priv->sarea_priv->pipeA_x = x;
2231 master_priv->sarea_priv->pipeA_y = y;
79e53945 2232 }
5c3b82e2
CW
2233
2234 return 0;
79e53945
JB
2235}
2236
5eddb70b 2237static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
32f9d658
ZW
2238{
2239 struct drm_device *dev = crtc->dev;
2240 struct drm_i915_private *dev_priv = dev->dev_private;
2241 u32 dpa_ctl;
2242
28c97730 2243 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
32f9d658
ZW
2244 dpa_ctl = I915_READ(DP_A);
2245 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2246
2247 if (clock < 200000) {
2248 u32 temp;
2249 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2250 /* workaround for 160MHz:
2251 1) program 0x4600c bits 15:0 = 0x8124
2252 2) program 0x46010 bit 0 = 1
2253 3) program 0x46034 bit 24 = 1
2254 4) program 0x64000 bit 14 = 1
2255 */
2256 temp = I915_READ(0x4600c);
2257 temp &= 0xffff0000;
2258 I915_WRITE(0x4600c, temp | 0x8124);
2259
2260 temp = I915_READ(0x46010);
2261 I915_WRITE(0x46010, temp | 1);
2262
2263 temp = I915_READ(0x46034);
2264 I915_WRITE(0x46034, temp | (1 << 24));
2265 } else {
2266 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2267 }
2268 I915_WRITE(DP_A, dpa_ctl);
2269
5eddb70b 2270 POSTING_READ(DP_A);
32f9d658
ZW
2271 udelay(500);
2272}
2273
5e84e1a4
ZW
2274static void intel_fdi_normal_train(struct drm_crtc *crtc)
2275{
2276 struct drm_device *dev = crtc->dev;
2277 struct drm_i915_private *dev_priv = dev->dev_private;
2278 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2279 int pipe = intel_crtc->pipe;
2280 u32 reg, temp;
2281
2282 /* enable normal train */
2283 reg = FDI_TX_CTL(pipe);
2284 temp = I915_READ(reg);
61e499bf 2285 if (IS_IVYBRIDGE(dev)) {
357555c0
JB
2286 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2287 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
2288 } else {
2289 temp &= ~FDI_LINK_TRAIN_NONE;
2290 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 2291 }
5e84e1a4
ZW
2292 I915_WRITE(reg, temp);
2293
2294 reg = FDI_RX_CTL(pipe);
2295 temp = I915_READ(reg);
2296 if (HAS_PCH_CPT(dev)) {
2297 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2298 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2299 } else {
2300 temp &= ~FDI_LINK_TRAIN_NONE;
2301 temp |= FDI_LINK_TRAIN_NONE;
2302 }
2303 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2304
2305 /* wait one idle pattern time */
2306 POSTING_READ(reg);
2307 udelay(1000);
357555c0
JB
2308
2309 /* IVB wants error correction enabled */
2310 if (IS_IVYBRIDGE(dev))
2311 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2312 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
2313}
2314
291427f5
JB
2315static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2316{
2317 struct drm_i915_private *dev_priv = dev->dev_private;
2318 u32 flags = I915_READ(SOUTH_CHICKEN1);
2319
2320 flags |= FDI_PHASE_SYNC_OVR(pipe);
2321 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2322 flags |= FDI_PHASE_SYNC_EN(pipe);
2323 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2324 POSTING_READ(SOUTH_CHICKEN1);
2325}
2326
8db9d77b
ZW
2327/* The FDI link training functions for ILK/Ibexpeak. */
2328static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2329{
2330 struct drm_device *dev = crtc->dev;
2331 struct drm_i915_private *dev_priv = dev->dev_private;
2332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2333 int pipe = intel_crtc->pipe;
0fc932b8 2334 int plane = intel_crtc->plane;
5eddb70b 2335 u32 reg, temp, tries;
8db9d77b 2336
0fc932b8
JB
2337 /* FDI needs bits from pipe & plane first */
2338 assert_pipe_enabled(dev_priv, pipe);
2339 assert_plane_enabled(dev_priv, plane);
2340
e1a44743
AJ
2341 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2342 for train result */
5eddb70b
CW
2343 reg = FDI_RX_IMR(pipe);
2344 temp = I915_READ(reg);
e1a44743
AJ
2345 temp &= ~FDI_RX_SYMBOL_LOCK;
2346 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2347 I915_WRITE(reg, temp);
2348 I915_READ(reg);
e1a44743
AJ
2349 udelay(150);
2350
8db9d77b 2351 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2352 reg = FDI_TX_CTL(pipe);
2353 temp = I915_READ(reg);
77ffb597
AJ
2354 temp &= ~(7 << 19);
2355 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2356 temp &= ~FDI_LINK_TRAIN_NONE;
2357 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 2358 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2359
5eddb70b
CW
2360 reg = FDI_RX_CTL(pipe);
2361 temp = I915_READ(reg);
8db9d77b
ZW
2362 temp &= ~FDI_LINK_TRAIN_NONE;
2363 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
2364 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2365
2366 POSTING_READ(reg);
8db9d77b
ZW
2367 udelay(150);
2368
5b2adf89 2369 /* Ironlake workaround, enable clock pointer after FDI enable*/
6f06ce18
JB
2370 if (HAS_PCH_IBX(dev)) {
2371 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2372 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2373 FDI_RX_PHASE_SYNC_POINTER_EN);
2374 }
5b2adf89 2375
5eddb70b 2376 reg = FDI_RX_IIR(pipe);
e1a44743 2377 for (tries = 0; tries < 5; tries++) {
5eddb70b 2378 temp = I915_READ(reg);
8db9d77b
ZW
2379 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2380
2381 if ((temp & FDI_RX_BIT_LOCK)) {
2382 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 2383 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
2384 break;
2385 }
8db9d77b 2386 }
e1a44743 2387 if (tries == 5)
5eddb70b 2388 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2389
2390 /* Train 2 */
5eddb70b
CW
2391 reg = FDI_TX_CTL(pipe);
2392 temp = I915_READ(reg);
8db9d77b
ZW
2393 temp &= ~FDI_LINK_TRAIN_NONE;
2394 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2395 I915_WRITE(reg, temp);
8db9d77b 2396
5eddb70b
CW
2397 reg = FDI_RX_CTL(pipe);
2398 temp = I915_READ(reg);
8db9d77b
ZW
2399 temp &= ~FDI_LINK_TRAIN_NONE;
2400 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2401 I915_WRITE(reg, temp);
8db9d77b 2402
5eddb70b
CW
2403 POSTING_READ(reg);
2404 udelay(150);
8db9d77b 2405
5eddb70b 2406 reg = FDI_RX_IIR(pipe);
e1a44743 2407 for (tries = 0; tries < 5; tries++) {
5eddb70b 2408 temp = I915_READ(reg);
8db9d77b
ZW
2409 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2410
2411 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 2412 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
2413 DRM_DEBUG_KMS("FDI train 2 done.\n");
2414 break;
2415 }
8db9d77b 2416 }
e1a44743 2417 if (tries == 5)
5eddb70b 2418 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2419
2420 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 2421
8db9d77b
ZW
2422}
2423
0206e353 2424static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
2425 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2426 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2427 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2428 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2429};
2430
2431/* The FDI link training functions for SNB/Cougarpoint. */
2432static void gen6_fdi_link_train(struct drm_crtc *crtc)
2433{
2434 struct drm_device *dev = crtc->dev;
2435 struct drm_i915_private *dev_priv = dev->dev_private;
2436 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2437 int pipe = intel_crtc->pipe;
fa37d39e 2438 u32 reg, temp, i, retry;
8db9d77b 2439
e1a44743
AJ
2440 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2441 for train result */
5eddb70b
CW
2442 reg = FDI_RX_IMR(pipe);
2443 temp = I915_READ(reg);
e1a44743
AJ
2444 temp &= ~FDI_RX_SYMBOL_LOCK;
2445 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2446 I915_WRITE(reg, temp);
2447
2448 POSTING_READ(reg);
e1a44743
AJ
2449 udelay(150);
2450
8db9d77b 2451 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2452 reg = FDI_TX_CTL(pipe);
2453 temp = I915_READ(reg);
77ffb597
AJ
2454 temp &= ~(7 << 19);
2455 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2456 temp &= ~FDI_LINK_TRAIN_NONE;
2457 temp |= FDI_LINK_TRAIN_PATTERN_1;
2458 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2459 /* SNB-B */
2460 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 2461 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2462
5eddb70b
CW
2463 reg = FDI_RX_CTL(pipe);
2464 temp = I915_READ(reg);
8db9d77b
ZW
2465 if (HAS_PCH_CPT(dev)) {
2466 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2467 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2468 } else {
2469 temp &= ~FDI_LINK_TRAIN_NONE;
2470 temp |= FDI_LINK_TRAIN_PATTERN_1;
2471 }
5eddb70b
CW
2472 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2473
2474 POSTING_READ(reg);
8db9d77b
ZW
2475 udelay(150);
2476
291427f5
JB
2477 if (HAS_PCH_CPT(dev))
2478 cpt_phase_pointer_enable(dev, pipe);
2479
0206e353 2480 for (i = 0; i < 4; i++) {
5eddb70b
CW
2481 reg = FDI_TX_CTL(pipe);
2482 temp = I915_READ(reg);
8db9d77b
ZW
2483 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2484 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2485 I915_WRITE(reg, temp);
2486
2487 POSTING_READ(reg);
8db9d77b
ZW
2488 udelay(500);
2489
fa37d39e
SP
2490 for (retry = 0; retry < 5; retry++) {
2491 reg = FDI_RX_IIR(pipe);
2492 temp = I915_READ(reg);
2493 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2494 if (temp & FDI_RX_BIT_LOCK) {
2495 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2496 DRM_DEBUG_KMS("FDI train 1 done.\n");
2497 break;
2498 }
2499 udelay(50);
8db9d77b 2500 }
fa37d39e
SP
2501 if (retry < 5)
2502 break;
8db9d77b
ZW
2503 }
2504 if (i == 4)
5eddb70b 2505 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2506
2507 /* Train 2 */
5eddb70b
CW
2508 reg = FDI_TX_CTL(pipe);
2509 temp = I915_READ(reg);
8db9d77b
ZW
2510 temp &= ~FDI_LINK_TRAIN_NONE;
2511 temp |= FDI_LINK_TRAIN_PATTERN_2;
2512 if (IS_GEN6(dev)) {
2513 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2514 /* SNB-B */
2515 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2516 }
5eddb70b 2517 I915_WRITE(reg, temp);
8db9d77b 2518
5eddb70b
CW
2519 reg = FDI_RX_CTL(pipe);
2520 temp = I915_READ(reg);
8db9d77b
ZW
2521 if (HAS_PCH_CPT(dev)) {
2522 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2523 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2524 } else {
2525 temp &= ~FDI_LINK_TRAIN_NONE;
2526 temp |= FDI_LINK_TRAIN_PATTERN_2;
2527 }
5eddb70b
CW
2528 I915_WRITE(reg, temp);
2529
2530 POSTING_READ(reg);
8db9d77b
ZW
2531 udelay(150);
2532
0206e353 2533 for (i = 0; i < 4; i++) {
5eddb70b
CW
2534 reg = FDI_TX_CTL(pipe);
2535 temp = I915_READ(reg);
8db9d77b
ZW
2536 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2537 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2538 I915_WRITE(reg, temp);
2539
2540 POSTING_READ(reg);
8db9d77b
ZW
2541 udelay(500);
2542
fa37d39e
SP
2543 for (retry = 0; retry < 5; retry++) {
2544 reg = FDI_RX_IIR(pipe);
2545 temp = I915_READ(reg);
2546 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2547 if (temp & FDI_RX_SYMBOL_LOCK) {
2548 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2549 DRM_DEBUG_KMS("FDI train 2 done.\n");
2550 break;
2551 }
2552 udelay(50);
8db9d77b 2553 }
fa37d39e
SP
2554 if (retry < 5)
2555 break;
8db9d77b
ZW
2556 }
2557 if (i == 4)
5eddb70b 2558 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2559
2560 DRM_DEBUG_KMS("FDI train done.\n");
2561}
2562
357555c0
JB
2563/* Manual link training for Ivy Bridge A0 parts */
2564static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2565{
2566 struct drm_device *dev = crtc->dev;
2567 struct drm_i915_private *dev_priv = dev->dev_private;
2568 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2569 int pipe = intel_crtc->pipe;
2570 u32 reg, temp, i;
2571
2572 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
2573 for train result */
2574 reg = FDI_RX_IMR(pipe);
2575 temp = I915_READ(reg);
2576 temp &= ~FDI_RX_SYMBOL_LOCK;
2577 temp &= ~FDI_RX_BIT_LOCK;
2578 I915_WRITE(reg, temp);
2579
2580 POSTING_READ(reg);
2581 udelay(150);
2582
2583 /* enable CPU FDI TX and PCH FDI RX */
2584 reg = FDI_TX_CTL(pipe);
2585 temp = I915_READ(reg);
2586 temp &= ~(7 << 19);
2587 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2588 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2589 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2590 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2591 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
c4f9c4c2 2592 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2593 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2594
2595 reg = FDI_RX_CTL(pipe);
2596 temp = I915_READ(reg);
2597 temp &= ~FDI_LINK_TRAIN_AUTO;
2598 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2599 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
c4f9c4c2 2600 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2601 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2602
2603 POSTING_READ(reg);
2604 udelay(150);
2605
291427f5
JB
2606 if (HAS_PCH_CPT(dev))
2607 cpt_phase_pointer_enable(dev, pipe);
2608
0206e353 2609 for (i = 0; i < 4; i++) {
357555c0
JB
2610 reg = FDI_TX_CTL(pipe);
2611 temp = I915_READ(reg);
2612 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2613 temp |= snb_b_fdi_train_param[i];
2614 I915_WRITE(reg, temp);
2615
2616 POSTING_READ(reg);
2617 udelay(500);
2618
2619 reg = FDI_RX_IIR(pipe);
2620 temp = I915_READ(reg);
2621 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2622
2623 if (temp & FDI_RX_BIT_LOCK ||
2624 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2625 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2626 DRM_DEBUG_KMS("FDI train 1 done.\n");
2627 break;
2628 }
2629 }
2630 if (i == 4)
2631 DRM_ERROR("FDI train 1 fail!\n");
2632
2633 /* Train 2 */
2634 reg = FDI_TX_CTL(pipe);
2635 temp = I915_READ(reg);
2636 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2637 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2638 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2639 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2640 I915_WRITE(reg, temp);
2641
2642 reg = FDI_RX_CTL(pipe);
2643 temp = I915_READ(reg);
2644 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2645 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2646 I915_WRITE(reg, temp);
2647
2648 POSTING_READ(reg);
2649 udelay(150);
2650
0206e353 2651 for (i = 0; i < 4; i++) {
357555c0
JB
2652 reg = FDI_TX_CTL(pipe);
2653 temp = I915_READ(reg);
2654 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2655 temp |= snb_b_fdi_train_param[i];
2656 I915_WRITE(reg, temp);
2657
2658 POSTING_READ(reg);
2659 udelay(500);
2660
2661 reg = FDI_RX_IIR(pipe);
2662 temp = I915_READ(reg);
2663 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2664
2665 if (temp & FDI_RX_SYMBOL_LOCK) {
2666 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2667 DRM_DEBUG_KMS("FDI train 2 done.\n");
2668 break;
2669 }
2670 }
2671 if (i == 4)
2672 DRM_ERROR("FDI train 2 fail!\n");
2673
2674 DRM_DEBUG_KMS("FDI train done.\n");
2675}
2676
88cefb6c 2677static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2c07245f 2678{
88cefb6c 2679 struct drm_device *dev = intel_crtc->base.dev;
2c07245f 2680 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f 2681 int pipe = intel_crtc->pipe;
5eddb70b 2682 u32 reg, temp;
79e53945 2683
c64e311e 2684 /* Write the TU size bits so error detection works */
5eddb70b
CW
2685 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2686 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
c64e311e 2687
c98e9dcf 2688 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
2689 reg = FDI_RX_CTL(pipe);
2690 temp = I915_READ(reg);
2691 temp &= ~((0x7 << 19) | (0x7 << 16));
c98e9dcf 2692 temp |= (intel_crtc->fdi_lanes - 1) << 19;
5eddb70b
CW
2693 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2694 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2695
2696 POSTING_READ(reg);
c98e9dcf
JB
2697 udelay(200);
2698
2699 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
2700 temp = I915_READ(reg);
2701 I915_WRITE(reg, temp | FDI_PCDCLK);
2702
2703 POSTING_READ(reg);
c98e9dcf
JB
2704 udelay(200);
2705
bf507ef7
ED
2706 /* On Haswell, the PLL configuration for ports and pipes is handled
2707 * separately, as part of DDI setup */
2708 if (!IS_HASWELL(dev)) {
2709 /* Enable CPU FDI TX PLL, always on for Ironlake */
2710 reg = FDI_TX_CTL(pipe);
2711 temp = I915_READ(reg);
2712 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2713 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 2714
bf507ef7
ED
2715 POSTING_READ(reg);
2716 udelay(100);
2717 }
6be4a607 2718 }
0e23b99d
JB
2719}
2720
88cefb6c
DV
2721static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2722{
2723 struct drm_device *dev = intel_crtc->base.dev;
2724 struct drm_i915_private *dev_priv = dev->dev_private;
2725 int pipe = intel_crtc->pipe;
2726 u32 reg, temp;
2727
2728 /* Switch from PCDclk to Rawclk */
2729 reg = FDI_RX_CTL(pipe);
2730 temp = I915_READ(reg);
2731 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2732
2733 /* Disable CPU FDI TX PLL */
2734 reg = FDI_TX_CTL(pipe);
2735 temp = I915_READ(reg);
2736 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2737
2738 POSTING_READ(reg);
2739 udelay(100);
2740
2741 reg = FDI_RX_CTL(pipe);
2742 temp = I915_READ(reg);
2743 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2744
2745 /* Wait for the clocks to turn off. */
2746 POSTING_READ(reg);
2747 udelay(100);
2748}
2749
291427f5
JB
2750static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2751{
2752 struct drm_i915_private *dev_priv = dev->dev_private;
2753 u32 flags = I915_READ(SOUTH_CHICKEN1);
2754
2755 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2756 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2757 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2758 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2759 POSTING_READ(SOUTH_CHICKEN1);
2760}
0fc932b8
JB
2761static void ironlake_fdi_disable(struct drm_crtc *crtc)
2762{
2763 struct drm_device *dev = crtc->dev;
2764 struct drm_i915_private *dev_priv = dev->dev_private;
2765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2766 int pipe = intel_crtc->pipe;
2767 u32 reg, temp;
2768
2769 /* disable CPU FDI tx and PCH FDI rx */
2770 reg = FDI_TX_CTL(pipe);
2771 temp = I915_READ(reg);
2772 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2773 POSTING_READ(reg);
2774
2775 reg = FDI_RX_CTL(pipe);
2776 temp = I915_READ(reg);
2777 temp &= ~(0x7 << 16);
2778 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2779 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2780
2781 POSTING_READ(reg);
2782 udelay(100);
2783
2784 /* Ironlake workaround, disable clock pointer after downing FDI */
6f06ce18
JB
2785 if (HAS_PCH_IBX(dev)) {
2786 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
0fc932b8
JB
2787 I915_WRITE(FDI_RX_CHICKEN(pipe),
2788 I915_READ(FDI_RX_CHICKEN(pipe)) &
6f06ce18 2789 ~FDI_RX_PHASE_SYNC_POINTER_EN);
291427f5
JB
2790 } else if (HAS_PCH_CPT(dev)) {
2791 cpt_phase_pointer_disable(dev, pipe);
6f06ce18 2792 }
0fc932b8
JB
2793
2794 /* still set train pattern 1 */
2795 reg = FDI_TX_CTL(pipe);
2796 temp = I915_READ(reg);
2797 temp &= ~FDI_LINK_TRAIN_NONE;
2798 temp |= FDI_LINK_TRAIN_PATTERN_1;
2799 I915_WRITE(reg, temp);
2800
2801 reg = FDI_RX_CTL(pipe);
2802 temp = I915_READ(reg);
2803 if (HAS_PCH_CPT(dev)) {
2804 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2805 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2806 } else {
2807 temp &= ~FDI_LINK_TRAIN_NONE;
2808 temp |= FDI_LINK_TRAIN_PATTERN_1;
2809 }
2810 /* BPC in FDI rx is consistent with that in PIPECONF */
2811 temp &= ~(0x07 << 16);
2812 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2813 I915_WRITE(reg, temp);
2814
2815 POSTING_READ(reg);
2816 udelay(100);
2817}
2818
5bb61643
CW
2819static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2820{
2821 struct drm_device *dev = crtc->dev;
2822 struct drm_i915_private *dev_priv = dev->dev_private;
2823 unsigned long flags;
2824 bool pending;
2825
2826 if (atomic_read(&dev_priv->mm.wedged))
2827 return false;
2828
2829 spin_lock_irqsave(&dev->event_lock, flags);
2830 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2831 spin_unlock_irqrestore(&dev->event_lock, flags);
2832
2833 return pending;
2834}
2835
e6c3a2a6
CW
2836static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2837{
0f91128d 2838 struct drm_device *dev = crtc->dev;
5bb61643 2839 struct drm_i915_private *dev_priv = dev->dev_private;
e6c3a2a6
CW
2840
2841 if (crtc->fb == NULL)
2842 return;
2843
5bb61643
CW
2844 wait_event(dev_priv->pending_flip_queue,
2845 !intel_crtc_has_pending_flip(crtc));
2846
0f91128d
CW
2847 mutex_lock(&dev->struct_mutex);
2848 intel_finish_fb(crtc->fb);
2849 mutex_unlock(&dev->struct_mutex);
e6c3a2a6
CW
2850}
2851
fc316cbe 2852static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
040484af
JB
2853{
2854 struct drm_device *dev = crtc->dev;
228d3e36 2855 struct intel_encoder *intel_encoder;
040484af
JB
2856
2857 /*
2858 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2859 * must be driven by its own crtc; no sharing is possible.
2860 */
228d3e36 2861 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
228d3e36 2862 switch (intel_encoder->type) {
040484af 2863 case INTEL_OUTPUT_EDP:
228d3e36 2864 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
040484af
JB
2865 return false;
2866 continue;
2867 }
2868 }
2869
2870 return true;
2871}
2872
fc316cbe
PZ
2873static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
2874{
2875 return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
2876}
2877
e615efe4
ED
2878/* Program iCLKIP clock to the desired frequency */
2879static void lpt_program_iclkip(struct drm_crtc *crtc)
2880{
2881 struct drm_device *dev = crtc->dev;
2882 struct drm_i915_private *dev_priv = dev->dev_private;
2883 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2884 u32 temp;
2885
2886 /* It is necessary to ungate the pixclk gate prior to programming
2887 * the divisors, and gate it back when it is done.
2888 */
2889 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2890
2891 /* Disable SSCCTL */
2892 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2893 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2894 SBI_SSCCTL_DISABLE);
2895
2896 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2897 if (crtc->mode.clock == 20000) {
2898 auxdiv = 1;
2899 divsel = 0x41;
2900 phaseinc = 0x20;
2901 } else {
2902 /* The iCLK virtual clock root frequency is in MHz,
2903 * but the crtc->mode.clock is in kHz. To get the divisors,
2904 * it is necessary to divide one by another, so we
2905 * convert the virtual clock precision to KHz here for higher
2906 * precision.
2907 */
2908 u32 iclk_virtual_root_freq = 172800 * 1000;
2909 u32 iclk_pi_range = 64;
2910 u32 desired_divisor, msb_divisor_value, pi_value;
2911
2912 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2913 msb_divisor_value = desired_divisor / iclk_pi_range;
2914 pi_value = desired_divisor % iclk_pi_range;
2915
2916 auxdiv = 0;
2917 divsel = msb_divisor_value - 2;
2918 phaseinc = pi_value;
2919 }
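/*
 * Worked example of the divisor math above (editorial): for
 * crtc->mode.clock = 108000 kHz,
 *   desired_divisor   = 172800000 / 108000 = 1600
 *   msb_divisor_value = 1600 / 64 = 25  ->  divsel   = 25 - 2 = 23
 *   pi_value          = 1600 % 64 = 0   ->  phaseinc = 0
 *   auxdiv            = 0
 * The 20 MHz special case exists because its computed divsel (8640/64 - 2 =
 * 133) would not fit the 7-bit DIVSEL field, hence the fixed values above.
 */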
2920
2921 /* This should not happen with any sane values */
2922 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2923 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2924 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2925 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2926
2927 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2928 crtc->mode.clock,
2929 auxdiv,
2930 divsel,
2931 phasedir,
2932 phaseinc);
2933
2934 /* Program SSCDIVINTPHASE6 */
2935 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2936 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2937 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2938 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2939 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2940 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2941 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2942
2943 intel_sbi_write(dev_priv,
2944 SBI_SSCDIVINTPHASE6,
2945 temp);
2946
2947 /* Program SSCAUXDIV */
2948 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2949 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2950 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2951 intel_sbi_write(dev_priv,
2952 SBI_SSCAUXDIV6,
2953 temp);
2954
2955
2956 /* Enable modulator and associated divider */
2957 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2958 temp &= ~SBI_SSCCTL_DISABLE;
2959 intel_sbi_write(dev_priv,
2960 SBI_SSCCTL6,
2961 temp);
2962
2963 /* Wait for initialization time */
2964 udelay(24);
2965
2966 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2967}
2968
f67a559d
JB
2969/*
2970 * Enable PCH resources required for PCH ports:
2971 * - PCH PLLs
2972 * - FDI training & RX/TX
2973 * - update transcoder timings
2974 * - DP transcoding bits
2975 * - transcoder
2976 */
2977static void ironlake_pch_enable(struct drm_crtc *crtc)
0e23b99d
JB
2978{
2979 struct drm_device *dev = crtc->dev;
2980 struct drm_i915_private *dev_priv = dev->dev_private;
2981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2982 int pipe = intel_crtc->pipe;
ee7b9f93 2983 u32 reg, temp;
2c07245f 2984
e7e164db
CW
2985 assert_transcoder_disabled(dev_priv, pipe);
2986
c98e9dcf 2987 /* For PCH output, training FDI link */
674cf967 2988 dev_priv->display.fdi_link_train(crtc);
2c07245f 2989
6f13b7b5
CW
2990 intel_enable_pch_pll(intel_crtc);
2991
e615efe4
ED
2992 if (HAS_PCH_LPT(dev)) {
2993 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2994 lpt_program_iclkip(crtc);
2995 } else if (HAS_PCH_CPT(dev)) {
ee7b9f93 2996 u32 sel;
4b645f14 2997
c98e9dcf 2998 temp = I915_READ(PCH_DPLL_SEL);
ee7b9f93
JB
2999 switch (pipe) {
3000 default:
3001 case 0:
3002 temp |= TRANSA_DPLL_ENABLE;
3003 sel = TRANSA_DPLLB_SEL;
3004 break;
3005 case 1:
3006 temp |= TRANSB_DPLL_ENABLE;
3007 sel = TRANSB_DPLLB_SEL;
3008 break;
3009 case 2:
3010 temp |= TRANSC_DPLL_ENABLE;
3011 sel = TRANSC_DPLLB_SEL;
3012 break;
d64311ab 3013 }
ee7b9f93
JB
3014 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
3015 temp |= sel;
3016 else
3017 temp &= ~sel;
c98e9dcf 3018 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 3019 }
5eddb70b 3020
d9b6cb56
JB
3021 /* set transcoder timing, panel must allow it */
3022 assert_panel_unlocked(dev_priv, pipe);
5eddb70b
CW
3023 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
3024 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
3025 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
8db9d77b 3026
5eddb70b
CW
3027 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
3028 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
3029 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
0529a0d9 3030 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
8db9d77b 3031
f57e1e3a
ED
3032 if (!IS_HASWELL(dev))
3033 intel_fdi_normal_train(crtc);
5e84e1a4 3034
c98e9dcf
JB
3035 /* For PCH DP, enable TRANS_DP_CTL */
3036 if (HAS_PCH_CPT(dev) &&
417e822d
KP
3037 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3038 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
9325c9f0 3039 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
5eddb70b
CW
3040 reg = TRANS_DP_CTL(pipe);
3041 temp = I915_READ(reg);
3042 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
3043 TRANS_DP_SYNC_MASK |
3044 TRANS_DP_BPC_MASK);
5eddb70b
CW
3045 temp |= (TRANS_DP_OUTPUT_ENABLE |
3046 TRANS_DP_ENH_FRAMING);
9325c9f0 3047 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf
JB
3048
3049 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 3050 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
c98e9dcf 3051 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 3052 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf
JB
3053
3054 switch (intel_trans_dp_port_sel(crtc)) {
3055 case PCH_DP_B:
5eddb70b 3056 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf
JB
3057 break;
3058 case PCH_DP_C:
5eddb70b 3059 temp |= TRANS_DP_PORT_SEL_C;
c98e9dcf
JB
3060 break;
3061 case PCH_DP_D:
5eddb70b 3062 temp |= TRANS_DP_PORT_SEL_D;
c98e9dcf
JB
3063 break;
3064 default:
3065 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
5eddb70b 3066 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf 3067 break;
32f9d658 3068 }
2c07245f 3069
5eddb70b 3070 I915_WRITE(reg, temp);
6be4a607 3071 }
b52eb4dc 3072
040484af 3073 intel_enable_transcoder(dev_priv, pipe);
f67a559d
JB
3074}
3075
ee7b9f93
JB
3076static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3077{
3078 struct intel_pch_pll *pll = intel_crtc->pch_pll;
3079
3080 if (pll == NULL)
3081 return;
3082
3083 if (pll->refcount == 0) {
3084 WARN(1, "bad PCH PLL refcount\n");
3085 return;
3086 }
3087
3088 --pll->refcount;
3089 intel_crtc->pch_pll = NULL;
3090}
3091
3092static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3093{
3094 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3095 struct intel_pch_pll *pll;
3096 int i;
3097
3098 pll = intel_crtc->pch_pll;
3099 if (pll) {
3100 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3101 intel_crtc->base.base.id, pll->pll_reg);
3102 goto prepare;
3103 }
3104
98b6bd99
DV
3105 if (HAS_PCH_IBX(dev_priv->dev)) {
3106 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3107 i = intel_crtc->pipe;
3108 pll = &dev_priv->pch_plls[i];
3109
3110 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3111 intel_crtc->base.base.id, pll->pll_reg);
3112
3113 goto found;
3114 }
3115
ee7b9f93
JB
3116 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3117 pll = &dev_priv->pch_plls[i];
3118
3119 /* Only want to check enabled timings first */
3120 if (pll->refcount == 0)
3121 continue;
3122
3123 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3124 fp == I915_READ(pll->fp0_reg)) {
3125 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3126 intel_crtc->base.base.id,
3127 pll->pll_reg, pll->refcount, pll->active);
3128
3129 goto found;
3130 }
3131 }
3132
3133 /* Ok no matching timings, maybe there's a free one? */
3134 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3135 pll = &dev_priv->pch_plls[i];
3136 if (pll->refcount == 0) {
3137 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3138 intel_crtc->base.base.id, pll->pll_reg);
3139 goto found;
3140 }
3141 }
3142
3143 return NULL;
3144
3145found:
3146 intel_crtc->pch_pll = pll;
3147 pll->refcount++;
3148 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3149prepare: /* separate function? */
3150 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
ee7b9f93 3151
e04c7350
CW
3152 /* Wait for the clocks to stabilize before rewriting the regs */
3153 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3154 POSTING_READ(pll->pll_reg);
3155 udelay(150);
e04c7350
CW
3156
3157 I915_WRITE(pll->fp0_reg, fp);
3158 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3159 pll->on = false;
3160 return pll;
3161}
3162
d4270e57
JB
3163void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3164{
3165 struct drm_i915_private *dev_priv = dev->dev_private;
3166 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3167 u32 temp;
3168
3169 temp = I915_READ(dslreg);
3170 udelay(500);
3171 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3172 /* Without this, mode sets may fail silently on FDI */
3173 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3174 udelay(250);
3175 I915_WRITE(tc2reg, 0);
3176 if (wait_for(I915_READ(dslreg) != temp, 5))
3177 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3178 }
3179}
3180
f67a559d
JB
3181static void ironlake_crtc_enable(struct drm_crtc *crtc)
3182{
3183 struct drm_device *dev = crtc->dev;
3184 struct drm_i915_private *dev_priv = dev->dev_private;
3185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3186 struct intel_encoder *encoder;
f67a559d
JB
3187 int pipe = intel_crtc->pipe;
3188 int plane = intel_crtc->plane;
3189 u32 temp;
3190 bool is_pch_port;
3191
08a48469
DV
3192 WARN_ON(!crtc->enabled);
3193
f67a559d
JB
3194 if (intel_crtc->active)
3195 return;
3196
3197 intel_crtc->active = true;
3198 intel_update_watermarks(dev);
3199
3200 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3201 temp = I915_READ(PCH_LVDS);
3202 if ((temp & LVDS_PORT_EN) == 0)
3203 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3204 }
3205
fc316cbe 3206 is_pch_port = ironlake_crtc_driving_pch(crtc);
f67a559d 3207
46b6f814 3208 if (is_pch_port) {
88cefb6c 3209 ironlake_fdi_pll_enable(intel_crtc);
46b6f814
DV
3210 } else {
3211 assert_fdi_tx_disabled(dev_priv, pipe);
3212 assert_fdi_rx_disabled(dev_priv, pipe);
3213 }
f67a559d 3214
bf49ec8c
DV
3215 for_each_encoder_on_crtc(dev, crtc, encoder)
3216 if (encoder->pre_enable)
3217 encoder->pre_enable(encoder);
3218
f67a559d
JB
3219 /* Enable panel fitting for LVDS */
3220 if (dev_priv->pch_pf_size &&
3221 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3222 /* Force use of hard-coded filter coefficients
3223 * as some pre-programmed values are broken,
3224 * e.g. x201.
3225 */
9db4a9c7
JB
3226 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3227 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3228 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
f67a559d
JB
3229 }
3230
9c54c0dd
JB
3231 /*
3232 * On ILK+ LUT must be loaded before the pipe is running but with
3233 * clocks enabled
3234 */
3235 intel_crtc_load_lut(crtc);
3236
f67a559d
JB
3237 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3238 intel_enable_plane(dev_priv, plane, pipe);
3239
3240 if (is_pch_port)
3241 ironlake_pch_enable(crtc);
c98e9dcf 3242
d1ebd816 3243 mutex_lock(&dev->struct_mutex);
bed4a673 3244 intel_update_fbc(dev);
d1ebd816
BW
3245 mutex_unlock(&dev->struct_mutex);
3246
6b383a7f 3247 intel_crtc_update_cursor(crtc, true);
ef9c3aee 3248
fa5c73b1
DV
3249 for_each_encoder_on_crtc(dev, crtc, encoder)
3250 encoder->enable(encoder);
61b77ddd
DV
3251
3252 if (HAS_PCH_CPT(dev))
3253 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
6ce94100
DV
3254
3255 /*
3256 * There seems to be a race in PCH platform hw (at least on some
3257 * outputs) where an enabled pipe still completes any pageflip right
3258 * away (as if the pipe is off) instead of waiting for vblank. As soon
3259 * as the first vblank happened, everything works as expected. Hence just
3260 * wait for one vblank before returning to avoid strange things
3261 * happening.
3262 */
3263 intel_wait_for_vblank(dev, intel_crtc->pipe);
6be4a607
JB
3264}
3265
4f771f10
PZ
3266static void haswell_crtc_enable(struct drm_crtc *crtc)
3267{
3268 struct drm_device *dev = crtc->dev;
3269 struct drm_i915_private *dev_priv = dev->dev_private;
3270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3271 struct intel_encoder *encoder;
3272 int pipe = intel_crtc->pipe;
3273 int plane = intel_crtc->plane;
4f771f10
PZ
3274 bool is_pch_port;
3275
3276 WARN_ON(!crtc->enabled);
3277
3278 if (intel_crtc->active)
3279 return;
3280
3281 intel_crtc->active = true;
3282 intel_update_watermarks(dev);
3283
fc316cbe 3284 is_pch_port = haswell_crtc_driving_pch(crtc);
4f771f10
PZ
3285
3286 if (is_pch_port) {
3287 ironlake_fdi_pll_enable(intel_crtc);
3288 } else {
3289 assert_fdi_tx_disabled(dev_priv, pipe);
3290 assert_fdi_rx_disabled(dev_priv, pipe);
3291 }
3292
3293 for_each_encoder_on_crtc(dev, crtc, encoder)
3294 if (encoder->pre_enable)
3295 encoder->pre_enable(encoder);
3296
1f544388 3297 intel_ddi_enable_pipe_clock(intel_crtc);
4f771f10 3298
1f544388
PZ
3299 /* Enable panel fitting for eDP */
3300 if (dev_priv->pch_pf_size && HAS_eDP) {
4f771f10
PZ
3301 /* Force use of hard-coded filter coefficients
3302 * as some pre-programmed values are broken,
3303 * e.g. x201.
3304 */
3305 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3306 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3307 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3308 }
3309
3310 /*
3311 * On ILK+ LUT must be loaded before the pipe is running but with
3312 * clocks enabled
3313 */
3314 intel_crtc_load_lut(crtc);
3315
1f544388
PZ
3316 intel_ddi_set_pipe_settings(crtc);
3317 intel_ddi_enable_pipe_func(crtc);
4f771f10
PZ
3318
3319 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3320 intel_enable_plane(dev_priv, plane, pipe);
3321
3322 if (is_pch_port)
3323 ironlake_pch_enable(crtc);
3324
3325 mutex_lock(&dev->struct_mutex);
3326 intel_update_fbc(dev);
3327 mutex_unlock(&dev->struct_mutex);
3328
3329 intel_crtc_update_cursor(crtc, true);
3330
3331 for_each_encoder_on_crtc(dev, crtc, encoder)
3332 encoder->enable(encoder);
3333
4f771f10
PZ
3334 /*
3335 * There seems to be a race in PCH platform hw (at least on some
3336 * outputs) where an enabled pipe still completes any pageflip right
3337 * away (as if the pipe is off) instead of waiting for vblank. As soon
3338 * as the first vblank happened, everything works as expected. Hence just
3339 * wait for one vblank before returning to avoid strange things
3340 * happening.
3341 */
3342 intel_wait_for_vblank(dev, intel_crtc->pipe);
3343}
3344
6be4a607
JB
3345static void ironlake_crtc_disable(struct drm_crtc *crtc)
3346{
3347 struct drm_device *dev = crtc->dev;
3348 struct drm_i915_private *dev_priv = dev->dev_private;
3349 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3350 struct intel_encoder *encoder;
6be4a607
JB
3351 int pipe = intel_crtc->pipe;
3352 int plane = intel_crtc->plane;
5eddb70b 3353 u32 reg, temp;
b52eb4dc 3354
ef9c3aee 3355
f7abfe8b
CW
3356 if (!intel_crtc->active)
3357 return;
3358
ea9d758d
DV
3359 for_each_encoder_on_crtc(dev, crtc, encoder)
3360 encoder->disable(encoder);
3361
e6c3a2a6 3362 intel_crtc_wait_for_pending_flips(crtc);
6be4a607 3363 drm_vblank_off(dev, pipe);
6b383a7f 3364 intel_crtc_update_cursor(crtc, false);
5eddb70b 3365
b24e7179 3366 intel_disable_plane(dev_priv, plane, pipe);
913d8d11 3367
973d04f9
CW
3368 if (dev_priv->cfb_plane == plane)
3369 intel_disable_fbc(dev);
2c07245f 3370
b24e7179 3371 intel_disable_pipe(dev_priv, pipe);
32f9d658 3372
6be4a607 3373 /* Disable PF */
9db4a9c7
JB
3374 I915_WRITE(PF_CTL(pipe), 0);
3375 I915_WRITE(PF_WIN_SZ(pipe), 0);
2c07245f 3376
bf49ec8c
DV
3377 for_each_encoder_on_crtc(dev, crtc, encoder)
3378 if (encoder->post_disable)
3379 encoder->post_disable(encoder);
3380
0fc932b8 3381 ironlake_fdi_disable(crtc);
2c07245f 3382
040484af 3383 intel_disable_transcoder(dev_priv, pipe);
913d8d11 3384
6be4a607
JB
3385 if (HAS_PCH_CPT(dev)) {
3386 /* disable TRANS_DP_CTL */
5eddb70b
CW
3387 reg = TRANS_DP_CTL(pipe);
3388 temp = I915_READ(reg);
3389 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
cb3543c6 3390 temp |= TRANS_DP_PORT_SEL_NONE;
5eddb70b 3391 I915_WRITE(reg, temp);
6be4a607
JB
3392
3393 /* disable DPLL_SEL */
3394 temp = I915_READ(PCH_DPLL_SEL);
9db4a9c7
JB
3395 switch (pipe) {
3396 case 0:
d64311ab 3397 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
9db4a9c7
JB
3398 break;
3399 case 1:
6be4a607 3400 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
9db4a9c7
JB
3401 break;
3402 case 2:
4b645f14 3403 /* C shares PLL A or B */
d64311ab 3404 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
9db4a9c7
JB
3405 break;
3406 default:
3407 BUG(); /* wtf */
3408 }
6be4a607 3409 I915_WRITE(PCH_DPLL_SEL, temp);
6be4a607 3410 }
e3421a18 3411
6be4a607 3412 /* disable PCH DPLL */
ee7b9f93 3413 intel_disable_pch_pll(intel_crtc);
8db9d77b 3414
88cefb6c 3415 ironlake_fdi_pll_disable(intel_crtc);
6b383a7f 3416
f7abfe8b 3417 intel_crtc->active = false;
6b383a7f 3418 intel_update_watermarks(dev);
d1ebd816
BW
3419
3420 mutex_lock(&dev->struct_mutex);
6b383a7f 3421 intel_update_fbc(dev);
d1ebd816 3422 mutex_unlock(&dev->struct_mutex);
6be4a607 3423}
1b3c7a47 3424
4f771f10
PZ
3425static void haswell_crtc_disable(struct drm_crtc *crtc)
3426{
3427 struct drm_device *dev = crtc->dev;
3428 struct drm_i915_private *dev_priv = dev->dev_private;
3429 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3430 struct intel_encoder *encoder;
3431 int pipe = intel_crtc->pipe;
3432 int plane = intel_crtc->plane;
4f771f10
PZ
3433
3434 if (!intel_crtc->active)
3435 return;
3436
3437 for_each_encoder_on_crtc(dev, crtc, encoder)
3438 encoder->disable(encoder);
3439
3440 intel_crtc_wait_for_pending_flips(crtc);
3441 drm_vblank_off(dev, pipe);
3442 intel_crtc_update_cursor(crtc, false);
3443
3444 intel_disable_plane(dev_priv, plane, pipe);
3445
3446 if (dev_priv->cfb_plane == plane)
3447 intel_disable_fbc(dev);
3448
3449 intel_disable_pipe(dev_priv, pipe);
3450
1f544388 3451 intel_ddi_disable_pipe_func(dev_priv, pipe);
4f771f10
PZ
3452
3453 /* Disable PF */
3454 I915_WRITE(PF_CTL(pipe), 0);
3455 I915_WRITE(PF_WIN_SZ(pipe), 0);
3456
1f544388 3457 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10
PZ
3458
3459 for_each_encoder_on_crtc(dev, crtc, encoder)
3460 if (encoder->post_disable)
3461 encoder->post_disable(encoder);
3462
3463 ironlake_fdi_disable(crtc);
3464
3465 intel_disable_transcoder(dev_priv, pipe);
3466
4f771f10
PZ
3467 /* disable PCH DPLL */
3468 intel_disable_pch_pll(intel_crtc);
3469
3470 ironlake_fdi_pll_disable(intel_crtc);
3471
3472 intel_crtc->active = false;
3473 intel_update_watermarks(dev);
3474
3475 mutex_lock(&dev->struct_mutex);
3476 intel_update_fbc(dev);
3477 mutex_unlock(&dev->struct_mutex);
3478}
3479
ee7b9f93
JB
3480static void ironlake_crtc_off(struct drm_crtc *crtc)
3481{
3482 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3483 intel_put_pch_pll(intel_crtc);
3484}
3485
6441ab5f
PZ
3486static void haswell_crtc_off(struct drm_crtc *crtc)
3487{
3488 intel_ddi_put_crtc_pll(crtc);
3489}
3490
02e792fb
DV
3491static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3492{
02e792fb 3493 if (!enable && intel_crtc->overlay) {
23f09ce3 3494 struct drm_device *dev = intel_crtc->base.dev;
ce453d81 3495 struct drm_i915_private *dev_priv = dev->dev_private;
03f77ea5 3496
23f09ce3 3497 mutex_lock(&dev->struct_mutex);
ce453d81
CW
3498 dev_priv->mm.interruptible = false;
3499 (void) intel_overlay_switch_off(intel_crtc->overlay);
3500 dev_priv->mm.interruptible = true;
23f09ce3 3501 mutex_unlock(&dev->struct_mutex);
02e792fb 3502 }
02e792fb 3503
5dcdbcb0
CW
3504 /* Let userspace switch the overlay on again. In most cases userspace
3505 * has to recompute where to put it anyway.
3506 */
02e792fb
DV
3507}
3508
0b8765c6 3509static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
3510{
3511 struct drm_device *dev = crtc->dev;
79e53945
JB
3512 struct drm_i915_private *dev_priv = dev->dev_private;
3513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3514 struct intel_encoder *encoder;
79e53945 3515 int pipe = intel_crtc->pipe;
80824003 3516 int plane = intel_crtc->plane;
79e53945 3517
08a48469
DV
3518 WARN_ON(!crtc->enabled);
3519
f7abfe8b
CW
3520 if (intel_crtc->active)
3521 return;
3522
3523 intel_crtc->active = true;
6b383a7f
CW
3524 intel_update_watermarks(dev);
3525
63d7bbe9 3526 intel_enable_pll(dev_priv, pipe);
040484af 3527 intel_enable_pipe(dev_priv, pipe, false);
b24e7179 3528 intel_enable_plane(dev_priv, plane, pipe);
79e53945 3529
0b8765c6 3530 intel_crtc_load_lut(crtc);
bed4a673 3531 intel_update_fbc(dev);
79e53945 3532
0b8765c6
JB
3533 /* Give the overlay scaler a chance to enable if it's on this pipe */
3534 intel_crtc_dpms_overlay(intel_crtc, true);
6b383a7f 3535 intel_crtc_update_cursor(crtc, true);
ef9c3aee 3536
fa5c73b1
DV
3537 for_each_encoder_on_crtc(dev, crtc, encoder)
3538 encoder->enable(encoder);
0b8765c6 3539}
79e53945 3540
0b8765c6
JB
3541static void i9xx_crtc_disable(struct drm_crtc *crtc)
3542{
3543 struct drm_device *dev = crtc->dev;
3544 struct drm_i915_private *dev_priv = dev->dev_private;
3545 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3546 struct intel_encoder *encoder;
0b8765c6
JB
3547 int pipe = intel_crtc->pipe;
3548 int plane = intel_crtc->plane;
b690e96c 3549
ef9c3aee 3550
f7abfe8b
CW
3551 if (!intel_crtc->active)
3552 return;
3553
ea9d758d
DV
3554 for_each_encoder_on_crtc(dev, crtc, encoder)
3555 encoder->disable(encoder);
3556
0b8765c6 3557 /* Give the overlay scaler a chance to disable if it's on this pipe */
e6c3a2a6
CW
3558 intel_crtc_wait_for_pending_flips(crtc);
3559 drm_vblank_off(dev, pipe);
0b8765c6 3560 intel_crtc_dpms_overlay(intel_crtc, false);
6b383a7f 3561 intel_crtc_update_cursor(crtc, false);
0b8765c6 3562
973d04f9
CW
3563 if (dev_priv->cfb_plane == plane)
3564 intel_disable_fbc(dev);
79e53945 3565
b24e7179 3566 intel_disable_plane(dev_priv, plane, pipe);
b24e7179 3567 intel_disable_pipe(dev_priv, pipe);
63d7bbe9 3568 intel_disable_pll(dev_priv, pipe);
0b8765c6 3569
f7abfe8b 3570 intel_crtc->active = false;
6b383a7f
CW
3571 intel_update_fbc(dev);
3572 intel_update_watermarks(dev);
0b8765c6
JB
3573}
3574
ee7b9f93
JB
3575static void i9xx_crtc_off(struct drm_crtc *crtc)
3576{
3577}
3578
976f8a20
DV
3579static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3580 bool enabled)
2c07245f
ZW
3581{
3582 struct drm_device *dev = crtc->dev;
3583 struct drm_i915_master_private *master_priv;
3584 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3585 int pipe = intel_crtc->pipe;
79e53945
JB
3586
3587 if (!dev->primary->master)
3588 return;
3589
3590 master_priv = dev->primary->master->driver_priv;
3591 if (!master_priv->sarea_priv)
3592 return;
3593
79e53945
JB
3594 switch (pipe) {
3595 case 0:
3596 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3597 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3598 break;
3599 case 1:
3600 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3601 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3602 break;
3603 default:
9db4a9c7 3604 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
79e53945
JB
3605 break;
3606 }
79e53945
JB
3607}
3608
976f8a20
DV
3609/**
3610 * Sets the power management mode of the pipe and plane.
3611 */
3612void intel_crtc_update_dpms(struct drm_crtc *crtc)
3613{
3614 struct drm_device *dev = crtc->dev;
3615 struct drm_i915_private *dev_priv = dev->dev_private;
3616 struct intel_encoder *intel_encoder;
3617 bool enable = false;
3618
3619 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
3620 enable |= intel_encoder->connectors_active;
3621
3622 if (enable)
3623 dev_priv->display.crtc_enable(crtc);
3624 else
3625 dev_priv->display.crtc_disable(crtc);
3626
3627 intel_crtc_update_sarea(crtc, enable);
3628}
3629
3630static void intel_crtc_noop(struct drm_crtc *crtc)
3631{
3632}
3633
cdd59983
CW
3634static void intel_crtc_disable(struct drm_crtc *crtc)
3635{
cdd59983 3636 struct drm_device *dev = crtc->dev;
976f8a20 3637 struct drm_connector *connector;
ee7b9f93 3638 struct drm_i915_private *dev_priv = dev->dev_private;
cdd59983 3639
976f8a20
DV
3640 /* crtc should still be enabled when we disable it. */
3641 WARN_ON(!crtc->enabled);
3642
3643 dev_priv->display.crtc_disable(crtc);
3644 intel_crtc_update_sarea(crtc, false);
ee7b9f93
JB
3645 dev_priv->display.off(crtc);
3646
931872fc
CW
3647 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3648 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
cdd59983
CW
3649
3650 if (crtc->fb) {
3651 mutex_lock(&dev->struct_mutex);
1690e1eb 3652 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
cdd59983 3653 mutex_unlock(&dev->struct_mutex);
976f8a20
DV
3654 crtc->fb = NULL;
3655 }
3656
3657 /* Update computed state. */
3658 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
3659 if (!connector->encoder || !connector->encoder->crtc)
3660 continue;
3661
3662 if (connector->encoder->crtc != crtc)
3663 continue;
3664
3665 connector->dpms = DRM_MODE_DPMS_OFF;
3666 to_intel_encoder(connector->encoder)->connectors_active = false;
cdd59983
CW
3667 }
3668}
3669
a261b246 3670void intel_modeset_disable(struct drm_device *dev)
79e53945 3671{
a261b246
DV
3672 struct drm_crtc *crtc;
3673
3674 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
3675 if (crtc->enabled)
3676 intel_crtc_disable(crtc);
3677 }
79e53945
JB
3678}
3679
1f703855 3680void intel_encoder_noop(struct drm_encoder *encoder)
79e53945 3681{
7e7d76c3
JB
3682}
3683
ea5b213a 3684void intel_encoder_destroy(struct drm_encoder *encoder)
7e7d76c3 3685{
4ef69c7a 3686 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 3687
ea5b213a
CW
3688 drm_encoder_cleanup(encoder);
3689 kfree(intel_encoder);
7e7d76c3
JB
3690}
3691
5ab432ef
DV
 3692/* Simple dpms helper for encoders with just one connector, no cloning and only
3693 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
3694 * state of the entire output pipe. */
3695void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
7e7d76c3 3696{
5ab432ef
DV
3697 if (mode == DRM_MODE_DPMS_ON) {
3698 encoder->connectors_active = true;
3699
b2cabb0e 3700 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef
DV
3701 } else {
3702 encoder->connectors_active = false;
3703
b2cabb0e 3704 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef 3705 }
79e53945
JB
3706}
3707
0a91ca29
DV
 3708/* Cross check the actual hw state with our own modeset state tracking (and its
3709 * internal consistency). */
b980514c 3710static void intel_connector_check_state(struct intel_connector *connector)
79e53945 3711{
0a91ca29
DV
3712 if (connector->get_hw_state(connector)) {
3713 struct intel_encoder *encoder = connector->encoder;
3714 struct drm_crtc *crtc;
3715 bool encoder_enabled;
3716 enum pipe pipe;
3717
3718 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
3719 connector->base.base.id,
3720 drm_get_connector_name(&connector->base));
3721
3722 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
3723 "wrong connector dpms state\n");
3724 WARN(connector->base.encoder != &encoder->base,
3725 "active connector not linked to encoder\n");
3726 WARN(!encoder->connectors_active,
3727 "encoder->connectors_active not set\n");
3728
3729 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
3730 WARN(!encoder_enabled, "encoder not enabled\n");
3731 if (WARN_ON(!encoder->base.crtc))
3732 return;
3733
3734 crtc = encoder->base.crtc;
3735
3736 WARN(!crtc->enabled, "crtc not enabled\n");
3737 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
3738 WARN(pipe != to_intel_crtc(crtc)->pipe,
3739 "encoder active on the wrong pipe\n");
3740 }
79e53945
JB
3741}
3742
5ab432ef
DV
3743/* Even simpler default implementation, if there's really no special case to
3744 * consider. */
3745void intel_connector_dpms(struct drm_connector *connector, int mode)
79e53945 3746{
5ab432ef 3747 struct intel_encoder *encoder = intel_attached_encoder(connector);
d4270e57 3748
5ab432ef
DV
3749 /* All the simple cases only support two dpms states. */
3750 if (mode != DRM_MODE_DPMS_ON)
3751 mode = DRM_MODE_DPMS_OFF;
d4270e57 3752
5ab432ef
DV
3753 if (mode == connector->dpms)
3754 return;
3755
3756 connector->dpms = mode;
3757
3758 /* Only need to change hw state when actually enabled */
3759 if (encoder->base.crtc)
3760 intel_encoder_dpms(encoder, mode);
3761 else
8af6cf88 3762 WARN_ON(encoder->connectors_active != false);
0a91ca29 3763
b980514c 3764 intel_modeset_check_state(connector->dev);
79e53945
JB
3765}
3766
f0947c37
DV
3767/* Simple connector->get_hw_state implementation for encoders that support only
 3768 * one connector and no cloning, so that the encoder state determines the state
3769 * of the connector. */
3770bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 3771{
24929352 3772 enum pipe pipe = 0;
f0947c37 3773 struct intel_encoder *encoder = connector->encoder;
ea5b213a 3774
f0947c37 3775 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
3776}
3777
79e53945 3778static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
35313cde 3779 const struct drm_display_mode *mode,
79e53945
JB
3780 struct drm_display_mode *adjusted_mode)
3781{
2c07245f 3782 struct drm_device *dev = crtc->dev;
89749350 3783
bad720ff 3784 if (HAS_PCH_SPLIT(dev)) {
2c07245f 3785 /* FDI link clock is fixed at 2.7G */
2377b741
JB
3786 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3787 return false;
2c07245f 3788 }
89749350 3789
f9bef081
DV
3790 /* All interlaced capable intel hw wants timings in frames. Note though
3791 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3792 * timings, so we need to be careful not to clobber these.*/
3793 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3794 drm_mode_set_crtcinfo(adjusted_mode, 0);
89749350 3795
44f46b42
CW
3796 /* WaPruneModeWithIncorrectHsyncOffset: Cantiga+ cannot handle modes
3797 * with a hsync front porch of 0.
3798 */
3799 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
3800 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
3801 return false;
3802
79e53945
JB
3803 return true;
3804}
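/*
 * Rough arithmetic sketch of the FDI clock check above (hypothetical mode,
 * not part of the original file): with IRONLAKE_FDI_FREQ = 2,700,000 kHz the
 * test rejects any pixel clock above 2,700,000 * 4 / 3 = 3,600,000 kHz. A
 * 148,500 kHz 1080p mode passes, since 148,500 * 3 = 445,500 is well below
 * 2,700,000 * 4 = 10,800,000.
 */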
3805
25eb05fc
JB
3806static int valleyview_get_display_clock_speed(struct drm_device *dev)
3807{
3808 return 400000; /* FIXME */
3809}
3810
e70236a8
JB
3811static int i945_get_display_clock_speed(struct drm_device *dev)
3812{
3813 return 400000;
3814}
79e53945 3815
e70236a8 3816static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 3817{
e70236a8
JB
3818 return 333000;
3819}
79e53945 3820
e70236a8
JB
3821static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3822{
3823 return 200000;
3824}
79e53945 3825
e70236a8
JB
3826static int i915gm_get_display_clock_speed(struct drm_device *dev)
3827{
3828 u16 gcfgc = 0;
79e53945 3829
e70236a8
JB
3830 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3831
3832 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3833 return 133000;
3834 else {
3835 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3836 case GC_DISPLAY_CLOCK_333_MHZ:
3837 return 333000;
3838 default:
3839 case GC_DISPLAY_CLOCK_190_200_MHZ:
3840 return 190000;
79e53945 3841 }
e70236a8
JB
3842 }
3843}
3844
3845static int i865_get_display_clock_speed(struct drm_device *dev)
3846{
3847 return 266000;
3848}
3849
3850static int i855_get_display_clock_speed(struct drm_device *dev)
3851{
3852 u16 hpllcc = 0;
3853 /* Assume that the hardware is in the high speed state. This
3854 * should be the default.
3855 */
3856 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3857 case GC_CLOCK_133_200:
3858 case GC_CLOCK_100_200:
3859 return 200000;
3860 case GC_CLOCK_166_250:
3861 return 250000;
3862 case GC_CLOCK_100_133:
79e53945 3863 return 133000;
e70236a8 3864 }
79e53945 3865
e70236a8
JB
3866 /* Shouldn't happen */
3867 return 0;
3868}
79e53945 3869
e70236a8
JB
3870static int i830_get_display_clock_speed(struct drm_device *dev)
3871{
3872 return 133000;
79e53945
JB
3873}
3874
2c07245f
ZW
3875struct fdi_m_n {
3876 u32 tu;
3877 u32 gmch_m;
3878 u32 gmch_n;
3879 u32 link_m;
3880 u32 link_n;
3881};
3882
3883static void
3884fdi_reduce_ratio(u32 *num, u32 *den)
3885{
3886 while (*num > 0xffffff || *den > 0xffffff) {
3887 *num >>= 1;
3888 *den >>= 1;
3889 }
3890}
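/*
 * Worked example for fdi_reduce_ratio() (hypothetical inputs, sketch only):
 * with *num = 0x2000000 and *den = 3000000 the first shift leaves
 * 0x1000000 / 1500000, which still exceeds 0xffffff, so a second shift
 * yields 0x800000 / 750000; both now fit the 24-bit M/N register fields.
 * Halving both terms preserves the ratio, so only precision is lost.
 */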
3891
2c07245f 3892static void
f2b115e6
AJ
3893ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3894 int link_clock, struct fdi_m_n *m_n)
2c07245f 3895{
2c07245f
ZW
3896 m_n->tu = 64; /* default size */
3897
22ed1113
CW
3898 /* BUG_ON(pixel_clock > INT_MAX / 36); */
3899 m_n->gmch_m = bits_per_pixel * pixel_clock;
3900 m_n->gmch_n = link_clock * nlanes * 8;
2c07245f
ZW
3901 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3902
22ed1113
CW
3903 m_n->link_m = pixel_clock;
3904 m_n->link_n = link_clock;
2c07245f
ZW
3905 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3906}
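/*
 * Usage sketch (hypothetical values, not taken from this file): a 24 bpp
 * pipe on 4 FDI lanes with a 148,500 kHz pixel clock and a 270,000 kHz
 * link clock would give
 *
 *	struct fdi_m_n m_n;
 *	ironlake_compute_m_n(24, 4, 148500, 270000, &m_n);
 *	// gmch_m = 24 * 148500    = 3564000
 *	// gmch_n = 270000 * 4 * 8 = 8640000
 *	// link_m = 148500, link_n = 270000, tu = 64
 *
 * All values already fit in 24 bits, so fdi_reduce_ratio() leaves them as-is.
 */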
3907
a7615030
CW
3908static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3909{
72bbe58c
KP
3910 if (i915_panel_use_ssc >= 0)
3911 return i915_panel_use_ssc != 0;
3912 return dev_priv->lvds_use_ssc
435793df 3913 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
3914}
3915
5a354204
JB
3916/**
3917 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3918 * @crtc: CRTC structure
3b5c78a3 3919 * @mode: requested mode
5a354204
JB
3920 *
3921 * A pipe may be connected to one or more outputs. Based on the depth of the
3922 * attached framebuffer, choose a good color depth to use on the pipe.
3923 *
3924 * If possible, match the pipe depth to the fb depth. In some cases, this
3925 * isn't ideal, because the connected output supports a lesser or restricted
3926 * set of depths. Resolve that here:
3927 * LVDS typically supports only 6bpc, so clamp down in that case
3928 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3929 * Displays may support a restricted set as well, check EDID and clamp as
3930 * appropriate.
3b5c78a3 3931 * DP may want to dither down to 6bpc to fit larger modes
5a354204
JB
3932 *
3933 * RETURNS:
3934 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3935 * true if they don't match).
3936 */
3937static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
94352cf9 3938 struct drm_framebuffer *fb,
3b5c78a3
AJ
3939 unsigned int *pipe_bpp,
3940 struct drm_display_mode *mode)
5a354204
JB
3941{
3942 struct drm_device *dev = crtc->dev;
3943 struct drm_i915_private *dev_priv = dev->dev_private;
5a354204 3944 struct drm_connector *connector;
6c2b7c12 3945 struct intel_encoder *intel_encoder;
5a354204
JB
3946 unsigned int display_bpc = UINT_MAX, bpc;
3947
3948 /* Walk the encoders & connectors on this crtc, get min bpc */
6c2b7c12 3949 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5a354204
JB
3950
3951 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3952 unsigned int lvds_bpc;
3953
3954 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3955 LVDS_A3_POWER_UP)
3956 lvds_bpc = 8;
3957 else
3958 lvds_bpc = 6;
3959
3960 if (lvds_bpc < display_bpc) {
82820490 3961 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5a354204
JB
3962 display_bpc = lvds_bpc;
3963 }
3964 continue;
3965 }
3966
5a354204
JB
3967 /* Not one of the known troublemakers, check the EDID */
3968 list_for_each_entry(connector, &dev->mode_config.connector_list,
3969 head) {
6c2b7c12 3970 if (connector->encoder != &intel_encoder->base)
5a354204
JB
3971 continue;
3972
62ac41a6
JB
3973 /* Don't use an invalid EDID bpc value */
3974 if (connector->display_info.bpc &&
3975 connector->display_info.bpc < display_bpc) {
82820490 3976 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5a354204
JB
3977 display_bpc = connector->display_info.bpc;
3978 }
3979 }
3980
3981 /*
3982 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3983 * through, clamp it down. (Note: >12bpc will be caught below.)
3984 */
3985 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3986 if (display_bpc > 8 && display_bpc < 12) {
82820490 3987 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5a354204
JB
3988 display_bpc = 12;
3989 } else {
82820490 3990 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5a354204
JB
3991 display_bpc = 8;
3992 }
3993 }
3994 }
3995
3b5c78a3
AJ
3996 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3997 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3998 display_bpc = 6;
3999 }
4000
5a354204
JB
4001 /*
4002 * We could just drive the pipe at the highest bpc all the time and
4003 * enable dithering as needed, but that costs bandwidth. So choose
4004 * the minimum value that expresses the full color range of the fb but
4005 * also stays within the max display bpc discovered above.
4006 */
4007
94352cf9 4008 switch (fb->depth) {
5a354204
JB
4009 case 8:
4010 bpc = 8; /* since we go through a colormap */
4011 break;
4012 case 15:
4013 case 16:
4014 bpc = 6; /* min is 18bpp */
4015 break;
4016 case 24:
578393cd 4017 bpc = 8;
5a354204
JB
4018 break;
4019 case 30:
578393cd 4020 bpc = 10;
5a354204
JB
4021 break;
4022 case 48:
578393cd 4023 bpc = 12;
5a354204
JB
4024 break;
4025 default:
4026 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
4027 bpc = min((unsigned int)8, display_bpc);
4028 break;
4029 }
4030
578393cd
KP
4031 display_bpc = min(display_bpc, bpc);
4032
82820490
AJ
4033 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4034 bpc, display_bpc);
5a354204 4035
578393cd 4036 *pipe_bpp = display_bpc * 3;
5a354204
JB
4037
4038 return display_bpc != bpc;
4039}
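/*
 * Example resolution (hypothetical setup): a depth-30 framebuffer driving an
 * HDMI output whose EDID reports 8 bpc gives bpc = 10 from the fb and
 * display_bpc = 8 from the connector, so the pipe is programmed for 8 bpc
 * (*pipe_bpp = 24) and the function returns true to request dithering.
 */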
4040
a0c4da24
JB
4041static int vlv_get_refclk(struct drm_crtc *crtc)
4042{
4043 struct drm_device *dev = crtc->dev;
4044 struct drm_i915_private *dev_priv = dev->dev_private;
4045 int refclk = 27000; /* for DP & HDMI */
4046
4047 return 100000; /* only one validated so far */
4048
4049 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
4050 refclk = 96000;
4051 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4052 if (intel_panel_use_ssc(dev_priv))
4053 refclk = 100000;
4054 else
4055 refclk = 96000;
4056 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4057 refclk = 100000;
4058 }
4059
4060 return refclk;
4061}
4062
c65d77d8
JB
4063static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4064{
4065 struct drm_device *dev = crtc->dev;
4066 struct drm_i915_private *dev_priv = dev->dev_private;
4067 int refclk;
4068
a0c4da24
JB
4069 if (IS_VALLEYVIEW(dev)) {
4070 refclk = vlv_get_refclk(crtc);
4071 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
c65d77d8
JB
4072 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4073 refclk = dev_priv->lvds_ssc_freq * 1000;
4074 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4075 refclk / 1000);
4076 } else if (!IS_GEN2(dev)) {
4077 refclk = 96000;
4078 } else {
4079 refclk = 48000;
4080 }
4081
4082 return refclk;
4083}
4084
4085static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
4086 intel_clock_t *clock)
4087{
 4088 /* SDVO TV has fixed PLL values that depend on its clock range;
 4089 this mirrors the VBIOS setting. */
4090 if (adjusted_mode->clock >= 100000
4091 && adjusted_mode->clock < 140500) {
4092 clock->p1 = 2;
4093 clock->p2 = 10;
4094 clock->n = 3;
4095 clock->m1 = 16;
4096 clock->m2 = 8;
4097 } else if (adjusted_mode->clock >= 140500
4098 && adjusted_mode->clock <= 200000) {
4099 clock->p1 = 1;
4100 clock->p2 = 10;
4101 clock->n = 6;
4102 clock->m1 = 12;
4103 clock->m2 = 8;
4104 }
4105}
4106
a7516a05
JB
4107static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
4108 intel_clock_t *clock,
4109 intel_clock_t *reduced_clock)
4110{
4111 struct drm_device *dev = crtc->dev;
4112 struct drm_i915_private *dev_priv = dev->dev_private;
4113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4114 int pipe = intel_crtc->pipe;
4115 u32 fp, fp2 = 0;
4116
4117 if (IS_PINEVIEW(dev)) {
4118 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
4119 if (reduced_clock)
4120 fp2 = (1 << reduced_clock->n) << 16 |
4121 reduced_clock->m1 << 8 | reduced_clock->m2;
4122 } else {
4123 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
4124 if (reduced_clock)
4125 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
4126 reduced_clock->m2;
4127 }
4128
4129 I915_WRITE(FP0(pipe), fp);
4130
4131 intel_crtc->lowfreq_avail = false;
4132 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4133 reduced_clock && i915_powersave) {
4134 I915_WRITE(FP1(pipe), fp2);
4135 intel_crtc->lowfreq_avail = true;
4136 } else {
4137 I915_WRITE(FP1(pipe), fp);
4138 }
4139}
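/*
 * Encoding sketch for the FP0/FP1 writes above (non-Pineview path,
 * hypothetical divisors): n = 3, m1 = 16, m2 = 8 packs as
 *	fp = (3 << 16) | (16 << 8) | 8 = 0x31008
 * Pineview stores (1 << n) in the N field instead, hence the separate branch.
 */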
4140
93e537a1
DV
4141static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
4142 struct drm_display_mode *adjusted_mode)
4143{
4144 struct drm_device *dev = crtc->dev;
4145 struct drm_i915_private *dev_priv = dev->dev_private;
4146 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4147 int pipe = intel_crtc->pipe;
284d5df5 4148 u32 temp;
93e537a1
DV
4149
4150 temp = I915_READ(LVDS);
4151 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
4152 if (pipe == 1) {
4153 temp |= LVDS_PIPEB_SELECT;
4154 } else {
4155 temp &= ~LVDS_PIPEB_SELECT;
4156 }
 4157 /* set the corresponding LVDS_BORDER bit */
4158 temp |= dev_priv->lvds_border_bits;
4159 /* Set the B0-B3 data pairs corresponding to whether we're going to
4160 * set the DPLLs for dual-channel mode or not.
4161 */
4162 if (clock->p2 == 7)
4163 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
4164 else
4165 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
4166
4167 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4168 * appropriately here, but we need to look more thoroughly into how
4169 * panels behave in the two modes.
4170 */
4171 /* set the dithering flag on LVDS as needed */
4172 if (INTEL_INFO(dev)->gen >= 4) {
4173 if (dev_priv->lvds_dither)
4174 temp |= LVDS_ENABLE_DITHER;
4175 else
4176 temp &= ~LVDS_ENABLE_DITHER;
4177 }
284d5df5 4178 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
93e537a1 4179 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 4180 temp |= LVDS_HSYNC_POLARITY;
93e537a1 4181 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 4182 temp |= LVDS_VSYNC_POLARITY;
93e537a1
DV
4183 I915_WRITE(LVDS, temp);
4184}
4185
a0c4da24
JB
4186static void vlv_update_pll(struct drm_crtc *crtc,
4187 struct drm_display_mode *mode,
4188 struct drm_display_mode *adjusted_mode,
4189 intel_clock_t *clock, intel_clock_t *reduced_clock,
2a8f64ca 4190 int num_connectors)
a0c4da24
JB
4191{
4192 struct drm_device *dev = crtc->dev;
4193 struct drm_i915_private *dev_priv = dev->dev_private;
4194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4195 int pipe = intel_crtc->pipe;
4196 u32 dpll, mdiv, pdiv;
4197 u32 bestn, bestm1, bestm2, bestp1, bestp2;
2a8f64ca
VP
4198 bool is_sdvo;
4199 u32 temp;
4200
4201 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4202 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
a0c4da24 4203
2a8f64ca
VP
4204 dpll = DPLL_VGA_MODE_DIS;
4205 dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
4206 dpll |= DPLL_REFA_CLK_ENABLE_VLV;
4207 dpll |= DPLL_INTEGRATED_CLOCK_VLV;
4208
4209 I915_WRITE(DPLL(pipe), dpll);
4210 POSTING_READ(DPLL(pipe));
a0c4da24
JB
4211
4212 bestn = clock->n;
4213 bestm1 = clock->m1;
4214 bestm2 = clock->m2;
4215 bestp1 = clock->p1;
4216 bestp2 = clock->p2;
4217
2a8f64ca
VP
4218 /*
 4219 * In Valleyview the PLL and lane counter registers are exposed
 4220 * through the DPIO interface
4221 */
a0c4da24
JB
4222 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4223 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4224 mdiv |= ((bestn << DPIO_N_SHIFT));
4225 mdiv |= (1 << DPIO_POST_DIV_SHIFT);
4226 mdiv |= (1 << DPIO_K_SHIFT);
4227 mdiv |= DPIO_ENABLE_CALIBRATION;
4228 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
4229
4230 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4231
2a8f64ca 4232 pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
a0c4da24 4233 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
2a8f64ca
VP
4234 (7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
4235 (5 << DPIO_CLK_BIAS_CTL_SHIFT);
a0c4da24
JB
4236 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4237
2a8f64ca 4238 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
a0c4da24
JB
4239
4240 dpll |= DPLL_VCO_ENABLE;
4241 I915_WRITE(DPLL(pipe), dpll);
4242 POSTING_READ(DPLL(pipe));
4243 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4244 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4245
2a8f64ca
VP
4246 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
4247
4248 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4249 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4250
4251 I915_WRITE(DPLL(pipe), dpll);
4252
4253 /* Wait for the clocks to stabilize. */
4254 POSTING_READ(DPLL(pipe));
4255 udelay(150);
a0c4da24 4256
2a8f64ca
VP
4257 temp = 0;
4258 if (is_sdvo) {
4259 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
a0c4da24
JB
4260 if (temp > 1)
4261 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4262 else
4263 temp = 0;
a0c4da24 4264 }
2a8f64ca
VP
4265 I915_WRITE(DPLL_MD(pipe), temp);
4266 POSTING_READ(DPLL_MD(pipe));
a0c4da24 4267
2a8f64ca
VP
4268 /* Now program lane control registers */
 4269 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)
 4270 || intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
 4271 {
 4272 temp = 0x1000C4;
 4273 if (pipe == 1)
 4274 temp |= (1 << 21);
 4275 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
 4276 }
 4277 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 4278 {
 4279 temp = 0x1000C4;
 4280 if (pipe == 1)
 4281 temp |= (1 << 21);
 4282 intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
 4283 }
a0c4da24
JB
4284}
4285
eb1cbe48
DV
4286static void i9xx_update_pll(struct drm_crtc *crtc,
4287 struct drm_display_mode *mode,
4288 struct drm_display_mode *adjusted_mode,
4289 intel_clock_t *clock, intel_clock_t *reduced_clock,
4290 int num_connectors)
4291{
4292 struct drm_device *dev = crtc->dev;
4293 struct drm_i915_private *dev_priv = dev->dev_private;
4294 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4295 int pipe = intel_crtc->pipe;
4296 u32 dpll;
4297 bool is_sdvo;
4298
2a8f64ca
VP
4299 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4300
eb1cbe48
DV
4301 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4302 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4303
4304 dpll = DPLL_VGA_MODE_DIS;
4305
4306 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4307 dpll |= DPLLB_MODE_LVDS;
4308 else
4309 dpll |= DPLLB_MODE_DAC_SERIAL;
4310 if (is_sdvo) {
4311 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4312 if (pixel_multiplier > 1) {
4313 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4314 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4315 }
4316 dpll |= DPLL_DVO_HIGH_SPEED;
4317 }
4318 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4319 dpll |= DPLL_DVO_HIGH_SPEED;
4320
4321 /* compute bitmask from p1 value */
4322 if (IS_PINEVIEW(dev))
4323 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4324 else {
4325 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4326 if (IS_G4X(dev) && reduced_clock)
4327 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4328 }
4329 switch (clock->p2) {
4330 case 5:
4331 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4332 break;
4333 case 7:
4334 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4335 break;
4336 case 10:
4337 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4338 break;
4339 case 14:
4340 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4341 break;
4342 }
4343 if (INTEL_INFO(dev)->gen >= 4)
4344 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4345
4346 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4347 dpll |= PLL_REF_INPUT_TVCLKINBC;
4348 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4349 /* XXX: just matching BIOS for now */
4350 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4351 dpll |= 3;
4352 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4353 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4354 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4355 else
4356 dpll |= PLL_REF_INPUT_DREFCLK;
4357
4358 dpll |= DPLL_VCO_ENABLE;
4359 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4360 POSTING_READ(DPLL(pipe));
4361 udelay(150);
4362
4363 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4364 * This is an exception to the general rule that mode_set doesn't turn
4365 * things on.
4366 */
4367 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4368 intel_update_lvds(crtc, clock, adjusted_mode);
4369
4370 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4371 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4372
4373 I915_WRITE(DPLL(pipe), dpll);
4374
4375 /* Wait for the clocks to stabilize. */
4376 POSTING_READ(DPLL(pipe));
4377 udelay(150);
4378
4379 if (INTEL_INFO(dev)->gen >= 4) {
4380 u32 temp = 0;
4381 if (is_sdvo) {
4382 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4383 if (temp > 1)
4384 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4385 else
4386 temp = 0;
4387 }
4388 I915_WRITE(DPLL_MD(pipe), temp);
4389 } else {
4390 /* The pixel multiplier can only be updated once the
4391 * DPLL is enabled and the clocks are stable.
4392 *
4393 * So write it again.
4394 */
4395 I915_WRITE(DPLL(pipe), dpll);
4396 }
4397}
4398
4399static void i8xx_update_pll(struct drm_crtc *crtc,
4400 struct drm_display_mode *adjusted_mode,
2a8f64ca 4401 intel_clock_t *clock, intel_clock_t *reduced_clock,
eb1cbe48
DV
4402 int num_connectors)
4403{
4404 struct drm_device *dev = crtc->dev;
4405 struct drm_i915_private *dev_priv = dev->dev_private;
4406 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4407 int pipe = intel_crtc->pipe;
4408 u32 dpll;
4409
2a8f64ca
VP
4410 i9xx_update_pll_dividers(crtc, clock, reduced_clock);
4411
eb1cbe48
DV
4412 dpll = DPLL_VGA_MODE_DIS;
4413
4414 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4415 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4416 } else {
4417 if (clock->p1 == 2)
4418 dpll |= PLL_P1_DIVIDE_BY_TWO;
4419 else
4420 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4421 if (clock->p2 == 4)
4422 dpll |= PLL_P2_DIVIDE_BY_4;
4423 }
4424
4425 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4426 /* XXX: just matching BIOS for now */
4427 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4428 dpll |= 3;
4429 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4430 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4431 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4432 else
4433 dpll |= PLL_REF_INPUT_DREFCLK;
4434
4435 dpll |= DPLL_VCO_ENABLE;
4436 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4437 POSTING_READ(DPLL(pipe));
4438 udelay(150);
4439
eb1cbe48
DV
4440 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4441 * This is an exception to the general rule that mode_set doesn't turn
4442 * things on.
4443 */
4444 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4445 intel_update_lvds(crtc, clock, adjusted_mode);
4446
5b5896e4
DV
4447 I915_WRITE(DPLL(pipe), dpll);
4448
4449 /* Wait for the clocks to stabilize. */
4450 POSTING_READ(DPLL(pipe));
4451 udelay(150);
4452
eb1cbe48
DV
4453 /* The pixel multiplier can only be updated once the
4454 * DPLL is enabled and the clocks are stable.
4455 *
4456 * So write it again.
4457 */
4458 I915_WRITE(DPLL(pipe), dpll);
4459}
4460
b0e77b9c
PZ
4461static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
4462 struct drm_display_mode *mode,
4463 struct drm_display_mode *adjusted_mode)
4464{
4465 struct drm_device *dev = intel_crtc->base.dev;
4466 struct drm_i915_private *dev_priv = dev->dev_private;
4467 enum pipe pipe = intel_crtc->pipe;
4468 uint32_t vsyncshift;
4469
4470 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4471 /* the chip adds 2 halflines automatically */
4472 adjusted_mode->crtc_vtotal -= 1;
4473 adjusted_mode->crtc_vblank_end -= 1;
4474 vsyncshift = adjusted_mode->crtc_hsync_start
4475 - adjusted_mode->crtc_htotal / 2;
4476 } else {
4477 vsyncshift = 0;
4478 }
4479
4480 if (INTEL_INFO(dev)->gen > 3)
4481 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
4482
4483 I915_WRITE(HTOTAL(pipe),
4484 (adjusted_mode->crtc_hdisplay - 1) |
4485 ((adjusted_mode->crtc_htotal - 1) << 16));
4486 I915_WRITE(HBLANK(pipe),
4487 (adjusted_mode->crtc_hblank_start - 1) |
4488 ((adjusted_mode->crtc_hblank_end - 1) << 16));
4489 I915_WRITE(HSYNC(pipe),
4490 (adjusted_mode->crtc_hsync_start - 1) |
4491 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4492
4493 I915_WRITE(VTOTAL(pipe),
4494 (adjusted_mode->crtc_vdisplay - 1) |
4495 ((adjusted_mode->crtc_vtotal - 1) << 16));
4496 I915_WRITE(VBLANK(pipe),
4497 (adjusted_mode->crtc_vblank_start - 1) |
4498 ((adjusted_mode->crtc_vblank_end - 1) << 16));
4499 I915_WRITE(VSYNC(pipe),
4500 (adjusted_mode->crtc_vsync_start - 1) |
4501 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4502
4503 /* pipesrc controls the size that is scaled from, which should
4504 * always be the user's requested size.
4505 */
4506 I915_WRITE(PIPESRC(pipe),
4507 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
4508}
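/*
 * Register packing sketch for the timing writes above (hypothetical 1080p
 * mode): with crtc_hdisplay = 1920 and crtc_htotal = 2200,
 *	HTOTAL = (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f
 * i.e. active size minus one in the low half and total minus one in the
 * high half; the blank, sync and vertical registers follow the same scheme.
 */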
4509
f564048e
EA
4510static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4511 struct drm_display_mode *mode,
4512 struct drm_display_mode *adjusted_mode,
4513 int x, int y,
94352cf9 4514 struct drm_framebuffer *fb)
79e53945
JB
4515{
4516 struct drm_device *dev = crtc->dev;
4517 struct drm_i915_private *dev_priv = dev->dev_private;
4518 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4519 int pipe = intel_crtc->pipe;
80824003 4520 int plane = intel_crtc->plane;
c751ce4f 4521 int refclk, num_connectors = 0;
652c393a 4522 intel_clock_t clock, reduced_clock;
b0e77b9c 4523 u32 dspcntr, pipeconf;
eb1cbe48
DV
4524 bool ok, has_reduced_clock = false, is_sdvo = false;
4525 bool is_lvds = false, is_tv = false, is_dp = false;
5eddb70b 4526 struct intel_encoder *encoder;
d4906093 4527 const intel_limit_t *limit;
5c3b82e2 4528 int ret;
79e53945 4529
6c2b7c12 4530 for_each_encoder_on_crtc(dev, crtc, encoder) {
5eddb70b 4531 switch (encoder->type) {
79e53945
JB
4532 case INTEL_OUTPUT_LVDS:
4533 is_lvds = true;
4534 break;
4535 case INTEL_OUTPUT_SDVO:
7d57382e 4536 case INTEL_OUTPUT_HDMI:
79e53945 4537 is_sdvo = true;
5eddb70b 4538 if (encoder->needs_tv_clock)
e2f0ba97 4539 is_tv = true;
79e53945 4540 break;
79e53945
JB
4541 case INTEL_OUTPUT_TVOUT:
4542 is_tv = true;
4543 break;
a4fc5ed6
KP
4544 case INTEL_OUTPUT_DISPLAYPORT:
4545 is_dp = true;
4546 break;
79e53945 4547 }
43565a06 4548
c751ce4f 4549 num_connectors++;
79e53945
JB
4550 }
4551
c65d77d8 4552 refclk = i9xx_get_refclk(crtc, num_connectors);
79e53945 4553
d4906093
ML
4554 /*
4555 * Returns a set of divisors for the desired target clock with the given
4556 * refclk, or FALSE. The returned values represent the clock equation:
 4557 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4558 */
1b894b59 4559 limit = intel_limit(crtc, refclk);
cec2f356
SP
4560 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4561 &clock);
79e53945
JB
4562 if (!ok) {
4563 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 4564 return -EINVAL;
79e53945
JB
4565 }
4566
cda4b7d3 4567 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 4568 intel_crtc_update_cursor(crtc, true);
cda4b7d3 4569
ddc9003c 4570 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4571 /*
4572 * Ensure we match the reduced clock's P to the target clock.
4573 * If the clocks don't match, we can't switch the display clock
4574 * by using the FP0/FP1. In such case we will disable the LVDS
4575 * downclock feature.
4576 */
ddc9003c 4577 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
4578 dev_priv->lvds_downclock,
4579 refclk,
cec2f356 4580 &clock,
5eddb70b 4581 &reduced_clock);
7026d4ac
ZW
4582 }
4583
c65d77d8
JB
4584 if (is_sdvo && is_tv)
4585 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
7026d4ac 4586
eb1cbe48 4587 if (IS_GEN2(dev))
2a8f64ca
VP
4588 i8xx_update_pll(crtc, adjusted_mode, &clock,
4589 has_reduced_clock ? &reduced_clock : NULL,
4590 num_connectors);
a0c4da24 4591 else if (IS_VALLEYVIEW(dev))
2a8f64ca
VP
4592 vlv_update_pll(crtc, mode, adjusted_mode, &clock,
4593 has_reduced_clock ? &reduced_clock : NULL,
4594 num_connectors);
79e53945 4595 else
eb1cbe48
DV
4596 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4597 has_reduced_clock ? &reduced_clock : NULL,
4598 num_connectors);
79e53945
JB
4599
4600 /* setup pipeconf */
5eddb70b 4601 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
4602
4603 /* Set up the display plane register */
4604 dspcntr = DISPPLANE_GAMMA_ENABLE;
4605
929c77fb
EA
4606 if (pipe == 0)
4607 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4608 else
4609 dspcntr |= DISPPLANE_SEL_PIPE_B;
79e53945 4610
a6c45cf0 4611 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
79e53945
JB
4612 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4613 * core speed.
4614 *
4615 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4616 * pipe == 0 check?
4617 */
e70236a8
JB
4618 if (mode->clock >
4619 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5eddb70b 4620 pipeconf |= PIPECONF_DOUBLE_WIDE;
79e53945 4621 else
5eddb70b 4622 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
79e53945
JB
4623 }
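	/*
	 * Example (hypothetical clocks): with a 200,000 kHz display core
	 * clock the threshold is 180,000 kHz, so a 148,500 kHz mode stays
	 * single-wide while a 230,000 kHz mode would set
	 * PIPECONF_DOUBLE_WIDE.
	 */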
4624
3b5c78a3
AJ
4625 /* default to 8bpc */
4626 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4627 if (is_dp) {
0c96c65b 4628 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3b5c78a3
AJ
4629 pipeconf |= PIPECONF_BPP_6 |
4630 PIPECONF_DITHER_EN |
4631 PIPECONF_DITHER_TYPE_SP;
4632 }
4633 }
4634
19c03924
GB
4635 if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
4636 if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4637 pipeconf |= PIPECONF_BPP_6 |
4638 PIPECONF_ENABLE |
4639 I965_PIPECONF_ACTIVE;
4640 }
4641 }
4642
28c97730 4643 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
79e53945
JB
4644 drm_mode_debug_printmodeline(mode);
4645
a7516a05
JB
4646 if (HAS_PIPE_CXSR(dev)) {
4647 if (intel_crtc->lowfreq_avail) {
28c97730 4648 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
652c393a 4649 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
a7516a05 4650 } else {
28c97730 4651 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
652c393a
JB
4652 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4653 }
4654 }
4655
617cf884 4656 pipeconf &= ~PIPECONF_INTERLACE_MASK;
dbb02575 4657 if (!IS_GEN2(dev) &&
b0e77b9c 4658 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
734b4157 4659 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
b0e77b9c 4660 else
617cf884 4661 pipeconf |= PIPECONF_PROGRESSIVE;
734b4157 4662
b0e77b9c 4663 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5eddb70b
CW
4664
4665 /* pipesrc and dspsize control the size that is scaled from,
4666 * which should always be the user's requested size.
79e53945 4667 */
929c77fb
EA
4668 I915_WRITE(DSPSIZE(plane),
4669 ((mode->vdisplay - 1) << 16) |
4670 (mode->hdisplay - 1));
4671 I915_WRITE(DSPPOS(plane), 0);
2c07245f 4672
f564048e
EA
4673 I915_WRITE(PIPECONF(pipe), pipeconf);
4674 POSTING_READ(PIPECONF(pipe));
929c77fb 4675 intel_enable_pipe(dev_priv, pipe, false);
f564048e
EA
4676
4677 intel_wait_for_vblank(dev, pipe);
4678
f564048e
EA
4679 I915_WRITE(DSPCNTR(plane), dspcntr);
4680 POSTING_READ(DSPCNTR(plane));
4681
94352cf9 4682 ret = intel_pipe_set_base(crtc, x, y, fb);
f564048e
EA
4683
4684 intel_update_watermarks(dev);
4685
f564048e
EA
4686 return ret;
4687}
4688
9fb526db
KP
4689/*
4690 * Initialize reference clocks when the driver loads
4691 */
4692void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
4693{
4694 struct drm_i915_private *dev_priv = dev->dev_private;
4695 struct drm_mode_config *mode_config = &dev->mode_config;
13d83a67 4696 struct intel_encoder *encoder;
13d83a67
JB
4697 u32 temp;
4698 bool has_lvds = false;
199e5d79
KP
4699 bool has_cpu_edp = false;
4700 bool has_pch_edp = false;
4701 bool has_panel = false;
99eb6a01
KP
4702 bool has_ck505 = false;
4703 bool can_ssc = false;
13d83a67
JB
4704
4705 /* We need to take the global config into account */
199e5d79
KP
4706 list_for_each_entry(encoder, &mode_config->encoder_list,
4707 base.head) {
4708 switch (encoder->type) {
4709 case INTEL_OUTPUT_LVDS:
4710 has_panel = true;
4711 has_lvds = true;
4712 break;
4713 case INTEL_OUTPUT_EDP:
4714 has_panel = true;
4715 if (intel_encoder_is_pch_edp(&encoder->base))
4716 has_pch_edp = true;
4717 else
4718 has_cpu_edp = true;
4719 break;
13d83a67
JB
4720 }
4721 }
4722
99eb6a01
KP
4723 if (HAS_PCH_IBX(dev)) {
4724 has_ck505 = dev_priv->display_clock_mode;
4725 can_ssc = has_ck505;
4726 } else {
4727 has_ck505 = false;
4728 can_ssc = true;
4729 }
4730
4731 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4732 has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4733 has_ck505);
13d83a67
JB
4734
 4735 /* Ironlake: try to set up the display reference clock before
 4736 * enabling the DPLL. This is only under the driver's control after
 4737 * the PCH B stepping; previous chipset steppings are expected to
 4738 * ignore this setting.
4739 */
4740 temp = I915_READ(PCH_DREF_CONTROL);
4741 /* Always enable nonspread source */
4742 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 4743
99eb6a01
KP
4744 if (has_ck505)
4745 temp |= DREF_NONSPREAD_CK505_ENABLE;
4746 else
4747 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 4748
199e5d79
KP
4749 if (has_panel) {
4750 temp &= ~DREF_SSC_SOURCE_MASK;
4751 temp |= DREF_SSC_SOURCE_ENABLE;
13d83a67 4752
199e5d79 4753 /* SSC must be turned on before enabling the CPU output */
99eb6a01 4754 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4755 DRM_DEBUG_KMS("Using SSC on panel\n");
13d83a67 4756 temp |= DREF_SSC1_ENABLE;
e77166b5
DV
4757 } else
4758 temp &= ~DREF_SSC1_ENABLE;
199e5d79
KP
4759
4760 /* Get SSC going before enabling the outputs */
4761 I915_WRITE(PCH_DREF_CONTROL, temp);
4762 POSTING_READ(PCH_DREF_CONTROL);
4763 udelay(200);
4764
13d83a67
JB
4765 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4766
4767 /* Enable CPU source on CPU attached eDP */
199e5d79 4768 if (has_cpu_edp) {
99eb6a01 4769 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4770 DRM_DEBUG_KMS("Using SSC on eDP\n");
13d83a67 4771 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
199e5d79 4772 }
13d83a67
JB
4773 else
4774 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79
KP
4775 } else
4776 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4777
4778 I915_WRITE(PCH_DREF_CONTROL, temp);
4779 POSTING_READ(PCH_DREF_CONTROL);
4780 udelay(200);
4781 } else {
4782 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4783
4784 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4785
4786 /* Turn off CPU output */
4787 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4788
4789 I915_WRITE(PCH_DREF_CONTROL, temp);
4790 POSTING_READ(PCH_DREF_CONTROL);
4791 udelay(200);
4792
4793 /* Turn off the SSC source */
4794 temp &= ~DREF_SSC_SOURCE_MASK;
4795 temp |= DREF_SSC_SOURCE_DISABLE;
4796
4797 /* Turn off SSC1 */
 4798 temp &= ~DREF_SSC1_ENABLE;
4799
13d83a67
JB
4800 I915_WRITE(PCH_DREF_CONTROL, temp);
4801 POSTING_READ(PCH_DREF_CONTROL);
4802 udelay(200);
4803 }
4804}
4805
d9d444cb
JB
4806static int ironlake_get_refclk(struct drm_crtc *crtc)
4807{
4808 struct drm_device *dev = crtc->dev;
4809 struct drm_i915_private *dev_priv = dev->dev_private;
4810 struct intel_encoder *encoder;
d9d444cb
JB
4811 struct intel_encoder *edp_encoder = NULL;
4812 int num_connectors = 0;
4813 bool is_lvds = false;
4814
6c2b7c12 4815 for_each_encoder_on_crtc(dev, crtc, encoder) {
d9d444cb
JB
4816 switch (encoder->type) {
4817 case INTEL_OUTPUT_LVDS:
4818 is_lvds = true;
4819 break;
4820 case INTEL_OUTPUT_EDP:
4821 edp_encoder = encoder;
4822 break;
4823 }
4824 num_connectors++;
4825 }
4826
4827 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4828 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4829 dev_priv->lvds_ssc_freq);
4830 return dev_priv->lvds_ssc_freq * 1000;
4831 }
4832
4833 return 120000;
4834}
4835
c8203565
PZ
4836static void ironlake_set_pipeconf(struct drm_crtc *crtc,
4837 struct drm_display_mode *adjusted_mode,
4838 bool dither)
4839{
4840 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4841 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4842 int pipe = intel_crtc->pipe;
4843 uint32_t val;
4844
4845 val = I915_READ(PIPECONF(pipe));
4846
4847 val &= ~PIPE_BPC_MASK;
4848 switch (intel_crtc->bpp) {
4849 case 18:
4850 val |= PIPE_6BPC;
4851 break;
4852 case 24:
4853 val |= PIPE_8BPC;
4854 break;
4855 case 30:
4856 val |= PIPE_10BPC;
4857 break;
4858 case 36:
4859 val |= PIPE_12BPC;
4860 break;
4861 default:
cc769b62
PZ
4862 /* Case prevented by intel_choose_pipe_bpp_dither. */
4863 BUG();
c8203565
PZ
4864 }
4865
4866 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4867 if (dither)
4868 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
4869
4870 val &= ~PIPECONF_INTERLACE_MASK;
4871 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4872 val |= PIPECONF_INTERLACED_ILK;
4873 else
4874 val |= PIPECONF_PROGRESSIVE;
4875
4876 I915_WRITE(PIPECONF(pipe), val);
4877 POSTING_READ(PIPECONF(pipe));
4878}
4879
ee2b0b38
PZ
4880static void haswell_set_pipeconf(struct drm_crtc *crtc,
4881 struct drm_display_mode *adjusted_mode,
4882 bool dither)
4883{
4884 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4885 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4886 int pipe = intel_crtc->pipe;
4887 uint32_t val;
4888
4889 val = I915_READ(PIPECONF(pipe));
4890
4891 val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4892 if (dither)
4893 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
4894
4895 val &= ~PIPECONF_INTERLACE_MASK_HSW;
4896 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
4897 val |= PIPECONF_INTERLACED_ILK;
4898 else
4899 val |= PIPECONF_PROGRESSIVE;
4900
4901 I915_WRITE(PIPECONF(pipe), val);
4902 POSTING_READ(PIPECONF(pipe));
4903}
4904
6591c6e4
PZ
4905static bool ironlake_compute_clocks(struct drm_crtc *crtc,
4906 struct drm_display_mode *adjusted_mode,
4907 intel_clock_t *clock,
4908 bool *has_reduced_clock,
4909 intel_clock_t *reduced_clock)
4910{
4911 struct drm_device *dev = crtc->dev;
4912 struct drm_i915_private *dev_priv = dev->dev_private;
4913 struct intel_encoder *intel_encoder;
4914 int refclk;
4915 const intel_limit_t *limit;
4916 bool ret, is_sdvo = false, is_tv = false, is_lvds = false;
4917
4918 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4919 switch (intel_encoder->type) {
4920 case INTEL_OUTPUT_LVDS:
4921 is_lvds = true;
4922 break;
4923 case INTEL_OUTPUT_SDVO:
4924 case INTEL_OUTPUT_HDMI:
4925 is_sdvo = true;
4926 if (intel_encoder->needs_tv_clock)
4927 is_tv = true;
4928 break;
4929 case INTEL_OUTPUT_TVOUT:
4930 is_tv = true;
4931 break;
4932 }
4933 }
4934
4935 refclk = ironlake_get_refclk(crtc);
4936
4937 /*
4938 * Returns a set of divisors for the desired target clock with the given
4939 * refclk, or FALSE. The returned values represent the clock equation:
 4940 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4941 */
4942 limit = intel_limit(crtc, refclk);
4943 ret = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4944 clock);
4945 if (!ret)
4946 return false;
4947
4948 if (is_lvds && dev_priv->lvds_downclock_avail) {
4949 /*
4950 * Ensure we match the reduced clock's P to the target clock.
4951 * If the clocks don't match, we can't switch the display clock
4952 * by using the FP0/FP1. In such case we will disable the LVDS
4953 * downclock feature.
4954 */
4955 *has_reduced_clock = limit->find_pll(limit, crtc,
4956 dev_priv->lvds_downclock,
4957 refclk,
4958 clock,
4959 reduced_clock);
4960 }
4961
4962 if (is_sdvo && is_tv)
4963 i9xx_adjust_sdvo_tv_clock(adjusted_mode, clock);
4964
4965 return true;
4966}
4967
f48d8f23
PZ
4968static void ironlake_set_m_n(struct drm_crtc *crtc,
4969 struct drm_display_mode *mode,
4970 struct drm_display_mode *adjusted_mode)
4971{
4972 struct drm_device *dev = crtc->dev;
4973 struct drm_i915_private *dev_priv = dev->dev_private;
4974 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4975 enum pipe pipe = intel_crtc->pipe;
4976 struct intel_encoder *intel_encoder, *edp_encoder = NULL;
4977 struct fdi_m_n m_n = {0};
4978 int target_clock, pixel_multiplier, lane, link_bw;
4979 bool is_dp = false, is_cpu_edp = false;
4980
4981 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
4982 switch (intel_encoder->type) {
4983 case INTEL_OUTPUT_DISPLAYPORT:
4984 is_dp = true;
4985 break;
4986 case INTEL_OUTPUT_EDP:
4987 is_dp = true;
4988 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
4989 is_cpu_edp = true;
4990 edp_encoder = intel_encoder;
4991 break;
4992 }
4993 }
4994
4995 /* FDI link */
4996 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4997 lane = 0;
4998 /* CPU eDP doesn't require FDI link, so just set DP M/N
4999 according to current link config */
5000 if (is_cpu_edp) {
5001 intel_edp_link_config(edp_encoder, &lane, &link_bw);
5002 } else {
5003 /* FDI is a binary signal running at ~2.7GHz, encoding
5004 * each output octet as 10 bits. The actual frequency
5005 * is stored as a divider into a 100MHz clock, and the
5006 * mode pixel clock is stored in units of 1KHz.
5007 * Hence the bw of each lane in terms of the mode signal
5008 * is:
5009 */
5010 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
5011 }
5012
5013 /* [e]DP over FDI requires target mode clock instead of link clock. */
5014 if (edp_encoder)
5015 target_clock = intel_edp_target_clock(edp_encoder, mode);
5016 else if (is_dp)
5017 target_clock = mode->clock;
5018 else
5019 target_clock = adjusted_mode->clock;
5020
5021 if (!lane) {
5022 /*
5023 * Account for spread spectrum to avoid
5024 * oversubscribing the link. Max center spread
5025 * is 2.5%; use 5% for safety's sake.
5026 */
5027 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
5028 lane = bps / (link_bw * 8) + 1;
5029 }
5030
5031 intel_crtc->fdi_lanes = lane;
5032
5033 if (pixel_multiplier > 1)
5034 link_bw *= pixel_multiplier;
5035 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
5036 &m_n);
5037
5038 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
5039 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
5040 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
5041 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
5042}
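/*
 * Worked example of the lane computation above (hypothetical 1080p panel):
 * target_clock = 148,500 kHz, bpp = 24 and link_bw = 270,000 kHz give
 *	bps  = 148500 * 24 * 21 / 20      = 3742200
 *	lane = 3742200 / (270000 * 8) + 1 = 2
 * so two FDI lanes carry the mode even with the 5% spread-spectrum margin.
 */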
5043
de13a2e3
PZ
5044static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
5045 struct drm_display_mode *adjusted_mode,
5046 intel_clock_t *clock, u32 fp)
79e53945 5047{
de13a2e3 5048 struct drm_crtc *crtc = &intel_crtc->base;
79e53945
JB
5049 struct drm_device *dev = crtc->dev;
5050 struct drm_i915_private *dev_priv = dev->dev_private;
de13a2e3
PZ
5051 struct intel_encoder *intel_encoder;
5052 uint32_t dpll;
5053 int factor, pixel_multiplier, num_connectors = 0;
5054 bool is_lvds = false, is_sdvo = false, is_tv = false;
5055 bool is_dp = false, is_cpu_edp = false;
79e53945 5056
de13a2e3
PZ
5057 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5058 switch (intel_encoder->type) {
79e53945
JB
5059 case INTEL_OUTPUT_LVDS:
5060 is_lvds = true;
5061 break;
5062 case INTEL_OUTPUT_SDVO:
7d57382e 5063 case INTEL_OUTPUT_HDMI:
79e53945 5064 is_sdvo = true;
de13a2e3 5065 if (intel_encoder->needs_tv_clock)
e2f0ba97 5066 is_tv = true;
79e53945 5067 break;
79e53945
JB
5068 case INTEL_OUTPUT_TVOUT:
5069 is_tv = true;
5070 break;
a4fc5ed6
KP
5071 case INTEL_OUTPUT_DISPLAYPORT:
5072 is_dp = true;
5073 break;
32f9d658 5074 case INTEL_OUTPUT_EDP:
e3aef172 5075 is_dp = true;
de13a2e3 5076 if (!intel_encoder_is_pch_edp(&intel_encoder->base))
e3aef172 5077 is_cpu_edp = true;
32f9d658 5078 break;
79e53945 5079 }
43565a06 5080
c751ce4f 5081 num_connectors++;
79e53945
JB
5082 }
5083
c1858123 5084 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
5085 factor = 21;
5086 if (is_lvds) {
5087 if ((intel_panel_use_ssc(dev_priv) &&
5088 dev_priv->lvds_ssc_freq == 100) ||
5089 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
5090 factor = 25;
5091 } else if (is_sdvo && is_tv)
5092 factor = 20;
c1858123 5093
de13a2e3 5094 if (clock->m < factor * clock->n)
8febb297 5095 fp |= FP_CB_TUNE;
2c07245f 5096
5eddb70b 5097 dpll = 0;
2c07245f 5098
a07d6787
EA
5099 if (is_lvds)
5100 dpll |= DPLLB_MODE_LVDS;
5101 else
5102 dpll |= DPLLB_MODE_DAC_SERIAL;
5103 if (is_sdvo) {
de13a2e3 5104 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
a07d6787
EA
5105 if (pixel_multiplier > 1) {
5106 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
79e53945 5107 }
a07d6787
EA
5108 dpll |= DPLL_DVO_HIGH_SPEED;
5109 }
e3aef172 5110 if (is_dp && !is_cpu_edp)
a07d6787 5111 dpll |= DPLL_DVO_HIGH_SPEED;
79e53945 5112
a07d6787 5113 /* compute bitmask from p1 value */
de13a2e3 5114 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 5115 /* also FPA1 */
de13a2e3 5116 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
a07d6787 5117
de13a2e3 5118 switch (clock->p2) {
a07d6787
EA
5119 case 5:
5120 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5121 break;
5122 case 7:
5123 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5124 break;
5125 case 10:
5126 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5127 break;
5128 case 14:
5129 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5130 break;
79e53945
JB
5131 }
5132
43565a06
KH
5133 if (is_sdvo && is_tv)
5134 dpll |= PLL_REF_INPUT_TVCLKINBC;
5135 else if (is_tv)
79e53945 5136 /* XXX: just matching BIOS for now */
43565a06 5137 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
79e53945 5138 dpll |= 3;
a7615030 5139 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 5140 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
5141 else
5142 dpll |= PLL_REF_INPUT_DREFCLK;
5143
de13a2e3
PZ
5144 return dpll;
5145}
5146
5147static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5148 struct drm_display_mode *mode,
5149 struct drm_display_mode *adjusted_mode,
5150 int x, int y,
5151 struct drm_framebuffer *fb)
5152{
5153 struct drm_device *dev = crtc->dev;
5154 struct drm_i915_private *dev_priv = dev->dev_private;
5155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5156 int pipe = intel_crtc->pipe;
5157 int plane = intel_crtc->plane;
5158 int num_connectors = 0;
5159 intel_clock_t clock, reduced_clock;
5160 u32 dpll, fp = 0, fp2 = 0;
e2f12b07
PZ
5161 bool ok, has_reduced_clock = false;
5162 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
de13a2e3
PZ
5163 struct intel_encoder *encoder;
5164 u32 temp;
5165 int ret;
5166 bool dither;
de13a2e3
PZ
5167
5168 for_each_encoder_on_crtc(dev, crtc, encoder) {
5169 switch (encoder->type) {
5170 case INTEL_OUTPUT_LVDS:
5171 is_lvds = true;
5172 break;
de13a2e3
PZ
5173 case INTEL_OUTPUT_DISPLAYPORT:
5174 is_dp = true;
5175 break;
5176 case INTEL_OUTPUT_EDP:
5177 is_dp = true;
e2f12b07 5178 if (!intel_encoder_is_pch_edp(&encoder->base))
de13a2e3
PZ
5179 is_cpu_edp = true;
5180 break;
5181 }
5182
5183 num_connectors++;
5184 }
5185
5dc5298b
PZ
5186 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5187 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
5188
de13a2e3
PZ
5189 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5190 &has_reduced_clock, &reduced_clock);
5191 if (!ok) {
5192 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5193 return -EINVAL;
5194 }
5195
5196 /* Ensure that the cursor is valid for the new mode before changing... */
5197 intel_crtc_update_cursor(crtc, true);
5198
5199 /* determine panel color depth */
5200 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, mode);
5201 if (is_lvds && dev_priv->lvds_dither)
5202 dither = true;
5203
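/* FP0/FP1 pack the feedback dividers: N in bits 16+, M1 in bits 8-15 and
 * M2 in bits 0-7. fp2 holds the reduced-clock dividers used for LVDS
 * downclocking when a lower dotclock is available.
 */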
5204 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5205 if (has_reduced_clock)
5206 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5207 reduced_clock.m2;
5208
5209 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
5210
f7cb34d4 5211 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
79e53945
JB
5212 drm_mode_debug_printmodeline(mode);
5213
5dc5298b
PZ
5214 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
5215 if (!is_cpu_edp) {
ee7b9f93 5216 struct intel_pch_pll *pll;
4b645f14 5217
ee7b9f93
JB
5218 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5219 if (pll == NULL) {
5220 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5221 pipe);
4b645f14
JB
5222 return -EINVAL;
5223 }
ee7b9f93
JB
5224 } else
5225 intel_put_pch_pll(intel_crtc);
79e53945
JB
5226
5227 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
5228 * This is an exception to the general rule that mode_set doesn't turn
5229 * things on.
5230 */
5231 if (is_lvds) {
fae14981 5232 temp = I915_READ(PCH_LVDS);
5eddb70b 5233 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
7885d205
JB
5234 if (HAS_PCH_CPT(dev)) {
5235 temp &= ~PORT_TRANS_SEL_MASK;
4b645f14 5236 temp |= PORT_TRANS_SEL_CPT(pipe);
7885d205
JB
5237 } else {
5238 if (pipe == 1)
5239 temp |= LVDS_PIPEB_SELECT;
5240 else
5241 temp &= ~LVDS_PIPEB_SELECT;
5242 }
4b645f14 5243
a3e17eb8 5244 /* set the corresponding LVDS_BORDER bit */
5eddb70b 5245 temp |= dev_priv->lvds_border_bits;
79e53945
JB
5246 /* Set the B0-B3 data pairs corresponding to whether we're going to
5247 * set the DPLLs for dual-channel mode or not.
5248 */
5249 if (clock.p2 == 7)
5eddb70b 5250 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
79e53945 5251 else
5eddb70b 5252 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
79e53945
JB
5253
5254 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
5255 * appropriately here, but we need to look more thoroughly into how
5256 * panels behave in the two modes.
5257 */
284d5df5 5258 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
aa9b500d 5259 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 5260 temp |= LVDS_HSYNC_POLARITY;
aa9b500d 5261 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 5262 temp |= LVDS_VSYNC_POLARITY;
fae14981 5263 I915_WRITE(PCH_LVDS, temp);
79e53945 5264 }
434ed097 5265
e3aef172 5266 if (is_dp && !is_cpu_edp) {
a4fc5ed6 5267 intel_dp_set_m_n(crtc, mode, adjusted_mode);
8febb297 5268 } else {
8db9d77b 5269 /* For non-DP output, clear any trans DP clock recovery setting.*/
9db4a9c7
JB
5270 I915_WRITE(TRANSDATA_M1(pipe), 0);
5271 I915_WRITE(TRANSDATA_N1(pipe), 0);
5272 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5273 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
8db9d77b 5274 }
79e53945 5275
ee7b9f93
JB
5276 if (intel_crtc->pch_pll) {
5277 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5eddb70b 5278
32f9d658 5279 /* Wait for the clocks to stabilize. */
ee7b9f93 5280 POSTING_READ(intel_crtc->pch_pll->pll_reg);
32f9d658
ZW
5281 udelay(150);
5282
8febb297
EA
5283 /* The pixel multiplier can only be updated once the
5284 * DPLL is enabled and the clocks are stable.
5285 *
5286 * So write it again.
5287 */
ee7b9f93 5288 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
79e53945 5289 }
79e53945 5290
5eddb70b 5291 intel_crtc->lowfreq_avail = false;
ee7b9f93 5292 if (intel_crtc->pch_pll) {
4b645f14 5293 if (is_lvds && has_reduced_clock && i915_powersave) {
ee7b9f93 5294 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4b645f14 5295 intel_crtc->lowfreq_avail = true;
4b645f14 5296 } else {
ee7b9f93 5297 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
652c393a
JB
5298 }
5299 }
5300
b0e77b9c 5301 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
2c07245f 5302
f48d8f23 5303 ironlake_set_m_n(crtc, mode, adjusted_mode);
2c07245f 5304
e3aef172 5305 if (is_cpu_edp)
8febb297 5306 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
2c07245f 5307
c8203565 5308 ironlake_set_pipeconf(crtc, adjusted_mode, dither);
79e53945 5309
9d0498a2 5310 intel_wait_for_vblank(dev, pipe);
79e53945 5311
a1f9e77e
PZ
5312 /* Set up the display plane register */
5313 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
b24e7179 5314 POSTING_READ(DSPCNTR(plane));
79e53945 5315
94352cf9 5316 ret = intel_pipe_set_base(crtc, x, y, fb);
7662c8bd
SL
5317
5318 intel_update_watermarks(dev);
5319
1f8eeabf
ED
5320 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5321
1f803ee5 5322 return ret;
79e53945
JB
5323}
5324
09b4ddf9
PZ
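/*
 * Haswell mode set. The DDI PLL is programmed through intel_ddi_pll_mode_set();
 * the IBX/CPT branches below appear to be transitional carry-over from the
 * Ironlake path and are not expected to run with an LPT PCH (hence the WARNs).
 */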
5325static int haswell_crtc_mode_set(struct drm_crtc *crtc,
5326 struct drm_display_mode *mode,
5327 struct drm_display_mode *adjusted_mode,
5328 int x, int y,
5329 struct drm_framebuffer *fb)
5330{
5331 struct drm_device *dev = crtc->dev;
5332 struct drm_i915_private *dev_priv = dev->dev_private;
5333 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5334 int pipe = intel_crtc->pipe;
5335 int plane = intel_crtc->plane;
5336 int num_connectors = 0;
5337 intel_clock_t clock, reduced_clock;
5dc5298b 5338 u32 dpll = 0, fp = 0, fp2 = 0;
09b4ddf9
PZ
5339 bool ok, has_reduced_clock = false;
5340 bool is_lvds = false, is_dp = false, is_cpu_edp = false;
5341 struct intel_encoder *encoder;
5342 u32 temp;
5343 int ret;
5344 bool dither;
5345
5346 for_each_encoder_on_crtc(dev, crtc, encoder) {
5347 switch (encoder->type) {
5348 case INTEL_OUTPUT_LVDS:
5349 is_lvds = true;
5350 break;
5351 case INTEL_OUTPUT_DISPLAYPORT:
5352 is_dp = true;
5353 break;
5354 case INTEL_OUTPUT_EDP:
5355 is_dp = true;
5356 if (!intel_encoder_is_pch_edp(&encoder->base))
5357 is_cpu_edp = true;
5358 break;
5359 }
5360
5361 num_connectors++;
5362 }
5363
5dc5298b
PZ
5364 /* We are not sure yet this won't happen. */
5365 WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
5366 INTEL_PCH_TYPE(dev));
5367
5368 WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
5369 num_connectors, pipe_name(pipe));
5370
1ce42920
PZ
5371 WARN_ON(I915_READ(PIPECONF(pipe)) &
5372 (PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
5373
5374 WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
5375
6441ab5f
PZ
5376 if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
5377 return -EINVAL;
5378
5dc5298b
PZ
5379 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5380 ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
5381 &has_reduced_clock,
5382 &reduced_clock);
5383 if (!ok) {
5384 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5385 return -EINVAL;
5386 }
09b4ddf9
PZ
5387 }
5388
5389 /* Ensure that the cursor is valid for the new mode before changing... */
5390 intel_crtc_update_cursor(crtc, true);
5391
5392 /* determine panel color depth */
5393 dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, mode);
5394 if (is_lvds && dev_priv->lvds_dither)
5395 dither = true;
5396
09b4ddf9
PZ
5397 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
5398 drm_mode_debug_printmodeline(mode);
5399
5dc5298b
PZ
5400 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5401 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
5402 if (has_reduced_clock)
5403 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
5404 reduced_clock.m2;
5405
5406 dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
5407 fp);
5408
5409 /* CPU eDP is the only output that doesn't need a PCH PLL of its
5410 * own on pre-Haswell/LPT generation */
5411 if (!is_cpu_edp) {
5412 struct intel_pch_pll *pll;
5413
5414 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5415 if (pll == NULL) {
5416 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
5417 pipe);
5418 return -EINVAL;
5419 }
5420 } else
5421 intel_put_pch_pll(intel_crtc);
09b4ddf9 5422
5dc5298b
PZ
5423 /* The LVDS pin pair needs to be on before the DPLLs are
5424 * enabled. This is an exception to the general rule that
5425 * mode_set doesn't turn things on.
5426 */
5427 if (is_lvds) {
5428 temp = I915_READ(PCH_LVDS);
5429 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
5430 if (HAS_PCH_CPT(dev)) {
5431 temp &= ~PORT_TRANS_SEL_MASK;
5432 temp |= PORT_TRANS_SEL_CPT(pipe);
5433 } else {
5434 if (pipe == 1)
5435 temp |= LVDS_PIPEB_SELECT;
5436 else
5437 temp &= ~LVDS_PIPEB_SELECT;
5438 }
09b4ddf9 5439
5dc5298b
PZ
 5440 /* set the corresponding LVDS_BORDER bit */
5441 temp |= dev_priv->lvds_border_bits;
5442 /* Set the B0-B3 data pairs corresponding to whether
5443 * we're going to set the DPLLs for dual-channel mode or
5444 * not.
5445 */
5446 if (clock.p2 == 7)
5447 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
09b4ddf9 5448 else
5dc5298b
PZ
5449 temp &= ~(LVDS_B0B3_POWER_UP |
5450 LVDS_CLKB_POWER_UP);
5451
5452 /* It would be nice to set 24 vs 18-bit mode
5453 * (LVDS_A3_POWER_UP) appropriately here, but we need to
5454 * look more thoroughly into how panels behave in the
5455 * two modes.
5456 */
5457 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5458 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5459 temp |= LVDS_HSYNC_POLARITY;
5460 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5461 temp |= LVDS_VSYNC_POLARITY;
5462 I915_WRITE(PCH_LVDS, temp);
09b4ddf9 5463 }
09b4ddf9
PZ
5464 }
5465
5466 if (is_dp && !is_cpu_edp) {
5467 intel_dp_set_m_n(crtc, mode, adjusted_mode);
5468 } else {
5dc5298b
PZ
5469 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5470 /* For non-DP output, clear any trans DP clock recovery
5471 * setting.*/
5472 I915_WRITE(TRANSDATA_M1(pipe), 0);
5473 I915_WRITE(TRANSDATA_N1(pipe), 0);
5474 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
5475 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
5476 }
09b4ddf9
PZ
5477 }
5478
5479 intel_crtc->lowfreq_avail = false;
5dc5298b
PZ
5480 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
5481 if (intel_crtc->pch_pll) {
5482 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5483
5484 /* Wait for the clocks to stabilize. */
5485 POSTING_READ(intel_crtc->pch_pll->pll_reg);
5486 udelay(150);
5487
5488 /* The pixel multiplier can only be updated once the
5489 * DPLL is enabled and the clocks are stable.
5490 *
5491 * So write it again.
5492 */
5493 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5494 }
5495
5496 if (intel_crtc->pch_pll) {
5497 if (is_lvds && has_reduced_clock && i915_powersave) {
5498 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
5499 intel_crtc->lowfreq_avail = true;
5500 } else {
5501 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
5502 }
09b4ddf9
PZ
5503 }
5504 }
5505
5506 intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
5507
1eb8dfec
PZ
5508 if (!is_dp || is_cpu_edp)
5509 ironlake_set_m_n(crtc, mode, adjusted_mode);
09b4ddf9 5510
5dc5298b
PZ
5511 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5512 if (is_cpu_edp)
5513 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
09b4ddf9 5514
ee2b0b38 5515 haswell_set_pipeconf(crtc, adjusted_mode, dither);
09b4ddf9 5516
09b4ddf9
PZ
5517 /* Set up the display plane register */
5518 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
5519 POSTING_READ(DSPCNTR(plane));
5520
5521 ret = intel_pipe_set_base(crtc, x, y, fb);
5522
5523 intel_update_watermarks(dev);
5524
5525 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
5526
5527 return ret;
5528}
5529
f564048e
EA
5530static int intel_crtc_mode_set(struct drm_crtc *crtc,
5531 struct drm_display_mode *mode,
5532 struct drm_display_mode *adjusted_mode,
5533 int x, int y,
94352cf9 5534 struct drm_framebuffer *fb)
f564048e
EA
5535{
5536 struct drm_device *dev = crtc->dev;
5537 struct drm_i915_private *dev_priv = dev->dev_private;
0b701d27
EA
5538 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5539 int pipe = intel_crtc->pipe;
f564048e
EA
5540 int ret;
5541
0b701d27 5542 drm_vblank_pre_modeset(dev, pipe);
7662c8bd 5543
f564048e 5544 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
94352cf9 5545 x, y, fb);
79e53945 5546 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 5547
1f803ee5 5548 return ret;
79e53945
JB
5549}
5550
3a9627f4
WF
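/* Check whether the ELD already held in the hardware buffer matches the
 * connector's ELD. Returns true (nothing to rewrite) when both are empty or
 * when every dword of the hardware copy matches; false otherwise.
 */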
5551static bool intel_eld_uptodate(struct drm_connector *connector,
5552 int reg_eldv, uint32_t bits_eldv,
5553 int reg_elda, uint32_t bits_elda,
5554 int reg_edid)
5555{
5556 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5557 uint8_t *eld = connector->eld;
5558 uint32_t i;
5559
5560 i = I915_READ(reg_eldv);
5561 i &= bits_eldv;
5562
5563 if (!eld[0])
5564 return !i;
5565
5566 if (!i)
5567 return false;
5568
5569 i = I915_READ(reg_elda);
5570 i &= ~bits_elda;
5571 I915_WRITE(reg_elda, i);
5572
5573 for (i = 0; i < eld[2]; i++)
5574 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
5575 return false;
5576
5577 return true;
5578}
5579
e0dac65e
WF
5580static void g4x_write_eld(struct drm_connector *connector,
5581 struct drm_crtc *crtc)
5582{
5583 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5584 uint8_t *eld = connector->eld;
5585 uint32_t eldv;
5586 uint32_t len;
5587 uint32_t i;
5588
5589 i = I915_READ(G4X_AUD_VID_DID);
5590
5591 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
5592 eldv = G4X_ELDV_DEVCL_DEVBLC;
5593 else
5594 eldv = G4X_ELDV_DEVCTG;
5595
3a9627f4
WF
5596 if (intel_eld_uptodate(connector,
5597 G4X_AUD_CNTL_ST, eldv,
5598 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
5599 G4X_HDMIW_HDMIEDID))
5600 return;
5601
e0dac65e
WF
5602 i = I915_READ(G4X_AUD_CNTL_ST);
5603 i &= ~(eldv | G4X_ELD_ADDR);
5604 len = (i >> 9) & 0x1f; /* ELD buffer size */
5605 I915_WRITE(G4X_AUD_CNTL_ST, i);
5606
5607 if (!eld[0])
5608 return;
5609
5610 len = min_t(uint8_t, eld[2], len);
5611 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5612 for (i = 0; i < len; i++)
5613 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
5614
5615 i = I915_READ(G4X_AUD_CNTL_ST);
5616 i |= eldv;
5617 I915_WRITE(G4X_AUD_CNTL_ST, i);
5618}
5619
83358c85
WX
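/* Haswell ELD write sequence, roughly: enable the audio codec output for this
 * pipe, wait a vblank, mark the ELD valid, select DP or HDMI N-value handling,
 * then rewrite the ELD dwords only if the hardware copy is stale.
 */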
5620static void haswell_write_eld(struct drm_connector *connector,
5621 struct drm_crtc *crtc)
5622{
5623 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5624 uint8_t *eld = connector->eld;
5625 struct drm_device *dev = crtc->dev;
5626 uint32_t eldv;
5627 uint32_t i;
5628 int len;
5629 int pipe = to_intel_crtc(crtc)->pipe;
5630 int tmp;
5631
5632 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
5633 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
5634 int aud_config = HSW_AUD_CFG(pipe);
5635 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
5636
5637
5638 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
5639
5640 /* Audio output enable */
5641 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
5642 tmp = I915_READ(aud_cntrl_st2);
5643 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
5644 I915_WRITE(aud_cntrl_st2, tmp);
5645
5646 /* Wait for 1 vertical blank */
5647 intel_wait_for_vblank(dev, pipe);
5648
5649 /* Set ELD valid state */
5650 tmp = I915_READ(aud_cntrl_st2);
5651 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%8x\n", tmp);
5652 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
5653 I915_WRITE(aud_cntrl_st2, tmp);
5654 tmp = I915_READ(aud_cntrl_st2);
5655 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%8x\n", tmp);
5656
5657 /* Enable HDMI mode */
5658 tmp = I915_READ(aud_config);
5659 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%8x\n", tmp);
 5660 /* clear N_programming_enable and N_value_index */
5661 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
5662 I915_WRITE(aud_config, tmp);
5663
5664 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
5665
5666 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
5667
5668 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5669 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5670 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
5671 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5672 } else
5673 I915_WRITE(aud_config, 0);
5674
5675 if (intel_eld_uptodate(connector,
5676 aud_cntrl_st2, eldv,
5677 aud_cntl_st, IBX_ELD_ADDRESS,
5678 hdmiw_hdmiedid))
5679 return;
5680
5681 i = I915_READ(aud_cntrl_st2);
5682 i &= ~eldv;
5683 I915_WRITE(aud_cntrl_st2, i);
5684
5685 if (!eld[0])
5686 return;
5687
5688 i = I915_READ(aud_cntl_st);
5689 i &= ~IBX_ELD_ADDRESS;
5690 I915_WRITE(aud_cntl_st, i);
5691 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
5692 DRM_DEBUG_DRIVER("port num:%d\n", i);
5693
5694 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5695 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5696 for (i = 0; i < len; i++)
5697 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5698
5699 i = I915_READ(aud_cntrl_st2);
5700 i |= eldv;
5701 I915_WRITE(aud_cntrl_st2, i);
5702
5703}
5704
e0dac65e
WF
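/* IBX and CPT expose the same audio/ELD registers at different offsets, so
 * pick the register block by PCH type. If the DIP port select field reads
 * zero we don't know which port carries the audio and mark the ELD valid on
 * all of them.
 */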
5705static void ironlake_write_eld(struct drm_connector *connector,
5706 struct drm_crtc *crtc)
5707{
5708 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5709 uint8_t *eld = connector->eld;
5710 uint32_t eldv;
5711 uint32_t i;
5712 int len;
5713 int hdmiw_hdmiedid;
b6daa025 5714 int aud_config;
e0dac65e
WF
5715 int aud_cntl_st;
5716 int aud_cntrl_st2;
9b138a83 5717 int pipe = to_intel_crtc(crtc)->pipe;
e0dac65e 5718
b3f33cbf 5719 if (HAS_PCH_IBX(connector->dev)) {
9b138a83
WX
5720 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
5721 aud_config = IBX_AUD_CFG(pipe);
5722 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
1202b4c6 5723 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
e0dac65e 5724 } else {
9b138a83
WX
5725 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
5726 aud_config = CPT_AUD_CFG(pipe);
5727 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
1202b4c6 5728 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
e0dac65e
WF
5729 }
5730
9b138a83 5731 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
e0dac65e
WF
5732
5733 i = I915_READ(aud_cntl_st);
9b138a83 5734 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
e0dac65e
WF
5735 if (!i) {
5736 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5737 /* operate blindly on all ports */
1202b4c6
WF
5738 eldv = IBX_ELD_VALIDB;
5739 eldv |= IBX_ELD_VALIDB << 4;
5740 eldv |= IBX_ELD_VALIDB << 8;
e0dac65e
WF
5741 } else {
5742 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
1202b4c6 5743 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
e0dac65e
WF
5744 }
5745
3a9627f4
WF
5746 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5747 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5748 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
b6daa025
WF
5749 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5750 } else
5751 I915_WRITE(aud_config, 0);
e0dac65e 5752
3a9627f4
WF
5753 if (intel_eld_uptodate(connector,
5754 aud_cntrl_st2, eldv,
5755 aud_cntl_st, IBX_ELD_ADDRESS,
5756 hdmiw_hdmiedid))
5757 return;
5758
e0dac65e
WF
5759 i = I915_READ(aud_cntrl_st2);
5760 i &= ~eldv;
5761 I915_WRITE(aud_cntrl_st2, i);
5762
5763 if (!eld[0])
5764 return;
5765
e0dac65e 5766 i = I915_READ(aud_cntl_st);
1202b4c6 5767 i &= ~IBX_ELD_ADDRESS;
e0dac65e
WF
5768 I915_WRITE(aud_cntl_st, i);
5769
5770 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5771 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5772 for (i = 0; i < len; i++)
5773 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5774
5775 i = I915_READ(aud_cntrl_st2);
5776 i |= eldv;
5777 I915_WRITE(aud_cntrl_st2, i);
5778}
5779
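/* Entry point for ELD updates at mode set time: pick the audio-capable
 * connector for this encoder, patch in the A/V sync delay, and hand the ELD
 * to the platform-specific write_eld hook.
 */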
5780void intel_write_eld(struct drm_encoder *encoder,
5781 struct drm_display_mode *mode)
5782{
5783 struct drm_crtc *crtc = encoder->crtc;
5784 struct drm_connector *connector;
5785 struct drm_device *dev = encoder->dev;
5786 struct drm_i915_private *dev_priv = dev->dev_private;
5787
5788 connector = drm_select_eld(encoder, mode);
5789 if (!connector)
5790 return;
5791
5792 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5793 connector->base.id,
5794 drm_get_connector_name(connector),
5795 connector->encoder->base.id,
5796 drm_get_encoder_name(connector->encoder));
5797
5798 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
5799
5800 if (dev_priv->display.write_eld)
5801 dev_priv->display.write_eld(connector, crtc);
5802}
5803
79e53945
JB
5804/** Loads the palette/gamma unit for the CRTC with the prepared values */
5805void intel_crtc_load_lut(struct drm_crtc *crtc)
5806{
5807 struct drm_device *dev = crtc->dev;
5808 struct drm_i915_private *dev_priv = dev->dev_private;
5809 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9db4a9c7 5810 int palreg = PALETTE(intel_crtc->pipe);
79e53945
JB
5811 int i;
5812
5813 /* The clocks have to be on to load the palette. */
aed3f09d 5814 if (!crtc->enabled || !intel_crtc->active)
79e53945
JB
5815 return;
5816
f2b115e6 5817 /* use legacy palette for Ironlake */
bad720ff 5818 if (HAS_PCH_SPLIT(dev))
9db4a9c7 5819 palreg = LGC_PALETTE(intel_crtc->pipe);
2c07245f 5820
79e53945
JB
5821 for (i = 0; i < 256; i++) {
5822 I915_WRITE(palreg + 4 * i,
5823 (intel_crtc->lut_r[i] << 16) |
5824 (intel_crtc->lut_g[i] << 8) |
5825 intel_crtc->lut_b[i]);
5826 }
5827}
5828
560b85bb
CW
5829static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
5830{
5831 struct drm_device *dev = crtc->dev;
5832 struct drm_i915_private *dev_priv = dev->dev_private;
5833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5834 bool visible = base != 0;
5835 u32 cntl;
5836
5837 if (intel_crtc->cursor_visible == visible)
5838 return;
5839
9db4a9c7 5840 cntl = I915_READ(_CURACNTR);
560b85bb
CW
5841 if (visible) {
5842 /* On these chipsets we can only modify the base whilst
5843 * the cursor is disabled.
5844 */
9db4a9c7 5845 I915_WRITE(_CURABASE, base);
560b85bb
CW
5846
5847 cntl &= ~(CURSOR_FORMAT_MASK);
5848 /* XXX width must be 64, stride 256 => 0x00 << 28 */
5849 cntl |= CURSOR_ENABLE |
5850 CURSOR_GAMMA_ENABLE |
5851 CURSOR_FORMAT_ARGB;
5852 } else
5853 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
9db4a9c7 5854 I915_WRITE(_CURACNTR, cntl);
560b85bb
CW
5855
5856 intel_crtc->cursor_visible = visible;
5857}
5858
5859static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5860{
5861 struct drm_device *dev = crtc->dev;
5862 struct drm_i915_private *dev_priv = dev->dev_private;
5863 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5864 int pipe = intel_crtc->pipe;
5865 bool visible = base != 0;
5866
5867 if (intel_crtc->cursor_visible != visible) {
548f245b 5868 uint32_t cntl = I915_READ(CURCNTR(pipe));
560b85bb
CW
5869 if (base) {
5870 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5871 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5872 cntl |= pipe << 28; /* Connect to correct pipe */
5873 } else {
5874 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5875 cntl |= CURSOR_MODE_DISABLE;
5876 }
9db4a9c7 5877 I915_WRITE(CURCNTR(pipe), cntl);
560b85bb
CW
5878
5879 intel_crtc->cursor_visible = visible;
5880 }
5881 /* and commit changes on next vblank */
9db4a9c7 5882 I915_WRITE(CURBASE(pipe), base);
560b85bb
CW
5883}
5884
65a21cd6
JB
5885static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5886{
5887 struct drm_device *dev = crtc->dev;
5888 struct drm_i915_private *dev_priv = dev->dev_private;
5889 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5890 int pipe = intel_crtc->pipe;
5891 bool visible = base != 0;
5892
5893 if (intel_crtc->cursor_visible != visible) {
5894 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5895 if (base) {
5896 cntl &= ~CURSOR_MODE;
5897 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5898 } else {
5899 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5900 cntl |= CURSOR_MODE_DISABLE;
5901 }
5902 I915_WRITE(CURCNTR_IVB(pipe), cntl);
5903
5904 intel_crtc->cursor_visible = visible;
5905 }
5906 /* and commit changes on next vblank */
5907 I915_WRITE(CURBASE_IVB(pipe), base);
5908}
5909
cda4b7d3 5910/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6b383a7f
CW
5911static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5912 bool on)
cda4b7d3
CW
5913{
5914 struct drm_device *dev = crtc->dev;
5915 struct drm_i915_private *dev_priv = dev->dev_private;
5916 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5917 int pipe = intel_crtc->pipe;
5918 int x = intel_crtc->cursor_x;
5919 int y = intel_crtc->cursor_y;
560b85bb 5920 u32 base, pos;
cda4b7d3
CW
5921 bool visible;
5922
5923 pos = 0;
5924
6b383a7f 5925 if (on && crtc->enabled && crtc->fb) {
cda4b7d3
CW
5926 base = intel_crtc->cursor_addr;
5927 if (x > (int) crtc->fb->width)
5928 base = 0;
5929
5930 if (y > (int) crtc->fb->height)
5931 base = 0;
5932 } else
5933 base = 0;
5934
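	/* The cursor position registers use sign+magnitude encoding: a negative
	 * coordinate sets the sign bit and stores the absolute value. The cursor
	 * plane is disabled by forcing base to 0 when it is entirely off-screen.
	 */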
5935 if (x < 0) {
5936 if (x + intel_crtc->cursor_width < 0)
5937 base = 0;
5938
5939 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5940 x = -x;
5941 }
5942 pos |= x << CURSOR_X_SHIFT;
5943
5944 if (y < 0) {
5945 if (y + intel_crtc->cursor_height < 0)
5946 base = 0;
5947
5948 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5949 y = -y;
5950 }
5951 pos |= y << CURSOR_Y_SHIFT;
5952
5953 visible = base != 0;
560b85bb 5954 if (!visible && !intel_crtc->cursor_visible)
cda4b7d3
CW
5955 return;
5956
0cd83aa9 5957 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
65a21cd6
JB
5958 I915_WRITE(CURPOS_IVB(pipe), pos);
5959 ivb_update_cursor(crtc, base);
5960 } else {
5961 I915_WRITE(CURPOS(pipe), pos);
5962 if (IS_845G(dev) || IS_I865G(dev))
5963 i845_update_cursor(crtc, base);
5964 else
5965 i9xx_update_cursor(crtc, base);
5966 }
cda4b7d3
CW
5967}
5968
79e53945 5969static int intel_crtc_cursor_set(struct drm_crtc *crtc,
05394f39 5970 struct drm_file *file,
79e53945
JB
5971 uint32_t handle,
5972 uint32_t width, uint32_t height)
5973{
5974 struct drm_device *dev = crtc->dev;
5975 struct drm_i915_private *dev_priv = dev->dev_private;
5976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 5977 struct drm_i915_gem_object *obj;
cda4b7d3 5978 uint32_t addr;
3f8bc370 5979 int ret;
79e53945 5980
79e53945
JB
5981 /* if we want to turn off the cursor ignore width and height */
5982 if (!handle) {
28c97730 5983 DRM_DEBUG_KMS("cursor off\n");
3f8bc370 5984 addr = 0;
05394f39 5985 obj = NULL;
5004417d 5986 mutex_lock(&dev->struct_mutex);
3f8bc370 5987 goto finish;
79e53945
JB
5988 }
5989
5990 /* Currently we only support 64x64 cursors */
5991 if (width != 64 || height != 64) {
5992 DRM_ERROR("we currently only support 64x64 cursors\n");
5993 return -EINVAL;
5994 }
5995
05394f39 5996 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 5997 if (&obj->base == NULL)
79e53945
JB
5998 return -ENOENT;
5999
05394f39 6000 if (obj->base.size < width * height * 4) {
79e53945 6001 DRM_ERROR("buffer is too small\n");
34b8686e
DA
6002 ret = -ENOMEM;
6003 goto fail;
79e53945
JB
6004 }
6005
71acb5eb 6006 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 6007 mutex_lock(&dev->struct_mutex);
b295d1b6 6008 if (!dev_priv->info->cursor_needs_physical) {
d9e86c0e
CW
6009 if (obj->tiling_mode) {
6010 DRM_ERROR("cursor cannot be tiled\n");
6011 ret = -EINVAL;
6012 goto fail_locked;
6013 }
6014
2da3b9b9 6015 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
e7b526bb
CW
6016 if (ret) {
6017 DRM_ERROR("failed to move cursor bo into the GTT\n");
2da3b9b9 6018 goto fail_locked;
e7b526bb
CW
6019 }
6020
d9e86c0e
CW
6021 ret = i915_gem_object_put_fence(obj);
6022 if (ret) {
2da3b9b9 6023 DRM_ERROR("failed to release fence for cursor");
d9e86c0e
CW
6024 goto fail_unpin;
6025 }
6026
05394f39 6027 addr = obj->gtt_offset;
71acb5eb 6028 } else {
6eeefaf3 6029 int align = IS_I830(dev) ? 16 * 1024 : 256;
05394f39 6030 ret = i915_gem_attach_phys_object(dev, obj,
6eeefaf3
CW
6031 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
6032 align);
71acb5eb
DA
6033 if (ret) {
6034 DRM_ERROR("failed to attach phys object\n");
7f9872e0 6035 goto fail_locked;
71acb5eb 6036 }
05394f39 6037 addr = obj->phys_obj->handle->busaddr;
3f8bc370
KH
6038 }
6039
a6c45cf0 6040 if (IS_GEN2(dev))
14b60391
JB
6041 I915_WRITE(CURSIZE, (height << 12) | width);
6042
3f8bc370 6043 finish:
3f8bc370 6044 if (intel_crtc->cursor_bo) {
b295d1b6 6045 if (dev_priv->info->cursor_needs_physical) {
05394f39 6046 if (intel_crtc->cursor_bo != obj)
71acb5eb
DA
6047 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
6048 } else
6049 i915_gem_object_unpin(intel_crtc->cursor_bo);
05394f39 6050 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
3f8bc370 6051 }
80824003 6052
7f9872e0 6053 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
6054
6055 intel_crtc->cursor_addr = addr;
05394f39 6056 intel_crtc->cursor_bo = obj;
cda4b7d3
CW
6057 intel_crtc->cursor_width = width;
6058 intel_crtc->cursor_height = height;
6059
6b383a7f 6060 intel_crtc_update_cursor(crtc, true);
3f8bc370 6061
79e53945 6062 return 0;
e7b526bb 6063fail_unpin:
05394f39 6064 i915_gem_object_unpin(obj);
7f9872e0 6065fail_locked:
34b8686e 6066 mutex_unlock(&dev->struct_mutex);
bc9025bd 6067fail:
05394f39 6068 drm_gem_object_unreference_unlocked(&obj->base);
34b8686e 6069 return ret;
79e53945
JB
6070}
6071
6072static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
6073{
79e53945 6074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 6075
cda4b7d3
CW
6076 intel_crtc->cursor_x = x;
6077 intel_crtc->cursor_y = y;
652c393a 6078
6b383a7f 6079 intel_crtc_update_cursor(crtc, true);
79e53945
JB
6080
6081 return 0;
6082}
6083
6084/** Sets the color ramps on behalf of RandR */
6085void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
6086 u16 blue, int regno)
6087{
6088 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6089
6090 intel_crtc->lut_r[regno] = red >> 8;
6091 intel_crtc->lut_g[regno] = green >> 8;
6092 intel_crtc->lut_b[regno] = blue >> 8;
6093}
6094
b8c00ac5
DA
6095void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
6096 u16 *blue, int regno)
6097{
6098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6099
6100 *red = intel_crtc->lut_r[regno] << 8;
6101 *green = intel_crtc->lut_g[regno] << 8;
6102 *blue = intel_crtc->lut_b[regno] << 8;
6103}
6104
79e53945 6105static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 6106 u16 *blue, uint32_t start, uint32_t size)
79e53945 6107{
7203425a 6108 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 6109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 6110
7203425a 6111 for (i = start; i < end; i++) {
79e53945
JB
6112 intel_crtc->lut_r[i] = red[i] >> 8;
6113 intel_crtc->lut_g[i] = green[i] >> 8;
6114 intel_crtc->lut_b[i] = blue[i] >> 8;
6115 }
6116
6117 intel_crtc_load_lut(crtc);
6118}
6119
6120/**
6121 * Get a pipe with a simple mode set on it for doing load-based monitor
6122 * detection.
6123 *
6124 * It will be up to the load-detect code to adjust the pipe as appropriate for
c751ce4f 6125 * its requirements. The pipe will be connected to no other encoders.
79e53945 6126 *
c751ce4f 6127 * Currently this code will only succeed if there is a pipe with no encoders
79e53945
JB
6128 * configured for it. In the future, it could choose to temporarily disable
6129 * some outputs to free up a pipe for its use.
6130 *
6131 * \return crtc, or NULL if no pipes are available.
6132 */
6133
6134/* VESA 640x480x72Hz mode to set on the pipe */
6135static struct drm_display_mode load_detect_mode = {
6136 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
6137 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
6138};
6139
d2dff872
CW
6140static struct drm_framebuffer *
6141intel_framebuffer_create(struct drm_device *dev,
308e5bcb 6142 struct drm_mode_fb_cmd2 *mode_cmd,
d2dff872
CW
6143 struct drm_i915_gem_object *obj)
6144{
6145 struct intel_framebuffer *intel_fb;
6146 int ret;
6147
6148 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
6149 if (!intel_fb) {
6150 drm_gem_object_unreference_unlocked(&obj->base);
6151 return ERR_PTR(-ENOMEM);
6152 }
6153
6154 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
6155 if (ret) {
6156 drm_gem_object_unreference_unlocked(&obj->base);
6157 kfree(intel_fb);
6158 return ERR_PTR(ret);
6159 }
6160
6161 return &intel_fb->base;
6162}
6163
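/* Helpers for sizing a scanout buffer: the stride is rounded up to 64 bytes
 * (assumed to be the display engine's linear-stride alignment) and the total
 * allocation is rounded up to a whole page.
 */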
6164static u32
6165intel_framebuffer_pitch_for_width(int width, int bpp)
6166{
6167 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
6168 return ALIGN(pitch, 64);
6169}
6170
6171static u32
6172intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
6173{
6174 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
6175 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
6176}
6177
6178static struct drm_framebuffer *
6179intel_framebuffer_create_for_mode(struct drm_device *dev,
6180 struct drm_display_mode *mode,
6181 int depth, int bpp)
6182{
6183 struct drm_i915_gem_object *obj;
308e5bcb 6184 struct drm_mode_fb_cmd2 mode_cmd;
d2dff872
CW
6185
6186 obj = i915_gem_alloc_object(dev,
6187 intel_framebuffer_size_for_mode(mode, bpp));
6188 if (obj == NULL)
6189 return ERR_PTR(-ENOMEM);
6190
6191 mode_cmd.width = mode->hdisplay;
6192 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
6193 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
6194 bpp);
5ca0c34a 6195 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872
CW
6196
6197 return intel_framebuffer_create(dev, &mode_cmd, obj);
6198}
6199
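/* Reuse the fbcon framebuffer for load detection when it is present and
 * large enough, in both stride and total size, for the requested mode.
 */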
6200static struct drm_framebuffer *
6201mode_fits_in_fbdev(struct drm_device *dev,
6202 struct drm_display_mode *mode)
6203{
6204 struct drm_i915_private *dev_priv = dev->dev_private;
6205 struct drm_i915_gem_object *obj;
6206 struct drm_framebuffer *fb;
6207
6208 if (dev_priv->fbdev == NULL)
6209 return NULL;
6210
6211 obj = dev_priv->fbdev->ifb.obj;
6212 if (obj == NULL)
6213 return NULL;
6214
6215 fb = &dev_priv->fbdev->ifb.base;
01f2c773
VS
6216 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
6217 fb->bits_per_pixel))
d2dff872
CW
6218 return NULL;
6219
01f2c773 6220 if (obj->base.size < mode->vdisplay * fb->pitches[0])
d2dff872
CW
6221 return NULL;
6222
6223 return fb;
6224}
6225
d2434ab7 6226bool intel_get_load_detect_pipe(struct drm_connector *connector,
7173188d 6227 struct drm_display_mode *mode,
8261b191 6228 struct intel_load_detect_pipe *old)
79e53945
JB
6229{
6230 struct intel_crtc *intel_crtc;
d2434ab7
DV
6231 struct intel_encoder *intel_encoder =
6232 intel_attached_encoder(connector);
79e53945 6233 struct drm_crtc *possible_crtc;
4ef69c7a 6234 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
6235 struct drm_crtc *crtc = NULL;
6236 struct drm_device *dev = encoder->dev;
94352cf9 6237 struct drm_framebuffer *fb;
79e53945
JB
6238 int i = -1;
6239
d2dff872
CW
6240 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6241 connector->base.id, drm_get_connector_name(connector),
6242 encoder->base.id, drm_get_encoder_name(encoder));
6243
79e53945
JB
6244 /*
6245 * Algorithm gets a little messy:
7a5e4805 6246 *
79e53945
JB
6247 * - if the connector already has an assigned crtc, use it (but make
6248 * sure it's on first)
7a5e4805 6249 *
79e53945
JB
6250 * - try to find the first unused crtc that can drive this connector,
6251 * and use that if we find one
79e53945
JB
6252 */
6253
6254 /* See if we already have a CRTC for this connector */
6255 if (encoder->crtc) {
6256 crtc = encoder->crtc;
8261b191 6257
24218aac 6258 old->dpms_mode = connector->dpms;
8261b191
CW
6259 old->load_detect_temp = false;
6260
6261 /* Make sure the crtc and connector are running */
24218aac
DV
6262 if (connector->dpms != DRM_MODE_DPMS_ON)
6263 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8261b191 6264
7173188d 6265 return true;
79e53945
JB
6266 }
6267
6268 /* Find an unused one (if possible) */
6269 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
6270 i++;
6271 if (!(encoder->possible_crtcs & (1 << i)))
6272 continue;
6273 if (!possible_crtc->enabled) {
6274 crtc = possible_crtc;
6275 break;
6276 }
79e53945
JB
6277 }
6278
6279 /*
6280 * If we didn't find an unused CRTC, don't use any.
6281 */
6282 if (!crtc) {
7173188d
CW
6283 DRM_DEBUG_KMS("no pipe available for load-detect\n");
6284 return false;
79e53945
JB
6285 }
6286
fc303101
DV
6287 intel_encoder->new_crtc = to_intel_crtc(crtc);
6288 to_intel_connector(connector)->new_encoder = intel_encoder;
79e53945
JB
6289
6290 intel_crtc = to_intel_crtc(crtc);
24218aac 6291 old->dpms_mode = connector->dpms;
8261b191 6292 old->load_detect_temp = true;
d2dff872 6293 old->release_fb = NULL;
79e53945 6294
6492711d
CW
6295 if (!mode)
6296 mode = &load_detect_mode;
79e53945 6297
d2dff872
CW
6298 /* We need a framebuffer large enough to accommodate all accesses
6299 * that the plane may generate whilst we perform load detection.
 6300 * We cannot rely on the fbcon either being present (we get called
6301 * during its initialisation to detect all boot displays, or it may
6302 * not even exist) or that it is large enough to satisfy the
6303 * requested mode.
6304 */
94352cf9
DV
6305 fb = mode_fits_in_fbdev(dev, mode);
6306 if (fb == NULL) {
d2dff872 6307 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
94352cf9
DV
6308 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
6309 old->release_fb = fb;
d2dff872
CW
6310 } else
6311 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
94352cf9 6312 if (IS_ERR(fb)) {
d2dff872 6313 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
24218aac 6314 goto fail;
79e53945 6315 }
79e53945 6316
94352cf9 6317 if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
6492711d 6318 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
d2dff872
CW
6319 if (old->release_fb)
6320 old->release_fb->funcs->destroy(old->release_fb);
24218aac 6321 goto fail;
79e53945 6322 }
7173188d 6323
79e53945 6324 /* let the connector get through one full cycle before testing */
9d0498a2 6325 intel_wait_for_vblank(dev, intel_crtc->pipe);
79e53945 6326
7173188d 6327 return true;
24218aac
DV
6328fail:
6329 connector->encoder = NULL;
6330 encoder->crtc = NULL;
24218aac 6331 return false;
79e53945
JB
6332}
6333
d2434ab7 6334void intel_release_load_detect_pipe(struct drm_connector *connector,
8261b191 6335 struct intel_load_detect_pipe *old)
79e53945 6336{
d2434ab7
DV
6337 struct intel_encoder *intel_encoder =
6338 intel_attached_encoder(connector);
4ef69c7a 6339 struct drm_encoder *encoder = &intel_encoder->base;
79e53945 6340
d2dff872
CW
6341 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6342 connector->base.id, drm_get_connector_name(connector),
6343 encoder->base.id, drm_get_encoder_name(encoder));
6344
8261b191 6345 if (old->load_detect_temp) {
fc303101
DV
6346 struct drm_crtc *crtc = encoder->crtc;
6347
6348 to_intel_connector(connector)->new_encoder = NULL;
6349 intel_encoder->new_crtc = NULL;
6350 intel_set_mode(crtc, NULL, 0, 0, NULL);
d2dff872
CW
6351
6352 if (old->release_fb)
6353 old->release_fb->funcs->destroy(old->release_fb);
6354
0622a53c 6355 return;
79e53945
JB
6356 }
6357
c751ce4f 6358 /* Switch crtc and encoder back off if necessary */
24218aac
DV
6359 if (old->dpms_mode != DRM_MODE_DPMS_ON)
6360 connector->funcs->dpms(connector, old->dpms_mode);
79e53945
JB
6361}
6362
6363/* Returns the clock of the currently programmed mode of the given pipe. */
6364static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
6365{
6366 struct drm_i915_private *dev_priv = dev->dev_private;
6367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6368 int pipe = intel_crtc->pipe;
548f245b 6369 u32 dpll = I915_READ(DPLL(pipe));
79e53945
JB
6370 u32 fp;
6371 intel_clock_t clock;
6372
6373 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
39adb7a5 6374 fp = I915_READ(FP0(pipe));
79e53945 6375 else
39adb7a5 6376 fp = I915_READ(FP1(pipe));
79e53945
JB
6377
6378 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
6379 if (IS_PINEVIEW(dev)) {
6380 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
6381 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
6382 } else {
6383 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
6384 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
6385 }
6386
a6c45cf0 6387 if (!IS_GEN2(dev)) {
f2b115e6
AJ
6388 if (IS_PINEVIEW(dev))
6389 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
6390 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
6391 else
6392 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
6393 DPLL_FPA01_P1_POST_DIV_SHIFT);
6394
6395 switch (dpll & DPLL_MODE_MASK) {
6396 case DPLLB_MODE_DAC_SERIAL:
6397 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
6398 5 : 10;
6399 break;
6400 case DPLLB_MODE_LVDS:
6401 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
6402 7 : 14;
6403 break;
6404 default:
28c97730 6405 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945
JB
6406 "mode\n", (int)(dpll & DPLL_MODE_MASK));
6407 return 0;
6408 }
6409
 6410 /* XXX: Handle the 100MHz refclk */
2177832f 6411 intel_clock(dev, 96000, &clock);
79e53945
JB
6412 } else {
6413 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
6414
6415 if (is_lvds) {
6416 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
6417 DPLL_FPA01_P1_POST_DIV_SHIFT);
6418 clock.p2 = 14;
6419
6420 if ((dpll & PLL_REF_INPUT_MASK) ==
6421 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
6422 /* XXX: might not be 66MHz */
2177832f 6423 intel_clock(dev, 66000, &clock);
79e53945 6424 } else
2177832f 6425 intel_clock(dev, 48000, &clock);
79e53945
JB
6426 } else {
6427 if (dpll & PLL_P1_DIVIDE_BY_TWO)
6428 clock.p1 = 2;
6429 else {
6430 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
6431 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
6432 }
6433 if (dpll & PLL_P2_DIVIDE_BY_4)
6434 clock.p2 = 4;
6435 else
6436 clock.p2 = 2;
6437
2177832f 6438 intel_clock(dev, 48000, &clock);
79e53945
JB
6439 }
6440 }
6441
6442 /* XXX: It would be nice to validate the clocks, but we can't reuse
6443 * i830PllIsValid() because it relies on the xf86_config connector
6444 * configuration being accurate, which it isn't necessarily.
6445 */
6446
6447 return clock.dot;
6448}
6449
6450/** Returns the currently programmed mode of the given pipe. */
6451struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
6452 struct drm_crtc *crtc)
6453{
548f245b 6454 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
6455 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6456 int pipe = intel_crtc->pipe;
6457 struct drm_display_mode *mode;
548f245b
JB
6458 int htot = I915_READ(HTOTAL(pipe));
6459 int hsync = I915_READ(HSYNC(pipe));
6460 int vtot = I915_READ(VTOTAL(pipe));
6461 int vsync = I915_READ(VSYNC(pipe));
79e53945
JB
6462
6463 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
6464 if (!mode)
6465 return NULL;
6466
6467 mode->clock = intel_crtc_clock_get(dev, crtc);
6468 mode->hdisplay = (htot & 0xffff) + 1;
6469 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
6470 mode->hsync_start = (hsync & 0xffff) + 1;
6471 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
6472 mode->vdisplay = (vtot & 0xffff) + 1;
6473 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
6474 mode->vsync_start = (vsync & 0xffff) + 1;
6475 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
6476
6477 drm_mode_set_name(mode);
79e53945
JB
6478
6479 return mode;
6480}
6481
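/* LVDS downclocking: DISPLAY_RATE_SELECT_FPA1 switches the DPLL between the
 * FP0 (full rate) and FP1 (reduced rate) divider sets programmed at mode set
 * time, so the panel clock can be dropped while the framebuffer is idle and
 * raised again on activity.
 */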
3dec0095 6482static void intel_increase_pllclock(struct drm_crtc *crtc)
652c393a
JB
6483{
6484 struct drm_device *dev = crtc->dev;
6485 drm_i915_private_t *dev_priv = dev->dev_private;
6486 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6487 int pipe = intel_crtc->pipe;
dbdc6479
JB
6488 int dpll_reg = DPLL(pipe);
6489 int dpll;
652c393a 6490
bad720ff 6491 if (HAS_PCH_SPLIT(dev))
652c393a
JB
6492 return;
6493
6494 if (!dev_priv->lvds_downclock_avail)
6495 return;
6496
dbdc6479 6497 dpll = I915_READ(dpll_reg);
652c393a 6498 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 6499 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a 6500
8ac5a6d5 6501 assert_panel_unlocked(dev_priv, pipe);
652c393a
JB
6502
6503 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
6504 I915_WRITE(dpll_reg, dpll);
9d0498a2 6505 intel_wait_for_vblank(dev, pipe);
dbdc6479 6506
652c393a
JB
6507 dpll = I915_READ(dpll_reg);
6508 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 6509 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a 6510 }
652c393a
JB
6511}
6512
6513static void intel_decrease_pllclock(struct drm_crtc *crtc)
6514{
6515 struct drm_device *dev = crtc->dev;
6516 drm_i915_private_t *dev_priv = dev->dev_private;
6517 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 6518
bad720ff 6519 if (HAS_PCH_SPLIT(dev))
652c393a
JB
6520 return;
6521
6522 if (!dev_priv->lvds_downclock_avail)
6523 return;
6524
6525 /*
6526 * Since this is called by a timer, we should never get here in
6527 * the manual case.
6528 */
6529 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
dc257cf1
DV
6530 int pipe = intel_crtc->pipe;
6531 int dpll_reg = DPLL(pipe);
6532 int dpll;
f6e5b160 6533
44d98a61 6534 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a 6535
8ac5a6d5 6536 assert_panel_unlocked(dev_priv, pipe);
652c393a 6537
dc257cf1 6538 dpll = I915_READ(dpll_reg);
652c393a
JB
6539 dpll |= DISPLAY_RATE_SELECT_FPA1;
6540 I915_WRITE(dpll_reg, dpll);
9d0498a2 6541 intel_wait_for_vblank(dev, pipe);
652c393a
JB
6542 dpll = I915_READ(dpll_reg);
6543 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 6544 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
6545 }
6546
6547}
6548
f047e395
CW
6549void intel_mark_busy(struct drm_device *dev)
6550{
f047e395
CW
6551 i915_update_gfx_val(dev->dev_private);
6552}
6553
6554void intel_mark_idle(struct drm_device *dev)
652c393a 6555{
f047e395
CW
6556}
6557
6558void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
6559{
6560 struct drm_device *dev = obj->base.dev;
652c393a 6561 struct drm_crtc *crtc;
652c393a
JB
6562
6563 if (!i915_powersave)
6564 return;
6565
652c393a 6566 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652c393a
JB
6567 if (!crtc->fb)
6568 continue;
6569
f047e395
CW
6570 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6571 intel_increase_pllclock(crtc);
652c393a 6572 }
652c393a
JB
6573}
6574
f047e395 6575void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
652c393a 6576{
f047e395
CW
6577 struct drm_device *dev = obj->base.dev;
6578 struct drm_crtc *crtc;
652c393a 6579
f047e395 6580 if (!i915_powersave)
acb87dfb
CW
6581 return;
6582
652c393a
JB
6583 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6584 if (!crtc->fb)
6585 continue;
6586
f047e395
CW
6587 if (to_intel_framebuffer(crtc->fb)->obj == obj)
6588 intel_decrease_pllclock(crtc);
652c393a
JB
6589 }
6590}
6591
79e53945
JB
6592static void intel_crtc_destroy(struct drm_crtc *crtc)
6593{
6594 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a
DV
6595 struct drm_device *dev = crtc->dev;
6596 struct intel_unpin_work *work;
6597 unsigned long flags;
6598
6599 spin_lock_irqsave(&dev->event_lock, flags);
6600 work = intel_crtc->unpin_work;
6601 intel_crtc->unpin_work = NULL;
6602 spin_unlock_irqrestore(&dev->event_lock, flags);
6603
6604 if (work) {
6605 cancel_work_sync(&work->work);
6606 kfree(work);
6607 }
79e53945
JB
6608
6609 drm_crtc_cleanup(crtc);
67e77c5a 6610
79e53945
JB
6611 kfree(intel_crtc);
6612}
6613
6b95a207
KH
6614static void intel_unpin_work_fn(struct work_struct *__work)
6615{
6616 struct intel_unpin_work *work =
6617 container_of(__work, struct intel_unpin_work, work);
6618
6619 mutex_lock(&work->dev->struct_mutex);
1690e1eb 6620 intel_unpin_fb_obj(work->old_fb_obj);
05394f39
CW
6621 drm_gem_object_unreference(&work->pending_flip_obj->base);
6622 drm_gem_object_unreference(&work->old_fb_obj->base);
d9e86c0e 6623
7782de3b 6624 intel_update_fbc(work->dev);
6b95a207
KH
6625 mutex_unlock(&work->dev->struct_mutex);
6626 kfree(work);
6627}
6628
1afe3e9d 6629static void do_intel_finish_page_flip(struct drm_device *dev,
49b14a5c 6630 struct drm_crtc *crtc)
6b95a207
KH
6631{
6632 drm_i915_private_t *dev_priv = dev->dev_private;
6b95a207
KH
6633 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6634 struct intel_unpin_work *work;
05394f39 6635 struct drm_i915_gem_object *obj;
6b95a207 6636 struct drm_pending_vblank_event *e;
95cb1b02 6637 struct timeval tvbl;
6b95a207
KH
6638 unsigned long flags;
6639
6640 /* Ignore early vblank irqs */
6641 if (intel_crtc == NULL)
6642 return;
6643
6644 spin_lock_irqsave(&dev->event_lock, flags);
6645 work = intel_crtc->unpin_work;
6646 if (work == NULL || !work->pending) {
6647 spin_unlock_irqrestore(&dev->event_lock, flags);
6648 return;
6649 }
6650
6651 intel_crtc->unpin_work = NULL;
6b95a207
KH
6652
6653 if (work->event) {
6654 e = work->event;
49b14a5c 6655 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
0af7e4df 6656
49b14a5c
MK
6657 e->event.tv_sec = tvbl.tv_sec;
6658 e->event.tv_usec = tvbl.tv_usec;
0af7e4df 6659
6b95a207
KH
6660 list_add_tail(&e->base.link,
6661 &e->base.file_priv->event_list);
6662 wake_up_interruptible(&e->base.file_priv->event_wait);
6663 }
6664
0af7e4df
MK
6665 drm_vblank_put(dev, intel_crtc->pipe);
6666
6b95a207
KH
6667 spin_unlock_irqrestore(&dev->event_lock, flags);
6668
05394f39 6669 obj = work->old_fb_obj;
d9e86c0e 6670
e59f2bac 6671 atomic_clear_mask(1 << intel_crtc->plane,
05394f39 6672 &obj->pending_flip.counter);
d9e86c0e 6673
5bb61643 6674 wake_up(&dev_priv->pending_flip_queue);
6b95a207 6675 schedule_work(&work->work);
e5510fac
JB
6676
6677 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6b95a207
KH
6678}
6679
1afe3e9d
JB
6680void intel_finish_page_flip(struct drm_device *dev, int pipe)
6681{
6682 drm_i915_private_t *dev_priv = dev->dev_private;
6683 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6684
49b14a5c 6685 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6686}
6687
6688void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6689{
6690 drm_i915_private_t *dev_priv = dev->dev_private;
6691 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6692
49b14a5c 6693 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6694}
6695
6b95a207
KH
6696void intel_prepare_page_flip(struct drm_device *dev, int plane)
6697{
6698 drm_i915_private_t *dev_priv = dev->dev_private;
6699 struct intel_crtc *intel_crtc =
6700 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6701 unsigned long flags;
6702
6703 spin_lock_irqsave(&dev->event_lock, flags);
de3f440f 6704 if (intel_crtc->unpin_work) {
4e5359cd
SF
6705 if ((++intel_crtc->unpin_work->pending) > 1)
6706 DRM_ERROR("Prepared flip multiple times\n");
de3f440f
JB
6707 } else {
6708 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6709 }
6b95a207
KH
6710 spin_unlock_irqrestore(&dev->event_lock, flags);
6711}
6712
8c9f3aaf
JB
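/* Gen2 flips are queued on the render ring: wait for any previous flip on
 * this plane with MI_WAIT_FOR_EVENT, then emit MI_DISPLAY_FLIP with the new
 * pitch and base address. The flip itself takes effect on the next vblank.
 */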
6713static int intel_gen2_queue_flip(struct drm_device *dev,
6714 struct drm_crtc *crtc,
6715 struct drm_framebuffer *fb,
6716 struct drm_i915_gem_object *obj)
6717{
6718 struct drm_i915_private *dev_priv = dev->dev_private;
6719 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 6720 u32 flip_mask;
6d90c952 6721 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6722 int ret;
6723
6d90c952 6724 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6725 if (ret)
83d4092b 6726 goto err;
8c9f3aaf 6727
6d90c952 6728 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6729 if (ret)
83d4092b 6730 goto err_unpin;
8c9f3aaf
JB
6731
6732 /* Can't queue multiple flips, so wait for the previous
6733 * one to finish before executing the next.
6734 */
6735 if (intel_crtc->plane)
6736 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6737 else
6738 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6739 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6740 intel_ring_emit(ring, MI_NOOP);
6741 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6742 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6743 intel_ring_emit(ring, fb->pitches[0]);
e506a0c6 6744 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6d90c952
DV
6745 intel_ring_emit(ring, 0); /* aux display base address, unused */
6746 intel_ring_advance(ring);
83d4092b
CW
6747 return 0;
6748
6749err_unpin:
6750 intel_unpin_fb_obj(obj);
6751err:
8c9f3aaf
JB
6752 return ret;
6753}
6754
6755static int intel_gen3_queue_flip(struct drm_device *dev,
6756 struct drm_crtc *crtc,
6757 struct drm_framebuffer *fb,
6758 struct drm_i915_gem_object *obj)
6759{
6760 struct drm_i915_private *dev_priv = dev->dev_private;
6761 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 6762 u32 flip_mask;
6d90c952 6763 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6764 int ret;
6765
6d90c952 6766 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6767 if (ret)
83d4092b 6768 goto err;
8c9f3aaf 6769
6d90c952 6770 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6771 if (ret)
83d4092b 6772 goto err_unpin;
8c9f3aaf
JB
6773
6774 if (intel_crtc->plane)
6775 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6776 else
6777 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6778 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6779 intel_ring_emit(ring, MI_NOOP);
6780 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6781 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6782 intel_ring_emit(ring, fb->pitches[0]);
e506a0c6 6783 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
6d90c952
DV
6784 intel_ring_emit(ring, MI_NOOP);
6785
6786 intel_ring_advance(ring);
83d4092b
CW
6787 return 0;
6788
6789err_unpin:
6790 intel_unpin_fb_obj(obj);
6791err:
8c9f3aaf
JB
6792 return ret;
6793}
6794
6795static int intel_gen4_queue_flip(struct drm_device *dev,
6796 struct drm_crtc *crtc,
6797 struct drm_framebuffer *fb,
6798 struct drm_i915_gem_object *obj)
6799{
6800 struct drm_i915_private *dev_priv = dev->dev_private;
6801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6802 uint32_t pf, pipesrc;
6d90c952 6803 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6804 int ret;
6805
6d90c952 6806 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6807 if (ret)
83d4092b 6808 goto err;
8c9f3aaf 6809
6d90c952 6810 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6811 if (ret)
83d4092b 6812 goto err_unpin;
8c9f3aaf
JB
6813
6814 /* i965+ uses the linear or tiled offsets from the
6815 * Display Registers (which do not change across a page-flip)
6816 * so we need only reprogram the base address.
6817 */
6d90c952
DV
6818 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6819 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6820 intel_ring_emit(ring, fb->pitches[0]);
c2c75131
DV
6821 intel_ring_emit(ring,
6822 (obj->gtt_offset + intel_crtc->dspaddr_offset) |
6823 obj->tiling_mode);
8c9f3aaf
JB
6824
6825 /* XXX Enabling the panel-fitter across page-flip is so far
6826 * untested on non-native modes, so ignore it for now.
6827 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6828 */
6829 pf = 0;
6830 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6831 intel_ring_emit(ring, pf | pipesrc);
6832 intel_ring_advance(ring);
83d4092b
CW
6833 return 0;
6834
6835err_unpin:
6836 intel_unpin_fb_obj(obj);
6837err:
8c9f3aaf
JB
6838 return ret;
6839}
6840
6841static int intel_gen6_queue_flip(struct drm_device *dev,
6842 struct drm_crtc *crtc,
6843 struct drm_framebuffer *fb,
6844 struct drm_i915_gem_object *obj)
6845{
6846 struct drm_i915_private *dev_priv = dev->dev_private;
6847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6d90c952 6848 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6849 uint32_t pf, pipesrc;
6850 int ret;
6851
6d90c952 6852 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6853 if (ret)
83d4092b 6854 goto err;
8c9f3aaf 6855
6d90c952 6856 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6857 if (ret)
83d4092b 6858 goto err_unpin;
8c9f3aaf 6859
6d90c952
DV
6860 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6861 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6862 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
c2c75131 6863 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
8c9f3aaf 6864
dc257cf1
DV
6865 /* Contrary to the suggestions in the documentation,
6866 * "Enable Panel Fitter" does not seem to be required when page
 6867 * flipping with a non-native mode, and worse, causes a normal
6868 * modeset to fail.
6869 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6870 */
6871 pf = 0;
8c9f3aaf 6872 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6873 intel_ring_emit(ring, pf | pipesrc);
6874 intel_ring_advance(ring);
83d4092b
CW
6875 return 0;
6876
6877err_unpin:
6878 intel_unpin_fb_obj(obj);
6879err:
8c9f3aaf
JB
6880 return ret;
6881}
6882
7c9017e5
JB
6883/*
6884 * On gen7 we currently use the blit ring because (in early silicon at least)
 6885 * the render ring doesn't give us interrupts for page flip completion, which
6886 * means clients will hang after the first flip is queued. Fortunately the
6887 * blit ring generates interrupts properly, so use it instead.
6888 */
6889static int intel_gen7_queue_flip(struct drm_device *dev,
6890 struct drm_crtc *crtc,
6891 struct drm_framebuffer *fb,
6892 struct drm_i915_gem_object *obj)
6893{
6894 struct drm_i915_private *dev_priv = dev->dev_private;
6895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6896 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
cb05d8de 6897 uint32_t plane_bit = 0;
7c9017e5
JB
6898 int ret;
6899
6900 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6901 if (ret)
83d4092b 6902 goto err;
7c9017e5 6903
cb05d8de
DV
6904 switch(intel_crtc->plane) {
6905 case PLANE_A:
6906 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
6907 break;
6908 case PLANE_B:
6909 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
6910 break;
6911 case PLANE_C:
6912 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
6913 break;
6914 default:
6915 WARN_ONCE(1, "unknown plane in flip command\n");
6916 ret = -ENODEV;
ab3951eb 6917 goto err_unpin;
cb05d8de
DV
6918 }
6919
7c9017e5
JB
6920 ret = intel_ring_begin(ring, 4);
6921 if (ret)
83d4092b 6922 goto err_unpin;
7c9017e5 6923
cb05d8de 6924 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
01f2c773 6925 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
c2c75131 6926 intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
7c9017e5
JB
6927 intel_ring_emit(ring, (MI_NOOP));
6928 intel_ring_advance(ring);
83d4092b
CW
6929 return 0;
6930
6931err_unpin:
6932 intel_unpin_fb_obj(obj);
6933err:
7c9017e5
JB
6934 return ret;
6935}
6936
8c9f3aaf
JB
6937static int intel_default_queue_flip(struct drm_device *dev,
6938 struct drm_crtc *crtc,
6939 struct drm_framebuffer *fb,
6940 struct drm_i915_gem_object *obj)
6941{
6942 return -ENODEV;
6943}
6944
6b95a207
KH
6945static int intel_crtc_page_flip(struct drm_crtc *crtc,
6946 struct drm_framebuffer *fb,
6947 struct drm_pending_vblank_event *event)
6948{
6949 struct drm_device *dev = crtc->dev;
6950 struct drm_i915_private *dev_priv = dev->dev_private;
6951 struct intel_framebuffer *intel_fb;
05394f39 6952 struct drm_i915_gem_object *obj;
6b95a207
KH
6953 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6954 struct intel_unpin_work *work;
8c9f3aaf 6955 unsigned long flags;
52e68630 6956 int ret;
6b95a207 6957
e6a595d2
VS
6958 /* Can't change pixel format via MI display flips. */
6959 if (fb->pixel_format != crtc->fb->pixel_format)
6960 return -EINVAL;
6961
6962 /*
6963 * TILEOFF/LINOFF registers can't be changed via MI display flips.
6964 * Note that pitch changes could also affect these register.
6965 */
6966 if (INTEL_INFO(dev)->gen > 3 &&
6967 (fb->offsets[0] != crtc->fb->offsets[0] ||
6968 fb->pitches[0] != crtc->fb->pitches[0]))
6969 return -EINVAL;
6970
6b95a207
KH
6971 work = kzalloc(sizeof *work, GFP_KERNEL);
6972 if (work == NULL)
6973 return -ENOMEM;
6974
6b95a207
KH
6975 work->event = event;
6976 work->dev = crtc->dev;
6977 intel_fb = to_intel_framebuffer(crtc->fb);
b1b87f6b 6978 work->old_fb_obj = intel_fb->obj;
6b95a207
KH
6979 INIT_WORK(&work->work, intel_unpin_work_fn);
6980
7317c75e
JB
6981 ret = drm_vblank_get(dev, intel_crtc->pipe);
6982 if (ret)
6983 goto free_work;
6984
6b95a207
KH
6985 /* We borrow the event spin lock for protecting unpin_work */
6986 spin_lock_irqsave(&dev->event_lock, flags);
6987 if (intel_crtc->unpin_work) {
6988 spin_unlock_irqrestore(&dev->event_lock, flags);
6989 kfree(work);
7317c75e 6990 drm_vblank_put(dev, intel_crtc->pipe);
468f0b44
CW
6991
6992 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
6993 return -EBUSY;
6994 }
6995 intel_crtc->unpin_work = work;
6996 spin_unlock_irqrestore(&dev->event_lock, flags);
6997
6998 intel_fb = to_intel_framebuffer(fb);
6999 obj = intel_fb->obj;
7000
79158103
CW
7001 ret = i915_mutex_lock_interruptible(dev);
7002 if (ret)
7003 goto cleanup;
6b95a207 7004
75dfca80 7005 /* Reference the objects for the scheduled work. */
05394f39
CW
7006 drm_gem_object_reference(&work->old_fb_obj->base);
7007 drm_gem_object_reference(&obj->base);
6b95a207
KH
7008
7009 crtc->fb = fb;
96b099fd 7010
e1f99ce6 7011 work->pending_flip_obj = obj;
e1f99ce6 7012
4e5359cd
SF
7013 work->enable_stall_check = true;
7014
e1f99ce6
CW
7015 /* Block clients from rendering to the new back buffer until
7016 * the flip occurs and the object is no longer visible.
7017 */
05394f39 7018 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
e1f99ce6 7019
8c9f3aaf
JB
7020 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
7021 if (ret)
7022 goto cleanup_pending;
6b95a207 7023
7782de3b 7024 intel_disable_fbc(dev);
f047e395 7025 intel_mark_fb_busy(obj);
6b95a207
KH
7026 mutex_unlock(&dev->struct_mutex);
7027
e5510fac
JB
7028 trace_i915_flip_request(intel_crtc->plane, obj);
7029
6b95a207 7030 return 0;
96b099fd 7031
8c9f3aaf
JB
7032cleanup_pending:
7033 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
05394f39
CW
7034 drm_gem_object_unreference(&work->old_fb_obj->base);
7035 drm_gem_object_unreference(&obj->base);
96b099fd
CW
7036 mutex_unlock(&dev->struct_mutex);
7037
79158103 7038cleanup:
96b099fd
CW
7039 spin_lock_irqsave(&dev->event_lock, flags);
7040 intel_crtc->unpin_work = NULL;
7041 spin_unlock_irqrestore(&dev->event_lock, flags);
7042
7317c75e
JB
7043 drm_vblank_put(dev, intel_crtc->pipe);
7044free_work:
96b099fd
CW
7045 kfree(work);
7046
7047 return ret;
6b95a207
KH
7048}
7049
f6e5b160 7050static struct drm_crtc_helper_funcs intel_helper_funcs = {
f6e5b160
CW
7051 .mode_set_base_atomic = intel_pipe_set_base_atomic,
7052 .load_lut = intel_crtc_load_lut,
976f8a20 7053 .disable = intel_crtc_noop,
f6e5b160
CW
7054};
7055
6ed0f796 7056bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
47f1c6c9 7057{
6ed0f796
DV
7058 struct intel_encoder *other_encoder;
7059 struct drm_crtc *crtc = &encoder->new_crtc->base;
47f1c6c9 7060
6ed0f796
DV
7061 if (WARN_ON(!crtc))
7062 return false;
7063
7064 list_for_each_entry(other_encoder,
7065 &crtc->dev->mode_config.encoder_list,
7066 base.head) {
7067
7068 if (&other_encoder->new_crtc->base != crtc ||
7069 encoder == other_encoder)
7070 continue;
7071 else
7072 return true;
f47166d2
CW
7073 }
7074
6ed0f796
DV
7075 return false;
7076}
47f1c6c9 7077
50f56119
DV
7078static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
7079 struct drm_crtc *crtc)
7080{
7081 struct drm_device *dev;
7082 struct drm_crtc *tmp;
7083 int crtc_mask = 1;
47f1c6c9 7084
50f56119 7085 WARN(!crtc, "checking null crtc?\n");
47f1c6c9 7086
50f56119 7087 dev = crtc->dev;
47f1c6c9 7088
50f56119
DV
7089 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
7090 if (tmp == crtc)
7091 break;
7092 crtc_mask <<= 1;
7093 }
47f1c6c9 7094
50f56119
DV
7095 if (encoder->possible_crtcs & crtc_mask)
7096 return true;
7097 return false;
47f1c6c9 7098}
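/*
 * Editor's note: an illustrative walk-through of the mask check above, not
 * part of this file. If the crtc passed in is the third entry of
 * dev->mode_config.crtc_list, the loop leaves crtc_mask at 1 << 2, so the
 * test reduces to checking bit 2 of the encoder's possible_crtcs:
 *
 *	crtc_mask = 0x4;
 *	return (encoder->possible_crtcs & crtc_mask) != 0;
 */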
79e53945 7099
9a935856
DV
7100/**
7101 * intel_modeset_update_staged_output_state
7102 *
7103 * Updates the staged output configuration state, e.g. after we've read out the
7104 * current hw state.
7105 */
7106static void intel_modeset_update_staged_output_state(struct drm_device *dev)
f6e5b160 7107{
9a935856
DV
7108 struct intel_encoder *encoder;
7109 struct intel_connector *connector;
f6e5b160 7110
9a935856
DV
7111 list_for_each_entry(connector, &dev->mode_config.connector_list,
7112 base.head) {
7113 connector->new_encoder =
7114 to_intel_encoder(connector->base.encoder);
7115 }
f6e5b160 7116
9a935856
DV
7117 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7118 base.head) {
7119 encoder->new_crtc =
7120 to_intel_crtc(encoder->base.crtc);
7121 }
f6e5b160
CW
7122}
7123
9a935856
DV
7124/**
7125 * intel_modeset_commit_output_state
7126 *
7127 * This function copies the staged display pipe configuration to the real one.
7128 */
7129static void intel_modeset_commit_output_state(struct drm_device *dev)
7130{
7131 struct intel_encoder *encoder;
7132 struct intel_connector *connector;
f6e5b160 7133
9a935856
DV
7134 list_for_each_entry(connector, &dev->mode_config.connector_list,
7135 base.head) {
7136 connector->base.encoder = &connector->new_encoder->base;
7137 }
f6e5b160 7138
9a935856
DV
7139 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7140 base.head) {
7141 encoder->base.crtc = &encoder->new_crtc->base;
7142 }
7143}
7144
7758a113
DV
7145static struct drm_display_mode *
7146intel_modeset_adjusted_mode(struct drm_crtc *crtc,
7147 struct drm_display_mode *mode)
ee7b9f93 7148{
7758a113
DV
7149 struct drm_device *dev = crtc->dev;
7150 struct drm_display_mode *adjusted_mode;
7151 struct drm_encoder_helper_funcs *encoder_funcs;
7152 struct intel_encoder *encoder;
ee7b9f93 7153
7758a113
DV
7154 adjusted_mode = drm_mode_duplicate(dev, mode);
7155 if (!adjusted_mode)
7156 return ERR_PTR(-ENOMEM);
7157
7158 /* Pass our mode to the connectors and the CRTC to give them a chance to
7159 * adjust it according to limitations or connector properties, and also
7160 * a chance to reject the mode entirely.
7161 */
7162 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7163 base.head) {
7164
7165 if (&encoder->new_crtc->base != crtc)
7166 continue;
7167 encoder_funcs = encoder->base.helper_private;
7168 if (!(encoder_funcs->mode_fixup(&encoder->base, mode,
7169 adjusted_mode))) {
7170 DRM_DEBUG_KMS("Encoder fixup failed\n");
7171 goto fail;
7172 }
ee7b9f93
JB
7173 }
7174
7758a113
DV
7175 if (!(intel_crtc_mode_fixup(crtc, mode, adjusted_mode))) {
7176 DRM_DEBUG_KMS("CRTC fixup failed\n");
7177 goto fail;
ee7b9f93 7178 }
7758a113
DV
7179 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
7180
7181 return adjusted_mode;
7182fail:
7183 drm_mode_destroy(dev, adjusted_mode);
7184 return ERR_PTR(-EINVAL);
ee7b9f93
JB
7185}
7186
e2e1ed41
DV
7187/* Computes which crtcs are affected and sets the relevant bits in the mask. For
7188 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
7189static void
7190intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
7191 unsigned *prepare_pipes, unsigned *disable_pipes)
79e53945
JB
7192{
7193 struct intel_crtc *intel_crtc;
e2e1ed41
DV
7194 struct drm_device *dev = crtc->dev;
7195 struct intel_encoder *encoder;
7196 struct intel_connector *connector;
7197 struct drm_crtc *tmp_crtc;
79e53945 7198
e2e1ed41 7199 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
79e53945 7200
e2e1ed41
DV
7201 /* Check which crtcs have changed outputs connected to them; these need
7202 * to be part of the prepare_pipes mask. We don't (yet) support global
7203 * modeset across multiple crtcs, so modeset_pipes will only have one
7204 * bit set at most. */
7205 list_for_each_entry(connector, &dev->mode_config.connector_list,
7206 base.head) {
7207 if (connector->base.encoder == &connector->new_encoder->base)
7208 continue;
79e53945 7209
e2e1ed41
DV
7210 if (connector->base.encoder) {
7211 tmp_crtc = connector->base.encoder->crtc;
7212
7213 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7214 }
7215
7216 if (connector->new_encoder)
7217 *prepare_pipes |=
7218 1 << connector->new_encoder->new_crtc->pipe;
79e53945
JB
7219 }
7220
e2e1ed41
DV
7221 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7222 base.head) {
7223 if (encoder->base.crtc == &encoder->new_crtc->base)
7224 continue;
7225
7226 if (encoder->base.crtc) {
7227 tmp_crtc = encoder->base.crtc;
7228
7229 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
7230 }
7231
7232 if (encoder->new_crtc)
7233 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
80824003
JB
7234 }
7235
e2e1ed41
DV
7236 /* Check for any pipes that will be fully disabled ... */
7237 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7238 base.head) {
7239 bool used = false;
22fd0fab 7240
e2e1ed41
DV
7241 /* Don't try to disable disabled crtcs. */
7242 if (!intel_crtc->base.enabled)
7243 continue;
7e7d76c3 7244
e2e1ed41
DV
7245 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7246 base.head) {
7247 if (encoder->new_crtc == intel_crtc)
7248 used = true;
7249 }
7250
7251 if (!used)
7252 *disable_pipes |= 1 << intel_crtc->pipe;
7e7d76c3
JB
7253 }
7254
e2e1ed41
DV
7255
7256 /* set_mode is also used to update properties on live display pipes. */
7257 intel_crtc = to_intel_crtc(crtc);
7258 if (crtc->enabled)
7259 *prepare_pipes |= 1 << intel_crtc->pipe;
7260
7261 /* We only support modeset on one single crtc, hence we need to do that
7262 * only for the passed-in crtc if we change anything other than just
7263 * disabling crtcs.
7264 *
7265 * This is actually not true; to be fully compatible with the old crtc
7266 * helper we automatically disable _any_ output (i.e. it doesn't need to be
7267 * connected to the crtc we're modesetting on) if it's disconnected.
7268 * Which is a rather nutty api (since changing the output configuration
7269 * without userspace's explicit request can lead to confusion), but
7270 * alas. Hence we currently need to modeset on all pipes we prepare. */
7271 if (*prepare_pipes)
7272 *modeset_pipes = *prepare_pipes;
7273
7274 /* ... and mask these out. */
7275 *modeset_pipes &= ~(*disable_pipes);
7276 *prepare_pipes &= ~(*disable_pipes);
7277}
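/*
 * Editor's note: a hedged worked example of the mask bookkeeping above, not
 * part of this file. Assume a modeset that moves the only active connector
 * from the crtc on pipe A to the crtc on pipe B, leaving nothing else on
 * pipe A:
 *
 *	prepare_pipes = (1 << PIPE_A) | (1 << PIPE_B);	// old and new crtc
 *	disable_pipes = (1 << PIPE_A);			// pipe A loses all encoders
 *	modeset_pipes = prepare_pipes;			// copied while non-zero
 *	modeset_pipes &= ~disable_pipes;		// -> only PIPE_B remains
 *	prepare_pipes &= ~disable_pipes;		// -> only PIPE_B remains
 */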
7278
ea9d758d
DV
7279static bool intel_crtc_in_use(struct drm_crtc *crtc)
7280{
7281 struct drm_encoder *encoder;
7282 struct drm_device *dev = crtc->dev;
7283
7284 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
7285 if (encoder->crtc == crtc)
7286 return true;
7287
7288 return false;
7289}
7290
7291static void
7292intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
7293{
7294 struct intel_encoder *intel_encoder;
7295 struct intel_crtc *intel_crtc;
7296 struct drm_connector *connector;
7297
7298 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
7299 base.head) {
7300 if (!intel_encoder->base.crtc)
7301 continue;
7302
7303 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
7304
7305 if (prepare_pipes & (1 << intel_crtc->pipe))
7306 intel_encoder->connectors_active = false;
7307 }
7308
7309 intel_modeset_commit_output_state(dev);
7310
7311 /* Update computed state. */
7312 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
7313 base.head) {
7314 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
7315 }
7316
7317 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
7318 if (!connector->encoder || !connector->encoder->crtc)
7319 continue;
7320
7321 intel_crtc = to_intel_crtc(connector->encoder->crtc);
7322
7323 if (prepare_pipes & (1 << intel_crtc->pipe)) {
68d34720
DV
7324 struct drm_property *dpms_property =
7325 dev->mode_config.dpms_property;
7326
ea9d758d 7327 connector->dpms = DRM_MODE_DPMS_ON;
68d34720
DV
7328 drm_connector_property_set_value(connector,
7329 dpms_property,
7330 DRM_MODE_DPMS_ON);
ea9d758d
DV
7331
7332 intel_encoder = to_intel_encoder(connector->encoder);
7333 intel_encoder->connectors_active = true;
7334 }
7335 }
7336
7337}
7338
25c5b266
DV
7339#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
7340 list_for_each_entry((intel_crtc), \
7341 &(dev)->mode_config.crtc_list, \
7342 base.head) \
7343 if (mask & (1 << (intel_crtc)->pipe)) \
7344
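/*
 * Editor's note: a minimal usage sketch of the iterator macro above, not
 * part of this file; it mirrors how the macro is used in intel_set_mode()
 * below, assuming "mask" already holds a pipe bitmask:
 *
 *	struct intel_crtc *intel_crtc;
 *
 *	for_each_intel_crtc_masked(dev, mask, intel_crtc)
 *		dev_priv->display.crtc_disable(&intel_crtc->base);
 */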
b980514c 7345void
8af6cf88
DV
7346intel_modeset_check_state(struct drm_device *dev)
7347{
7348 struct intel_crtc *crtc;
7349 struct intel_encoder *encoder;
7350 struct intel_connector *connector;
7351
7352 list_for_each_entry(connector, &dev->mode_config.connector_list,
7353 base.head) {
7354 /* This also checks the encoder/connector hw state with the
7355 * ->get_hw_state callbacks. */
7356 intel_connector_check_state(connector);
7357
7358 WARN(&connector->new_encoder->base != connector->base.encoder,
7359 "connector's staged encoder doesn't match current encoder\n");
7360 }
7361
7362 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7363 base.head) {
7364 bool enabled = false;
7365 bool active = false;
7366 enum pipe pipe, tracked_pipe;
7367
7368 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
7369 encoder->base.base.id,
7370 drm_get_encoder_name(&encoder->base));
7371
7372 WARN(&encoder->new_crtc->base != encoder->base.crtc,
7373 "encoder's stage crtc doesn't match current crtc\n");
7374 WARN(encoder->connectors_active && !encoder->base.crtc,
7375 "encoder's active_connectors set, but no crtc\n");
7376
7377 list_for_each_entry(connector, &dev->mode_config.connector_list,
7378 base.head) {
7379 if (connector->base.encoder != &encoder->base)
7380 continue;
7381 enabled = true;
7382 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
7383 active = true;
7384 }
7385 WARN(!!encoder->base.crtc != enabled,
7386 "encoder's enabled state mismatch "
7387 "(expected %i, found %i)\n",
7388 !!encoder->base.crtc, enabled);
7389 WARN(active && !encoder->base.crtc,
7390 "active encoder with no crtc\n");
7391
7392 WARN(encoder->connectors_active != active,
7393 "encoder's computed active state doesn't match tracked active state "
7394 "(expected %i, found %i)\n", active, encoder->connectors_active);
7395
7396 active = encoder->get_hw_state(encoder, &pipe);
7397 WARN(active != encoder->connectors_active,
7398 "encoder's hw state doesn't match sw tracking "
7399 "(expected %i, found %i)\n",
7400 encoder->connectors_active, active);
7401
7402 if (!encoder->base.crtc)
7403 continue;
7404
7405 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
7406 WARN(active && pipe != tracked_pipe,
7407 "active encoder's pipe doesn't match"
7408 "(expected %i, found %i)\n",
7409 tracked_pipe, pipe);
7410
7411 }
7412
7413 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
7414 base.head) {
7415 bool enabled = false;
7416 bool active = false;
7417
7418 DRM_DEBUG_KMS("[CRTC:%d]\n",
7419 crtc->base.base.id);
7420
7421 WARN(crtc->active && !crtc->base.enabled,
7422 "active crtc, but not enabled in sw tracking\n");
7423
7424 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7425 base.head) {
7426 if (encoder->base.crtc != &crtc->base)
7427 continue;
7428 enabled = true;
7429 if (encoder->connectors_active)
7430 active = true;
7431 }
7432 WARN(active != crtc->active,
7433 "crtc's computed active state doesn't match tracked active state "
7434 "(expected %i, found %i)\n", active, crtc->active);
7435 WARN(enabled != crtc->base.enabled,
7436 "crtc's computed enabled state doesn't match tracked enabled state "
7437 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
7438
7439 assert_pipe(dev->dev_private, crtc->pipe, crtc->active);
7440 }
7441}
7442
a6778b3c
DV
7443bool intel_set_mode(struct drm_crtc *crtc,
7444 struct drm_display_mode *mode,
94352cf9 7445 int x, int y, struct drm_framebuffer *fb)
a6778b3c
DV
7446{
7447 struct drm_device *dev = crtc->dev;
dbf2b54e 7448 drm_i915_private_t *dev_priv = dev->dev_private;
a6778b3c 7449 struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
a6778b3c 7450 struct drm_encoder_helper_funcs *encoder_funcs;
a6778b3c 7451 struct drm_encoder *encoder;
25c5b266
DV
7452 struct intel_crtc *intel_crtc;
7453 unsigned disable_pipes, prepare_pipes, modeset_pipes;
a6778b3c
DV
7454 bool ret = true;
7455
e2e1ed41 7456 intel_modeset_affected_pipes(crtc, &modeset_pipes,
25c5b266
DV
7457 &prepare_pipes, &disable_pipes);
7458
7459 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
7460 modeset_pipes, prepare_pipes, disable_pipes);
e2e1ed41 7461
976f8a20
DV
7462 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
7463 intel_crtc_disable(&intel_crtc->base);
87f1faa6 7464
a6778b3c
DV
7465 saved_hwmode = crtc->hwmode;
7466 saved_mode = crtc->mode;
a6778b3c 7467
25c5b266
DV
7468 /* Hack: Because we don't (yet) support global modeset on multiple
7469 * crtcs, we don't keep track of the new mode for more than one crtc.
7470 * Hence simply check whether any bit is set in modeset_pipes in all the
7471 * pieces of code that are not yet converted to deal with multiple crtcs
7472 * changing their mode at the same time. */
7473 adjusted_mode = NULL;
7474 if (modeset_pipes) {
7475 adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
7476 if (IS_ERR(adjusted_mode)) {
7477 return false;
7478 }
25c5b266 7479 }
a6778b3c 7480
ea9d758d
DV
7481 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
7482 if (intel_crtc->base.enabled)
7483 dev_priv->display.crtc_disable(&intel_crtc->base);
7484 }
a6778b3c 7485
6c4c86f5
DV
7486 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
7487 * to set it here already, even though we also pass it down the callchain.
7488 */
7489 if (modeset_pipes)
25c5b266 7490 crtc->mode = *mode;
7758a113 7491
ea9d758d
DV
7492 /* Only after disabling all output pipelines that will be changed can we
7493 * update the output configuration. */
7494 intel_modeset_update_state(dev, prepare_pipes);
7495
a6778b3c
DV
7496 /* Set up the DPLL and any encoder state that needs to adjust to, or
7497 * depends on, the DPLL.
7498 */
25c5b266
DV
7499 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
7500 ret = !intel_crtc_mode_set(&intel_crtc->base,
7501 mode, adjusted_mode,
7502 x, y, fb);
7503 if (!ret)
7504 goto done;
a6778b3c 7505
25c5b266 7506 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
a6778b3c 7507
25c5b266
DV
7508 if (encoder->crtc != &intel_crtc->base)
7509 continue;
a6778b3c 7510
25c5b266
DV
7511 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
7512 encoder->base.id, drm_get_encoder_name(encoder),
7513 mode->base.id, mode->name);
7514 encoder_funcs = encoder->helper_private;
7515 encoder_funcs->mode_set(encoder, mode, adjusted_mode);
7516 }
a6778b3c
DV
7517 }
7518
7519 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
25c5b266
DV
7520 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7521 dev_priv->display.crtc_enable(&intel_crtc->base);
a6778b3c 7522
25c5b266
DV
7523 if (modeset_pipes) {
7524 /* Store real post-adjustment hardware mode. */
7525 crtc->hwmode = *adjusted_mode;
a6778b3c 7526
25c5b266
DV
7527 /* Calculate and store various constants which
7528 * are later needed by vblank and swap-completion
7529 * timestamping. They are derived from true hwmode.
7530 */
7531 drm_calc_timestamping_constants(crtc);
7532 }
a6778b3c
DV
7533
7534 /* FIXME: add subpixel order */
7535done:
7536 drm_mode_destroy(dev, adjusted_mode);
25c5b266 7537 if (!ret && crtc->enabled) {
a6778b3c
DV
7538 crtc->hwmode = saved_hwmode;
7539 crtc->mode = saved_mode;
8af6cf88
DV
7540 } else {
7541 intel_modeset_check_state(dev);
a6778b3c
DV
7542 }
7543
7544 return ret;
7545}
7546
25c5b266
DV
7547#undef for_each_intel_crtc_masked
7548
d9e55608
DV
7549static void intel_set_config_free(struct intel_set_config *config)
7550{
7551 if (!config)
7552 return;
7553
1aa4b628
DV
7554 kfree(config->save_connector_encoders);
7555 kfree(config->save_encoder_crtcs);
d9e55608
DV
7556 kfree(config);
7557}
7558
85f9eb71
DV
7559static int intel_set_config_save_state(struct drm_device *dev,
7560 struct intel_set_config *config)
7561{
85f9eb71
DV
7562 struct drm_encoder *encoder;
7563 struct drm_connector *connector;
7564 int count;
7565
1aa4b628
DV
7566 config->save_encoder_crtcs =
7567 kcalloc(dev->mode_config.num_encoder,
7568 sizeof(struct drm_crtc *), GFP_KERNEL);
7569 if (!config->save_encoder_crtcs)
85f9eb71
DV
7570 return -ENOMEM;
7571
1aa4b628
DV
7572 config->save_connector_encoders =
7573 kcalloc(dev->mode_config.num_connector,
7574 sizeof(struct drm_encoder *), GFP_KERNEL);
7575 if (!config->save_connector_encoders)
85f9eb71
DV
7576 return -ENOMEM;
7577
7578 /* Copy data. Note that driver private data is not affected.
7579 * Should anything bad happen only the expected state is
7580 * restored, not the driver's personal bookkeeping.
7581 */
85f9eb71
DV
7582 count = 0;
7583 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1aa4b628 7584 config->save_encoder_crtcs[count++] = encoder->crtc;
85f9eb71
DV
7585 }
7586
7587 count = 0;
7588 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1aa4b628 7589 config->save_connector_encoders[count++] = connector->encoder;
85f9eb71
DV
7590 }
7591
7592 return 0;
7593}
7594
7595static void intel_set_config_restore_state(struct drm_device *dev,
7596 struct intel_set_config *config)
7597{
9a935856
DV
7598 struct intel_encoder *encoder;
7599 struct intel_connector *connector;
85f9eb71
DV
7600 int count;
7601
85f9eb71 7602 count = 0;
9a935856
DV
7603 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
7604 encoder->new_crtc =
7605 to_intel_crtc(config->save_encoder_crtcs[count++]);
85f9eb71
DV
7606 }
7607
7608 count = 0;
9a935856
DV
7609 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
7610 connector->new_encoder =
7611 to_intel_encoder(config->save_connector_encoders[count++]);
85f9eb71
DV
7612 }
7613}
7614
5e2b584e
DV
7615static void
7616intel_set_config_compute_mode_changes(struct drm_mode_set *set,
7617 struct intel_set_config *config)
7618{
7619
7620 /* We should be able to check here if the fb has the same properties
7621 * and then just flip_or_move it */
7622 if (set->crtc->fb != set->fb) {
7623 /* If we have no fb then treat it as a full mode set */
7624 if (set->crtc->fb == NULL) {
7625 DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
7626 config->mode_changed = true;
7627 } else if (set->fb == NULL) {
7628 config->mode_changed = true;
7629 } else if (set->fb->depth != set->crtc->fb->depth) {
7630 config->mode_changed = true;
7631 } else if (set->fb->bits_per_pixel !=
7632 set->crtc->fb->bits_per_pixel) {
7633 config->mode_changed = true;
7634 } else
7635 config->fb_changed = true;
7636 }
7637
835c5873 7638 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
5e2b584e
DV
7639 config->fb_changed = true;
7640
7641 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
7642 DRM_DEBUG_KMS("modes are different, full mode set\n");
7643 drm_mode_debug_printmodeline(&set->crtc->mode);
7644 drm_mode_debug_printmodeline(set->mode);
7645 config->mode_changed = true;
7646 }
7647}
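/*
 * Editor's note: a hedged example of how the two flags above are consumed,
 * not part of this file. A flip to an fb with the same depth and bpp but a
 * new x/y offset only sets config->fb_changed, while switching between fbs
 * of different bpp sets config->mode_changed; intel_crtc_set_config() below
 * then picks the matching path:
 *
 *	if (config->mode_changed)
 *		intel_set_mode(set->crtc, set->mode, set->x, set->y, set->fb);
 *	else if (config->fb_changed)
 *		intel_pipe_set_base(set->crtc, set->x, set->y, set->fb);
 */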
7648
2e431051 7649static int
9a935856
DV
7650intel_modeset_stage_output_state(struct drm_device *dev,
7651 struct drm_mode_set *set,
7652 struct intel_set_config *config)
50f56119 7653{
85f9eb71 7654 struct drm_crtc *new_crtc;
9a935856
DV
7655 struct intel_connector *connector;
7656 struct intel_encoder *encoder;
2e431051 7657 int count, ro;
50f56119 7658
9a935856
DV
7659 /* The upper layers ensure that we either disable a crtc or have a list
7660 * of connectors. For paranoia, double-check this. */
7661 WARN_ON(!set->fb && (set->num_connectors != 0));
7662 WARN_ON(set->fb && (set->num_connectors == 0));
7663
50f56119 7664 count = 0;
9a935856
DV
7665 list_for_each_entry(connector, &dev->mode_config.connector_list,
7666 base.head) {
7667 /* Otherwise traverse the passed-in connector list and get encoders
7668 * for them. */
50f56119 7669 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856
DV
7670 if (set->connectors[ro] == &connector->base) {
7671 connector->new_encoder = connector->encoder;
50f56119
DV
7672 break;
7673 }
7674 }
7675
9a935856
DV
7676 /* If we disable the crtc, disable all its connectors. Also, if
7677 * the connector is on the changing crtc but not on the new
7678 * connector list, disable it. */
7679 if ((!set->fb || ro == set->num_connectors) &&
7680 connector->base.encoder &&
7681 connector->base.encoder->crtc == set->crtc) {
7682 connector->new_encoder = NULL;
7683
7684 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
7685 connector->base.base.id,
7686 drm_get_connector_name(&connector->base));
7687 }
7688
7689
7690 if (&connector->new_encoder->base != connector->base.encoder) {
50f56119 7691 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
5e2b584e 7692 config->mode_changed = true;
50f56119 7693 }
9a935856
DV
7694
7695 /* Disable all disconnected encoders. */
7696 if (connector->base.status == connector_status_disconnected)
7697 connector->new_encoder = NULL;
50f56119 7698 }
9a935856 7699 /* connector->new_encoder is now updated for all connectors. */
50f56119 7700
9a935856 7701 /* Update crtc of enabled connectors. */
50f56119 7702 count = 0;
9a935856
DV
7703 list_for_each_entry(connector, &dev->mode_config.connector_list,
7704 base.head) {
7705 if (!connector->new_encoder)
50f56119
DV
7706 continue;
7707
9a935856 7708 new_crtc = connector->new_encoder->base.crtc;
50f56119
DV
7709
7710 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856 7711 if (set->connectors[ro] == &connector->base)
50f56119
DV
7712 new_crtc = set->crtc;
7713 }
7714
7715 /* Make sure the new CRTC will work with the encoder */
9a935856
DV
7716 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
7717 new_crtc)) {
5e2b584e 7718 return -EINVAL;
50f56119 7719 }
9a935856
DV
7720 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
7721
7722 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
7723 connector->base.base.id,
7724 drm_get_connector_name(&connector->base),
7725 new_crtc->base.id);
7726 }
7727
7728 /* Check for any encoders that need to be disabled. */
7729 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
7730 base.head) {
7731 list_for_each_entry(connector,
7732 &dev->mode_config.connector_list,
7733 base.head) {
7734 if (connector->new_encoder == encoder) {
7735 WARN_ON(!connector->new_encoder->new_crtc);
7736
7737 goto next_encoder;
7738 }
7739 }
7740 encoder->new_crtc = NULL;
7741next_encoder:
7742 /* Only now check for crtc changes so we don't miss encoders
7743 * that will be disabled. */
7744 if (&encoder->new_crtc->base != encoder->base.crtc) {
50f56119 7745 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
5e2b584e 7746 config->mode_changed = true;
50f56119
DV
7747 }
7748 }
9a935856 7749 /* Now we've also updated encoder->new_crtc for all encoders. */
50f56119 7750
2e431051
DV
7751 return 0;
7752}
7753
7754static int intel_crtc_set_config(struct drm_mode_set *set)
7755{
7756 struct drm_device *dev;
2e431051
DV
7757 struct drm_mode_set save_set;
7758 struct intel_set_config *config;
7759 int ret;
2e431051 7760
8d3e375e
DV
7761 BUG_ON(!set);
7762 BUG_ON(!set->crtc);
7763 BUG_ON(!set->crtc->helper_private);
2e431051
DV
7764
7765 if (!set->mode)
7766 set->fb = NULL;
7767
431e50f7
DV
7768 /* The fb helper likes to play gross jokes with ->mode_set_config.
7769 * Unfortunately the crtc helper doesn't do much at all for this case,
7770 * so we have to cope with this madness until the fb helper is fixed up. */
7771 if (set->fb && set->num_connectors == 0)
7772 return 0;
7773
2e431051
DV
7774 if (set->fb) {
7775 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
7776 set->crtc->base.id, set->fb->base.id,
7777 (int)set->num_connectors, set->x, set->y);
7778 } else {
7779 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
2e431051
DV
7780 }
7781
7782 dev = set->crtc->dev;
7783
7784 ret = -ENOMEM;
7785 config = kzalloc(sizeof(*config), GFP_KERNEL);
7786 if (!config)
7787 goto out_config;
7788
7789 ret = intel_set_config_save_state(dev, config);
7790 if (ret)
7791 goto out_config;
7792
7793 save_set.crtc = set->crtc;
7794 save_set.mode = &set->crtc->mode;
7795 save_set.x = set->crtc->x;
7796 save_set.y = set->crtc->y;
7797 save_set.fb = set->crtc->fb;
7798
7799 /* Compute whether we need a full modeset, only an fb base update or no
7800 * change at all. In the future we might also check whether only the
7801 * mode changed, e.g. for LVDS where we only change the panel fitter in
7802 * such cases. */
7803 intel_set_config_compute_mode_changes(set, config);
7804
9a935856 7805 ret = intel_modeset_stage_output_state(dev, set, config);
2e431051
DV
7806 if (ret)
7807 goto fail;
7808
5e2b584e 7809 if (config->mode_changed) {
87f1faa6 7810 if (set->mode) {
50f56119
DV
7811 DRM_DEBUG_KMS("attempting to set mode from"
7812 " userspace\n");
7813 drm_mode_debug_printmodeline(set->mode);
87f1faa6
DV
7814 }
7815
7816 if (!intel_set_mode(set->crtc, set->mode,
7817 set->x, set->y, set->fb)) {
7818 DRM_ERROR("failed to set mode on [CRTC:%d]\n",
7819 set->crtc->base.id);
7820 ret = -EINVAL;
7821 goto fail;
7822 }
5e2b584e 7823 } else if (config->fb_changed) {
4f660f49 7824 ret = intel_pipe_set_base(set->crtc,
94352cf9 7825 set->x, set->y, set->fb);
50f56119
DV
7826 }
7827
d9e55608
DV
7828 intel_set_config_free(config);
7829
50f56119
DV
7830 return 0;
7831
7832fail:
85f9eb71 7833 intel_set_config_restore_state(dev, config);
50f56119
DV
7834
7835 /* Try to restore the config */
5e2b584e 7836 if (config->mode_changed &&
a6778b3c
DV
7837 !intel_set_mode(save_set.crtc, save_set.mode,
7838 save_set.x, save_set.y, save_set.fb))
50f56119
DV
7839 DRM_ERROR("failed to restore config after modeset failure\n");
7840
d9e55608
DV
7841out_config:
7842 intel_set_config_free(config);
50f56119
DV
7843 return ret;
7844}
7845
f6e5b160 7846static const struct drm_crtc_funcs intel_crtc_funcs = {
f6e5b160
CW
7847 .cursor_set = intel_crtc_cursor_set,
7848 .cursor_move = intel_crtc_cursor_move,
7849 .gamma_set = intel_crtc_gamma_set,
50f56119 7850 .set_config = intel_crtc_set_config,
f6e5b160
CW
7851 .destroy = intel_crtc_destroy,
7852 .page_flip = intel_crtc_page_flip,
7853};
7854
79f689aa
PZ
7855static void intel_cpu_pll_init(struct drm_device *dev)
7856{
7857 if (IS_HASWELL(dev))
7858 intel_ddi_pll_init(dev);
7859}
7860
ee7b9f93
JB
7861static void intel_pch_pll_init(struct drm_device *dev)
7862{
7863 drm_i915_private_t *dev_priv = dev->dev_private;
7864 int i;
7865
7866 if (dev_priv->num_pch_pll == 0) {
7867 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
7868 return;
7869 }
7870
7871 for (i = 0; i < dev_priv->num_pch_pll; i++) {
7872 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
7873 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
7874 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
7875 }
7876}
7877
b358d0a6 7878static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 7879{
22fd0fab 7880 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
7881 struct intel_crtc *intel_crtc;
7882 int i;
7883
7884 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
7885 if (intel_crtc == NULL)
7886 return;
7887
7888 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
7889
7890 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
7891 for (i = 0; i < 256; i++) {
7892 intel_crtc->lut_r[i] = i;
7893 intel_crtc->lut_g[i] = i;
7894 intel_crtc->lut_b[i] = i;
7895 }
7896
80824003
JB
7897 /* Swap pipes & planes for FBC on pre-965 */
7898 intel_crtc->pipe = pipe;
7899 intel_crtc->plane = pipe;
e2e767ab 7900 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
28c97730 7901 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 7902 intel_crtc->plane = !pipe;
80824003
JB
7903 }
7904
22fd0fab
JB
7905 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
7906 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
7907 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
7908 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
7909
5a354204 7910 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7e7d76c3 7911
79e53945 7912 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
79e53945
JB
7913}
7914
08d7b3d1 7915int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 7916 struct drm_file *file)
08d7b3d1 7917{
08d7b3d1 7918 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
7919 struct drm_mode_object *drmmode_obj;
7920 struct intel_crtc *crtc;
08d7b3d1 7921
1cff8f6b
DV
7922 if (!drm_core_check_feature(dev, DRIVER_MODESET))
7923 return -ENODEV;
08d7b3d1 7924
c05422d5
DV
7925 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
7926 DRM_MODE_OBJECT_CRTC);
08d7b3d1 7927
c05422d5 7928 if (!drmmode_obj) {
08d7b3d1
CW
7929 DRM_ERROR("no such CRTC id\n");
7930 return -EINVAL;
7931 }
7932
c05422d5
DV
7933 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
7934 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 7935
c05422d5 7936 return 0;
08d7b3d1
CW
7937}
7938
66a9278e 7939static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 7940{
66a9278e
DV
7941 struct drm_device *dev = encoder->base.dev;
7942 struct intel_encoder *source_encoder;
79e53945 7943 int index_mask = 0;
79e53945
JB
7944 int entry = 0;
7945
66a9278e
DV
7946 list_for_each_entry(source_encoder,
7947 &dev->mode_config.encoder_list, base.head) {
7948
7949 if (encoder == source_encoder)
79e53945 7950 index_mask |= (1 << entry);
66a9278e
DV
7951
7952 /* Intel hw has only one MUX where encoders could be cloned. */
7953 if (encoder->cloneable && source_encoder->cloneable)
7954 index_mask |= (1 << entry);
7955
79e53945
JB
7956 entry++;
7957 }
4ef69c7a 7958
79e53945
JB
7959 return index_mask;
7960}
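/*
 * Editor's note: a small worked example of the clone mask above, not part
 * of this file. With a hypothetical encoder list of { CRT, HDMI, LVDS } in
 * that order, where only CRT and HDMI are marked cloneable,
 * intel_encoder_clones(CRT) walks the list and returns:
 *
 *	index_mask = (1 << 0)	// entry 0: the CRT encoder itself
 *		   | (1 << 1);	// entry 1: HDMI, both sides cloneable
 *				// entry 2: LVDS not cloneable, bit stays clear
 */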
7961
4d302442
CW
7962static bool has_edp_a(struct drm_device *dev)
7963{
7964 struct drm_i915_private *dev_priv = dev->dev_private;
7965
7966 if (!IS_MOBILE(dev))
7967 return false;
7968
7969 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
7970 return false;
7971
7972 if (IS_GEN5(dev) &&
7973 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
7974 return false;
7975
7976 return true;
7977}
7978
79e53945
JB
7979static void intel_setup_outputs(struct drm_device *dev)
7980{
725e30ad 7981 struct drm_i915_private *dev_priv = dev->dev_private;
4ef69c7a 7982 struct intel_encoder *encoder;
cb0953d7 7983 bool dpd_is_edp = false;
f3cfcba6 7984 bool has_lvds;
79e53945 7985
f3cfcba6 7986 has_lvds = intel_lvds_init(dev);
c5d1b51d
CW
7987 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
7988 /* disable the panel fitter on everything but LVDS */
7989 I915_WRITE(PFIT_CONTROL, 0);
7990 }
79e53945 7991
bad720ff 7992 if (HAS_PCH_SPLIT(dev)) {
cb0953d7 7993 dpd_is_edp = intel_dpd_is_edp(dev);
30ad48b7 7994
4d302442 7995 if (has_edp_a(dev))
ab9d7c30 7996 intel_dp_init(dev, DP_A, PORT_A);
32f9d658 7997
cb0953d7 7998 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
ab9d7c30 7999 intel_dp_init(dev, PCH_DP_D, PORT_D);
cb0953d7
AJ
8000 }
8001
8002 intel_crt_init(dev);
8003
0e72a5b5
ED
8004 if (IS_HASWELL(dev)) {
8005 int found;
8006
8007 /* Haswell uses DDI functions to detect digital outputs */
8008 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
8009 /* DDI A only supports eDP */
8010 if (found)
8011 intel_ddi_init(dev, PORT_A);
8012
8013 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
8014 * register */
8015 found = I915_READ(SFUSE_STRAP);
8016
8017 if (found & SFUSE_STRAP_DDIB_DETECTED)
8018 intel_ddi_init(dev, PORT_B);
8019 if (found & SFUSE_STRAP_DDIC_DETECTED)
8020 intel_ddi_init(dev, PORT_C);
8021 if (found & SFUSE_STRAP_DDID_DETECTED)
8022 intel_ddi_init(dev, PORT_D);
8023 } else if (HAS_PCH_SPLIT(dev)) {
cb0953d7
AJ
8024 int found;
8025
30ad48b7 8026 if (I915_READ(HDMIB) & PORT_DETECTED) {
461ed3ca 8027 /* PCH SDVOB is multiplexed with HDMIB */
eef4eacb 8028 found = intel_sdvo_init(dev, PCH_SDVOB, true);
30ad48b7 8029 if (!found)
08d644ad 8030 intel_hdmi_init(dev, HDMIB, PORT_B);
5eb08b69 8031 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
ab9d7c30 8032 intel_dp_init(dev, PCH_DP_B, PORT_B);
30ad48b7
ZW
8033 }
8034
8035 if (I915_READ(HDMIC) & PORT_DETECTED)
08d644ad 8036 intel_hdmi_init(dev, HDMIC, PORT_C);
30ad48b7 8037
b708a1d5 8038 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
08d644ad 8039 intel_hdmi_init(dev, HDMID, PORT_D);
30ad48b7 8040
5eb08b69 8041 if (I915_READ(PCH_DP_C) & DP_DETECTED)
ab9d7c30 8042 intel_dp_init(dev, PCH_DP_C, PORT_C);
5eb08b69 8043
cb0953d7 8044 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
ab9d7c30 8045 intel_dp_init(dev, PCH_DP_D, PORT_D);
4a87d65d
JB
8046 } else if (IS_VALLEYVIEW(dev)) {
8047 int found;
8048
19c03924
GB
8049 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
8050 if (I915_READ(DP_C) & DP_DETECTED)
8051 intel_dp_init(dev, DP_C, PORT_C);
8052
4a87d65d
JB
8053 if (I915_READ(SDVOB) & PORT_DETECTED) {
8054 /* SDVOB is multiplexed with HDMIB */
8055 found = intel_sdvo_init(dev, SDVOB, true);
8056 if (!found)
08d644ad 8057 intel_hdmi_init(dev, SDVOB, PORT_B);
4a87d65d 8058 if (!found && (I915_READ(DP_B) & DP_DETECTED))
ab9d7c30 8059 intel_dp_init(dev, DP_B, PORT_B);
4a87d65d
JB
8060 }
8061
8062 if (I915_READ(SDVOC) & PORT_DETECTED)
08d644ad 8063 intel_hdmi_init(dev, SDVOC, PORT_C);
5eb08b69 8064
103a196f 8065 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 8066 bool found = false;
7d57382e 8067
725e30ad 8068 if (I915_READ(SDVOB) & SDVO_DETECTED) {
b01f2c3a 8069 DRM_DEBUG_KMS("probing SDVOB\n");
eef4eacb 8070 found = intel_sdvo_init(dev, SDVOB, true);
b01f2c3a
JB
8071 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
8072 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
08d644ad 8073 intel_hdmi_init(dev, SDVOB, PORT_B);
b01f2c3a 8074 }
27185ae1 8075
b01f2c3a
JB
8076 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
8077 DRM_DEBUG_KMS("probing DP_B\n");
ab9d7c30 8078 intel_dp_init(dev, DP_B, PORT_B);
b01f2c3a 8079 }
725e30ad 8080 }
13520b05
KH
8081
8082 /* Before G4X SDVOC doesn't have its own detect register */
13520b05 8083
b01f2c3a
JB
8084 if (I915_READ(SDVOB) & SDVO_DETECTED) {
8085 DRM_DEBUG_KMS("probing SDVOC\n");
eef4eacb 8086 found = intel_sdvo_init(dev, SDVOC, false);
b01f2c3a 8087 }
27185ae1
ML
8088
8089 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
8090
b01f2c3a
JB
8091 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
8092 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
08d644ad 8093 intel_hdmi_init(dev, SDVOC, PORT_C);
b01f2c3a
JB
8094 }
8095 if (SUPPORTS_INTEGRATED_DP(dev)) {
8096 DRM_DEBUG_KMS("probing DP_C\n");
ab9d7c30 8097 intel_dp_init(dev, DP_C, PORT_C);
b01f2c3a 8098 }
725e30ad 8099 }
27185ae1 8100
b01f2c3a
JB
8101 if (SUPPORTS_INTEGRATED_DP(dev) &&
8102 (I915_READ(DP_D) & DP_DETECTED)) {
8103 DRM_DEBUG_KMS("probing DP_D\n");
ab9d7c30 8104 intel_dp_init(dev, DP_D, PORT_D);
b01f2c3a 8105 }
bad720ff 8106 } else if (IS_GEN2(dev))
79e53945
JB
8107 intel_dvo_init(dev);
8108
103a196f 8109 if (SUPPORTS_TV(dev))
79e53945
JB
8110 intel_tv_init(dev);
8111
4ef69c7a
CW
8112 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
8113 encoder->base.possible_crtcs = encoder->crtc_mask;
8114 encoder->base.possible_clones =
66a9278e 8115 intel_encoder_clones(encoder);
79e53945 8116 }
47356eb6 8117
40579abe 8118 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
9fb526db 8119 ironlake_init_pch_refclk(dev);
79e53945
JB
8120}
8121
8122static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
8123{
8124 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945
JB
8125
8126 drm_framebuffer_cleanup(fb);
05394f39 8127 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
79e53945
JB
8128
8129 kfree(intel_fb);
8130}
8131
8132static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 8133 struct drm_file *file,
79e53945
JB
8134 unsigned int *handle)
8135{
8136 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 8137 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 8138
05394f39 8139 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
8140}
8141
8142static const struct drm_framebuffer_funcs intel_fb_funcs = {
8143 .destroy = intel_user_framebuffer_destroy,
8144 .create_handle = intel_user_framebuffer_create_handle,
8145};
8146
38651674
DA
8147int intel_framebuffer_init(struct drm_device *dev,
8148 struct intel_framebuffer *intel_fb,
308e5bcb 8149 struct drm_mode_fb_cmd2 *mode_cmd,
05394f39 8150 struct drm_i915_gem_object *obj)
79e53945 8151{
79e53945
JB
8152 int ret;
8153
05394f39 8154 if (obj->tiling_mode == I915_TILING_Y)
57cd6508
CW
8155 return -EINVAL;
8156
308e5bcb 8157 if (mode_cmd->pitches[0] & 63)
57cd6508
CW
8158 return -EINVAL;
8159
308e5bcb 8160 switch (mode_cmd->pixel_format) {
04b3924d
VS
8161 case DRM_FORMAT_RGB332:
8162 case DRM_FORMAT_RGB565:
8163 case DRM_FORMAT_XRGB8888:
b250da79 8164 case DRM_FORMAT_XBGR8888:
04b3924d
VS
8165 case DRM_FORMAT_ARGB8888:
8166 case DRM_FORMAT_XRGB2101010:
8167 case DRM_FORMAT_ARGB2101010:
308e5bcb 8168 /* RGB formats are common across chipsets */
b5626747 8169 break;
04b3924d
VS
8170 case DRM_FORMAT_YUYV:
8171 case DRM_FORMAT_UYVY:
8172 case DRM_FORMAT_YVYU:
8173 case DRM_FORMAT_VYUY:
57cd6508
CW
8174 break;
8175 default:
aca25848
ED
8176 DRM_DEBUG_KMS("unsupported pixel format %u\n",
8177 mode_cmd->pixel_format);
57cd6508
CW
8178 return -EINVAL;
8179 }
8180
79e53945
JB
8181 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
8182 if (ret) {
8183 DRM_ERROR("framebuffer init failed %d\n", ret);
8184 return ret;
8185 }
8186
8187 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
79e53945 8188 intel_fb->obj = obj;
79e53945
JB
8189 return 0;
8190}
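/*
 * Editor's note: a hedged arithmetic example for the pitch check above, not
 * part of this file. "pitches[0] & 63" rejects strides that are not 64-byte
 * aligned: a 1920-wide XRGB8888 fb has a pitch of 1920 * 4 = 7680 bytes,
 * which is a multiple of 64 and passes; a 1366-wide fb would need its
 * 1366 * 4 = 5464 byte pitch padded up to 5504 bytes to pass the check.
 */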
8191
79e53945
JB
8192static struct drm_framebuffer *
8193intel_user_framebuffer_create(struct drm_device *dev,
8194 struct drm_file *filp,
308e5bcb 8195 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 8196{
05394f39 8197 struct drm_i915_gem_object *obj;
79e53945 8198
308e5bcb
JB
8199 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
8200 mode_cmd->handles[0]));
c8725226 8201 if (&obj->base == NULL)
cce13ff7 8202 return ERR_PTR(-ENOENT);
79e53945 8203
d2dff872 8204 return intel_framebuffer_create(dev, mode_cmd, obj);
79e53945
JB
8205}
8206
79e53945 8207static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 8208 .fb_create = intel_user_framebuffer_create,
eb1f8e4f 8209 .output_poll_changed = intel_fb_output_poll_changed,
79e53945
JB
8210};
8211
e70236a8
JB
8212/* Set up chip specific display functions */
8213static void intel_init_display(struct drm_device *dev)
8214{
8215 struct drm_i915_private *dev_priv = dev->dev_private;
8216
8217 /* We always want a DPMS function */
09b4ddf9
PZ
8218 if (IS_HASWELL(dev)) {
8219 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
4f771f10
PZ
8220 dev_priv->display.crtc_enable = haswell_crtc_enable;
8221 dev_priv->display.crtc_disable = haswell_crtc_disable;
6441ab5f 8222 dev_priv->display.off = haswell_crtc_off;
09b4ddf9
PZ
8223 dev_priv->display.update_plane = ironlake_update_plane;
8224 } else if (HAS_PCH_SPLIT(dev)) {
f564048e 8225 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
76e5a89c
DV
8226 dev_priv->display.crtc_enable = ironlake_crtc_enable;
8227 dev_priv->display.crtc_disable = ironlake_crtc_disable;
ee7b9f93 8228 dev_priv->display.off = ironlake_crtc_off;
17638cd6 8229 dev_priv->display.update_plane = ironlake_update_plane;
f564048e 8230 } else {
f564048e 8231 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
76e5a89c
DV
8232 dev_priv->display.crtc_enable = i9xx_crtc_enable;
8233 dev_priv->display.crtc_disable = i9xx_crtc_disable;
ee7b9f93 8234 dev_priv->display.off = i9xx_crtc_off;
17638cd6 8235 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 8236 }
e70236a8 8237
e70236a8 8238 /* Returns the core display clock speed */
25eb05fc
JB
8239 if (IS_VALLEYVIEW(dev))
8240 dev_priv->display.get_display_clock_speed =
8241 valleyview_get_display_clock_speed;
8242 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
e70236a8
JB
8243 dev_priv->display.get_display_clock_speed =
8244 i945_get_display_clock_speed;
8245 else if (IS_I915G(dev))
8246 dev_priv->display.get_display_clock_speed =
8247 i915_get_display_clock_speed;
f2b115e6 8248 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
e70236a8
JB
8249 dev_priv->display.get_display_clock_speed =
8250 i9xx_misc_get_display_clock_speed;
8251 else if (IS_I915GM(dev))
8252 dev_priv->display.get_display_clock_speed =
8253 i915gm_get_display_clock_speed;
8254 else if (IS_I865G(dev))
8255 dev_priv->display.get_display_clock_speed =
8256 i865_get_display_clock_speed;
f0f8a9ce 8257 else if (IS_I85X(dev))
e70236a8
JB
8258 dev_priv->display.get_display_clock_speed =
8259 i855_get_display_clock_speed;
8260 else /* 852, 830 */
8261 dev_priv->display.get_display_clock_speed =
8262 i830_get_display_clock_speed;
8263
7f8a8569 8264 if (HAS_PCH_SPLIT(dev)) {
f00a3ddf 8265 if (IS_GEN5(dev)) {
674cf967 8266 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
e0dac65e 8267 dev_priv->display.write_eld = ironlake_write_eld;
1398261a 8268 } else if (IS_GEN6(dev)) {
674cf967 8269 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
e0dac65e 8270 dev_priv->display.write_eld = ironlake_write_eld;
357555c0
JB
8271 } else if (IS_IVYBRIDGE(dev)) {
8272 /* FIXME: detect B0+ stepping and use auto training */
8273 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
e0dac65e 8274 dev_priv->display.write_eld = ironlake_write_eld;
c82e4d26
ED
8275 } else if (IS_HASWELL(dev)) {
8276 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
83358c85 8277 dev_priv->display.write_eld = haswell_write_eld;
7f8a8569
ZW
8278 } else
8279 dev_priv->display.update_wm = NULL;
6067aaea 8280 } else if (IS_G4X(dev)) {
e0dac65e 8281 dev_priv->display.write_eld = g4x_write_eld;
e70236a8 8282 }
8c9f3aaf
JB
8283
8284 /* Default just returns -ENODEV to indicate unsupported */
8285 dev_priv->display.queue_flip = intel_default_queue_flip;
8286
8287 switch (INTEL_INFO(dev)->gen) {
8288 case 2:
8289 dev_priv->display.queue_flip = intel_gen2_queue_flip;
8290 break;
8291
8292 case 3:
8293 dev_priv->display.queue_flip = intel_gen3_queue_flip;
8294 break;
8295
8296 case 4:
8297 case 5:
8298 dev_priv->display.queue_flip = intel_gen4_queue_flip;
8299 break;
8300
8301 case 6:
8302 dev_priv->display.queue_flip = intel_gen6_queue_flip;
8303 break;
7c9017e5
JB
8304 case 7:
8305 dev_priv->display.queue_flip = intel_gen7_queue_flip;
8306 break;
8c9f3aaf 8307 }
e70236a8
JB
8308}
8309
b690e96c
JB
8310/*
8311 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
8312 * resume, or other times. This quirk makes sure that's the case for
8313 * affected systems.
8314 */
0206e353 8315static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
8316{
8317 struct drm_i915_private *dev_priv = dev->dev_private;
8318
8319 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 8320 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
8321}
8322
435793df
KP
8323/*
8324 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
8325 */
8326static void quirk_ssc_force_disable(struct drm_device *dev)
8327{
8328 struct drm_i915_private *dev_priv = dev->dev_private;
8329 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 8330 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
8331}
8332
4dca20ef 8333/*
5a15ab5b
CE
8334 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
8335 * brightness value
4dca20ef
CE
8336 */
8337static void quirk_invert_brightness(struct drm_device *dev)
8338{
8339 struct drm_i915_private *dev_priv = dev->dev_private;
8340 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 8341 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
8342}
8343
b690e96c
JB
8344struct intel_quirk {
8345 int device;
8346 int subsystem_vendor;
8347 int subsystem_device;
8348 void (*hook)(struct drm_device *dev);
8349};
8350
c43b5634 8351static struct intel_quirk intel_quirks[] = {
b690e96c 8352 /* HP Mini needs pipe A force quirk (LP: #322104) */
0206e353 8353 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
b690e96c 8354
b690e96c
JB
8355 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
8356 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
8357
b690e96c
JB
8358 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
8359 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
8360
ccd0d36e 8361 /* 830/845 need to leave pipe A & dpll A up */
b690e96c 8362 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
dcdaed6e 8363 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
435793df
KP
8364
8365 /* Lenovo U160 cannot use SSC on LVDS */
8366 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
070d329a
MAS
8367
8368 /* Sony Vaio Y cannot use SSC on LVDS */
8369 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
5a15ab5b
CE
8370
8371 /* Acer Aspire 5734Z must invert backlight brightness */
8372 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
b690e96c
JB
8373};
8374
8375static void intel_init_quirks(struct drm_device *dev)
8376{
8377 struct pci_dev *d = dev->pdev;
8378 int i;
8379
8380 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
8381 struct intel_quirk *q = &intel_quirks[i];
8382
8383 if (d->device == q->device &&
8384 (d->subsystem_vendor == q->subsystem_vendor ||
8385 q->subsystem_vendor == PCI_ANY_ID) &&
8386 (d->subsystem_device == q->subsystem_device ||
8387 q->subsystem_device == PCI_ANY_ID))
8388 q->hook(dev);
8389 }
8390}
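/*
 * Editor's note: a short example of the quirk matching above, not part of
 * this file. A ThinkPad T60 (device 0x2782, subsystem 0x17aa:0x201a)
 * matches its exact intel_quirks[] entry and gets quirk_pipea_force();
 * the 0x2562 and 0x3577 entries use PCI_ANY_ID for both subsystem fields,
 * so any system with those device ids picks up the same hook regardless of
 * subsystem vendor or device.
 */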
8391
9cce37f4
JB
8392/* Disable the VGA plane that we never use */
8393static void i915_disable_vga(struct drm_device *dev)
8394{
8395 struct drm_i915_private *dev_priv = dev->dev_private;
8396 u8 sr1;
8397 u32 vga_reg;
8398
8399 if (HAS_PCH_SPLIT(dev))
8400 vga_reg = CPU_VGACNTRL;
8401 else
8402 vga_reg = VGACNTRL;
8403
8404 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 8405 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
8406 sr1 = inb(VGA_SR_DATA);
8407 outb(sr1 | 1<<5, VGA_SR_DATA);
8408 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
8409 udelay(300);
8410
8411 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
8412 POSTING_READ(vga_reg);
8413}
8414
f817586c
DV
8415void intel_modeset_init_hw(struct drm_device *dev)
8416{
0232e927
ED
8417 /* We attempt to init the necessary power wells early in initialization,
8418 * so the subsystems that expect power to be enabled can work.
8419 */
8420 intel_init_power_wells(dev);
8421
a8f78b58
ED
8422 intel_prepare_ddi(dev);
8423
f817586c
DV
8424 intel_init_clock_gating(dev);
8425
79f5b2c7 8426 mutex_lock(&dev->struct_mutex);
8090c6b9 8427 intel_enable_gt_powersave(dev);
79f5b2c7 8428 mutex_unlock(&dev->struct_mutex);
f817586c
DV
8429}
8430
79e53945
JB
8431void intel_modeset_init(struct drm_device *dev)
8432{
652c393a 8433 struct drm_i915_private *dev_priv = dev->dev_private;
b840d907 8434 int i, ret;
79e53945
JB
8435
8436 drm_mode_config_init(dev);
8437
8438 dev->mode_config.min_width = 0;
8439 dev->mode_config.min_height = 0;
8440
019d96cb
DA
8441 dev->mode_config.preferred_depth = 24;
8442 dev->mode_config.prefer_shadow = 1;
8443
e6ecefaa 8444 dev->mode_config.funcs = &intel_mode_funcs;
79e53945 8445
b690e96c
JB
8446 intel_init_quirks(dev);
8447
1fa61106
ED
8448 intel_init_pm(dev);
8449
e70236a8
JB
8450 intel_init_display(dev);
8451
a6c45cf0
CW
8452 if (IS_GEN2(dev)) {
8453 dev->mode_config.max_width = 2048;
8454 dev->mode_config.max_height = 2048;
8455 } else if (IS_GEN3(dev)) {
5e4d6fa7
KP
8456 dev->mode_config.max_width = 4096;
8457 dev->mode_config.max_height = 4096;
79e53945 8458 } else {
a6c45cf0
CW
8459 dev->mode_config.max_width = 8192;
8460 dev->mode_config.max_height = 8192;
79e53945 8461 }
dd2757f8 8462 dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
79e53945 8463
28c97730 8464 DRM_DEBUG_KMS("%d display pipe%s available.\n",
a3524f1b 8465 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
79e53945 8466
a3524f1b 8467 for (i = 0; i < dev_priv->num_pipe; i++) {
79e53945 8468 intel_crtc_init(dev, i);
00c2064b
JB
8469 ret = intel_plane_init(dev, i);
8470 if (ret)
8471 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
79e53945
JB
8472 }
8473
79f689aa 8474 intel_cpu_pll_init(dev);
ee7b9f93
JB
8475 intel_pch_pll_init(dev);
8476
9cce37f4
JB
8477 /* Just disable it once at startup */
8478 i915_disable_vga(dev);
79e53945 8479 intel_setup_outputs(dev);
2c7111db
CW
8480}
8481
24929352
DV
8482static void
8483intel_connector_break_all_links(struct intel_connector *connector)
8484{
8485 connector->base.dpms = DRM_MODE_DPMS_OFF;
8486 connector->base.encoder = NULL;
8487 connector->encoder->connectors_active = false;
8488 connector->encoder->base.crtc = NULL;
8489}
8490
7fad798e
DV
8491static void intel_enable_pipe_a(struct drm_device *dev)
8492{
8493 struct intel_connector *connector;
8494 struct drm_connector *crt = NULL;
8495 struct intel_load_detect_pipe load_detect_temp;
8496
8497 /* We can't just switch on the pipe A, we need to set things up with a
8498 * proper mode and output configuration. As a gross hack, enable pipe A
8499 * by enabling the load detect pipe once. */
8500 list_for_each_entry(connector,
8501 &dev->mode_config.connector_list,
8502 base.head) {
8503 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
8504 crt = &connector->base;
8505 break;
8506 }
8507 }
8508
8509 if (!crt)
8510 return;
8511
8512 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
8513 intel_release_load_detect_pipe(crt, &load_detect_temp);
8514
8515
8516}
8517
fa555837
DV
8518static bool
8519intel_check_plane_mapping(struct intel_crtc *crtc)
8520{
8521 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8522 u32 reg, val;
8523
8524 if (dev_priv->num_pipe == 1)
8525 return true;
8526
8527 reg = DSPCNTR(!crtc->plane);
8528 val = I915_READ(reg);
8529
8530 if ((val & DISPLAY_PLANE_ENABLE) &&
8531 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
8532 return false;
8533
8534 return true;
8535}
8536
24929352
DV
8537static void intel_sanitize_crtc(struct intel_crtc *crtc)
8538{
8539 struct drm_device *dev = crtc->base.dev;
8540 struct drm_i915_private *dev_priv = dev->dev_private;
fa555837 8541 u32 reg;
24929352 8542
24929352
DV
8543 /* Clear any frame start delays used for debugging left by the BIOS */
8544 reg = PIPECONF(crtc->pipe);
8545 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
8546
8547 /* We need to sanitize the plane -> pipe mapping first because this will
fa555837
DV
8548 * disable the crtc (and hence change the state) if it is wrong. Note
8549 * that gen4+ has a fixed plane -> pipe mapping. */
8550 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
24929352
DV
8551 struct intel_connector *connector;
8552 bool plane;
8553
24929352
DV
8554 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
8555 crtc->base.base.id);
8556
8557 /* Pipe has the wrong plane attached and the plane is active.
8558 * Temporarily change the plane mapping and disable everything
8559 * ... */
8560 plane = crtc->plane;
8561 crtc->plane = !plane;
8562 dev_priv->display.crtc_disable(&crtc->base);
8563 crtc->plane = plane;
8564
8565 /* ... and break all links. */
8566 list_for_each_entry(connector, &dev->mode_config.connector_list,
8567 base.head) {
8568 if (connector->encoder->base.crtc != &crtc->base)
8569 continue;
8570
8571 intel_connector_break_all_links(connector);
8572 }
8573
8574 WARN_ON(crtc->active);
8575 crtc->base.enabled = false;
8576 }
24929352 8577
7fad798e
DV
8578 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
8579 crtc->pipe == PIPE_A && !crtc->active) {
 8580		/* The BIOS forgot to enable pipe A; this mostly happens after
 8581		 * resume. Force-enable the pipe to fix this; the update_dpms
 8582		 * call below will restore the pipe to the right state, but
 8583		 * leave the required bits on. */
8584 intel_enable_pipe_a(dev);
8585 }
8586
24929352
DV
8587 /* Adjust the state of the output pipe according to whether we
8588 * have active connectors/encoders. */
8589 intel_crtc_update_dpms(&crtc->base);
8590
8591 if (crtc->active != crtc->base.enabled) {
8592 struct intel_encoder *encoder;
8593
8594 /* This can happen either due to bugs in the get_hw_state
8595 * functions or because the pipe is force-enabled due to the
8596 * pipe A quirk. */
8597 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
8598 crtc->base.base.id,
8599 crtc->base.enabled ? "enabled" : "disabled",
8600 crtc->active ? "enabled" : "disabled");
8601
8602 crtc->base.enabled = crtc->active;
8603
 8604		/* Because we only establish the connector -> encoder ->
 8605		 * crtc links if something is active, this means the
 8606		 * crtc is now deactivated. Break the links. Connector
 8607		 * -> encoder links are only established when things are
 8608		 * actually up, hence there is no need to break them. */
8609 WARN_ON(crtc->active);
8610
8611 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
8612 WARN_ON(encoder->connectors_active);
8613 encoder->base.crtc = NULL;
8614 }
8615 }
8616}
8617
8618static void intel_sanitize_encoder(struct intel_encoder *encoder)
8619{
8620 struct intel_connector *connector;
8621 struct drm_device *dev = encoder->base.dev;
8622
8623 /* We need to check both for a crtc link (meaning that the
8624 * encoder is active and trying to read from a pipe) and the
8625 * pipe itself being active. */
8626 bool has_active_crtc = encoder->base.crtc &&
8627 to_intel_crtc(encoder->base.crtc)->active;
8628
8629 if (encoder->connectors_active && !has_active_crtc) {
8630 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
8631 encoder->base.base.id,
8632 drm_get_encoder_name(&encoder->base));
8633
 8634		/* Connector is active, but has no active pipe. This is
 8635		 * fallout from our register restore on resume. Disable
 8636		 * the encoder manually again. */
8637 if (encoder->base.crtc) {
8638 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
8639 encoder->base.base.id,
8640 drm_get_encoder_name(&encoder->base));
8641 encoder->disable(encoder);
8642 }
8643
 8644		/* Inconsistent output/port/pipe state is presumably due to
 8645		 * a bug in one of the get_hw_state functions, or someplace else
 8646		 * in our code, like the register restore mess on resume. Clamp
 8647		 * things to off as a safer default. */
8648 list_for_each_entry(connector,
8649 &dev->mode_config.connector_list,
8650 base.head) {
8651 if (connector->encoder != encoder)
8652 continue;
8653
8654 intel_connector_break_all_links(connector);
8655 }
8656 }
8657 /* Enabled encoders without active connectors will be fixed in
8658 * the crtc fixup. */
8659}
8660
8661/* Scans out the current hw modeset state, sanitizes it and maps it into the
8662 * drm and i915 state tracking structures. */
8663void intel_modeset_setup_hw_state(struct drm_device *dev)
8664{
8665 struct drm_i915_private *dev_priv = dev->dev_private;
8666 enum pipe pipe;
8667 u32 tmp;
8668 struct intel_crtc *crtc;
8669 struct intel_encoder *encoder;
8670 struct intel_connector *connector;
8671
8672 for_each_pipe(pipe) {
8673 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8674
8675 tmp = I915_READ(PIPECONF(pipe));
8676 if (tmp & PIPECONF_ENABLE)
8677 crtc->active = true;
8678 else
8679 crtc->active = false;
8680
8681 crtc->base.enabled = crtc->active;
8682
8683 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
8684 crtc->base.base.id,
8685 crtc->active ? "enabled" : "disabled");
8686 }
8687
6441ab5f
PZ
8688 if (IS_HASWELL(dev))
8689 intel_ddi_setup_hw_pll_state(dev);
8690
24929352
DV
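	/* Read out which pipe, if any, each encoder is currently driving. */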
8691 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8692 base.head) {
8693 pipe = 0;
8694
8695 if (encoder->get_hw_state(encoder, &pipe)) {
8696 encoder->base.crtc =
8697 dev_priv->pipe_to_crtc_mapping[pipe];
8698 } else {
8699 encoder->base.crtc = NULL;
8700 }
8701
8702 encoder->connectors_active = false;
8703 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
8704 encoder->base.base.id,
8705 drm_get_encoder_name(&encoder->base),
8706 encoder->base.crtc ? "enabled" : "disabled",
8707 pipe);
8708 }
8709
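	/* Connectors report their own hw state; hook up the ones that are on. */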
8710 list_for_each_entry(connector, &dev->mode_config.connector_list,
8711 base.head) {
8712 if (connector->get_hw_state(connector)) {
8713 connector->base.dpms = DRM_MODE_DPMS_ON;
8714 connector->encoder->connectors_active = true;
8715 connector->base.encoder = &connector->encoder->base;
8716 } else {
8717 connector->base.dpms = DRM_MODE_DPMS_OFF;
8718 connector->base.encoder = NULL;
8719 }
8720 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
8721 connector->base.base.id,
8722 drm_get_connector_name(&connector->base),
8723 connector->base.encoder ? "enabled" : "disabled");
8724 }
8725
8726 /* HW state is read out, now we need to sanitize this mess. */
8727 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8728 base.head) {
8729 intel_sanitize_encoder(encoder);
8730 }
8731
8732 for_each_pipe(pipe) {
8733 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
8734 intel_sanitize_crtc(crtc);
8735 }
9a935856
DV
8736
8737 intel_modeset_update_staged_output_state(dev);
8af6cf88
DV
8738
8739 intel_modeset_check_state(dev);
2e938892
DV
8740
8741 drm_mode_config_reset(dev);
24929352
DV
8742}
8743
2c7111db
CW
8744void intel_modeset_gem_init(struct drm_device *dev)
8745{
1833b134 8746 intel_modeset_init_hw(dev);
02e792fb
DV
8747
8748 intel_setup_overlay(dev);
24929352
DV
8749
8750 intel_modeset_setup_hw_state(dev);
79e53945
JB
8751}
8752
8753void intel_modeset_cleanup(struct drm_device *dev)
8754{
652c393a
JB
8755 struct drm_i915_private *dev_priv = dev->dev_private;
8756 struct drm_crtc *crtc;
8757 struct intel_crtc *intel_crtc;
8758
f87ea761 8759 drm_kms_helper_poll_fini(dev);
652c393a
JB
8760 mutex_lock(&dev->struct_mutex);
8761
723bfd70
JB
8762 intel_unregister_dsm_handler();
8763
8764
652c393a
JB
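	/* Bump active CRTCs back to full PLL clock, presumably so whatever
	 * drives the display after we unload is not left downclocked. */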
8765 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
8766 /* Skip inactive CRTCs */
8767 if (!crtc->fb)
8768 continue;
8769
8770 intel_crtc = to_intel_crtc(crtc);
3dec0095 8771 intel_increase_pllclock(crtc);
652c393a
JB
8772 }
8773
973d04f9 8774 intel_disable_fbc(dev);
e70236a8 8775
8090c6b9 8776 intel_disable_gt_powersave(dev);
0cdab21f 8777
930ebb46
DV
8778 ironlake_teardown_rc6(dev);
8779
57f350b6
JB
8780 if (IS_VALLEYVIEW(dev))
8781 vlv_init_dpio(dev);
8782
69341a5e
KH
8783 mutex_unlock(&dev->struct_mutex);
8784
6c0d9350
DV
8785 /* Disable the irq before mode object teardown, for the irq might
8786 * enqueue unpin/hotplug work. */
8787 drm_irq_uninstall(dev);
8788 cancel_work_sync(&dev_priv->hotplug_work);
c6a828d3 8789 cancel_work_sync(&dev_priv->rps.work);
6c0d9350 8790
1630fe75
CW
8791 /* flush any delayed tasks or pending work */
8792 flush_scheduled_work();
8793
79e53945
JB
8794 drm_mode_config_cleanup(dev);
8795}
8796
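/*
 * Rough call-order sketch (not necessarily the exact i915 load path):
 * intel_modeset_init() runs during driver load, intel_modeset_gem_init()
 * once GEM is up, and intel_modeset_cleanup() on unload.
 */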
f1c79df3
ZW
8797/*
8798 * Return which encoder is currently attached to the connector.
8799 */
df0e9248 8800struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 8801{
df0e9248
CW
8802 return &intel_attached_encoder(connector)->base;
8803}
f1c79df3 8804
df0e9248
CW
8805void intel_connector_attach_encoder(struct intel_connector *connector,
8806 struct intel_encoder *encoder)
8807{
8808 connector->encoder = encoder;
8809 drm_mode_connector_attach_encoder(&connector->base,
8810 &encoder->base);
79e53945 8811}
28d52043
DA
8812
8813/*
8814 * Set VGA decode state - true == enable VGA decode
8815 */
8816int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
8817{
8818 struct drm_i915_private *dev_priv = dev->dev_private;
8819 u16 gmch_ctrl;
8820
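	/* VGA decode for the IGD is gated by a bit in the bridge device's
	 * GMCH control register. */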
8821 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
8822 if (state)
8823 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
8824 else
8825 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
8826 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
8827 return 0;
8828}
c4a1d9e4
CW
8829
8830#ifdef CONFIG_DEBUG_FS
8831#include <linux/seq_file.h>
8832
8833struct intel_display_error_state {
8834 struct intel_cursor_error_state {
8835 u32 control;
8836 u32 position;
8837 u32 base;
8838 u32 size;
52331309 8839 } cursor[I915_MAX_PIPES];
c4a1d9e4
CW
8840
8841 struct intel_pipe_error_state {
8842 u32 conf;
8843 u32 source;
8844
8845 u32 htotal;
8846 u32 hblank;
8847 u32 hsync;
8848 u32 vtotal;
8849 u32 vblank;
8850 u32 vsync;
52331309 8851 } pipe[I915_MAX_PIPES];
c4a1d9e4
CW
8852
8853 struct intel_plane_error_state {
8854 u32 control;
8855 u32 stride;
8856 u32 size;
8857 u32 pos;
8858 u32 addr;
8859 u32 surface;
8860 u32 tile_offset;
52331309 8861 } plane[I915_MAX_PIPES];
c4a1d9e4
CW
8862};
8863
8864struct intel_display_error_state *
8865intel_display_capture_error_state(struct drm_device *dev)
8866{
0206e353 8867 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4
CW
8868 struct intel_display_error_state *error;
8869 int i;
8870
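	/* Error capture can run in atomic context, hence GFP_ATOMIC and the
	 * tolerance for allocation failure below. */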
8871 error = kmalloc(sizeof(*error), GFP_ATOMIC);
8872 if (error == NULL)
8873 return NULL;
8874
52331309 8875 for_each_pipe(i) {
c4a1d9e4
CW
8876 error->cursor[i].control = I915_READ(CURCNTR(i));
8877 error->cursor[i].position = I915_READ(CURPOS(i));
8878 error->cursor[i].base = I915_READ(CURBASE(i));
8879
8880 error->plane[i].control = I915_READ(DSPCNTR(i));
8881 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
8882 error->plane[i].size = I915_READ(DSPSIZE(i));
0206e353 8883 error->plane[i].pos = I915_READ(DSPPOS(i));
c4a1d9e4
CW
8884 error->plane[i].addr = I915_READ(DSPADDR(i));
8885 if (INTEL_INFO(dev)->gen >= 4) {
8886 error->plane[i].surface = I915_READ(DSPSURF(i));
8887 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
8888 }
8889
8890 error->pipe[i].conf = I915_READ(PIPECONF(i));
8891 error->pipe[i].source = I915_READ(PIPESRC(i));
8892 error->pipe[i].htotal = I915_READ(HTOTAL(i));
8893 error->pipe[i].hblank = I915_READ(HBLANK(i));
8894 error->pipe[i].hsync = I915_READ(HSYNC(i));
8895 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
8896 error->pipe[i].vblank = I915_READ(VBLANK(i));
8897 error->pipe[i].vsync = I915_READ(VSYNC(i));
8898 }
8899
8900 return error;
8901}
8902
8903void
8904intel_display_print_error_state(struct seq_file *m,
8905 struct drm_device *dev,
8906 struct intel_display_error_state *error)
8907{
52331309 8908 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4
CW
8909 int i;
8910
52331309
DL
8911 seq_printf(m, "Num Pipes: %d\n", dev_priv->num_pipe);
8912 for_each_pipe(i) {
c4a1d9e4
CW
8913 seq_printf(m, "Pipe [%d]:\n", i);
8914 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
8915 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
8916 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
8917 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
8918 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
8919 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
8920 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
8921 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
8922
8923 seq_printf(m, "Plane [%d]:\n", i);
8924 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
8925 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
8926 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
8927 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
8928 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
8929 if (INTEL_INFO(dev)->gen >= 4) {
8930 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
8931 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
8932 }
8933
8934 seq_printf(m, "Cursor [%d]:\n", i);
8935 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
8936 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
8937 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
8938 }
8939}
8940#endif