drm/i915: Don't lie about finding suitable PLL settings on VLV
drivers/gpu/drm/i915/intel_display.c
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <linux/dma_remapping.h>

static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_config *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_config *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *old_fb);


typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
};

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

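/*
 * The limit tables below bound the divisor values that the find_best_dpll()
 * helpers may pick for each platform/output combination (dot and vco values
 * are in kHz).  p2 is a two-valued divider: the search compares the target
 * dot clock against .dot_limit and uses .p2_slow below that threshold and
 * .p2_fast at or above it; for LVDS the current single/dual-channel state is
 * used to choose between the two instead.
 */
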
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 930000, .max = 1400000 },
	.n = { .min = 3, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

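/*
 * vlv_clock() models the VLV DPLL as
 *
 *	vco = refclk * (m1 * m2) / n
 *	dot = vco / (p1 * p2)
 *
 * Illustrative example (numbers chosen only to satisfy intel_limits_vlv, not
 * taken from any particular mode): refclk = 100000, n = 1, m1 = 2, m2 = 27,
 * p1 = 2, p2 = 20 gives vco = 100000 * 54 = 5400000 and
 * dot = 5400000 / 40 = 135000 (kHz).  Note that vlv_find_best_dpll() below
 * searches against target * 5 (the "fast clock"), so there the computed dot
 * corresponds to five times the pixel clock.
 */
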
/**
 * Returns whether any output on the specified pipe is of the specified type
 */
static bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
						 int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
	struct drm_device *dev = crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / clock->n;
	clock->dot = clock->vco / clock->p;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

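/*
 * For the i9xx-style PLL the effective feedback divider is
 *
 *	m = 5 * (m1 + 2) + (m2 + 2)
 *
 * and the clocks come out as vco = refclk * m / (n + 2) and
 * dot = vco / (p1 * p2).  Illustrative example (register values chosen only
 * to satisfy intel_limits_i9xx_sdvo): refclk = 96000, n = 2, m1 = 12,
 * m2 = 5, p1 = 2, p2 = 5 gives m = 77, vco = 96000 * 77 / 4 = 1848000 and
 * dot = 1848000 / 10 = 184800 (kHz).
 */
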
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->p < limit->p.min || limit->p.max < clock->p)
		INTELPllInvalid("p out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");
	if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
		INTELPllInvalid("m1 <= m2\n");
	if (clock->m < limit->m.min || limit->m.max < clock->m)
		INTELPllInvalid("m out of range\n");
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static bool
i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

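/*
 * The search above fixes p2 up front, scans m1/m2/n/p1 exhaustively and keeps
 * the divisor set whose dot clock lands closest to the target; since err
 * starts out equal to the target, success is only reported if at least one
 * valid candidate improved on that.  The "m2 >= m1" break mirrors the
 * "m1 <= m2" rejection in intel_PLL_is_valid() for non-Pineview parts.
 */
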
static bool
pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

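/*
 * g4x_find_best_dpll() starts from a tolerance rather than from the raw
 * target: err_most = (target >> 8) + (target >> 9) is roughly
 * target * (1/256 + 1/512), i.e. about 0.59% of the target, and it tightens
 * every time a better match is found.  Clamping max_n to the best n seen so
 * far keeps the search biased towards small n (and, via the downward m1/m2
 * loops, towards large m1/m2), matching the hardware preference noted in the
 * comments above.
 */
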
static bool
vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm, diff;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_clock(refclk, &clock);

					if (clock.vco < limit->vco.min ||
					    clock.vco >= limit->vco.max)
						continue;

					diff = abs(clock.dot - target);
					ppm = div_u64(1000000ULL * diff, target);

					if (ppm < 100 && clock.p > best_clock->p) {
						bestppm = 0;
						*best_clock = clock;
						found = true;
					}

					if (bestppm >= 10 && ppm < bestppm - 10) {
						bestppm = ppm;
						*best_clock = clock;
						found = true;
					}
				}
			}
		}
	}

	return found;
}

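/*
 * Instead of iterating over m2, vlv_find_best_dpll() solves
 * dot = refclk * m1 * m2 / (n * p) for m2 directly
 * (m2 = target * p * n / (refclk * m1), rounded) and then judges candidates
 * by their deviation in parts per million: a candidate within 100 ppm that
 * also uses a larger post divider p wins outright, otherwise a candidate
 * only replaces the current best while the best is still at least 10 ppm off
 * and the newcomer improves on it by more than 10 ppm.
 */
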
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 */
	return intel_crtc->active && crtc->fb &&
		intel_crtc->config.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config.cpu_transcoder;
}

static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 frame, frame_reg = PIPEFRAME(pipe);

	frame = I915_READ(frame_reg);

	if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/**
 * intel_wait_for_vblank - wait for vblank on a given pipe
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * Wait for vblank to occur on a given pipe. Needed for various bits of
 * mode setting code.
 */
void intel_wait_for_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipestat_reg = PIPESTAT(pipe);

	if (INTEL_INFO(dev)->gen >= 5) {
		ironlake_wait_for_vblank(dev, pipe);
		return;
	}

	/* Clear existing vblank status. Note this will clear any other
	 * sticky status fields as well.
	 *
	 * This races with i915_driver_irq_handler() with the result
	 * that either function could miss a vblank event. Here it is not
	 * fatal, as we will either wait upon the next vblank interrupt or
	 * timeout. Generally speaking intel_wait_for_vblank() is only
	 * called during modeset at which time the GPU should be idle and
	 * should *not* be performing page flips and thus not waiting on
	 * vblanks...
	 * Currently, the result of us stealing a vblank from the irq
	 * handler is that a single frame will be skipped during swapbuffers.
	 */
	I915_WRITE(pipestat_reg,
		   I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);

	/* Wait for vblank interrupt bit to set */
	if (wait_for(I915_READ(pipestat_reg) &
		     PIPE_VBLANK_INTERRUPT_STATUS,
		     50))
		DRM_DEBUG_KMS("vblank wait timed out\n");
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @dev: drm device
 * @pipe: pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		u32 last_line, line_mask;
		int reg = PIPEDSL(pipe);
		unsigned long timeout = jiffies + msecs_to_jiffies(100);

		if (IS_GEN2(dev))
			line_mask = DSL_LINEMASK_GEN2;
		else
			line_mask = DSL_LINEMASK_GEN3;

		/* Wait for the display line to settle */
		do {
			last_line = I915_READ(reg) & line_mask;
			mdelay(5);
		} while (((I915_READ(reg) & line_mask) != last_line) &&
			 time_after(timeout, jiffies));
		if (time_after(jiffies, timeout))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			return true;
		}
	} else {
		switch(port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DPLL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & DPLL_VCO_ENABLE);
	WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

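/*
 * The assert_* helpers in this file all follow the same pattern: read back
 * the relevant hardware state, compare it against the caller's expectation
 * and WARN when the two disagree; the assert_*_enabled and assert_*_disabled
 * macros are just the two fixed-state wrappers around them.
 */
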
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config.shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config.shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (HAS_PCH_LPT(dev_priv->dev)) {
		DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
		return;
	}

	if (WARN (!pll,
		  "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		reg = FDI_TX_CTL(pipe);
		val = I915_READ(reg);
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_ENABLE);
	WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (dev_priv->info->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	reg = FDI_TX_CTL(pipe);
	val = I915_READ(reg);
	WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = FDI_RX_CTL(pipe);
	val = I915_READ(reg);
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
				  enum pipe pipe)
{
	int pp_reg, lvds_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (HAS_PCH_SPLIT(dev_priv->dev)) {
		pp_reg = PCH_PP_CONTROL;
		lvds_reg = PCH_LVDS;
	} else {
		pp_reg = PP_CONTROL;
		lvds_reg = LVDS;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
		locked = false;

	if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
		panel_pipe = PIPE_B;

	WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
		cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
	else if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe A quirk it must be always on */
	if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
		state = true;

	if (!intel_display_power_enabled(dev_priv->dev,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DISPLAY_PLANE_ENABLE),
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		for (i = 0; i < dev_priv->num_plane; i++) {
			reg = SPCNTR(pipe, i);
			val = I915_READ(reg);
			WARN((val & SP_ENABLE),
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, i), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		WARN((val & SPRITE_ENABLE),
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		WARN((val & DVS_ENABLE),
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

1189
92f2584a
JB
1190static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1191{
1192 u32 val;
1193 bool enabled;
1194
9d82aa17
ED
1195 if (HAS_PCH_LPT(dev_priv->dev)) {
1196 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1197 return;
1198 }
1199
92f2584a
JB
1200 val = I915_READ(PCH_DREF_CONTROL);
1201 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1202 DREF_SUPERSPREAD_SOURCE_MASK));
1203 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1204}
1205
ab9412ba
DV
1206static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1207 enum pipe pipe)
92f2584a
JB
1208{
1209 int reg;
1210 u32 val;
1211 bool enabled;
1212
ab9412ba 1213 reg = PCH_TRANSCONF(pipe);
92f2584a
JB
1214 val = I915_READ(reg);
1215 enabled = !!(val & TRANS_ENABLE);
9db4a9c7
JB
1216 WARN(enabled,
1217 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1218 pipe_name(pipe));
92f2584a
JB
1219}
1220
4e634389
KP
1221static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1222 enum pipe pipe, u32 port_sel, u32 val)
f0575e92
KP
1223{
1224 if ((val & DP_PORT_EN) == 0)
1225 return false;
1226
1227 if (HAS_PCH_CPT(dev_priv->dev)) {
1228 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1229 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1230 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1231 return false;
1232 } else {
1233 if ((val & DP_PIPE_MASK) != (pipe << 30))
1234 return false;
1235 }
1236 return true;
1237}
1238
1519b995
KP
1239static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1240 enum pipe pipe, u32 val)
1241{
dc0fa718 1242 if ((val & SDVO_ENABLE) == 0)
1519b995
KP
1243 return false;
1244
1245 if (HAS_PCH_CPT(dev_priv->dev)) {
dc0fa718 1246 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1519b995
KP
1247 return false;
1248 } else {
dc0fa718 1249 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1519b995
KP
1250 return false;
1251 }
1252 return true;
1253}
1254
1255static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1256 enum pipe pipe, u32 val)
1257{
1258 if ((val & LVDS_PORT_EN) == 0)
1259 return false;
1260
1261 if (HAS_PCH_CPT(dev_priv->dev)) {
1262 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1263 return false;
1264 } else {
1265 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1266 return false;
1267 }
1268 return true;
1269}
1270
1271static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1272 enum pipe pipe, u32 val)
1273{
1274 if ((val & ADPA_DAC_ENABLE) == 0)
1275 return false;
1276 if (HAS_PCH_CPT(dev_priv->dev)) {
1277 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1278 return false;
1279 } else {
1280 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1281 return false;
1282 }
1283 return true;
1284}
1285
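/*
 * The *_pipe_enabled() helpers above answer the same question from different
 * bits depending on the PCH generation: on CPT the port is bound to a
 * transcoder through a transcoder-select field (and, for DP, through the
 * separate TRANS_DP_CTL register), while on IBX and older parts the pipe is
 * encoded directly in the port register itself.
 */
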
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
	 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
	 *    a. GUnit 0x2110 bit[0] set to 1 (def 0)
	 *    b. The other bits such as sfr settings / modesel may all be set
	 *       to 0.
	 *
	 * This should only be done on init and resume from S3 with both
	 * PLLs disabled, or we risk losing DPIO and PLL synchronization.
	 */
	I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
}

static void vlv_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

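/*
 * The VLV enable sequence above is: program DPLL, post the write and wait
 * ~150us, poll for DPLL_LOCK_VLV for up to 1ms, program DPLL_MD, and then
 * rewrite DPLL three more times with 150us settling delays, the same
 * "three times for luck" warm-up used by the i9xx path below.
 */
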
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config.dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(dev_priv->info->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config.dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	/* Don't disable pipe A or pipe A PLLs if needed */
	if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), 0);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val = 0;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave integrated clock source enabled */
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

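/*
 * Note the asymmetry between the two disable paths above:
 * i9xx_disable_pll() refuses to touch pipe A while QUIRK_PIPEA_FORCE is set,
 * and vlv_disable_pll() leaves DPLL_INTEGRATED_CRI_CLK_VLV set on pipe B so
 * the integrated clock source survives the PLL being turned off.
 */
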
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
{
	u32 port_mask;

	if (!port)
		port_mask = DPLL_PORTB_READY_MASK;
	else
		port_mask = DPLL_PORTC_READY_MASK;

	if (wait_for((I915_READ(DPLL(0)) & port_mask) == 0, 1000))
		WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
		     'B' + port, I915_READ(DPLL(0)));
}

/**
 * ironlake_enable_shared_dpll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void ironlake_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH PLLs only available on ILK, SNB and IVB */
	BUG_ON(dev_priv->info->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}

static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);
	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->refcount == 0))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;
}

static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t reg, val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * make the BPC in transcoder be consistent with
		 * that in pipeconf reg.
		 */
		val &= ~PIPECONF_BPC_MASK;
		val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

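/*
 * Points worth noting in ironlake_enable_pch_transcoder() above: on CPT the
 * TRANS_CHICKEN2 timing-override workaround bit has to be set before the
 * transcoder is enabled, on IBX the transcoder's BPC field is copied from
 * PIPECONF so both ends agree, and the legacy interlaced mode is only used
 * for SDVO outputs on IBX.
 */
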
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(dev_priv->info->gen < 5);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t reg, val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (!HAS_PCH_IBX(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}

1695
b24e7179 1696/**
309cfea8 1697 * intel_enable_pipe - enable a pipe, asserting requirements
b24e7179
JB
1698 * @dev_priv: i915 private structure
1699 * @pipe: pipe to enable
040484af 1700 * @pch_port: on ILK+, is this pipe driving a PCH port or not
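 * @dsi: true if the pipe output is DSI, in which case the DSI PLL (rather
 *	than a regular DPLL) must already be running (see the assertion below)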
b24e7179
JB
1701 *
1702 * Enable @pipe, making sure that various hardware specific requirements
1703 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1704 *
1705 * @pipe should be %PIPE_A or %PIPE_B.
1706 *
1707 * Will wait until the pipe is actually running (i.e. first vblank) before
1708 * returning.
1709 */
040484af 1710static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
23538ef1 1711 bool pch_port, bool dsi)
b24e7179 1712{
702e7a56
PZ
1713 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1714 pipe);
1a240d4d 1715 enum pipe pch_transcoder;
b24e7179
JB
1716 int reg;
1717 u32 val;
1718
58c6eaa2 1719 assert_planes_disabled(dev_priv, pipe);
93ce0ba6 1720 assert_cursor_disabled(dev_priv, pipe);
58c6eaa2
DV
1721 assert_sprites_disabled(dev_priv, pipe);
1722
681e5811 1723 if (HAS_PCH_LPT(dev_priv->dev))
cc391bbb
PZ
1724 pch_transcoder = TRANSCODER_A;
1725 else
1726 pch_transcoder = pipe;
1727
b24e7179
JB
1728 /*
1729 * A pipe without a PLL won't actually be able to drive bits from
1730 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1731 * need the check.
1732 */
1733 if (!HAS_PCH_SPLIT(dev_priv->dev))
23538ef1
JN
1734 if (dsi)
1735 assert_dsi_pll_enabled(dev_priv);
1736 else
1737 assert_pll_enabled(dev_priv, pipe);
040484af
JB
1738 else {
1739 if (pch_port) {
1740 /* if driving the PCH, we need FDI enabled */
cc391bbb 1741 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1a240d4d
DV
1742 assert_fdi_tx_pll_enabled(dev_priv,
1743 (enum pipe) cpu_transcoder);
040484af
JB
1744 }
1745 /* FIXME: assert CPU port conditions for SNB+ */
1746 }
b24e7179 1747
702e7a56 1748 reg = PIPECONF(cpu_transcoder);
b24e7179 1749 val = I915_READ(reg);
00d70b15
CW
1750 if (val & PIPECONF_ENABLE)
1751 return;
1752
1753 I915_WRITE(reg, val | PIPECONF_ENABLE);
b24e7179
JB
1754 intel_wait_for_vblank(dev_priv->dev, pipe);
1755}
1756
1757/**
309cfea8 1758 * intel_disable_pipe - disable a pipe, asserting requirements
b24e7179
JB
1759 * @dev_priv: i915 private structure
1760 * @pipe: pipe to disable
1761 *
1762 * Disable @pipe, making sure that various hardware specific requirements
1763 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1764 *
1765 * @pipe should be %PIPE_A or %PIPE_B.
1766 *
1767 * Will wait until the pipe has shut down before returning.
1768 */
1769static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1770 enum pipe pipe)
1771{
702e7a56
PZ
1772 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1773 pipe);
b24e7179
JB
1774 int reg;
1775 u32 val;
1776
1777 /*
1778 * Make sure planes won't keep trying to pump pixels to us,
1779 * or we might hang the display.
1780 */
1781 assert_planes_disabled(dev_priv, pipe);
93ce0ba6 1782 assert_cursor_disabled(dev_priv, pipe);
19332d7a 1783 assert_sprites_disabled(dev_priv, pipe);
b24e7179
JB
1784
1785 /* Don't disable pipe A or the pipe A PLLs when the PIPEA_FORCE quirk requires them */
1786 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1787 return;
1788
702e7a56 1789 reg = PIPECONF(cpu_transcoder);
b24e7179 1790 val = I915_READ(reg);
00d70b15
CW
1791 if ((val & PIPECONF_ENABLE) == 0)
1792 return;
1793
1794 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
b24e7179
JB
1795 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1796}
1797
d74362c9
KP
1798/*
1799 * Plane regs are double buffered, going from enabled->disabled needs a
1800 * trigger in order to latch. The display address reg provides this.
1801 */
6f1d69b0 1802void intel_flush_display_plane(struct drm_i915_private *dev_priv,
d74362c9
KP
1803 enum plane plane)
1804{
14f86147
DL
1805 if (dev_priv->info->gen >= 4)
1806 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1807 else
1808 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
d74362c9
KP
1809}
1810
b24e7179
JB
1811/**
1812 * intel_enable_plane - enable a display plane on a given pipe
1813 * @dev_priv: i915 private structure
1814 * @plane: plane to enable
1815 * @pipe: pipe being fed
1816 *
1817 * Enable @plane on @pipe, making sure that @pipe is running first.
1818 */
1819static void intel_enable_plane(struct drm_i915_private *dev_priv,
1820 enum plane plane, enum pipe pipe)
1821{
1822 int reg;
1823 u32 val;
1824
1825 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1826 assert_pipe_enabled(dev_priv, pipe);
1827
1828 reg = DSPCNTR(plane);
1829 val = I915_READ(reg);
00d70b15
CW
1830 if (val & DISPLAY_PLANE_ENABLE)
1831 return;
1832
1833 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
d74362c9 1834 intel_flush_display_plane(dev_priv, plane);
b24e7179
JB
1835 intel_wait_for_vblank(dev_priv->dev, pipe);
1836}
1837
b24e7179
JB
1838/**
1839 * intel_disable_plane - disable a display plane
1840 * @dev_priv: i915 private structure
1841 * @plane: plane to disable
1842 * @pipe: pipe consuming the data
1843 *
1844 * Disable @plane; should be an independent operation.
1845 */
1846static void intel_disable_plane(struct drm_i915_private *dev_priv,
1847 enum plane plane, enum pipe pipe)
1848{
1849 int reg;
1850 u32 val;
1851
1852 reg = DSPCNTR(plane);
1853 val = I915_READ(reg);
00d70b15
CW
1854 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1855 return;
1856
1857 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
b24e7179
JB
1858 intel_flush_display_plane(dev_priv, plane);
1859 intel_wait_for_vblank(dev_priv->dev, pipe);
1860}
1861
693db184
CW
1862static bool need_vtd_wa(struct drm_device *dev)
1863{
1864#ifdef CONFIG_INTEL_IOMMU
1865 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
1866 return true;
1867#endif
1868 return false;
1869}
1870
127bd2ac 1871int
48b956c5 1872intel_pin_and_fence_fb_obj(struct drm_device *dev,
05394f39 1873 struct drm_i915_gem_object *obj,
919926ae 1874 struct intel_ring_buffer *pipelined)
6b95a207 1875{
ce453d81 1876 struct drm_i915_private *dev_priv = dev->dev_private;
6b95a207
KH
1877 u32 alignment;
1878 int ret;
1879
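	/*
	 * Summary of the alignment rules below (added for clarity): linear
	 * scanout needs 128KiB on 965G/GM, 4KiB on other gen4+ parts and
	 * 64KiB on older hardware; X-tiled buffers rely on pin() to satisfy
	 * the fence alignment, and Y-tiled buffers are rejected for scanout.
	 */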
05394f39 1880 switch (obj->tiling_mode) {
6b95a207 1881 case I915_TILING_NONE:
534843da
CW
1882 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1883 alignment = 128 * 1024;
a6c45cf0 1884 else if (INTEL_INFO(dev)->gen >= 4)
534843da
CW
1885 alignment = 4 * 1024;
1886 else
1887 alignment = 64 * 1024;
6b95a207
KH
1888 break;
1889 case I915_TILING_X:
1890 /* pin() will align the object as required by fence */
1891 alignment = 0;
1892 break;
1893 case I915_TILING_Y:
8bb6e959
DV
1894 /* Despite the check in framebuffer_init, userspace can still change
1895 * the tiling after the fact. Only pinned buffers are guaranteed to
1896 * keep their tiling. */
1897 DRM_DEBUG_DRIVER("Y tiled not allowed for scan out buffers\n");
6b95a207
KH
1898 return -EINVAL;
1899 default:
1900 BUG();
1901 }
1902
693db184
CW
1903 /* Note that the w/a also requires 64 PTE of padding following the
1904 * bo. We currently fill all unused PTE with the shadow page and so
1905 * we should always have valid PTE following the scanout preventing
1906 * the VT-d warning.
1907 */
1908 if (need_vtd_wa(dev) && alignment < 256 * 1024)
1909 alignment = 256 * 1024;
1910
ce453d81 1911 dev_priv->mm.interruptible = false;
2da3b9b9 1912 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
48b956c5 1913 if (ret)
ce453d81 1914 goto err_interruptible;
6b95a207
KH
1915
1916 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1917 * fence, whereas 965+ only requires a fence if using
1918 * framebuffer compression. For simplicity, we always install
1919 * a fence as the cost is not that onerous.
1920 */
06d98131 1921 ret = i915_gem_object_get_fence(obj);
9a5a53b3
CW
1922 if (ret)
1923 goto err_unpin;
1690e1eb 1924
9a5a53b3 1925 i915_gem_object_pin_fence(obj);
6b95a207 1926
ce453d81 1927 dev_priv->mm.interruptible = true;
6b95a207 1928 return 0;
48b956c5
CW
1929
1930err_unpin:
cc98b413 1931 i915_gem_object_unpin_from_display_plane(obj);
ce453d81
CW
1932err_interruptible:
1933 dev_priv->mm.interruptible = true;
48b956c5 1934 return ret;
6b95a207
KH
1935}
1936
1690e1eb
CW
1937void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1938{
1939 i915_gem_object_unpin_fence(obj);
cc98b413 1940 i915_gem_object_unpin_from_display_plane(obj);
1690e1eb
CW
1941}
1942
c2c75131
DV
1943/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
1944 * is assumed to be a power-of-two. */
bc752862
CW
1945unsigned long intel_gen4_compute_page_offset(int *x, int *y,
1946 unsigned int tiling_mode,
1947 unsigned int cpp,
1948 unsigned int pitch)
c2c75131 1949{
bc752862
CW
1950 if (tiling_mode != I915_TILING_NONE) {
1951 unsigned int tile_rows, tiles;
c2c75131 1952
bc752862
CW
1953 tile_rows = *y / 8;
1954 *y %= 8;
c2c75131 1955
bc752862
CW
1956 tiles = *x / (512/cpp);
1957 *x %= 512/cpp;
1958
1959 return tile_rows * pitch * 8 + tiles * 4096;
1960 } else {
1961 unsigned int offset;
1962
1963 offset = *y * pitch + *x * cpp;
1964 *y = 0;
1965 *x = (offset & 4095) / cpp;
1966 return offset & -4096;
1967 }
c2c75131
DV
1968}
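/*
 * Illustrative example of the arithmetic above (editorial, not from the
 * original source): for an X-tiled buffer with cpp = 4 and pitch = 8192
 * bytes, (x, y) = (200, 37) gives tile_rows = 4 and tiles = 1, so the
 * returned base offset is 4 * 8192 * 8 + 1 * 4096 = 266240 bytes and the
 * residual coordinates become (x, y) = (72, 5).
 */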
1969
17638cd6
JB
1970static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1971 int x, int y)
81255565
JB
1972{
1973 struct drm_device *dev = crtc->dev;
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1976 struct intel_framebuffer *intel_fb;
05394f39 1977 struct drm_i915_gem_object *obj;
81255565 1978 int plane = intel_crtc->plane;
e506a0c6 1979 unsigned long linear_offset;
81255565 1980 u32 dspcntr;
5eddb70b 1981 u32 reg;
81255565
JB
1982
1983 switch (plane) {
1984 case 0:
1985 case 1:
1986 break;
1987 default:
84f44ce7 1988 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
81255565
JB
1989 return -EINVAL;
1990 }
1991
1992 intel_fb = to_intel_framebuffer(fb);
1993 obj = intel_fb->obj;
81255565 1994
5eddb70b
CW
1995 reg = DSPCNTR(plane);
1996 dspcntr = I915_READ(reg);
81255565
JB
1997 /* Mask out pixel format bits in case we change it */
1998 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
57779d06
VS
1999 switch (fb->pixel_format) {
2000 case DRM_FORMAT_C8:
81255565
JB
2001 dspcntr |= DISPPLANE_8BPP;
2002 break;
57779d06
VS
2003 case DRM_FORMAT_XRGB1555:
2004 case DRM_FORMAT_ARGB1555:
2005 dspcntr |= DISPPLANE_BGRX555;
81255565 2006 break;
57779d06
VS
2007 case DRM_FORMAT_RGB565:
2008 dspcntr |= DISPPLANE_BGRX565;
2009 break;
2010 case DRM_FORMAT_XRGB8888:
2011 case DRM_FORMAT_ARGB8888:
2012 dspcntr |= DISPPLANE_BGRX888;
2013 break;
2014 case DRM_FORMAT_XBGR8888:
2015 case DRM_FORMAT_ABGR8888:
2016 dspcntr |= DISPPLANE_RGBX888;
2017 break;
2018 case DRM_FORMAT_XRGB2101010:
2019 case DRM_FORMAT_ARGB2101010:
2020 dspcntr |= DISPPLANE_BGRX101010;
2021 break;
2022 case DRM_FORMAT_XBGR2101010:
2023 case DRM_FORMAT_ABGR2101010:
2024 dspcntr |= DISPPLANE_RGBX101010;
81255565
JB
2025 break;
2026 default:
baba133a 2027 BUG();
81255565 2028 }
57779d06 2029
a6c45cf0 2030 if (INTEL_INFO(dev)->gen >= 4) {
05394f39 2031 if (obj->tiling_mode != I915_TILING_NONE)
81255565
JB
2032 dspcntr |= DISPPLANE_TILED;
2033 else
2034 dspcntr &= ~DISPPLANE_TILED;
2035 }
2036
de1aa629
VS
2037 if (IS_G4X(dev))
2038 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2039
5eddb70b 2040 I915_WRITE(reg, dspcntr);
81255565 2041
e506a0c6 2042 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
81255565 2043
c2c75131
DV
2044 if (INTEL_INFO(dev)->gen >= 4) {
2045 intel_crtc->dspaddr_offset =
bc752862
CW
2046 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2047 fb->bits_per_pixel / 8,
2048 fb->pitches[0]);
c2c75131
DV
2049 linear_offset -= intel_crtc->dspaddr_offset;
2050 } else {
e506a0c6 2051 intel_crtc->dspaddr_offset = linear_offset;
c2c75131 2052 }
e506a0c6 2053
f343c5f6
BW
2054 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2055 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2056 fb->pitches[0]);
01f2c773 2057 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
a6c45cf0 2058 if (INTEL_INFO(dev)->gen >= 4) {
c2c75131 2059 I915_MODIFY_DISPBASE(DSPSURF(plane),
f343c5f6 2060 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
5eddb70b 2061 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
e506a0c6 2062 I915_WRITE(DSPLINOFF(plane), linear_offset);
5eddb70b 2063 } else
f343c5f6 2064 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
5eddb70b 2065 POSTING_READ(reg);
81255565 2066
17638cd6
JB
2067 return 0;
2068}
2069
2070static int ironlake_update_plane(struct drm_crtc *crtc,
2071 struct drm_framebuffer *fb, int x, int y)
2072{
2073 struct drm_device *dev = crtc->dev;
2074 struct drm_i915_private *dev_priv = dev->dev_private;
2075 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2076 struct intel_framebuffer *intel_fb;
2077 struct drm_i915_gem_object *obj;
2078 int plane = intel_crtc->plane;
e506a0c6 2079 unsigned long linear_offset;
17638cd6
JB
2080 u32 dspcntr;
2081 u32 reg;
2082
2083 switch (plane) {
2084 case 0:
2085 case 1:
27f8227b 2086 case 2:
17638cd6
JB
2087 break;
2088 default:
84f44ce7 2089 DRM_ERROR("Can't update plane %c in SAREA\n", plane_name(plane));
17638cd6
JB
2090 return -EINVAL;
2091 }
2092
2093 intel_fb = to_intel_framebuffer(fb);
2094 obj = intel_fb->obj;
2095
2096 reg = DSPCNTR(plane);
2097 dspcntr = I915_READ(reg);
2098 /* Mask out pixel format bits in case we change it */
2099 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
57779d06
VS
2100 switch (fb->pixel_format) {
2101 case DRM_FORMAT_C8:
17638cd6
JB
2102 dspcntr |= DISPPLANE_8BPP;
2103 break;
57779d06
VS
2104 case DRM_FORMAT_RGB565:
2105 dspcntr |= DISPPLANE_BGRX565;
17638cd6 2106 break;
57779d06
VS
2107 case DRM_FORMAT_XRGB8888:
2108 case DRM_FORMAT_ARGB8888:
2109 dspcntr |= DISPPLANE_BGRX888;
2110 break;
2111 case DRM_FORMAT_XBGR8888:
2112 case DRM_FORMAT_ABGR8888:
2113 dspcntr |= DISPPLANE_RGBX888;
2114 break;
2115 case DRM_FORMAT_XRGB2101010:
2116 case DRM_FORMAT_ARGB2101010:
2117 dspcntr |= DISPPLANE_BGRX101010;
2118 break;
2119 case DRM_FORMAT_XBGR2101010:
2120 case DRM_FORMAT_ABGR2101010:
2121 dspcntr |= DISPPLANE_RGBX101010;
17638cd6
JB
2122 break;
2123 default:
baba133a 2124 BUG();
17638cd6
JB
2125 }
2126
2127 if (obj->tiling_mode != I915_TILING_NONE)
2128 dspcntr |= DISPPLANE_TILED;
2129 else
2130 dspcntr &= ~DISPPLANE_TILED;
2131
1f5d76db
PZ
2132 if (IS_HASWELL(dev))
2133 dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE;
2134 else
2135 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
17638cd6
JB
2136
2137 I915_WRITE(reg, dspcntr);
2138
e506a0c6 2139 linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
c2c75131 2140 intel_crtc->dspaddr_offset =
bc752862
CW
2141 intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
2142 fb->bits_per_pixel / 8,
2143 fb->pitches[0]);
c2c75131 2144 linear_offset -= intel_crtc->dspaddr_offset;
17638cd6 2145
f343c5f6
BW
2146 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
2147 i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
2148 fb->pitches[0]);
01f2c773 2149 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
c2c75131 2150 I915_MODIFY_DISPBASE(DSPSURF(plane),
f343c5f6 2151 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
bc1c91eb
DL
2152 if (IS_HASWELL(dev)) {
2153 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2154 } else {
2155 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2156 I915_WRITE(DSPLINOFF(plane), linear_offset);
2157 }
17638cd6
JB
2158 POSTING_READ(reg);
2159
2160 return 0;
2161}
2162
2163/* Assume fb object is pinned & idle & fenced and just update base pointers */
2164static int
2165intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2166 int x, int y, enum mode_set_atomic state)
2167{
2168 struct drm_device *dev = crtc->dev;
2169 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 2170
6b8e6ed0
CW
2171 if (dev_priv->display.disable_fbc)
2172 dev_priv->display.disable_fbc(dev);
3dec0095 2173 intel_increase_pllclock(crtc);
81255565 2174
6b8e6ed0 2175 return dev_priv->display.update_plane(crtc, fb, x, y);
81255565
JB
2176}
2177
96a02917
VS
2178void intel_display_handle_reset(struct drm_device *dev)
2179{
2180 struct drm_i915_private *dev_priv = dev->dev_private;
2181 struct drm_crtc *crtc;
2182
2183 /*
2184 * Flips in the rings have been nuked by the reset,
2185 * so complete all pending flips so that user space
2186 * will get its events and not get stuck.
2187 *
2188 * Also update the base address of all primary
2189 * planes to the last fb to make sure we're
2190 * showing the correct fb after a reset.
2191 *
2192 * Need to make two loops over the crtcs so that we
2193 * don't try to grab a crtc mutex before the
2194 * pending_flip_queue really got woken up.
2195 */
2196
2197 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2198 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2199 enum plane plane = intel_crtc->plane;
2200
2201 intel_prepare_page_flip(dev, plane);
2202 intel_finish_page_flip_plane(dev, plane);
2203 }
2204
2205 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
2206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2207
2208 mutex_lock(&crtc->mutex);
2209 if (intel_crtc->active)
2210 dev_priv->display.update_plane(crtc, crtc->fb,
2211 crtc->x, crtc->y);
2212 mutex_unlock(&crtc->mutex);
2213 }
2214}
2215
14667a4b
CW
2216static int
2217intel_finish_fb(struct drm_framebuffer *old_fb)
2218{
2219 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2220 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2221 bool was_interruptible = dev_priv->mm.interruptible;
2222 int ret;
2223
14667a4b
CW
2224 /* Big Hammer, we also need to ensure that any pending
2225 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2226 * current scanout is retired before unpinning the old
2227 * framebuffer.
2228 *
2229 * This should only fail upon a hung GPU, in which case we
2230 * can safely continue.
2231 */
2232 dev_priv->mm.interruptible = false;
2233 ret = i915_gem_object_finish_gpu(obj);
2234 dev_priv->mm.interruptible = was_interruptible;
2235
2236 return ret;
2237}
2238
198598d0
VS
2239static void intel_crtc_update_sarea_pos(struct drm_crtc *crtc, int x, int y)
2240{
2241 struct drm_device *dev = crtc->dev;
2242 struct drm_i915_master_private *master_priv;
2243 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2244
2245 if (!dev->primary->master)
2246 return;
2247
2248 master_priv = dev->primary->master->driver_priv;
2249 if (!master_priv->sarea_priv)
2250 return;
2251
2252 switch (intel_crtc->pipe) {
2253 case 0:
2254 master_priv->sarea_priv->pipeA_x = x;
2255 master_priv->sarea_priv->pipeA_y = y;
2256 break;
2257 case 1:
2258 master_priv->sarea_priv->pipeB_x = x;
2259 master_priv->sarea_priv->pipeB_y = y;
2260 break;
2261 default:
2262 break;
2263 }
2264}
2265
5c3b82e2 2266static int
3c4fdcfb 2267intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
94352cf9 2268 struct drm_framebuffer *fb)
79e53945
JB
2269{
2270 struct drm_device *dev = crtc->dev;
6b8e6ed0 2271 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 2272 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
94352cf9 2273 struct drm_framebuffer *old_fb;
5c3b82e2 2274 int ret;
79e53945
JB
2275
2276 /* no fb bound */
94352cf9 2277 if (!fb) {
a5071c2f 2278 DRM_ERROR("No FB bound\n");
5c3b82e2
CW
2279 return 0;
2280 }
2281
7eb552ae 2282 if (intel_crtc->plane > INTEL_INFO(dev)->num_pipes) {
84f44ce7
VS
2283 DRM_ERROR("no plane for crtc: plane %c, num_pipes %d\n",
2284 plane_name(intel_crtc->plane),
2285 INTEL_INFO(dev)->num_pipes);
5c3b82e2 2286 return -EINVAL;
79e53945
JB
2287 }
2288
5c3b82e2 2289 mutex_lock(&dev->struct_mutex);
265db958 2290 ret = intel_pin_and_fence_fb_obj(dev,
94352cf9 2291 to_intel_framebuffer(fb)->obj,
919926ae 2292 NULL);
5c3b82e2
CW
2293 if (ret != 0) {
2294 mutex_unlock(&dev->struct_mutex);
a5071c2f 2295 DRM_ERROR("pin & fence failed\n");
5c3b82e2
CW
2296 return ret;
2297 }
79e53945 2298
bb2043de
DL
2299 /*
2300 * Update pipe size and adjust fitter if needed: the reason for this is
2301 * that in compute_mode_changes we check the native mode (not the pfit
2302 * mode) to see if we can flip rather than do a full mode set. In the
2303 * fastboot case, we'll flip, but if we don't update the pipesrc and
2304 * pfit state, we'll end up with a big fb scanned out into the wrong
2305 * sized surface.
2306 *
2307 * To fix this properly, we need to hoist the checks up into
2308 * compute_mode_changes (or above), check the actual pfit state and
2309 * whether the platform allows pfit disable with pipe active, and only
2310 * then update the pipesrc and pfit state, even on the flip path.
2311 */
4d6a3e63 2312 if (i915_fastboot) {
d7bf63f2
DL
2313 const struct drm_display_mode *adjusted_mode =
2314 &intel_crtc->config.adjusted_mode;
2315
4d6a3e63 2316 I915_WRITE(PIPESRC(intel_crtc->pipe),
d7bf63f2
DL
2317 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2318 (adjusted_mode->crtc_vdisplay - 1));
fd4daa9c 2319 if (!intel_crtc->config.pch_pfit.enabled &&
4d6a3e63
JB
2320 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2321 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2322 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2323 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2324 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2325 }
2326 }
2327
94352cf9 2328 ret = dev_priv->display.update_plane(crtc, fb, x, y);
4e6cfefc 2329 if (ret) {
94352cf9 2330 intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
5c3b82e2 2331 mutex_unlock(&dev->struct_mutex);
a5071c2f 2332 DRM_ERROR("failed to update base address\n");
4e6cfefc 2333 return ret;
79e53945 2334 }
3c4fdcfb 2335
94352cf9
DV
2336 old_fb = crtc->fb;
2337 crtc->fb = fb;
6c4c86f5
DV
2338 crtc->x = x;
2339 crtc->y = y;
94352cf9 2340
b7f1de28 2341 if (old_fb) {
d7697eea
DV
2342 if (intel_crtc->active && old_fb != fb)
2343 intel_wait_for_vblank(dev, intel_crtc->pipe);
1690e1eb 2344 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
b7f1de28 2345 }
652c393a 2346
6b8e6ed0 2347 intel_update_fbc(dev);
4906557e 2348 intel_edp_psr_update(dev);
5c3b82e2 2349 mutex_unlock(&dev->struct_mutex);
79e53945 2350
198598d0 2351 intel_crtc_update_sarea_pos(crtc, x, y);
5c3b82e2
CW
2352
2353 return 0;
79e53945
JB
2354}
2355
5e84e1a4
ZW
2356static void intel_fdi_normal_train(struct drm_crtc *crtc)
2357{
2358 struct drm_device *dev = crtc->dev;
2359 struct drm_i915_private *dev_priv = dev->dev_private;
2360 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2361 int pipe = intel_crtc->pipe;
2362 u32 reg, temp;
2363
2364 /* enable normal train */
2365 reg = FDI_TX_CTL(pipe);
2366 temp = I915_READ(reg);
61e499bf 2367 if (IS_IVYBRIDGE(dev)) {
357555c0
JB
2368 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2369 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
2370 } else {
2371 temp &= ~FDI_LINK_TRAIN_NONE;
2372 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 2373 }
5e84e1a4
ZW
2374 I915_WRITE(reg, temp);
2375
2376 reg = FDI_RX_CTL(pipe);
2377 temp = I915_READ(reg);
2378 if (HAS_PCH_CPT(dev)) {
2379 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2380 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2381 } else {
2382 temp &= ~FDI_LINK_TRAIN_NONE;
2383 temp |= FDI_LINK_TRAIN_NONE;
2384 }
2385 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2386
2387 /* wait one idle pattern time */
2388 POSTING_READ(reg);
2389 udelay(1000);
357555c0
JB
2390
2391 /* IVB wants error correction enabled */
2392 if (IS_IVYBRIDGE(dev))
2393 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2394 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
2395}
2396
1e833f40
DV
2397static bool pipe_has_enabled_pch(struct intel_crtc *intel_crtc)
2398{
2399 return intel_crtc->base.enabled && intel_crtc->config.has_pch_encoder;
2400}
2401
01a415fd
DV
2402static void ivb_modeset_global_resources(struct drm_device *dev)
2403{
2404 struct drm_i915_private *dev_priv = dev->dev_private;
2405 struct intel_crtc *pipe_B_crtc =
2406 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
2407 struct intel_crtc *pipe_C_crtc =
2408 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
2409 uint32_t temp;
2410
1e833f40
DV
2411 /*
2412 * When everything is off, disable fdi C so that we can enable fdi B
2413 * with all lanes. Note that we don't care about enabled pipes without
2414 * an enabled pch encoder.
2415 */
2416 if (!pipe_has_enabled_pch(pipe_B_crtc) &&
2417 !pipe_has_enabled_pch(pipe_C_crtc)) {
01a415fd
DV
2418 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
2419 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
2420
2421 temp = I915_READ(SOUTH_CHICKEN1);
2422 temp &= ~FDI_BC_BIFURCATION_SELECT;
2423 DRM_DEBUG_KMS("disabling fdi C rx\n");
2424 I915_WRITE(SOUTH_CHICKEN1, temp);
2425 }
2426}
2427
8db9d77b
ZW
2428/* The FDI link training functions for ILK/Ibexpeak. */
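/* Editorial summary of the handshake implemented below: unmask the RX lock
 * status bits, enable CPU FDI TX and PCH FDI RX with training pattern 1,
 * poll FDI_RX_IIR for bit lock, then switch both sides to training pattern 2
 * and poll for symbol lock; normal (non-training) operation is restored
 * later via intel_fdi_normal_train().
 */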
2429static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2430{
2431 struct drm_device *dev = crtc->dev;
2432 struct drm_i915_private *dev_priv = dev->dev_private;
2433 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2434 int pipe = intel_crtc->pipe;
0fc932b8 2435 int plane = intel_crtc->plane;
5eddb70b 2436 u32 reg, temp, tries;
8db9d77b 2437
0fc932b8
JB
2438 /* FDI needs bits from pipe & plane first */
2439 assert_pipe_enabled(dev_priv, pipe);
2440 assert_plane_enabled(dev_priv, plane);
2441
e1a44743
AJ
2442 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2443 for train result */
5eddb70b
CW
2444 reg = FDI_RX_IMR(pipe);
2445 temp = I915_READ(reg);
e1a44743
AJ
2446 temp &= ~FDI_RX_SYMBOL_LOCK;
2447 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2448 I915_WRITE(reg, temp);
2449 I915_READ(reg);
e1a44743
AJ
2450 udelay(150);
2451
8db9d77b 2452 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2453 reg = FDI_TX_CTL(pipe);
2454 temp = I915_READ(reg);
627eb5a3
DV
2455 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2456 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
8db9d77b
ZW
2457 temp &= ~FDI_LINK_TRAIN_NONE;
2458 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 2459 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2460
5eddb70b
CW
2461 reg = FDI_RX_CTL(pipe);
2462 temp = I915_READ(reg);
8db9d77b
ZW
2463 temp &= ~FDI_LINK_TRAIN_NONE;
2464 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
2465 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2466
2467 POSTING_READ(reg);
8db9d77b
ZW
2468 udelay(150);
2469
5b2adf89 2470 /* Ironlake workaround, enable clock pointer after FDI enable */
8f5718a6
DV
2471 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2472 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2473 FDI_RX_PHASE_SYNC_POINTER_EN);
5b2adf89 2474
5eddb70b 2475 reg = FDI_RX_IIR(pipe);
e1a44743 2476 for (tries = 0; tries < 5; tries++) {
5eddb70b 2477 temp = I915_READ(reg);
8db9d77b
ZW
2478 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2479
2480 if ((temp & FDI_RX_BIT_LOCK)) {
2481 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 2482 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
2483 break;
2484 }
8db9d77b 2485 }
e1a44743 2486 if (tries == 5)
5eddb70b 2487 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2488
2489 /* Train 2 */
5eddb70b
CW
2490 reg = FDI_TX_CTL(pipe);
2491 temp = I915_READ(reg);
8db9d77b
ZW
2492 temp &= ~FDI_LINK_TRAIN_NONE;
2493 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2494 I915_WRITE(reg, temp);
8db9d77b 2495
5eddb70b
CW
2496 reg = FDI_RX_CTL(pipe);
2497 temp = I915_READ(reg);
8db9d77b
ZW
2498 temp &= ~FDI_LINK_TRAIN_NONE;
2499 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2500 I915_WRITE(reg, temp);
8db9d77b 2501
5eddb70b
CW
2502 POSTING_READ(reg);
2503 udelay(150);
8db9d77b 2504
5eddb70b 2505 reg = FDI_RX_IIR(pipe);
e1a44743 2506 for (tries = 0; tries < 5; tries++) {
5eddb70b 2507 temp = I915_READ(reg);
8db9d77b
ZW
2508 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2509
2510 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 2511 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
2512 DRM_DEBUG_KMS("FDI train 2 done.\n");
2513 break;
2514 }
8db9d77b 2515 }
e1a44743 2516 if (tries == 5)
5eddb70b 2517 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2518
2519 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 2520
8db9d77b
ZW
2521}
2522
0206e353 2523static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
2524 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2525 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2526 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2527 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2528};
2529
2530/* The FDI link training functions for SNB/Cougarpoint. */
2531static void gen6_fdi_link_train(struct drm_crtc *crtc)
2532{
2533 struct drm_device *dev = crtc->dev;
2534 struct drm_i915_private *dev_priv = dev->dev_private;
2535 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2536 int pipe = intel_crtc->pipe;
fa37d39e 2537 u32 reg, temp, i, retry;
8db9d77b 2538
e1a44743
AJ
2539 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2540 for train result */
5eddb70b
CW
2541 reg = FDI_RX_IMR(pipe);
2542 temp = I915_READ(reg);
e1a44743
AJ
2543 temp &= ~FDI_RX_SYMBOL_LOCK;
2544 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2545 I915_WRITE(reg, temp);
2546
2547 POSTING_READ(reg);
e1a44743
AJ
2548 udelay(150);
2549
8db9d77b 2550 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2551 reg = FDI_TX_CTL(pipe);
2552 temp = I915_READ(reg);
627eb5a3
DV
2553 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2554 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
8db9d77b
ZW
2555 temp &= ~FDI_LINK_TRAIN_NONE;
2556 temp |= FDI_LINK_TRAIN_PATTERN_1;
2557 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2558 /* SNB-B */
2559 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 2560 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2561
d74cf324
DV
2562 I915_WRITE(FDI_RX_MISC(pipe),
2563 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
2564
5eddb70b
CW
2565 reg = FDI_RX_CTL(pipe);
2566 temp = I915_READ(reg);
8db9d77b
ZW
2567 if (HAS_PCH_CPT(dev)) {
2568 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2569 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2570 } else {
2571 temp &= ~FDI_LINK_TRAIN_NONE;
2572 temp |= FDI_LINK_TRAIN_PATTERN_1;
2573 }
5eddb70b
CW
2574 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2575
2576 POSTING_READ(reg);
8db9d77b
ZW
2577 udelay(150);
2578
0206e353 2579 for (i = 0; i < 4; i++) {
5eddb70b
CW
2580 reg = FDI_TX_CTL(pipe);
2581 temp = I915_READ(reg);
8db9d77b
ZW
2582 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2583 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2584 I915_WRITE(reg, temp);
2585
2586 POSTING_READ(reg);
8db9d77b
ZW
2587 udelay(500);
2588
fa37d39e
SP
2589 for (retry = 0; retry < 5; retry++) {
2590 reg = FDI_RX_IIR(pipe);
2591 temp = I915_READ(reg);
2592 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2593 if (temp & FDI_RX_BIT_LOCK) {
2594 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2595 DRM_DEBUG_KMS("FDI train 1 done.\n");
2596 break;
2597 }
2598 udelay(50);
8db9d77b 2599 }
fa37d39e
SP
2600 if (retry < 5)
2601 break;
8db9d77b
ZW
2602 }
2603 if (i == 4)
5eddb70b 2604 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2605
2606 /* Train 2 */
5eddb70b
CW
2607 reg = FDI_TX_CTL(pipe);
2608 temp = I915_READ(reg);
8db9d77b
ZW
2609 temp &= ~FDI_LINK_TRAIN_NONE;
2610 temp |= FDI_LINK_TRAIN_PATTERN_2;
2611 if (IS_GEN6(dev)) {
2612 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2613 /* SNB-B */
2614 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2615 }
5eddb70b 2616 I915_WRITE(reg, temp);
8db9d77b 2617
5eddb70b
CW
2618 reg = FDI_RX_CTL(pipe);
2619 temp = I915_READ(reg);
8db9d77b
ZW
2620 if (HAS_PCH_CPT(dev)) {
2621 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2622 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2623 } else {
2624 temp &= ~FDI_LINK_TRAIN_NONE;
2625 temp |= FDI_LINK_TRAIN_PATTERN_2;
2626 }
5eddb70b
CW
2627 I915_WRITE(reg, temp);
2628
2629 POSTING_READ(reg);
8db9d77b
ZW
2630 udelay(150);
2631
0206e353 2632 for (i = 0; i < 4; i++) {
5eddb70b
CW
2633 reg = FDI_TX_CTL(pipe);
2634 temp = I915_READ(reg);
8db9d77b
ZW
2635 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2636 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2637 I915_WRITE(reg, temp);
2638
2639 POSTING_READ(reg);
8db9d77b
ZW
2640 udelay(500);
2641
fa37d39e
SP
2642 for (retry = 0; retry < 5; retry++) {
2643 reg = FDI_RX_IIR(pipe);
2644 temp = I915_READ(reg);
2645 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2646 if (temp & FDI_RX_SYMBOL_LOCK) {
2647 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2648 DRM_DEBUG_KMS("FDI train 2 done.\n");
2649 break;
2650 }
2651 udelay(50);
8db9d77b 2652 }
fa37d39e
SP
2653 if (retry < 5)
2654 break;
8db9d77b
ZW
2655 }
2656 if (i == 4)
5eddb70b 2657 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2658
2659 DRM_DEBUG_KMS("FDI train done.\n");
2660}
2661
357555c0
JB
2662/* Manual link training for Ivy Bridge A0 parts */
2663static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2664{
2665 struct drm_device *dev = crtc->dev;
2666 struct drm_i915_private *dev_priv = dev->dev_private;
2667 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2668 int pipe = intel_crtc->pipe;
139ccd3f 2669 u32 reg, temp, i, j;
357555c0
JB
2670
2671 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2672 for train result */
2673 reg = FDI_RX_IMR(pipe);
2674 temp = I915_READ(reg);
2675 temp &= ~FDI_RX_SYMBOL_LOCK;
2676 temp &= ~FDI_RX_BIT_LOCK;
2677 I915_WRITE(reg, temp);
2678
2679 POSTING_READ(reg);
2680 udelay(150);
2681
01a415fd
DV
2682 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
2683 I915_READ(FDI_RX_IIR(pipe)));
2684
139ccd3f
JB
2685 /* Try each vswing and preemphasis setting twice before moving on */
2686 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
2687 /* disable first in case we need to retry */
2688 reg = FDI_TX_CTL(pipe);
2689 temp = I915_READ(reg);
2690 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2691 temp &= ~FDI_TX_ENABLE;
2692 I915_WRITE(reg, temp);
357555c0 2693
139ccd3f
JB
2694 reg = FDI_RX_CTL(pipe);
2695 temp = I915_READ(reg);
2696 temp &= ~FDI_LINK_TRAIN_AUTO;
2697 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2698 temp &= ~FDI_RX_ENABLE;
2699 I915_WRITE(reg, temp);
357555c0 2700
139ccd3f 2701 /* enable CPU FDI TX and PCH FDI RX */
357555c0
JB
2702 reg = FDI_TX_CTL(pipe);
2703 temp = I915_READ(reg);
139ccd3f
JB
2704 temp &= ~FDI_DP_PORT_WIDTH_MASK;
2705 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
2706 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
357555c0 2707 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
139ccd3f
JB
2708 temp |= snb_b_fdi_train_param[j/2];
2709 temp |= FDI_COMPOSITE_SYNC;
2710 I915_WRITE(reg, temp | FDI_TX_ENABLE);
357555c0 2711
139ccd3f
JB
2712 I915_WRITE(FDI_RX_MISC(pipe),
2713 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
357555c0 2714
139ccd3f 2715 reg = FDI_RX_CTL(pipe);
357555c0 2716 temp = I915_READ(reg);
139ccd3f
JB
2717 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2718 temp |= FDI_COMPOSITE_SYNC;
2719 I915_WRITE(reg, temp | FDI_RX_ENABLE);
357555c0 2720
139ccd3f
JB
2721 POSTING_READ(reg);
2722 udelay(1); /* should be 0.5us */
357555c0 2723
139ccd3f
JB
2724 for (i = 0; i < 4; i++) {
2725 reg = FDI_RX_IIR(pipe);
2726 temp = I915_READ(reg);
2727 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
357555c0 2728
139ccd3f
JB
2729 if (temp & FDI_RX_BIT_LOCK ||
2730 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2731 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2732 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
2733 i);
2734 break;
2735 }
2736 udelay(1); /* should be 0.5us */
2737 }
2738 if (i == 4) {
2739 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
2740 continue;
2741 }
357555c0 2742
139ccd3f 2743 /* Train 2 */
357555c0
JB
2744 reg = FDI_TX_CTL(pipe);
2745 temp = I915_READ(reg);
139ccd3f
JB
2746 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2747 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2748 I915_WRITE(reg, temp);
2749
2750 reg = FDI_RX_CTL(pipe);
2751 temp = I915_READ(reg);
2752 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2753 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
357555c0
JB
2754 I915_WRITE(reg, temp);
2755
2756 POSTING_READ(reg);
139ccd3f 2757 udelay(2); /* should be 1.5us */
357555c0 2758
139ccd3f
JB
2759 for (i = 0; i < 4; i++) {
2760 reg = FDI_RX_IIR(pipe);
2761 temp = I915_READ(reg);
2762 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
357555c0 2763
139ccd3f
JB
2764 if (temp & FDI_RX_SYMBOL_LOCK ||
2765 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
2766 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2767 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
2768 i);
2769 goto train_done;
2770 }
2771 udelay(2); /* should be 1.5us */
357555c0 2772 }
139ccd3f
JB
2773 if (i == 4)
2774 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
357555c0 2775 }
357555c0 2776
139ccd3f 2777train_done:
357555c0
JB
2778 DRM_DEBUG_KMS("FDI train done.\n");
2779}
2780
88cefb6c 2781static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
2c07245f 2782{
88cefb6c 2783 struct drm_device *dev = intel_crtc->base.dev;
2c07245f 2784 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f 2785 int pipe = intel_crtc->pipe;
5eddb70b 2786 u32 reg, temp;
79e53945 2787
c64e311e 2788
c98e9dcf 2789 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
2790 reg = FDI_RX_CTL(pipe);
2791 temp = I915_READ(reg);
627eb5a3
DV
2792 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
2793 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
dfd07d72 2794 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5eddb70b
CW
2795 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2796
2797 POSTING_READ(reg);
c98e9dcf
JB
2798 udelay(200);
2799
2800 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
2801 temp = I915_READ(reg);
2802 I915_WRITE(reg, temp | FDI_PCDCLK);
2803
2804 POSTING_READ(reg);
c98e9dcf
JB
2805 udelay(200);
2806
20749730
PZ
2807 /* Enable CPU FDI TX PLL, always on for Ironlake */
2808 reg = FDI_TX_CTL(pipe);
2809 temp = I915_READ(reg);
2810 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2811 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 2812
20749730
PZ
2813 POSTING_READ(reg);
2814 udelay(100);
6be4a607 2815 }
0e23b99d
JB
2816}
2817
88cefb6c
DV
2818static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
2819{
2820 struct drm_device *dev = intel_crtc->base.dev;
2821 struct drm_i915_private *dev_priv = dev->dev_private;
2822 int pipe = intel_crtc->pipe;
2823 u32 reg, temp;
2824
2825 /* Switch from PCDclk to Rawclk */
2826 reg = FDI_RX_CTL(pipe);
2827 temp = I915_READ(reg);
2828 I915_WRITE(reg, temp & ~FDI_PCDCLK);
2829
2830 /* Disable CPU FDI TX PLL */
2831 reg = FDI_TX_CTL(pipe);
2832 temp = I915_READ(reg);
2833 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
2834
2835 POSTING_READ(reg);
2836 udelay(100);
2837
2838 reg = FDI_RX_CTL(pipe);
2839 temp = I915_READ(reg);
2840 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2841
2842 /* Wait for the clocks to turn off. */
2843 POSTING_READ(reg);
2844 udelay(100);
2845}
2846
0fc932b8
JB
2847static void ironlake_fdi_disable(struct drm_crtc *crtc)
2848{
2849 struct drm_device *dev = crtc->dev;
2850 struct drm_i915_private *dev_priv = dev->dev_private;
2851 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2852 int pipe = intel_crtc->pipe;
2853 u32 reg, temp;
2854
2855 /* disable CPU FDI tx and PCH FDI rx */
2856 reg = FDI_TX_CTL(pipe);
2857 temp = I915_READ(reg);
2858 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2859 POSTING_READ(reg);
2860
2861 reg = FDI_RX_CTL(pipe);
2862 temp = I915_READ(reg);
2863 temp &= ~(0x7 << 16);
dfd07d72 2864 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
0fc932b8
JB
2865 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2866
2867 POSTING_READ(reg);
2868 udelay(100);
2869
2870 /* Ironlake workaround, disable clock pointer after downing FDI */
6f06ce18
JB
2871 if (HAS_PCH_IBX(dev)) {
2872 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
6f06ce18 2873 }
0fc932b8
JB
2874
2875 /* still set train pattern 1 */
2876 reg = FDI_TX_CTL(pipe);
2877 temp = I915_READ(reg);
2878 temp &= ~FDI_LINK_TRAIN_NONE;
2879 temp |= FDI_LINK_TRAIN_PATTERN_1;
2880 I915_WRITE(reg, temp);
2881
2882 reg = FDI_RX_CTL(pipe);
2883 temp = I915_READ(reg);
2884 if (HAS_PCH_CPT(dev)) {
2885 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2886 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2887 } else {
2888 temp &= ~FDI_LINK_TRAIN_NONE;
2889 temp |= FDI_LINK_TRAIN_PATTERN_1;
2890 }
2891 /* BPC in FDI rx is consistent with that in PIPECONF */
2892 temp &= ~(0x07 << 16);
dfd07d72 2893 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
0fc932b8
JB
2894 I915_WRITE(reg, temp);
2895
2896 POSTING_READ(reg);
2897 udelay(100);
2898}
2899
5bb61643
CW
2900static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2901{
2902 struct drm_device *dev = crtc->dev;
2903 struct drm_i915_private *dev_priv = dev->dev_private;
10d83730 2904 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5bb61643
CW
2905 unsigned long flags;
2906 bool pending;
2907
10d83730
VS
2908 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2909 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
5bb61643
CW
2910 return false;
2911
2912 spin_lock_irqsave(&dev->event_lock, flags);
2913 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2914 spin_unlock_irqrestore(&dev->event_lock, flags);
2915
2916 return pending;
2917}
2918
e6c3a2a6
CW
2919static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2920{
0f91128d 2921 struct drm_device *dev = crtc->dev;
5bb61643 2922 struct drm_i915_private *dev_priv = dev->dev_private;
e6c3a2a6
CW
2923
2924 if (crtc->fb == NULL)
2925 return;
2926
2c10d571
DV
2927 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
2928
5bb61643
CW
2929 wait_event(dev_priv->pending_flip_queue,
2930 !intel_crtc_has_pending_flip(crtc));
2931
0f91128d
CW
2932 mutex_lock(&dev->struct_mutex);
2933 intel_finish_fb(crtc->fb);
2934 mutex_unlock(&dev->struct_mutex);
e6c3a2a6
CW
2935}
2936
e615efe4
ED
2937/* Program iCLKIP clock to the desired frequency */
2938static void lpt_program_iclkip(struct drm_crtc *crtc)
2939{
2940 struct drm_device *dev = crtc->dev;
2941 struct drm_i915_private *dev_priv = dev->dev_private;
241bfc38 2942 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
e615efe4
ED
2943 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2944 u32 temp;
2945
09153000
DV
2946 mutex_lock(&dev_priv->dpio_lock);
2947
e615efe4
ED
2948 /* It is necessary to ungate the pixclk gate prior to programming
2949 * the divisors, and gate it back when it is done.
2950 */
2951 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2952
2953 /* Disable SSCCTL */
2954 intel_sbi_write(dev_priv, SBI_SSCCTL6,
988d6ee8
PZ
2955 intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
2956 SBI_SSCCTL_DISABLE,
2957 SBI_ICLK);
e615efe4
ED
2958
2959 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
12d7ceed 2960 if (clock == 20000) {
e615efe4
ED
2961 auxdiv = 1;
2962 divsel = 0x41;
2963 phaseinc = 0x20;
2964 } else {
2965 /* The iCLK virtual clock root frequency is in MHz,
241bfc38
DL
2966 * but the adjusted_mode->crtc_clock is in KHz. To get the
2967 * divisors, it is necessary to divide one by another, so we
e615efe4
ED
2968 * convert the virtual clock precision to KHz here for higher
2969 * precision.
2970 */
2971 u32 iclk_virtual_root_freq = 172800 * 1000;
2972 u32 iclk_pi_range = 64;
2973 u32 desired_divisor, msb_divisor_value, pi_value;
2974
12d7ceed 2975 desired_divisor = (iclk_virtual_root_freq / clock);
e615efe4
ED
2976 msb_divisor_value = desired_divisor / iclk_pi_range;
2977 pi_value = desired_divisor % iclk_pi_range;
2978
2979 auxdiv = 0;
2980 divsel = msb_divisor_value - 2;
2981 phaseinc = pi_value;
2982 }
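	/*
	 * Illustrative numbers (editorial example): a 108000 kHz clock gives
	 * desired_divisor = 172800000 / 108000 = 1600, so msb_divisor_value =
	 * 25 and pi_value = 0, i.e. divsel = 23, phaseinc = 0 and auxdiv = 0.
	 */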
2983
2984 /* This should not happen with any sane values */
2985 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2986 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2987 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2988 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2989
2990 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
12d7ceed 2991 clock,
e615efe4
ED
2992 auxdiv,
2993 divsel,
2994 phasedir,
2995 phaseinc);
2996
2997 /* Program SSCDIVINTPHASE6 */
988d6ee8 2998 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
e615efe4
ED
2999 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3000 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3001 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3002 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3003 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3004 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
988d6ee8 3005 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
e615efe4
ED
3006
3007 /* Program SSCAUXDIV */
988d6ee8 3008 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
e615efe4
ED
3009 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3010 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
988d6ee8 3011 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
e615efe4
ED
3012
3013 /* Enable modulator and associated divider */
988d6ee8 3014 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
e615efe4 3015 temp &= ~SBI_SSCCTL_DISABLE;
988d6ee8 3016 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
e615efe4
ED
3017
3018 /* Wait for initialization time */
3019 udelay(24);
3020
3021 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
09153000
DV
3022
3023 mutex_unlock(&dev_priv->dpio_lock);
e615efe4
ED
3024}
3025
275f01b2
DV
3026static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3027 enum pipe pch_transcoder)
3028{
3029 struct drm_device *dev = crtc->base.dev;
3030 struct drm_i915_private *dev_priv = dev->dev_private;
3031 enum transcoder cpu_transcoder = crtc->config.cpu_transcoder;
3032
3033 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3034 I915_READ(HTOTAL(cpu_transcoder)));
3035 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3036 I915_READ(HBLANK(cpu_transcoder)));
3037 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3038 I915_READ(HSYNC(cpu_transcoder)));
3039
3040 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3041 I915_READ(VTOTAL(cpu_transcoder)));
3042 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3043 I915_READ(VBLANK(cpu_transcoder)));
3044 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3045 I915_READ(VSYNC(cpu_transcoder)));
3046 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3047 I915_READ(VSYNCSHIFT(cpu_transcoder)));
3048}
3049
f67a559d
JB
3050/*
3051 * Enable PCH resources required for PCH ports:
3052 * - PCH PLLs
3053 * - FDI training & RX/TX
3054 * - update transcoder timings
3055 * - DP transcoding bits
3056 * - transcoder
3057 */
3058static void ironlake_pch_enable(struct drm_crtc *crtc)
0e23b99d
JB
3059{
3060 struct drm_device *dev = crtc->dev;
3061 struct drm_i915_private *dev_priv = dev->dev_private;
3062 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3063 int pipe = intel_crtc->pipe;
ee7b9f93 3064 u32 reg, temp;
2c07245f 3065
ab9412ba 3066 assert_pch_transcoder_disabled(dev_priv, pipe);
e7e164db 3067
cd986abb
DV
3068 /* Write the TU size bits before fdi link training, so that error
3069 * detection works. */
3070 I915_WRITE(FDI_RX_TUSIZE1(pipe),
3071 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
3072
c98e9dcf 3073 /* For PCH output, training FDI link */
674cf967 3074 dev_priv->display.fdi_link_train(crtc);
2c07245f 3075
3ad8a208
DV
3076 /* We need to program the right clock selection before writing the pixel
3077 * multiplier into the DPLL. */
303b81e0 3078 if (HAS_PCH_CPT(dev)) {
ee7b9f93 3079 u32 sel;
4b645f14 3080
c98e9dcf 3081 temp = I915_READ(PCH_DPLL_SEL);
11887397
DV
3082 temp |= TRANS_DPLL_ENABLE(pipe);
3083 sel = TRANS_DPLLB_SEL(pipe);
a43f6e0f 3084 if (intel_crtc->config.shared_dpll == DPLL_ID_PCH_PLL_B)
ee7b9f93
JB
3085 temp |= sel;
3086 else
3087 temp &= ~sel;
c98e9dcf 3088 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 3089 }
5eddb70b 3090
3ad8a208
DV
3091 /* XXX: pch pll's can be enabled any time before we enable the PCH
3092 * transcoder, and we actually should do this to not upset any PCH
3093 * transcoder that already use the clock when we share it.
3094 *
3095 * Note that enable_shared_dpll tries to do the right thing, but
3096 * get_shared_dpll unconditionally resets the pll - we need that to have
3097 * the right LVDS enable sequence. */
3098 ironlake_enable_shared_dpll(intel_crtc);
3099
d9b6cb56
JB
3100 /* set transcoder timing, panel must allow it */
3101 assert_panel_unlocked(dev_priv, pipe);
275f01b2 3102 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
8db9d77b 3103
303b81e0 3104 intel_fdi_normal_train(crtc);
5e84e1a4 3105
c98e9dcf
JB
3106 /* For PCH DP, enable TRANS_DP_CTL */
3107 if (HAS_PCH_CPT(dev) &&
417e822d
KP
3108 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3109 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
dfd07d72 3110 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
5eddb70b
CW
3111 reg = TRANS_DP_CTL(pipe);
3112 temp = I915_READ(reg);
3113 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
3114 TRANS_DP_SYNC_MASK |
3115 TRANS_DP_BPC_MASK);
5eddb70b
CW
3116 temp |= (TRANS_DP_OUTPUT_ENABLE |
3117 TRANS_DP_ENH_FRAMING);
9325c9f0 3118 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf
JB
3119
3120 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 3121 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
c98e9dcf 3122 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 3123 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf
JB
3124
3125 switch (intel_trans_dp_port_sel(crtc)) {
3126 case PCH_DP_B:
5eddb70b 3127 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf
JB
3128 break;
3129 case PCH_DP_C:
5eddb70b 3130 temp |= TRANS_DP_PORT_SEL_C;
c98e9dcf
JB
3131 break;
3132 case PCH_DP_D:
5eddb70b 3133 temp |= TRANS_DP_PORT_SEL_D;
c98e9dcf
JB
3134 break;
3135 default:
e95d41e1 3136 BUG();
32f9d658 3137 }
2c07245f 3138
5eddb70b 3139 I915_WRITE(reg, temp);
6be4a607 3140 }
b52eb4dc 3141
b8a4f404 3142 ironlake_enable_pch_transcoder(dev_priv, pipe);
f67a559d
JB
3143}
3144
1507e5bd
PZ
3145static void lpt_pch_enable(struct drm_crtc *crtc)
3146{
3147 struct drm_device *dev = crtc->dev;
3148 struct drm_i915_private *dev_priv = dev->dev_private;
3149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3b117c8f 3150 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
1507e5bd 3151
ab9412ba 3152 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
1507e5bd 3153
8c52b5e8 3154 lpt_program_iclkip(crtc);
1507e5bd 3155
0540e488 3156 /* Set transcoder timing. */
275f01b2 3157 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
1507e5bd 3158
937bb610 3159 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
f67a559d
JB
3160}
3161
e2b78267 3162static void intel_put_shared_dpll(struct intel_crtc *crtc)
ee7b9f93 3163{
e2b78267 3164 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
ee7b9f93
JB
3165
3166 if (pll == NULL)
3167 return;
3168
3169 if (pll->refcount == 0) {
46edb027 3170 WARN(1, "bad %s refcount\n", pll->name);
ee7b9f93
JB
3171 return;
3172 }
3173
f4a091c7
DV
3174 if (--pll->refcount == 0) {
3175 WARN_ON(pll->on);
3176 WARN_ON(pll->active);
3177 }
3178
a43f6e0f 3179 crtc->config.shared_dpll = DPLL_ID_PRIVATE;
ee7b9f93
JB
3180}
3181
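/* Editorial summary of the selection order implemented below: drop any PLL
 * this CRTC already holds, use the fixed per-pipe PLL on IBX, otherwise try
 * to share an in-use PLL whose saved hw_state matches the new configuration,
 * and finally fall back to any PLL with a zero refcount; NULL is returned if
 * nothing is available.
 */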
b89a1d39 3182static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
ee7b9f93 3183{
e2b78267
DV
3184 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3185 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
3186 enum intel_dpll_id i;
ee7b9f93 3187
ee7b9f93 3188 if (pll) {
46edb027
DV
3189 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3190 crtc->base.base.id, pll->name);
e2b78267 3191 intel_put_shared_dpll(crtc);
ee7b9f93
JB
3192 }
3193
98b6bd99
DV
3194 if (HAS_PCH_IBX(dev_priv->dev)) {
3195 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
d94ab068 3196 i = (enum intel_dpll_id) crtc->pipe;
e72f9fbf 3197 pll = &dev_priv->shared_dplls[i];
98b6bd99 3198
46edb027
DV
3199 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3200 crtc->base.base.id, pll->name);
98b6bd99
DV
3201
3202 goto found;
3203 }
3204
e72f9fbf
DV
3205 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3206 pll = &dev_priv->shared_dplls[i];
ee7b9f93
JB
3207
3208 /* Only want to check enabled timings first */
3209 if (pll->refcount == 0)
3210 continue;
3211
b89a1d39
DV
3212 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
3213 sizeof(pll->hw_state)) == 0) {
46edb027 3214 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
e2b78267 3215 crtc->base.base.id,
46edb027 3216 pll->name, pll->refcount, pll->active);
ee7b9f93
JB
3217
3218 goto found;
3219 }
3220 }
3221
3222 /* Ok no matching timings, maybe there's a free one? */
e72f9fbf
DV
3223 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3224 pll = &dev_priv->shared_dplls[i];
ee7b9f93 3225 if (pll->refcount == 0) {
46edb027
DV
3226 DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3227 crtc->base.base.id, pll->name);
ee7b9f93
JB
3228 goto found;
3229 }
3230 }
3231
3232 return NULL;
3233
3234found:
a43f6e0f 3235 crtc->config.shared_dpll = i;
46edb027
DV
3236 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3237 pipe_name(crtc->pipe));
ee7b9f93 3238
cdbd2316 3239 if (pll->active == 0) {
66e985c0
DV
3240 memcpy(&pll->hw_state, &crtc->config.dpll_hw_state,
3241 sizeof(pll->hw_state));
3242
46edb027 3243 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
cdbd2316 3244 WARN_ON(pll->on);
e9d6944e 3245 assert_shared_dpll_disabled(dev_priv, pll);
ee7b9f93 3246
15bdd4cf 3247 pll->mode_set(dev_priv, pll);
cdbd2316
DV
3248 }
3249 pll->refcount++;
e04c7350 3250
ee7b9f93
JB
3251 return pll;
3252}
3253
a1520318 3254static void cpt_verify_modeset(struct drm_device *dev, int pipe)
d4270e57
JB
3255{
3256 struct drm_i915_private *dev_priv = dev->dev_private;
23670b32 3257 int dslreg = PIPEDSL(pipe);
d4270e57
JB
3258 u32 temp;
3259
3260 temp = I915_READ(dslreg);
3261 udelay(500);
3262 if (wait_for(I915_READ(dslreg) != temp, 5)) {
d4270e57 3263 if (wait_for(I915_READ(dslreg) != temp, 5))
84f44ce7 3264 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
d4270e57
JB
3265 }
3266}
3267
b074cec8
JB
3268static void ironlake_pfit_enable(struct intel_crtc *crtc)
3269{
3270 struct drm_device *dev = crtc->base.dev;
3271 struct drm_i915_private *dev_priv = dev->dev_private;
3272 int pipe = crtc->pipe;
3273
fd4daa9c 3274 if (crtc->config.pch_pfit.enabled) {
b074cec8
JB
3275 /* Force use of hard-coded filter coefficients
3276 * as some pre-programmed values are broken,
3277 * e.g. x201.
3278 */
3279 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
3280 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
3281 PF_PIPE_SEL_IVB(pipe));
3282 else
3283 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3284 I915_WRITE(PF_WIN_POS(pipe), crtc->config.pch_pfit.pos);
3285 I915_WRITE(PF_WIN_SZ(pipe), crtc->config.pch_pfit.size);
d4270e57
JB
3286 }
3287}
3288
bb53d4ae
VS
3289static void intel_enable_planes(struct drm_crtc *crtc)
3290{
3291 struct drm_device *dev = crtc->dev;
3292 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3293 struct intel_plane *intel_plane;
3294
3295 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3296 if (intel_plane->pipe == pipe)
3297 intel_plane_restore(&intel_plane->base);
3298}
3299
3300static void intel_disable_planes(struct drm_crtc *crtc)
3301{
3302 struct drm_device *dev = crtc->dev;
3303 enum pipe pipe = to_intel_crtc(crtc)->pipe;
3304 struct intel_plane *intel_plane;
3305
3306 list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
3307 if (intel_plane->pipe == pipe)
3308 intel_plane_disable(&intel_plane->base);
3309}
3310
d77e4531
PZ
3311static void hsw_enable_ips(struct intel_crtc *crtc)
3312{
3313 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3314
3315 if (!crtc->config.ips_enabled)
3316 return;
3317
3318 /* We can only enable IPS after we enable a plane and wait for a vblank.
3319 * We guarantee that the plane is enabled by calling intel_enable_ips
3320 * only after intel_enable_plane. And intel_enable_plane already waits
3321 * for a vblank, so all we need to do here is to enable the IPS bit. */
3322 assert_plane_enabled(dev_priv, crtc->plane);
3323 I915_WRITE(IPS_CTL, IPS_ENABLE);
3324}
3325
3326static void hsw_disable_ips(struct intel_crtc *crtc)
3327{
3328 struct drm_device *dev = crtc->base.dev;
3329 struct drm_i915_private *dev_priv = dev->dev_private;
3330
3331 if (!crtc->config.ips_enabled)
3332 return;
3333
3334 assert_plane_enabled(dev_priv, crtc->plane);
3335 I915_WRITE(IPS_CTL, 0);
3336 POSTING_READ(IPS_CTL);
3337
3338 /* We need to wait for a vblank before we can disable the plane. */
3339 intel_wait_for_vblank(dev, crtc->pipe);
3340}
3341
3342/** Loads the palette/gamma unit for the CRTC with the prepared values */
3343static void intel_crtc_load_lut(struct drm_crtc *crtc)
3344{
3345 struct drm_device *dev = crtc->dev;
3346 struct drm_i915_private *dev_priv = dev->dev_private;
3347 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3348 enum pipe pipe = intel_crtc->pipe;
3349 int palreg = PALETTE(pipe);
3350 int i;
3351 bool reenable_ips = false;
3352
3353 /* The clocks have to be on to load the palette. */
3354 if (!crtc->enabled || !intel_crtc->active)
3355 return;
3356
3357 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3358 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3359 assert_dsi_pll_enabled(dev_priv);
3360 else
3361 assert_pll_enabled(dev_priv, pipe);
3362 }
3363
3364 /* use legacy palette for Ironlake */
3365 if (HAS_PCH_SPLIT(dev))
3366 palreg = LGC_PALETTE(pipe);
3367
3368 /* Workaround : Do not read or write the pipe palette/gamma data while
3369 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
3370 */
3371 if (intel_crtc->config.ips_enabled &&
3372 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
3373 GAMMA_MODE_MODE_SPLIT)) {
3374 hsw_disable_ips(intel_crtc);
3375 reenable_ips = true;
3376 }
3377
3378 for (i = 0; i < 256; i++) {
3379 I915_WRITE(palreg + 4 * i,
3380 (intel_crtc->lut_r[i] << 16) |
3381 (intel_crtc->lut_g[i] << 8) |
3382 intel_crtc->lut_b[i]);
3383 }
3384
3385 if (reenable_ips)
3386 hsw_enable_ips(intel_crtc);
3387}
3388
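/*
 * Minimal sketch (hypothetical helper, not part of the driver) of the
 * word layout the palette loop above writes, one 32-bit entry per index:
 * red in bits 23:16, green in bits 15:8, blue in bits 7:0.
 */
static inline u32 example_legacy_palette_word(u8 r, u8 g, u8 b)
{
	return ((u32)r << 16) | ((u32)g << 8) | b;
}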
f67a559d
JB
3389static void ironlake_crtc_enable(struct drm_crtc *crtc)
3390{
3391 struct drm_device *dev = crtc->dev;
3392 struct drm_i915_private *dev_priv = dev->dev_private;
3393 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3394 struct intel_encoder *encoder;
f67a559d
JB
3395 int pipe = intel_crtc->pipe;
3396 int plane = intel_crtc->plane;
f67a559d 3397
08a48469
DV
3398 WARN_ON(!crtc->enabled);
3399
f67a559d
JB
3400 if (intel_crtc->active)
3401 return;
3402
3403 intel_crtc->active = true;
8664281b
PZ
3404
3405 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3406 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
3407
f6736a1a 3408 for_each_encoder_on_crtc(dev, crtc, encoder)
952735ee
DV
3409 if (encoder->pre_enable)
3410 encoder->pre_enable(encoder);
f67a559d 3411
5bfe2ac0 3412 if (intel_crtc->config.has_pch_encoder) {
fff367c7
DV
3413 /* Note: FDI PLL enabling _must_ be done before we enable the
3414 * cpu pipes, hence this is separate from all the other fdi/pch
3415 * enabling. */
88cefb6c 3416 ironlake_fdi_pll_enable(intel_crtc);
46b6f814
DV
3417 } else {
3418 assert_fdi_tx_disabled(dev_priv, pipe);
3419 assert_fdi_rx_disabled(dev_priv, pipe);
3420 }
f67a559d 3421
b074cec8 3422 ironlake_pfit_enable(intel_crtc);
f67a559d 3423
9c54c0dd
JB
3424 /*
3425 * On ILK+ LUT must be loaded before the pipe is running but with
3426 * clocks enabled
3427 */
3428 intel_crtc_load_lut(crtc);
3429
f37fcc2a 3430 intel_update_watermarks(crtc);
5bfe2ac0 3431 intel_enable_pipe(dev_priv, pipe,
23538ef1 3432 intel_crtc->config.has_pch_encoder, false);
f67a559d 3433 intel_enable_plane(dev_priv, plane, pipe);
bb53d4ae 3434 intel_enable_planes(crtc);
5c38d48c 3435 intel_crtc_update_cursor(crtc, true);
f67a559d 3436
5bfe2ac0 3437 if (intel_crtc->config.has_pch_encoder)
f67a559d 3438 ironlake_pch_enable(crtc);
c98e9dcf 3439
d1ebd816 3440 mutex_lock(&dev->struct_mutex);
bed4a673 3441 intel_update_fbc(dev);
d1ebd816
BW
3442 mutex_unlock(&dev->struct_mutex);
3443
fa5c73b1
DV
3444 for_each_encoder_on_crtc(dev, crtc, encoder)
3445 encoder->enable(encoder);
61b77ddd
DV
3446
3447 if (HAS_PCH_CPT(dev))
a1520318 3448 cpt_verify_modeset(dev, intel_crtc->pipe);
6ce94100
DV
3449
3450 /*
3451 * There seems to be a race in PCH platform hw (at least on some
3452 * outputs) where an enabled pipe still completes any pageflip right
3453 * away (as if the pipe is off) instead of waiting for vblank. As soon
 3454 * as the first vblank happened, everything works as expected. Hence just
3455 * wait for one vblank before returning to avoid strange things
3456 * happening.
3457 */
3458 intel_wait_for_vblank(dev, intel_crtc->pipe);
6be4a607
JB
3459}
3460
42db64ef
PZ
3461/* IPS only exists on ULT machines and is tied to pipe A. */
3462static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
3463{
f5adf94e 3464 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
42db64ef
PZ
3465}
3466
dda9a66a
VS
3467static void haswell_crtc_enable_planes(struct drm_crtc *crtc)
3468{
3469 struct drm_device *dev = crtc->dev;
3470 struct drm_i915_private *dev_priv = dev->dev_private;
3471 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3472 int pipe = intel_crtc->pipe;
3473 int plane = intel_crtc->plane;
3474
3475 intel_enable_plane(dev_priv, plane, pipe);
3476 intel_enable_planes(crtc);
3477 intel_crtc_update_cursor(crtc, true);
3478
3479 hsw_enable_ips(intel_crtc);
3480
3481 mutex_lock(&dev->struct_mutex);
3482 intel_update_fbc(dev);
3483 mutex_unlock(&dev->struct_mutex);
3484}
3485
3486static void haswell_crtc_disable_planes(struct drm_crtc *crtc)
3487{
3488 struct drm_device *dev = crtc->dev;
3489 struct drm_i915_private *dev_priv = dev->dev_private;
3490 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3491 int pipe = intel_crtc->pipe;
3492 int plane = intel_crtc->plane;
3493
3494 intel_crtc_wait_for_pending_flips(crtc);
3495 drm_vblank_off(dev, pipe);
3496
3497 /* FBC must be disabled before disabling the plane on HSW. */
3498 if (dev_priv->fbc.plane == plane)
3499 intel_disable_fbc(dev);
3500
3501 hsw_disable_ips(intel_crtc);
3502
3503 intel_crtc_update_cursor(crtc, false);
3504 intel_disable_planes(crtc);
3505 intel_disable_plane(dev_priv, plane, pipe);
3506}
3507
e4916946
PZ
3508/*
3509 * This implements the workaround described in the "notes" section of the mode
3510 * set sequence documentation. When going from no pipes or single pipe to
3511 * multiple pipes, and planes are enabled after the pipe, we need to wait at
3512 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
3513 */
3514static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
3515{
3516 struct drm_device *dev = crtc->base.dev;
3517 struct intel_crtc *crtc_it, *other_active_crtc = NULL;
3518
3519 /* We want to get the other_active_crtc only if there's only 1 other
3520 * active crtc. */
3521 list_for_each_entry(crtc_it, &dev->mode_config.crtc_list, base.head) {
3522 if (!crtc_it->active || crtc_it == crtc)
3523 continue;
3524
3525 if (other_active_crtc)
3526 return;
3527
3528 other_active_crtc = crtc_it;
3529 }
3530 if (!other_active_crtc)
3531 return;
3532
3533 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3534 intel_wait_for_vblank(dev, other_active_crtc->pipe);
3535}
3536
4f771f10
PZ
3537static void haswell_crtc_enable(struct drm_crtc *crtc)
3538{
3539 struct drm_device *dev = crtc->dev;
3540 struct drm_i915_private *dev_priv = dev->dev_private;
3541 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3542 struct intel_encoder *encoder;
3543 int pipe = intel_crtc->pipe;
4f771f10
PZ
3544
3545 WARN_ON(!crtc->enabled);
3546
3547 if (intel_crtc->active)
3548 return;
3549
3550 intel_crtc->active = true;
8664281b
PZ
3551
3552 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true);
3553 if (intel_crtc->config.has_pch_encoder)
3554 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
3555
5bfe2ac0 3556 if (intel_crtc->config.has_pch_encoder)
04945641 3557 dev_priv->display.fdi_link_train(crtc);
4f771f10
PZ
3558
3559 for_each_encoder_on_crtc(dev, crtc, encoder)
3560 if (encoder->pre_enable)
3561 encoder->pre_enable(encoder);
3562
1f544388 3563 intel_ddi_enable_pipe_clock(intel_crtc);
4f771f10 3564
b074cec8 3565 ironlake_pfit_enable(intel_crtc);
4f771f10
PZ
3566
3567 /*
3568 * On ILK+ LUT must be loaded before the pipe is running but with
3569 * clocks enabled
3570 */
3571 intel_crtc_load_lut(crtc);
3572
1f544388 3573 intel_ddi_set_pipe_settings(crtc);
8228c251 3574 intel_ddi_enable_transcoder_func(crtc);
4f771f10 3575
f37fcc2a 3576 intel_update_watermarks(crtc);
5bfe2ac0 3577 intel_enable_pipe(dev_priv, pipe,
23538ef1 3578 intel_crtc->config.has_pch_encoder, false);
42db64ef 3579
5bfe2ac0 3580 if (intel_crtc->config.has_pch_encoder)
1507e5bd 3581 lpt_pch_enable(crtc);
4f771f10 3582
8807e55b 3583 for_each_encoder_on_crtc(dev, crtc, encoder) {
4f771f10 3584 encoder->enable(encoder);
8807e55b
JN
3585 intel_opregion_notify_encoder(encoder, true);
3586 }
4f771f10 3587
e4916946
PZ
3588 /* If we change the relative order between pipe/planes enabling, we need
3589 * to change the workaround. */
3590 haswell_mode_set_planes_workaround(intel_crtc);
dda9a66a
VS
3591 haswell_crtc_enable_planes(crtc);
3592
4f771f10
PZ
3593 /*
3594 * There seems to be a race in PCH platform hw (at least on some
3595 * outputs) where an enabled pipe still completes any pageflip right
3596 * away (as if the pipe is off) instead of waiting for vblank. As soon
 3597 * as the first vblank happened, everything works as expected. Hence just
3598 * wait for one vblank before returning to avoid strange things
3599 * happening.
3600 */
3601 intel_wait_for_vblank(dev, intel_crtc->pipe);
3602}
3603
3f8dce3a
DV
3604static void ironlake_pfit_disable(struct intel_crtc *crtc)
3605{
3606 struct drm_device *dev = crtc->base.dev;
3607 struct drm_i915_private *dev_priv = dev->dev_private;
3608 int pipe = crtc->pipe;
3609
3610 /* To avoid upsetting the power well on haswell only disable the pfit if
3611 * it's in use. The hw state code will make sure we get this right. */
fd4daa9c 3612 if (crtc->config.pch_pfit.enabled) {
3f8dce3a
DV
3613 I915_WRITE(PF_CTL(pipe), 0);
3614 I915_WRITE(PF_WIN_POS(pipe), 0);
3615 I915_WRITE(PF_WIN_SZ(pipe), 0);
3616 }
3617}
3618
6be4a607
JB
3619static void ironlake_crtc_disable(struct drm_crtc *crtc)
3620{
3621 struct drm_device *dev = crtc->dev;
3622 struct drm_i915_private *dev_priv = dev->dev_private;
3623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3624 struct intel_encoder *encoder;
6be4a607
JB
3625 int pipe = intel_crtc->pipe;
3626 int plane = intel_crtc->plane;
5eddb70b 3627 u32 reg, temp;
b52eb4dc 3628
ef9c3aee 3629
f7abfe8b
CW
3630 if (!intel_crtc->active)
3631 return;
3632
ea9d758d
DV
3633 for_each_encoder_on_crtc(dev, crtc, encoder)
3634 encoder->disable(encoder);
3635
e6c3a2a6 3636 intel_crtc_wait_for_pending_flips(crtc);
6be4a607 3637 drm_vblank_off(dev, pipe);
913d8d11 3638
5c3fe8b0 3639 if (dev_priv->fbc.plane == plane)
973d04f9 3640 intel_disable_fbc(dev);
2c07245f 3641
0d5b8c61 3642 intel_crtc_update_cursor(crtc, false);
bb53d4ae 3643 intel_disable_planes(crtc);
0d5b8c61
VS
3644 intel_disable_plane(dev_priv, plane, pipe);
3645
d925c59a
DV
3646 if (intel_crtc->config.has_pch_encoder)
3647 intel_set_pch_fifo_underrun_reporting(dev, pipe, false);
3648
b24e7179 3649 intel_disable_pipe(dev_priv, pipe);
32f9d658 3650
3f8dce3a 3651 ironlake_pfit_disable(intel_crtc);
2c07245f 3652
bf49ec8c
DV
3653 for_each_encoder_on_crtc(dev, crtc, encoder)
3654 if (encoder->post_disable)
3655 encoder->post_disable(encoder);
2c07245f 3656
d925c59a
DV
3657 if (intel_crtc->config.has_pch_encoder) {
3658 ironlake_fdi_disable(crtc);
913d8d11 3659
d925c59a
DV
3660 ironlake_disable_pch_transcoder(dev_priv, pipe);
3661 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
6be4a607 3662
d925c59a
DV
3663 if (HAS_PCH_CPT(dev)) {
3664 /* disable TRANS_DP_CTL */
3665 reg = TRANS_DP_CTL(pipe);
3666 temp = I915_READ(reg);
3667 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
3668 TRANS_DP_PORT_SEL_MASK);
3669 temp |= TRANS_DP_PORT_SEL_NONE;
3670 I915_WRITE(reg, temp);
3671
3672 /* disable DPLL_SEL */
3673 temp = I915_READ(PCH_DPLL_SEL);
11887397 3674 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
d925c59a 3675 I915_WRITE(PCH_DPLL_SEL, temp);
9db4a9c7 3676 }
e3421a18 3677
d925c59a 3678 /* disable PCH DPLL */
e72f9fbf 3679 intel_disable_shared_dpll(intel_crtc);
8db9d77b 3680
d925c59a
DV
3681 ironlake_fdi_pll_disable(intel_crtc);
3682 }
6b383a7f 3683
f7abfe8b 3684 intel_crtc->active = false;
46ba614c 3685 intel_update_watermarks(crtc);
d1ebd816
BW
3686
3687 mutex_lock(&dev->struct_mutex);
6b383a7f 3688 intel_update_fbc(dev);
d1ebd816 3689 mutex_unlock(&dev->struct_mutex);
6be4a607 3690}
1b3c7a47 3691
4f771f10 3692static void haswell_crtc_disable(struct drm_crtc *crtc)
ee7b9f93 3693{
4f771f10
PZ
3694 struct drm_device *dev = crtc->dev;
3695 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93 3696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4f771f10
PZ
3697 struct intel_encoder *encoder;
3698 int pipe = intel_crtc->pipe;
3b117c8f 3699 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
ee7b9f93 3700
4f771f10
PZ
3701 if (!intel_crtc->active)
3702 return;
3703
dda9a66a
VS
3704 haswell_crtc_disable_planes(crtc);
3705
8807e55b
JN
3706 for_each_encoder_on_crtc(dev, crtc, encoder) {
3707 intel_opregion_notify_encoder(encoder, false);
4f771f10 3708 encoder->disable(encoder);
8807e55b 3709 }
4f771f10 3710
8664281b
PZ
3711 if (intel_crtc->config.has_pch_encoder)
3712 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false);
4f771f10
PZ
3713 intel_disable_pipe(dev_priv, pipe);
3714
ad80a810 3715 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4f771f10 3716
3f8dce3a 3717 ironlake_pfit_disable(intel_crtc);
4f771f10 3718
1f544388 3719 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10
PZ
3720
3721 for_each_encoder_on_crtc(dev, crtc, encoder)
3722 if (encoder->post_disable)
3723 encoder->post_disable(encoder);
3724
88adfff1 3725 if (intel_crtc->config.has_pch_encoder) {
ab4d966c 3726 lpt_disable_pch_transcoder(dev_priv);
8664281b 3727 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
1ad960f2 3728 intel_ddi_fdi_disable(crtc);
83616634 3729 }
4f771f10
PZ
3730
3731 intel_crtc->active = false;
46ba614c 3732 intel_update_watermarks(crtc);
4f771f10
PZ
3733
3734 mutex_lock(&dev->struct_mutex);
3735 intel_update_fbc(dev);
3736 mutex_unlock(&dev->struct_mutex);
3737}
3738
ee7b9f93
JB
3739static void ironlake_crtc_off(struct drm_crtc *crtc)
3740{
3741 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
e72f9fbf 3742 intel_put_shared_dpll(intel_crtc);
ee7b9f93
JB
3743}
3744
6441ab5f
PZ
3745static void haswell_crtc_off(struct drm_crtc *crtc)
3746{
3747 intel_ddi_put_crtc_pll(crtc);
3748}
3749
02e792fb
DV
3750static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3751{
02e792fb 3752 if (!enable && intel_crtc->overlay) {
23f09ce3 3753 struct drm_device *dev = intel_crtc->base.dev;
ce453d81 3754 struct drm_i915_private *dev_priv = dev->dev_private;
03f77ea5 3755
23f09ce3 3756 mutex_lock(&dev->struct_mutex);
ce453d81
CW
3757 dev_priv->mm.interruptible = false;
3758 (void) intel_overlay_switch_off(intel_crtc->overlay);
3759 dev_priv->mm.interruptible = true;
23f09ce3 3760 mutex_unlock(&dev->struct_mutex);
02e792fb 3761 }
02e792fb 3762
5dcdbcb0
CW
3763 /* Let userspace switch the overlay on again. In most cases userspace
3764 * has to recompute where to put it anyway.
3765 */
02e792fb
DV
3766}
3767
61bc95c1
EE
3768/**
 3769 * g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
3770 * cursor plane briefly if not already running after enabling the display
3771 * plane.
3772 * This workaround avoids occasional blank screens when self refresh is
3773 * enabled.
3774 */
3775static void
3776g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
3777{
3778 u32 cntl = I915_READ(CURCNTR(pipe));
3779
3780 if ((cntl & CURSOR_MODE) == 0) {
3781 u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
3782
3783 I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
3784 I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
3785 intel_wait_for_vblank(dev_priv->dev, pipe);
3786 I915_WRITE(CURCNTR(pipe), cntl);
3787 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3788 I915_WRITE(FW_BLC_SELF, fw_bcl_self);
3789 }
3790}
3791
2dd24552
JB
3792static void i9xx_pfit_enable(struct intel_crtc *crtc)
3793{
3794 struct drm_device *dev = crtc->base.dev;
3795 struct drm_i915_private *dev_priv = dev->dev_private;
3796 struct intel_crtc_config *pipe_config = &crtc->config;
3797
328d8e82 3798 if (!crtc->config.gmch_pfit.control)
2dd24552
JB
3799 return;
3800
2dd24552 3801 /*
c0b03411
DV
3802 * The panel fitter should only be adjusted whilst the pipe is disabled,
3803 * according to register description and PRM.
2dd24552 3804 */
c0b03411
DV
3805 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
3806 assert_pipe_disabled(dev_priv, crtc->pipe);
2dd24552 3807
b074cec8
JB
3808 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
3809 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5a80c45c
DV
3810
3811 /* Border color in case we don't scale up to the full screen. Black by
3812 * default, change to something else for debugging. */
3813 I915_WRITE(BCLRPAT(crtc->pipe), 0);
2dd24552
JB
3814}
3815
89b667f8
JB
3816static void valleyview_crtc_enable(struct drm_crtc *crtc)
3817{
3818 struct drm_device *dev = crtc->dev;
3819 struct drm_i915_private *dev_priv = dev->dev_private;
3820 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3821 struct intel_encoder *encoder;
3822 int pipe = intel_crtc->pipe;
3823 int plane = intel_crtc->plane;
23538ef1 3824 bool is_dsi;
89b667f8
JB
3825
3826 WARN_ON(!crtc->enabled);
3827
3828 if (intel_crtc->active)
3829 return;
3830
3831 intel_crtc->active = true;
89b667f8 3832
89b667f8
JB
3833 for_each_encoder_on_crtc(dev, crtc, encoder)
3834 if (encoder->pre_pll_enable)
3835 encoder->pre_pll_enable(encoder);
3836
23538ef1
JN
3837 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
3838
e9fd1c02
JN
3839 if (!is_dsi)
3840 vlv_enable_pll(intel_crtc);
89b667f8
JB
3841
3842 for_each_encoder_on_crtc(dev, crtc, encoder)
3843 if (encoder->pre_enable)
3844 encoder->pre_enable(encoder);
3845
2dd24552
JB
3846 i9xx_pfit_enable(intel_crtc);
3847
63cbb074
VS
3848 intel_crtc_load_lut(crtc);
3849
f37fcc2a 3850 intel_update_watermarks(crtc);
23538ef1 3851 intel_enable_pipe(dev_priv, pipe, false, is_dsi);
89b667f8 3852 intel_enable_plane(dev_priv, plane, pipe);
bb53d4ae 3853 intel_enable_planes(crtc);
5c38d48c 3854 intel_crtc_update_cursor(crtc, true);
89b667f8 3855
89b667f8 3856 intel_update_fbc(dev);
5004945f
JN
3857
3858 for_each_encoder_on_crtc(dev, crtc, encoder)
3859 encoder->enable(encoder);
89b667f8
JB
3860}
3861
0b8765c6 3862static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
3863{
3864 struct drm_device *dev = crtc->dev;
79e53945
JB
3865 struct drm_i915_private *dev_priv = dev->dev_private;
3866 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3867 struct intel_encoder *encoder;
79e53945 3868 int pipe = intel_crtc->pipe;
80824003 3869 int plane = intel_crtc->plane;
79e53945 3870
08a48469
DV
3871 WARN_ON(!crtc->enabled);
3872
f7abfe8b
CW
3873 if (intel_crtc->active)
3874 return;
3875
3876 intel_crtc->active = true;
6b383a7f 3877
9d6d9f19
MK
3878 for_each_encoder_on_crtc(dev, crtc, encoder)
3879 if (encoder->pre_enable)
3880 encoder->pre_enable(encoder);
3881
f6736a1a
DV
3882 i9xx_enable_pll(intel_crtc);
3883
2dd24552
JB
3884 i9xx_pfit_enable(intel_crtc);
3885
63cbb074
VS
3886 intel_crtc_load_lut(crtc);
3887
f37fcc2a 3888 intel_update_watermarks(crtc);
23538ef1 3889 intel_enable_pipe(dev_priv, pipe, false, false);
b24e7179 3890 intel_enable_plane(dev_priv, plane, pipe);
bb53d4ae 3891 intel_enable_planes(crtc);
22e407d7 3892 /* The fixup needs to happen before cursor is enabled */
61bc95c1
EE
3893 if (IS_G4X(dev))
3894 g4x_fixup_plane(dev_priv, pipe);
22e407d7 3895 intel_crtc_update_cursor(crtc, true);
79e53945 3896
0b8765c6
JB
3897 /* Give the overlay scaler a chance to enable if it's on this pipe */
3898 intel_crtc_dpms_overlay(intel_crtc, true);
ef9c3aee 3899
f440eb13 3900 intel_update_fbc(dev);
ef9c3aee 3901
fa5c73b1
DV
3902 for_each_encoder_on_crtc(dev, crtc, encoder)
3903 encoder->enable(encoder);
0b8765c6 3904}
79e53945 3905
87476d63
DV
3906static void i9xx_pfit_disable(struct intel_crtc *crtc)
3907{
3908 struct drm_device *dev = crtc->base.dev;
3909 struct drm_i915_private *dev_priv = dev->dev_private;
87476d63 3910
328d8e82
DV
3911 if (!crtc->config.gmch_pfit.control)
3912 return;
87476d63 3913
328d8e82 3914 assert_pipe_disabled(dev_priv, crtc->pipe);
87476d63 3915
328d8e82
DV
3916 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
3917 I915_READ(PFIT_CONTROL));
3918 I915_WRITE(PFIT_CONTROL, 0);
87476d63
DV
3919}
3920
0b8765c6
JB
3921static void i9xx_crtc_disable(struct drm_crtc *crtc)
3922{
3923 struct drm_device *dev = crtc->dev;
3924 struct drm_i915_private *dev_priv = dev->dev_private;
3925 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 3926 struct intel_encoder *encoder;
0b8765c6
JB
3927 int pipe = intel_crtc->pipe;
3928 int plane = intel_crtc->plane;
ef9c3aee 3929
f7abfe8b
CW
3930 if (!intel_crtc->active)
3931 return;
3932
ea9d758d
DV
3933 for_each_encoder_on_crtc(dev, crtc, encoder)
3934 encoder->disable(encoder);
3935
0b8765c6 3936 /* Give the overlay scaler a chance to disable if it's on this pipe */
e6c3a2a6
CW
3937 intel_crtc_wait_for_pending_flips(crtc);
3938 drm_vblank_off(dev, pipe);
0b8765c6 3939
5c3fe8b0 3940 if (dev_priv->fbc.plane == plane)
973d04f9 3941 intel_disable_fbc(dev);
79e53945 3942
0d5b8c61
VS
3943 intel_crtc_dpms_overlay(intel_crtc, false);
3944 intel_crtc_update_cursor(crtc, false);
bb53d4ae 3945 intel_disable_planes(crtc);
b24e7179 3946 intel_disable_plane(dev_priv, plane, pipe);
0d5b8c61 3947
b24e7179 3948 intel_disable_pipe(dev_priv, pipe);
24a1f16d 3949
87476d63 3950 i9xx_pfit_disable(intel_crtc);
24a1f16d 3951
89b667f8
JB
3952 for_each_encoder_on_crtc(dev, crtc, encoder)
3953 if (encoder->post_disable)
3954 encoder->post_disable(encoder);
3955
f6071166
JB
3956 if (IS_VALLEYVIEW(dev) && !intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
3957 vlv_disable_pll(dev_priv, pipe);
3958 else if (!IS_VALLEYVIEW(dev))
e9fd1c02 3959 i9xx_disable_pll(dev_priv, pipe);
0b8765c6 3960
f7abfe8b 3961 intel_crtc->active = false;
46ba614c 3962 intel_update_watermarks(crtc);
f37fcc2a 3963
6b383a7f 3964 intel_update_fbc(dev);
0b8765c6
JB
3965}
3966
ee7b9f93
JB
3967static void i9xx_crtc_off(struct drm_crtc *crtc)
3968{
3969}
3970
976f8a20
DV
3971static void intel_crtc_update_sarea(struct drm_crtc *crtc,
3972 bool enabled)
2c07245f
ZW
3973{
3974 struct drm_device *dev = crtc->dev;
3975 struct drm_i915_master_private *master_priv;
3976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3977 int pipe = intel_crtc->pipe;
79e53945
JB
3978
3979 if (!dev->primary->master)
3980 return;
3981
3982 master_priv = dev->primary->master->driver_priv;
3983 if (!master_priv->sarea_priv)
3984 return;
3985
79e53945
JB
3986 switch (pipe) {
3987 case 0:
3988 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3989 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3990 break;
3991 case 1:
3992 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3993 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3994 break;
3995 default:
9db4a9c7 3996 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
79e53945
JB
3997 break;
3998 }
79e53945
JB
3999}
4000
976f8a20
DV
4001/**
4002 * Sets the power management mode of the pipe and plane.
4003 */
4004void intel_crtc_update_dpms(struct drm_crtc *crtc)
4005{
4006 struct drm_device *dev = crtc->dev;
4007 struct drm_i915_private *dev_priv = dev->dev_private;
4008 struct intel_encoder *intel_encoder;
4009 bool enable = false;
4010
4011 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
4012 enable |= intel_encoder->connectors_active;
4013
4014 if (enable)
4015 dev_priv->display.crtc_enable(crtc);
4016 else
4017 dev_priv->display.crtc_disable(crtc);
4018
4019 intel_crtc_update_sarea(crtc, enable);
4020}
4021
cdd59983
CW
4022static void intel_crtc_disable(struct drm_crtc *crtc)
4023{
cdd59983 4024 struct drm_device *dev = crtc->dev;
976f8a20 4025 struct drm_connector *connector;
ee7b9f93 4026 struct drm_i915_private *dev_priv = dev->dev_private;
7b9f35a6 4027 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
cdd59983 4028
976f8a20
DV
4029 /* crtc should still be enabled when we disable it. */
4030 WARN_ON(!crtc->enabled);
4031
4032 dev_priv->display.crtc_disable(crtc);
c77bf565 4033 intel_crtc->eld_vld = false;
976f8a20 4034 intel_crtc_update_sarea(crtc, false);
ee7b9f93
JB
4035 dev_priv->display.off(crtc);
4036
931872fc 4037 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
93ce0ba6 4038 assert_cursor_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
931872fc 4039 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
cdd59983
CW
4040
4041 if (crtc->fb) {
4042 mutex_lock(&dev->struct_mutex);
1690e1eb 4043 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
cdd59983 4044 mutex_unlock(&dev->struct_mutex);
976f8a20
DV
4045 crtc->fb = NULL;
4046 }
4047
4048 /* Update computed state. */
4049 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
4050 if (!connector->encoder || !connector->encoder->crtc)
4051 continue;
4052
4053 if (connector->encoder->crtc != crtc)
4054 continue;
4055
4056 connector->dpms = DRM_MODE_DPMS_OFF;
4057 to_intel_encoder(connector->encoder)->connectors_active = false;
cdd59983
CW
4058 }
4059}
4060
ea5b213a 4061void intel_encoder_destroy(struct drm_encoder *encoder)
7e7d76c3 4062{
4ef69c7a 4063 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 4064
ea5b213a
CW
4065 drm_encoder_cleanup(encoder);
4066 kfree(intel_encoder);
7e7d76c3
JB
4067}
4068
9237329d 4069/* Simple dpms helper for encoders with just one connector, no cloning and only
5ab432ef
DV
4070 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
4071 * state of the entire output pipe. */
9237329d 4072static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
7e7d76c3 4073{
5ab432ef
DV
4074 if (mode == DRM_MODE_DPMS_ON) {
4075 encoder->connectors_active = true;
4076
b2cabb0e 4077 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef
DV
4078 } else {
4079 encoder->connectors_active = false;
4080
b2cabb0e 4081 intel_crtc_update_dpms(encoder->base.crtc);
5ab432ef 4082 }
79e53945
JB
4083}
4084
0a91ca29
DV
4085/* Cross check the actual hw state with our own modeset state tracking (and it's
4086 * internal consistency). */
b980514c 4087static void intel_connector_check_state(struct intel_connector *connector)
79e53945 4088{
0a91ca29
DV
4089 if (connector->get_hw_state(connector)) {
4090 struct intel_encoder *encoder = connector->encoder;
4091 struct drm_crtc *crtc;
4092 bool encoder_enabled;
4093 enum pipe pipe;
4094
4095 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4096 connector->base.base.id,
4097 drm_get_connector_name(&connector->base));
4098
4099 WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
4100 "wrong connector dpms state\n");
4101 WARN(connector->base.encoder != &encoder->base,
4102 "active connector not linked to encoder\n");
4103 WARN(!encoder->connectors_active,
4104 "encoder->connectors_active not set\n");
4105
4106 encoder_enabled = encoder->get_hw_state(encoder, &pipe);
4107 WARN(!encoder_enabled, "encoder not enabled\n");
4108 if (WARN_ON(!encoder->base.crtc))
4109 return;
4110
4111 crtc = encoder->base.crtc;
4112
4113 WARN(!crtc->enabled, "crtc not enabled\n");
4114 WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
4115 WARN(pipe != to_intel_crtc(crtc)->pipe,
4116 "encoder active on the wrong pipe\n");
4117 }
79e53945
JB
4118}
4119
5ab432ef
DV
4120/* Even simpler default implementation, if there's really no special case to
4121 * consider. */
4122void intel_connector_dpms(struct drm_connector *connector, int mode)
79e53945 4123{
5ab432ef 4124 struct intel_encoder *encoder = intel_attached_encoder(connector);
d4270e57 4125
5ab432ef
DV
4126 /* All the simple cases only support two dpms states. */
4127 if (mode != DRM_MODE_DPMS_ON)
4128 mode = DRM_MODE_DPMS_OFF;
d4270e57 4129
5ab432ef
DV
4130 if (mode == connector->dpms)
4131 return;
4132
4133 connector->dpms = mode;
4134
4135 /* Only need to change hw state when actually enabled */
4136 if (encoder->base.crtc)
4137 intel_encoder_dpms(encoder, mode);
4138 else
8af6cf88 4139 WARN_ON(encoder->connectors_active != false);
0a91ca29 4140
b980514c 4141 intel_modeset_check_state(connector->dev);
79e53945
JB
4142}
4143
f0947c37
DV
4144/* Simple connector->get_hw_state implementation for encoders that support only
4145 * one connector and no cloning and hence the encoder state determines the state
4146 * of the connector. */
4147bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 4148{
24929352 4149 enum pipe pipe = 0;
f0947c37 4150 struct intel_encoder *encoder = connector->encoder;
ea5b213a 4151
f0947c37 4152 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
4153}
4154
1857e1da
DV
4155static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
4156 struct intel_crtc_config *pipe_config)
4157{
4158 struct drm_i915_private *dev_priv = dev->dev_private;
4159 struct intel_crtc *pipe_B_crtc =
4160 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
4161
4162 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
4163 pipe_name(pipe), pipe_config->fdi_lanes);
4164 if (pipe_config->fdi_lanes > 4) {
4165 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
4166 pipe_name(pipe), pipe_config->fdi_lanes);
4167 return false;
4168 }
4169
4170 if (IS_HASWELL(dev)) {
4171 if (pipe_config->fdi_lanes > 2) {
4172 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
4173 pipe_config->fdi_lanes);
4174 return false;
4175 } else {
4176 return true;
4177 }
4178 }
4179
4180 if (INTEL_INFO(dev)->num_pipes == 2)
4181 return true;
4182
4183 /* Ivybridge 3 pipe is really complicated */
4184 switch (pipe) {
4185 case PIPE_A:
4186 return true;
4187 case PIPE_B:
4188 if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
4189 pipe_config->fdi_lanes > 2) {
4190 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4191 pipe_name(pipe), pipe_config->fdi_lanes);
4192 return false;
4193 }
4194 return true;
4195 case PIPE_C:
1e833f40 4196 if (!pipe_has_enabled_pch(pipe_B_crtc) ||
1857e1da
DV
4197 pipe_B_crtc->config.fdi_lanes <= 2) {
4198 if (pipe_config->fdi_lanes > 2) {
4199 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
4200 pipe_name(pipe), pipe_config->fdi_lanes);
4201 return false;
4202 }
4203 } else {
4204 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
4205 return false;
4206 }
4207 return true;
4208 default:
4209 BUG();
4210 }
4211}
4212
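/*
 * Worked example (illustrative): on a three-pipe Ivybridge, pipe B driving
 * its PCH link with 4 FDI lanes leaves no shared lanes for pipe C, so a
 * PCH output on pipe C is rejected outright; with pipe B at 2 lanes or
 * fewer, pipe C may use up to 2 lanes of its own.
 */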
e29c22c0
DV
4213#define RETRY 1
4214static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
4215 struct intel_crtc_config *pipe_config)
877d48d5 4216{
1857e1da 4217 struct drm_device *dev = intel_crtc->base.dev;
877d48d5 4218 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
ff9a6750 4219 int lane, link_bw, fdi_dotclock;
e29c22c0 4220 bool setup_ok, needs_recompute = false;
877d48d5 4221
e29c22c0 4222retry:
877d48d5
DV
4223 /* FDI is a binary signal running at ~2.7GHz, encoding
4224 * each output octet as 10 bits. The actual frequency
4225 * is stored as a divider into a 100MHz clock, and the
4226 * mode pixel clock is stored in units of 1KHz.
4227 * Hence the bw of each lane in terms of the mode signal
4228 * is:
4229 */
4230 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4231
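 /*
 * Worked example (illustrative): with the default FDI link frequency of
 * 27 (i.e. 2.7 GHz), link_bw evaluates to 27 * 100,000,000 / 1,000 / 10
 * = 270,000 in the same kHz units as the mode pixel clock, i.e. each
 * lane provides the equivalent of a 270 MHz pixel clock once the 8b/10b
 * encoding overhead is stripped.
 */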
241bfc38 4232 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 4233
2bd89a07 4234 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
4235 pipe_config->pipe_bpp);
4236
4237 pipe_config->fdi_lanes = lane;
4238
2bd89a07 4239 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
877d48d5 4240 link_bw, &pipe_config->fdi_m_n);
1857e1da 4241
e29c22c0
DV
4242 setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
4243 intel_crtc->pipe, pipe_config);
4244 if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
4245 pipe_config->pipe_bpp -= 2*3;
4246 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
4247 pipe_config->pipe_bpp);
4248 needs_recompute = true;
4249 pipe_config->bw_constrained = true;
4250
4251 goto retry;
4252 }
4253
4254 if (needs_recompute)
4255 return RETRY;
4256
4257 return setup_ok ? 0 : -EINVAL;
877d48d5
DV
4258}
4259
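/*
 * Worked example (illustrative): a mode that needs too many FDI lanes at
 * 30 bpp is retried above at 24 bpp and then 18 bpp (2 bits per channel
 * dropped each pass); once any reduction has happened the function
 * returns RETRY so the caller recomputes the whole pipe config with the
 * now bw_constrained bpp.
 */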
42db64ef
PZ
4260static void hsw_compute_ips_config(struct intel_crtc *crtc,
4261 struct intel_crtc_config *pipe_config)
4262{
3c4ca58c
PZ
4263 pipe_config->ips_enabled = i915_enable_ips &&
4264 hsw_crtc_supports_ips(crtc) &&
b6dfdc9b 4265 pipe_config->pipe_bpp <= 24;
42db64ef
PZ
4266}
4267
a43f6e0f 4268static int intel_crtc_compute_config(struct intel_crtc *crtc,
e29c22c0 4269 struct intel_crtc_config *pipe_config)
79e53945 4270{
a43f6e0f 4271 struct drm_device *dev = crtc->base.dev;
b8cecdf5 4272 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
89749350 4273
ad3a4479 4274 /* FIXME should check pixel clock limits on all platforms */
cf532bb2
VS
4275 if (INTEL_INFO(dev)->gen < 4) {
4276 struct drm_i915_private *dev_priv = dev->dev_private;
4277 int clock_limit =
4278 dev_priv->display.get_display_clock_speed(dev);
4279
4280 /*
4281 * Enable pixel doubling when the dot clock
4282 * is > 90% of the (display) core speed.
4283 *
b397c96b
VS
4284 * GDG double wide on either pipe,
4285 * otherwise pipe A only.
cf532bb2 4286 */
b397c96b 4287 if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
241bfc38 4288 adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
ad3a4479 4289 clock_limit *= 2;
cf532bb2 4290 pipe_config->double_wide = true;
ad3a4479
VS
4291 }
4292
241bfc38 4293 if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
e29c22c0 4294 return -EINVAL;
2c07245f 4295 }
89749350 4296
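 /*
 * Worked example (clock values assumed, for illustration): with a
 * 200 MHz display core clock the single-wide limit is 180 MHz
 * (200 * 9 / 10); a 230 MHz dot clock on pipe A (or either pipe on
 * 915G) therefore enables double wide mode, doubling the limit to
 * 360 MHz, while anything above 360 MHz is rejected with -EINVAL.
 */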
1d1d0e27
VS
4297 /*
4298 * Pipe horizontal size must be even in:
4299 * - DVO ganged mode
4300 * - LVDS dual channel mode
4301 * - Double wide pipe
4302 */
4303 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
4304 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
4305 pipe_config->pipe_src_w &= ~1;
4306
8693a824
DL
 4307 /* Cantiga+ cannot handle modes with an hsync front porch of 0.
4308 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
44f46b42
CW
4309 */
4310 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
4311 adjusted_mode->hsync_start == adjusted_mode->hdisplay)
e29c22c0 4312 return -EINVAL;
44f46b42 4313
bd080ee5 4314 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
5d2d38dd 4315 pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
bd080ee5 4316 } else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
5d2d38dd
DV
4317 /* only a 8bpc pipe, with 6bpc dither through the panel fitter
4318 * for lvds. */
4319 pipe_config->pipe_bpp = 8*3;
4320 }
4321
f5adf94e 4322 if (HAS_IPS(dev))
a43f6e0f
DV
4323 hsw_compute_ips_config(crtc, pipe_config);
4324
4325 /* XXX: PCH clock sharing is done in ->mode_set, so make sure the old
4326 * clock survives for now. */
4327 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
4328 pipe_config->shared_dpll = crtc->config.shared_dpll;
42db64ef 4329
877d48d5 4330 if (pipe_config->has_pch_encoder)
a43f6e0f 4331 return ironlake_fdi_compute_config(crtc, pipe_config);
877d48d5 4332
e29c22c0 4333 return 0;
79e53945
JB
4334}
4335
25eb05fc
JB
4336static int valleyview_get_display_clock_speed(struct drm_device *dev)
4337{
4338 return 400000; /* FIXME */
4339}
4340
e70236a8
JB
4341static int i945_get_display_clock_speed(struct drm_device *dev)
4342{
4343 return 400000;
4344}
79e53945 4345
e70236a8 4346static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 4347{
e70236a8
JB
4348 return 333000;
4349}
79e53945 4350
e70236a8
JB
4351static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
4352{
4353 return 200000;
4354}
79e53945 4355
257a7ffc
DV
4356static int pnv_get_display_clock_speed(struct drm_device *dev)
4357{
4358 u16 gcfgc = 0;
4359
4360 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4361
4362 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4363 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
4364 return 267000;
4365 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
4366 return 333000;
4367 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
4368 return 444000;
4369 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
4370 return 200000;
4371 default:
4372 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
4373 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
4374 return 133000;
4375 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
4376 return 167000;
4377 }
4378}
4379
e70236a8
JB
4380static int i915gm_get_display_clock_speed(struct drm_device *dev)
4381{
4382 u16 gcfgc = 0;
79e53945 4383
e70236a8
JB
4384 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
4385
4386 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
4387 return 133000;
4388 else {
4389 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
4390 case GC_DISPLAY_CLOCK_333_MHZ:
4391 return 333000;
4392 default:
4393 case GC_DISPLAY_CLOCK_190_200_MHZ:
4394 return 190000;
79e53945 4395 }
e70236a8
JB
4396 }
4397}
4398
4399static int i865_get_display_clock_speed(struct drm_device *dev)
4400{
4401 return 266000;
4402}
4403
4404static int i855_get_display_clock_speed(struct drm_device *dev)
4405{
4406 u16 hpllcc = 0;
4407 /* Assume that the hardware is in the high speed state. This
4408 * should be the default.
4409 */
4410 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
4411 case GC_CLOCK_133_200:
4412 case GC_CLOCK_100_200:
4413 return 200000;
4414 case GC_CLOCK_166_250:
4415 return 250000;
4416 case GC_CLOCK_100_133:
79e53945 4417 return 133000;
e70236a8 4418 }
79e53945 4419
e70236a8
JB
4420 /* Shouldn't happen */
4421 return 0;
4422}
79e53945 4423
e70236a8
JB
4424static int i830_get_display_clock_speed(struct drm_device *dev)
4425{
4426 return 133000;
79e53945
JB
4427}
4428
2c07245f 4429static void
a65851af 4430intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2c07245f 4431{
a65851af
VS
4432 while (*num > DATA_LINK_M_N_MASK ||
4433 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
4434 *num >>= 1;
4435 *den >>= 1;
4436 }
4437}
4438
a65851af
VS
4439static void compute_m_n(unsigned int m, unsigned int n,
4440 uint32_t *ret_m, uint32_t *ret_n)
4441{
4442 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
4443 *ret_m = div_u64((uint64_t) m * *ret_n, n);
4444 intel_reduce_m_n_ratio(ret_m, ret_n);
4445}
4446
e69d0bc1
DV
4447void
4448intel_link_compute_m_n(int bits_per_pixel, int nlanes,
4449 int pixel_clock, int link_clock,
4450 struct intel_link_m_n *m_n)
2c07245f 4451{
e69d0bc1 4452 m_n->tu = 64;
a65851af
VS
4453
4454 compute_m_n(bits_per_pixel * pixel_clock,
4455 link_clock * nlanes * 8,
4456 &m_n->gmch_m, &m_n->gmch_n);
4457
4458 compute_m_n(pixel_clock, link_clock,
4459 &m_n->link_m, &m_n->link_n);
2c07245f
ZW
4460}
4461
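/*
 * Worked example (illustrative; register limits assumed): for a 148.5 MHz
 * pixel clock on a 270 MHz DP link, compute_m_n(148500, 270000, ...)
 * rounds N up to the next power of two, 524288, and scales M to preserve
 * the ratio: M = 148500 * 524288 / 270000 = 288358 (truncated). The
 * resulting 288358/524288 is ~0.55, i.e. pixel_clock/link_clock, and both
 * values fit the 24-bit M/N fields, so intel_reduce_m_n_ratio() leaves
 * them untouched.
 */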
a7615030
CW
4462static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4463{
72bbe58c
KP
4464 if (i915_panel_use_ssc >= 0)
4465 return i915_panel_use_ssc != 0;
41aa3448 4466 return dev_priv->vbt.lvds_use_ssc
435793df 4467 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
4468}
4469
c65d77d8
JB
4470static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
4471{
4472 struct drm_device *dev = crtc->dev;
4473 struct drm_i915_private *dev_priv = dev->dev_private;
4474 int refclk;
4475
a0c4da24 4476 if (IS_VALLEYVIEW(dev)) {
9a0ea498 4477 refclk = 100000;
a0c4da24 4478 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
c65d77d8 4479 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
41aa3448 4480 refclk = dev_priv->vbt.lvds_ssc_freq * 1000;
c65d77d8
JB
4481 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4482 refclk / 1000);
4483 } else if (!IS_GEN2(dev)) {
4484 refclk = 96000;
4485 } else {
4486 refclk = 48000;
4487 }
4488
4489 return refclk;
4490}
4491
7429e9d4 4492static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 4493{
7df00d7a 4494 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 4495}
f47709a9 4496
7429e9d4
DV
4497static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
4498{
4499 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
4500}
4501
f47709a9 4502static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
a7516a05
JB
4503 intel_clock_t *reduced_clock)
4504{
f47709a9 4505 struct drm_device *dev = crtc->base.dev;
a7516a05 4506 struct drm_i915_private *dev_priv = dev->dev_private;
f47709a9 4507 int pipe = crtc->pipe;
a7516a05
JB
4508 u32 fp, fp2 = 0;
4509
4510 if (IS_PINEVIEW(dev)) {
7429e9d4 4511 fp = pnv_dpll_compute_fp(&crtc->config.dpll);
a7516a05 4512 if (reduced_clock)
7429e9d4 4513 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 4514 } else {
7429e9d4 4515 fp = i9xx_dpll_compute_fp(&crtc->config.dpll);
a7516a05 4516 if (reduced_clock)
7429e9d4 4517 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
4518 }
4519
4520 I915_WRITE(FP0(pipe), fp);
8bcc2795 4521 crtc->config.dpll_hw_state.fp0 = fp;
a7516a05 4522
f47709a9
DV
4523 crtc->lowfreq_avail = false;
4524 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
a7516a05
JB
4525 reduced_clock && i915_powersave) {
4526 I915_WRITE(FP1(pipe), fp2);
8bcc2795 4527 crtc->config.dpll_hw_state.fp1 = fp2;
f47709a9 4528 crtc->lowfreq_avail = true;
a7516a05
JB
4529 } else {
4530 I915_WRITE(FP1(pipe), fp);
8bcc2795 4531 crtc->config.dpll_hw_state.fp1 = fp;
a7516a05
JB
4532 }
4533}
4534
5e69f97f
CML
4535static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
4536 pipe)
89b667f8
JB
4537{
4538 u32 reg_val;
4539
4540 /*
4541 * PLLB opamp always calibrates to max value of 0x3f, force enable it
4542 * and set it to a reasonable value instead.
4543 */
5e69f97f 4544 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
89b667f8
JB
4545 reg_val &= 0xffffff00;
4546 reg_val |= 0x00000030;
5e69f97f 4547 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
89b667f8 4548
5e69f97f 4549 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
89b667f8
JB
4550 reg_val &= 0x8cffffff;
4551 reg_val = 0x8c000000;
5e69f97f 4552 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
89b667f8 4553
5e69f97f 4554 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF(1));
89b667f8 4555 reg_val &= 0xffffff00;
5e69f97f 4556 vlv_dpio_write(dev_priv, pipe, DPIO_IREF(1), reg_val);
89b667f8 4557
5e69f97f 4558 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_CALIBRATION);
89b667f8
JB
4559 reg_val &= 0x00ffffff;
4560 reg_val |= 0xb0000000;
5e69f97f 4561 vlv_dpio_write(dev_priv, pipe, DPIO_CALIBRATION, reg_val);
89b667f8
JB
4562}
4563
b551842d
DV
4564static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
4565 struct intel_link_m_n *m_n)
4566{
4567 struct drm_device *dev = crtc->base.dev;
4568 struct drm_i915_private *dev_priv = dev->dev_private;
4569 int pipe = crtc->pipe;
4570
e3b95f1e
DV
4571 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4572 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
4573 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
4574 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
4575}
4576
4577static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
4578 struct intel_link_m_n *m_n)
4579{
4580 struct drm_device *dev = crtc->base.dev;
4581 struct drm_i915_private *dev_priv = dev->dev_private;
4582 int pipe = crtc->pipe;
4583 enum transcoder transcoder = crtc->config.cpu_transcoder;
4584
4585 if (INTEL_INFO(dev)->gen >= 5) {
4586 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
4587 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
4588 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
4589 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4590 } else {
e3b95f1e
DV
4591 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
4592 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
4593 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
4594 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
4595 }
4596}
4597
03afc4a2
DV
4598static void intel_dp_set_m_n(struct intel_crtc *crtc)
4599{
4600 if (crtc->config.has_pch_encoder)
4601 intel_pch_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4602 else
4603 intel_cpu_transcoder_set_m_n(crtc, &crtc->config.dp_m_n);
4604}
4605
f47709a9 4606static void vlv_update_pll(struct intel_crtc *crtc)
a0c4da24 4607{
f47709a9 4608 struct drm_device *dev = crtc->base.dev;
a0c4da24 4609 struct drm_i915_private *dev_priv = dev->dev_private;
f47709a9 4610 int pipe = crtc->pipe;
89b667f8 4611 u32 dpll, mdiv;
a0c4da24 4612 u32 bestn, bestm1, bestm2, bestp1, bestp2;
198a037f 4613 u32 coreclk, reg_val, dpll_md;
a0c4da24 4614
09153000
DV
4615 mutex_lock(&dev_priv->dpio_lock);
4616
f47709a9
DV
4617 bestn = crtc->config.dpll.n;
4618 bestm1 = crtc->config.dpll.m1;
4619 bestm2 = crtc->config.dpll.m2;
4620 bestp1 = crtc->config.dpll.p1;
4621 bestp2 = crtc->config.dpll.p2;
a0c4da24 4622
89b667f8
JB
4623 /* See eDP HDMI DPIO driver vbios notes doc */
4624
4625 /* PLL B needs special handling */
4626 if (pipe)
5e69f97f 4627 vlv_pllb_recal_opamp(dev_priv, pipe);
89b667f8
JB
4628
4629 /* Set up Tx target for periodic Rcomp update */
5e69f97f 4630 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_BCAST, 0x0100000f);
89b667f8
JB
4631
4632 /* Disable target IRef on PLL */
5e69f97f 4633 reg_val = vlv_dpio_read(dev_priv, pipe, DPIO_IREF_CTL(pipe));
89b667f8 4634 reg_val &= 0x00ffffff;
5e69f97f 4635 vlv_dpio_write(dev_priv, pipe, DPIO_IREF_CTL(pipe), reg_val);
89b667f8
JB
4636
4637 /* Disable fast lock */
5e69f97f 4638 vlv_dpio_write(dev_priv, pipe, DPIO_FASTCLK_DISABLE, 0x610);
89b667f8
JB
4639
4640 /* Set idtafcrecal before PLL is enabled */
a0c4da24
JB
4641 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
4642 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
4643 mdiv |= ((bestn << DPIO_N_SHIFT));
a0c4da24 4644 mdiv |= (1 << DPIO_K_SHIFT);
7df5080b
JB
4645
4646 /*
4647 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
4648 * but we don't support that).
4649 * Note: don't use the DAC post divider as it seems unstable.
4650 */
4651 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
5e69f97f 4652 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
a0c4da24 4653
a0c4da24 4654 mdiv |= DPIO_ENABLE_CALIBRATION;
5e69f97f 4655 vlv_dpio_write(dev_priv, pipe, DPIO_DIV(pipe), mdiv);
a0c4da24 4656
89b667f8 4657 /* Set HBR and RBR LPF coefficients */
ff9a6750 4658 if (crtc->config.port_clock == 162000 ||
99750bd4 4659 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
89b667f8 4660 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
5e69f97f 4661 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
885b0120 4662 0x009f0003);
89b667f8 4663 else
5e69f97f 4664 vlv_dpio_write(dev_priv, pipe, DPIO_LPF_COEFF(pipe),
89b667f8
JB
4665 0x00d0000f);
4666
4667 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) ||
4668 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
4669 /* Use SSC source */
4670 if (!pipe)
5e69f97f 4671 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
89b667f8
JB
4672 0x0df40000);
4673 else
5e69f97f 4674 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
89b667f8
JB
4675 0x0df70000);
4676 } else { /* HDMI or VGA */
4677 /* Use bend source */
4678 if (!pipe)
5e69f97f 4679 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
89b667f8
JB
4680 0x0df70000);
4681 else
5e69f97f 4682 vlv_dpio_write(dev_priv, pipe, DPIO_REFSFR(pipe),
89b667f8
JB
4683 0x0df40000);
4684 }
a0c4da24 4685
5e69f97f 4686 coreclk = vlv_dpio_read(dev_priv, pipe, DPIO_CORE_CLK(pipe));
89b667f8
JB
4687 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
4688 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) ||
4689 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP))
4690 coreclk |= 0x01000000;
5e69f97f 4691 vlv_dpio_write(dev_priv, pipe, DPIO_CORE_CLK(pipe), coreclk);
a0c4da24 4692
5e69f97f 4693 vlv_dpio_write(dev_priv, pipe, DPIO_PLL_CML(pipe), 0x87871000);
a0c4da24 4694
89b667f8
JB
4695 /* Enable DPIO clock input */
4696 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
4697 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
f6071166
JB
4698 /* We should never disable this, set it here for state tracking */
4699 if (pipe == PIPE_B)
89b667f8 4700 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
a0c4da24 4701 dpll |= DPLL_VCO_ENABLE;
8bcc2795
DV
4702 crtc->config.dpll_hw_state.dpll = dpll;
4703
ef1b460d
DV
4704 dpll_md = (crtc->config.pixel_multiplier - 1)
4705 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8bcc2795
DV
4706 crtc->config.dpll_hw_state.dpll_md = dpll_md;
4707
89b667f8
JB
4708 if (crtc->config.has_dp_encoder)
4709 intel_dp_set_m_n(crtc);
09153000
DV
4710
4711 mutex_unlock(&dev_priv->dpio_lock);
a0c4da24
JB
4712}
4713
f47709a9
DV
4714static void i9xx_update_pll(struct intel_crtc *crtc,
4715 intel_clock_t *reduced_clock,
eb1cbe48
DV
4716 int num_connectors)
4717{
f47709a9 4718 struct drm_device *dev = crtc->base.dev;
eb1cbe48 4719 struct drm_i915_private *dev_priv = dev->dev_private;
eb1cbe48
DV
4720 u32 dpll;
4721 bool is_sdvo;
f47709a9 4722 struct dpll *clock = &crtc->config.dpll;
eb1cbe48 4723
f47709a9 4724 i9xx_update_pll_dividers(crtc, reduced_clock);
2a8f64ca 4725
f47709a9
DV
4726 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) ||
4727 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
eb1cbe48
DV
4728
4729 dpll = DPLL_VGA_MODE_DIS;
4730
f47709a9 4731 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS))
eb1cbe48
DV
4732 dpll |= DPLLB_MODE_LVDS;
4733 else
4734 dpll |= DPLLB_MODE_DAC_SERIAL;
6cc5f341 4735
ef1b460d 4736 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
198a037f
DV
4737 dpll |= (crtc->config.pixel_multiplier - 1)
4738 << SDVO_MULTIPLIER_SHIFT_HIRES;
eb1cbe48 4739 }
198a037f
DV
4740
4741 if (is_sdvo)
4a33e48d 4742 dpll |= DPLL_SDVO_HIGH_SPEED;
198a037f 4743
f47709a9 4744 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
4a33e48d 4745 dpll |= DPLL_SDVO_HIGH_SPEED;
eb1cbe48
DV
4746
4747 /* compute bitmask from p1 value */
4748 if (IS_PINEVIEW(dev))
4749 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4750 else {
4751 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4752 if (IS_G4X(dev) && reduced_clock)
4753 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4754 }
4755 switch (clock->p2) {
4756 case 5:
4757 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4758 break;
4759 case 7:
4760 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4761 break;
4762 case 10:
4763 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4764 break;
4765 case 14:
4766 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4767 break;
4768 }
4769 if (INTEL_INFO(dev)->gen >= 4)
4770 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4771
09ede541 4772 if (crtc->config.sdvo_tv_clock)
eb1cbe48 4773 dpll |= PLL_REF_INPUT_TVCLKINBC;
f47709a9 4774 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
eb1cbe48
DV
4775 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4776 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4777 else
4778 dpll |= PLL_REF_INPUT_DREFCLK;
4779
4780 dpll |= DPLL_VCO_ENABLE;
8bcc2795
DV
4781 crtc->config.dpll_hw_state.dpll = dpll;
4782
eb1cbe48 4783 if (INTEL_INFO(dev)->gen >= 4) {
ef1b460d
DV
4784 u32 dpll_md = (crtc->config.pixel_multiplier - 1)
4785 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
8bcc2795 4786 crtc->config.dpll_hw_state.dpll_md = dpll_md;
eb1cbe48 4787 }
66e3d5c0
DV
4788
4789 if (crtc->config.has_dp_encoder)
4790 intel_dp_set_m_n(crtc);
eb1cbe48
DV
4791}
4792
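/*
 * Worked example (illustrative): a P1 post divider of 2 is encoded
 * one-hot above as (1 << (2 - 1)) = 0x2 in the P1 field, and a P2 of 10
 * selects DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; together with DPLL_VCO_ENABLE
 * these form the dpll value cached in dpll_hw_state for later hw state
 * checking.
 */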
f47709a9 4793static void i8xx_update_pll(struct intel_crtc *crtc,
f47709a9 4794 intel_clock_t *reduced_clock,
eb1cbe48
DV
4795 int num_connectors)
4796{
f47709a9 4797 struct drm_device *dev = crtc->base.dev;
eb1cbe48 4798 struct drm_i915_private *dev_priv = dev->dev_private;
eb1cbe48 4799 u32 dpll;
f47709a9 4800 struct dpll *clock = &crtc->config.dpll;
eb1cbe48 4801
f47709a9 4802 i9xx_update_pll_dividers(crtc, reduced_clock);
2a8f64ca 4803
eb1cbe48
DV
4804 dpll = DPLL_VGA_MODE_DIS;
4805
f47709a9 4806 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) {
eb1cbe48
DV
4807 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4808 } else {
4809 if (clock->p1 == 2)
4810 dpll |= PLL_P1_DIVIDE_BY_TWO;
4811 else
4812 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4813 if (clock->p2 == 4)
4814 dpll |= PLL_P2_DIVIDE_BY_4;
4815 }
4816
4a33e48d
DV
4817 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
4818 dpll |= DPLL_DVO_2X_MODE;
4819
f47709a9 4820 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
eb1cbe48
DV
4821 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4822 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4823 else
4824 dpll |= PLL_REF_INPUT_DREFCLK;
4825
4826 dpll |= DPLL_VCO_ENABLE;
8bcc2795 4827 crtc->config.dpll_hw_state.dpll = dpll;
eb1cbe48
DV
4828}
4829
8a654f3b 4830static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
b0e77b9c
PZ
4831{
4832 struct drm_device *dev = intel_crtc->base.dev;
4833 struct drm_i915_private *dev_priv = dev->dev_private;
4834 enum pipe pipe = intel_crtc->pipe;
3b117c8f 4835 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
8a654f3b
DV
4836 struct drm_display_mode *adjusted_mode =
4837 &intel_crtc->config.adjusted_mode;
4d8a62ea
DV
4838 uint32_t vsyncshift, crtc_vtotal, crtc_vblank_end;
4839
4840 /* We need to be careful not to changed the adjusted mode, for otherwise
4841 * the hw state checker will get angry at the mismatch. */
4842 crtc_vtotal = adjusted_mode->crtc_vtotal;
4843 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
b0e77b9c
PZ
4844
4845 if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
4846 /* the chip adds 2 halflines automatically */
4d8a62ea
DV
4847 crtc_vtotal -= 1;
4848 crtc_vblank_end -= 1;
b0e77b9c
PZ
4849 vsyncshift = adjusted_mode->crtc_hsync_start
4850 - adjusted_mode->crtc_htotal / 2;
4851 } else {
4852 vsyncshift = 0;
4853 }
4854
4855 if (INTEL_INFO(dev)->gen > 3)
fe2b8f9d 4856 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 4857
fe2b8f9d 4858 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
4859 (adjusted_mode->crtc_hdisplay - 1) |
4860 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 4861 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
4862 (adjusted_mode->crtc_hblank_start - 1) |
4863 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 4864 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
4865 (adjusted_mode->crtc_hsync_start - 1) |
4866 ((adjusted_mode->crtc_hsync_end - 1) << 16));
4867
fe2b8f9d 4868 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c 4869 (adjusted_mode->crtc_vdisplay - 1) |
4d8a62ea 4870 ((crtc_vtotal - 1) << 16));
fe2b8f9d 4871 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c 4872 (adjusted_mode->crtc_vblank_start - 1) |
4d8a62ea 4873 ((crtc_vblank_end - 1) << 16));
fe2b8f9d 4874 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
4875 (adjusted_mode->crtc_vsync_start - 1) |
4876 ((adjusted_mode->crtc_vsync_end - 1) << 16));
4877
b5e508d4
PZ
4878 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
4879 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
4880 * documented on the DDI_FUNC_CTL register description, EDP Input Select
4881 * bits. */
4882 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
4883 (pipe == PIPE_B || pipe == PIPE_C))
4884 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
4885
b0e77b9c
PZ
4886 /* pipesrc controls the size that is scaled from, which should
4887 * always be the user's requested size.
4888 */
4889 I915_WRITE(PIPESRC(pipe),
37327abd
VS
4890 ((intel_crtc->config.pipe_src_w - 1) << 16) |
4891 (intel_crtc->config.pipe_src_h - 1));
b0e77b9c
PZ
4892}
4893
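/*
 * Worked example (mode numbers assumed, for illustration): a 1920x1080
 * mode with an htotal of 2200 programs HTOTAL as
 * (1920 - 1) | ((2200 - 1) << 16) = 0x0897077f, while PIPESRC packs the
 * source size the other way round: ((1920 - 1) << 16) | (1080 - 1)
 * = 0x077f0437.
 */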
1bd1bd80
DV
4894static void intel_get_pipe_timings(struct intel_crtc *crtc,
4895 struct intel_crtc_config *pipe_config)
4896{
4897 struct drm_device *dev = crtc->base.dev;
4898 struct drm_i915_private *dev_priv = dev->dev_private;
4899 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
4900 uint32_t tmp;
4901
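	/*
	 * The timing registers hold each value minus one, packed into the
	 * low and high 16-bit halves; the +1 below undoes that bias and
	 * mirrors what intel_set_pipe_timings() programs above.
	 */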
4902 tmp = I915_READ(HTOTAL(cpu_transcoder));
4903 pipe_config->adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
4904 pipe_config->adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
4905 tmp = I915_READ(HBLANK(cpu_transcoder));
4906 pipe_config->adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
4907 pipe_config->adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
4908 tmp = I915_READ(HSYNC(cpu_transcoder));
4909 pipe_config->adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
4910 pipe_config->adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
4911
4912 tmp = I915_READ(VTOTAL(cpu_transcoder));
4913 pipe_config->adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
4914 pipe_config->adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
4915 tmp = I915_READ(VBLANK(cpu_transcoder));
4916 pipe_config->adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
4917 pipe_config->adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
4918 tmp = I915_READ(VSYNC(cpu_transcoder));
4919 pipe_config->adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
4920 pipe_config->adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
4921
4922 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
4923 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
4924 pipe_config->adjusted_mode.crtc_vtotal += 1;
4925 pipe_config->adjusted_mode.crtc_vblank_end += 1;
4926 }
4927
4928 tmp = I915_READ(PIPESRC(crtc->pipe));
37327abd
VS
4929 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
4930 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
4931
4932 pipe_config->requested_mode.vdisplay = pipe_config->pipe_src_h;
4933 pipe_config->requested_mode.hdisplay = pipe_config->pipe_src_w;
1bd1bd80
DV
4934}
4935
babea61d
JB
4936static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
4937 struct intel_crtc_config *pipe_config)
4938{
4939 struct drm_crtc *crtc = &intel_crtc->base;
4940
4941 crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
4942 crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
4943 crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
4944 crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
4945
4946 crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
4947 crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
4948 crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
4949 crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
4950
4951 crtc->mode.flags = pipe_config->adjusted_mode.flags;
4952
241bfc38 4953 crtc->mode.clock = pipe_config->adjusted_mode.crtc_clock;
babea61d
JB
4954 crtc->mode.flags |= pipe_config->adjusted_mode.flags;
4955}
4956
84b046f3
DV
4957static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
4958{
4959 struct drm_device *dev = intel_crtc->base.dev;
4960 struct drm_i915_private *dev_priv = dev->dev_private;
4961 uint32_t pipeconf;
4962
9f11a9e4 4963 pipeconf = 0;
84b046f3 4964
67c72a12
DV
4965 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
4966 I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE)
4967 pipeconf |= PIPECONF_ENABLE;
4968
cf532bb2
VS
4969 if (intel_crtc->config.double_wide)
4970 pipeconf |= PIPECONF_DOUBLE_WIDE;
84b046f3 4971
ff9ce46e
DV
4972 /* only g4x and later have fancy bpc/dither controls */
4973 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
ff9ce46e
DV
4974 /* Bspec claims that we can't use dithering for 30bpp pipes. */
4975 if (intel_crtc->config.dither && intel_crtc->config.pipe_bpp != 30)
4976 pipeconf |= PIPECONF_DITHER_EN |
84b046f3 4977 PIPECONF_DITHER_TYPE_SP;
84b046f3 4978
ff9ce46e
DV
4979 switch (intel_crtc->config.pipe_bpp) {
4980 case 18:
4981 pipeconf |= PIPECONF_6BPC;
4982 break;
4983 case 24:
4984 pipeconf |= PIPECONF_8BPC;
4985 break;
4986 case 30:
4987 pipeconf |= PIPECONF_10BPC;
4988 break;
4989 default:
4990 /* Case prevented by intel_choose_pipe_bpp_dither. */
4991 BUG();
84b046f3
DV
4992 }
4993 }
4994
4995 if (HAS_PIPE_CXSR(dev)) {
4996 if (intel_crtc->lowfreq_avail) {
4997 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
4998 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
4999 } else {
5000 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
84b046f3
DV
5001 }
5002 }
5003
84b046f3
DV
5004 if (!IS_GEN2(dev) &&
5005 intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
5006 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
5007 else
5008 pipeconf |= PIPECONF_PROGRESSIVE;
5009
9f11a9e4
DV
5010 if (IS_VALLEYVIEW(dev) && intel_crtc->config.limited_color_range)
5011 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
9c8e09b7 5012
84b046f3
DV
5013 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
5014 POSTING_READ(PIPECONF(intel_crtc->pipe));
5015}
5016
f564048e 5017static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
f564048e 5018 int x, int y,
94352cf9 5019 struct drm_framebuffer *fb)
79e53945
JB
5020{
5021 struct drm_device *dev = crtc->dev;
5022 struct drm_i915_private *dev_priv = dev->dev_private;
5023 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5024 int pipe = intel_crtc->pipe;
80824003 5025 int plane = intel_crtc->plane;
c751ce4f 5026 int refclk, num_connectors = 0;
652c393a 5027 intel_clock_t clock, reduced_clock;
84b046f3 5028 u32 dspcntr;
a16af721 5029 bool ok, has_reduced_clock = false;
e9fd1c02 5030 bool is_lvds = false, is_dsi = false;
5eddb70b 5031 struct intel_encoder *encoder;
d4906093 5032 const intel_limit_t *limit;
5c3b82e2 5033 int ret;
79e53945 5034
6c2b7c12 5035 for_each_encoder_on_crtc(dev, crtc, encoder) {
5eddb70b 5036 switch (encoder->type) {
79e53945
JB
5037 case INTEL_OUTPUT_LVDS:
5038 is_lvds = true;
5039 break;
e9fd1c02
JN
5040 case INTEL_OUTPUT_DSI:
5041 is_dsi = true;
5042 break;
79e53945 5043 }
43565a06 5044
c751ce4f 5045 num_connectors++;
79e53945
JB
5046 }
5047
f2335330
JN
5048 if (is_dsi)
5049 goto skip_dpll;
5050
5051 if (!intel_crtc->config.clock_set) {
5052 refclk = i9xx_get_refclk(crtc, num_connectors);
79e53945 5053
e9fd1c02
JN
5054 /*
5055 * Returns a set of divisors for the desired target clock with
5056 * the given refclk, or FALSE. The returned values represent
 5057	 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
5058 * 2) / p1 / p2.
5059 */
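		/*
		 * Purely illustrative arithmetic (divisor values invented,
		 * not taken from the limit tables): refclk = 96000 kHz,
		 * m1 = 10, m2 = 8, n = 3, p1 = 2, p2 = 4 gives
		 * 96000 * (5 * 12 + 10) / 5 / 2 / 4 = 168000 kHz.
		 */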
5060 limit = intel_limit(crtc, refclk);
5061 ok = dev_priv->display.find_dpll(limit, crtc,
5062 intel_crtc->config.port_clock,
5063 refclk, NULL, &clock);
f2335330 5064 if (!ok) {
e9fd1c02
JN
5065 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5066 return -EINVAL;
5067 }
79e53945 5068
f2335330
JN
5069 if (is_lvds && dev_priv->lvds_downclock_avail) {
5070 /*
5071 * Ensure we match the reduced clock's P to the target
5072 * clock. If the clocks don't match, we can't switch
 5073	 * the display clock by using the FP0/FP1. In such a case
5074 * we will disable the LVDS downclock feature.
5075 */
5076 has_reduced_clock =
5077 dev_priv->display.find_dpll(limit, crtc,
5078 dev_priv->lvds_downclock,
5079 refclk, &clock,
5080 &reduced_clock);
5081 }
5082 /* Compat-code for transition, will disappear. */
f47709a9
DV
5083 intel_crtc->config.dpll.n = clock.n;
5084 intel_crtc->config.dpll.m1 = clock.m1;
5085 intel_crtc->config.dpll.m2 = clock.m2;
5086 intel_crtc->config.dpll.p1 = clock.p1;
5087 intel_crtc->config.dpll.p2 = clock.p2;
5088 }
7026d4ac 5089
e9fd1c02 5090 if (IS_GEN2(dev)) {
8a654f3b 5091 i8xx_update_pll(intel_crtc,
2a8f64ca
VP
5092 has_reduced_clock ? &reduced_clock : NULL,
5093 num_connectors);
e9fd1c02 5094 } else if (IS_VALLEYVIEW(dev)) {
f2335330 5095 vlv_update_pll(intel_crtc);
e9fd1c02 5096 } else {
f47709a9 5097 i9xx_update_pll(intel_crtc,
eb1cbe48 5098 has_reduced_clock ? &reduced_clock : NULL,
89b667f8 5099 num_connectors);
e9fd1c02 5100 }
79e53945 5101
f2335330 5102skip_dpll:
79e53945
JB
5103 /* Set up the display plane register */
5104 dspcntr = DISPPLANE_GAMMA_ENABLE;
5105
da6ecc5d
JB
5106 if (!IS_VALLEYVIEW(dev)) {
5107 if (pipe == 0)
5108 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
5109 else
5110 dspcntr |= DISPPLANE_SEL_PIPE_B;
5111 }
79e53945 5112
8a654f3b 5113 intel_set_pipe_timings(intel_crtc);
5eddb70b
CW
5114
5115 /* pipesrc and dspsize control the size that is scaled from,
5116 * which should always be the user's requested size.
79e53945 5117 */
929c77fb 5118 I915_WRITE(DSPSIZE(plane),
37327abd
VS
5119 ((intel_crtc->config.pipe_src_h - 1) << 16) |
5120 (intel_crtc->config.pipe_src_w - 1));
929c77fb 5121 I915_WRITE(DSPPOS(plane), 0);
2c07245f 5122
84b046f3
DV
5123 i9xx_set_pipeconf(intel_crtc);
5124
f564048e
EA
5125 I915_WRITE(DSPCNTR(plane), dspcntr);
5126 POSTING_READ(DSPCNTR(plane));
5127
94352cf9 5128 ret = intel_pipe_set_base(crtc, x, y, fb);
f564048e 5129
f564048e
EA
5130 return ret;
5131}
5132
2fa2fe9a
DV
5133static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5134 struct intel_crtc_config *pipe_config)
5135{
5136 struct drm_device *dev = crtc->base.dev;
5137 struct drm_i915_private *dev_priv = dev->dev_private;
5138 uint32_t tmp;
5139
5140 tmp = I915_READ(PFIT_CONTROL);
06922821
DV
5141 if (!(tmp & PFIT_ENABLE))
5142 return;
2fa2fe9a 5143
06922821 5144 /* Check whether the pfit is attached to our pipe. */
2fa2fe9a
DV
5145 if (INTEL_INFO(dev)->gen < 4) {
5146 if (crtc->pipe != PIPE_B)
5147 return;
2fa2fe9a
DV
5148 } else {
5149 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
5150 return;
5151 }
5152
06922821 5153 pipe_config->gmch_pfit.control = tmp;
2fa2fe9a
DV
5154 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
5155 if (INTEL_INFO(dev)->gen < 5)
5156 pipe_config->gmch_pfit.lvds_border_bits =
5157 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
5158}
5159
acbec814
JB
5160static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5161 struct intel_crtc_config *pipe_config)
5162{
5163 struct drm_device *dev = crtc->base.dev;
5164 struct drm_i915_private *dev_priv = dev->dev_private;
5165 int pipe = pipe_config->cpu_transcoder;
5166 intel_clock_t clock;
5167 u32 mdiv;
662c6ecb 5168 int refclk = 100000;
acbec814
JB
5169
5170 mutex_lock(&dev_priv->dpio_lock);
5171 mdiv = vlv_dpio_read(dev_priv, pipe, DPIO_DIV(pipe));
5172 mutex_unlock(&dev_priv->dpio_lock);
5173
5174 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
5175 clock.m2 = mdiv & DPIO_M2DIV_MASK;
5176 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
5177 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
5178 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
5179
662c6ecb
CW
5180 clock.vco = refclk * clock.m1 * clock.m2 / clock.n;
5181 clock.dot = 2 * clock.vco / (clock.p1 * clock.p2);
acbec814
JB
5182
5183 pipe_config->port_clock = clock.dot / 10;
5184}
5185
0e8ffe1b
DV
5186static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5187 struct intel_crtc_config *pipe_config)
5188{
5189 struct drm_device *dev = crtc->base.dev;
5190 struct drm_i915_private *dev_priv = dev->dev_private;
5191 uint32_t tmp;
5192
e143a21c 5193 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62 5194 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
eccb140b 5195
0e8ffe1b
DV
5196 tmp = I915_READ(PIPECONF(crtc->pipe));
5197 if (!(tmp & PIPECONF_ENABLE))
5198 return false;
5199
42571aef
VS
5200 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
5201 switch (tmp & PIPECONF_BPC_MASK) {
5202 case PIPECONF_6BPC:
5203 pipe_config->pipe_bpp = 18;
5204 break;
5205 case PIPECONF_8BPC:
5206 pipe_config->pipe_bpp = 24;
5207 break;
5208 case PIPECONF_10BPC:
5209 pipe_config->pipe_bpp = 30;
5210 break;
5211 default:
5212 break;
5213 }
5214 }
5215
282740f7
VS
5216 if (INTEL_INFO(dev)->gen < 4)
5217 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
5218
1bd1bd80
DV
5219 intel_get_pipe_timings(crtc, pipe_config);
5220
2fa2fe9a
DV
5221 i9xx_get_pfit_config(crtc, pipe_config);
5222
6c49f241
DV
5223 if (INTEL_INFO(dev)->gen >= 4) {
5224 tmp = I915_READ(DPLL_MD(crtc->pipe));
5225 pipe_config->pixel_multiplier =
5226 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
5227 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 5228 pipe_config->dpll_hw_state.dpll_md = tmp;
6c49f241
DV
5229 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5230 tmp = I915_READ(DPLL(crtc->pipe));
5231 pipe_config->pixel_multiplier =
5232 ((tmp & SDVO_MULTIPLIER_MASK)
5233 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
5234 } else {
5235 /* Note that on i915G/GM the pixel multiplier is in the sdvo
5236 * port and will be fixed up in the encoder->get_config
5237 * function. */
5238 pipe_config->pixel_multiplier = 1;
5239 }
8bcc2795
DV
5240 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
5241 if (!IS_VALLEYVIEW(dev)) {
5242 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
5243 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
5244 } else {
5245 /* Mask out read-only status bits. */
5246 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
5247 DPLL_PORTC_READY_MASK |
5248 DPLL_PORTB_READY_MASK);
8bcc2795 5249 }
6c49f241 5250
acbec814
JB
5251 if (IS_VALLEYVIEW(dev))
5252 vlv_crtc_clock_get(crtc, pipe_config);
5253 else
5254 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 5255
0e8ffe1b
DV
5256 return true;
5257}
5258
dde86e2d 5259static void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
5260{
5261 struct drm_i915_private *dev_priv = dev->dev_private;
5262 struct drm_mode_config *mode_config = &dev->mode_config;
13d83a67 5263 struct intel_encoder *encoder;
74cfd7ac 5264 u32 val, final;
13d83a67 5265 bool has_lvds = false;
199e5d79 5266 bool has_cpu_edp = false;
199e5d79 5267 bool has_panel = false;
99eb6a01
KP
5268 bool has_ck505 = false;
5269 bool can_ssc = false;
13d83a67
JB
5270
5271 /* We need to take the global config into account */
199e5d79
KP
5272 list_for_each_entry(encoder, &mode_config->encoder_list,
5273 base.head) {
5274 switch (encoder->type) {
5275 case INTEL_OUTPUT_LVDS:
5276 has_panel = true;
5277 has_lvds = true;
5278 break;
5279 case INTEL_OUTPUT_EDP:
5280 has_panel = true;
2de6905f 5281 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
199e5d79
KP
5282 has_cpu_edp = true;
5283 break;
13d83a67
JB
5284 }
5285 }
5286
99eb6a01 5287 if (HAS_PCH_IBX(dev)) {
41aa3448 5288 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
5289 can_ssc = has_ck505;
5290 } else {
5291 has_ck505 = false;
5292 can_ssc = true;
5293 }
5294
2de6905f
ID
5295 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
5296 has_panel, has_lvds, has_ck505);
13d83a67
JB
5297
 5298	/* Ironlake: try to set up the display ref clock before enabling
 5299	 * the DPLL. This is only under the driver's control after PCH B
 5300	 * stepping; previous chipset steppings should ignore this
 5301	 * setting.
5302 */
74cfd7ac
CW
5303 val = I915_READ(PCH_DREF_CONTROL);
5304
5305 /* As we must carefully and slowly disable/enable each source in turn,
5306 * compute the final state we want first and check if we need to
5307 * make any changes at all.
5308 */
5309 final = val;
5310 final &= ~DREF_NONSPREAD_SOURCE_MASK;
5311 if (has_ck505)
5312 final |= DREF_NONSPREAD_CK505_ENABLE;
5313 else
5314 final |= DREF_NONSPREAD_SOURCE_ENABLE;
5315
5316 final &= ~DREF_SSC_SOURCE_MASK;
5317 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
5318 final &= ~DREF_SSC1_ENABLE;
5319
5320 if (has_panel) {
5321 final |= DREF_SSC_SOURCE_ENABLE;
5322
5323 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5324 final |= DREF_SSC1_ENABLE;
5325
5326 if (has_cpu_edp) {
5327 if (intel_panel_use_ssc(dev_priv) && can_ssc)
5328 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
5329 else
5330 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
5331 } else
5332 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5333 } else {
5334 final |= DREF_SSC_SOURCE_DISABLE;
5335 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
5336 }
5337
5338 if (final == val)
5339 return;
5340
13d83a67 5341 /* Always enable nonspread source */
74cfd7ac 5342 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 5343
99eb6a01 5344 if (has_ck505)
74cfd7ac 5345 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 5346 else
74cfd7ac 5347 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 5348
199e5d79 5349 if (has_panel) {
74cfd7ac
CW
5350 val &= ~DREF_SSC_SOURCE_MASK;
5351 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 5352
199e5d79 5353 /* SSC must be turned on before enabling the CPU output */
99eb6a01 5354 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 5355 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 5356 val |= DREF_SSC1_ENABLE;
e77166b5 5357 } else
74cfd7ac 5358 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
5359
5360 /* Get SSC going before enabling the outputs */
74cfd7ac 5361 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
5362 POSTING_READ(PCH_DREF_CONTROL);
5363 udelay(200);
5364
74cfd7ac 5365 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
5366
5367 /* Enable CPU source on CPU attached eDP */
199e5d79 5368 if (has_cpu_edp) {
99eb6a01 5369 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 5370 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 5371 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
199e5d79 5372 }
13d83a67 5373 else
74cfd7ac 5374 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 5375 } else
74cfd7ac 5376 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 5377
74cfd7ac 5378 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
5379 POSTING_READ(PCH_DREF_CONTROL);
5380 udelay(200);
5381 } else {
5382 DRM_DEBUG_KMS("Disabling SSC entirely\n");
5383
74cfd7ac 5384 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
5385
5386 /* Turn off CPU output */
74cfd7ac 5387 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 5388
74cfd7ac 5389 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
5390 POSTING_READ(PCH_DREF_CONTROL);
5391 udelay(200);
5392
5393 /* Turn off the SSC source */
74cfd7ac
CW
5394 val &= ~DREF_SSC_SOURCE_MASK;
5395 val |= DREF_SSC_SOURCE_DISABLE;
199e5d79
KP
5396
5397 /* Turn off SSC1 */
74cfd7ac 5398 val &= ~DREF_SSC1_ENABLE;
199e5d79 5399
74cfd7ac 5400 I915_WRITE(PCH_DREF_CONTROL, val);
13d83a67
JB
5401 POSTING_READ(PCH_DREF_CONTROL);
5402 udelay(200);
5403 }
74cfd7ac
CW
5404
5405 BUG_ON(val != final);
13d83a67
JB
5406}
5407
f31f2d55 5408static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 5409{
f31f2d55 5410 uint32_t tmp;
dde86e2d 5411
0ff066a9
PZ
5412 tmp = I915_READ(SOUTH_CHICKEN2);
5413 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
5414 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 5415
0ff066a9
PZ
5416 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
5417 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
5418 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 5419
0ff066a9
PZ
5420 tmp = I915_READ(SOUTH_CHICKEN2);
5421 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
5422 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 5423
0ff066a9
PZ
5424 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
5425 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
5426 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
5427}
5428
5429/* WaMPhyProgramming:hsw */
5430static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
5431{
5432 uint32_t tmp;
dde86e2d
PZ
5433
5434 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
5435 tmp &= ~(0xFF << 24);
5436 tmp |= (0x12 << 24);
5437 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
5438
dde86e2d
PZ
5439 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
5440 tmp |= (1 << 11);
5441 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
5442
5443 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
5444 tmp |= (1 << 11);
5445 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
5446
dde86e2d
PZ
5447 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
5448 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5449 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
5450
5451 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
5452 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
5453 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
5454
0ff066a9
PZ
5455 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
5456 tmp &= ~(7 << 13);
5457 tmp |= (5 << 13);
5458 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 5459
0ff066a9
PZ
5460 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
5461 tmp &= ~(7 << 13);
5462 tmp |= (5 << 13);
5463 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
5464
5465 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
5466 tmp &= ~0xFF;
5467 tmp |= 0x1C;
5468 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
5469
5470 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
5471 tmp &= ~0xFF;
5472 tmp |= 0x1C;
5473 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
5474
5475 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
5476 tmp &= ~(0xFF << 16);
5477 tmp |= (0x1C << 16);
5478 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
5479
5480 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
5481 tmp &= ~(0xFF << 16);
5482 tmp |= (0x1C << 16);
5483 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
5484
0ff066a9
PZ
5485 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
5486 tmp |= (1 << 27);
5487 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 5488
0ff066a9
PZ
5489 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
5490 tmp |= (1 << 27);
5491 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 5492
0ff066a9
PZ
5493 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
5494 tmp &= ~(0xF << 28);
5495 tmp |= (4 << 28);
5496 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 5497
0ff066a9
PZ
5498 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
5499 tmp &= ~(0xF << 28);
5500 tmp |= (4 << 28);
5501 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
5502}
5503
2fa86a1f
PZ
5504/* Implements 3 different sequences from BSpec chapter "Display iCLK
5505 * Programming" based on the parameters passed:
5506 * - Sequence to enable CLKOUT_DP
5507 * - Sequence to enable CLKOUT_DP without spread
5508 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
5509 */
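/*
 * Roughly, the argument combinations select those sequences as follows
 * (with_fdi implies with_spread, see the WARNs below):
 * - with_spread = false, with_fdi = false: CLKOUT_DP without spread
 * - with_spread = true, with_fdi = false: CLKOUT_DP with spread
 * - with_spread = true, with_fdi = true: CLKOUT_DP for FDI plus mPHY setup
 */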
5510static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
5511 bool with_fdi)
f31f2d55
PZ
5512{
5513 struct drm_i915_private *dev_priv = dev->dev_private;
2fa86a1f
PZ
5514 uint32_t reg, tmp;
5515
5516 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
5517 with_spread = true;
5518 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
5519 with_fdi, "LP PCH doesn't have FDI\n"))
5520 with_fdi = false;
f31f2d55
PZ
5521
5522 mutex_lock(&dev_priv->dpio_lock);
5523
5524 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5525 tmp &= ~SBI_SSCCTL_DISABLE;
5526 tmp |= SBI_SSCCTL_PATHALT;
5527 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5528
5529 udelay(24);
5530
2fa86a1f
PZ
5531 if (with_spread) {
5532 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5533 tmp &= ~SBI_SSCCTL_PATHALT;
5534 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 5535
2fa86a1f
PZ
5536 if (with_fdi) {
5537 lpt_reset_fdi_mphy(dev_priv);
5538 lpt_program_fdi_mphy(dev_priv);
5539 }
5540 }
dde86e2d 5541
2fa86a1f
PZ
5542 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5543 SBI_GEN0 : SBI_DBUFF0;
5544 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5545 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5546 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246
DV
5547
5548 mutex_unlock(&dev_priv->dpio_lock);
dde86e2d
PZ
5549}
5550
47701c3b
PZ
5551/* Sequence to disable CLKOUT_DP */
5552static void lpt_disable_clkout_dp(struct drm_device *dev)
5553{
5554 struct drm_i915_private *dev_priv = dev->dev_private;
5555 uint32_t reg, tmp;
5556
5557 mutex_lock(&dev_priv->dpio_lock);
5558
5559 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
5560 SBI_GEN0 : SBI_DBUFF0;
5561 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
5562 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
5563 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
5564
5565 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
5566 if (!(tmp & SBI_SSCCTL_DISABLE)) {
5567 if (!(tmp & SBI_SSCCTL_PATHALT)) {
5568 tmp |= SBI_SSCCTL_PATHALT;
5569 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5570 udelay(32);
5571 }
5572 tmp |= SBI_SSCCTL_DISABLE;
5573 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
5574 }
5575
5576 mutex_unlock(&dev_priv->dpio_lock);
5577}
5578
bf8fa3d3
PZ
5579static void lpt_init_pch_refclk(struct drm_device *dev)
5580{
5581 struct drm_mode_config *mode_config = &dev->mode_config;
5582 struct intel_encoder *encoder;
5583 bool has_vga = false;
5584
5585 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5586 switch (encoder->type) {
5587 case INTEL_OUTPUT_ANALOG:
5588 has_vga = true;
5589 break;
5590 }
5591 }
5592
47701c3b
PZ
5593 if (has_vga)
5594 lpt_enable_clkout_dp(dev, true, true);
5595 else
5596 lpt_disable_clkout_dp(dev);
bf8fa3d3
PZ
5597}
5598
dde86e2d
PZ
5599/*
5600 * Initialize reference clocks when the driver loads
5601 */
5602void intel_init_pch_refclk(struct drm_device *dev)
5603{
5604 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
5605 ironlake_init_pch_refclk(dev);
5606 else if (HAS_PCH_LPT(dev))
5607 lpt_init_pch_refclk(dev);
5608}
5609
d9d444cb
JB
5610static int ironlake_get_refclk(struct drm_crtc *crtc)
5611{
5612 struct drm_device *dev = crtc->dev;
5613 struct drm_i915_private *dev_priv = dev->dev_private;
5614 struct intel_encoder *encoder;
d9d444cb
JB
5615 int num_connectors = 0;
5616 bool is_lvds = false;
5617
6c2b7c12 5618 for_each_encoder_on_crtc(dev, crtc, encoder) {
d9d444cb
JB
5619 switch (encoder->type) {
5620 case INTEL_OUTPUT_LVDS:
5621 is_lvds = true;
5622 break;
d9d444cb
JB
5623 }
5624 num_connectors++;
5625 }
5626
5627 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5628 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
41aa3448
RV
5629 dev_priv->vbt.lvds_ssc_freq);
5630 return dev_priv->vbt.lvds_ssc_freq * 1000;
d9d444cb
JB
5631 }
5632
5633 return 120000;
5634}
5635
6ff93609 5636static void ironlake_set_pipeconf(struct drm_crtc *crtc)
79e53945 5637{
c8203565 5638 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
79e53945
JB
5639 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5640 int pipe = intel_crtc->pipe;
c8203565
PZ
5641 uint32_t val;
5642
78114071 5643 val = 0;
c8203565 5644
965e0c48 5645 switch (intel_crtc->config.pipe_bpp) {
c8203565 5646 case 18:
dfd07d72 5647 val |= PIPECONF_6BPC;
c8203565
PZ
5648 break;
5649 case 24:
dfd07d72 5650 val |= PIPECONF_8BPC;
c8203565
PZ
5651 break;
5652 case 30:
dfd07d72 5653 val |= PIPECONF_10BPC;
c8203565
PZ
5654 break;
5655 case 36:
dfd07d72 5656 val |= PIPECONF_12BPC;
c8203565
PZ
5657 break;
5658 default:
cc769b62
PZ
5659 /* Case prevented by intel_choose_pipe_bpp_dither. */
5660 BUG();
c8203565
PZ
5661 }
5662
d8b32247 5663 if (intel_crtc->config.dither)
c8203565
PZ
5664 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5665
6ff93609 5666 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
5667 val |= PIPECONF_INTERLACED_ILK;
5668 else
5669 val |= PIPECONF_PROGRESSIVE;
5670
50f3b016 5671 if (intel_crtc->config.limited_color_range)
3685a8f3 5672 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 5673
c8203565
PZ
5674 I915_WRITE(PIPECONF(pipe), val);
5675 POSTING_READ(PIPECONF(pipe));
5676}
5677
86d3efce
VS
5678/*
5679 * Set up the pipe CSC unit.
5680 *
5681 * Currently only full range RGB to limited range RGB conversion
5682 * is supported, but eventually this should handle various
5683 * RGB<->YCbCr scenarios as well.
5684 */
50f3b016 5685static void intel_set_pipe_csc(struct drm_crtc *crtc)
86d3efce
VS
5686{
5687 struct drm_device *dev = crtc->dev;
5688 struct drm_i915_private *dev_priv = dev->dev_private;
5689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5690 int pipe = intel_crtc->pipe;
5691 uint16_t coeff = 0x7800; /* 1.0 */
5692
5693 /*
5694 * TODO: Check what kind of values actually come out of the pipe
5695 * with these coeff/postoff values and adjust to get the best
5696 * accuracy. Perhaps we even need to take the bpc value into
5697 * consideration.
5698 */
5699
50f3b016 5700 if (intel_crtc->config.limited_color_range)
86d3efce
VS
5701 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
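		/*
		 * For reference: (235 - 16) * 4096 / 255 = 3517 (0xdbd),
		 * masked down to 0xdb8, i.e. roughly 0.859 of full scale,
		 * matching the 16..235 limited output range.
		 */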
5702
5703 /*
5704 * GY/GU and RY/RU should be the other way around according
5705 * to BSpec, but reality doesn't agree. Just set them up in
5706 * a way that results in the correct picture.
5707 */
5708 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
5709 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
5710
5711 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
5712 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
5713
5714 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
5715 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
5716
5717 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
5718 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
5719 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
5720
5721 if (INTEL_INFO(dev)->gen > 6) {
5722 uint16_t postoff = 0;
5723
50f3b016 5724 if (intel_crtc->config.limited_color_range)
86d3efce
VS
5725 postoff = (16 * (1 << 13) / 255) & 0x1fff;
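			/*
			 * 16 * 8192 / 255 = 514 (0x202): the black level
			 * offset, about 16/255 of full scale.
			 */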
5726
5727 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
5728 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
5729 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
5730
5731 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
5732 } else {
5733 uint32_t mode = CSC_MODE_YUV_TO_RGB;
5734
50f3b016 5735 if (intel_crtc->config.limited_color_range)
86d3efce
VS
5736 mode |= CSC_BLACK_SCREEN_OFFSET;
5737
5738 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
5739 }
5740}
5741
6ff93609 5742static void haswell_set_pipeconf(struct drm_crtc *crtc)
ee2b0b38
PZ
5743{
5744 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5745 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3b117c8f 5746 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
ee2b0b38
PZ
5747 uint32_t val;
5748
3eff4faa 5749 val = 0;
ee2b0b38 5750
d8b32247 5751 if (intel_crtc->config.dither)
ee2b0b38
PZ
5752 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
5753
6ff93609 5754 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
5755 val |= PIPECONF_INTERLACED_ILK;
5756 else
5757 val |= PIPECONF_PROGRESSIVE;
5758
702e7a56
PZ
5759 I915_WRITE(PIPECONF(cpu_transcoder), val);
5760 POSTING_READ(PIPECONF(cpu_transcoder));
3eff4faa
DV
5761
5762 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
5763 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
ee2b0b38
PZ
5764}
5765
6591c6e4 5766static bool ironlake_compute_clocks(struct drm_crtc *crtc,
6591c6e4
PZ
5767 intel_clock_t *clock,
5768 bool *has_reduced_clock,
5769 intel_clock_t *reduced_clock)
5770{
5771 struct drm_device *dev = crtc->dev;
5772 struct drm_i915_private *dev_priv = dev->dev_private;
5773 struct intel_encoder *intel_encoder;
5774 int refclk;
d4906093 5775 const intel_limit_t *limit;
a16af721 5776 bool ret, is_lvds = false;
79e53945 5777
6591c6e4
PZ
5778 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5779 switch (intel_encoder->type) {
79e53945
JB
5780 case INTEL_OUTPUT_LVDS:
5781 is_lvds = true;
5782 break;
79e53945
JB
5783 }
5784 }
5785
d9d444cb 5786 refclk = ironlake_get_refclk(crtc);
79e53945 5787
d4906093
ML
5788 /*
5789 * Returns a set of divisors for the desired target clock with the given
5790 * refclk, or FALSE. The returned values represent the clock equation:
 5791	 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
5792 */
1b894b59 5793 limit = intel_limit(crtc, refclk);
ff9a6750
DV
5794 ret = dev_priv->display.find_dpll(limit, crtc,
5795 to_intel_crtc(crtc)->config.port_clock,
ee9300bb 5796 refclk, NULL, clock);
6591c6e4
PZ
5797 if (!ret)
5798 return false;
cda4b7d3 5799
ddc9003c 5800 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
5801 /*
5802 * Ensure we match the reduced clock's P to the target clock.
5803 * If the clocks don't match, we can't switch the display clock
 5804	 * by using the FP0/FP1. In such a case we will disable the LVDS
5805 * downclock feature.
5806 */
ee9300bb
DV
5807 *has_reduced_clock =
5808 dev_priv->display.find_dpll(limit, crtc,
5809 dev_priv->lvds_downclock,
5810 refclk, clock,
5811 reduced_clock);
652c393a 5812 }
61e9653f 5813
6591c6e4
PZ
5814 return true;
5815}
5816
01a415fd
DV
5817static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
5818{
5819 struct drm_i915_private *dev_priv = dev->dev_private;
5820 uint32_t temp;
5821
5822 temp = I915_READ(SOUTH_CHICKEN1);
5823 if (temp & FDI_BC_BIFURCATION_SELECT)
5824 return;
5825
5826 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5827 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5828
5829 temp |= FDI_BC_BIFURCATION_SELECT;
5830 DRM_DEBUG_KMS("enabling fdi C rx\n");
5831 I915_WRITE(SOUTH_CHICKEN1, temp);
5832 POSTING_READ(SOUTH_CHICKEN1);
5833}
5834
ebfd86fd 5835static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
01a415fd
DV
5836{
5837 struct drm_device *dev = intel_crtc->base.dev;
5838 struct drm_i915_private *dev_priv = dev->dev_private;
01a415fd
DV
5839
5840 switch (intel_crtc->pipe) {
5841 case PIPE_A:
ebfd86fd 5842 break;
01a415fd 5843 case PIPE_B:
ebfd86fd 5844 if (intel_crtc->config.fdi_lanes > 2)
01a415fd
DV
5845 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
5846 else
5847 cpt_enable_fdi_bc_bifurcation(dev);
5848
ebfd86fd 5849 break;
01a415fd 5850 case PIPE_C:
01a415fd
DV
5851 cpt_enable_fdi_bc_bifurcation(dev);
5852
ebfd86fd 5853 break;
01a415fd
DV
5854 default:
5855 BUG();
5856 }
5857}
5858
d4b1931c
PZ
5859int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
5860{
5861 /*
5862 * Account for spread spectrum to avoid
5863 * oversubscribing the link. Max center spread
5864 * is 2.5%; use 5% for safety's sake.
5865 */
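	/*
	 * Illustrative arithmetic only (assuming target_clock and link_bw
	 * use the same kHz-based units as the callers): a 148500 kHz mode
	 * at 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200, and with
	 * link_bw = 270000 that is 3742200 / 2160000 -> 1, plus one for
	 * slack = 2 lanes.
	 */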
5866 u32 bps = target_clock * bpp * 21 / 20;
5867 return bps / (link_bw * 8) + 1;
5868}
5869
7429e9d4 5870static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 5871{
7429e9d4 5872 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
5873}
5874
de13a2e3 5875static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7429e9d4 5876 u32 *fp,
9a7c7890 5877 intel_clock_t *reduced_clock, u32 *fp2)
79e53945 5878{
de13a2e3 5879 struct drm_crtc *crtc = &intel_crtc->base;
79e53945
JB
5880 struct drm_device *dev = crtc->dev;
5881 struct drm_i915_private *dev_priv = dev->dev_private;
de13a2e3
PZ
5882 struct intel_encoder *intel_encoder;
5883 uint32_t dpll;
6cc5f341 5884 int factor, num_connectors = 0;
09ede541 5885 bool is_lvds = false, is_sdvo = false;
79e53945 5886
de13a2e3
PZ
5887 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5888 switch (intel_encoder->type) {
79e53945
JB
5889 case INTEL_OUTPUT_LVDS:
5890 is_lvds = true;
5891 break;
5892 case INTEL_OUTPUT_SDVO:
7d57382e 5893 case INTEL_OUTPUT_HDMI:
79e53945 5894 is_sdvo = true;
79e53945 5895 break;
79e53945 5896 }
43565a06 5897
c751ce4f 5898 num_connectors++;
79e53945 5899 }
79e53945 5900
c1858123 5901 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
5902 factor = 21;
5903 if (is_lvds) {
5904 if ((intel_panel_use_ssc(dev_priv) &&
41aa3448 5905 dev_priv->vbt.lvds_ssc_freq == 100) ||
f0b44056 5906 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8febb297 5907 factor = 25;
09ede541 5908 } else if (intel_crtc->config.sdvo_tv_clock)
8febb297 5909 factor = 20;
c1858123 5910
7429e9d4 5911 if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor))
7d0ac5b7 5912 *fp |= FP_CB_TUNE;
2c07245f 5913
9a7c7890
DV
5914 if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
5915 *fp2 |= FP_CB_TUNE;
5916
5eddb70b 5917 dpll = 0;
2c07245f 5918
a07d6787
EA
5919 if (is_lvds)
5920 dpll |= DPLLB_MODE_LVDS;
5921 else
5922 dpll |= DPLLB_MODE_DAC_SERIAL;
198a037f 5923
ef1b460d
DV
5924 dpll |= (intel_crtc->config.pixel_multiplier - 1)
5925 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
198a037f
DV
5926
5927 if (is_sdvo)
4a33e48d 5928 dpll |= DPLL_SDVO_HIGH_SPEED;
9566e9af 5929 if (intel_crtc->config.has_dp_encoder)
4a33e48d 5930 dpll |= DPLL_SDVO_HIGH_SPEED;
79e53945 5931
a07d6787 5932 /* compute bitmask from p1 value */
7429e9d4 5933 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 5934 /* also FPA1 */
7429e9d4 5935 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
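	/* Both post divider fields are one-hot: e.g. p1 == 2 sets bit 1. */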
a07d6787 5936
7429e9d4 5937 switch (intel_crtc->config.dpll.p2) {
a07d6787
EA
5938 case 5:
5939 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
5940 break;
5941 case 7:
5942 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
5943 break;
5944 case 10:
5945 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
5946 break;
5947 case 14:
5948 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
5949 break;
79e53945
JB
5950 }
5951
b4c09f3b 5952 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 5953 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
5954 else
5955 dpll |= PLL_REF_INPUT_DREFCLK;
5956
959e16d6 5957 return dpll | DPLL_VCO_ENABLE;
de13a2e3
PZ
5958}
5959
5960static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
de13a2e3
PZ
5961 int x, int y,
5962 struct drm_framebuffer *fb)
5963{
5964 struct drm_device *dev = crtc->dev;
5965 struct drm_i915_private *dev_priv = dev->dev_private;
5966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5967 int pipe = intel_crtc->pipe;
5968 int plane = intel_crtc->plane;
5969 int num_connectors = 0;
5970 intel_clock_t clock, reduced_clock;
cbbab5bd 5971 u32 dpll = 0, fp = 0, fp2 = 0;
e2f12b07 5972 bool ok, has_reduced_clock = false;
8b47047b 5973 bool is_lvds = false;
de13a2e3 5974 struct intel_encoder *encoder;
e2b78267 5975 struct intel_shared_dpll *pll;
de13a2e3 5976 int ret;
de13a2e3
PZ
5977
5978 for_each_encoder_on_crtc(dev, crtc, encoder) {
5979 switch (encoder->type) {
5980 case INTEL_OUTPUT_LVDS:
5981 is_lvds = true;
5982 break;
de13a2e3
PZ
5983 }
5984
5985 num_connectors++;
a07d6787 5986 }
79e53945 5987
5dc5298b
PZ
5988 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
5989 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
a07d6787 5990
ff9a6750 5991 ok = ironlake_compute_clocks(crtc, &clock,
de13a2e3 5992 &has_reduced_clock, &reduced_clock);
ee9300bb 5993 if (!ok && !intel_crtc->config.clock_set) {
de13a2e3
PZ
5994 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5995 return -EINVAL;
79e53945 5996 }
f47709a9
DV
5997 /* Compat-code for transition, will disappear. */
5998 if (!intel_crtc->config.clock_set) {
5999 intel_crtc->config.dpll.n = clock.n;
6000 intel_crtc->config.dpll.m1 = clock.m1;
6001 intel_crtc->config.dpll.m2 = clock.m2;
6002 intel_crtc->config.dpll.p1 = clock.p1;
6003 intel_crtc->config.dpll.p2 = clock.p2;
6004 }
79e53945 6005
5dc5298b 6006 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8b47047b 6007 if (intel_crtc->config.has_pch_encoder) {
7429e9d4 6008 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
cbbab5bd 6009 if (has_reduced_clock)
7429e9d4 6010 fp2 = i9xx_dpll_compute_fp(&reduced_clock);
cbbab5bd 6011
7429e9d4 6012 dpll = ironlake_compute_dpll(intel_crtc,
cbbab5bd
DV
6013 &fp, &reduced_clock,
6014 has_reduced_clock ? &fp2 : NULL);
6015
959e16d6 6016 intel_crtc->config.dpll_hw_state.dpll = dpll;
66e985c0
DV
6017 intel_crtc->config.dpll_hw_state.fp0 = fp;
6018 if (has_reduced_clock)
6019 intel_crtc->config.dpll_hw_state.fp1 = fp2;
6020 else
6021 intel_crtc->config.dpll_hw_state.fp1 = fp;
6022
b89a1d39 6023 pll = intel_get_shared_dpll(intel_crtc);
ee7b9f93 6024 if (pll == NULL) {
84f44ce7
VS
6025 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
6026 pipe_name(pipe));
4b645f14
JB
6027 return -EINVAL;
6028 }
ee7b9f93 6029 } else
e72f9fbf 6030 intel_put_shared_dpll(intel_crtc);
79e53945 6031
03afc4a2
DV
6032 if (intel_crtc->config.has_dp_encoder)
6033 intel_dp_set_m_n(intel_crtc);
79e53945 6034
bcd644e0
DV
6035 if (is_lvds && has_reduced_clock && i915_powersave)
6036 intel_crtc->lowfreq_avail = true;
6037 else
6038 intel_crtc->lowfreq_avail = false;
e2b78267
DV
6039
6040 if (intel_crtc->config.has_pch_encoder) {
6041 pll = intel_crtc_to_shared_dpll(intel_crtc);
6042
652c393a
JB
6043 }
6044
8a654f3b 6045 intel_set_pipe_timings(intel_crtc);
5eddb70b 6046
ca3a0ff8 6047 if (intel_crtc->config.has_pch_encoder) {
ca3a0ff8
DV
6048 intel_cpu_transcoder_set_m_n(intel_crtc,
6049 &intel_crtc->config.fdi_m_n);
6050 }
2c07245f 6051
ebfd86fd
DV
6052 if (IS_IVYBRIDGE(dev))
6053 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
79e53945 6054
6ff93609 6055 ironlake_set_pipeconf(crtc);
79e53945 6056
a1f9e77e
PZ
6057 /* Set up the display plane register */
6058 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
b24e7179 6059 POSTING_READ(DSPCNTR(plane));
79e53945 6060
94352cf9 6061 ret = intel_pipe_set_base(crtc, x, y, fb);
7662c8bd 6062
1857e1da 6063 return ret;
79e53945
JB
6064}
6065
eb14cb74
VS
6066static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
6067 struct intel_link_m_n *m_n)
6068{
6069 struct drm_device *dev = crtc->base.dev;
6070 struct drm_i915_private *dev_priv = dev->dev_private;
6071 enum pipe pipe = crtc->pipe;
6072
6073 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
6074 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
6075 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
6076 & ~TU_SIZE_MASK;
6077 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
6078 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
6079 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6080}
6081
6082static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
6083 enum transcoder transcoder,
6084 struct intel_link_m_n *m_n)
72419203
DV
6085{
6086 struct drm_device *dev = crtc->base.dev;
6087 struct drm_i915_private *dev_priv = dev->dev_private;
eb14cb74 6088 enum pipe pipe = crtc->pipe;
72419203 6089
eb14cb74
VS
6090 if (INTEL_INFO(dev)->gen >= 5) {
6091 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
6092 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
6093 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
6094 & ~TU_SIZE_MASK;
6095 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
6096 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
6097 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6098 } else {
6099 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
6100 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
6101 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
6102 & ~TU_SIZE_MASK;
6103 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
6104 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
6105 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
6106 }
6107}
6108
6109void intel_dp_get_m_n(struct intel_crtc *crtc,
6110 struct intel_crtc_config *pipe_config)
6111{
6112 if (crtc->config.has_pch_encoder)
6113 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
6114 else
6115 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6116 &pipe_config->dp_m_n);
6117}
72419203 6118
eb14cb74
VS
6119static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
6120 struct intel_crtc_config *pipe_config)
6121{
6122 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
6123 &pipe_config->fdi_m_n);
72419203
DV
6124}
6125
2fa2fe9a
DV
6126static void ironlake_get_pfit_config(struct intel_crtc *crtc,
6127 struct intel_crtc_config *pipe_config)
6128{
6129 struct drm_device *dev = crtc->base.dev;
6130 struct drm_i915_private *dev_priv = dev->dev_private;
6131 uint32_t tmp;
6132
6133 tmp = I915_READ(PF_CTL(crtc->pipe));
6134
6135 if (tmp & PF_ENABLE) {
fd4daa9c 6136 pipe_config->pch_pfit.enabled = true;
2fa2fe9a
DV
6137 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
6138 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
cb8b2a30
DV
6139
 6140	 /* We currently do not free assignments of panel fitters on
 6141	 * ivb/hsw (since we don't use the higher upscaling modes which
 6142	 * differentiate them), so just WARN about this case for now. */
6143 if (IS_GEN7(dev)) {
6144 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
6145 PF_PIPE_SEL_IVB(crtc->pipe));
6146 }
2fa2fe9a 6147 }
79e53945
JB
6148}
6149
0e8ffe1b
DV
6150static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
6151 struct intel_crtc_config *pipe_config)
6152{
6153 struct drm_device *dev = crtc->base.dev;
6154 struct drm_i915_private *dev_priv = dev->dev_private;
6155 uint32_t tmp;
6156
e143a21c 6157 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62 6158 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
eccb140b 6159
0e8ffe1b
DV
6160 tmp = I915_READ(PIPECONF(crtc->pipe));
6161 if (!(tmp & PIPECONF_ENABLE))
6162 return false;
6163
42571aef
VS
6164 switch (tmp & PIPECONF_BPC_MASK) {
6165 case PIPECONF_6BPC:
6166 pipe_config->pipe_bpp = 18;
6167 break;
6168 case PIPECONF_8BPC:
6169 pipe_config->pipe_bpp = 24;
6170 break;
6171 case PIPECONF_10BPC:
6172 pipe_config->pipe_bpp = 30;
6173 break;
6174 case PIPECONF_12BPC:
6175 pipe_config->pipe_bpp = 36;
6176 break;
6177 default:
6178 break;
6179 }
6180
ab9412ba 6181 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
66e985c0
DV
6182 struct intel_shared_dpll *pll;
6183
88adfff1
DV
6184 pipe_config->has_pch_encoder = true;
6185
627eb5a3
DV
6186 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
6187 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6188 FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
6189
6190 ironlake_get_fdi_m_n_config(crtc, pipe_config);
6c49f241 6191
c0d43d62 6192 if (HAS_PCH_IBX(dev_priv->dev)) {
d94ab068
DV
6193 pipe_config->shared_dpll =
6194 (enum intel_dpll_id) crtc->pipe;
c0d43d62
DV
6195 } else {
6196 tmp = I915_READ(PCH_DPLL_SEL);
6197 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
6198 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
6199 else
6200 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
6201 }
66e985c0
DV
6202
6203 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
6204
6205 WARN_ON(!pll->get_hw_state(dev_priv, pll,
6206 &pipe_config->dpll_hw_state));
c93f54cf
DV
6207
6208 tmp = pipe_config->dpll_hw_state.dpll;
6209 pipe_config->pixel_multiplier =
6210 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
6211 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
18442d08
VS
6212
6213 ironlake_pch_clock_get(crtc, pipe_config);
6c49f241
DV
6214 } else {
6215 pipe_config->pixel_multiplier = 1;
627eb5a3
DV
6216 }
6217
1bd1bd80
DV
6218 intel_get_pipe_timings(crtc, pipe_config);
6219
2fa2fe9a
DV
6220 ironlake_get_pfit_config(crtc, pipe_config);
6221
0e8ffe1b
DV
6222 return true;
6223}
6224
be256dc7
PZ
6225static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
6226{
6227 struct drm_device *dev = dev_priv->dev;
6228 struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
6229 struct intel_crtc *crtc;
6230 unsigned long irqflags;
bd633a7c 6231 uint32_t val;
be256dc7
PZ
6232
6233 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6234 WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
6235 pipe_name(crtc->pipe));
6236
6237 WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
6238 WARN(plls->spll_refcount, "SPLL enabled\n");
6239 WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
6240 WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
6241 WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
6242 WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
6243 "CPU PWM1 enabled\n");
6244 WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
6245 "CPU PWM2 enabled\n");
6246 WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
6247 "PCH PWM1 enabled\n");
6248 WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
6249 "Utility pin enabled\n");
6250 WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
6251
6252 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
6253 val = I915_READ(DEIMR);
6254 WARN((val & ~DE_PCH_EVENT_IVB) != val,
6255 "Unexpected DEIMR bits enabled: 0x%x\n", val);
6256 val = I915_READ(SDEIMR);
bd633a7c 6257 WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
be256dc7
PZ
6258 "Unexpected SDEIMR bits enabled: 0x%x\n", val);
6259 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
6260}
6261
6262/*
6263 * This function implements pieces of two sequences from BSpec:
6264 * - Sequence for display software to disable LCPLL
6265 * - Sequence for display software to allow package C8+
6266 * The steps implemented here are just the steps that actually touch the LCPLL
6267 * register. Callers should take care of disabling all the display engine
6268 * functions, doing the mode unset, fixing interrupts, etc.
6269 */
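/*
 * In short, the function below optionally reparents the CD clock to FCLK,
 * sets LCPLL_PLL_DISABLE and waits for the PLL to unlock, disables D_COMP
 * through the pcode mailbox, and finally (if requested) allows LCPLL power
 * down.
 */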
6ff58d53
PZ
6270static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
6271 bool switch_to_fclk, bool allow_power_down)
be256dc7
PZ
6272{
6273 uint32_t val;
6274
6275 assert_can_disable_lcpll(dev_priv);
6276
6277 val = I915_READ(LCPLL_CTL);
6278
6279 if (switch_to_fclk) {
6280 val |= LCPLL_CD_SOURCE_FCLK;
6281 I915_WRITE(LCPLL_CTL, val);
6282
6283 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
6284 LCPLL_CD_SOURCE_FCLK_DONE, 1))
6285 DRM_ERROR("Switching to FCLK failed\n");
6286
6287 val = I915_READ(LCPLL_CTL);
6288 }
6289
6290 val |= LCPLL_PLL_DISABLE;
6291 I915_WRITE(LCPLL_CTL, val);
6292 POSTING_READ(LCPLL_CTL);
6293
6294 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
6295 DRM_ERROR("LCPLL still locked\n");
6296
6297 val = I915_READ(D_COMP);
6298 val |= D_COMP_COMP_DISABLE;
515b2392
PZ
6299 mutex_lock(&dev_priv->rps.hw_lock);
6300 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6301 DRM_ERROR("Failed to disable D_COMP\n");
6302 mutex_unlock(&dev_priv->rps.hw_lock);
be256dc7
PZ
6303 POSTING_READ(D_COMP);
6304 ndelay(100);
6305
6306 if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
6307 DRM_ERROR("D_COMP RCOMP still in progress\n");
6308
6309 if (allow_power_down) {
6310 val = I915_READ(LCPLL_CTL);
6311 val |= LCPLL_POWER_DOWN_ALLOW;
6312 I915_WRITE(LCPLL_CTL, val);
6313 POSTING_READ(LCPLL_CTL);
6314 }
6315}
6316
6317/*
6318 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
6319 * source.
6320 */
6ff58d53 6321static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
be256dc7
PZ
6322{
6323 uint32_t val;
6324
6325 val = I915_READ(LCPLL_CTL);
6326
6327 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
6328 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
6329 return;
6330
215733fa
PZ
 6331	/* Make sure we're not in PC8 state before disabling PC8, otherwise
6332 * we'll hang the machine! */
6333 dev_priv->uncore.funcs.force_wake_get(dev_priv);
6334
be256dc7
PZ
6335 if (val & LCPLL_POWER_DOWN_ALLOW) {
6336 val &= ~LCPLL_POWER_DOWN_ALLOW;
6337 I915_WRITE(LCPLL_CTL, val);
35d8f2eb 6338 POSTING_READ(LCPLL_CTL);
be256dc7
PZ
6339 }
6340
6341 val = I915_READ(D_COMP);
6342 val |= D_COMP_COMP_FORCE;
6343 val &= ~D_COMP_COMP_DISABLE;
515b2392
PZ
6344 mutex_lock(&dev_priv->rps.hw_lock);
6345 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val))
6346 DRM_ERROR("Failed to enable D_COMP\n");
6347 mutex_unlock(&dev_priv->rps.hw_lock);
35d8f2eb 6348 POSTING_READ(D_COMP);
be256dc7
PZ
6349
6350 val = I915_READ(LCPLL_CTL);
6351 val &= ~LCPLL_PLL_DISABLE;
6352 I915_WRITE(LCPLL_CTL, val);
6353
6354 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
6355 DRM_ERROR("LCPLL not locked yet\n");
6356
6357 if (val & LCPLL_CD_SOURCE_FCLK) {
6358 val = I915_READ(LCPLL_CTL);
6359 val &= ~LCPLL_CD_SOURCE_FCLK;
6360 I915_WRITE(LCPLL_CTL, val);
6361
6362 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
6363 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
6364 DRM_ERROR("Switching back to LCPLL failed\n");
6365 }
215733fa
PZ
6366
6367 dev_priv->uncore.funcs.force_wake_put(dev_priv);
be256dc7
PZ
6368}
6369
c67a470b
PZ
6370void hsw_enable_pc8_work(struct work_struct *__work)
6371{
6372 struct drm_i915_private *dev_priv =
6373 container_of(to_delayed_work(__work), struct drm_i915_private,
6374 pc8.enable_work);
6375 struct drm_device *dev = dev_priv->dev;
6376 uint32_t val;
6377
6378 if (dev_priv->pc8.enabled)
6379 return;
6380
6381 DRM_DEBUG_KMS("Enabling package C8+\n");
6382
6383 dev_priv->pc8.enabled = true;
6384
6385 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6386 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6387 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
6388 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6389 }
6390
6391 lpt_disable_clkout_dp(dev);
6392 hsw_pc8_disable_interrupts(dev);
6393 hsw_disable_lcpll(dev_priv, true, true);
6394}
6395
6396static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6397{
6398 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6399 WARN(dev_priv->pc8.disable_count < 1,
6400 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6401
6402 dev_priv->pc8.disable_count--;
6403 if (dev_priv->pc8.disable_count != 0)
6404 return;
6405
6406 schedule_delayed_work(&dev_priv->pc8.enable_work,
90058745 6407 msecs_to_jiffies(i915_pc8_timeout));
c67a470b
PZ
6408}
6409
6410static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6411{
6412 struct drm_device *dev = dev_priv->dev;
6413 uint32_t val;
6414
6415 WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
6416 WARN(dev_priv->pc8.disable_count < 0,
6417 "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
6418
6419 dev_priv->pc8.disable_count++;
6420 if (dev_priv->pc8.disable_count != 1)
6421 return;
6422
6423 cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
6424 if (!dev_priv->pc8.enabled)
6425 return;
6426
6427 DRM_DEBUG_KMS("Disabling package C8+\n");
6428
6429 hsw_restore_lcpll(dev_priv);
6430 hsw_pc8_restore_interrupts(dev);
6431 lpt_init_pch_refclk(dev);
6432
6433 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
6434 val = I915_READ(SOUTH_DSPCLK_GATE_D);
6435 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
6436 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
6437 }
6438
6439 intel_prepare_ddi(dev);
6440 i915_gem_init_swizzling(dev);
6441 mutex_lock(&dev_priv->rps.hw_lock);
6442 gen6_update_ring_freq(dev);
6443 mutex_unlock(&dev_priv->rps.hw_lock);
6444 dev_priv->pc8.enabled = false;
6445}
6446
6447void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
6448{
6449 mutex_lock(&dev_priv->pc8.lock);
6450 __hsw_enable_package_c8(dev_priv);
6451 mutex_unlock(&dev_priv->pc8.lock);
6452}
6453
6454void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
6455{
6456 mutex_lock(&dev_priv->pc8.lock);
6457 __hsw_disable_package_c8(dev_priv);
6458 mutex_unlock(&dev_priv->pc8.lock);
6459}
6460
6461static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
6462{
6463 struct drm_device *dev = dev_priv->dev;
6464 struct intel_crtc *crtc;
6465 uint32_t val;
6466
6467 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
6468 if (crtc->base.enabled)
6469 return false;
6470
6471 /* This case is still possible since we have the i915.disable_power_well
 6472	 * parameter, and the KVMr or something else might be requesting the
6473 * power well. */
6474 val = I915_READ(HSW_PWR_WELL_DRIVER);
6475 if (val != 0) {
6476 DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
6477 return false;
6478 }
6479
6480 return true;
6481}
6482
6483/* Since we're called from modeset_global_resources there's no way to
6484 * symmetrically increase and decrease the refcount, so we use
6485 * dev_priv->pc8.requirements_met to track whether we already have the refcount
6486 * or not.
6487 */
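/*
 * Other code paths pair hsw_disable_package_c8() with
 * hsw_enable_package_c8() around their hardware access, bumping
 * pc8.disable_count up and back down; requirements_met stands in for that
 * pairing here since there is no symmetric call site.
 */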
6488static void hsw_update_package_c8(struct drm_device *dev)
6489{
6490 struct drm_i915_private *dev_priv = dev->dev_private;
6491 bool allow;
6492
6493 if (!i915_enable_pc8)
6494 return;
6495
6496 mutex_lock(&dev_priv->pc8.lock);
6497
6498 allow = hsw_can_enable_package_c8(dev_priv);
6499
6500 if (allow == dev_priv->pc8.requirements_met)
6501 goto done;
6502
6503 dev_priv->pc8.requirements_met = allow;
6504
6505 if (allow)
6506 __hsw_enable_package_c8(dev_priv);
6507 else
6508 __hsw_disable_package_c8(dev_priv);
6509
6510done:
6511 mutex_unlock(&dev_priv->pc8.lock);
6512}
6513
6514static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
6515{
6516 if (!dev_priv->pc8.gpu_idle) {
6517 dev_priv->pc8.gpu_idle = true;
6518 hsw_enable_package_c8(dev_priv);
6519 }
6520}
6521
6522static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
6523{
6524 if (dev_priv->pc8.gpu_idle) {
6525 dev_priv->pc8.gpu_idle = false;
6526 hsw_disable_package_c8(dev_priv);
6527 }
be256dc7
PZ
6528}
6529
d6dd9eb1
DV
6530static void haswell_modeset_global_resources(struct drm_device *dev)
6531{
d6dd9eb1
DV
6532 bool enable = false;
6533 struct intel_crtc *crtc;
d6dd9eb1
DV
6534
6535 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
e7a639c4
DV
6536 if (!crtc->base.enabled)
6537 continue;
d6dd9eb1 6538
fd4daa9c 6539 if (crtc->pipe != PIPE_A || crtc->config.pch_pfit.enabled ||
e7a639c4 6540 crtc->config.cpu_transcoder != TRANSCODER_EDP)
d6dd9eb1
DV
6541 enable = true;
6542 }
6543
d6dd9eb1 6544 intel_set_power_well(dev, enable);
c67a470b
PZ
6545
6546 hsw_update_package_c8(dev);
d6dd9eb1
DV
6547}
6548
09b4ddf9 6549static int haswell_crtc_mode_set(struct drm_crtc *crtc,
09b4ddf9
PZ
6550 int x, int y,
6551 struct drm_framebuffer *fb)
6552{
6553 struct drm_device *dev = crtc->dev;
6554 struct drm_i915_private *dev_priv = dev->dev_private;
6555 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
09b4ddf9 6556 int plane = intel_crtc->plane;
09b4ddf9 6557 int ret;
09b4ddf9 6558
ff9a6750 6559 if (!intel_ddi_pll_mode_set(crtc))
6441ab5f
PZ
6560 return -EINVAL;
6561
03afc4a2
DV
6562 if (intel_crtc->config.has_dp_encoder)
6563 intel_dp_set_m_n(intel_crtc);
09b4ddf9
PZ
6564
6565 intel_crtc->lowfreq_avail = false;
09b4ddf9 6566
8a654f3b 6567 intel_set_pipe_timings(intel_crtc);
09b4ddf9 6568
ca3a0ff8 6569 if (intel_crtc->config.has_pch_encoder) {
ca3a0ff8
DV
6570 intel_cpu_transcoder_set_m_n(intel_crtc,
6571 &intel_crtc->config.fdi_m_n);
6572 }
09b4ddf9 6573
6ff93609 6574 haswell_set_pipeconf(crtc);
09b4ddf9 6575
50f3b016 6576 intel_set_pipe_csc(crtc);
86d3efce 6577
09b4ddf9 6578 /* Set up the display plane register */
86d3efce 6579 I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
09b4ddf9
PZ
6580 POSTING_READ(DSPCNTR(plane));
6581
6582 ret = intel_pipe_set_base(crtc, x, y, fb);
6583
1f803ee5 6584 return ret;
79e53945
JB
6585}
6586
0e8ffe1b
DV
6587static bool haswell_get_pipe_config(struct intel_crtc *crtc,
6588 struct intel_crtc_config *pipe_config)
6589{
6590 struct drm_device *dev = crtc->base.dev;
6591 struct drm_i915_private *dev_priv = dev->dev_private;
2fa2fe9a 6592 enum intel_display_power_domain pfit_domain;
0e8ffe1b
DV
6593 uint32_t tmp;
6594
e143a21c 6595 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62
DV
6596 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
6597
eccb140b
DV
6598 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
6599 if (tmp & TRANS_DDI_FUNC_ENABLE) {
6600 enum pipe trans_edp_pipe;
6601 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
6602 default:
6603 WARN(1, "unknown pipe linked to edp transcoder\n");
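			/* fall through: treat an unknown input as pipe A */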
6604 case TRANS_DDI_EDP_INPUT_A_ONOFF:
6605 case TRANS_DDI_EDP_INPUT_A_ON:
6606 trans_edp_pipe = PIPE_A;
6607 break;
6608 case TRANS_DDI_EDP_INPUT_B_ONOFF:
6609 trans_edp_pipe = PIPE_B;
6610 break;
6611 case TRANS_DDI_EDP_INPUT_C_ONOFF:
6612 trans_edp_pipe = PIPE_C;
6613 break;
6614 }
6615
6616 if (trans_edp_pipe == crtc->pipe)
6617 pipe_config->cpu_transcoder = TRANSCODER_EDP;
6618 }
6619
b97186f0 6620 if (!intel_display_power_enabled(dev,
eccb140b 6621 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
2bfce950
PZ
6622 return false;
6623
eccb140b 6624 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
0e8ffe1b
DV
6625 if (!(tmp & PIPECONF_ENABLE))
6626 return false;
6627
88adfff1 6628 /*
f196e6be 6629	 * Haswell has only FDI/PCH transcoder A. It is the one connected to
88adfff1
DV
6630 * DDI E. So just check whether this pipe is wired to DDI E and whether
6631 * the PCH transcoder is on.
6632 */
eccb140b 6633 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
88adfff1 6634 if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(PORT_E) &&
ab9412ba 6635 I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
88adfff1
DV
6636 pipe_config->has_pch_encoder = true;
6637
627eb5a3
DV
6638 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
6639 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
6640 FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
6641
6642 ironlake_get_fdi_m_n_config(crtc, pipe_config);
627eb5a3
DV
6643 }
6644
1bd1bd80
DV
6645 intel_get_pipe_timings(crtc, pipe_config);
6646
2fa2fe9a
DV
6647 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
6648 if (intel_display_power_enabled(dev, pfit_domain))
6649 ironlake_get_pfit_config(crtc, pipe_config);
88adfff1 6650
42db64ef
PZ
6651 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
6652 (I915_READ(IPS_CTL) & IPS_ENABLE);
6653
6c49f241
DV
6654 pipe_config->pixel_multiplier = 1;
6655
0e8ffe1b
DV
6656 return true;
6657}
6658
f564048e 6659static int intel_crtc_mode_set(struct drm_crtc *crtc,
f564048e 6660 int x, int y,
94352cf9 6661 struct drm_framebuffer *fb)
f564048e
EA
6662{
6663 struct drm_device *dev = crtc->dev;
6664 struct drm_i915_private *dev_priv = dev->dev_private;
9256aa19 6665 struct intel_encoder *encoder;
0b701d27 6666 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
b8cecdf5 6667 struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
0b701d27 6668 int pipe = intel_crtc->pipe;
f564048e
EA
6669 int ret;
6670
0b701d27 6671 drm_vblank_pre_modeset(dev, pipe);
7662c8bd 6672
b8cecdf5
DV
6673 ret = dev_priv->display.crtc_mode_set(crtc, x, y, fb);
6674
79e53945 6675 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 6676
9256aa19
DV
6677 if (ret != 0)
6678 return ret;
6679
6680 for_each_encoder_on_crtc(dev, crtc, encoder) {
6681 DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
6682 encoder->base.base.id,
6683 drm_get_encoder_name(&encoder->base),
6684 mode->base.id, mode->name);
36f2d1f1 6685 encoder->mode_set(encoder);
9256aa19
DV
6686 }
6687
6688 return 0;
79e53945
JB
6689}
6690
3a9627f4
WF
6691static bool intel_eld_uptodate(struct drm_connector *connector,
6692 int reg_eldv, uint32_t bits_eldv,
6693 int reg_elda, uint32_t bits_elda,
6694 int reg_edid)
6695{
6696 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6697 uint8_t *eld = connector->eld;
6698 uint32_t i;
6699
6700 i = I915_READ(reg_eldv);
6701 i &= bits_eldv;
6702
6703 if (!eld[0])
6704 return !i;
6705
6706 if (!i)
6707 return false;
6708
6709 i = I915_READ(reg_elda);
6710 i &= ~bits_elda;
6711 I915_WRITE(reg_elda, i);
6712
6713 for (i = 0; i < eld[2]; i++)
6714 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
6715 return false;
6716
6717 return true;
6718}
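/* The comparison above walks the cached ELD one 32-bit word at a time; eld[2]
 * holds the baseline ELD length in 4-byte units, and clearing the address bits
 * in reg_elda first rewinds the hardware's auto-incrementing read pointer.
 */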
6719
e0dac65e
WF
6720static void g4x_write_eld(struct drm_connector *connector,
6721 struct drm_crtc *crtc)
6722{
6723 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6724 uint8_t *eld = connector->eld;
6725 uint32_t eldv;
6726 uint32_t len;
6727 uint32_t i;
6728
6729 i = I915_READ(G4X_AUD_VID_DID);
6730
6731 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
6732 eldv = G4X_ELDV_DEVCL_DEVBLC;
6733 else
6734 eldv = G4X_ELDV_DEVCTG;
6735
3a9627f4
WF
6736 if (intel_eld_uptodate(connector,
6737 G4X_AUD_CNTL_ST, eldv,
6738 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
6739 G4X_HDMIW_HDMIEDID))
6740 return;
6741
e0dac65e
WF
6742 i = I915_READ(G4X_AUD_CNTL_ST);
6743 i &= ~(eldv | G4X_ELD_ADDR);
6744 len = (i >> 9) & 0x1f; /* ELD buffer size */
6745 I915_WRITE(G4X_AUD_CNTL_ST, i);
6746
6747 if (!eld[0])
6748 return;
6749
6750 len = min_t(uint8_t, eld[2], len);
6751 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6752 for (i = 0; i < len; i++)
6753 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
6754
6755 i = I915_READ(G4X_AUD_CNTL_ST);
6756 i |= eldv;
6757 I915_WRITE(G4X_AUD_CNTL_ST, i);
6758}
6759
83358c85
WX
6760static void haswell_write_eld(struct drm_connector *connector,
6761 struct drm_crtc *crtc)
6762{
6763 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6764 uint8_t *eld = connector->eld;
6765 struct drm_device *dev = crtc->dev;
7b9f35a6 6766 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
83358c85
WX
6767 uint32_t eldv;
6768 uint32_t i;
6769 int len;
6770 int pipe = to_intel_crtc(crtc)->pipe;
6771 int tmp;
6772
6773 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
6774 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
6775 int aud_config = HSW_AUD_CFG(pipe);
6776 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
6777
6778
6779 DRM_DEBUG_DRIVER("HDMI: Haswell Audio initialize....\n");
6780
6781 /* Audio output enable */
6782 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
6783 tmp = I915_READ(aud_cntrl_st2);
6784 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
6785 I915_WRITE(aud_cntrl_st2, tmp);
6786
6787 /* Wait for 1 vertical blank */
6788 intel_wait_for_vblank(dev, pipe);
6789
6790 /* Set ELD valid state */
6791 tmp = I915_READ(aud_cntrl_st2);
7e7cb34f 6792 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
83358c85
WX
6793 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
6794 I915_WRITE(aud_cntrl_st2, tmp);
6795 tmp = I915_READ(aud_cntrl_st2);
7e7cb34f 6796 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
83358c85
WX
6797
6798 /* Enable HDMI mode */
6799 tmp = I915_READ(aud_config);
7e7cb34f 6800 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
83358c85
WX
 6801	/* clear N_programming_enable and N_value_index */
6802 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
6803 I915_WRITE(aud_config, tmp);
6804
6805 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
6806
6807 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
7b9f35a6 6808 intel_crtc->eld_vld = true;
83358c85
WX
6809
6810 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6811 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6812 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
6813 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6814 } else
6815 I915_WRITE(aud_config, 0);
6816
6817 if (intel_eld_uptodate(connector,
6818 aud_cntrl_st2, eldv,
6819 aud_cntl_st, IBX_ELD_ADDRESS,
6820 hdmiw_hdmiedid))
6821 return;
6822
6823 i = I915_READ(aud_cntrl_st2);
6824 i &= ~eldv;
6825 I915_WRITE(aud_cntrl_st2, i);
6826
6827 if (!eld[0])
6828 return;
6829
6830 i = I915_READ(aud_cntl_st);
6831 i &= ~IBX_ELD_ADDRESS;
6832 I915_WRITE(aud_cntl_st, i);
6833 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
6834 DRM_DEBUG_DRIVER("port num:%d\n", i);
6835
6836 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
6837 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6838 for (i = 0; i < len; i++)
6839 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6840
6841 i = I915_READ(aud_cntrl_st2);
6842 i |= eldv;
6843 I915_WRITE(aud_cntrl_st2, i);
6844
6845}
6846
e0dac65e
WF
6847static void ironlake_write_eld(struct drm_connector *connector,
6848 struct drm_crtc *crtc)
6849{
6850 struct drm_i915_private *dev_priv = connector->dev->dev_private;
6851 uint8_t *eld = connector->eld;
6852 uint32_t eldv;
6853 uint32_t i;
6854 int len;
6855 int hdmiw_hdmiedid;
b6daa025 6856 int aud_config;
e0dac65e
WF
6857 int aud_cntl_st;
6858 int aud_cntrl_st2;
9b138a83 6859 int pipe = to_intel_crtc(crtc)->pipe;
e0dac65e 6860
b3f33cbf 6861 if (HAS_PCH_IBX(connector->dev)) {
9b138a83
WX
6862 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
6863 aud_config = IBX_AUD_CFG(pipe);
6864 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
1202b4c6 6865 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
e0dac65e 6866 } else {
9b138a83
WX
6867 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
6868 aud_config = CPT_AUD_CFG(pipe);
6869 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
1202b4c6 6870 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
e0dac65e
WF
6871 }
6872
9b138a83 6873 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
e0dac65e
WF
6874
6875 i = I915_READ(aud_cntl_st);
9b138a83 6876 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
e0dac65e
WF
6877 if (!i) {
6878 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
6879 /* operate blindly on all ports */
1202b4c6
WF
6880 eldv = IBX_ELD_VALIDB;
6881 eldv |= IBX_ELD_VALIDB << 4;
6882 eldv |= IBX_ELD_VALIDB << 8;
e0dac65e 6883 } else {
2582a850 6884 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
1202b4c6 6885 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
e0dac65e
WF
6886 }
6887
3a9627f4
WF
6888 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
6889 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6890 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
b6daa025
WF
6891 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
6892 } else
6893 I915_WRITE(aud_config, 0);
e0dac65e 6894
3a9627f4
WF
6895 if (intel_eld_uptodate(connector,
6896 aud_cntrl_st2, eldv,
6897 aud_cntl_st, IBX_ELD_ADDRESS,
6898 hdmiw_hdmiedid))
6899 return;
6900
e0dac65e
WF
6901 i = I915_READ(aud_cntrl_st2);
6902 i &= ~eldv;
6903 I915_WRITE(aud_cntrl_st2, i);
6904
6905 if (!eld[0])
6906 return;
6907
e0dac65e 6908 i = I915_READ(aud_cntl_st);
1202b4c6 6909 i &= ~IBX_ELD_ADDRESS;
e0dac65e
WF
6910 I915_WRITE(aud_cntl_st, i);
6911
6912 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
6913 DRM_DEBUG_DRIVER("ELD size %d\n", len);
6914 for (i = 0; i < len; i++)
6915 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
6916
6917 i = I915_READ(aud_cntrl_st2);
6918 i |= eldv;
6919 I915_WRITE(aud_cntrl_st2, i);
6920}
6921
6922void intel_write_eld(struct drm_encoder *encoder,
6923 struct drm_display_mode *mode)
6924{
6925 struct drm_crtc *crtc = encoder->crtc;
6926 struct drm_connector *connector;
6927 struct drm_device *dev = encoder->dev;
6928 struct drm_i915_private *dev_priv = dev->dev_private;
6929
6930 connector = drm_select_eld(encoder, mode);
6931 if (!connector)
6932 return;
6933
6934 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
6935 connector->base.id,
6936 drm_get_connector_name(connector),
6937 connector->encoder->base.id,
6938 drm_get_encoder_name(connector->encoder));
6939
6940 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
6941
6942 if (dev_priv->display.write_eld)
6943 dev_priv->display.write_eld(connector, crtc);
6944}
6945
560b85bb
CW
6946static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
6947{
6948 struct drm_device *dev = crtc->dev;
6949 struct drm_i915_private *dev_priv = dev->dev_private;
6950 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6951 bool visible = base != 0;
6952 u32 cntl;
6953
6954 if (intel_crtc->cursor_visible == visible)
6955 return;
6956
9db4a9c7 6957 cntl = I915_READ(_CURACNTR);
560b85bb
CW
6958 if (visible) {
6959 /* On these chipsets we can only modify the base whilst
6960 * the cursor is disabled.
6961 */
9db4a9c7 6962 I915_WRITE(_CURABASE, base);
560b85bb
CW
6963
6964 cntl &= ~(CURSOR_FORMAT_MASK);
6965 /* XXX width must be 64, stride 256 => 0x00 << 28 */
6966 cntl |= CURSOR_ENABLE |
6967 CURSOR_GAMMA_ENABLE |
6968 CURSOR_FORMAT_ARGB;
6969 } else
6970 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
9db4a9c7 6971 I915_WRITE(_CURACNTR, cntl);
560b85bb
CW
6972
6973 intel_crtc->cursor_visible = visible;
6974}
6975
6976static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
6977{
6978 struct drm_device *dev = crtc->dev;
6979 struct drm_i915_private *dev_priv = dev->dev_private;
6980 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6981 int pipe = intel_crtc->pipe;
6982 bool visible = base != 0;
6983
6984 if (intel_crtc->cursor_visible != visible) {
548f245b 6985 uint32_t cntl = I915_READ(CURCNTR(pipe));
560b85bb
CW
6986 if (base) {
6987 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
6988 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
6989 cntl |= pipe << 28; /* Connect to correct pipe */
6990 } else {
6991 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
6992 cntl |= CURSOR_MODE_DISABLE;
6993 }
9db4a9c7 6994 I915_WRITE(CURCNTR(pipe), cntl);
560b85bb
CW
6995
6996 intel_crtc->cursor_visible = visible;
6997 }
6998 /* and commit changes on next vblank */
9db4a9c7 6999 I915_WRITE(CURBASE(pipe), base);
560b85bb
CW
7000}
7001
65a21cd6
JB
7002static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
7003{
7004 struct drm_device *dev = crtc->dev;
7005 struct drm_i915_private *dev_priv = dev->dev_private;
7006 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7007 int pipe = intel_crtc->pipe;
7008 bool visible = base != 0;
7009
7010 if (intel_crtc->cursor_visible != visible) {
7011 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
7012 if (base) {
7013 cntl &= ~CURSOR_MODE;
7014 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
7015 } else {
7016 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
7017 cntl |= CURSOR_MODE_DISABLE;
7018 }
1f5d76db 7019 if (IS_HASWELL(dev)) {
86d3efce 7020 cntl |= CURSOR_PIPE_CSC_ENABLE;
1f5d76db
PZ
7021 cntl &= ~CURSOR_TRICKLE_FEED_DISABLE;
7022 }
65a21cd6
JB
7023 I915_WRITE(CURCNTR_IVB(pipe), cntl);
7024
7025 intel_crtc->cursor_visible = visible;
7026 }
7027 /* and commit changes on next vblank */
7028 I915_WRITE(CURBASE_IVB(pipe), base);
7029}
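/* Unlike the i9xx path above, the IVB/HSW cursor control registers are fully
 * per-pipe, so no MCURSOR_PIPE_SELECT bits need to be programmed here.
 */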
7030
cda4b7d3 7031/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6b383a7f
CW
7032static void intel_crtc_update_cursor(struct drm_crtc *crtc,
7033 bool on)
cda4b7d3
CW
7034{
7035 struct drm_device *dev = crtc->dev;
7036 struct drm_i915_private *dev_priv = dev->dev_private;
7037 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7038 int pipe = intel_crtc->pipe;
7039 int x = intel_crtc->cursor_x;
7040 int y = intel_crtc->cursor_y;
d6e4db15 7041 u32 base = 0, pos = 0;
cda4b7d3
CW
7042 bool visible;
7043
d6e4db15 7044 if (on)
cda4b7d3 7045 base = intel_crtc->cursor_addr;
cda4b7d3 7046
d6e4db15
VS
7047 if (x >= intel_crtc->config.pipe_src_w)
7048 base = 0;
7049
7050 if (y >= intel_crtc->config.pipe_src_h)
cda4b7d3
CW
7051 base = 0;
7052
7053 if (x < 0) {
efc9064e 7054 if (x + intel_crtc->cursor_width <= 0)
cda4b7d3
CW
7055 base = 0;
7056
7057 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
7058 x = -x;
7059 }
7060 pos |= x << CURSOR_X_SHIFT;
7061
7062 if (y < 0) {
efc9064e 7063 if (y + intel_crtc->cursor_height <= 0)
cda4b7d3
CW
7064 base = 0;
7065
7066 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
7067 y = -y;
7068 }
7069 pos |= y << CURSOR_Y_SHIFT;
7070
7071 visible = base != 0;
560b85bb 7072 if (!visible && !intel_crtc->cursor_visible)
cda4b7d3
CW
7073 return;
7074
0cd83aa9 7075 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
65a21cd6
JB
7076 I915_WRITE(CURPOS_IVB(pipe), pos);
7077 ivb_update_cursor(crtc, base);
7078 } else {
7079 I915_WRITE(CURPOS(pipe), pos);
7080 if (IS_845G(dev) || IS_I865G(dev))
7081 i845_update_cursor(crtc, base);
7082 else
7083 i9xx_update_cursor(crtc, base);
7084 }
cda4b7d3
CW
7085}
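/* CURPOS uses sign-magnitude encoding per axis: a negative coordinate is
 * written as its absolute value with the corresponding CURSOR_POS_SIGN bit
 * set, e.g. x = -16 becomes (CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT.
 */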
7086
79e53945 7087static int intel_crtc_cursor_set(struct drm_crtc *crtc,
05394f39 7088 struct drm_file *file,
79e53945
JB
7089 uint32_t handle,
7090 uint32_t width, uint32_t height)
7091{
7092 struct drm_device *dev = crtc->dev;
7093 struct drm_i915_private *dev_priv = dev->dev_private;
7094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 7095 struct drm_i915_gem_object *obj;
cda4b7d3 7096 uint32_t addr;
3f8bc370 7097 int ret;
79e53945 7098
79e53945
JB
7099 /* if we want to turn off the cursor ignore width and height */
7100 if (!handle) {
28c97730 7101 DRM_DEBUG_KMS("cursor off\n");
3f8bc370 7102 addr = 0;
05394f39 7103 obj = NULL;
5004417d 7104 mutex_lock(&dev->struct_mutex);
3f8bc370 7105 goto finish;
79e53945
JB
7106 }
7107
7108 /* Currently we only support 64x64 cursors */
7109 if (width != 64 || height != 64) {
7110 DRM_ERROR("we currently only support 64x64 cursors\n");
7111 return -EINVAL;
7112 }
7113
05394f39 7114 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 7115 if (&obj->base == NULL)
79e53945
JB
7116 return -ENOENT;
7117
05394f39 7118 if (obj->base.size < width * height * 4) {
79e53945 7119		DRM_ERROR("buffer is too small\n");
34b8686e
DA
7120 ret = -ENOMEM;
7121 goto fail;
79e53945
JB
7122 }
7123
71acb5eb 7124 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 7125 mutex_lock(&dev->struct_mutex);
b295d1b6 7126 if (!dev_priv->info->cursor_needs_physical) {
693db184
CW
7127 unsigned alignment;
7128
d9e86c0e
CW
7129 if (obj->tiling_mode) {
7130 DRM_ERROR("cursor cannot be tiled\n");
7131 ret = -EINVAL;
7132 goto fail_locked;
7133 }
7134
693db184
CW
7135 /* Note that the w/a also requires 2 PTE of padding following
7136 * the bo. We currently fill all unused PTE with the shadow
7137 * page and so we should always have valid PTE following the
7138 * cursor preventing the VT-d warning.
7139 */
7140 alignment = 0;
7141 if (need_vtd_wa(dev))
7142 alignment = 64*1024;
7143
7144 ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL);
e7b526bb
CW
7145 if (ret) {
7146 DRM_ERROR("failed to move cursor bo into the GTT\n");
2da3b9b9 7147 goto fail_locked;
e7b526bb
CW
7148 }
7149
d9e86c0e
CW
7150 ret = i915_gem_object_put_fence(obj);
7151 if (ret) {
2da3b9b9 7152			DRM_ERROR("failed to release fence for cursor\n");
d9e86c0e
CW
7153 goto fail_unpin;
7154 }
7155
f343c5f6 7156 addr = i915_gem_obj_ggtt_offset(obj);
71acb5eb 7157 } else {
6eeefaf3 7158 int align = IS_I830(dev) ? 16 * 1024 : 256;
05394f39 7159 ret = i915_gem_attach_phys_object(dev, obj,
6eeefaf3
CW
7160 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
7161 align);
71acb5eb
DA
7162 if (ret) {
7163 DRM_ERROR("failed to attach phys object\n");
7f9872e0 7164 goto fail_locked;
71acb5eb 7165 }
05394f39 7166 addr = obj->phys_obj->handle->busaddr;
3f8bc370
KH
7167 }
7168
a6c45cf0 7169 if (IS_GEN2(dev))
14b60391
JB
7170 I915_WRITE(CURSIZE, (height << 12) | width);
7171
3f8bc370 7172 finish:
3f8bc370 7173 if (intel_crtc->cursor_bo) {
b295d1b6 7174 if (dev_priv->info->cursor_needs_physical) {
05394f39 7175 if (intel_crtc->cursor_bo != obj)
71acb5eb
DA
7176 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
7177 } else
cc98b413 7178 i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
05394f39 7179 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
3f8bc370 7180 }
80824003 7181
7f9872e0 7182 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
7183
7184 intel_crtc->cursor_addr = addr;
05394f39 7185 intel_crtc->cursor_bo = obj;
cda4b7d3
CW
7186 intel_crtc->cursor_width = width;
7187 intel_crtc->cursor_height = height;
7188
f2f5f771
VS
7189 if (intel_crtc->active)
7190 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
3f8bc370 7191
79e53945 7192 return 0;
e7b526bb 7193fail_unpin:
cc98b413 7194 i915_gem_object_unpin_from_display_plane(obj);
7f9872e0 7195fail_locked:
34b8686e 7196 mutex_unlock(&dev->struct_mutex);
bc9025bd 7197fail:
05394f39 7198 drm_gem_object_unreference_unlocked(&obj->base);
34b8686e 7199 return ret;
79e53945
JB
7200}
7201
7202static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
7203{
79e53945 7204 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 7205
cda4b7d3
CW
7206 intel_crtc->cursor_x = x;
7207 intel_crtc->cursor_y = y;
652c393a 7208
f2f5f771
VS
7209 if (intel_crtc->active)
7210 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
79e53945
JB
7211
7212 return 0;
b8c00ac5
DA
7213}
7214
79e53945 7215static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 7216 u16 *blue, uint32_t start, uint32_t size)
79e53945 7217{
7203425a 7218 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 7219 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 7220
7203425a 7221 for (i = start; i < end; i++) {
79e53945
JB
7222 intel_crtc->lut_r[i] = red[i] >> 8;
7223 intel_crtc->lut_g[i] = green[i] >> 8;
7224 intel_crtc->lut_b[i] = blue[i] >> 8;
7225 }
7226
7227 intel_crtc_load_lut(crtc);
7228}
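/* The legacy gamma ioctl supplies 16-bit per-channel values; only the high
 * byte is kept (e.g. 0xffff -> 0xff) since the LUT entries loaded by
 * intel_crtc_load_lut() are 8 bits per channel.
 */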
7229
79e53945
JB
7230/* VESA 640x480x72Hz mode to set on the pipe */
7231static struct drm_display_mode load_detect_mode = {
7232 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
7233 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
7234};
7235
d2dff872
CW
7236static struct drm_framebuffer *
7237intel_framebuffer_create(struct drm_device *dev,
308e5bcb 7238 struct drm_mode_fb_cmd2 *mode_cmd,
d2dff872
CW
7239 struct drm_i915_gem_object *obj)
7240{
7241 struct intel_framebuffer *intel_fb;
7242 int ret;
7243
7244 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
7245 if (!intel_fb) {
7246 drm_gem_object_unreference_unlocked(&obj->base);
7247 return ERR_PTR(-ENOMEM);
7248 }
7249
7250 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
7251 if (ret) {
7252 drm_gem_object_unreference_unlocked(&obj->base);
7253 kfree(intel_fb);
7254 return ERR_PTR(ret);
7255 }
7256
7257 return &intel_fb->base;
7258}
7259
7260static u32
7261intel_framebuffer_pitch_for_width(int width, int bpp)
7262{
7263 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
7264 return ALIGN(pitch, 64);
7265}
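/* For example, a 1366-pixel-wide mode at 32bpp needs DIV_ROUND_UP(1366 * 32, 8)
 * = 5464 bytes per line, which ALIGN() rounds up to 5504, the next multiple of 64.
 */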
7266
7267static u32
7268intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
7269{
7270 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
7271 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
7272}
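/* Continuing the example above: 1366x768 at 32bpp gives 5504 * 768 = 4227072
 * bytes, already a whole number of 4K pages, so ALIGN() leaves it unchanged.
 */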
7273
7274static struct drm_framebuffer *
7275intel_framebuffer_create_for_mode(struct drm_device *dev,
7276 struct drm_display_mode *mode,
7277 int depth, int bpp)
7278{
7279 struct drm_i915_gem_object *obj;
0fed39bd 7280 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
d2dff872
CW
7281
7282 obj = i915_gem_alloc_object(dev,
7283 intel_framebuffer_size_for_mode(mode, bpp));
7284 if (obj == NULL)
7285 return ERR_PTR(-ENOMEM);
7286
7287 mode_cmd.width = mode->hdisplay;
7288 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
7289 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
7290 bpp);
5ca0c34a 7291 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872
CW
7292
7293 return intel_framebuffer_create(dev, &mode_cmd, obj);
7294}
7295
7296static struct drm_framebuffer *
7297mode_fits_in_fbdev(struct drm_device *dev,
7298 struct drm_display_mode *mode)
7299{
7300 struct drm_i915_private *dev_priv = dev->dev_private;
7301 struct drm_i915_gem_object *obj;
7302 struct drm_framebuffer *fb;
7303
7304 if (dev_priv->fbdev == NULL)
7305 return NULL;
7306
7307 obj = dev_priv->fbdev->ifb.obj;
7308 if (obj == NULL)
7309 return NULL;
7310
7311 fb = &dev_priv->fbdev->ifb.base;
01f2c773
VS
7312 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
7313 fb->bits_per_pixel))
d2dff872
CW
7314 return NULL;
7315
01f2c773 7316 if (obj->base.size < mode->vdisplay * fb->pitches[0])
d2dff872
CW
7317 return NULL;
7318
7319 return fb;
7320}
7321
d2434ab7 7322bool intel_get_load_detect_pipe(struct drm_connector *connector,
7173188d 7323 struct drm_display_mode *mode,
8261b191 7324 struct intel_load_detect_pipe *old)
79e53945
JB
7325{
7326 struct intel_crtc *intel_crtc;
d2434ab7
DV
7327 struct intel_encoder *intel_encoder =
7328 intel_attached_encoder(connector);
79e53945 7329 struct drm_crtc *possible_crtc;
4ef69c7a 7330 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
7331 struct drm_crtc *crtc = NULL;
7332 struct drm_device *dev = encoder->dev;
94352cf9 7333 struct drm_framebuffer *fb;
79e53945
JB
7334 int i = -1;
7335
d2dff872
CW
7336 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7337 connector->base.id, drm_get_connector_name(connector),
7338 encoder->base.id, drm_get_encoder_name(encoder));
7339
79e53945
JB
7340 /*
7341 * Algorithm gets a little messy:
7a5e4805 7342 *
79e53945
JB
7343 * - if the connector already has an assigned crtc, use it (but make
7344 * sure it's on first)
7a5e4805 7345 *
79e53945
JB
7346 * - try to find the first unused crtc that can drive this connector,
7347 * and use that if we find one
79e53945
JB
7348 */
7349
7350 /* See if we already have a CRTC for this connector */
7351 if (encoder->crtc) {
7352 crtc = encoder->crtc;
8261b191 7353
7b24056b
DV
7354 mutex_lock(&crtc->mutex);
7355
24218aac 7356 old->dpms_mode = connector->dpms;
8261b191
CW
7357 old->load_detect_temp = false;
7358
7359 /* Make sure the crtc and connector are running */
24218aac
DV
7360 if (connector->dpms != DRM_MODE_DPMS_ON)
7361 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
8261b191 7362
7173188d 7363 return true;
79e53945
JB
7364 }
7365
7366 /* Find an unused one (if possible) */
7367 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
7368 i++;
7369 if (!(encoder->possible_crtcs & (1 << i)))
7370 continue;
7371 if (!possible_crtc->enabled) {
7372 crtc = possible_crtc;
7373 break;
7374 }
79e53945
JB
7375 }
7376
7377 /*
7378 * If we didn't find an unused CRTC, don't use any.
7379 */
7380 if (!crtc) {
7173188d
CW
7381 DRM_DEBUG_KMS("no pipe available for load-detect\n");
7382 return false;
79e53945
JB
7383 }
7384
7b24056b 7385 mutex_lock(&crtc->mutex);
fc303101
DV
7386 intel_encoder->new_crtc = to_intel_crtc(crtc);
7387 to_intel_connector(connector)->new_encoder = intel_encoder;
79e53945
JB
7388
7389 intel_crtc = to_intel_crtc(crtc);
24218aac 7390 old->dpms_mode = connector->dpms;
8261b191 7391 old->load_detect_temp = true;
d2dff872 7392 old->release_fb = NULL;
79e53945 7393
6492711d
CW
7394 if (!mode)
7395 mode = &load_detect_mode;
79e53945 7396
d2dff872
CW
7397 /* We need a framebuffer large enough to accommodate all accesses
7398 * that the plane may generate whilst we perform load detection.
 7399	 * We cannot rely on the fbcon either being present (we get called
 7400	 * during its initialisation to detect all boot displays, or it may
 7401	 * not even exist) or being large enough to satisfy the
 7402	 * requested mode.
7403 */
94352cf9
DV
7404 fb = mode_fits_in_fbdev(dev, mode);
7405 if (fb == NULL) {
d2dff872 7406 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
94352cf9
DV
7407 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
7408 old->release_fb = fb;
d2dff872
CW
7409 } else
7410 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
94352cf9 7411 if (IS_ERR(fb)) {
d2dff872 7412 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
7b24056b 7413 mutex_unlock(&crtc->mutex);
0e8b3d3e 7414 return false;
79e53945 7415 }
79e53945 7416
c0c36b94 7417 if (intel_set_mode(crtc, mode, 0, 0, fb)) {
6492711d 7418 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
d2dff872
CW
7419 if (old->release_fb)
7420 old->release_fb->funcs->destroy(old->release_fb);
7b24056b 7421 mutex_unlock(&crtc->mutex);
0e8b3d3e 7422 return false;
79e53945 7423 }
7173188d 7424
79e53945 7425 /* let the connector get through one full cycle before testing */
9d0498a2 7426 intel_wait_for_vblank(dev, intel_crtc->pipe);
7173188d 7427 return true;
79e53945
JB
7428}
7429
d2434ab7 7430void intel_release_load_detect_pipe(struct drm_connector *connector,
8261b191 7431 struct intel_load_detect_pipe *old)
79e53945 7432{
d2434ab7
DV
7433 struct intel_encoder *intel_encoder =
7434 intel_attached_encoder(connector);
4ef69c7a 7435 struct drm_encoder *encoder = &intel_encoder->base;
7b24056b 7436 struct drm_crtc *crtc = encoder->crtc;
79e53945 7437
d2dff872
CW
7438 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
7439 connector->base.id, drm_get_connector_name(connector),
7440 encoder->base.id, drm_get_encoder_name(encoder));
7441
8261b191 7442 if (old->load_detect_temp) {
fc303101
DV
7443 to_intel_connector(connector)->new_encoder = NULL;
7444 intel_encoder->new_crtc = NULL;
7445 intel_set_mode(crtc, NULL, 0, 0, NULL);
d2dff872 7446
36206361
DV
7447 if (old->release_fb) {
7448 drm_framebuffer_unregister_private(old->release_fb);
7449 drm_framebuffer_unreference(old->release_fb);
7450 }
d2dff872 7451
67c96400 7452 mutex_unlock(&crtc->mutex);
0622a53c 7453 return;
79e53945
JB
7454 }
7455
c751ce4f 7456 /* Switch crtc and encoder back off if necessary */
24218aac
DV
7457 if (old->dpms_mode != DRM_MODE_DPMS_ON)
7458 connector->funcs->dpms(connector, old->dpms_mode);
7b24056b
DV
7459
7460 mutex_unlock(&crtc->mutex);
79e53945
JB
7461}
7462
da4a1efa
VS
7463static int i9xx_pll_refclk(struct drm_device *dev,
7464 const struct intel_crtc_config *pipe_config)
7465{
7466 struct drm_i915_private *dev_priv = dev->dev_private;
7467 u32 dpll = pipe_config->dpll_hw_state.dpll;
7468
7469 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
7470 return dev_priv->vbt.lvds_ssc_freq * 1000;
7471 else if (HAS_PCH_SPLIT(dev))
7472 return 120000;
7473 else if (!IS_GEN2(dev))
7474 return 96000;
7475 else
7476 return 48000;
7477}
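/* Returned values are in kHz: the SSC reference comes from the VBT (apparently
 * stored in MHz, hence the * 1000), 120 MHz on PCH platforms, 96 MHz on gen3+
 * and 48 MHz on gen2.
 */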
7478
79e53945 7479/* Returns the clock of the currently programmed mode of the given pipe. */
f1f644dc
JB
7480static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7481 struct intel_crtc_config *pipe_config)
79e53945 7482{
f1f644dc 7483 struct drm_device *dev = crtc->base.dev;
79e53945 7484 struct drm_i915_private *dev_priv = dev->dev_private;
f1f644dc 7485 int pipe = pipe_config->cpu_transcoder;
293623f7 7486 u32 dpll = pipe_config->dpll_hw_state.dpll;
79e53945
JB
7487 u32 fp;
7488 intel_clock_t clock;
da4a1efa 7489 int refclk = i9xx_pll_refclk(dev, pipe_config);
79e53945
JB
7490
7491 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
293623f7 7492 fp = pipe_config->dpll_hw_state.fp0;
79e53945 7493 else
293623f7 7494 fp = pipe_config->dpll_hw_state.fp1;
79e53945
JB
7495
7496 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
7497 if (IS_PINEVIEW(dev)) {
7498 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
7499 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
7500 } else {
7501 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
7502 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
7503 }
7504
a6c45cf0 7505 if (!IS_GEN2(dev)) {
f2b115e6
AJ
7506 if (IS_PINEVIEW(dev))
7507 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
7508 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
7509 else
7510 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
7511 DPLL_FPA01_P1_POST_DIV_SHIFT);
7512
7513 switch (dpll & DPLL_MODE_MASK) {
7514 case DPLLB_MODE_DAC_SERIAL:
7515 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
7516 5 : 10;
7517 break;
7518 case DPLLB_MODE_LVDS:
7519 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
7520 7 : 14;
7521 break;
7522 default:
28c97730 7523 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945 7524 "mode\n", (int)(dpll & DPLL_MODE_MASK));
f1f644dc 7525 return;
79e53945
JB
7526 }
7527
ac58c3f0 7528 if (IS_PINEVIEW(dev))
da4a1efa 7529 pineview_clock(refclk, &clock);
ac58c3f0 7530 else
da4a1efa 7531 i9xx_clock(refclk, &clock);
79e53945
JB
7532 } else {
7533 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
7534
7535 if (is_lvds) {
7536 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
7537 DPLL_FPA01_P1_POST_DIV_SHIFT);
7538 clock.p2 = 14;
79e53945
JB
7539 } else {
7540 if (dpll & PLL_P1_DIVIDE_BY_TWO)
7541 clock.p1 = 2;
7542 else {
7543 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
7544 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
7545 }
7546 if (dpll & PLL_P2_DIVIDE_BY_4)
7547 clock.p2 = 4;
7548 else
7549 clock.p2 = 2;
79e53945 7550 }
da4a1efa
VS
7551
7552 i9xx_clock(refclk, &clock);
79e53945
JB
7553 }
7554
18442d08
VS
7555 /*
7556 * This value includes pixel_multiplier. We will use
241bfc38 7557 * port_clock to compute adjusted_mode.crtc_clock in the
18442d08
VS
7558 * encoder's get_config() function.
7559 */
7560 pipe_config->port_clock = clock.dot;
f1f644dc
JB
7561}
7562
6878da05
VS
7563int intel_dotclock_calculate(int link_freq,
7564 const struct intel_link_m_n *m_n)
f1f644dc 7565{
f1f644dc
JB
7566 /*
7567 * The calculation for the data clock is:
1041a02f 7568 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
f1f644dc 7569	 * But we want to avoid losing precision if possible, so:
1041a02f 7570 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
f1f644dc
JB
7571 *
7572 * and the link clock is simpler:
1041a02f 7573	 * link_clock = (link_m * link_freq) / link_n
f1f644dc
JB
7574 */
7575
6878da05
VS
7576 if (!m_n->link_n)
7577 return 0;
f1f644dc 7578
6878da05
VS
7579 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
7580}
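/* Example: with link_m = 100000, link_n = 200000 and a 270000 kHz link clock
 * this returns (100000 * 270000) / 200000 = 135000, i.e. a 135 MHz dotclock.
 */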
f1f644dc 7581
18442d08
VS
7582static void ironlake_pch_clock_get(struct intel_crtc *crtc,
7583 struct intel_crtc_config *pipe_config)
6878da05
VS
7584{
7585 struct drm_device *dev = crtc->base.dev;
79e53945 7586
18442d08
VS
7587 /* read out port_clock from the DPLL */
7588 i9xx_crtc_clock_get(crtc, pipe_config);
f1f644dc 7589
f1f644dc 7590 /*
18442d08 7591 * This value does not include pixel_multiplier.
241bfc38 7592 * We will check that port_clock and adjusted_mode.crtc_clock
18442d08
VS
7593 * agree once we know their relationship in the encoder's
7594 * get_config() function.
79e53945 7595 */
241bfc38 7596 pipe_config->adjusted_mode.crtc_clock =
18442d08
VS
7597 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
7598 &pipe_config->fdi_m_n);
79e53945
JB
7599}
7600
7601/** Returns the currently programmed mode of the given pipe. */
7602struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
7603 struct drm_crtc *crtc)
7604{
548f245b 7605 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 7606 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3b117c8f 7607 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
79e53945 7608 struct drm_display_mode *mode;
f1f644dc 7609 struct intel_crtc_config pipe_config;
fe2b8f9d
PZ
7610 int htot = I915_READ(HTOTAL(cpu_transcoder));
7611 int hsync = I915_READ(HSYNC(cpu_transcoder));
7612 int vtot = I915_READ(VTOTAL(cpu_transcoder));
7613 int vsync = I915_READ(VSYNC(cpu_transcoder));
293623f7 7614 enum pipe pipe = intel_crtc->pipe;
79e53945
JB
7615
7616 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
7617 if (!mode)
7618 return NULL;
7619
f1f644dc
JB
7620 /*
7621 * Construct a pipe_config sufficient for getting the clock info
7622 * back out of crtc_clock_get.
7623 *
7624 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
7625 * to use a real value here instead.
7626 */
293623f7 7627 pipe_config.cpu_transcoder = (enum transcoder) pipe;
f1f644dc 7628 pipe_config.pixel_multiplier = 1;
293623f7
VS
7629 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
7630 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
7631 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
f1f644dc
JB
7632 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
7633
773ae034 7634 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
79e53945
JB
7635 mode->hdisplay = (htot & 0xffff) + 1;
7636 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
7637 mode->hsync_start = (hsync & 0xffff) + 1;
7638 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
7639 mode->vdisplay = (vtot & 0xffff) + 1;
7640 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
7641 mode->vsync_start = (vsync & 0xffff) + 1;
7642 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
7643
7644 drm_mode_set_name(mode);
79e53945
JB
7645
7646 return mode;
7647}
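/* The timing registers store each value minus one, with the active/start
 * count in the low 16 bits and the total/end count in the high 16 bits, hence
 * the masks, shifts and "+ 1" adjustments above.
 */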
7648
3dec0095 7649static void intel_increase_pllclock(struct drm_crtc *crtc)
652c393a
JB
7650{
7651 struct drm_device *dev = crtc->dev;
7652 drm_i915_private_t *dev_priv = dev->dev_private;
7653 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7654 int pipe = intel_crtc->pipe;
dbdc6479
JB
7655 int dpll_reg = DPLL(pipe);
7656 int dpll;
652c393a 7657
bad720ff 7658 if (HAS_PCH_SPLIT(dev))
652c393a
JB
7659 return;
7660
7661 if (!dev_priv->lvds_downclock_avail)
7662 return;
7663
dbdc6479 7664 dpll = I915_READ(dpll_reg);
652c393a 7665 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 7666 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a 7667
8ac5a6d5 7668 assert_panel_unlocked(dev_priv, pipe);
652c393a
JB
7669
7670 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
7671 I915_WRITE(dpll_reg, dpll);
9d0498a2 7672 intel_wait_for_vblank(dev, pipe);
dbdc6479 7673
652c393a
JB
7674 dpll = I915_READ(dpll_reg);
7675 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 7676 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a 7677 }
652c393a
JB
7678}
7679
7680static void intel_decrease_pllclock(struct drm_crtc *crtc)
7681{
7682 struct drm_device *dev = crtc->dev;
7683 drm_i915_private_t *dev_priv = dev->dev_private;
7684 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 7685
bad720ff 7686 if (HAS_PCH_SPLIT(dev))
652c393a
JB
7687 return;
7688
7689 if (!dev_priv->lvds_downclock_avail)
7690 return;
7691
7692 /*
7693 * Since this is called by a timer, we should never get here in
7694 * the manual case.
7695 */
7696 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
dc257cf1
DV
7697 int pipe = intel_crtc->pipe;
7698 int dpll_reg = DPLL(pipe);
7699 int dpll;
f6e5b160 7700
44d98a61 7701 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a 7702
8ac5a6d5 7703 assert_panel_unlocked(dev_priv, pipe);
652c393a 7704
dc257cf1 7705 dpll = I915_READ(dpll_reg);
652c393a
JB
7706 dpll |= DISPLAY_RATE_SELECT_FPA1;
7707 I915_WRITE(dpll_reg, dpll);
9d0498a2 7708 intel_wait_for_vblank(dev, pipe);
652c393a
JB
7709 dpll = I915_READ(dpll_reg);
7710 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 7711 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
7712 }
7713
7714}
7715
f047e395
CW
7716void intel_mark_busy(struct drm_device *dev)
7717{
c67a470b
PZ
7718 struct drm_i915_private *dev_priv = dev->dev_private;
7719
7720 hsw_package_c8_gpu_busy(dev_priv);
7721 i915_update_gfx_val(dev_priv);
f047e395
CW
7722}
7723
7724void intel_mark_idle(struct drm_device *dev)
652c393a 7725{
c67a470b 7726 struct drm_i915_private *dev_priv = dev->dev_private;
652c393a 7727 struct drm_crtc *crtc;
652c393a 7728
c67a470b
PZ
7729 hsw_package_c8_gpu_idle(dev_priv);
7730
652c393a
JB
7731 if (!i915_powersave)
7732 return;
7733
652c393a 7734 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652c393a
JB
7735 if (!crtc->fb)
7736 continue;
7737
725a5b54 7738 intel_decrease_pllclock(crtc);
652c393a 7739 }
b29c19b6
CW
7740
7741 if (dev_priv->info->gen >= 6)
7742 gen6_rps_idle(dev->dev_private);
652c393a
JB
7743}
7744
c65355bb
CW
7745void intel_mark_fb_busy(struct drm_i915_gem_object *obj,
7746 struct intel_ring_buffer *ring)
652c393a 7747{
f047e395
CW
7748 struct drm_device *dev = obj->base.dev;
7749 struct drm_crtc *crtc;
652c393a 7750
f047e395 7751 if (!i915_powersave)
acb87dfb
CW
7752 return;
7753
652c393a
JB
7754 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7755 if (!crtc->fb)
7756 continue;
7757
c65355bb
CW
7758 if (to_intel_framebuffer(crtc->fb)->obj != obj)
7759 continue;
7760
7761 intel_increase_pllclock(crtc);
7762 if (ring && intel_fbc_enabled(dev))
7763 ring->fbc_dirty = true;
652c393a
JB
7764 }
7765}
7766
79e53945
JB
7767static void intel_crtc_destroy(struct drm_crtc *crtc)
7768{
7769 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a
DV
7770 struct drm_device *dev = crtc->dev;
7771 struct intel_unpin_work *work;
7772 unsigned long flags;
7773
7774 spin_lock_irqsave(&dev->event_lock, flags);
7775 work = intel_crtc->unpin_work;
7776 intel_crtc->unpin_work = NULL;
7777 spin_unlock_irqrestore(&dev->event_lock, flags);
7778
7779 if (work) {
7780 cancel_work_sync(&work->work);
7781 kfree(work);
7782 }
79e53945 7783
40ccc72b
MK
7784 intel_crtc_cursor_set(crtc, NULL, 0, 0, 0);
7785
79e53945 7786 drm_crtc_cleanup(crtc);
67e77c5a 7787
79e53945
JB
7788 kfree(intel_crtc);
7789}
7790
6b95a207
KH
7791static void intel_unpin_work_fn(struct work_struct *__work)
7792{
7793 struct intel_unpin_work *work =
7794 container_of(__work, struct intel_unpin_work, work);
b4a98e57 7795 struct drm_device *dev = work->crtc->dev;
6b95a207 7796
b4a98e57 7797 mutex_lock(&dev->struct_mutex);
1690e1eb 7798 intel_unpin_fb_obj(work->old_fb_obj);
05394f39
CW
7799 drm_gem_object_unreference(&work->pending_flip_obj->base);
7800 drm_gem_object_unreference(&work->old_fb_obj->base);
d9e86c0e 7801
b4a98e57
CW
7802 intel_update_fbc(dev);
7803 mutex_unlock(&dev->struct_mutex);
7804
7805 BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
7806 atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
7807
6b95a207
KH
7808 kfree(work);
7809}
7810
1afe3e9d 7811static void do_intel_finish_page_flip(struct drm_device *dev,
49b14a5c 7812 struct drm_crtc *crtc)
6b95a207
KH
7813{
7814 drm_i915_private_t *dev_priv = dev->dev_private;
6b95a207
KH
7815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7816 struct intel_unpin_work *work;
6b95a207
KH
7817 unsigned long flags;
7818
7819 /* Ignore early vblank irqs */
7820 if (intel_crtc == NULL)
7821 return;
7822
7823 spin_lock_irqsave(&dev->event_lock, flags);
7824 work = intel_crtc->unpin_work;
e7d841ca
CW
7825
7826 /* Ensure we don't miss a work->pending update ... */
7827 smp_rmb();
7828
7829 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
6b95a207
KH
7830 spin_unlock_irqrestore(&dev->event_lock, flags);
7831 return;
7832 }
7833
e7d841ca
CW
7834 /* and that the unpin work is consistent wrt ->pending. */
7835 smp_rmb();
7836
6b95a207 7837 intel_crtc->unpin_work = NULL;
6b95a207 7838
45a066eb
RC
7839 if (work->event)
7840 drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
6b95a207 7841
0af7e4df
MK
7842 drm_vblank_put(dev, intel_crtc->pipe);
7843
6b95a207
KH
7844 spin_unlock_irqrestore(&dev->event_lock, flags);
7845
2c10d571 7846 wake_up_all(&dev_priv->pending_flip_queue);
b4a98e57
CW
7847
7848 queue_work(dev_priv->wq, &work->work);
e5510fac
JB
7849
7850 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6b95a207
KH
7851}
7852
1afe3e9d
JB
7853void intel_finish_page_flip(struct drm_device *dev, int pipe)
7854{
7855 drm_i915_private_t *dev_priv = dev->dev_private;
7856 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
7857
49b14a5c 7858 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
7859}
7860
7861void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
7862{
7863 drm_i915_private_t *dev_priv = dev->dev_private;
7864 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
7865
49b14a5c 7866 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
7867}
7868
6b95a207
KH
7869void intel_prepare_page_flip(struct drm_device *dev, int plane)
7870{
7871 drm_i915_private_t *dev_priv = dev->dev_private;
7872 struct intel_crtc *intel_crtc =
7873 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
7874 unsigned long flags;
7875
e7d841ca
CW
7876 /* NB: An MMIO update of the plane base pointer will also
7877 * generate a page-flip completion irq, i.e. every modeset
7878 * is also accompanied by a spurious intel_prepare_page_flip().
7879 */
6b95a207 7880 spin_lock_irqsave(&dev->event_lock, flags);
e7d841ca
CW
7881 if (intel_crtc->unpin_work)
7882 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
6b95a207
KH
7883 spin_unlock_irqrestore(&dev->event_lock, flags);
7884}
7885
e7d841ca
CW
7886static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
7887{
7888 /* Ensure that the work item is consistent when activating it ... */
7889 smp_wmb();
7890 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
7891 /* and that it is marked active as soon as the irq could fire. */
7892 smp_wmb();
7893}
7894
8c9f3aaf
JB
7895static int intel_gen2_queue_flip(struct drm_device *dev,
7896 struct drm_crtc *crtc,
7897 struct drm_framebuffer *fb,
ed8d1975
KP
7898 struct drm_i915_gem_object *obj,
7899 uint32_t flags)
8c9f3aaf
JB
7900{
7901 struct drm_i915_private *dev_priv = dev->dev_private;
7902 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 7903 u32 flip_mask;
6d90c952 7904 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
7905 int ret;
7906
6d90c952 7907 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 7908 if (ret)
83d4092b 7909 goto err;
8c9f3aaf 7910
6d90c952 7911 ret = intel_ring_begin(ring, 6);
8c9f3aaf 7912 if (ret)
83d4092b 7913 goto err_unpin;
8c9f3aaf
JB
7914
7915 /* Can't queue multiple flips, so wait for the previous
7916 * one to finish before executing the next.
7917 */
7918 if (intel_crtc->plane)
7919 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7920 else
7921 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
7922 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7923 intel_ring_emit(ring, MI_NOOP);
7924 intel_ring_emit(ring, MI_DISPLAY_FLIP |
7925 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7926 intel_ring_emit(ring, fb->pitches[0]);
f343c5f6 7927 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
6d90c952 7928 intel_ring_emit(ring, 0); /* aux display base address, unused */
e7d841ca
CW
7929
7930 intel_mark_page_flip_active(intel_crtc);
09246732 7931 __intel_ring_advance(ring);
83d4092b
CW
7932 return 0;
7933
7934err_unpin:
7935 intel_unpin_fb_obj(obj);
7936err:
8c9f3aaf
JB
7937 return ret;
7938}
7939
7940static int intel_gen3_queue_flip(struct drm_device *dev,
7941 struct drm_crtc *crtc,
7942 struct drm_framebuffer *fb,
ed8d1975
KP
7943 struct drm_i915_gem_object *obj,
7944 uint32_t flags)
8c9f3aaf
JB
7945{
7946 struct drm_i915_private *dev_priv = dev->dev_private;
7947 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf 7948 u32 flip_mask;
6d90c952 7949 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
7950 int ret;
7951
6d90c952 7952 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 7953 if (ret)
83d4092b 7954 goto err;
8c9f3aaf 7955
6d90c952 7956 ret = intel_ring_begin(ring, 6);
8c9f3aaf 7957 if (ret)
83d4092b 7958 goto err_unpin;
8c9f3aaf
JB
7959
7960 if (intel_crtc->plane)
7961 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
7962 else
7963 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
7964 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
7965 intel_ring_emit(ring, MI_NOOP);
7966 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
7967 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
7968 intel_ring_emit(ring, fb->pitches[0]);
f343c5f6 7969 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
6d90c952
DV
7970 intel_ring_emit(ring, MI_NOOP);
7971
e7d841ca 7972 intel_mark_page_flip_active(intel_crtc);
09246732 7973 __intel_ring_advance(ring);
83d4092b
CW
7974 return 0;
7975
7976err_unpin:
7977 intel_unpin_fb_obj(obj);
7978err:
8c9f3aaf
JB
7979 return ret;
7980}
7981
7982static int intel_gen4_queue_flip(struct drm_device *dev,
7983 struct drm_crtc *crtc,
7984 struct drm_framebuffer *fb,
ed8d1975
KP
7985 struct drm_i915_gem_object *obj,
7986 uint32_t flags)
8c9f3aaf
JB
7987{
7988 struct drm_i915_private *dev_priv = dev->dev_private;
7989 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7990 uint32_t pf, pipesrc;
6d90c952 7991 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
7992 int ret;
7993
6d90c952 7994 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 7995 if (ret)
83d4092b 7996 goto err;
8c9f3aaf 7997
6d90c952 7998 ret = intel_ring_begin(ring, 4);
8c9f3aaf 7999 if (ret)
83d4092b 8000 goto err_unpin;
8c9f3aaf
JB
8001
8002 /* i965+ uses the linear or tiled offsets from the
8003 * Display Registers (which do not change across a page-flip)
8004 * so we need only reprogram the base address.
8005 */
6d90c952
DV
8006 intel_ring_emit(ring, MI_DISPLAY_FLIP |
8007 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8008 intel_ring_emit(ring, fb->pitches[0]);
c2c75131 8009 intel_ring_emit(ring,
f343c5f6 8010 (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
c2c75131 8011 obj->tiling_mode);
8c9f3aaf
JB
8012
8013 /* XXX Enabling the panel-fitter across page-flip is so far
8014 * untested on non-native modes, so ignore it for now.
8015 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
8016 */
8017 pf = 0;
8018 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952 8019 intel_ring_emit(ring, pf | pipesrc);
e7d841ca
CW
8020
8021 intel_mark_page_flip_active(intel_crtc);
09246732 8022 __intel_ring_advance(ring);
83d4092b
CW
8023 return 0;
8024
8025err_unpin:
8026 intel_unpin_fb_obj(obj);
8027err:
8c9f3aaf
JB
8028 return ret;
8029}
8030
8031static int intel_gen6_queue_flip(struct drm_device *dev,
8032 struct drm_crtc *crtc,
8033 struct drm_framebuffer *fb,
ed8d1975
KP
8034 struct drm_i915_gem_object *obj,
8035 uint32_t flags)
8c9f3aaf
JB
8036{
8037 struct drm_i915_private *dev_priv = dev->dev_private;
8038 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6d90c952 8039 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
8040 uint32_t pf, pipesrc;
8041 int ret;
8042
6d90c952 8043 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 8044 if (ret)
83d4092b 8045 goto err;
8c9f3aaf 8046
6d90c952 8047 ret = intel_ring_begin(ring, 4);
8c9f3aaf 8048 if (ret)
83d4092b 8049 goto err_unpin;
8c9f3aaf 8050
6d90c952
DV
8051 intel_ring_emit(ring, MI_DISPLAY_FLIP |
8052 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
8053 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
f343c5f6 8054 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
8c9f3aaf 8055
dc257cf1
DV
8056 /* Contrary to the suggestions in the documentation,
8057 * "Enable Panel Fitter" does not seem to be required when page
8058 * flipping with a non-native mode, and worse causes a normal
8059 * modeset to fail.
8060 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
8061 */
8062 pf = 0;
8c9f3aaf 8063 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952 8064 intel_ring_emit(ring, pf | pipesrc);
e7d841ca
CW
8065
8066 intel_mark_page_flip_active(intel_crtc);
09246732 8067 __intel_ring_advance(ring);
83d4092b
CW
8068 return 0;
8069
8070err_unpin:
8071 intel_unpin_fb_obj(obj);
8072err:
8c9f3aaf
JB
8073 return ret;
8074}
8075
7c9017e5
JB
8076static int intel_gen7_queue_flip(struct drm_device *dev,
8077 struct drm_crtc *crtc,
8078 struct drm_framebuffer *fb,
ed8d1975
KP
8079 struct drm_i915_gem_object *obj,
8080 uint32_t flags)
7c9017e5
JB
8081{
8082 struct drm_i915_private *dev_priv = dev->dev_private;
8083 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ffe74d75 8084 struct intel_ring_buffer *ring;
cb05d8de 8085 uint32_t plane_bit = 0;
ffe74d75
CW
8086 int len, ret;
8087
8088 ring = obj->ring;
1c5fd085 8089 if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS)
ffe74d75 8090 ring = &dev_priv->ring[BCS];
7c9017e5
JB
8091
8092 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8093 if (ret)
83d4092b 8094 goto err;
7c9017e5 8095
cb05d8de
DV
 8096	switch (intel_crtc->plane) {
8097 case PLANE_A:
8098 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
8099 break;
8100 case PLANE_B:
8101 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
8102 break;
8103 case PLANE_C:
8104 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
8105 break;
8106 default:
8107 WARN_ONCE(1, "unknown plane in flip command\n");
8108 ret = -ENODEV;
ab3951eb 8109 goto err_unpin;
cb05d8de
DV
8110 }
8111
ffe74d75
CW
8112 len = 4;
8113 if (ring->id == RCS)
8114 len += 6;
8115
8116 ret = intel_ring_begin(ring, len);
7c9017e5 8117 if (ret)
83d4092b 8118 goto err_unpin;
7c9017e5 8119
ffe74d75
CW
8120 /* Unmask the flip-done completion message. Note that the bspec says that
8121 * we should do this for both the BCS and RCS, and that we must not unmask
8122 * more than one flip event at any time (or ensure that one flip message
8123 * can be sent by waiting for flip-done prior to queueing new flips).
8124 * Experimentation says that BCS works despite DERRMR masking all
8125 * flip-done completion events and that unmasking all planes at once
8126 * for the RCS also doesn't appear to drop events. Setting the DERRMR
8127 * to zero does lead to lockups within MI_DISPLAY_FLIP.
8128 */
8129 if (ring->id == RCS) {
8130 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
8131 intel_ring_emit(ring, DERRMR);
8132 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
8133 DERRMR_PIPEB_PRI_FLIP_DONE |
8134 DERRMR_PIPEC_PRI_FLIP_DONE));
8135 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1));
8136 intel_ring_emit(ring, DERRMR);
8137 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
8138 }
8139
cb05d8de 8140 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
01f2c773 8141 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
f343c5f6 8142 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
7c9017e5 8143 intel_ring_emit(ring, (MI_NOOP));
e7d841ca
CW
8144
8145 intel_mark_page_flip_active(intel_crtc);
09246732 8146 __intel_ring_advance(ring);
83d4092b
CW
8147 return 0;
8148
8149err_unpin:
8150 intel_unpin_fb_obj(obj);
8151err:
7c9017e5
JB
8152 return ret;
8153}
8154
8c9f3aaf
JB
8155static int intel_default_queue_flip(struct drm_device *dev,
8156 struct drm_crtc *crtc,
8157 struct drm_framebuffer *fb,
ed8d1975
KP
8158 struct drm_i915_gem_object *obj,
8159 uint32_t flags)
8c9f3aaf
JB
8160{
8161 return -ENODEV;
8162}
8163
6b95a207
KH
8164static int intel_crtc_page_flip(struct drm_crtc *crtc,
8165 struct drm_framebuffer *fb,
ed8d1975
KP
8166 struct drm_pending_vblank_event *event,
8167 uint32_t page_flip_flags)
6b95a207
KH
8168{
8169 struct drm_device *dev = crtc->dev;
8170 struct drm_i915_private *dev_priv = dev->dev_private;
4a35f83b
VS
8171 struct drm_framebuffer *old_fb = crtc->fb;
8172 struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
6b95a207
KH
8173 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8174 struct intel_unpin_work *work;
8c9f3aaf 8175 unsigned long flags;
52e68630 8176 int ret;
6b95a207 8177
e6a595d2
VS
8178 /* Can't change pixel format via MI display flips. */
8179 if (fb->pixel_format != crtc->fb->pixel_format)
8180 return -EINVAL;
8181
8182 /*
8183 * TILEOFF/LINOFF registers can't be changed via MI display flips.
 8184	 * Note that pitch changes could also affect these registers.
8185 */
8186 if (INTEL_INFO(dev)->gen > 3 &&
8187 (fb->offsets[0] != crtc->fb->offsets[0] ||
8188 fb->pitches[0] != crtc->fb->pitches[0]))
8189 return -EINVAL;
8190
b14c5679 8191 work = kzalloc(sizeof(*work), GFP_KERNEL);
6b95a207
KH
8192 if (work == NULL)
8193 return -ENOMEM;
8194
6b95a207 8195 work->event = event;
b4a98e57 8196 work->crtc = crtc;
4a35f83b 8197 work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
6b95a207
KH
8198 INIT_WORK(&work->work, intel_unpin_work_fn);
8199
7317c75e
JB
8200 ret = drm_vblank_get(dev, intel_crtc->pipe);
8201 if (ret)
8202 goto free_work;
8203
6b95a207
KH
8204 /* We borrow the event spin lock for protecting unpin_work */
8205 spin_lock_irqsave(&dev->event_lock, flags);
8206 if (intel_crtc->unpin_work) {
8207 spin_unlock_irqrestore(&dev->event_lock, flags);
8208 kfree(work);
7317c75e 8209 drm_vblank_put(dev, intel_crtc->pipe);
468f0b44
CW
8210
8211 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
8212 return -EBUSY;
8213 }
8214 intel_crtc->unpin_work = work;
8215 spin_unlock_irqrestore(&dev->event_lock, flags);
8216
b4a98e57
CW
8217 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
8218 flush_workqueue(dev_priv->wq);
8219
79158103
CW
8220 ret = i915_mutex_lock_interruptible(dev);
8221 if (ret)
8222 goto cleanup;
6b95a207 8223
75dfca80 8224 /* Reference the objects for the scheduled work. */
05394f39
CW
8225 drm_gem_object_reference(&work->old_fb_obj->base);
8226 drm_gem_object_reference(&obj->base);
6b95a207
KH
8227
8228 crtc->fb = fb;
96b099fd 8229
e1f99ce6 8230 work->pending_flip_obj = obj;
e1f99ce6 8231
4e5359cd
SF
8232 work->enable_stall_check = true;
8233
b4a98e57 8234 atomic_inc(&intel_crtc->unpin_work_count);
10d83730 8235 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
e1f99ce6 8236
ed8d1975 8237 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
8c9f3aaf
JB
8238 if (ret)
8239 goto cleanup_pending;
6b95a207 8240
7782de3b 8241 intel_disable_fbc(dev);
c65355bb 8242 intel_mark_fb_busy(obj, NULL);
6b95a207
KH
8243 mutex_unlock(&dev->struct_mutex);
8244
e5510fac
JB
8245 trace_i915_flip_request(intel_crtc->plane, obj);
8246
6b95a207 8247 return 0;
96b099fd 8248
8c9f3aaf 8249cleanup_pending:
b4a98e57 8250 atomic_dec(&intel_crtc->unpin_work_count);
4a35f83b 8251 crtc->fb = old_fb;
05394f39
CW
8252 drm_gem_object_unreference(&work->old_fb_obj->base);
8253 drm_gem_object_unreference(&obj->base);
96b099fd
CW
8254 mutex_unlock(&dev->struct_mutex);
8255
79158103 8256cleanup:
96b099fd
CW
8257 spin_lock_irqsave(&dev->event_lock, flags);
8258 intel_crtc->unpin_work = NULL;
8259 spin_unlock_irqrestore(&dev->event_lock, flags);
8260
7317c75e
JB
8261 drm_vblank_put(dev, intel_crtc->pipe);
8262free_work:
96b099fd
CW
8263 kfree(work);
8264
8265 return ret;
6b95a207
KH
8266}
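/*
 * In short (a rough sketch of the flow above): flips that MI_DISPLAY_FLIP
 * cannot express (format, offset or pitch changes) are rejected up front;
 * then we take a vblank reference, claim crtc->unpin_work under the event
 * spinlock so only one flip can be pending per crtc, reference the old and
 * new fb objects and hand off to the platform ->queue_flip() hook, with
 * every error path unwinding in reverse order.
 */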
8267
f6e5b160 8268static struct drm_crtc_helper_funcs intel_helper_funcs = {
f6e5b160
CW
8269 .mode_set_base_atomic = intel_pipe_set_base_atomic,
8270 .load_lut = intel_crtc_load_lut,
f6e5b160
CW
8271};
8272
50f56119
DV
8273static bool intel_encoder_crtc_ok(struct drm_encoder *encoder,
8274 struct drm_crtc *crtc)
8275{
8276 struct drm_device *dev;
8277 struct drm_crtc *tmp;
8278 int crtc_mask = 1;
47f1c6c9 8279
50f56119 8280 WARN(!crtc, "checking null crtc?\n");
47f1c6c9 8281
50f56119 8282 dev = crtc->dev;
47f1c6c9 8283
50f56119
DV
8284 list_for_each_entry(tmp, &dev->mode_config.crtc_list, head) {
8285 if (tmp == crtc)
8286 break;
8287 crtc_mask <<= 1;
8288 }
47f1c6c9 8289
50f56119
DV
8290 if (encoder->possible_crtcs & crtc_mask)
8291 return true;
8292 return false;
47f1c6c9 8293}
79e53945 8294
9a935856
DV
8295/**
8296 * intel_modeset_update_staged_output_state
8297 *
8298 * Updates the staged output configuration state, e.g. after we've read out the
8299 * current hw state.
8300 */
8301static void intel_modeset_update_staged_output_state(struct drm_device *dev)
f6e5b160 8302{
9a935856
DV
8303 struct intel_encoder *encoder;
8304 struct intel_connector *connector;
f6e5b160 8305
9a935856
DV
8306 list_for_each_entry(connector, &dev->mode_config.connector_list,
8307 base.head) {
8308 connector->new_encoder =
8309 to_intel_encoder(connector->base.encoder);
8310 }
f6e5b160 8311
9a935856
DV
8312 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8313 base.head) {
8314 encoder->new_crtc =
8315 to_intel_crtc(encoder->base.crtc);
8316 }
f6e5b160
CW
8317}
8318
9a935856
DV
8319/**
8320 * intel_modeset_commit_output_state
8321 *
8322 * This function copies the staged display pipe configuration to the real one.
8323 */
8324static void intel_modeset_commit_output_state(struct drm_device *dev)
8325{
8326 struct intel_encoder *encoder;
8327 struct intel_connector *connector;
f6e5b160 8328
9a935856
DV
8329 list_for_each_entry(connector, &dev->mode_config.connector_list,
8330 base.head) {
8331 connector->base.encoder = &connector->new_encoder->base;
8332 }
f6e5b160 8333
9a935856
DV
8334 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8335 base.head) {
8336 encoder->base.crtc = &encoder->new_crtc->base;
8337 }
8338}
8339
050f7aeb
DV
8340static void
8341connected_sink_compute_bpp(struct intel_connector * connector,
8342 struct intel_crtc_config *pipe_config)
8343{
8344 int bpp = pipe_config->pipe_bpp;
8345
8346 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
8347 connector->base.base.id,
8348 drm_get_connector_name(&connector->base));
8349
8350 /* Don't use an invalid EDID bpc value */
8351 if (connector->base.display_info.bpc &&
8352 connector->base.display_info.bpc * 3 < bpp) {
8353 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
8354 bpp, connector->base.display_info.bpc*3);
8355 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
8356 }
8357
8358 /* Clamp bpp to 8 bpc (i.e. 24 bpp) on screens without EDID 1.4 */
8359 if (connector->base.display_info.bpc == 0 && bpp > 24) {
8360 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
8361 bpp);
8362 pipe_config->pipe_bpp = 24;
8363 }
8364}
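/*
 * Illustrative example: with a 6 bpc EDID and an 8 bpc (24 bpp) plane
 * format, 6 * 3 = 18 < 24, so pipe_bpp is clamped to 18. A sink that
 * reports no bpc at all is assumed to handle at most 8 bpc, i.e. 24 bpp.
 */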
8365
4e53c2e0 8366static int
050f7aeb
DV
8367compute_baseline_pipe_bpp(struct intel_crtc *crtc,
8368 struct drm_framebuffer *fb,
8369 struct intel_crtc_config *pipe_config)
4e53c2e0 8370{
050f7aeb
DV
8371 struct drm_device *dev = crtc->base.dev;
8372 struct intel_connector *connector;
4e53c2e0
DV
8373 int bpp;
8374
d42264b1
DV
8375 switch (fb->pixel_format) {
8376 case DRM_FORMAT_C8:
4e53c2e0
DV
8377 bpp = 8*3; /* since we go through a colormap */
8378 break;
d42264b1
DV
8379 case DRM_FORMAT_XRGB1555:
8380 case DRM_FORMAT_ARGB1555:
8381 /* checked in intel_framebuffer_init already */
8382 if (WARN_ON(INTEL_INFO(dev)->gen > 3))
8383 return -EINVAL;
8384 case DRM_FORMAT_RGB565:
4e53c2e0
DV
8385 bpp = 6*3; /* min is 18bpp */
8386 break;
d42264b1
DV
8387 case DRM_FORMAT_XBGR8888:
8388 case DRM_FORMAT_ABGR8888:
8389 /* checked in intel_framebuffer_init already */
8390 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
8391 return -EINVAL;
8392 case DRM_FORMAT_XRGB8888:
8393 case DRM_FORMAT_ARGB8888:
4e53c2e0
DV
8394 bpp = 8*3;
8395 break;
d42264b1
DV
8396 case DRM_FORMAT_XRGB2101010:
8397 case DRM_FORMAT_ARGB2101010:
8398 case DRM_FORMAT_XBGR2101010:
8399 case DRM_FORMAT_ABGR2101010:
8400 /* checked in intel_framebuffer_init already */
8401 if (WARN_ON(INTEL_INFO(dev)->gen < 4))
baba133a 8402 return -EINVAL;
4e53c2e0
DV
8403 bpp = 10*3;
8404 break;
baba133a 8405 /* TODO: gen4+ supports 16 bpc floating point, too. */
4e53c2e0
DV
8406 default:
8407 DRM_DEBUG_KMS("unsupported depth\n");
8408 return -EINVAL;
8409 }
8410
4e53c2e0
DV
8411 pipe_config->pipe_bpp = bpp;
8412
8413 /* Clamp display bpp to EDID value */
8414 list_for_each_entry(connector, &dev->mode_config.connector_list,
050f7aeb 8415 base.head) {
1b829e05
DV
8416 if (!connector->new_encoder ||
8417 connector->new_encoder->new_crtc != crtc)
4e53c2e0
DV
8418 continue;
8419
050f7aeb 8420 connected_sink_compute_bpp(connector, pipe_config);
4e53c2e0
DV
8421 }
8422
8423 return bpp;
8424}
8425
644db711
DV
8426static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
8427{
8428 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
8429 "type: 0x%x flags: 0x%x\n",
1342830c 8430 mode->crtc_clock,
644db711
DV
8431 mode->crtc_hdisplay, mode->crtc_hsync_start,
8432 mode->crtc_hsync_end, mode->crtc_htotal,
8433 mode->crtc_vdisplay, mode->crtc_vsync_start,
8434 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
8435}
8436
c0b03411
DV
8437static void intel_dump_pipe_config(struct intel_crtc *crtc,
8438 struct intel_crtc_config *pipe_config,
8439 const char *context)
8440{
8441 DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
8442 context, pipe_name(crtc->pipe));
8443
8444 DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
8445 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
8446 pipe_config->pipe_bpp, pipe_config->dither);
8447 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8448 pipe_config->has_pch_encoder,
8449 pipe_config->fdi_lanes,
8450 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
8451 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
8452 pipe_config->fdi_m_n.tu);
eb14cb74
VS
8453 DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
8454 pipe_config->has_dp_encoder,
8455 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
8456 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
8457 pipe_config->dp_m_n.tu);
c0b03411
DV
8458 DRM_DEBUG_KMS("requested mode:\n");
8459 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
8460 DRM_DEBUG_KMS("adjusted mode:\n");
8461 drm_mode_debug_printmodeline(&pipe_config->adjusted_mode);
644db711 8462 intel_dump_crtc_timings(&pipe_config->adjusted_mode);
d71b8d4a 8463 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
37327abd
VS
8464 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
8465 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
c0b03411
DV
8466 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
8467 pipe_config->gmch_pfit.control,
8468 pipe_config->gmch_pfit.pgm_ratios,
8469 pipe_config->gmch_pfit.lvds_border_bits);
fd4daa9c 8470 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
c0b03411 8471 pipe_config->pch_pfit.pos,
fd4daa9c
CW
8472 pipe_config->pch_pfit.size,
8473 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
42db64ef 8474 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
cf532bb2 8475 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
c0b03411
DV
8476}
8477
accfc0c5
DV
8478static bool check_encoder_cloning(struct drm_crtc *crtc)
8479{
8480 int num_encoders = 0;
8481 bool uncloneable_encoders = false;
8482 struct intel_encoder *encoder;
8483
8484 list_for_each_entry(encoder, &crtc->dev->mode_config.encoder_list,
8485 base.head) {
8486 if (&encoder->new_crtc->base != crtc)
8487 continue;
8488
8489 num_encoders++;
8490 if (!encoder->cloneable)
8491 uncloneable_encoders = true;
8492 }
8493
8494 return !(num_encoders > 1 && uncloneable_encoders);
8495}
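/*
 * In other words: a crtc may drive several encoders only when every one of
 * them is marked cloneable; a single encoder, cloneable or not, is always
 * acceptable.
 */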
8496
b8cecdf5
DV
8497static struct intel_crtc_config *
8498intel_modeset_pipe_config(struct drm_crtc *crtc,
4e53c2e0 8499 struct drm_framebuffer *fb,
b8cecdf5 8500 struct drm_display_mode *mode)
ee7b9f93 8501{
7758a113 8502 struct drm_device *dev = crtc->dev;
7758a113 8503 struct intel_encoder *encoder;
b8cecdf5 8504 struct intel_crtc_config *pipe_config;
e29c22c0
DV
8505 int plane_bpp, ret = -EINVAL;
8506 bool retry = true;
ee7b9f93 8507
accfc0c5
DV
8508 if (!check_encoder_cloning(crtc)) {
8509 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
8510 return ERR_PTR(-EINVAL);
8511 }
8512
b8cecdf5
DV
8513 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
8514 if (!pipe_config)
7758a113
DV
8515 return ERR_PTR(-ENOMEM);
8516
b8cecdf5
DV
8517 drm_mode_copy(&pipe_config->adjusted_mode, mode);
8518 drm_mode_copy(&pipe_config->requested_mode, mode);
37327abd 8519
e143a21c
DV
8520 pipe_config->cpu_transcoder =
8521 (enum transcoder) to_intel_crtc(crtc)->pipe;
c0d43d62 8522 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
b8cecdf5 8523
2960bc9c
ID
8524 /*
8525 * Sanitize sync polarity flags based on requested ones. If neither
8526 * positive nor negative polarity is requested, treat this as meaning
8527 * negative polarity.
8528 */
8529 if (!(pipe_config->adjusted_mode.flags &
8530 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
8531 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
8532
8533 if (!(pipe_config->adjusted_mode.flags &
8534 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
8535 pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
8536
050f7aeb
DV
8537 /* Compute a starting value for pipe_config->pipe_bpp taking the source
8538 * plane pixel format and any sink constraints into account. Returns the
8539 * source plane bpp so that dithering can be selected on mismatches
8540 * after encoders and crtc also have had their say. */
8541 plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
8542 fb, pipe_config);
4e53c2e0
DV
8543 if (plane_bpp < 0)
8544 goto fail;
8545
e41a56be
VS
8546 /*
8547 * Determine the real pipe dimensions. Note that stereo modes can
8548 * increase the actual pipe size due to the frame doubling and
8549 * insertion of additional space for blanks between the frames. This
8550 * is stored in the crtc timings. We use the requested mode to do this
8551 * computation to clearly distinguish it from the adjusted mode, which
8552 * can be changed by the connectors in the below retry loop.
8553 */
8554 drm_mode_set_crtcinfo(&pipe_config->requested_mode, CRTC_STEREO_DOUBLE);
8555 pipe_config->pipe_src_w = pipe_config->requested_mode.crtc_hdisplay;
8556 pipe_config->pipe_src_h = pipe_config->requested_mode.crtc_vdisplay;
8557
e29c22c0 8558encoder_retry:
ef1b460d 8559 /* Ensure the port clock defaults are reset when retrying. */
ff9a6750 8560 pipe_config->port_clock = 0;
ef1b460d 8561 pipe_config->pixel_multiplier = 1;
ff9a6750 8562
135c81b8 8563 /* Fill in default crtc timings, allow encoders to overwrite them. */
6ce70f5e 8564 drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, CRTC_STEREO_DOUBLE);
135c81b8 8565
7758a113
DV
8566 /* Pass our mode to the connectors and the CRTC to give them a chance to
8567 * adjust it according to limitations or connector properties, and also
8568 * a chance to reject the mode entirely.
47f1c6c9 8569 */
7758a113
DV
8570 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8571 base.head) {
47f1c6c9 8572
7758a113
DV
8573 if (&encoder->new_crtc->base != crtc)
8574 continue;
7ae89233 8575
efea6e8e
DV
8576 if (!(encoder->compute_config(encoder, pipe_config))) {
8577 DRM_DEBUG_KMS("Encoder config failure\n");
7758a113
DV
8578 goto fail;
8579 }
ee7b9f93 8580 }
47f1c6c9 8581
ff9a6750
DV
8582 /* Set default port clock if not overwritten by the encoder. Needs to be
8583 * done afterwards in case the encoder adjusts the mode. */
8584 if (!pipe_config->port_clock)
241bfc38
DL
8585 pipe_config->port_clock = pipe_config->adjusted_mode.crtc_clock
8586 * pipe_config->pixel_multiplier;
ff9a6750 8587
a43f6e0f 8588 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
e29c22c0 8589 if (ret < 0) {
7758a113
DV
8590 DRM_DEBUG_KMS("CRTC fixup failed\n");
8591 goto fail;
ee7b9f93 8592 }
e29c22c0
DV
8593
8594 if (ret == RETRY) {
8595 if (WARN(!retry, "loop in pipe configuration computation\n")) {
8596 ret = -EINVAL;
8597 goto fail;
8598 }
8599
8600 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
8601 retry = false;
8602 goto encoder_retry;
8603 }
8604
4e53c2e0
DV
8605 pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
8606 DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
8607 plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);
8608
b8cecdf5 8609 return pipe_config;
7758a113 8610fail:
b8cecdf5 8611 kfree(pipe_config);
e29c22c0 8612 return ERR_PTR(ret);
ee7b9f93 8613}
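/*
 * Note on the retry loop above: the crtc fixup may return RETRY once (e.g.
 * when bandwidth constraints force a lower pipe_bpp), in which case
 * port_clock and pixel_multiplier are reset before the second pass so stale
 * values from the first pass cannot leak into it; a second RETRY is treated
 * as a bug.
 */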
47f1c6c9 8614
e2e1ed41
DV
8615/* Computes which crtcs are affected and sets the relevant bits in the mask. For
8616 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
8617static void
8618intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
8619 unsigned *prepare_pipes, unsigned *disable_pipes)
79e53945
JB
8620{
8621 struct intel_crtc *intel_crtc;
e2e1ed41
DV
8622 struct drm_device *dev = crtc->dev;
8623 struct intel_encoder *encoder;
8624 struct intel_connector *connector;
8625 struct drm_crtc *tmp_crtc;
79e53945 8626
e2e1ed41 8627 *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
79e53945 8628
e2e1ed41
DV
8629 /* Check which crtcs have changed outputs connected to them, these need
8630 * to be part of the prepare_pipes mask. We don't (yet) support global
8631 * modeset across multiple crtcs, so modeset_pipes will only have one
8632 * bit set at most. */
8633 list_for_each_entry(connector, &dev->mode_config.connector_list,
8634 base.head) {
8635 if (connector->base.encoder == &connector->new_encoder->base)
8636 continue;
79e53945 8637
e2e1ed41
DV
8638 if (connector->base.encoder) {
8639 tmp_crtc = connector->base.encoder->crtc;
8640
8641 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
8642 }
8643
8644 if (connector->new_encoder)
8645 *prepare_pipes |=
8646 1 << connector->new_encoder->new_crtc->pipe;
79e53945
JB
8647 }
8648
e2e1ed41
DV
8649 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8650 base.head) {
8651 if (encoder->base.crtc == &encoder->new_crtc->base)
8652 continue;
8653
8654 if (encoder->base.crtc) {
8655 tmp_crtc = encoder->base.crtc;
8656
8657 *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
8658 }
8659
8660 if (encoder->new_crtc)
8661 *prepare_pipes |= 1 << encoder->new_crtc->pipe;
80824003
JB
8662 }
8663
e2e1ed41
DV
8664 /* Check for any pipes that will be fully disabled ... */
8665 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
8666 base.head) {
8667 bool used = false;
22fd0fab 8668
e2e1ed41
DV
8669 /* Don't try to disable disabled crtcs. */
8670 if (!intel_crtc->base.enabled)
8671 continue;
7e7d76c3 8672
e2e1ed41
DV
8673 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8674 base.head) {
8675 if (encoder->new_crtc == intel_crtc)
8676 used = true;
8677 }
8678
8679 if (!used)
8680 *disable_pipes |= 1 << intel_crtc->pipe;
7e7d76c3
JB
8681 }
8682
e2e1ed41
DV
8683
8684 /* set_mode is also used to update properties on live display pipes. */
8685 intel_crtc = to_intel_crtc(crtc);
8686 if (crtc->enabled)
8687 *prepare_pipes |= 1 << intel_crtc->pipe;
8688
b6c5164d
DV
8689 /*
8690 * For simplicity do a full modeset on any pipe where the output routing
8691 * changed. We could be more clever, but that would require us to be
8692 * more careful with calling the relevant encoder->mode_set functions.
8693 */
e2e1ed41
DV
8694 if (*prepare_pipes)
8695 *modeset_pipes = *prepare_pipes;
8696
8697 /* ... and mask these out. */
8698 *modeset_pipes &= ~(*disable_pipes);
8699 *prepare_pipes &= ~(*disable_pipes);
b6c5164d
DV
8700
8701 /*
8702 * HACK: We don't (yet) fully support global modesets. intel_set_config
8703 * obeys this rule, but the modeset restore mode of
8704 * intel_modeset_setup_hw_state does not.
8705 */
8706 *modeset_pipes &= 1 << intel_crtc->pipe;
8707 *prepare_pipes &= 1 << intel_crtc->pipe;
e3641d3f
DV
8708
8709 DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
8710 *modeset_pipes, *prepare_pipes, *disable_pipes);
47f1c6c9 8711}
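/*
 * Hypothetical example: moving the only connector from crtc B to crtc A
 * marks both pipes in prepare_pipes, puts pipe B (now without encoders) in
 * disable_pipes, and leaves modeset_pipes = pipe A once the disabled pipes
 * and the single-crtc HACK above have been masked out.
 */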
79e53945 8712
ea9d758d 8713static bool intel_crtc_in_use(struct drm_crtc *crtc)
f6e5b160 8714{
ea9d758d 8715 struct drm_encoder *encoder;
f6e5b160 8716 struct drm_device *dev = crtc->dev;
f6e5b160 8717
ea9d758d
DV
8718 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
8719 if (encoder->crtc == crtc)
8720 return true;
8721
8722 return false;
8723}
8724
8725static void
8726intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
8727{
8728 struct intel_encoder *intel_encoder;
8729 struct intel_crtc *intel_crtc;
8730 struct drm_connector *connector;
8731
8732 list_for_each_entry(intel_encoder, &dev->mode_config.encoder_list,
8733 base.head) {
8734 if (!intel_encoder->base.crtc)
8735 continue;
8736
8737 intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
8738
8739 if (prepare_pipes & (1 << intel_crtc->pipe))
8740 intel_encoder->connectors_active = false;
8741 }
8742
8743 intel_modeset_commit_output_state(dev);
8744
8745 /* Update computed state. */
8746 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
8747 base.head) {
8748 intel_crtc->base.enabled = intel_crtc_in_use(&intel_crtc->base);
8749 }
8750
8751 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
8752 if (!connector->encoder || !connector->encoder->crtc)
8753 continue;
8754
8755 intel_crtc = to_intel_crtc(connector->encoder->crtc);
8756
8757 if (prepare_pipes & (1 << intel_crtc->pipe)) {
68d34720
DV
8758 struct drm_property *dpms_property =
8759 dev->mode_config.dpms_property;
8760
ea9d758d 8761 connector->dpms = DRM_MODE_DPMS_ON;
662595df 8762 drm_object_property_set_value(&connector->base,
68d34720
DV
8763 dpms_property,
8764 DRM_MODE_DPMS_ON);
ea9d758d
DV
8765
8766 intel_encoder = to_intel_encoder(connector->encoder);
8767 intel_encoder->connectors_active = true;
8768 }
8769 }
8770
8771}
8772
3bd26263 8773static bool intel_fuzzy_clock_check(int clock1, int clock2)
f1f644dc 8774{
3bd26263 8775 int diff;
f1f644dc
JB
8776
8777 if (clock1 == clock2)
8778 return true;
8779
8780 if (!clock1 || !clock2)
8781 return false;
8782
8783 diff = abs(clock1 - clock2);
8784
8785 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
8786 return true;
8787
8788 return false;
8789}
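/*
 * Worked example (illustrative numbers): clock1 = 100000 and clock2 =
 * 103000 give diff = 3000 and (3000 + 203000) * 100 / 203000 = 101 < 105,
 * so the clocks are considered equal; 100000 vs 120000 yields 109 and
 * fails the check.
 */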
8790
25c5b266
DV
8791#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
8792 list_for_each_entry((intel_crtc), \
8793 &(dev)->mode_config.crtc_list, \
8794 base.head) \
0973f18f 8795 if (mask & (1 <<(intel_crtc)->pipe))
25c5b266 8796
0e8ffe1b 8797static bool
2fa2fe9a
DV
8798intel_pipe_config_compare(struct drm_device *dev,
8799 struct intel_crtc_config *current_config,
0e8ffe1b
DV
8800 struct intel_crtc_config *pipe_config)
8801{
66e985c0
DV
8802#define PIPE_CONF_CHECK_X(name) \
8803 if (current_config->name != pipe_config->name) { \
8804 DRM_ERROR("mismatch in " #name " " \
8805 "(expected 0x%08x, found 0x%08x)\n", \
8806 current_config->name, \
8807 pipe_config->name); \
8808 return false; \
8809 }
8810
08a24034
DV
8811#define PIPE_CONF_CHECK_I(name) \
8812 if (current_config->name != pipe_config->name) { \
8813 DRM_ERROR("mismatch in " #name " " \
8814 "(expected %i, found %i)\n", \
8815 current_config->name, \
8816 pipe_config->name); \
8817 return false; \
88adfff1
DV
8818 }
8819
1bd1bd80
DV
8820#define PIPE_CONF_CHECK_FLAGS(name, mask) \
8821 if ((current_config->name ^ pipe_config->name) & (mask)) { \
6f02488e 8822 DRM_ERROR("mismatch in " #name "(" #mask ") " \
1bd1bd80
DV
8823 "(expected %i, found %i)\n", \
8824 current_config->name & (mask), \
8825 pipe_config->name & (mask)); \
8826 return false; \
8827 }
8828
5e550656
VS
8829#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
8830 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
8831 DRM_ERROR("mismatch in " #name " " \
8832 "(expected %i, found %i)\n", \
8833 current_config->name, \
8834 pipe_config->name); \
8835 return false; \
8836 }
8837
bb760063
DV
8838#define PIPE_CONF_QUIRK(quirk) \
8839 ((current_config->quirks | pipe_config->quirks) & (quirk))
8840
eccb140b
DV
8841 PIPE_CONF_CHECK_I(cpu_transcoder);
8842
08a24034
DV
8843 PIPE_CONF_CHECK_I(has_pch_encoder);
8844 PIPE_CONF_CHECK_I(fdi_lanes);
72419203
DV
8845 PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
8846 PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
8847 PIPE_CONF_CHECK_I(fdi_m_n.link_m);
8848 PIPE_CONF_CHECK_I(fdi_m_n.link_n);
8849 PIPE_CONF_CHECK_I(fdi_m_n.tu);
08a24034 8850
eb14cb74
VS
8851 PIPE_CONF_CHECK_I(has_dp_encoder);
8852 PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
8853 PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
8854 PIPE_CONF_CHECK_I(dp_m_n.link_m);
8855 PIPE_CONF_CHECK_I(dp_m_n.link_n);
8856 PIPE_CONF_CHECK_I(dp_m_n.tu);
8857
1bd1bd80
DV
8858 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hdisplay);
8859 PIPE_CONF_CHECK_I(adjusted_mode.crtc_htotal);
8860 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_start);
8861 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hblank_end);
8862 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_start);
8863 PIPE_CONF_CHECK_I(adjusted_mode.crtc_hsync_end);
8864
8865 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vdisplay);
8866 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vtotal);
8867 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_start);
8868 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vblank_end);
8869 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
8870 PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
8871
c93f54cf 8872 PIPE_CONF_CHECK_I(pixel_multiplier);
6c49f241 8873
1bd1bd80
DV
8874 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8875 DRM_MODE_FLAG_INTERLACE);
8876
bb760063
DV
8877 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
8878 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8879 DRM_MODE_FLAG_PHSYNC);
8880 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8881 DRM_MODE_FLAG_NHSYNC);
8882 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8883 DRM_MODE_FLAG_PVSYNC);
8884 PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
8885 DRM_MODE_FLAG_NVSYNC);
8886 }
045ac3b5 8887
37327abd
VS
8888 PIPE_CONF_CHECK_I(pipe_src_w);
8889 PIPE_CONF_CHECK_I(pipe_src_h);
1bd1bd80 8890
2fa2fe9a
DV
8891 PIPE_CONF_CHECK_I(gmch_pfit.control);
8892 /* pfit ratios are autocomputed by the hw on gen4+ */
8893 if (INTEL_INFO(dev)->gen < 4)
8894 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
8895 PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
fd4daa9c
CW
8896 PIPE_CONF_CHECK_I(pch_pfit.enabled);
8897 if (current_config->pch_pfit.enabled) {
8898 PIPE_CONF_CHECK_I(pch_pfit.pos);
8899 PIPE_CONF_CHECK_I(pch_pfit.size);
8900 }
2fa2fe9a 8901
42db64ef
PZ
8902 PIPE_CONF_CHECK_I(ips_enabled);
8903
282740f7
VS
8904 PIPE_CONF_CHECK_I(double_wide);
8905
c0d43d62 8906 PIPE_CONF_CHECK_I(shared_dpll);
66e985c0 8907 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8bcc2795 8908 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
66e985c0
DV
8909 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
8910 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
c0d43d62 8911
42571aef
VS
8912 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
8913 PIPE_CONF_CHECK_I(pipe_bpp);
8914
d71b8d4a 8915 if (!IS_HASWELL(dev)) {
241bfc38 8916 PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
d71b8d4a
VS
8917 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
8918 }
5e550656 8919
66e985c0 8920#undef PIPE_CONF_CHECK_X
08a24034 8921#undef PIPE_CONF_CHECK_I
1bd1bd80 8922#undef PIPE_CONF_CHECK_FLAGS
5e550656 8923#undef PIPE_CONF_CHECK_CLOCK_FUZZY
bb760063 8924#undef PIPE_CONF_QUIRK
88adfff1 8925
0e8ffe1b
DV
8926 return true;
8927}
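/*
 * The PIPE_CONF_CHECK_* macros above compare the software pipe config
 * against the state read back from hardware: _X reports mismatches in hex,
 * _I in decimal, _FLAGS compares only the masked bits, and _CLOCK_FUZZY
 * uses intel_fuzzy_clock_check() to tolerate small rounding differences.
 */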
8928
91d1b4bd
DV
8929static void
8930check_connector_state(struct drm_device *dev)
8af6cf88 8931{
8af6cf88
DV
8932 struct intel_connector *connector;
8933
8934 list_for_each_entry(connector, &dev->mode_config.connector_list,
8935 base.head) {
8936 /* This also checks the encoder/connector hw state with the
8937 * ->get_hw_state callbacks. */
8938 intel_connector_check_state(connector);
8939
8940 WARN(&connector->new_encoder->base != connector->base.encoder,
8941 "connector's staged encoder doesn't match current encoder\n");
8942 }
91d1b4bd
DV
8943}
8944
8945static void
8946check_encoder_state(struct drm_device *dev)
8947{
8948 struct intel_encoder *encoder;
8949 struct intel_connector *connector;
8af6cf88
DV
8950
8951 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
8952 base.head) {
8953 bool enabled = false;
8954 bool active = false;
8955 enum pipe pipe, tracked_pipe;
8956
8957 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
8958 encoder->base.base.id,
8959 drm_get_encoder_name(&encoder->base));
8960
8961 WARN(&encoder->new_crtc->base != encoder->base.crtc,
8962 "encoder's stage crtc doesn't match current crtc\n");
8963 WARN(encoder->connectors_active && !encoder->base.crtc,
8964 "encoder's active_connectors set, but no crtc\n");
8965
8966 list_for_each_entry(connector, &dev->mode_config.connector_list,
8967 base.head) {
8968 if (connector->base.encoder != &encoder->base)
8969 continue;
8970 enabled = true;
8971 if (connector->base.dpms != DRM_MODE_DPMS_OFF)
8972 active = true;
8973 }
8974 WARN(!!encoder->base.crtc != enabled,
8975 "encoder's enabled state mismatch "
8976 "(expected %i, found %i)\n",
8977 !!encoder->base.crtc, enabled);
8978 WARN(active && !encoder->base.crtc,
8979 "active encoder with no crtc\n");
8980
8981 WARN(encoder->connectors_active != active,
8982 "encoder's computed active state doesn't match tracked active state "
8983 "(expected %i, found %i)\n", active, encoder->connectors_active);
8984
8985 active = encoder->get_hw_state(encoder, &pipe);
8986 WARN(active != encoder->connectors_active,
8987 "encoder's hw state doesn't match sw tracking "
8988 "(expected %i, found %i)\n",
8989 encoder->connectors_active, active);
8990
8991 if (!encoder->base.crtc)
8992 continue;
8993
8994 tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
8995 WARN(active && pipe != tracked_pipe,
8996 "active encoder's pipe doesn't match"
8997 "(expected %i, found %i)\n",
8998 tracked_pipe, pipe);
8999
9000 }
91d1b4bd
DV
9001}
9002
9003static void
9004check_crtc_state(struct drm_device *dev)
9005{
9006 drm_i915_private_t *dev_priv = dev->dev_private;
9007 struct intel_crtc *crtc;
9008 struct intel_encoder *encoder;
9009 struct intel_crtc_config pipe_config;
8af6cf88
DV
9010
9011 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9012 base.head) {
9013 bool enabled = false;
9014 bool active = false;
9015
045ac3b5
JB
9016 memset(&pipe_config, 0, sizeof(pipe_config));
9017
8af6cf88
DV
9018 DRM_DEBUG_KMS("[CRTC:%d]\n",
9019 crtc->base.base.id);
9020
9021 WARN(crtc->active && !crtc->base.enabled,
9022 "active crtc, but not enabled in sw tracking\n");
9023
9024 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9025 base.head) {
9026 if (encoder->base.crtc != &crtc->base)
9027 continue;
9028 enabled = true;
9029 if (encoder->connectors_active)
9030 active = true;
9031 }
6c49f241 9032
8af6cf88
DV
9033 WARN(active != crtc->active,
9034 "crtc's computed active state doesn't match tracked active state "
9035 "(expected %i, found %i)\n", active, crtc->active);
9036 WARN(enabled != crtc->base.enabled,
9037 "crtc's computed enabled state doesn't match tracked enabled state "
9038 "(expected %i, found %i)\n", enabled, crtc->base.enabled);
9039
0e8ffe1b
DV
9040 active = dev_priv->display.get_pipe_config(crtc,
9041 &pipe_config);
d62cf62a
DV
9042
9043 /* hw state is inconsistent with the pipe A quirk */
9044 if (crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
9045 active = crtc->active;
9046
6c49f241
DV
9047 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9048 base.head) {
3eaba51c 9049 enum pipe pipe;
6c49f241
DV
9050 if (encoder->base.crtc != &crtc->base)
9051 continue;
3eaba51c
VS
9052 if (encoder->get_config &&
9053 encoder->get_hw_state(encoder, &pipe))
6c49f241
DV
9054 encoder->get_config(encoder, &pipe_config);
9055 }
9056
0e8ffe1b
DV
9057 WARN(crtc->active != active,
9058 "crtc active state doesn't match with hw state "
9059 "(expected %i, found %i)\n", crtc->active, active);
9060
c0b03411
DV
9061 if (active &&
9062 !intel_pipe_config_compare(dev, &crtc->config, &pipe_config)) {
9063 WARN(1, "pipe state doesn't match!\n");
9064 intel_dump_pipe_config(crtc, &pipe_config,
9065 "[hw state]");
9066 intel_dump_pipe_config(crtc, &crtc->config,
9067 "[sw state]");
9068 }
8af6cf88
DV
9069 }
9070}
9071
91d1b4bd
DV
9072static void
9073check_shared_dpll_state(struct drm_device *dev)
9074{
9075 drm_i915_private_t *dev_priv = dev->dev_private;
9076 struct intel_crtc *crtc;
9077 struct intel_dpll_hw_state dpll_hw_state;
9078 int i;
5358901f
DV
9079
9080 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
9081 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
9082 int enabled_crtcs = 0, active_crtcs = 0;
9083 bool active;
9084
9085 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
9086
9087 DRM_DEBUG_KMS("%s\n", pll->name);
9088
9089 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
9090
9091 WARN(pll->active > pll->refcount,
9092 "more active pll users than references: %i vs %i\n",
9093 pll->active, pll->refcount);
9094 WARN(pll->active && !pll->on,
9095 "pll in active use but not on in sw tracking\n");
35c95375
DV
9096 WARN(pll->on && !pll->active,
9097 "pll in on but not on in use in sw tracking\n");
5358901f
DV
9098 WARN(pll->on != active,
9099 "pll on state mismatch (expected %i, found %i)\n",
9100 pll->on, active);
9101
9102 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
9103 base.head) {
9104 if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
9105 enabled_crtcs++;
9106 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
9107 active_crtcs++;
9108 }
9109 WARN(pll->active != active_crtcs,
9110 "pll active crtcs mismatch (expected %i, found %i)\n",
9111 pll->active, active_crtcs);
9112 WARN(pll->refcount != enabled_crtcs,
9113 "pll enabled crtcs mismatch (expected %i, found %i)\n",
9114 pll->refcount, enabled_crtcs);
66e985c0
DV
9115
9116 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state,
9117 sizeof(dpll_hw_state)),
9118 "pll hw state mismatch\n");
5358901f 9119 }
8af6cf88
DV
9120}
9121
91d1b4bd
DV
9122void
9123intel_modeset_check_state(struct drm_device *dev)
9124{
9125 check_connector_state(dev);
9126 check_encoder_state(dev);
9127 check_crtc_state(dev);
9128 check_shared_dpll_state(dev);
9129}
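/*
 * intel_modeset_check_state() is the cross-checking entry point; it runs
 * after every successful modeset (see intel_set_mode() below) to verify
 * that connector, encoder, crtc and shared-DPLL software tracking still
 * matches the hardware.
 */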
9130
18442d08
VS
9131void ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
9132 int dotclock)
9133{
9134 /*
9135 * FDI already provided one idea for the dotclock.
9136 * Yell if the encoder disagrees.
9137 */
241bfc38 9138 WARN(!intel_fuzzy_clock_check(pipe_config->adjusted_mode.crtc_clock, dotclock),
18442d08 9139 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
241bfc38 9140 pipe_config->adjusted_mode.crtc_clock, dotclock);
18442d08
VS
9141}
9142
f30da187
DV
9143static int __intel_set_mode(struct drm_crtc *crtc,
9144 struct drm_display_mode *mode,
9145 int x, int y, struct drm_framebuffer *fb)
a6778b3c
DV
9146{
9147 struct drm_device *dev = crtc->dev;
dbf2b54e 9148 drm_i915_private_t *dev_priv = dev->dev_private;
b8cecdf5
DV
9149 struct drm_display_mode *saved_mode, *saved_hwmode;
9150 struct intel_crtc_config *pipe_config = NULL;
25c5b266
DV
9151 struct intel_crtc *intel_crtc;
9152 unsigned disable_pipes, prepare_pipes, modeset_pipes;
c0c36b94 9153 int ret = 0;
a6778b3c 9154
a1e22653 9155 saved_mode = kcalloc(2, sizeof(*saved_mode), GFP_KERNEL);
c0c36b94
CW
9156 if (!saved_mode)
9157 return -ENOMEM;
3ac18232 9158 saved_hwmode = saved_mode + 1;
a6778b3c 9159
e2e1ed41 9160 intel_modeset_affected_pipes(crtc, &modeset_pipes,
25c5b266
DV
9161 &prepare_pipes, &disable_pipes);
9162
3ac18232
TG
9163 *saved_hwmode = crtc->hwmode;
9164 *saved_mode = crtc->mode;
a6778b3c 9165
25c5b266
DV
9166 /* Hack: Because we don't (yet) support global modeset on multiple
9167 * crtcs, we don't keep track of the new mode for more than one crtc.
9168 * Hence simply check whether any bit is set in modeset_pipes in all the
9169 * pieces of code that are not yet converted to deal with multiple crtcs
9170 * changing their mode at the same time. */
25c5b266 9171 if (modeset_pipes) {
4e53c2e0 9172 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
b8cecdf5
DV
9173 if (IS_ERR(pipe_config)) {
9174 ret = PTR_ERR(pipe_config);
9175 pipe_config = NULL;
9176
3ac18232 9177 goto out;
25c5b266 9178 }
c0b03411
DV
9179 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
9180 "[modeset]");
25c5b266 9181 }
a6778b3c 9182
460da916
DV
9183 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
9184 intel_crtc_disable(&intel_crtc->base);
9185
ea9d758d
DV
9186 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
9187 if (intel_crtc->base.enabled)
9188 dev_priv->display.crtc_disable(&intel_crtc->base);
9189 }
a6778b3c 9190
6c4c86f5
DV
9191 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
9192 * to set it here already, even though we also pass it down the callchain.
f6e5b160 9193 */
b8cecdf5 9194 if (modeset_pipes) {
25c5b266 9195 crtc->mode = *mode;
b8cecdf5
DV
9196 /* mode_set/enable/disable functions rely on a correct pipe
9197 * config. */
9198 to_intel_crtc(crtc)->config = *pipe_config;
9199 }
7758a113 9200
ea9d758d
DV
9201 /* Only after disabling all output pipelines that will be changed can we
9202 * update the output configuration. */
9203 intel_modeset_update_state(dev, prepare_pipes);
f6e5b160 9204
47fab737
DV
9205 if (dev_priv->display.modeset_global_resources)
9206 dev_priv->display.modeset_global_resources(dev);
9207
a6778b3c
DV
9208 /* Set up the DPLL and any encoders state that needs to adjust or depend
9209 * on the DPLL.
f6e5b160 9210 */
25c5b266 9211 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
c0c36b94 9212 ret = intel_crtc_mode_set(&intel_crtc->base,
c0c36b94
CW
9213 x, y, fb);
9214 if (ret)
9215 goto done;
a6778b3c
DV
9216 }
9217
9218 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
25c5b266
DV
9219 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
9220 dev_priv->display.crtc_enable(&intel_crtc->base);
a6778b3c 9221
25c5b266
DV
9222 if (modeset_pipes) {
9223 /* Store real post-adjustment hardware mode. */
b8cecdf5 9224 crtc->hwmode = pipe_config->adjusted_mode;
a6778b3c 9225
25c5b266
DV
9226 /* Calculate and store various constants which
9227 * are later needed by vblank and swap-completion
9228 * timestamping. They are derived from true hwmode.
9229 */
9230 drm_calc_timestamping_constants(crtc);
9231 }
a6778b3c
DV
9232
9233 /* FIXME: add subpixel order */
9234done:
c0c36b94 9235 if (ret && crtc->enabled) {
3ac18232
TG
9236 crtc->hwmode = *saved_hwmode;
9237 crtc->mode = *saved_mode;
a6778b3c
DV
9238 }
9239
3ac18232 9240out:
b8cecdf5 9241 kfree(pipe_config);
3ac18232 9242 kfree(saved_mode);
a6778b3c 9243 return ret;
f6e5b160
CW
9244}
9245
e7457a9a
DL
9246static int intel_set_mode(struct drm_crtc *crtc,
9247 struct drm_display_mode *mode,
9248 int x, int y, struct drm_framebuffer *fb)
f30da187
DV
9249{
9250 int ret;
9251
9252 ret = __intel_set_mode(crtc, mode, x, y, fb);
9253
9254 if (ret == 0)
9255 intel_modeset_check_state(crtc->dev);
9256
9257 return ret;
9258}
9259
c0c36b94
CW
9260void intel_crtc_restore_mode(struct drm_crtc *crtc)
9261{
9262 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
9263}
9264
25c5b266
DV
9265#undef for_each_intel_crtc_masked
9266
d9e55608
DV
9267static void intel_set_config_free(struct intel_set_config *config)
9268{
9269 if (!config)
9270 return;
9271
1aa4b628
DV
9272 kfree(config->save_connector_encoders);
9273 kfree(config->save_encoder_crtcs);
d9e55608
DV
9274 kfree(config);
9275}
9276
85f9eb71
DV
9277static int intel_set_config_save_state(struct drm_device *dev,
9278 struct intel_set_config *config)
9279{
85f9eb71
DV
9280 struct drm_encoder *encoder;
9281 struct drm_connector *connector;
9282 int count;
9283
1aa4b628
DV
9284 config->save_encoder_crtcs =
9285 kcalloc(dev->mode_config.num_encoder,
9286 sizeof(struct drm_crtc *), GFP_KERNEL);
9287 if (!config->save_encoder_crtcs)
85f9eb71
DV
9288 return -ENOMEM;
9289
1aa4b628
DV
9290 config->save_connector_encoders =
9291 kcalloc(dev->mode_config.num_connector,
9292 sizeof(struct drm_encoder *), GFP_KERNEL);
9293 if (!config->save_connector_encoders)
85f9eb71
DV
9294 return -ENOMEM;
9295
9296 /* Copy data. Note that driver private data is not affected.
9297 * Should anything bad happen only the expected state is
9298 * restored, not the driver's personal bookkeeping.
9299 */
85f9eb71
DV
9300 count = 0;
9301 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
1aa4b628 9302 config->save_encoder_crtcs[count++] = encoder->crtc;
85f9eb71
DV
9303 }
9304
9305 count = 0;
9306 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1aa4b628 9307 config->save_connector_encoders[count++] = connector->encoder;
85f9eb71
DV
9308 }
9309
9310 return 0;
9311}
9312
9313static void intel_set_config_restore_state(struct drm_device *dev,
9314 struct intel_set_config *config)
9315{
9a935856
DV
9316 struct intel_encoder *encoder;
9317 struct intel_connector *connector;
85f9eb71
DV
9318 int count;
9319
85f9eb71 9320 count = 0;
9a935856
DV
9321 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9322 encoder->new_crtc =
9323 to_intel_crtc(config->save_encoder_crtcs[count++]);
85f9eb71
DV
9324 }
9325
9326 count = 0;
9a935856
DV
9327 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
9328 connector->new_encoder =
9329 to_intel_encoder(config->save_connector_encoders[count++]);
85f9eb71
DV
9330 }
9331}
9332
e3de42b6 9333static bool
2e57f47d 9334is_crtc_connector_off(struct drm_mode_set *set)
e3de42b6
ID
9335{
9336 int i;
9337
2e57f47d
CW
9338 if (set->num_connectors == 0)
9339 return false;
9340
9341 if (WARN_ON(set->connectors == NULL))
9342 return false;
9343
9344 for (i = 0; i < set->num_connectors; i++)
9345 if (set->connectors[i]->encoder &&
9346 set->connectors[i]->encoder->crtc == set->crtc &&
9347 set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
e3de42b6
ID
9348 return true;
9349
9350 return false;
9351}
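/*
 * is_crtc_connector_off(): true when the request explicitly lists a
 * connector that currently drives this crtc but has its dpms set to off;
 * the caller below treats that as requiring a full modeset.
 */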
9352
5e2b584e
DV
9353static void
9354intel_set_config_compute_mode_changes(struct drm_mode_set *set,
9355 struct intel_set_config *config)
9356{
9357
9358 /* We should be able to check here if the fb has the same properties
9359 * and then just flip_or_move it */
2e57f47d
CW
9360 if (is_crtc_connector_off(set)) {
9361 config->mode_changed = true;
e3de42b6 9362 } else if (set->crtc->fb != set->fb) {
5e2b584e
DV
9363 /* If we have no fb then treat it as a full mode set */
9364 if (set->crtc->fb == NULL) {
319d9827
JB
9365 struct intel_crtc *intel_crtc =
9366 to_intel_crtc(set->crtc);
9367
9368 if (intel_crtc->active && i915_fastboot) {
9369 DRM_DEBUG_KMS("crtc has no fb, will flip\n");
9370 config->fb_changed = true;
9371 } else {
9372 DRM_DEBUG_KMS("inactive crtc, full mode set\n");
9373 config->mode_changed = true;
9374 }
5e2b584e
DV
9375 } else if (set->fb == NULL) {
9376 config->mode_changed = true;
72f4901e
DV
9377 } else if (set->fb->pixel_format !=
9378 set->crtc->fb->pixel_format) {
5e2b584e 9379 config->mode_changed = true;
e3de42b6 9380 } else {
5e2b584e 9381 config->fb_changed = true;
e3de42b6 9382 }
5e2b584e
DV
9383 }
9384
835c5873 9385 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
5e2b584e
DV
9386 config->fb_changed = true;
9387
9388 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
9389 DRM_DEBUG_KMS("modes are different, full mode set\n");
9390 drm_mode_debug_printmodeline(&set->crtc->mode);
9391 drm_mode_debug_printmodeline(set->mode);
9392 config->mode_changed = true;
9393 }
a1d95703
CW
9394
9395 DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
9396 set->crtc->base.id, config->mode_changed, config->fb_changed);
5e2b584e
DV
9397}
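/*
 * To summarize the computation above: mode_changed forces the full modeset
 * path via intel_set_mode(), fb_changed only swaps the scanout with
 * intel_pipe_set_base(), and a pixel format change or a crtc going from or
 * to a NULL fb is always treated as a full modeset (unless fastboot can
 * get away with a flip).
 */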
9398
2e431051 9399static int
9a935856
DV
9400intel_modeset_stage_output_state(struct drm_device *dev,
9401 struct drm_mode_set *set,
9402 struct intel_set_config *config)
50f56119 9403{
85f9eb71 9404 struct drm_crtc *new_crtc;
9a935856
DV
9405 struct intel_connector *connector;
9406 struct intel_encoder *encoder;
f3f08572 9407 int ro;
50f56119 9408
9abdda74 9409 /* The upper layers ensure that we either disable a crtc or have a list
9a935856
DV
9410 * of connectors. For paranoia, double-check this. */
9411 WARN_ON(!set->fb && (set->num_connectors != 0));
9412 WARN_ON(set->fb && (set->num_connectors == 0));
9413
9a935856
DV
9414 list_for_each_entry(connector, &dev->mode_config.connector_list,
9415 base.head) {
9416 /* Otherwise traverse passed in connector list and get encoders
9417 * for them. */
50f56119 9418 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856
DV
9419 if (set->connectors[ro] == &connector->base) {
9420 connector->new_encoder = connector->encoder;
50f56119
DV
9421 break;
9422 }
9423 }
9424
9a935856
DV
9425 /* If we disable the crtc, disable all its connectors. Also, if
9426 * the connector is on the changing crtc but not on the new
9427 * connector list, disable it. */
9428 if ((!set->fb || ro == set->num_connectors) &&
9429 connector->base.encoder &&
9430 connector->base.encoder->crtc == set->crtc) {
9431 connector->new_encoder = NULL;
9432
9433 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
9434 connector->base.base.id,
9435 drm_get_connector_name(&connector->base));
9436 }
9437
9438
9439 if (&connector->new_encoder->base != connector->base.encoder) {
50f56119 9440 DRM_DEBUG_KMS("encoder changed, full mode switch\n");
5e2b584e 9441 config->mode_changed = true;
50f56119
DV
9442 }
9443 }
9a935856 9444 /* connector->new_encoder is now updated for all connectors. */
50f56119 9445
9a935856 9446 /* Update crtc of enabled connectors. */
9a935856
DV
9447 list_for_each_entry(connector, &dev->mode_config.connector_list,
9448 base.head) {
9449 if (!connector->new_encoder)
50f56119
DV
9450 continue;
9451
9a935856 9452 new_crtc = connector->new_encoder->base.crtc;
50f56119
DV
9453
9454 for (ro = 0; ro < set->num_connectors; ro++) {
9a935856 9455 if (set->connectors[ro] == &connector->base)
50f56119
DV
9456 new_crtc = set->crtc;
9457 }
9458
9459 /* Make sure the new CRTC will work with the encoder */
9a935856
DV
9460 if (!intel_encoder_crtc_ok(&connector->new_encoder->base,
9461 new_crtc)) {
5e2b584e 9462 return -EINVAL;
50f56119 9463 }
9a935856
DV
9464 connector->encoder->new_crtc = to_intel_crtc(new_crtc);
9465
9466 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
9467 connector->base.base.id,
9468 drm_get_connector_name(&connector->base),
9469 new_crtc->base.id);
9470 }
9471
9472 /* Check for any encoders that needs to be disabled. */
9473 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9474 base.head) {
9475 list_for_each_entry(connector,
9476 &dev->mode_config.connector_list,
9477 base.head) {
9478 if (connector->new_encoder == encoder) {
9479 WARN_ON(!connector->new_encoder->new_crtc);
9480
9481 goto next_encoder;
9482 }
9483 }
9484 encoder->new_crtc = NULL;
9485next_encoder:
9486 /* Only now check for crtc changes so we don't miss encoders
9487 * that will be disabled. */
9488 if (&encoder->new_crtc->base != encoder->base.crtc) {
50f56119 9489 DRM_DEBUG_KMS("crtc changed, full mode switch\n");
5e2b584e 9490 config->mode_changed = true;
50f56119
DV
9491 }
9492 }
9a935856 9493 /* Now we've also updated encoder->new_crtc for all encoders. */
50f56119 9494
2e431051
DV
9495 return 0;
9496}
9497
9498static int intel_crtc_set_config(struct drm_mode_set *set)
9499{
9500 struct drm_device *dev;
2e431051
DV
9501 struct drm_mode_set save_set;
9502 struct intel_set_config *config;
9503 int ret;
2e431051 9504
8d3e375e
DV
9505 BUG_ON(!set);
9506 BUG_ON(!set->crtc);
9507 BUG_ON(!set->crtc->helper_private);
2e431051 9508
7e53f3a4
DV
9509 /* Enforce sane interface api - has been abused by the fb helper. */
9510 BUG_ON(!set->mode && set->fb);
9511 BUG_ON(set->fb && set->num_connectors == 0);
431e50f7 9512
2e431051
DV
9513 if (set->fb) {
9514 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
9515 set->crtc->base.id, set->fb->base.id,
9516 (int)set->num_connectors, set->x, set->y);
9517 } else {
9518 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
2e431051
DV
9519 }
9520
9521 dev = set->crtc->dev;
9522
9523 ret = -ENOMEM;
9524 config = kzalloc(sizeof(*config), GFP_KERNEL);
9525 if (!config)
9526 goto out_config;
9527
9528 ret = intel_set_config_save_state(dev, config);
9529 if (ret)
9530 goto out_config;
9531
9532 save_set.crtc = set->crtc;
9533 save_set.mode = &set->crtc->mode;
9534 save_set.x = set->crtc->x;
9535 save_set.y = set->crtc->y;
9536 save_set.fb = set->crtc->fb;
9537
9538 /* Compute whether we need a full modeset, only an fb base update or no
9539 * change at all. In the future we might also check whether only the
9540 * mode changed, e.g. for LVDS where we only change the panel fitter in
9541 * such cases. */
9542 intel_set_config_compute_mode_changes(set, config);
9543
9a935856 9544 ret = intel_modeset_stage_output_state(dev, set, config);
2e431051
DV
9545 if (ret)
9546 goto fail;
9547
5e2b584e 9548 if (config->mode_changed) {
c0c36b94
CW
9549 ret = intel_set_mode(set->crtc, set->mode,
9550 set->x, set->y, set->fb);
5e2b584e 9551 } else if (config->fb_changed) {
4878cae2
VS
9552 intel_crtc_wait_for_pending_flips(set->crtc);
9553
4f660f49 9554 ret = intel_pipe_set_base(set->crtc,
94352cf9 9555 set->x, set->y, set->fb);
50f56119
DV
9556 }
9557
2d05eae1 9558 if (ret) {
bf67dfeb
DV
9559 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
9560 set->crtc->base.id, ret);
50f56119 9561fail:
2d05eae1 9562 intel_set_config_restore_state(dev, config);
50f56119 9563
2d05eae1
CW
9564 /* Try to restore the config */
9565 if (config->mode_changed &&
9566 intel_set_mode(save_set.crtc, save_set.mode,
9567 save_set.x, save_set.y, save_set.fb))
9568 DRM_ERROR("failed to restore config after modeset failure\n");
9569 }
50f56119 9570
d9e55608
DV
9571out_config:
9572 intel_set_config_free(config);
50f56119
DV
9573 return ret;
9574}
f6e5b160
CW
9575
9576static const struct drm_crtc_funcs intel_crtc_funcs = {
f6e5b160
CW
9577 .cursor_set = intel_crtc_cursor_set,
9578 .cursor_move = intel_crtc_cursor_move,
9579 .gamma_set = intel_crtc_gamma_set,
50f56119 9580 .set_config = intel_crtc_set_config,
f6e5b160
CW
9581 .destroy = intel_crtc_destroy,
9582 .page_flip = intel_crtc_page_flip,
9583};
9584
79f689aa
PZ
9585static void intel_cpu_pll_init(struct drm_device *dev)
9586{
affa9354 9587 if (HAS_DDI(dev))
79f689aa
PZ
9588 intel_ddi_pll_init(dev);
9589}
9590
5358901f
DV
9591static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
9592 struct intel_shared_dpll *pll,
9593 struct intel_dpll_hw_state *hw_state)
ee7b9f93 9594{
5358901f 9595 uint32_t val;
ee7b9f93 9596
5358901f 9597 val = I915_READ(PCH_DPLL(pll->id));
66e985c0
DV
9598 hw_state->dpll = val;
9599 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
9600 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
5358901f
DV
9601
9602 return val & DPLL_VCO_ENABLE;
9603}
9604
15bdd4cf
DV
9605static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
9606 struct intel_shared_dpll *pll)
9607{
9608 I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
9609 I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
9610}
9611
e7b903d2
DV
9612static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
9613 struct intel_shared_dpll *pll)
9614{
e7b903d2
DV
9615 /* PCH refclock must be enabled first */
9616 assert_pch_refclk_enabled(dev_priv);
9617
15bdd4cf
DV
9618 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
9619
9620 /* Wait for the clocks to stabilize. */
9621 POSTING_READ(PCH_DPLL(pll->id));
9622 udelay(150);
9623
9624 /* The pixel multiplier can only be updated once the
9625 * DPLL is enabled and the clocks are stable.
9626 *
9627 * So write it again.
9628 */
9629 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
9630 POSTING_READ(PCH_DPLL(pll->id));
e7b903d2
DV
9631 udelay(200);
9632}
9633
9634static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
9635 struct intel_shared_dpll *pll)
9636{
9637 struct drm_device *dev = dev_priv->dev;
9638 struct intel_crtc *crtc;
e7b903d2
DV
9639
9640 /* Make sure no transcoder is still depending on us. */
9641 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
9642 if (intel_crtc_to_shared_dpll(crtc) == pll)
9643 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
ee7b9f93
JB
9644 }
9645
15bdd4cf
DV
9646 I915_WRITE(PCH_DPLL(pll->id), 0);
9647 POSTING_READ(PCH_DPLL(pll->id));
e7b903d2
DV
9648 udelay(200);
9649}
9650
46edb027
DV
9651static char *ibx_pch_dpll_names[] = {
9652 "PCH DPLL A",
9653 "PCH DPLL B",
9654};
9655
7c74ade1 9656static void ibx_pch_dpll_init(struct drm_device *dev)
ee7b9f93 9657{
e7b903d2 9658 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93
JB
9659 int i;
9660
7c74ade1 9661 dev_priv->num_shared_dpll = 2;
ee7b9f93 9662
e72f9fbf 9663 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
46edb027
DV
9664 dev_priv->shared_dplls[i].id = i;
9665 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
15bdd4cf 9666 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
e7b903d2
DV
9667 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
9668 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
5358901f
DV
9669 dev_priv->shared_dplls[i].get_hw_state =
9670 ibx_pch_dpll_get_hw_state;
ee7b9f93
JB
9671 }
9672}
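/*
 * IBX/CPT expose exactly two PCH DPLLs ("PCH DPLL A"/"B"); only the
 * mode_set/enable/disable/get_hw_state hooks above are platform specific,
 * while the refcount and active-use bookkeeping checked in
 * check_shared_dpll_state() is shared code.
 */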
9673
7c74ade1
DV
9674static void intel_shared_dpll_init(struct drm_device *dev)
9675{
e7b903d2 9676 struct drm_i915_private *dev_priv = dev->dev_private;
7c74ade1
DV
9677
9678 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
9679 ibx_pch_dpll_init(dev);
9680 else
9681 dev_priv->num_shared_dpll = 0;
9682
9683 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
9684 DRM_DEBUG_KMS("%i shared PLLs initialized\n",
9685 dev_priv->num_shared_dpll);
9686}
9687
b358d0a6 9688static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 9689{
22fd0fab 9690 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
9691 struct intel_crtc *intel_crtc;
9692 int i;
9693
955382f3 9694 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
79e53945
JB
9695 if (intel_crtc == NULL)
9696 return;
9697
9698 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
9699
9700 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
9701 for (i = 0; i < 256; i++) {
9702 intel_crtc->lut_r[i] = i;
9703 intel_crtc->lut_g[i] = i;
9704 intel_crtc->lut_b[i] = i;
9705 }
9706
80824003
JB
9707 /* Swap pipes & planes for FBC on pre-965 */
9708 intel_crtc->pipe = pipe;
9709 intel_crtc->plane = pipe;
e2e767ab 9710 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
28c97730 9711 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 9712 intel_crtc->plane = !pipe;
80824003
JB
9713 }
9714
22fd0fab
JB
9715 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
9716 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
9717 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
9718 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
9719
79e53945 9720 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
79e53945
JB
9721}
9722
08d7b3d1 9723int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 9724 struct drm_file *file)
08d7b3d1 9725{
08d7b3d1 9726 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
9727 struct drm_mode_object *drmmode_obj;
9728 struct intel_crtc *crtc;
08d7b3d1 9729
1cff8f6b
DV
9730 if (!drm_core_check_feature(dev, DRIVER_MODESET))
9731 return -ENODEV;
08d7b3d1 9732
c05422d5
DV
9733 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
9734 DRM_MODE_OBJECT_CRTC);
08d7b3d1 9735
c05422d5 9736 if (!drmmode_obj) {
08d7b3d1
CW
9737 DRM_ERROR("no such CRTC id\n");
9738 return -EINVAL;
9739 }
9740
c05422d5
DV
9741 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
9742 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 9743
c05422d5 9744 return 0;
08d7b3d1
CW
9745}
9746
66a9278e 9747static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 9748{
66a9278e
DV
9749 struct drm_device *dev = encoder->base.dev;
9750 struct intel_encoder *source_encoder;
79e53945 9751 int index_mask = 0;
79e53945
JB
9752 int entry = 0;
9753
66a9278e
DV
9754 list_for_each_entry(source_encoder,
9755 &dev->mode_config.encoder_list, base.head) {
9756
9757 if (encoder == source_encoder)
79e53945 9758 index_mask |= (1 << entry);
66a9278e
DV
9759
9761 /* Intel hw has only one MUX where encoders can be cloned. */
9761 if (encoder->cloneable && source_encoder->cloneable)
9762 index_mask |= (1 << entry);
9763
79e53945
JB
9764 entry++;
9765 }
4ef69c7a 9766
79e53945
JB
9767 return index_mask;
9768}
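/*
 * The returned index_mask has one bit per position in the encoder list: an
 * encoder is always listed as its own clone, and additionally as a clone
 * of any other encoder when both are marked cloneable.
 */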
9769
4d302442
CW
9770static bool has_edp_a(struct drm_device *dev)
9771{
9772 struct drm_i915_private *dev_priv = dev->dev_private;
9773
9774 if (!IS_MOBILE(dev))
9775 return false;
9776
9777 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
9778 return false;
9779
9780 if (IS_GEN5(dev) &&
9781 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
9782 return false;
9783
9784 return true;
9785}
9786
79e53945
JB
9787static void intel_setup_outputs(struct drm_device *dev)
9788{
725e30ad 9789 struct drm_i915_private *dev_priv = dev->dev_private;
4ef69c7a 9790 struct intel_encoder *encoder;
cb0953d7 9791 bool dpd_is_edp = false;
79e53945 9792
c9093354 9793 intel_lvds_init(dev);
79e53945 9794
c40c0f5b 9795 if (!IS_ULT(dev))
79935fca 9796 intel_crt_init(dev);
cb0953d7 9797
affa9354 9798 if (HAS_DDI(dev)) {
0e72a5b5
ED
9799 int found;
9800
9801 /* Haswell uses DDI functions to detect digital outputs */
9802 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
9803 /* DDI A only supports eDP */
9804 if (found)
9805 intel_ddi_init(dev, PORT_A);
9806
9807 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
9808 * register */
9809 found = I915_READ(SFUSE_STRAP);
9810
9811 if (found & SFUSE_STRAP_DDIB_DETECTED)
9812 intel_ddi_init(dev, PORT_B);
9813 if (found & SFUSE_STRAP_DDIC_DETECTED)
9814 intel_ddi_init(dev, PORT_C);
9815 if (found & SFUSE_STRAP_DDID_DETECTED)
9816 intel_ddi_init(dev, PORT_D);
9817 } else if (HAS_PCH_SPLIT(dev)) {
cb0953d7 9818 int found;
270b3042
DV
9819 dpd_is_edp = intel_dpd_is_edp(dev);
9820
9821 if (has_edp_a(dev))
9822 intel_dp_init(dev, DP_A, PORT_A);
cb0953d7 9823
dc0fa718 9824 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
461ed3ca 9825 /* PCH SDVOB multiplex with HDMIB */
eef4eacb 9826 found = intel_sdvo_init(dev, PCH_SDVOB, true);
30ad48b7 9827 if (!found)
e2debe91 9828 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
5eb08b69 9829 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
ab9d7c30 9830 intel_dp_init(dev, PCH_DP_B, PORT_B);
30ad48b7
ZW
9831 }
9832
dc0fa718 9833 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
e2debe91 9834 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
30ad48b7 9835
dc0fa718 9836 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
e2debe91 9837 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
30ad48b7 9838
5eb08b69 9839 if (I915_READ(PCH_DP_C) & DP_DETECTED)
ab9d7c30 9840 intel_dp_init(dev, PCH_DP_C, PORT_C);
5eb08b69 9841
270b3042 9842 if (I915_READ(PCH_DP_D) & DP_DETECTED)
ab9d7c30 9843 intel_dp_init(dev, PCH_DP_D, PORT_D);
4a87d65d 9844 } else if (IS_VALLEYVIEW(dev)) {
19c03924 9845 /* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
6f6005a5
JB
9846 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
9847 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
9848 PORT_C);
9849 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
9850 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
9851 PORT_C);
9852 }
19c03924 9853
dc0fa718 9854 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
e2debe91
PZ
9855 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
9856 PORT_B);
67cfc203
VS
9857 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
9858 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
4a87d65d 9859 }
3cfca973
JN
9860
9861 intel_dsi_init(dev);
103a196f 9862 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 9863 bool found = false;
7d57382e 9864
e2debe91 9865 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
b01f2c3a 9866 DRM_DEBUG_KMS("probing SDVOB\n");
e2debe91 9867 found = intel_sdvo_init(dev, GEN3_SDVOB, true);
b01f2c3a
JB
9868 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
9869 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
e2debe91 9870 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
b01f2c3a 9871 }
27185ae1 9872
e7281eab 9873 if (!found && SUPPORTS_INTEGRATED_DP(dev))
ab9d7c30 9874 intel_dp_init(dev, DP_B, PORT_B);
725e30ad 9875 }
13520b05
KH
9876
9877 /* Before G4X, SDVOC doesn't have its own detect register */
13520b05 9878
e2debe91 9879 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
b01f2c3a 9880 DRM_DEBUG_KMS("probing SDVOC\n");
e2debe91 9881 found = intel_sdvo_init(dev, GEN3_SDVOC, false);
b01f2c3a 9882 }
27185ae1 9883
e2debe91 9884 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
27185ae1 9885
b01f2c3a
JB
9886 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
9887 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
e2debe91 9888 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
b01f2c3a 9889 }
e7281eab 9890 if (SUPPORTS_INTEGRATED_DP(dev))
ab9d7c30 9891 intel_dp_init(dev, DP_C, PORT_C);
725e30ad 9892 }
27185ae1 9893
b01f2c3a 9894 if (SUPPORTS_INTEGRATED_DP(dev) &&
e7281eab 9895 (I915_READ(DP_D) & DP_DETECTED))
ab9d7c30 9896 intel_dp_init(dev, DP_D, PORT_D);
bad720ff 9897 } else if (IS_GEN2(dev))
79e53945
JB
9898 intel_dvo_init(dev);
9899
103a196f 9900 if (SUPPORTS_TV(dev))
79e53945
JB
9901 intel_tv_init(dev);
9902
4ef69c7a
CW
9903 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
9904 encoder->base.possible_crtcs = encoder->crtc_mask;
9905 encoder->base.possible_clones =
66a9278e 9906 intel_encoder_clones(encoder);
79e53945 9907 }
47356eb6 9908
dde86e2d 9909 intel_init_pch_refclk(dev);
270b3042
DV
9910
9911 drm_helper_move_panel_connectors_to_head(dev);
79e53945
JB
9912}
9913
ddfe1567
CW
9914void intel_framebuffer_fini(struct intel_framebuffer *fb)
9915{
9916 drm_framebuffer_cleanup(&fb->base);
9917 drm_gem_object_unreference_unlocked(&fb->obj->base);
9918}
9919
79e53945
JB
9920static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
9921{
9922 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945 9923
ddfe1567 9924 intel_framebuffer_fini(intel_fb);
79e53945
JB
9925 kfree(intel_fb);
9926}
9927
9928static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 9929 struct drm_file *file,
79e53945
JB
9930 unsigned int *handle)
9931{
9932 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 9933 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 9934
05394f39 9935 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
9936}
9937
9938static const struct drm_framebuffer_funcs intel_fb_funcs = {
9939 .destroy = intel_user_framebuffer_destroy,
9940 .create_handle = intel_user_framebuffer_create_handle,
9941};
9942
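/* Validate the framebuffer described by @mode_cmd against what the display
 * hardware can scan out (tiling mode, stride alignment and limit, pixel
 * format, offset) and tie the drm_framebuffer to its backing GEM object. */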
38651674
DA
9943int intel_framebuffer_init(struct drm_device *dev,
9944 struct intel_framebuffer *intel_fb,
308e5bcb 9945 struct drm_mode_fb_cmd2 *mode_cmd,
05394f39 9946 struct drm_i915_gem_object *obj)
79e53945 9947{
a35cdaa0 9948 int pitch_limit;
79e53945
JB
9949 int ret;
9950
c16ed4be
CW
9951 if (obj->tiling_mode == I915_TILING_Y) {
9952 DRM_DEBUG("hardware does not support tiling Y\n");
57cd6508 9953 return -EINVAL;
c16ed4be 9954 }
57cd6508 9955
c16ed4be
CW
9956 if (mode_cmd->pitches[0] & 63) {
9957 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
9958 mode_cmd->pitches[0]);
57cd6508 9959 return -EINVAL;
c16ed4be 9960 }
57cd6508 9961
a35cdaa0
CW
9962 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) {
9963 pitch_limit = 32*1024;
9964 } else if (INTEL_INFO(dev)->gen >= 4) {
9965 if (obj->tiling_mode)
9966 pitch_limit = 16*1024;
9967 else
9968 pitch_limit = 32*1024;
9969 } else if (INTEL_INFO(dev)->gen >= 3) {
9970 if (obj->tiling_mode)
9971 pitch_limit = 8*1024;
9972 else
9973 pitch_limit = 16*1024;
9974 } else
9975 /* XXX DSPC is limited to 4k tiled */
9976 pitch_limit = 8*1024;
9977
9978 if (mode_cmd->pitches[0] > pitch_limit) {
9979 DRM_DEBUG("%s pitch (%d) must be less than %d\n",
9980 obj->tiling_mode ? "tiled" : "linear",
9981 mode_cmd->pitches[0], pitch_limit);
5d7bd705 9982 return -EINVAL;
c16ed4be 9983 }
5d7bd705
VS
9984
9985 if (obj->tiling_mode != I915_TILING_NONE &&
c16ed4be
CW
9986 mode_cmd->pitches[0] != obj->stride) {
9987 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
9988 mode_cmd->pitches[0], obj->stride);
5d7bd705 9989 return -EINVAL;
c16ed4be 9990 }
5d7bd705 9991
57779d06 9992 /* Reject formats not supported by any plane early. */
308e5bcb 9993 switch (mode_cmd->pixel_format) {
57779d06 9994 case DRM_FORMAT_C8:
04b3924d
VS
9995 case DRM_FORMAT_RGB565:
9996 case DRM_FORMAT_XRGB8888:
9997 case DRM_FORMAT_ARGB8888:
57779d06
VS
9998 break;
9999 case DRM_FORMAT_XRGB1555:
10000 case DRM_FORMAT_ARGB1555:
c16ed4be 10001 if (INTEL_INFO(dev)->gen > 3) {
4ee62c76
VS
10002 DRM_DEBUG("unsupported pixel format: %s\n",
10003 drm_get_format_name(mode_cmd->pixel_format));
57779d06 10004 return -EINVAL;
c16ed4be 10005 }
57779d06
VS
10006 break;
10007 case DRM_FORMAT_XBGR8888:
10008 case DRM_FORMAT_ABGR8888:
04b3924d
VS
10009 case DRM_FORMAT_XRGB2101010:
10010 case DRM_FORMAT_ARGB2101010:
57779d06
VS
10011 case DRM_FORMAT_XBGR2101010:
10012 case DRM_FORMAT_ABGR2101010:
c16ed4be 10013 if (INTEL_INFO(dev)->gen < 4) {
4ee62c76
VS
10014 DRM_DEBUG("unsupported pixel format: %s\n",
10015 drm_get_format_name(mode_cmd->pixel_format));
57779d06 10016 return -EINVAL;
c16ed4be 10017 }
b5626747 10018 break;
04b3924d
VS
10019 case DRM_FORMAT_YUYV:
10020 case DRM_FORMAT_UYVY:
10021 case DRM_FORMAT_YVYU:
10022 case DRM_FORMAT_VYUY:
c16ed4be 10023 if (INTEL_INFO(dev)->gen < 5) {
4ee62c76
VS
10024 DRM_DEBUG("unsupported pixel format: %s\n",
10025 drm_get_format_name(mode_cmd->pixel_format));
57779d06 10026 return -EINVAL;
c16ed4be 10027 }
57cd6508
CW
10028 break;
10029 default:
4ee62c76
VS
10030 DRM_DEBUG("unsupported pixel format: %s\n",
10031 drm_get_format_name(mode_cmd->pixel_format));
57cd6508
CW
10032 return -EINVAL;
10033 }
10034
90f9a336
VS
10035 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
10036 if (mode_cmd->offsets[0] != 0)
10037 return -EINVAL;
10038
c7d73f6a
DV
10039 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
10040 intel_fb->obj = obj;
10041
79e53945
JB
10042 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
10043 if (ret) {
10044 DRM_ERROR("framebuffer init failed %d\n", ret);
10045 return ret;
10046 }
10047
79e53945
JB
10048 return 0;
10049}
10050
79e53945
JB
10051static struct drm_framebuffer *
10052intel_user_framebuffer_create(struct drm_device *dev,
10053 struct drm_file *filp,
308e5bcb 10054 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 10055{
05394f39 10056 struct drm_i915_gem_object *obj;
79e53945 10057
308e5bcb
JB
10058 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
10059 mode_cmd->handles[0]));
c8725226 10060 if (&obj->base == NULL)
cce13ff7 10061 return ERR_PTR(-ENOENT);
79e53945 10062
d2dff872 10063 return intel_framebuffer_create(dev, mode_cmd, obj);
79e53945
JB
10064}
10065
79e53945 10066static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 10067 .fb_create = intel_user_framebuffer_create,
eb1f8e4f 10068 .output_poll_changed = intel_fb_output_poll_changed,
79e53945
JB
10069};
10070
e70236a8
JB
10071/* Set up chip specific display functions */
10072static void intel_init_display(struct drm_device *dev)
10073{
10074 struct drm_i915_private *dev_priv = dev->dev_private;
10075
ee9300bb
DV
10076 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
10077 dev_priv->display.find_dpll = g4x_find_best_dpll;
10078 else if (IS_VALLEYVIEW(dev))
10079 dev_priv->display.find_dpll = vlv_find_best_dpll;
10080 else if (IS_PINEVIEW(dev))
10081 dev_priv->display.find_dpll = pnv_find_best_dpll;
10082 else
10083 dev_priv->display.find_dpll = i9xx_find_best_dpll;
10084
affa9354 10085 if (HAS_DDI(dev)) {
0e8ffe1b 10086 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
09b4ddf9 10087 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
4f771f10
PZ
10088 dev_priv->display.crtc_enable = haswell_crtc_enable;
10089 dev_priv->display.crtc_disable = haswell_crtc_disable;
6441ab5f 10090 dev_priv->display.off = haswell_crtc_off;
09b4ddf9
PZ
10091 dev_priv->display.update_plane = ironlake_update_plane;
10092 } else if (HAS_PCH_SPLIT(dev)) {
0e8ffe1b 10093 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
f564048e 10094 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
76e5a89c
DV
10095 dev_priv->display.crtc_enable = ironlake_crtc_enable;
10096 dev_priv->display.crtc_disable = ironlake_crtc_disable;
ee7b9f93 10097 dev_priv->display.off = ironlake_crtc_off;
17638cd6 10098 dev_priv->display.update_plane = ironlake_update_plane;
89b667f8
JB
10099 } else if (IS_VALLEYVIEW(dev)) {
10100 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
10101 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
10102 dev_priv->display.crtc_enable = valleyview_crtc_enable;
10103 dev_priv->display.crtc_disable = i9xx_crtc_disable;
10104 dev_priv->display.off = i9xx_crtc_off;
10105 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 10106 } else {
0e8ffe1b 10107 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
f564048e 10108 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
76e5a89c
DV
10109 dev_priv->display.crtc_enable = i9xx_crtc_enable;
10110 dev_priv->display.crtc_disable = i9xx_crtc_disable;
ee7b9f93 10111 dev_priv->display.off = i9xx_crtc_off;
17638cd6 10112 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 10113 }
e70236a8 10114
e70236a8 10115 /* Returns the core display clock speed */
25eb05fc
JB
10116 if (IS_VALLEYVIEW(dev))
10117 dev_priv->display.get_display_clock_speed =
10118 valleyview_get_display_clock_speed;
10119 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
e70236a8
JB
10120 dev_priv->display.get_display_clock_speed =
10121 i945_get_display_clock_speed;
10122 else if (IS_I915G(dev))
10123 dev_priv->display.get_display_clock_speed =
10124 i915_get_display_clock_speed;
257a7ffc 10125 else if (IS_I945GM(dev) || IS_845G(dev))
e70236a8
JB
10126 dev_priv->display.get_display_clock_speed =
10127 i9xx_misc_get_display_clock_speed;
257a7ffc
DV
10128 else if (IS_PINEVIEW(dev))
10129 dev_priv->display.get_display_clock_speed =
10130 pnv_get_display_clock_speed;
e70236a8
JB
10131 else if (IS_I915GM(dev))
10132 dev_priv->display.get_display_clock_speed =
10133 i915gm_get_display_clock_speed;
10134 else if (IS_I865G(dev))
10135 dev_priv->display.get_display_clock_speed =
10136 i865_get_display_clock_speed;
f0f8a9ce 10137 else if (IS_I85X(dev))
e70236a8
JB
10138 dev_priv->display.get_display_clock_speed =
10139 i855_get_display_clock_speed;
10140 else /* 852, 830 */
10141 dev_priv->display.get_display_clock_speed =
10142 i830_get_display_clock_speed;
10143
7f8a8569 10144 if (HAS_PCH_SPLIT(dev)) {
f00a3ddf 10145 if (IS_GEN5(dev)) {
674cf967 10146 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
e0dac65e 10147 dev_priv->display.write_eld = ironlake_write_eld;
1398261a 10148 } else if (IS_GEN6(dev)) {
674cf967 10149 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
e0dac65e 10150 dev_priv->display.write_eld = ironlake_write_eld;
357555c0
JB
10151 } else if (IS_IVYBRIDGE(dev)) {
10152 /* FIXME: detect B0+ stepping and use auto training */
10153 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
e0dac65e 10154 dev_priv->display.write_eld = ironlake_write_eld;
01a415fd
DV
10155 dev_priv->display.modeset_global_resources =
10156 ivb_modeset_global_resources;
c82e4d26
ED
10157 } else if (IS_HASWELL(dev)) {
10158 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
83358c85 10159 dev_priv->display.write_eld = haswell_write_eld;
d6dd9eb1
DV
10160 dev_priv->display.modeset_global_resources =
10161 haswell_modeset_global_resources;
a0e63c22 10162 }
6067aaea 10163 } else if (IS_G4X(dev)) {
e0dac65e 10164 dev_priv->display.write_eld = g4x_write_eld;
e70236a8 10165 }
8c9f3aaf
JB
10166
10167 /* Default just returns -ENODEV to indicate unsupported */
10168 dev_priv->display.queue_flip = intel_default_queue_flip;
10169
10170 switch (INTEL_INFO(dev)->gen) {
10171 case 2:
10172 dev_priv->display.queue_flip = intel_gen2_queue_flip;
10173 break;
10174
10175 case 3:
10176 dev_priv->display.queue_flip = intel_gen3_queue_flip;
10177 break;
10178
10179 case 4:
10180 case 5:
10181 dev_priv->display.queue_flip = intel_gen4_queue_flip;
10182 break;
10183
10184 case 6:
10185 dev_priv->display.queue_flip = intel_gen6_queue_flip;
10186 break;
7c9017e5
JB
10187 case 7:
10188 dev_priv->display.queue_flip = intel_gen7_queue_flip;
10189 break;
8c9f3aaf 10190 }
e70236a8
JB
10191}
10192
b690e96c
JB
10193/*
10194 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
10195 * resume, or other times. This quirk makes sure that's the case for
10196 * affected systems.
10197 */
0206e353 10198static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
10199{
10200 struct drm_i915_private *dev_priv = dev->dev_private;
10201
10202 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 10203 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
10204}
10205
435793df
KP
10206/*
10207 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
10208 */
10209static void quirk_ssc_force_disable(struct drm_device *dev)
10210{
10211 struct drm_i915_private *dev_priv = dev->dev_private;
10212 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 10213 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
10214}
10215
4dca20ef 10216/*
5a15ab5b
CE
10217 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
10218 * brightness value
4dca20ef
CE
10219 */
10220static void quirk_invert_brightness(struct drm_device *dev)
10221{
10222 struct drm_i915_private *dev_priv = dev->dev_private;
10223 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 10224 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
10225}
10226
e85843be
KM
10227/*
10228 * Some machines (Dell XPS13) suffer broken backlight controls if
10229 * BLM_PCH_PWM_ENABLE is set.
10230 */
10231static void quirk_no_pch_pwm_enable(struct drm_device *dev)
10232{
10233 struct drm_i915_private *dev_priv = dev->dev_private;
10234 dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
10235 DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
10236}
10237
b690e96c
JB
10238struct intel_quirk {
10239 int device;
10240 int subsystem_vendor;
10241 int subsystem_device;
10242 void (*hook)(struct drm_device *dev);
10243};
10244
5f85f176
EE
10245/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
10246struct intel_dmi_quirk {
10247 void (*hook)(struct drm_device *dev);
10248 const struct dmi_system_id (*dmi_id_list)[];
10249};
10250
10251static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
10252{
10253 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
10254 return 1;
10255}
10256
10257static const struct intel_dmi_quirk intel_dmi_quirks[] = {
10258 {
10259 .dmi_id_list = &(const struct dmi_system_id[]) {
10260 {
10261 .callback = intel_dmi_reverse_brightness,
10262 .ident = "NCR Corporation",
10263 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
10264 DMI_MATCH(DMI_PRODUCT_NAME, ""),
10265 },
10266 },
10267 { } /* terminating entry */
10268 },
10269 .hook = quirk_invert_brightness,
10270 },
10271};
10272
c43b5634 10273static struct intel_quirk intel_quirks[] = {
b690e96c 10274 /* HP Mini needs pipe A force quirk (LP: #322104) */
0206e353 10275 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
b690e96c 10276
b690e96c
JB
10277 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
10278 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
10279
b690e96c
JB
10280 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
10281 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
10282
ccd0d36e 10283 /* 830/845 need to leave pipe A & dpll A up */
b690e96c 10284 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
dcdaed6e 10285 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
435793df
KP
10286
10287 /* Lenovo U160 cannot use SSC on LVDS */
10288 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
070d329a
MAS
10289
10290 /* Sony Vaio Y cannot use SSC on LVDS */
10291 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
5a15ab5b 10292
ee1452d7
JN
10293 /*
10294 * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
10295 * seem to use inverted backlight PWM.
10296 */
10297 { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
e85843be
KM
10298
10299 /* Dell XPS13 HD Sandy Bridge */
10300 { 0x0116, 0x1028, 0x052e, quirk_no_pch_pwm_enable },
10301 /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
10302 { 0x0166, 0x1028, 0x058b, quirk_no_pch_pwm_enable },
b690e96c
JB
10303};
10304
10305static void intel_init_quirks(struct drm_device *dev)
10306{
10307 struct pci_dev *d = dev->pdev;
10308 int i;
10309
10310 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
10311 struct intel_quirk *q = &intel_quirks[i];
10312
10313 if (d->device == q->device &&
10314 (d->subsystem_vendor == q->subsystem_vendor ||
10315 q->subsystem_vendor == PCI_ANY_ID) &&
10316 (d->subsystem_device == q->subsystem_device ||
10317 q->subsystem_device == PCI_ANY_ID))
10318 q->hook(dev);
10319 }
5f85f176
EE
10320 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
10321 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
10322 intel_dmi_quirks[i].hook(dev);
10323 }
b690e96c
JB
10324}
10325
9cce37f4
JB
10326/* Disable the VGA plane that we never use */
10327static void i915_disable_vga(struct drm_device *dev)
10328{
10329 struct drm_i915_private *dev_priv = dev->dev_private;
10330 u8 sr1;
766aa1c4 10331 u32 vga_reg = i915_vgacntrl_reg(dev);
9cce37f4
JB
10332
10333 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 10334 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
10335 sr1 = inb(VGA_SR_DATA);
10336 outb(sr1 | 1<<5, VGA_SR_DATA);
10337 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10338 udelay(300);
10339
10340 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
10341 POSTING_READ(vga_reg);
10342}
10343
6e1b4fda 10344static void i915_enable_vga_mem(struct drm_device *dev)
81b5c7bc
AW
10345{
10346 /* Enable VGA memory on Intel HD */
10347 if (HAS_PCH_SPLIT(dev)) {
10348 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10349 outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10350 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10351 VGA_RSRC_LEGACY_MEM |
10352 VGA_RSRC_NORMAL_IO |
10353 VGA_RSRC_NORMAL_MEM);
10354 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10355 }
10356}
10357
6e1b4fda
VS
10358void i915_disable_vga_mem(struct drm_device *dev)
10359{
10360 /* Disable VGA memory on Intel HD */
10361 if (HAS_PCH_SPLIT(dev)) {
10362 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
10363 outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
10364 vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
10365 VGA_RSRC_NORMAL_IO |
10366 VGA_RSRC_NORMAL_MEM);
10367 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
10368 }
10369}
10370
f817586c
DV
10371void intel_modeset_init_hw(struct drm_device *dev)
10372{
f6071166
JB
10373 struct drm_i915_private *dev_priv = dev->dev_private;
10374
a8f78b58
ED
10375 intel_prepare_ddi(dev);
10376
f817586c
DV
10377 intel_init_clock_gating(dev);
10378
f6071166
JB
10379 /* Enable the CRI clock source so we can get at the display */
10380 if (IS_VALLEYVIEW(dev))
10381 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
10382 DPLL_INTEGRATED_CRI_CLK_VLV);
10383
40e9cf64
JB
10384 intel_init_dpio(dev);
10385
79f5b2c7 10386 mutex_lock(&dev->struct_mutex);
8090c6b9 10387 intel_enable_gt_powersave(dev);
79f5b2c7 10388 mutex_unlock(&dev->struct_mutex);
f817586c
DV
10389}
10390
7d708ee4
ID
10391void intel_modeset_suspend_hw(struct drm_device *dev)
10392{
10393 intel_suspend_hw(dev);
10394}
10395
79e53945
JB
10396void intel_modeset_init(struct drm_device *dev)
10397{
652c393a 10398 struct drm_i915_private *dev_priv = dev->dev_private;
7f1f3851 10399 int i, j, ret;
79e53945
JB
10400
10401 drm_mode_config_init(dev);
10402
10403 dev->mode_config.min_width = 0;
10404 dev->mode_config.min_height = 0;
10405
019d96cb
DA
10406 dev->mode_config.preferred_depth = 24;
10407 dev->mode_config.prefer_shadow = 1;
10408
e6ecefaa 10409 dev->mode_config.funcs = &intel_mode_funcs;
79e53945 10410
b690e96c
JB
10411 intel_init_quirks(dev);
10412
1fa61106
ED
10413 intel_init_pm(dev);
10414
e3c74757
BW
10415 if (INTEL_INFO(dev)->num_pipes == 0)
10416 return;
10417
e70236a8
JB
10418 intel_init_display(dev);
10419
a6c45cf0
CW
10420 if (IS_GEN2(dev)) {
10421 dev->mode_config.max_width = 2048;
10422 dev->mode_config.max_height = 2048;
10423 } else if (IS_GEN3(dev)) {
5e4d6fa7
KP
10424 dev->mode_config.max_width = 4096;
10425 dev->mode_config.max_height = 4096;
79e53945 10426 } else {
a6c45cf0
CW
10427 dev->mode_config.max_width = 8192;
10428 dev->mode_config.max_height = 8192;
79e53945 10429 }
5d4545ae 10430 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
79e53945 10431
28c97730 10432 DRM_DEBUG_KMS("%d display pipe%s available.\n",
7eb552ae
BW
10433 INTEL_INFO(dev)->num_pipes,
10434 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
79e53945 10435
08e2a7de 10436 for_each_pipe(i) {
79e53945 10437 intel_crtc_init(dev, i);
7f1f3851
JB
10438 for (j = 0; j < dev_priv->num_plane; j++) {
10439 ret = intel_plane_init(dev, i, j);
10440 if (ret)
06da8da2
VS
10441 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
10442 pipe_name(i), sprite_name(i, j), ret);
7f1f3851 10443 }
79e53945
JB
10444 }
10445
79f689aa 10446 intel_cpu_pll_init(dev);
e72f9fbf 10447 intel_shared_dpll_init(dev);
ee7b9f93 10448
9cce37f4
JB
10449 /* Just disable it once at startup */
10450 i915_disable_vga(dev);
79e53945 10451 intel_setup_outputs(dev);
11be49eb
CW
10452
10453 /* Just in case the BIOS is doing something questionable. */
10454 intel_disable_fbc(dev);
2c7111db
CW
10455}
10456
24929352
DV
10457static void
10458intel_connector_break_all_links(struct intel_connector *connector)
10459{
10460 connector->base.dpms = DRM_MODE_DPMS_OFF;
10461 connector->base.encoder = NULL;
10462 connector->encoder->connectors_active = false;
10463 connector->encoder->base.crtc = NULL;
10464}
10465
7fad798e
DV
10466static void intel_enable_pipe_a(struct drm_device *dev)
10467{
10468 struct intel_connector *connector;
10469 struct drm_connector *crt = NULL;
10470 struct intel_load_detect_pipe load_detect_temp;
10471
10472 /* We can't just switch on pipe A; we need to set things up with a
10473 * proper mode and output configuration. As a gross hack, enable pipe A
10474 * by enabling the load detect pipe once. */
10475 list_for_each_entry(connector,
10476 &dev->mode_config.connector_list,
10477 base.head) {
10478 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
10479 crt = &connector->base;
10480 break;
10481 }
10482 }
10483
10484 if (!crt)
10485 return;
10486
10487 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp))
10488 intel_release_load_detect_pipe(crt, &load_detect_temp);
10489
652c393a 10490
7fad798e
DV
10491}
10492
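/* Pre-gen4 hardware has a programmable plane -> pipe assignment; return
 * false when the other display plane is enabled and selected onto this
 * crtc's pipe, i.e. the BIOS left the mapping crossed. */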
fa555837
DV
10493static bool
10494intel_check_plane_mapping(struct intel_crtc *crtc)
10495{
7eb552ae
BW
10496 struct drm_device *dev = crtc->base.dev;
10497 struct drm_i915_private *dev_priv = dev->dev_private;
fa555837
DV
10498 u32 reg, val;
10499
7eb552ae 10500 if (INTEL_INFO(dev)->num_pipes == 1)
fa555837
DV
10501 return true;
10502
10503 reg = DSPCNTR(!crtc->plane);
10504 val = I915_READ(reg);
10505
10506 if ((val & DISPLAY_PLANE_ENABLE) &&
10507 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
10508 return false;
10509
10510 return true;
10511}
10512
24929352
DV
10513static void intel_sanitize_crtc(struct intel_crtc *crtc)
10514{
10515 struct drm_device *dev = crtc->base.dev;
10516 struct drm_i915_private *dev_priv = dev->dev_private;
fa555837 10517 u32 reg;
24929352 10518
24929352 10519 /* Clear any frame start delays used for debugging left by the BIOS */
3b117c8f 10520 reg = PIPECONF(crtc->config.cpu_transcoder);
24929352
DV
10521 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
10522
10523 /* We need to sanitize the plane -> pipe mapping first because this will
fa555837
DV
10524 * disable the crtc (and hence change the state) if it is wrong. Note
10525 * that gen4+ has a fixed plane -> pipe mapping. */
10526 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
24929352
DV
10527 struct intel_connector *connector;
10528 bool plane;
10529
24929352
DV
10530 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
10531 crtc->base.base.id);
10532
10533 /* Pipe has the wrong plane attached and the plane is active.
10534 * Temporarily change the plane mapping and disable everything
10535 * ... */
10536 plane = crtc->plane;
10537 crtc->plane = !plane;
10538 dev_priv->display.crtc_disable(&crtc->base);
10539 crtc->plane = plane;
10540
10541 /* ... and break all links. */
10542 list_for_each_entry(connector, &dev->mode_config.connector_list,
10543 base.head) {
10544 if (connector->encoder->base.crtc != &crtc->base)
10545 continue;
10546
10547 intel_connector_break_all_links(connector);
10548 }
10549
10550 WARN_ON(crtc->active);
10551 crtc->base.enabled = false;
10552 }
24929352 10553
7fad798e
DV
10554 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
10555 crtc->pipe == PIPE_A && !crtc->active) {
10556 /* BIOS forgot to enable pipe A, this mostly happens after
10557 * resume. Force-enable the pipe to fix this; in the update_dpms
10558 * call below we restore the pipe to the right state, but leave
10559 * the required bits on. */
10560 intel_enable_pipe_a(dev);
10561 }
10562
24929352
DV
10563 /* Adjust the state of the output pipe according to whether we
10564 * have active connectors/encoders. */
10565 intel_crtc_update_dpms(&crtc->base);
10566
10567 if (crtc->active != crtc->base.enabled) {
10568 struct intel_encoder *encoder;
10569
10570 /* This can happen either due to bugs in the get_hw_state
10571 * functions or because the pipe is force-enabled due to the
10572 * pipe A quirk. */
10573 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
10574 crtc->base.base.id,
10575 crtc->base.enabled ? "enabled" : "disabled",
10576 crtc->active ? "enabled" : "disabled");
10577
10578 crtc->base.enabled = crtc->active;
10579
10580 /* Because we only establish the connector -> encoder ->
10581 * crtc links if something is active, this means the
10582 * crtc is now deactivated. Break the links. connector
10583 * -> encoder links are only established when things are
10584 * actually up, hence no need to break them. */
10585 WARN_ON(crtc->active);
10586
10587 for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
10588 WARN_ON(encoder->connectors_active);
10589 encoder->base.crtc = NULL;
10590 }
10591 }
10592}
10593
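/* Sanitize one encoder after the hw state readout: an encoder that claims
 * active connectors but has no active pipe is disabled, and any stale
 * connector -> encoder links pointing at it are broken. */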
10594static void intel_sanitize_encoder(struct intel_encoder *encoder)
10595{
10596 struct intel_connector *connector;
10597 struct drm_device *dev = encoder->base.dev;
10598
10599 /* We need to check both for a crtc link (meaning that the
10600 * encoder is active and trying to read from a pipe) and the
10601 * pipe itself being active. */
10602 bool has_active_crtc = encoder->base.crtc &&
10603 to_intel_crtc(encoder->base.crtc)->active;
10604
10605 if (encoder->connectors_active && !has_active_crtc) {
10606 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
10607 encoder->base.base.id,
10608 drm_get_encoder_name(&encoder->base));
10609
10610 /* Connector is active, but has no active pipe. This is
10611 * fallout from our resume register restoring. Disable
10612 * the encoder manually again. */
10613 if (encoder->base.crtc) {
10614 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
10615 encoder->base.base.id,
10616 drm_get_encoder_name(&encoder->base));
10617 encoder->disable(encoder);
10618 }
10619
10620 /* Inconsistent output/port/pipe state happens presumably due to
10621 * a bug in one of the get_hw_state functions. Or someplace else
10622 * in our code, like the register restore mess on resume. Clamp
10623 * things to off as a safer default. */
10624 list_for_each_entry(connector,
10625 &dev->mode_config.connector_list,
10626 base.head) {
10627 if (connector->encoder != encoder)
10628 continue;
10629
10630 intel_connector_break_all_links(connector);
10631 }
10632 }
10633 /* Enabled encoders without active connectors will be fixed in
10634 * the crtc fixup. */
10635}
10636
44cec740 10637void i915_redisable_vga(struct drm_device *dev)
0fde901f
KM
10638{
10639 struct drm_i915_private *dev_priv = dev->dev_private;
766aa1c4 10640 u32 vga_reg = i915_vgacntrl_reg(dev);
0fde901f 10641
8dc8a27c
PZ
10642 /* This function can be called both from intel_modeset_setup_hw_state or
10643 * at a very early point in our resume sequence, where the power well
10644 * structures are not yet restored. Since this function is at a very
10645 * paranoid "someone might have enabled VGA while we were not looking"
10646 * level, just check if the power well is enabled instead of trying to
10647 * follow the "don't touch the power well if we don't need it" policy
10648 * the rest of the driver uses. */
10649 if (HAS_POWER_WELL(dev) &&
6aedd1f5 10650 (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
8dc8a27c
PZ
10651 return;
10652
0fde901f
KM
10653 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
10654 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
209d5211 10655 i915_disable_vga(dev);
6e1b4fda 10656 i915_disable_vga_mem(dev);
0fde901f
KM
10657 }
10658}
10659
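/* Read the current hardware state (pipe configs, shared DPLLs, encoders,
 * connectors) back into the drm/i915 software state structures without
 * touching the hardware itself. */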
30e984df 10660static void intel_modeset_readout_hw_state(struct drm_device *dev)
24929352
DV
10661{
10662 struct drm_i915_private *dev_priv = dev->dev_private;
10663 enum pipe pipe;
24929352
DV
10664 struct intel_crtc *crtc;
10665 struct intel_encoder *encoder;
10666 struct intel_connector *connector;
5358901f 10667 int i;
24929352 10668
0e8ffe1b
DV
10669 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10670 base.head) {
88adfff1 10671 memset(&crtc->config, 0, sizeof(crtc->config));
3b117c8f 10672
0e8ffe1b
DV
10673 crtc->active = dev_priv->display.get_pipe_config(crtc,
10674 &crtc->config);
24929352
DV
10675
10676 crtc->base.enabled = crtc->active;
10677
10678 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
10679 crtc->base.base.id,
10680 crtc->active ? "enabled" : "disabled");
10681 }
10682
5358901f 10683 /* FIXME: Smash this into the new shared dpll infrastructure. */
affa9354 10684 if (HAS_DDI(dev))
6441ab5f
PZ
10685 intel_ddi_setup_hw_pll_state(dev);
10686
5358901f
DV
10687 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10688 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10689
10690 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state);
10691 pll->active = 0;
10692 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10693 base.head) {
10694 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
10695 pll->active++;
10696 }
10697 pll->refcount = pll->active;
10698
35c95375
DV
10699 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
10700 pll->name, pll->refcount, pll->on);
5358901f
DV
10701 }
10702
24929352
DV
10703 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10704 base.head) {
10705 pipe = 0;
10706
10707 if (encoder->get_hw_state(encoder, &pipe)) {
045ac3b5
JB
10708 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
10709 encoder->base.crtc = &crtc->base;
510d5f2f 10710 if (encoder->get_config)
045ac3b5 10711 encoder->get_config(encoder, &crtc->config);
24929352
DV
10712 } else {
10713 encoder->base.crtc = NULL;
10714 }
10715
10716 encoder->connectors_active = false;
10717 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe=%i\n",
10718 encoder->base.base.id,
10719 drm_get_encoder_name(&encoder->base),
10720 encoder->base.crtc ? "enabled" : "disabled",
10721 pipe);
10722 }
10723
10724 list_for_each_entry(connector, &dev->mode_config.connector_list,
10725 base.head) {
10726 if (connector->get_hw_state(connector)) {
10727 connector->base.dpms = DRM_MODE_DPMS_ON;
10728 connector->encoder->connectors_active = true;
10729 connector->base.encoder = &connector->encoder->base;
10730 } else {
10731 connector->base.dpms = DRM_MODE_DPMS_OFF;
10732 connector->base.encoder = NULL;
10733 }
10734 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
10735 connector->base.base.id,
10736 drm_get_connector_name(&connector->base),
10737 connector->base.encoder ? "enabled" : "disabled");
10738 }
30e984df
DV
10739}
10740
10741/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
10742 * and i915 state tracking structures. */
10743void intel_modeset_setup_hw_state(struct drm_device *dev,
10744 bool force_restore)
10745{
10746 struct drm_i915_private *dev_priv = dev->dev_private;
10747 enum pipe pipe;
30e984df
DV
10748 struct intel_crtc *crtc;
10749 struct intel_encoder *encoder;
35c95375 10750 int i;
30e984df
DV
10751
10752 intel_modeset_readout_hw_state(dev);
24929352 10753
babea61d
JB
10754 /*
10755 * Now that we have the config, copy it to each CRTC struct
10756 * Note that this could go away if we move to using crtc_config
10757 * checking everywhere.
10758 */
10759 list_for_each_entry(crtc, &dev->mode_config.crtc_list,
10760 base.head) {
10761 if (crtc->active && i915_fastboot) {
10762 intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
10763
10764 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
10765 crtc->base.base.id);
10766 drm_mode_debug_printmodeline(&crtc->base.mode);
10767 }
10768 }
10769
24929352
DV
10770 /* HW state is read out, now we need to sanitize this mess. */
10771 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
10772 base.head) {
10773 intel_sanitize_encoder(encoder);
10774 }
10775
10776 for_each_pipe(pipe) {
10777 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
10778 intel_sanitize_crtc(crtc);
c0b03411 10779 intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
24929352 10780 }
9a935856 10781
35c95375
DV
10782 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
10783 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
10784
10785 if (!pll->on || pll->active)
10786 continue;
10787
10788 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
10789
10790 pll->disable(dev_priv, pll);
10791 pll->on = false;
10792 }
10793
45e2b5f6 10794 if (force_restore) {
7d0bc1ea
VS
10795 i915_redisable_vga(dev);
10796
f30da187
DV
10797 /*
10798 * We need to use raw interfaces for restoring state to avoid
10799 * checking (bogus) intermediate states.
10800 */
45e2b5f6 10801 for_each_pipe(pipe) {
b5644d05
JB
10802 struct drm_crtc *crtc =
10803 dev_priv->pipe_to_crtc_mapping[pipe];
f30da187
DV
10804
10805 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
10806 crtc->fb);
45e2b5f6
DV
10807 }
10808 } else {
10809 intel_modeset_update_staged_output_state(dev);
10810 }
8af6cf88
DV
10811
10812 intel_modeset_check_state(dev);
2e938892
DV
10813
10814 drm_mode_config_reset(dev);
2c7111db
CW
10815}
10816
10817void intel_modeset_gem_init(struct drm_device *dev)
10818{
1833b134 10819 intel_modeset_init_hw(dev);
02e792fb
DV
10820
10821 intel_setup_overlay(dev);
24929352 10822
45e2b5f6 10823 intel_modeset_setup_hw_state(dev, false);
79e53945
JB
10824}
10825
10826void intel_modeset_cleanup(struct drm_device *dev)
10827{
652c393a
JB
10828 struct drm_i915_private *dev_priv = dev->dev_private;
10829 struct drm_crtc *crtc;
d9255d57 10830 struct drm_connector *connector;
652c393a 10831
fd0c0642
DV
10832 /*
10833 * Disable interrupts and polling as the first thing to avoid creating havoc.
10834 * Too much stuff here (turning off rps, connectors, ...) would
10835 * experience fancy races otherwise.
10836 */
10837 drm_irq_uninstall(dev);
10838 cancel_work_sync(&dev_priv->hotplug_work);
10839 /*
10840 * Due to the hpd irq storm handling the hotplug work can re-arm the
10841 * poll handlers. Hence disable polling after hpd handling is shut down.
10842 */
f87ea761 10843 drm_kms_helper_poll_fini(dev);
fd0c0642 10844
652c393a
JB
10845 mutex_lock(&dev->struct_mutex);
10846
723bfd70
JB
10847 intel_unregister_dsm_handler();
10848
652c393a
JB
10849 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
10850 /* Skip inactive CRTCs */
10851 if (!crtc->fb)
10852 continue;
10853
3dec0095 10854 intel_increase_pllclock(crtc);
652c393a
JB
10855 }
10856
973d04f9 10857 intel_disable_fbc(dev);
e70236a8 10858
6e1b4fda 10859 i915_enable_vga_mem(dev);
81b5c7bc 10860
8090c6b9 10861 intel_disable_gt_powersave(dev);
0cdab21f 10862
930ebb46
DV
10863 ironlake_teardown_rc6(dev);
10864
69341a5e
KH
10865 mutex_unlock(&dev->struct_mutex);
10866
1630fe75
CW
10867 /* flush any delayed tasks or pending work */
10868 flush_scheduled_work();
10869
dc652f90
JN
10870 /* destroy backlight, if any, before the connectors */
10871 intel_panel_destroy_backlight(dev);
10872
d9255d57
PZ
10873 /* destroy the sysfs files before encoders/connectors */
10874 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
10875 drm_sysfs_connector_remove(connector);
10876
79e53945 10877 drm_mode_config_cleanup(dev);
4d7bb011
DV
10878
10879 intel_cleanup_overlay(dev);
79e53945
JB
10880}
10881
f1c79df3
ZW
10882/*
10883 * Return which encoder is currently attached for connector.
10884 */
df0e9248 10885struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 10886{
df0e9248
CW
10887 return &intel_attached_encoder(connector)->base;
10888}
f1c79df3 10889
df0e9248
CW
10890void intel_connector_attach_encoder(struct intel_connector *connector,
10891 struct intel_encoder *encoder)
10892{
10893 connector->encoder = encoder;
10894 drm_mode_connector_attach_encoder(&connector->base,
10895 &encoder->base);
79e53945 10896}
28d52043
DA
10897
10898/*
10899 * set vga decode state - true == enable VGA decode
10900 */
10901int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
10902{
10903 struct drm_i915_private *dev_priv = dev->dev_private;
10904 u16 gmch_ctrl;
10905
10906 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
10907 if (state)
10908 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
10909 else
10910 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
10911 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
10912 return 0;
10913}
c4a1d9e4 10914
c4a1d9e4 10915struct intel_display_error_state {
ff57f1b0
PZ
10916
10917 u32 power_well_driver;
10918
63b66e5b
CW
10919 int num_transcoders;
10920
c4a1d9e4
CW
10921 struct intel_cursor_error_state {
10922 u32 control;
10923 u32 position;
10924 u32 base;
10925 u32 size;
52331309 10926 } cursor[I915_MAX_PIPES];
c4a1d9e4
CW
10927
10928 struct intel_pipe_error_state {
c4a1d9e4 10929 u32 source;
52331309 10930 } pipe[I915_MAX_PIPES];
c4a1d9e4
CW
10931
10932 struct intel_plane_error_state {
10933 u32 control;
10934 u32 stride;
10935 u32 size;
10936 u32 pos;
10937 u32 addr;
10938 u32 surface;
10939 u32 tile_offset;
52331309 10940 } plane[I915_MAX_PIPES];
63b66e5b
CW
10941
10942 struct intel_transcoder_error_state {
10943 enum transcoder cpu_transcoder;
10944
10945 u32 conf;
10946
10947 u32 htotal;
10948 u32 hblank;
10949 u32 hsync;
10950 u32 vtotal;
10951 u32 vblank;
10952 u32 vsync;
10953 } transcoder[4];
c4a1d9e4
CW
10954};
10955
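/* Snapshot the cursor, plane, pipe and transcoder registers for every pipe
 * so they can be included in the i915 error state dump; the allocation uses
 * GFP_ATOMIC so this is safe to call from the error capture path. */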
10956struct intel_display_error_state *
10957intel_display_capture_error_state(struct drm_device *dev)
10958{
0206e353 10959 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4 10960 struct intel_display_error_state *error;
63b66e5b
CW
10961 int transcoders[] = {
10962 TRANSCODER_A,
10963 TRANSCODER_B,
10964 TRANSCODER_C,
10965 TRANSCODER_EDP,
10966 };
c4a1d9e4
CW
10967 int i;
10968
63b66e5b
CW
10969 if (INTEL_INFO(dev)->num_pipes == 0)
10970 return NULL;
10971
c4a1d9e4
CW
10972 error = kmalloc(sizeof(*error), GFP_ATOMIC);
10973 if (error == NULL)
10974 return NULL;
10975
ff57f1b0
PZ
10976 if (HAS_POWER_WELL(dev))
10977 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
10978
52331309 10979 for_each_pipe(i) {
a18c4c3d
PZ
10980 if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
10981 error->cursor[i].control = I915_READ(CURCNTR(i));
10982 error->cursor[i].position = I915_READ(CURPOS(i));
10983 error->cursor[i].base = I915_READ(CURBASE(i));
10984 } else {
10985 error->cursor[i].control = I915_READ(CURCNTR_IVB(i));
10986 error->cursor[i].position = I915_READ(CURPOS_IVB(i));
10987 error->cursor[i].base = I915_READ(CURBASE_IVB(i));
10988 }
c4a1d9e4
CW
10989
10990 error->plane[i].control = I915_READ(DSPCNTR(i));
10991 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
80ca378b 10992 if (INTEL_INFO(dev)->gen <= 3) {
51889b35 10993 error->plane[i].size = I915_READ(DSPSIZE(i));
80ca378b
PZ
10994 error->plane[i].pos = I915_READ(DSPPOS(i));
10995 }
ca291363
PZ
10996 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
10997 error->plane[i].addr = I915_READ(DSPADDR(i));
c4a1d9e4
CW
10998 if (INTEL_INFO(dev)->gen >= 4) {
10999 error->plane[i].surface = I915_READ(DSPSURF(i));
11000 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
11001 }
11002
c4a1d9e4 11003 error->pipe[i].source = I915_READ(PIPESRC(i));
63b66e5b
CW
11004 }
11005
11006 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
11007 if (HAS_DDI(dev_priv->dev))
11008 error->num_transcoders++; /* Account for eDP. */
11009
11010 for (i = 0; i < error->num_transcoders; i++) {
11011 enum transcoder cpu_transcoder = transcoders[i];
11012
11013 error->transcoder[i].cpu_transcoder = cpu_transcoder;
11014
11015 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
11016 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
11017 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
11018 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
11019 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
11020 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
11021 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
c4a1d9e4
CW
11022 }
11023
12d217c7
PZ
11024 /* In the code above we read the registers without checking if the power
11025 * well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
11026 * prevent the next I915_WRITE from detecting it and printing an error
11027 * message. */
907b28c5 11028 intel_uncore_clear_errors(dev);
12d217c7 11029
c4a1d9e4
CW
11030 return error;
11031}
11032
edc3d884
MK
11033#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
11034
c4a1d9e4 11035void
edc3d884 11036intel_display_print_error_state(struct drm_i915_error_state_buf *m,
c4a1d9e4
CW
11037 struct drm_device *dev,
11038 struct intel_display_error_state *error)
11039{
11040 int i;
11041
63b66e5b
CW
11042 if (!error)
11043 return;
11044
edc3d884 11045 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
ff57f1b0 11046 if (HAS_POWER_WELL(dev))
edc3d884 11047 err_printf(m, "PWR_WELL_CTL2: %08x\n",
ff57f1b0 11048 error->power_well_driver);
52331309 11049 for_each_pipe(i) {
edc3d884 11050 err_printf(m, "Pipe [%d]:\n", i);
edc3d884 11051 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
edc3d884
MK
11052
11053 err_printf(m, "Plane [%d]:\n", i);
11054 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
11055 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
80ca378b 11056 if (INTEL_INFO(dev)->gen <= 3) {
edc3d884
MK
11057 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
11058 err_printf(m, " POS: %08x\n", error->plane[i].pos);
80ca378b 11059 }
4b71a570 11060 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
edc3d884 11061 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
c4a1d9e4 11062 if (INTEL_INFO(dev)->gen >= 4) {
edc3d884
MK
11063 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
11064 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
c4a1d9e4
CW
11065 }
11066
edc3d884
MK
11067 err_printf(m, "Cursor [%d]:\n", i);
11068 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
11069 err_printf(m, " POS: %08x\n", error->cursor[i].position);
11070 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
c4a1d9e4 11071 }
63b66e5b
CW
11072
11073 for (i = 0; i < error->num_transcoders; i++) {
11074 err_printf(m, " CPU transcoder: %c\n",
11075 transcoder_name(error->transcoder[i].cpu_transcoder));
11076 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
11077 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
11078 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
11079 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
11080 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
11081 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
11082 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
11083 }
c4a1d9e4 11084}