/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary(struct drm_crtc *crtc);

typedef struct {
	int min, max;
} intel_range_t;

typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	u32 val;
	int divider;

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}
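
/*
 * Worked example (illustrative values, not tied to any particular SKU):
 * with an 800 MHz HPLL fuse setting, valleyview_get_vco() returns 800000
 * (kHz).  A CCK divider field of 3 then gives
 *
 *	DIV_ROUND_CLOSEST(800000 << 1, 3 + 1) = 400000 kHz,
 *
 * i.e. the derived clock runs at 2 * hpll / (divider + 1) = 400 MHz.
 */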

int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

/* hrawclock is 1/4 the FSB frequency */
int intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}
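
/*
 * Note on units: intel_fdi_link_freq() returns the link clock in units of
 * 100 MHz, so the default of 27 on non-GEN5 parts corresponds to a 2.7 GHz
 * FDI link clock; the GEN5 value is read back from what the BIOS programmed
 * into FDI_PLL_BIOS_0.
 */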

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3 },
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
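/*
 * Example: because of the +2 above, a table entry such as
 * .m1 = { .min = 12, .max = 22 } describes register values and therefore
 * corresponds to actual M1 divider values of 14..24.
 */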
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5 },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	WARN_ON(num_connectors == 0);

	return false;
}

static const intel_limit_t *
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

static const intel_limit_t *
intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (IS_BROXTON(dev))
		limit = &intel_limits_bxt;
	else if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc_state, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc_state);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
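
/*
 * Worked example for the Pineview formula above (illustrative divider
 * choices): with refclk = 96000 kHz, m2 = 80, n = 3, p1 = 2, p2 = 10:
 *
 *	m   = 80 + 2 = 82
 *	vco = 96000 * 82 / 3 = 2624000 kHz
 *	p   = 2 * 10 = 20
 *	dot = 2624000 / 20 = 131200 kHz
 */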

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
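
/*
 * Worked example (illustrative register values): with refclk = 96000 kHz,
 * m1 = 12, m2 = 5, n = 2, p1 = 2, p2 = 5:
 *
 *	m   = 5 * (12 + 2) + (5 + 2) = 77
 *	vco = 96000 * 77 / (2 + 2) = 1848000 kHz
 *	p   = 2 * 5 = 10
 *	dot = 1848000 / 10 = 184800 kHz
 *
 * which lands inside every range of intel_limits_i9xx_sdvo.
 */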

static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
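
/*
 * On VLV/CHV the computed dot clock is the fast (5x) port clock, hence the
 * division by 5 for the pipe clock above.  On CHV m2 additionally carries
 * 22 fractional bits: e.g. (illustrative) with refclk = 19200 kHz, n = 1,
 * m1 = 2 and an m2 register value of 140 << 22,
 *
 *	vco = 19200 * 2 * (140 << 22) / (1 << 22) = 5376000 kHz,
 *
 * which sits inside the 4.8-6.48 GHz VCO range of intel_limits_chv.
 */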

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */
static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const intel_limit_t *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	intel_clock_t clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
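
/*
 * ppm example (illustrative numbers): for a 270000 kHz target and a
 * candidate dot clock of 269973 kHz the error is
 *
 *	1000000 * |270000 - 269973| / 270000 = 100 ppm,
 *
 * right at the boundary of the < 100 ppm window inside which a larger P
 * value wins over a smaller error.
 */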

static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n for better precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware doc, n is always set to 1 and m1 is always
	 * set to 2.  If we ever need to support a 200 MHz refclk, this needs
	 * to be revisited because n may no longer be 1 then.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = i9xx_get_refclk(crtc_state, 0);

	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
	       intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

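/*
 * Sample the pipe's scanline counter twice, 5 ms apart: if the value hasn't
 * moved, the pipe has stopped scanning out.  Used on pre-gen4 hardware,
 * which has no pipe state bit to poll (see intel_wait_for_pipe_off()).
 */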
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

static const char *state_string(bool enabled)
{
	return enabled ? "on" : "off";
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config->shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", state_string(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
	     "%s assertion failure (expected %s, current %s)\n",
	     pll->name, state_string(state), state_string(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE),
	     "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose PLL to disable
 *
 * Disable the PLL for @crtc, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
1764 static void i9xx_disable_pll(struct intel_crtc *crtc)
1765 {
1766 struct drm_device *dev = crtc->base.dev;
1767 struct drm_i915_private *dev_priv = dev->dev_private;
1768 enum pipe pipe = crtc->pipe;
1769
1770 /* Disable DVO 2x clock on both PLLs if necessary */
1771 if (IS_I830(dev) &&
1772 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1773 !intel_num_dvo_pipes(dev)) {
1774 I915_WRITE(DPLL(PIPE_B),
1775 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1776 I915_WRITE(DPLL(PIPE_A),
1777 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1778 }
1779
1780 	/* Leave the pipe and its PLL enabled if a pipe force quirk requires it */
1781 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1782 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1783 return;
1784
1785 /* Make sure the pipe isn't still relying on us */
1786 assert_pipe_disabled(dev_priv, pipe);
1787
1788 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
1789 POSTING_READ(DPLL(pipe));
1790 }
1791
1792 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1793 {
1794 u32 val;
1795
1796 /* Make sure the pipe isn't still relying on us */
1797 assert_pipe_disabled(dev_priv, pipe);
1798
1799 /*
1800 * Leave integrated clock source and reference clock enabled for pipe B.
1801 * The latter is needed for VGA hotplug / manual detection.
1802 */
1803 val = DPLL_VGA_MODE_DIS;
1804 if (pipe == PIPE_B)
1805 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
1806 I915_WRITE(DPLL(pipe), val);
1807 POSTING_READ(DPLL(pipe));
1809 }
1810
1811 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1812 {
1813 enum dpio_channel port = vlv_pipe_to_channel(pipe);
1814 u32 val;
1815
1816 /* Make sure the pipe isn't still relying on us */
1817 assert_pipe_disabled(dev_priv, pipe);
1818
1819 /* Set PLL en = 0 */
1820 val = DPLL_SSC_REF_CLK_CHV |
1821 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1822 if (pipe != PIPE_A)
1823 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1824 I915_WRITE(DPLL(pipe), val);
1825 POSTING_READ(DPLL(pipe));
1826
1827 mutex_lock(&dev_priv->sb_lock);
1828
1829 /* Disable 10bit clock to display controller */
1830 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1831 val &= ~DPIO_DCLKP_EN;
1832 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1833
1834 mutex_unlock(&dev_priv->sb_lock);
1835 }
1836
1837 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1838 struct intel_digital_port *dport,
1839 unsigned int expected_mask)
1840 {
1841 u32 port_mask;
1842 i915_reg_t dpll_reg;
1843
1844 switch (dport->port) {
1845 case PORT_B:
1846 port_mask = DPLL_PORTB_READY_MASK;
1847 dpll_reg = DPLL(0);
1848 break;
1849 case PORT_C:
1850 port_mask = DPLL_PORTC_READY_MASK;
1851 dpll_reg = DPLL(0);
1852 expected_mask <<= 4;
1853 break;
1854 case PORT_D:
1855 port_mask = DPLL_PORTD_READY_MASK;
1856 dpll_reg = DPIO_PHY_STATUS;
1857 break;
1858 default:
1859 BUG();
1860 }
1861
1862 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1863 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1864 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1865 }
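
/*
 * Illustrative note (example mask values only): ports B and C report
 * their ready bits in the same DPLL(0) register, with port C's lanes
 * shifted up by 4. A caller passing expected_mask = 0x3 for port C is
 * therefore really polling for
 * (I915_READ(DPLL(0)) & DPLL_PORTC_READY_MASK) == 0x30.
 */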
1866
1867 static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1868 {
1869 struct drm_device *dev = crtc->base.dev;
1870 struct drm_i915_private *dev_priv = dev->dev_private;
1871 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1872
1873 if (WARN_ON(pll == NULL))
1874 return;
1875
1876 WARN_ON(!pll->config.crtc_mask);
1877 if (pll->active == 0) {
1878 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1879 WARN_ON(pll->on);
1880 assert_shared_dpll_disabled(dev_priv, pll);
1881
1882 pll->mode_set(dev_priv, pll);
1883 }
1884 }
1885
1886 /**
1887 * intel_enable_shared_dpll - enable PCH PLL
1888  * @crtc: crtc whose shared DPLL should be enabled
1889  * (the PLL is shared, so it may already be running for another crtc)
1890 *
1891 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1892 * drives the transcoder clock.
1893 */
1894 static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1895 {
1896 struct drm_device *dev = crtc->base.dev;
1897 struct drm_i915_private *dev_priv = dev->dev_private;
1898 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1899
1900 if (WARN_ON(pll == NULL))
1901 return;
1902
1903 if (WARN_ON(pll->config.crtc_mask == 0))
1904 return;
1905
1906 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
1907 pll->name, pll->active, pll->on,
1908 crtc->base.base.id);
1909
1910 if (pll->active++) {
1911 WARN_ON(!pll->on);
1912 assert_shared_dpll_enabled(dev_priv, pll);
1913 return;
1914 }
1915 WARN_ON(pll->on);
1916
1917 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1918
1919 DRM_DEBUG_KMS("enabling %s\n", pll->name);
1920 pll->enable(dev_priv, pll);
1921 pll->on = true;
1922 }
1923
1924 static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1925 {
1926 struct drm_device *dev = crtc->base.dev;
1927 struct drm_i915_private *dev_priv = dev->dev_private;
1928 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1929
1930 /* PCH only available on ILK+ */
1931 if (INTEL_INFO(dev)->gen < 5)
1932 return;
1933
1934 if (pll == NULL)
1935 return;
1936
1937 if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
1938 return;
1939
1940 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1941 pll->name, pll->active, pll->on,
1942 crtc->base.base.id);
1943
1944 if (WARN_ON(pll->active == 0)) {
1945 assert_shared_dpll_disabled(dev_priv, pll);
1946 return;
1947 }
1948
1949 assert_shared_dpll_enabled(dev_priv, pll);
1950 WARN_ON(!pll->on);
1951 if (--pll->active)
1952 return;
1953
1954 DRM_DEBUG_KMS("disabling %s\n", pll->name);
1955 pll->disable(dev_priv, pll);
1956 pll->on = false;
1957
1958 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
1959 }
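
/*
 * Illustrative call sequence (hypothetical, not driver code): pll->active
 * refcounts the shared DPLL, so two CRTCs sharing one PLL pair up as:
 *
 *   intel_enable_shared_dpll(crtc_a);   // active 0 -> 1, pll->enable()
 *   intel_enable_shared_dpll(crtc_b);   // active 1 -> 2, hw untouched
 *   intel_disable_shared_dpll(crtc_b);  // active 2 -> 1, hw untouched
 *   intel_disable_shared_dpll(crtc_a);  // active 1 -> 0, pll->disable()
 */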
1960
1961 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1962 enum pipe pipe)
1963 {
1964 struct drm_device *dev = dev_priv->dev;
1965 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1966 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1967 i915_reg_t reg;
1968 uint32_t val, pipeconf_val;
1969
1970 /* PCH only available on ILK+ */
1971 BUG_ON(!HAS_PCH_SPLIT(dev));
1972
1973 /* Make sure PCH DPLL is enabled */
1974 assert_shared_dpll_enabled(dev_priv,
1975 intel_crtc_to_shared_dpll(intel_crtc));
1976
1977 /* FDI must be feeding us bits for PCH ports */
1978 assert_fdi_tx_enabled(dev_priv, pipe);
1979 assert_fdi_rx_enabled(dev_priv, pipe);
1980
1981 if (HAS_PCH_CPT(dev)) {
1982 /* Workaround: Set the timing override bit before enabling the
1983 * pch transcoder. */
1984 reg = TRANS_CHICKEN2(pipe);
1985 val = I915_READ(reg);
1986 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1987 I915_WRITE(reg, val);
1988 }
1989
1990 reg = PCH_TRANSCONF(pipe);
1991 val = I915_READ(reg);
1992 pipeconf_val = I915_READ(PIPECONF(pipe));
1993
1994 if (HAS_PCH_IBX(dev_priv->dev)) {
1995 /*
1996 		 * Make the BPC in the transcoder consistent with
1997 		 * that in the pipeconf reg. For HDMI we must use 8bpc
1998 * here for both 8bpc and 12bpc.
1999 */
2000 val &= ~PIPECONF_BPC_MASK;
2001 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
2002 val |= PIPECONF_8BPC;
2003 else
2004 val |= pipeconf_val & PIPECONF_BPC_MASK;
2005 }
2006
2007 val &= ~TRANS_INTERLACE_MASK;
2008 	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
2009 		if (HAS_PCH_IBX(dev_priv->dev) &&
2010 		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
2011 			val |= TRANS_LEGACY_INTERLACED_ILK;
2012 		else
2013 			val |= TRANS_INTERLACED;
2014 	} else
2015 		val |= TRANS_PROGRESSIVE;
2016
2017 I915_WRITE(reg, val | TRANS_ENABLE);
2018 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2019 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
2020 }
2021
2022 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
2023 enum transcoder cpu_transcoder)
2024 {
2025 u32 val, pipeconf_val;
2026
2027 /* PCH only available on ILK+ */
2028 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
2029
2030 /* FDI must be feeding us bits for PCH ports */
2031 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
2032 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
2033
2034 /* Workaround: set timing override bit. */
2035 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2036 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
2037 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2038
2039 val = TRANS_ENABLE;
2040 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
2041
2042 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2043 PIPECONF_INTERLACED_ILK)
2044 val |= TRANS_INTERLACED;
2045 else
2046 val |= TRANS_PROGRESSIVE;
2047
2048 I915_WRITE(LPT_TRANSCONF, val);
2049 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
2050 DRM_ERROR("Failed to enable PCH transcoder\n");
2051 }
2052
2053 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2054 enum pipe pipe)
2055 {
2056 struct drm_device *dev = dev_priv->dev;
2057 i915_reg_t reg;
2058 uint32_t val;
2059
2060 /* FDI relies on the transcoder */
2061 assert_fdi_tx_disabled(dev_priv, pipe);
2062 assert_fdi_rx_disabled(dev_priv, pipe);
2063
2064 /* Ports must be off as well */
2065 assert_pch_ports_disabled(dev_priv, pipe);
2066
2067 reg = PCH_TRANSCONF(pipe);
2068 val = I915_READ(reg);
2069 val &= ~TRANS_ENABLE;
2070 I915_WRITE(reg, val);
2071 /* wait for PCH transcoder off, transcoder state */
2072 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
2073 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
2074
2075 if (HAS_PCH_CPT(dev)) {
2076 /* Workaround: Clear the timing override chicken bit again. */
2077 reg = TRANS_CHICKEN2(pipe);
2078 val = I915_READ(reg);
2079 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2080 I915_WRITE(reg, val);
2081 }
2082 }
2083
2084 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
2085 {
2086 u32 val;
2087
2088 val = I915_READ(LPT_TRANSCONF);
2089 val &= ~TRANS_ENABLE;
2090 I915_WRITE(LPT_TRANSCONF, val);
2091 /* wait for PCH transcoder off, transcoder state */
2092 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
2093 DRM_ERROR("Failed to disable PCH transcoder\n");
2094
2095 /* Workaround: clear timing override bit. */
2096 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
2097 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2098 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
2099 }
2100
2101 /**
2102 * intel_enable_pipe - enable a pipe, asserting requirements
2103 * @crtc: crtc responsible for the pipe
2104 *
2105 * Enable @crtc's pipe, making sure that various hardware specific requirements
2106 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
2107 */
2108 static void intel_enable_pipe(struct intel_crtc *crtc)
2109 {
2110 struct drm_device *dev = crtc->base.dev;
2111 struct drm_i915_private *dev_priv = dev->dev_private;
2112 enum pipe pipe = crtc->pipe;
2113 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2114 enum pipe pch_transcoder;
2115 i915_reg_t reg;
2116 u32 val;
2117
2118 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2119
2120 assert_planes_disabled(dev_priv, pipe);
2121 assert_cursor_disabled(dev_priv, pipe);
2122 assert_sprites_disabled(dev_priv, pipe);
2123
2124 if (HAS_PCH_LPT(dev_priv->dev))
2125 pch_transcoder = TRANSCODER_A;
2126 else
2127 pch_transcoder = pipe;
2128
2129 /*
2130 * A pipe without a PLL won't actually be able to drive bits from
2131 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
2132 * need the check.
2133 */
2134 if (HAS_GMCH_DISPLAY(dev_priv->dev))
2135 if (crtc->config->has_dsi_encoder)
2136 assert_dsi_pll_enabled(dev_priv);
2137 else
2138 assert_pll_enabled(dev_priv, pipe);
2139 else {
2140 if (crtc->config->has_pch_encoder) {
2141 /* if driving the PCH, we need FDI enabled */
2142 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
2143 assert_fdi_tx_pll_enabled(dev_priv,
2144 (enum pipe) cpu_transcoder);
2145 }
2146 /* FIXME: assert CPU port conditions for SNB+ */
2147 }
2148
2149 reg = PIPECONF(cpu_transcoder);
2150 val = I915_READ(reg);
2151 if (val & PIPECONF_ENABLE) {
2152 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2153 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2154 return;
2155 }
2156
2157 I915_WRITE(reg, val | PIPECONF_ENABLE);
2158 POSTING_READ(reg);
2159 }
2160
2161 /**
2162 * intel_disable_pipe - disable a pipe, asserting requirements
2163  * @crtc: crtc whose pipe is to be disabled
2164 *
2165 * Disable the pipe of @crtc, making sure that various hardware
2166 * specific requirements are met, if applicable, e.g. plane
2167 * disabled, panel fitter off, etc.
2168 *
2169 * Will wait until the pipe has shut down before returning.
2170 */
2171 static void intel_disable_pipe(struct intel_crtc *crtc)
2172 {
2173 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2174 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2175 enum pipe pipe = crtc->pipe;
2176 i915_reg_t reg;
2177 u32 val;
2178
2179 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2180
2181 /*
2182 * Make sure planes won't keep trying to pump pixels to us,
2183 * or we might hang the display.
2184 */
2185 assert_planes_disabled(dev_priv, pipe);
2186 assert_cursor_disabled(dev_priv, pipe);
2187 assert_sprites_disabled(dev_priv, pipe);
2188
2189 reg = PIPECONF(cpu_transcoder);
2190 val = I915_READ(reg);
2191 if ((val & PIPECONF_ENABLE) == 0)
2192 return;
2193
2194 /*
2195 	 * Double wide mode has implications for planes,
2196 	 * so it's best to keep it disabled when not needed.
2197 */
2198 if (crtc->config->double_wide)
2199 val &= ~PIPECONF_DOUBLE_WIDE;
2200
2201 	/* Leave the pipe enabled if a pipe force quirk requires it */
2202 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2203 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2204 val &= ~PIPECONF_ENABLE;
2205
2206 I915_WRITE(reg, val);
2207 if ((val & PIPECONF_ENABLE) == 0)
2208 intel_wait_for_pipe_off(crtc);
2209 }
2210
2211 static bool need_vtd_wa(struct drm_device *dev)
2212 {
2213 #ifdef CONFIG_INTEL_IOMMU
2214 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2215 return true;
2216 #endif
2217 return false;
2218 }
2219
2220 unsigned int
2221 intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
2222 uint64_t fb_format_modifier, unsigned int plane)
2223 {
2224 unsigned int tile_height;
2225 uint32_t pixel_bytes;
2226
2227 switch (fb_format_modifier) {
2228 case DRM_FORMAT_MOD_NONE:
2229 tile_height = 1;
2230 break;
2231 case I915_FORMAT_MOD_X_TILED:
2232 tile_height = IS_GEN2(dev) ? 16 : 8;
2233 break;
2234 case I915_FORMAT_MOD_Y_TILED:
2235 tile_height = 32;
2236 break;
2237 case I915_FORMAT_MOD_Yf_TILED:
2238 pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
2239 switch (pixel_bytes) {
2240 default:
2241 case 1:
2242 tile_height = 64;
2243 break;
2244 case 2:
2245 case 4:
2246 tile_height = 32;
2247 break;
2248 case 8:
2249 tile_height = 16;
2250 break;
2251 case 16:
2252 WARN_ONCE(1,
2253 "128-bit pixels are not supported for display!");
2254 tile_height = 16;
2255 break;
2256 }
2257 break;
2258 default:
2259 MISSING_CASE(fb_format_modifier);
2260 tile_height = 1;
2261 break;
2262 }
2263
2264 return tile_height;
2265 }
2266
2267 unsigned int
2268 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2269 uint32_t pixel_format, uint64_t fb_format_modifier)
2270 {
2271 return ALIGN(height, intel_tile_height(dev, pixel_format,
2272 fb_format_modifier, 0));
2273 }
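
/*
 * Worked example (illustrative values): a 1080-line Y-tiled framebuffer
 * has tile_height = 32, so intel_fb_align_height() returns
 * ALIGN(1080, 32) = 1088 lines, while a linear buffer (tile_height = 1)
 * stays at 1080.
 */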
2274
2275 static void
2276 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2277 const struct drm_plane_state *plane_state)
2278 {
2279 struct intel_rotation_info *info = &view->params.rotation_info;
2280 unsigned int tile_height, tile_pitch;
2281
2282 *view = i915_ggtt_view_normal;
2283
2284 if (!plane_state)
2285 return;
2286
2287 if (!intel_rotation_90_or_270(plane_state->rotation))
2288 return;
2289
2290 *view = i915_ggtt_view_rotated;
2291
2292 info->height = fb->height;
2293 info->pixel_format = fb->pixel_format;
2294 info->pitch = fb->pitches[0];
2295 info->uv_offset = fb->offsets[1];
2296 info->fb_modifier = fb->modifier[0];
2297
2298 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2299 fb->modifier[0], 0);
2300 tile_pitch = PAGE_SIZE / tile_height;
2301 info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2302 info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2303 info->size = info->width_pages * info->height_pages * PAGE_SIZE;
2304
2305 if (info->pixel_format == DRM_FORMAT_NV12) {
2306 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2307 fb->modifier[0], 1);
2308 tile_pitch = PAGE_SIZE / tile_height;
2309 info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2310 info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
2311 tile_height);
2312 info->size_uv = info->width_pages_uv * info->height_pages_uv *
2313 PAGE_SIZE;
2314 }
2315 }
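
/*
 * Worked example (illustrative values): an X-tiled fb on anything but
 * gen2 has tile_height = 8, hence tile_pitch = PAGE_SIZE / 8 = 512
 * bytes. With pitches[0] = 8192 and height = 1080 the rotated view
 * covers width_pages = DIV_ROUND_UP(8192, 512) = 16 and
 * height_pages = DIV_ROUND_UP(1080, 8) = 135, giving
 * size = 16 * 135 * PAGE_SIZE = 2160 pages (~8.4 MiB).
 */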
2316
2317 static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2318 {
2319 if (INTEL_INFO(dev_priv)->gen >= 9)
2320 return 256 * 1024;
2321 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2322 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2323 return 128 * 1024;
2324 else if (INTEL_INFO(dev_priv)->gen >= 4)
2325 return 4 * 1024;
2326 else
2327 return 0;
2328 }
2329
2330 int
2331 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2332 struct drm_framebuffer *fb,
2333 const struct drm_plane_state *plane_state)
2334 {
2335 struct drm_device *dev = fb->dev;
2336 struct drm_i915_private *dev_priv = dev->dev_private;
2337 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2338 struct i915_ggtt_view view;
2339 u32 alignment;
2340 int ret;
2341
2342 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2343
2344 switch (fb->modifier[0]) {
2345 case DRM_FORMAT_MOD_NONE:
2346 alignment = intel_linear_alignment(dev_priv);
2347 break;
2348 case I915_FORMAT_MOD_X_TILED:
2349 if (INTEL_INFO(dev)->gen >= 9)
2350 alignment = 256 * 1024;
2351 else {
2352 /* pin() will align the object as required by fence */
2353 alignment = 0;
2354 }
2355 break;
2356 case I915_FORMAT_MOD_Y_TILED:
2357 case I915_FORMAT_MOD_Yf_TILED:
2358 if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
2359 "Y tiling bo slipped through, driver bug!\n"))
2360 return -EINVAL;
2361 alignment = 1 * 1024 * 1024;
2362 break;
2363 default:
2364 MISSING_CASE(fb->modifier[0]);
2365 return -EINVAL;
2366 }
2367
2368 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2369
2370 	/* Note that the w/a also requires 64 PTEs of padding following the
2371 	 * bo. We currently fill all unused PTEs with the shadow page and so
2372 	 * we should always have valid PTEs following the scanout, preventing
2373 	 * the VT-d warning.
2374 */
2375 if (need_vtd_wa(dev) && alignment < 256 * 1024)
2376 alignment = 256 * 1024;
2377
2378 /*
2379 	 * Global gtt pte registers are special registers which actually forward
2380 	 * writes to a chunk of system memory. This means that there is no risk
2381 * that the register values disappear as soon as we call
2382 * intel_runtime_pm_put(), so it is correct to wrap only the
2383 * pin/unpin/fence and not more.
2384 */
2385 intel_runtime_pm_get(dev_priv);
2386
2387 ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2388 &view);
2389 if (ret)
2390 goto err_pm;
2391
2392 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2393 * fence, whereas 965+ only requires a fence if using
2394 * framebuffer compression. For simplicity, we always install
2395 * a fence as the cost is not that onerous.
2396 */
2397 if (view.type == I915_GGTT_VIEW_NORMAL) {
2398 ret = i915_gem_object_get_fence(obj);
2399 if (ret == -EDEADLK) {
2400 /*
2401 			 * -EDEADLK means there are no free fences
2402 			 * and no pending flips.
2403 *
2404 * This is propagated to atomic, but it uses
2405 * -EDEADLK to force a locking recovery, so
2406 * change the returned error to -EBUSY.
2407 */
2408 ret = -EBUSY;
2409 goto err_unpin;
2410 } else if (ret)
2411 goto err_unpin;
2412
2413 i915_gem_object_pin_fence(obj);
2414 }
2415
2416 intel_runtime_pm_put(dev_priv);
2417 return 0;
2418
2419 err_unpin:
2420 i915_gem_object_unpin_from_display_plane(obj, &view);
2421 err_pm:
2422 intel_runtime_pm_put(dev_priv);
2423 return ret;
2424 }
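
/*
 * Illustrative pairing (hypothetical caller fragment, not driver code):
 * a successful intel_pin_and_fence_fb_obj() must later be balanced by
 * intel_unpin_fb_obj() with the same fb and plane_state, so that both
 * the fence (taken for the normal GGTT view) and the display-plane pin
 * are released:
 */
#if 0
	ret = intel_pin_and_fence_fb_obj(plane, fb, plane_state);
	if (ret)
		return ret;
	/* ... scan out from fb ... */
	intel_unpin_fb_obj(fb, plane_state);
#endif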
2425
2426 static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2427 const struct drm_plane_state *plane_state)
2428 {
2429 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2430 struct i915_ggtt_view view;
2431
2432 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2433
2434 intel_fill_fb_ggtt_view(&view, fb, plane_state);
2435
2436 if (view.type == I915_GGTT_VIEW_NORMAL)
2437 i915_gem_object_unpin_fence(obj);
2438
2439 i915_gem_object_unpin_from_display_plane(obj, &view);
2440 }
2441
2442 /* Computes the linear offset to the base tile and adjusts x, y. Bytes per
2443  * pixel (cpp) is assumed to be a power of two. */
2444 unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2445 int *x, int *y,
2446 unsigned int tiling_mode,
2447 unsigned int cpp,
2448 unsigned int pitch)
2449 {
2450 if (tiling_mode != I915_TILING_NONE) {
2451 unsigned int tile_rows, tiles;
2452
2453 tile_rows = *y / 8;
2454 *y %= 8;
2455
2456 tiles = *x / (512/cpp);
2457 *x %= 512/cpp;
2458
2459 return tile_rows * pitch * 8 + tiles * 4096;
2460 } else {
2461 unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
2462 unsigned int offset;
2463
2464 offset = *y * pitch + *x * cpp;
2465 *y = (offset & alignment) / pitch;
2466 *x = ((offset & alignment) - *y * pitch) / cpp;
2467 return offset & ~alignment;
2468 }
2469 }
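
/*
 * Worked examples (illustrative values) for the two branches above:
 *
 * X-tiled, cpp = 4, pitch = 8192, x = 200, y = 100:
 *   tile_rows = 100 / 8 = 12, y becomes 100 % 8 = 4;
 *   tiles = 200 / (512 / 4) = 1, x becomes 200 % 128 = 72;
 *   returns 12 * 8192 * 8 + 1 * 4096 = 790528.
 *
 * Linear on a gen4 part (4 KiB alignment), same cpp/pitch/x/y:
 *   offset = 100 * 8192 + 200 * 4 = 820000;
 *   returns 820000 & ~4095 = 819200, with the remainder 800 folded
 *   back into the coordinates as y = 0, x = 200.
 */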
2470
2471 static int i9xx_format_to_fourcc(int format)
2472 {
2473 switch (format) {
2474 case DISPPLANE_8BPP:
2475 return DRM_FORMAT_C8;
2476 case DISPPLANE_BGRX555:
2477 return DRM_FORMAT_XRGB1555;
2478 case DISPPLANE_BGRX565:
2479 return DRM_FORMAT_RGB565;
2480 default:
2481 case DISPPLANE_BGRX888:
2482 return DRM_FORMAT_XRGB8888;
2483 case DISPPLANE_RGBX888:
2484 return DRM_FORMAT_XBGR8888;
2485 case DISPPLANE_BGRX101010:
2486 return DRM_FORMAT_XRGB2101010;
2487 case DISPPLANE_RGBX101010:
2488 return DRM_FORMAT_XBGR2101010;
2489 }
2490 }
2491
2492 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2493 {
2494 switch (format) {
2495 case PLANE_CTL_FORMAT_RGB_565:
2496 return DRM_FORMAT_RGB565;
2497 default:
2498 case PLANE_CTL_FORMAT_XRGB_8888:
2499 if (rgb_order) {
2500 if (alpha)
2501 return DRM_FORMAT_ABGR8888;
2502 else
2503 return DRM_FORMAT_XBGR8888;
2504 } else {
2505 if (alpha)
2506 return DRM_FORMAT_ARGB8888;
2507 else
2508 return DRM_FORMAT_XRGB8888;
2509 }
2510 case PLANE_CTL_FORMAT_XRGB_2101010:
2511 if (rgb_order)
2512 return DRM_FORMAT_XBGR2101010;
2513 else
2514 return DRM_FORMAT_XRGB2101010;
2515 }
2516 }
2517
2518 static bool
2519 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2520 struct intel_initial_plane_config *plane_config)
2521 {
2522 struct drm_device *dev = crtc->base.dev;
2523 struct drm_i915_private *dev_priv = to_i915(dev);
2524 struct drm_i915_gem_object *obj = NULL;
2525 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2526 struct drm_framebuffer *fb = &plane_config->fb->base;
2527 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2528 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2529 PAGE_SIZE);
2530
2531 size_aligned -= base_aligned;
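
	/*
	 * Worked example (illustrative values): base = 0x100800 and
	 * size = 0x2000 give base_aligned = 0x100000 and
	 * size_aligned = round_up(0x102800, PAGE_SIZE) - 0x100000 = 0x3000,
	 * i.e. the range is widened to whole pages at both ends.
	 */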
2532
2533 if (plane_config->size == 0)
2534 return false;
2535
2536 /* If the FB is too big, just don't use it since fbdev is not very
2537 * important and we should probably use that space with FBC or other
2538 * features. */
2539 if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2540 return false;
2541
2542 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2543 base_aligned,
2544 base_aligned,
2545 size_aligned);
2546 if (!obj)
2547 return false;
2548
2549 obj->tiling_mode = plane_config->tiling;
2550 if (obj->tiling_mode == I915_TILING_X)
2551 obj->stride = fb->pitches[0];
2552
2553 mode_cmd.pixel_format = fb->pixel_format;
2554 mode_cmd.width = fb->width;
2555 mode_cmd.height = fb->height;
2556 mode_cmd.pitches[0] = fb->pitches[0];
2557 mode_cmd.modifier[0] = fb->modifier[0];
2558 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2559
2560 mutex_lock(&dev->struct_mutex);
2561 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2562 &mode_cmd, obj)) {
2563 DRM_DEBUG_KMS("intel fb init failed\n");
2564 goto out_unref_obj;
2565 }
2566 mutex_unlock(&dev->struct_mutex);
2567
2568 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2569 return true;
2570
2571 out_unref_obj:
2572 drm_gem_object_unreference(&obj->base);
2573 mutex_unlock(&dev->struct_mutex);
2574 return false;
2575 }
2576
2577 /* Update plane->state->fb to match plane->fb after driver-internal updates */
2578 static void
2579 update_state_fb(struct drm_plane *plane)
2580 {
2581 if (plane->fb == plane->state->fb)
2582 return;
2583
2584 if (plane->state->fb)
2585 drm_framebuffer_unreference(plane->state->fb);
2586 plane->state->fb = plane->fb;
2587 if (plane->state->fb)
2588 drm_framebuffer_reference(plane->state->fb);
2589 }
2590
2591 static void
2592 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2593 struct intel_initial_plane_config *plane_config)
2594 {
2595 struct drm_device *dev = intel_crtc->base.dev;
2596 struct drm_i915_private *dev_priv = dev->dev_private;
2597 struct drm_crtc *c;
2598 struct intel_crtc *i;
2599 struct drm_i915_gem_object *obj;
2600 struct drm_plane *primary = intel_crtc->base.primary;
2601 struct drm_plane_state *plane_state = primary->state;
2602 struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2603 struct intel_plane *intel_plane = to_intel_plane(primary);
2604 struct drm_framebuffer *fb;
2605
2606 if (!plane_config->fb)
2607 return;
2608
2609 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2610 fb = &plane_config->fb->base;
2611 goto valid_fb;
2612 }
2613
2614 kfree(plane_config->fb);
2615
2616 /*
2617 * Failed to alloc the obj, check to see if we should share
2618 * an fb with another CRTC instead
2619 */
2620 for_each_crtc(dev, c) {
2621 i = to_intel_crtc(c);
2622
2623 if (c == &intel_crtc->base)
2624 continue;
2625
2626 if (!i->active)
2627 continue;
2628
2629 fb = c->primary->fb;
2630 if (!fb)
2631 continue;
2632
2633 obj = intel_fb_obj(fb);
2634 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2635 drm_framebuffer_reference(fb);
2636 goto valid_fb;
2637 }
2638 }
2639
2640 /*
2641 * We've failed to reconstruct the BIOS FB. Current display state
2642 * indicates that the primary plane is visible, but has a NULL FB,
2643 * which will lead to problems later if we don't fix it up. The
2644 * simplest solution is to just disable the primary plane now and
2645 * pretend the BIOS never had it enabled.
2646 */
2647 to_intel_plane_state(plane_state)->visible = false;
2648 crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2649 intel_pre_disable_primary(&intel_crtc->base);
2650 intel_plane->disable_plane(primary, &intel_crtc->base);
2651
2652 return;
2653
2654 valid_fb:
2655 plane_state->src_x = 0;
2656 plane_state->src_y = 0;
2657 plane_state->src_w = fb->width << 16;
2658 plane_state->src_h = fb->height << 16;
2659
2660 plane_state->crtc_x = 0;
2661 plane_state->crtc_y = 0;
2662 plane_state->crtc_w = fb->width;
2663 plane_state->crtc_h = fb->height;
2664
2665 obj = intel_fb_obj(fb);
2666 if (obj->tiling_mode != I915_TILING_NONE)
2667 dev_priv->preserve_bios_swizzle = true;
2668
2669 drm_framebuffer_reference(fb);
2670 primary->fb = primary->state->fb = fb;
2671 primary->crtc = primary->state->crtc = &intel_crtc->base;
2672 intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2673 obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2674 }
2675
2676 static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2677 struct drm_framebuffer *fb,
2678 int x, int y)
2679 {
2680 struct drm_device *dev = crtc->dev;
2681 struct drm_i915_private *dev_priv = dev->dev_private;
2682 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2683 struct drm_plane *primary = crtc->primary;
2684 bool visible = to_intel_plane_state(primary->state)->visible;
2685 struct drm_i915_gem_object *obj;
2686 int plane = intel_crtc->plane;
2687 unsigned long linear_offset;
2688 u32 dspcntr;
2689 i915_reg_t reg = DSPCNTR(plane);
2690 int pixel_size;
2691
2692 if (!visible || !fb) {
2693 I915_WRITE(reg, 0);
2694 if (INTEL_INFO(dev)->gen >= 4)
2695 I915_WRITE(DSPSURF(plane), 0);
2696 else
2697 I915_WRITE(DSPADDR(plane), 0);
2698 POSTING_READ(reg);
2699 return;
2700 }
2701
2702 obj = intel_fb_obj(fb);
2703 if (WARN_ON(obj == NULL))
2704 return;
2705
2706 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2707
2708 dspcntr = DISPPLANE_GAMMA_ENABLE;
2709
2710 dspcntr |= DISPLAY_PLANE_ENABLE;
2711
2712 if (INTEL_INFO(dev)->gen < 4) {
2713 if (intel_crtc->pipe == PIPE_B)
2714 dspcntr |= DISPPLANE_SEL_PIPE_B;
2715
2716 /* pipesrc and dspsize control the size that is scaled from,
2717 * which should always be the user's requested size.
2718 */
2719 I915_WRITE(DSPSIZE(plane),
2720 ((intel_crtc->config->pipe_src_h - 1) << 16) |
2721 (intel_crtc->config->pipe_src_w - 1));
2722 I915_WRITE(DSPPOS(plane), 0);
2723 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2724 I915_WRITE(PRIMSIZE(plane),
2725 ((intel_crtc->config->pipe_src_h - 1) << 16) |
2726 (intel_crtc->config->pipe_src_w - 1));
2727 I915_WRITE(PRIMPOS(plane), 0);
2728 I915_WRITE(PRIMCNSTALPHA(plane), 0);
2729 }
2730
2731 switch (fb->pixel_format) {
2732 case DRM_FORMAT_C8:
2733 dspcntr |= DISPPLANE_8BPP;
2734 break;
2735 case DRM_FORMAT_XRGB1555:
2736 dspcntr |= DISPPLANE_BGRX555;
2737 break;
2738 case DRM_FORMAT_RGB565:
2739 dspcntr |= DISPPLANE_BGRX565;
2740 break;
2741 case DRM_FORMAT_XRGB8888:
2742 dspcntr |= DISPPLANE_BGRX888;
2743 break;
2744 case DRM_FORMAT_XBGR8888:
2745 dspcntr |= DISPPLANE_RGBX888;
2746 break;
2747 case DRM_FORMAT_XRGB2101010:
2748 dspcntr |= DISPPLANE_BGRX101010;
2749 break;
2750 case DRM_FORMAT_XBGR2101010:
2751 dspcntr |= DISPPLANE_RGBX101010;
2752 break;
2753 default:
2754 BUG();
2755 }
2756
2757 if (INTEL_INFO(dev)->gen >= 4 &&
2758 obj->tiling_mode != I915_TILING_NONE)
2759 dspcntr |= DISPPLANE_TILED;
2760
2761 if (IS_G4X(dev))
2762 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2763
2764 linear_offset = y * fb->pitches[0] + x * pixel_size;
2765
2766 if (INTEL_INFO(dev)->gen >= 4) {
2767 intel_crtc->dspaddr_offset =
2768 intel_gen4_compute_page_offset(dev_priv,
2769 &x, &y, obj->tiling_mode,
2770 pixel_size,
2771 fb->pitches[0]);
2772 linear_offset -= intel_crtc->dspaddr_offset;
2773 } else {
2774 intel_crtc->dspaddr_offset = linear_offset;
2775 }
2776
2777 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2778 dspcntr |= DISPPLANE_ROTATE_180;
2779
2780 x += (intel_crtc->config->pipe_src_w - 1);
2781 y += (intel_crtc->config->pipe_src_h - 1);
2782
2783 		/* Find the last pixel of the last line of the display
2784 		   data and add its offset to linear_offset */
2785 linear_offset +=
2786 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2787 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2788 }
2789
2790 intel_crtc->adjusted_x = x;
2791 intel_crtc->adjusted_y = y;
2792
2793 I915_WRITE(reg, dspcntr);
2794
2795 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2796 if (INTEL_INFO(dev)->gen >= 4) {
2797 I915_WRITE(DSPSURF(plane),
2798 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2799 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2800 I915_WRITE(DSPLINOFF(plane), linear_offset);
2801 } else
2802 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2803 POSTING_READ(reg);
2804 }
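
/*
 * Worked example (illustrative values) for the 180 degree rotation
 * adjustment above: a 1920x1080 XRGB8888 fb (cpp = 4, pitch = 7680)
 * adds (1080 - 1) * 7680 + (1920 - 1) * 4 = 8294396 to linear_offset,
 * which is exactly the byte offset of the last pixel of the last line,
 * so the hardware scans the buffer out backwards from there.
 */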
2805
2806 static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2807 struct drm_framebuffer *fb,
2808 int x, int y)
2809 {
2810 struct drm_device *dev = crtc->dev;
2811 struct drm_i915_private *dev_priv = dev->dev_private;
2812 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2813 struct drm_plane *primary = crtc->primary;
2814 bool visible = to_intel_plane_state(primary->state)->visible;
2815 struct drm_i915_gem_object *obj;
2816 int plane = intel_crtc->plane;
2817 unsigned long linear_offset;
2818 u32 dspcntr;
2819 i915_reg_t reg = DSPCNTR(plane);
2820 int pixel_size;
2821
2822 if (!visible || !fb) {
2823 I915_WRITE(reg, 0);
2824 I915_WRITE(DSPSURF(plane), 0);
2825 POSTING_READ(reg);
2826 return;
2827 }
2828
2829 obj = intel_fb_obj(fb);
2830 if (WARN_ON(obj == NULL))
2831 return;
2832
2833 pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
2834
2835 dspcntr = DISPPLANE_GAMMA_ENABLE;
2836
2837 dspcntr |= DISPLAY_PLANE_ENABLE;
2838
2839 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2840 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2841
2842 switch (fb->pixel_format) {
2843 case DRM_FORMAT_C8:
2844 dspcntr |= DISPPLANE_8BPP;
2845 break;
2846 case DRM_FORMAT_RGB565:
2847 dspcntr |= DISPPLANE_BGRX565;
2848 break;
2849 case DRM_FORMAT_XRGB8888:
2850 dspcntr |= DISPPLANE_BGRX888;
2851 break;
2852 case DRM_FORMAT_XBGR8888:
2853 dspcntr |= DISPPLANE_RGBX888;
2854 break;
2855 case DRM_FORMAT_XRGB2101010:
2856 dspcntr |= DISPPLANE_BGRX101010;
2857 break;
2858 case DRM_FORMAT_XBGR2101010:
2859 dspcntr |= DISPPLANE_RGBX101010;
2860 break;
2861 default:
2862 BUG();
2863 }
2864
2865 if (obj->tiling_mode != I915_TILING_NONE)
2866 dspcntr |= DISPPLANE_TILED;
2867
2868 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2869 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2870
2871 linear_offset = y * fb->pitches[0] + x * pixel_size;
2872 intel_crtc->dspaddr_offset =
2873 intel_gen4_compute_page_offset(dev_priv,
2874 &x, &y, obj->tiling_mode,
2875 pixel_size,
2876 fb->pitches[0]);
2877 linear_offset -= intel_crtc->dspaddr_offset;
2878 if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
2879 dspcntr |= DISPPLANE_ROTATE_180;
2880
2881 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2882 x += (intel_crtc->config->pipe_src_w - 1);
2883 y += (intel_crtc->config->pipe_src_h - 1);
2884
2885 			/* Find the last pixel of the last line of the display
2886 			   data and add its offset to linear_offset */
2887 linear_offset +=
2888 (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
2889 (intel_crtc->config->pipe_src_w - 1) * pixel_size;
2890 }
2891 }
2892
2893 intel_crtc->adjusted_x = x;
2894 intel_crtc->adjusted_y = y;
2895
2896 I915_WRITE(reg, dspcntr);
2897
2898 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2899 I915_WRITE(DSPSURF(plane),
2900 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2901 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2902 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2903 } else {
2904 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2905 I915_WRITE(DSPLINOFF(plane), linear_offset);
2906 }
2907 POSTING_READ(reg);
2908 }
2909
2910 u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2911 uint32_t pixel_format)
2912 {
2913 u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2914
2915 /*
2916 	 * The stride is expressed either as a multiple of 64-byte
2917 	 * chunks for linear buffers or as a number of tiles for tiled
2918 	 * buffers.
2919 */
2920 switch (fb_modifier) {
2921 case DRM_FORMAT_MOD_NONE:
2922 return 64;
2923 case I915_FORMAT_MOD_X_TILED:
2924 if (INTEL_INFO(dev)->gen == 2)
2925 return 128;
2926 return 512;
2927 case I915_FORMAT_MOD_Y_TILED:
2928 /* No need to check for old gens and Y tiling since this is
2929 * about the display engine and those will be blocked before
2930 * we get here.
2931 */
2932 return 128;
2933 case I915_FORMAT_MOD_Yf_TILED:
2934 if (bits_per_pixel == 8)
2935 return 64;
2936 else
2937 return 128;
2938 default:
2939 MISSING_CASE(fb_modifier);
2940 return 64;
2941 }
2942 }
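
/*
 * Worked example (illustrative values): skylake_update_primary_plane()
 * below programs PLANE_STRIDE as pitches[0] / stride_div, so an X-tiled
 * fb with pitches[0] = 16384 on gen9 uses stride_div = 512 and is
 * programmed with a stride of 32 tiles, while a linear fb with the same
 * pitch would use stride_div = 64 and be programmed as 256 64-byte units.
 */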
2943
2944 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2945 struct drm_i915_gem_object *obj,
2946 unsigned int plane)
2947 {
2948 struct i915_ggtt_view view;
2949 struct i915_vma *vma;
2950 u64 offset;
2951
2952 intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2953 intel_plane->base.state);
2954
2955 vma = i915_gem_obj_to_ggtt_view(obj, &view);
2956 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2957 view.type))
2958 return -1;
2959
2960 offset = vma->node.start;
2961
2962 if (plane == 1) {
2963 offset += vma->ggtt_view.params.rotation_info.uv_start_page *
2964 PAGE_SIZE;
2965 }
2966
2967 WARN_ON(upper_32_bits(offset));
2968
2969 return lower_32_bits(offset);
2970 }
2971
2972 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2973 {
2974 struct drm_device *dev = intel_crtc->base.dev;
2975 struct drm_i915_private *dev_priv = dev->dev_private;
2976
2977 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2978 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2979 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2980 }
2981
2982 /*
2983 * This function detaches (aka. unbinds) unused scalers in hardware
2984 */
2985 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2986 {
2987 struct intel_crtc_scaler_state *scaler_state;
2988 int i;
2989
2990 scaler_state = &intel_crtc->config->scaler_state;
2991
2992 /* loop through and disable scalers that aren't in use */
2993 for (i = 0; i < intel_crtc->num_scalers; i++) {
2994 if (!scaler_state->scalers[i].in_use)
2995 skl_detach_scaler(intel_crtc, i);
2996 }
2997 }
2998
2999 u32 skl_plane_ctl_format(uint32_t pixel_format)
3000 {
3001 switch (pixel_format) {
3002 case DRM_FORMAT_C8:
3003 return PLANE_CTL_FORMAT_INDEXED;
3004 case DRM_FORMAT_RGB565:
3005 return PLANE_CTL_FORMAT_RGB_565;
3006 case DRM_FORMAT_XBGR8888:
3007 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
3008 case DRM_FORMAT_XRGB8888:
3009 return PLANE_CTL_FORMAT_XRGB_8888;
3010 /*
3011 	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
3012 * to be already pre-multiplied. We need to add a knob (or a different
3013 * DRM_FORMAT) for user-space to configure that.
3014 */
3015 case DRM_FORMAT_ABGR8888:
3016 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
3017 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3018 case DRM_FORMAT_ARGB8888:
3019 return PLANE_CTL_FORMAT_XRGB_8888 |
3020 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
3021 case DRM_FORMAT_XRGB2101010:
3022 return PLANE_CTL_FORMAT_XRGB_2101010;
3023 case DRM_FORMAT_XBGR2101010:
3024 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
3025 case DRM_FORMAT_YUYV:
3026 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
3027 case DRM_FORMAT_YVYU:
3028 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
3029 case DRM_FORMAT_UYVY:
3030 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
3031 case DRM_FORMAT_VYUY:
3032 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
3033 default:
3034 MISSING_CASE(pixel_format);
3035 }
3036
3037 return 0;
3038 }
3039
3040 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3041 {
3042 switch (fb_modifier) {
3043 case DRM_FORMAT_MOD_NONE:
3044 break;
3045 case I915_FORMAT_MOD_X_TILED:
3046 return PLANE_CTL_TILED_X;
3047 case I915_FORMAT_MOD_Y_TILED:
3048 return PLANE_CTL_TILED_Y;
3049 case I915_FORMAT_MOD_Yf_TILED:
3050 return PLANE_CTL_TILED_YF;
3051 default:
3052 MISSING_CASE(fb_modifier);
3053 }
3054
3055 return 0;
3056 }
3057
3058 u32 skl_plane_ctl_rotation(unsigned int rotation)
3059 {
3060 switch (rotation) {
3061 case BIT(DRM_ROTATE_0):
3062 break;
3063 /*
3064 	 * DRM_ROTATE_* is counter-clockwise to stay compatible with Xrandr,
3065 	 * while i915 HW rotation is clockwise; that's why 90/270 are swapped here.
3066 */
3067 case BIT(DRM_ROTATE_90):
3068 return PLANE_CTL_ROTATE_270;
3069 case BIT(DRM_ROTATE_180):
3070 return PLANE_CTL_ROTATE_180;
3071 case BIT(DRM_ROTATE_270):
3072 return PLANE_CTL_ROTATE_90;
3073 default:
3074 MISSING_CASE(rotation);
3075 }
3076
3077 return 0;
3078 }
3079
3080 static void skylake_update_primary_plane(struct drm_crtc *crtc,
3081 struct drm_framebuffer *fb,
3082 int x, int y)
3083 {
3084 struct drm_device *dev = crtc->dev;
3085 struct drm_i915_private *dev_priv = dev->dev_private;
3086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3087 struct drm_plane *plane = crtc->primary;
3088 bool visible = to_intel_plane_state(plane->state)->visible;
3089 struct drm_i915_gem_object *obj;
3090 int pipe = intel_crtc->pipe;
3091 u32 plane_ctl, stride_div, stride;
3092 u32 tile_height, plane_offset, plane_size;
3093 unsigned int rotation;
3094 int x_offset, y_offset;
3095 u32 surf_addr;
3096 struct intel_crtc_state *crtc_state = intel_crtc->config;
3097 struct intel_plane_state *plane_state;
3098 int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
3099 int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
3100 int scaler_id = -1;
3101
3102 plane_state = to_intel_plane_state(plane->state);
3103
3104 if (!visible || !fb) {
3105 I915_WRITE(PLANE_CTL(pipe, 0), 0);
3106 I915_WRITE(PLANE_SURF(pipe, 0), 0);
3107 POSTING_READ(PLANE_CTL(pipe, 0));
3108 return;
3109 }
3110
3111 plane_ctl = PLANE_CTL_ENABLE |
3112 PLANE_CTL_PIPE_GAMMA_ENABLE |
3113 PLANE_CTL_PIPE_CSC_ENABLE;
3114
3115 plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3116 plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3117 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3118
3119 rotation = plane->state->rotation;
3120 plane_ctl |= skl_plane_ctl_rotation(rotation);
3121
3122 obj = intel_fb_obj(fb);
3123 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
3124 fb->pixel_format);
3125 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3126
3127 WARN_ON(drm_rect_width(&plane_state->src) == 0);
3128
3129 scaler_id = plane_state->scaler_id;
3130 src_x = plane_state->src.x1 >> 16;
3131 src_y = plane_state->src.y1 >> 16;
3132 src_w = drm_rect_width(&plane_state->src) >> 16;
3133 src_h = drm_rect_height(&plane_state->src) >> 16;
3134 dst_x = plane_state->dst.x1;
3135 dst_y = plane_state->dst.y1;
3136 dst_w = drm_rect_width(&plane_state->dst);
3137 dst_h = drm_rect_height(&plane_state->dst);
3138
3139 WARN_ON(x != src_x || y != src_y);
3140
3141 if (intel_rotation_90_or_270(rotation)) {
3142 /* stride = Surface height in tiles */
3143 tile_height = intel_tile_height(dev, fb->pixel_format,
3144 fb->modifier[0], 0);
3145 stride = DIV_ROUND_UP(fb->height, tile_height);
3146 x_offset = stride * tile_height - y - src_h;
3147 y_offset = x;
3148 plane_size = (src_w - 1) << 16 | (src_h - 1);
3149 } else {
3150 stride = fb->pitches[0] / stride_div;
3151 x_offset = x;
3152 y_offset = y;
3153 plane_size = (src_h - 1) << 16 | (src_w - 1);
3154 }
3155 plane_offset = y_offset << 16 | x_offset;
3156
3157 intel_crtc->adjusted_x = x_offset;
3158 intel_crtc->adjusted_y = y_offset;
3159
3160 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3161 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3162 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3163 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3164
3165 if (scaler_id >= 0) {
3166 uint32_t ps_ctrl = 0;
3167
3168 WARN_ON(!dst_w || !dst_h);
3169 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3170 crtc_state->scaler_state.scalers[scaler_id].mode;
3171 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3172 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3173 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3174 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3175 I915_WRITE(PLANE_POS(pipe, 0), 0);
3176 } else {
3177 I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3178 }
3179
3180 I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3181
3182 POSTING_READ(PLANE_SURF(pipe, 0));
3183 }
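
/*
 * Worked example (illustrative values) for the 90/270 branch above: a
 * Y-tiled fb with height = 1080 has tile_height = 32, so
 * stride = DIV_ROUND_UP(1080, 32) = 34 tiles and, for y = 0 and
 * src_h = 1080, x_offset = 34 * 32 - 0 - 1080 = 8, accounting for the
 * 8 lines by which 1080 falls short of a whole number of tiles (1088).
 */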
3184
3185 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3186 static int
3187 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3188 int x, int y, enum mode_set_atomic state)
3189 {
3190 struct drm_device *dev = crtc->dev;
3191 struct drm_i915_private *dev_priv = dev->dev_private;
3192
3193 if (dev_priv->fbc.deactivate)
3194 dev_priv->fbc.deactivate(dev_priv);
3195
3196 dev_priv->display.update_primary_plane(crtc, fb, x, y);
3197
3198 return 0;
3199 }
3200
3201 static void intel_complete_page_flips(struct drm_device *dev)
3202 {
3203 struct drm_crtc *crtc;
3204
3205 for_each_crtc(dev, crtc) {
3206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3207 enum plane plane = intel_crtc->plane;
3208
3209 intel_prepare_page_flip(dev, plane);
3210 intel_finish_page_flip_plane(dev, plane);
3211 }
3212 }
3213
3214 static void intel_update_primary_planes(struct drm_device *dev)
3215 {
3216 struct drm_crtc *crtc;
3217
3218 for_each_crtc(dev, crtc) {
3219 struct intel_plane *plane = to_intel_plane(crtc->primary);
3220 struct intel_plane_state *plane_state;
3221
3222 drm_modeset_lock_crtc(crtc, &plane->base);
3223 plane_state = to_intel_plane_state(plane->base.state);
3224
3225 if (crtc->state->active && plane_state->base.fb)
3226 plane->commit_plane(&plane->base, plane_state);
3227
3228 drm_modeset_unlock_crtc(crtc);
3229 }
3230 }
3231
3232 void intel_prepare_reset(struct drm_device *dev)
3233 {
3234 /* no reset support for gen2 */
3235 if (IS_GEN2(dev))
3236 return;
3237
3238 /* reset doesn't touch the display */
3239 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3240 return;
3241
3242 drm_modeset_lock_all(dev);
3243 /*
3244 * Disabling the crtcs gracefully seems nicer. Also the
3245 * g33 docs say we should at least disable all the planes.
3246 */
3247 intel_display_suspend(dev);
3248 }
3249
3250 void intel_finish_reset(struct drm_device *dev)
3251 {
3252 struct drm_i915_private *dev_priv = to_i915(dev);
3253
3254 /*
3255 * Flips in the rings will be nuked by the reset,
3256 * so complete all pending flips so that user space
3257 * will get its events and not get stuck.
3258 */
3259 intel_complete_page_flips(dev);
3260
3261 /* no reset support for gen2 */
3262 if (IS_GEN2(dev))
3263 return;
3264
3265 /* reset doesn't touch the display */
3266 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
3267 /*
3268 * Flips in the rings have been nuked by the reset,
3269 * so update the base address of all primary
3270 	 * planes to the last fb to make sure we're
3271 * showing the correct fb after a reset.
3272 *
3273 * FIXME: Atomic will make this obsolete since we won't schedule
3274 * CS-based flips (which might get lost in gpu resets) any more.
3275 */
3276 intel_update_primary_planes(dev);
3277 return;
3278 }
3279
3280 /*
3281 * The display has been reset as well,
3282 * so need a full re-initialization.
3283 */
3284 intel_runtime_pm_disable_interrupts(dev_priv);
3285 intel_runtime_pm_enable_interrupts(dev_priv);
3286
3287 intel_modeset_init_hw(dev);
3288
3289 spin_lock_irq(&dev_priv->irq_lock);
3290 if (dev_priv->display.hpd_irq_setup)
3291 dev_priv->display.hpd_irq_setup(dev);
3292 spin_unlock_irq(&dev_priv->irq_lock);
3293
3294 intel_display_resume(dev);
3295
3296 intel_hpd_init(dev_priv);
3297
3298 drm_modeset_unlock_all(dev);
3299 }
3300
3301 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3302 {
3303 struct drm_device *dev = crtc->dev;
3304 struct drm_i915_private *dev_priv = dev->dev_private;
3305 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3306 bool pending;
3307
3308 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3309 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3310 return false;
3311
3312 spin_lock_irq(&dev->event_lock);
3313 pending = to_intel_crtc(crtc)->unpin_work != NULL;
3314 spin_unlock_irq(&dev->event_lock);
3315
3316 return pending;
3317 }
3318
3319 static void intel_update_pipe_config(struct intel_crtc *crtc,
3320 struct intel_crtc_state *old_crtc_state)
3321 {
3322 struct drm_device *dev = crtc->base.dev;
3323 struct drm_i915_private *dev_priv = dev->dev_private;
3324 struct intel_crtc_state *pipe_config =
3325 to_intel_crtc_state(crtc->base.state);
3326
3327 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3328 crtc->base.mode = crtc->base.state->mode;
3329
3330 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3331 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3332 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3333
3334 if (HAS_DDI(dev))
3335 intel_set_pipe_csc(&crtc->base);
3336
3337 /*
3338 * Update pipe size and adjust fitter if needed: the reason for this is
3339 * that in compute_mode_changes we check the native mode (not the pfit
3340 * mode) to see if we can flip rather than do a full mode set. In the
3341 * fastboot case, we'll flip, but if we don't update the pipesrc and
3342 * pfit state, we'll end up with a big fb scanned out into the wrong
3343 * sized surface.
3344 */
3345
3346 I915_WRITE(PIPESRC(crtc->pipe),
3347 ((pipe_config->pipe_src_w - 1) << 16) |
3348 (pipe_config->pipe_src_h - 1));
3349
3350 /* on skylake this is done by detaching scalers */
3351 if (INTEL_INFO(dev)->gen >= 9) {
3352 skl_detach_scalers(crtc);
3353
3354 if (pipe_config->pch_pfit.enabled)
3355 skylake_pfit_enable(crtc);
3356 } else if (HAS_PCH_SPLIT(dev)) {
3357 if (pipe_config->pch_pfit.enabled)
3358 ironlake_pfit_enable(crtc);
3359 else if (old_crtc_state->pch_pfit.enabled)
3360 ironlake_pfit_disable(crtc, true);
3361 }
3362 }
3363
3364 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3365 {
3366 struct drm_device *dev = crtc->dev;
3367 struct drm_i915_private *dev_priv = dev->dev_private;
3368 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3369 int pipe = intel_crtc->pipe;
3370 i915_reg_t reg;
3371 u32 temp;
3372
3373 /* enable normal train */
3374 reg = FDI_TX_CTL(pipe);
3375 temp = I915_READ(reg);
3376 if (IS_IVYBRIDGE(dev)) {
3377 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3378 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3379 } else {
3380 temp &= ~FDI_LINK_TRAIN_NONE;
3381 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3382 }
3383 I915_WRITE(reg, temp);
3384
3385 reg = FDI_RX_CTL(pipe);
3386 temp = I915_READ(reg);
3387 if (HAS_PCH_CPT(dev)) {
3388 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3389 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3390 } else {
3391 temp &= ~FDI_LINK_TRAIN_NONE;
3392 temp |= FDI_LINK_TRAIN_NONE;
3393 }
3394 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3395
3396 /* wait one idle pattern time */
3397 POSTING_READ(reg);
3398 udelay(1000);
3399
3400 /* IVB wants error correction enabled */
3401 if (IS_IVYBRIDGE(dev))
3402 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3403 FDI_FE_ERRC_ENABLE);
3404 }
3405
3406 /* The FDI link training functions for ILK/Ibexpeak. */
3407 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3408 {
3409 struct drm_device *dev = crtc->dev;
3410 struct drm_i915_private *dev_priv = dev->dev_private;
3411 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3412 int pipe = intel_crtc->pipe;
3413 i915_reg_t reg;
3414 u32 temp, tries;
3415
3416 /* FDI needs bits from pipe first */
3417 assert_pipe_enabled(dev_priv, pipe);
3418
3419 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3420 	   for the training result */
3421 reg = FDI_RX_IMR(pipe);
3422 temp = I915_READ(reg);
3423 temp &= ~FDI_RX_SYMBOL_LOCK;
3424 temp &= ~FDI_RX_BIT_LOCK;
3425 I915_WRITE(reg, temp);
3426 I915_READ(reg);
3427 udelay(150);
3428
3429 /* enable CPU FDI TX and PCH FDI RX */
3430 reg = FDI_TX_CTL(pipe);
3431 temp = I915_READ(reg);
3432 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3433 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3434 temp &= ~FDI_LINK_TRAIN_NONE;
3435 temp |= FDI_LINK_TRAIN_PATTERN_1;
3436 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3437
3438 reg = FDI_RX_CTL(pipe);
3439 temp = I915_READ(reg);
3440 temp &= ~FDI_LINK_TRAIN_NONE;
3441 temp |= FDI_LINK_TRAIN_PATTERN_1;
3442 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3443
3444 POSTING_READ(reg);
3445 udelay(150);
3446
3447 	/* Ironlake workaround, enable clock pointer after FDI enable */
3448 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3449 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3450 FDI_RX_PHASE_SYNC_POINTER_EN);
3451
3452 reg = FDI_RX_IIR(pipe);
3453 for (tries = 0; tries < 5; tries++) {
3454 temp = I915_READ(reg);
3455 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3456
3457 if ((temp & FDI_RX_BIT_LOCK)) {
3458 DRM_DEBUG_KMS("FDI train 1 done.\n");
3459 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3460 break;
3461 }
3462 }
3463 if (tries == 5)
3464 DRM_ERROR("FDI train 1 fail!\n");
3465
3466 /* Train 2 */
3467 reg = FDI_TX_CTL(pipe);
3468 temp = I915_READ(reg);
3469 temp &= ~FDI_LINK_TRAIN_NONE;
3470 temp |= FDI_LINK_TRAIN_PATTERN_2;
3471 I915_WRITE(reg, temp);
3472
3473 reg = FDI_RX_CTL(pipe);
3474 temp = I915_READ(reg);
3475 temp &= ~FDI_LINK_TRAIN_NONE;
3476 temp |= FDI_LINK_TRAIN_PATTERN_2;
3477 I915_WRITE(reg, temp);
3478
3479 POSTING_READ(reg);
3480 udelay(150);
3481
3482 reg = FDI_RX_IIR(pipe);
3483 for (tries = 0; tries < 5; tries++) {
3484 temp = I915_READ(reg);
3485 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3486
3487 if (temp & FDI_RX_SYMBOL_LOCK) {
3488 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3489 DRM_DEBUG_KMS("FDI train 2 done.\n");
3490 break;
3491 }
3492 }
3493 if (tries == 5)
3494 DRM_ERROR("FDI train 2 fail!\n");
3495
3496 DRM_DEBUG_KMS("FDI train done\n");
3497
3498 }
3499
3500 static const int snb_b_fdi_train_param[] = {
3501 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3502 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3503 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3504 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3505 };
3506
3507 /* The FDI link training functions for SNB/Cougarpoint. */
3508 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3509 {
3510 struct drm_device *dev = crtc->dev;
3511 struct drm_i915_private *dev_priv = dev->dev_private;
3512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3513 int pipe = intel_crtc->pipe;
3514 i915_reg_t reg;
3515 u32 temp, i, retry;
3516
3517 	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3518 	   for the training result */
3519 reg = FDI_RX_IMR(pipe);
3520 temp = I915_READ(reg);
3521 temp &= ~FDI_RX_SYMBOL_LOCK;
3522 temp &= ~FDI_RX_BIT_LOCK;
3523 I915_WRITE(reg, temp);
3524
3525 POSTING_READ(reg);
3526 udelay(150);
3527
3528 /* enable CPU FDI TX and PCH FDI RX */
3529 reg = FDI_TX_CTL(pipe);
3530 temp = I915_READ(reg);
3531 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3532 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3533 temp &= ~FDI_LINK_TRAIN_NONE;
3534 temp |= FDI_LINK_TRAIN_PATTERN_1;
3535 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3536 /* SNB-B */
3537 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3538 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3539
3540 I915_WRITE(FDI_RX_MISC(pipe),
3541 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3542
3543 reg = FDI_RX_CTL(pipe);
3544 temp = I915_READ(reg);
3545 if (HAS_PCH_CPT(dev)) {
3546 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3547 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3548 } else {
3549 temp &= ~FDI_LINK_TRAIN_NONE;
3550 temp |= FDI_LINK_TRAIN_PATTERN_1;
3551 }
3552 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3553
3554 POSTING_READ(reg);
3555 udelay(150);
3556
3557 for (i = 0; i < 4; i++) {
3558 reg = FDI_TX_CTL(pipe);
3559 temp = I915_READ(reg);
3560 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3561 temp |= snb_b_fdi_train_param[i];
3562 I915_WRITE(reg, temp);
3563
3564 POSTING_READ(reg);
3565 udelay(500);
3566
3567 for (retry = 0; retry < 5; retry++) {
3568 reg = FDI_RX_IIR(pipe);
3569 temp = I915_READ(reg);
3570 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3571 if (temp & FDI_RX_BIT_LOCK) {
3572 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3573 DRM_DEBUG_KMS("FDI train 1 done.\n");
3574 break;
3575 }
3576 udelay(50);
3577 }
3578 if (retry < 5)
3579 break;
3580 }
3581 if (i == 4)
3582 DRM_ERROR("FDI train 1 fail!\n");
3583
3584 /* Train 2 */
3585 reg = FDI_TX_CTL(pipe);
3586 temp = I915_READ(reg);
3587 temp &= ~FDI_LINK_TRAIN_NONE;
3588 temp |= FDI_LINK_TRAIN_PATTERN_2;
3589 if (IS_GEN6(dev)) {
3590 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3591 /* SNB-B */
3592 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3593 }
3594 I915_WRITE(reg, temp);
3595
3596 reg = FDI_RX_CTL(pipe);
3597 temp = I915_READ(reg);
3598 if (HAS_PCH_CPT(dev)) {
3599 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3600 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3601 } else {
3602 temp &= ~FDI_LINK_TRAIN_NONE;
3603 temp |= FDI_LINK_TRAIN_PATTERN_2;
3604 }
3605 I915_WRITE(reg, temp);
3606
3607 POSTING_READ(reg);
3608 udelay(150);
3609
3610 for (i = 0; i < 4; i++) {
3611 reg = FDI_TX_CTL(pipe);
3612 temp = I915_READ(reg);
3613 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3614 temp |= snb_b_fdi_train_param[i];
3615 I915_WRITE(reg, temp);
3616
3617 POSTING_READ(reg);
3618 udelay(500);
3619
3620 for (retry = 0; retry < 5; retry++) {
3621 reg = FDI_RX_IIR(pipe);
3622 temp = I915_READ(reg);
3623 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3624 if (temp & FDI_RX_SYMBOL_LOCK) {
3625 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3626 DRM_DEBUG_KMS("FDI train 2 done.\n");
3627 break;
3628 }
3629 udelay(50);
3630 }
3631 if (retry < 5)
3632 break;
3633 }
3634 if (i == 4)
3635 DRM_ERROR("FDI train 2 fail!\n");
3636
3637 DRM_DEBUG_KMS("FDI train done.\n");
3638 }
3639
3640 /* Manual link training for Ivy Bridge A0 parts */
3641 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3642 {
3643 struct drm_device *dev = crtc->dev;
3644 struct drm_i915_private *dev_priv = dev->dev_private;
3645 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3646 int pipe = intel_crtc->pipe;
3647 i915_reg_t reg;
3648 u32 temp, i, j;
3649
3650 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3651 for train result */
3652 reg = FDI_RX_IMR(pipe);
3653 temp = I915_READ(reg);
3654 temp &= ~FDI_RX_SYMBOL_LOCK;
3655 temp &= ~FDI_RX_BIT_LOCK;
3656 I915_WRITE(reg, temp);
3657
3658 POSTING_READ(reg);
3659 udelay(150);
3660
3661 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3662 I915_READ(FDI_RX_IIR(pipe)));
3663
3664 /* Try each vswing and preemphasis setting twice before moving on */
3665 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3666 /* disable first in case we need to retry */
3667 reg = FDI_TX_CTL(pipe);
3668 temp = I915_READ(reg);
3669 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3670 temp &= ~FDI_TX_ENABLE;
3671 I915_WRITE(reg, temp);
3672
3673 reg = FDI_RX_CTL(pipe);
3674 temp = I915_READ(reg);
3675 temp &= ~FDI_LINK_TRAIN_AUTO;
3676 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3677 temp &= ~FDI_RX_ENABLE;
3678 I915_WRITE(reg, temp);
3679
3680 /* enable CPU FDI TX and PCH FDI RX */
3681 reg = FDI_TX_CTL(pipe);
3682 temp = I915_READ(reg);
3683 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3684 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3685 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3686 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3687 temp |= snb_b_fdi_train_param[j/2];
3688 temp |= FDI_COMPOSITE_SYNC;
3689 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3690
3691 I915_WRITE(FDI_RX_MISC(pipe),
3692 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3693
3694 reg = FDI_RX_CTL(pipe);
3695 temp = I915_READ(reg);
3696 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3697 temp |= FDI_COMPOSITE_SYNC;
3698 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3699
3700 POSTING_READ(reg);
3701 udelay(1); /* should be 0.5us */
3702
3703 for (i = 0; i < 4; i++) {
3704 reg = FDI_RX_IIR(pipe);
3705 temp = I915_READ(reg);
3706 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3707
3708 if (temp & FDI_RX_BIT_LOCK ||
3709 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3710 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3711 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3712 i);
3713 break;
3714 }
3715 udelay(1); /* should be 0.5us */
3716 }
3717 if (i == 4) {
3718 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3719 continue;
3720 }
3721
3722 /* Train 2 */
3723 reg = FDI_TX_CTL(pipe);
3724 temp = I915_READ(reg);
3725 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3726 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3727 I915_WRITE(reg, temp);
3728
3729 reg = FDI_RX_CTL(pipe);
3730 temp = I915_READ(reg);
3731 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3732 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3733 I915_WRITE(reg, temp);
3734
3735 POSTING_READ(reg);
3736 udelay(2); /* should be 1.5us */
3737
3738 for (i = 0; i < 4; i++) {
3739 reg = FDI_RX_IIR(pipe);
3740 temp = I915_READ(reg);
3741 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3742
3743 if (temp & FDI_RX_SYMBOL_LOCK ||
3744 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3745 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3746 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3747 i);
3748 goto train_done;
3749 }
3750 udelay(2); /* should be 1.5us */
3751 }
3752 if (i == 4)
3753 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3754 }
3755
3756 train_done:
3757 DRM_DEBUG_KMS("FDI train done.\n");
3758 }
3759
3760 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3761 {
3762 struct drm_device *dev = intel_crtc->base.dev;
3763 struct drm_i915_private *dev_priv = dev->dev_private;
3764 int pipe = intel_crtc->pipe;
3765 i915_reg_t reg;
3766 u32 temp;
3767
3768 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3769 reg = FDI_RX_CTL(pipe);
3770 temp = I915_READ(reg);
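/* The FDI RX bpc field must match the pipe's: PIPECONF keeps bpc in
 * bits 7:5, and the copy shifted left by 11 lands it in FDI_RX_CTL
 * bits 18:16 (which the 0x7 << 16 term clears first). */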
3771 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3772 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3773 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3774 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3775
3776 POSTING_READ(reg);
3777 udelay(200);
3778
3779 /* Switch from Rawclk to PCDclk */
3780 temp = I915_READ(reg);
3781 I915_WRITE(reg, temp | FDI_PCDCLK);
3782
3783 POSTING_READ(reg);
3784 udelay(200);
3785
3786 /* Enable CPU FDI TX PLL, always on for Ironlake */
3787 reg = FDI_TX_CTL(pipe);
3788 temp = I915_READ(reg);
3789 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3790 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3791
3792 POSTING_READ(reg);
3793 udelay(100);
3794 }
3795 }
3796
3797 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3798 {
3799 struct drm_device *dev = intel_crtc->base.dev;
3800 struct drm_i915_private *dev_priv = dev->dev_private;
3801 int pipe = intel_crtc->pipe;
3802 i915_reg_t reg;
3803 u32 temp;
3804
3805 /* Switch from PCDclk to Rawclk */
3806 reg = FDI_RX_CTL(pipe);
3807 temp = I915_READ(reg);
3808 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3809
3810 /* Disable CPU FDI TX PLL */
3811 reg = FDI_TX_CTL(pipe);
3812 temp = I915_READ(reg);
3813 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3814
3815 POSTING_READ(reg);
3816 udelay(100);
3817
3818 reg = FDI_RX_CTL(pipe);
3819 temp = I915_READ(reg);
3820 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3821
3822 /* Wait for the clocks to turn off. */
3823 POSTING_READ(reg);
3824 udelay(100);
3825 }
3826
3827 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3828 {
3829 struct drm_device *dev = crtc->dev;
3830 struct drm_i915_private *dev_priv = dev->dev_private;
3831 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3832 int pipe = intel_crtc->pipe;
3833 i915_reg_t reg;
3834 u32 temp;
3835
3836 /* disable CPU FDI tx and PCH FDI rx */
3837 reg = FDI_TX_CTL(pipe);
3838 temp = I915_READ(reg);
3839 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3840 POSTING_READ(reg);
3841
3842 reg = FDI_RX_CTL(pipe);
3843 temp = I915_READ(reg);
3844 temp &= ~(0x7 << 16);
3845 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3846 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3847
3848 POSTING_READ(reg);
3849 udelay(100);
3850
3851 /* Ironlake workaround, disable clock pointer after downing FDI */
3852 if (HAS_PCH_IBX(dev))
3853 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3854
3855 /* still set train pattern 1 */
3856 reg = FDI_TX_CTL(pipe);
3857 temp = I915_READ(reg);
3858 temp &= ~FDI_LINK_TRAIN_NONE;
3859 temp |= FDI_LINK_TRAIN_PATTERN_1;
3860 I915_WRITE(reg, temp);
3861
3862 reg = FDI_RX_CTL(pipe);
3863 temp = I915_READ(reg);
3864 if (HAS_PCH_CPT(dev)) {
3865 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3866 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3867 } else {
3868 temp &= ~FDI_LINK_TRAIN_NONE;
3869 temp |= FDI_LINK_TRAIN_PATTERN_1;
3870 }
3871 /* BPC in FDI rx is consistent with that in PIPECONF */
3872 temp &= ~(0x07 << 16);
3873 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3874 I915_WRITE(reg, temp);
3875
3876 POSTING_READ(reg);
3877 udelay(100);
3878 }
3879
3880 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3881 {
3882 struct intel_crtc *crtc;
3883
3884 /* Note that we don't need to be called with mode_config.lock here
3885 * as our list of CRTC objects is static for the lifetime of the
3886 * device and so cannot disappear as we iterate. Similarly, we can
3887 * happily treat the predicates as racy, atomic checks as userspace
3888 * cannot claim and pin a new fb without at least acquiring the
3889 * struct_mutex and so serialising with us.
3890 */
3891 for_each_intel_crtc(dev, crtc) {
3892 if (atomic_read(&crtc->unpin_work_count) == 0)
3893 continue;
3894
3895 if (crtc->unpin_work)
3896 intel_wait_for_vblank(dev, crtc->pipe);
3897
3898 return true;
3899 }
3900
3901 return false;
3902 }
3903
3904 static void page_flip_completed(struct intel_crtc *intel_crtc)
3905 {
3906 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3907 struct intel_unpin_work *work = intel_crtc->unpin_work;
3908
3909 /* ensure that the unpin work is consistent wrt ->pending. */
3910 smp_rmb();
3911 intel_crtc->unpin_work = NULL;
3912
3913 if (work->event)
3914 drm_send_vblank_event(intel_crtc->base.dev,
3915 intel_crtc->pipe,
3916 work->event);
3917
3918 drm_crtc_vblank_put(&intel_crtc->base);
3919
3920 wake_up_all(&dev_priv->pending_flip_queue);
3921 queue_work(dev_priv->wq, &work->work);
3922
3923 trace_i915_flip_complete(intel_crtc->plane,
3924 work->pending_flip_obj);
3925 }
3926
3927 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3928 {
3929 struct drm_device *dev = crtc->dev;
3930 struct drm_i915_private *dev_priv = dev->dev_private;
3931 long ret;
3932
3933 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3934
3935 ret = wait_event_interruptible_timeout(
3936 dev_priv->pending_flip_queue,
3937 !intel_crtc_has_pending_flip(crtc),
3938 60*HZ);
3939
3940 if (ret < 0)
3941 return ret;
3942
3943 if (ret == 0) {
3944 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3945
3946 spin_lock_irq(&dev->event_lock);
3947 if (intel_crtc->unpin_work) {
3948 WARN_ONCE(1, "Removing stuck page flip\n");
3949 page_flip_completed(intel_crtc);
3950 }
3951 spin_unlock_irq(&dev->event_lock);
3952 }
3953
3954 return 0;
3955 }
3956
3957 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3958 {
3959 u32 temp;
3960
3961 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3962
3963 mutex_lock(&dev_priv->sb_lock);
3964
3965 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3966 temp |= SBI_SSCCTL_DISABLE;
3967 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3968
3969 mutex_unlock(&dev_priv->sb_lock);
3970 }
3971
3972 /* Program iCLKIP clock to the desired frequency */
3973 static void lpt_program_iclkip(struct drm_crtc *crtc)
3974 {
3975 struct drm_device *dev = crtc->dev;
3976 struct drm_i915_private *dev_priv = dev->dev_private;
3977 int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3978 u32 divsel, phaseinc, auxdiv, phasedir = 0;
3979 u32 temp;
3980
3981 lpt_disable_iclkip(dev_priv);
3982
3983 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
3984 if (clock == 20000) {
3985 auxdiv = 1;
3986 divsel = 0x41;
3987 phaseinc = 0x20;
3988 } else {
3989 /* The iCLK virtual clock root frequency is in MHz,
3990 * but the adjusted_mode->crtc_clock is in kHz. To get the
3991 * divisors, it is necessary to divide one by another, so we
3992 * convert the virtual clock precision to kHz here for higher
3993 * precision.
3994 */
3995 u32 iclk_virtual_root_freq = 172800 * 1000;
3996 u32 iclk_pi_range = 64;
3997 u32 desired_divisor, msb_divisor_value, pi_value;
3998
3999 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
4000 msb_divisor_value = desired_divisor / iclk_pi_range;
4001 pi_value = desired_divisor % iclk_pi_range;
4002
4003 auxdiv = 0;
4004 divsel = msb_divisor_value - 2;
4005 phaseinc = pi_value;
4006 }
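/* Worked example: for a 108000 kHz (108 MHz) clock,
 * desired_divisor = 172800000 / 108000 = 1600, so
 * msb_divisor_value = 1600 / 64 = 25 and pi_value = 0,
 * giving divsel = 23, phaseinc = 0 and auxdiv = 0. */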
4007
4008 /* This should not happen with any sane values */
4009 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4010 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4011 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4012 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4013
4014 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
4015 clock,
4016 auxdiv,
4017 divsel,
4018 phasedir,
4019 phaseinc);
4020
4021 mutex_lock(&dev_priv->sb_lock);
4022
4023 /* Program SSCDIVINTPHASE6 */
4024 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
4025 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
4026 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
4027 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
4028 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
4029 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
4030 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
4031 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
4032
4033 /* Program SSCAUXDIV */
4034 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
4035 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
4036 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
4037 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
4038
4039 /* Enable modulator and associated divider */
4040 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4041 temp &= ~SBI_SSCCTL_DISABLE;
4042 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4043
4044 mutex_unlock(&dev_priv->sb_lock);
4045
4046 /* Wait for initialization time */
4047 udelay(24);
4048
4049 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
4050 }
4051
4052 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4053 enum pipe pch_transcoder)
4054 {
4055 struct drm_device *dev = crtc->base.dev;
4056 struct drm_i915_private *dev_priv = dev->dev_private;
4057 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4058
4059 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
4060 I915_READ(HTOTAL(cpu_transcoder)));
4061 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
4062 I915_READ(HBLANK(cpu_transcoder)));
4063 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
4064 I915_READ(HSYNC(cpu_transcoder)));
4065
4066 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
4067 I915_READ(VTOTAL(cpu_transcoder)));
4068 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
4069 I915_READ(VBLANK(cpu_transcoder)));
4070 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
4071 I915_READ(VSYNC(cpu_transcoder)));
4072 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
4073 I915_READ(VSYNCSHIFT(cpu_transcoder)));
4074 }
4075
4076 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4077 {
4078 struct drm_i915_private *dev_priv = dev->dev_private;
4079 uint32_t temp;
4080
4081 temp = I915_READ(SOUTH_CHICKEN1);
4082 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
4083 return;
4084
4085 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
4086 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
4087
4088 temp &= ~FDI_BC_BIFURCATION_SELECT;
4089 if (enable)
4090 temp |= FDI_BC_BIFURCATION_SELECT;
4091
4092 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
4093 I915_WRITE(SOUTH_CHICKEN1, temp);
4094 POSTING_READ(SOUTH_CHICKEN1);
4095 }
4096
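/*
 * FDI B and FDI C share a set of lanes on IVB; the bifurcation bit
 * splits them so that pipe C can transmit at all. Pipe B therefore
 * only tolerates bifurcation when it uses two lanes or fewer, while
 * pipe C always requires it (rough summary; see the bspec for the
 * exact lane-sharing rules).
 */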
4097 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4098 {
4099 struct drm_device *dev = intel_crtc->base.dev;
4100
4101 switch (intel_crtc->pipe) {
4102 case PIPE_A:
4103 break;
4104 case PIPE_B:
4105 if (intel_crtc->config->fdi_lanes > 2)
4106 cpt_set_fdi_bc_bifurcation(dev, false);
4107 else
4108 cpt_set_fdi_bc_bifurcation(dev, true);
4109
4110 break;
4111 case PIPE_C:
4112 cpt_set_fdi_bc_bifurcation(dev, true);
4113
4114 break;
4115 default:
4116 BUG();
4117 }
4118 }
4119
4120 /* Return which DP Port should be selected for Transcoder DP control */
4121 static enum port
4122 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4123 {
4124 struct drm_device *dev = crtc->dev;
4125 struct intel_encoder *encoder;
4126
4127 for_each_encoder_on_crtc(dev, crtc, encoder) {
4128 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4129 encoder->type == INTEL_OUTPUT_EDP)
4130 return enc_to_dig_port(&encoder->base)->port;
4131 }
4132
4133 return -1;
4134 }
4135
4136 /*
4137 * Enable PCH resources required for PCH ports:
4138 * - PCH PLLs
4139 * - FDI training & RX/TX
4140 * - update transcoder timings
4141 * - DP transcoding bits
4142 * - transcoder
4143 */
4144 static void ironlake_pch_enable(struct drm_crtc *crtc)
4145 {
4146 struct drm_device *dev = crtc->dev;
4147 struct drm_i915_private *dev_priv = dev->dev_private;
4148 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4149 int pipe = intel_crtc->pipe;
4150 u32 temp;
4151
4152 assert_pch_transcoder_disabled(dev_priv, pipe);
4153
4154 if (IS_IVYBRIDGE(dev))
4155 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4156
4157 /* Write the TU size bits before fdi link training, so that error
4158 * detection works. */
4159 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4160 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4161
4162 /*
4163 * Sometimes spurious CPU pipe underruns happen during FDI
4164 * training, at least with VGA+HDMI cloning. Suppress them.
4165 */
4166 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4167
4168 /* For PCH output, training FDI link */
4169 dev_priv->display.fdi_link_train(crtc);
4170
4171 /* We need to program the right clock selection before writing the pixel
4172 * multiplier into the DPLL. */
4173 if (HAS_PCH_CPT(dev)) {
4174 u32 sel;
4175
4176 temp = I915_READ(PCH_DPLL_SEL);
4177 temp |= TRANS_DPLL_ENABLE(pipe);
4178 sel = TRANS_DPLLB_SEL(pipe);
4179 if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
4180 temp |= sel;
4181 else
4182 temp &= ~sel;
4183 I915_WRITE(PCH_DPLL_SEL, temp);
4184 }
4185
4186 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
4187 * transcoder, and we actually should do this to not upset any PCH
4188 * transcoder that already uses the clock when we share it.
4189 *
4190 * Note that enable_shared_dpll tries to do the right thing, but
4191 * get_shared_dpll unconditionally resets the pll - we need that to have
4192 * the right LVDS enable sequence. */
4193 intel_enable_shared_dpll(intel_crtc);
4194
4195 /* set transcoder timing, panel must allow it */
4196 assert_panel_unlocked(dev_priv, pipe);
4197 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4198
4199 intel_fdi_normal_train(crtc);
4200
4201 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4202
4203 /* For PCH DP, enable TRANS_DP_CTL */
4204 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4205 const struct drm_display_mode *adjusted_mode =
4206 &intel_crtc->config->base.adjusted_mode;
4207 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4208 i915_reg_t reg = TRANS_DP_CTL(pipe);
4209 temp = I915_READ(reg);
4210 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4211 TRANS_DP_SYNC_MASK |
4212 TRANS_DP_BPC_MASK);
4213 temp |= TRANS_DP_OUTPUT_ENABLE;
4214 temp |= bpc << 9; /* same format but at 11:9 */
4215
4216 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4217 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4218 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4219 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4220
4221 switch (intel_trans_dp_port_sel(crtc)) {
4222 case PORT_B:
4223 temp |= TRANS_DP_PORT_SEL_B;
4224 break;
4225 case PORT_C:
4226 temp |= TRANS_DP_PORT_SEL_C;
4227 break;
4228 case PORT_D:
4229 temp |= TRANS_DP_PORT_SEL_D;
4230 break;
4231 default:
4232 BUG();
4233 }
4234
4235 I915_WRITE(reg, temp);
4236 }
4237
4238 ironlake_enable_pch_transcoder(dev_priv, pipe);
4239 }
4240
4241 static void lpt_pch_enable(struct drm_crtc *crtc)
4242 {
4243 struct drm_device *dev = crtc->dev;
4244 struct drm_i915_private *dev_priv = dev->dev_private;
4245 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4246 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4247
4248 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4249
4250 lpt_program_iclkip(crtc);
4251
4252 /* Set transcoder timing. */
4253 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4254
4255 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4256 }
4257
4258 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4259 struct intel_crtc_state *crtc_state)
4260 {
4261 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
4262 struct intel_shared_dpll *pll;
4263 struct intel_shared_dpll_config *shared_dpll;
4264 enum intel_dpll_id i;
4265 int max = dev_priv->num_shared_dpll;
4266
4267 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4268
4269 if (HAS_PCH_IBX(dev_priv->dev)) {
4270 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
4271 i = (enum intel_dpll_id) crtc->pipe;
4272 pll = &dev_priv->shared_dplls[i];
4273
4274 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4275 crtc->base.base.id, pll->name);
4276
4277 WARN_ON(shared_dpll[i].crtc_mask);
4278
4279 goto found;
4280 }
4281
4282 if (IS_BROXTON(dev_priv->dev)) {
4283 /* PLL is attached to port in bxt */
4284 struct intel_encoder *encoder;
4285 struct intel_digital_port *intel_dig_port;
4286
4287 encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4288 if (WARN_ON(!encoder))
4289 return NULL;
4290
4291 intel_dig_port = enc_to_dig_port(&encoder->base);
4292 /* 1:1 mapping between ports and PLLs */
4293 i = (enum intel_dpll_id)intel_dig_port->port;
4294 pll = &dev_priv->shared_dplls[i];
4295 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4296 crtc->base.base.id, pll->name);
4297 WARN_ON(shared_dpll[i].crtc_mask);
4298
4299 goto found;
4300 } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
4301 /* Do not consider SPLL */
4302 max = 2;
4303
4304 for (i = 0; i < max; i++) {
4305 pll = &dev_priv->shared_dplls[i];
4306
4307 /* Only want to check enabled timings first */
4308 if (shared_dpll[i].crtc_mask == 0)
4309 continue;
4310
4311 if (memcmp(&crtc_state->dpll_hw_state,
4312 &shared_dpll[i].hw_state,
4313 sizeof(crtc_state->dpll_hw_state)) == 0) {
4314 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
4315 crtc->base.base.id, pll->name,
4316 shared_dpll[i].crtc_mask,
4317 pll->active);
4318 goto found;
4319 }
4320 }
4321
4322 /* Ok no matching timings, maybe there's a free one? */
4323 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4324 pll = &dev_priv->shared_dplls[i];
4325 if (shared_dpll[i].crtc_mask == 0) {
4326 DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4327 crtc->base.base.id, pll->name);
4328 goto found;
4329 }
4330 }
4331
4332 return NULL;
4333
4334 found:
4335 if (shared_dpll[i].crtc_mask == 0)
4336 shared_dpll[i].hw_state =
4337 crtc_state->dpll_hw_state;
4338
4339 crtc_state->shared_dpll = i;
4340 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4341 pipe_name(crtc->pipe));
4342
4343 shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
4344
4345 return pll;
4346 }
4347
4348 static void intel_shared_dpll_commit(struct drm_atomic_state *state)
4349 {
4350 struct drm_i915_private *dev_priv = to_i915(state->dev);
4351 struct intel_shared_dpll_config *shared_dpll;
4352 struct intel_shared_dpll *pll;
4353 enum intel_dpll_id i;
4354
4355 if (!to_intel_atomic_state(state)->dpll_set)
4356 return;
4357
4358 shared_dpll = to_intel_atomic_state(state)->shared_dpll;
4359 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4360 pll = &dev_priv->shared_dplls[i];
4361 pll->config = shared_dpll[i];
4362 }
4363 }
4364
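/*
 * Sanity check after a modeset on CPT: sample the pipe's scanline
 * counter and give it up to two 5 ms windows to advance; if it never
 * moves, the pipe is considered stuck.
 */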
4365 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4366 {
4367 struct drm_i915_private *dev_priv = dev->dev_private;
4368 i915_reg_t dslreg = PIPEDSL(pipe);
4369 u32 temp;
4370
4371 temp = I915_READ(dslreg);
4372 udelay(500);
4373 if (wait_for(I915_READ(dslreg) != temp, 5)) {
4374 if (wait_for(I915_READ(dslreg) != temp, 5))
4375 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4376 }
4377 }
4378
4379 static int
4380 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4381 unsigned scaler_user, int *scaler_id, unsigned int rotation,
4382 int src_w, int src_h, int dst_w, int dst_h)
4383 {
4384 struct intel_crtc_scaler_state *scaler_state =
4385 &crtc_state->scaler_state;
4386 struct intel_crtc *intel_crtc =
4387 to_intel_crtc(crtc_state->base.crtc);
4388 int need_scaling;
4389
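/* With 90/270 rotation the source is scanned out transposed, so the
 * source height is compared against the destination width and vice
 * versa. */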
4390 need_scaling = intel_rotation_90_or_270(rotation) ?
4391 (src_h != dst_w || src_w != dst_h):
4392 (src_w != dst_w || src_h != dst_h);
4393
4394 /*
4395 * if plane is being disabled or scaler is no longer required or force detach
4396 * - free scaler bound to this plane/crtc
4397 * - in order to do this, update crtc->scaler_usage
4398 *
4399 * Here scaler state in crtc_state is set free so that
4400 * scaler can be assigned to another user. Actual register
4401 * update to free the scaler is done in plane/panel-fit programming.
4402 * For this purpose crtc/plane_state->scaler_id isn't reset here.
4403 */
4404 if (force_detach || !need_scaling) {
4405 if (*scaler_id >= 0) {
4406 scaler_state->scaler_users &= ~(1 << scaler_user);
4407 scaler_state->scalers[*scaler_id].in_use = 0;
4408
4409 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4410 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4411 intel_crtc->pipe, scaler_user, *scaler_id,
4412 scaler_state->scaler_users);
4413 *scaler_id = -1;
4414 }
4415 return 0;
4416 }
4417
4418 /* range checks */
4419 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4420 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4421
4422 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4423 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4424 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4425 "size is out of scaler range\n",
4426 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4427 return -EINVAL;
4428 }
4429
4430 /* mark this plane as a scaler user in crtc_state */
4431 scaler_state->scaler_users |= (1 << scaler_user);
4432 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4433 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4434 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4435 scaler_state->scaler_users);
4436
4437 return 0;
4438 }
4439
4440 /**
4441 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4442 *
4443 * @state: crtc's scaler state
4444 *
4445 * Return
4446 * 0 - scaler_usage updated successfully
4447 * error - requested scaling cannot be supported or other error condition
4448 */
4449 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4450 {
4451 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4452 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4453
4454 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4455 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4456
4457 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4458 &state->scaler_state.scaler_id, DRM_ROTATE_0,
4459 state->pipe_src_w, state->pipe_src_h,
4460 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4461 }
4462
4463 /**
4464 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4465 *
4466 * @crtc_state: crtc's scaler state
4467 * @plane_state: atomic plane state to update
4468 *
4469 * Return
4470 * 0 - scaler_usage updated successfully
4471 * error - requested scaling cannot be supported or other error condition
4472 */
4473 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4474 struct intel_plane_state *plane_state)
4475 {
4476
4477 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4478 struct intel_plane *intel_plane =
4479 to_intel_plane(plane_state->base.plane);
4480 struct drm_framebuffer *fb = plane_state->base.fb;
4481 int ret;
4482
4483 bool force_detach = !fb || !plane_state->visible;
4484
4485 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4486 intel_plane->base.base.id, intel_crtc->pipe,
4487 drm_plane_index(&intel_plane->base));
4488
4489 ret = skl_update_scaler(crtc_state, force_detach,
4490 drm_plane_index(&intel_plane->base),
4491 &plane_state->scaler_id,
4492 plane_state->base.rotation,
4493 drm_rect_width(&plane_state->src) >> 16,
4494 drm_rect_height(&plane_state->src) >> 16,
4495 drm_rect_width(&plane_state->dst),
4496 drm_rect_height(&plane_state->dst));
4497
4498 if (ret || plane_state->scaler_id < 0)
4499 return ret;
4500
4501 /* check colorkey */
4502 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4503 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
4504 intel_plane->base.base.id);
4505 return -EINVAL;
4506 }
4507
4508 /* Check src format */
4509 switch (fb->pixel_format) {
4510 case DRM_FORMAT_RGB565:
4511 case DRM_FORMAT_XBGR8888:
4512 case DRM_FORMAT_XRGB8888:
4513 case DRM_FORMAT_ABGR8888:
4514 case DRM_FORMAT_ARGB8888:
4515 case DRM_FORMAT_XRGB2101010:
4516 case DRM_FORMAT_XBGR2101010:
4517 case DRM_FORMAT_YUYV:
4518 case DRM_FORMAT_YVYU:
4519 case DRM_FORMAT_UYVY:
4520 case DRM_FORMAT_VYUY:
4521 break;
4522 default:
4523 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4524 intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4525 return -EINVAL;
4526 }
4527
4528 return 0;
4529 }
4530
4531 static void skylake_scaler_disable(struct intel_crtc *crtc)
4532 {
4533 int i;
4534
4535 for (i = 0; i < crtc->num_scalers; i++)
4536 skl_detach_scaler(crtc, i);
4537 }
4538
4539 static void skylake_pfit_enable(struct intel_crtc *crtc)
4540 {
4541 struct drm_device *dev = crtc->base.dev;
4542 struct drm_i915_private *dev_priv = dev->dev_private;
4543 int pipe = crtc->pipe;
4544 struct intel_crtc_scaler_state *scaler_state =
4545 &crtc->config->scaler_state;
4546
4547 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4548
4549 if (crtc->config->pch_pfit.enabled) {
4550 int id;
4551
4552 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4553 DRM_ERROR("Requesting pfit without getting a scaler first\n");
4554 return;
4555 }
4556
4557 id = scaler_state->scaler_id;
4558 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4559 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4560 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4561 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4562
4563 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4564 }
4565 }
4566
4567 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4568 {
4569 struct drm_device *dev = crtc->base.dev;
4570 struct drm_i915_private *dev_priv = dev->dev_private;
4571 int pipe = crtc->pipe;
4572
4573 if (crtc->config->pch_pfit.enabled) {
4574 /* Force use of hard-coded filter coefficients
4575 * as some pre-programmed values are broken,
4576 * e.g. x201.
4577 */
4578 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4579 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4580 PF_PIPE_SEL_IVB(pipe));
4581 else
4582 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4583 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4584 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4585 }
4586 }
4587
4588 void hsw_enable_ips(struct intel_crtc *crtc)
4589 {
4590 struct drm_device *dev = crtc->base.dev;
4591 struct drm_i915_private *dev_priv = dev->dev_private;
4592
4593 if (!crtc->config->ips_enabled)
4594 return;
4595
4596 /* We can only enable IPS after we enable a plane and wait for a vblank */
4597 intel_wait_for_vblank(dev, crtc->pipe);
4598
4599 assert_plane_enabled(dev_priv, crtc->plane);
4600 if (IS_BROADWELL(dev)) {
4601 mutex_lock(&dev_priv->rps.hw_lock);
4602 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4603 mutex_unlock(&dev_priv->rps.hw_lock);
4604 /* Quoting Art Runyan: "its not safe to expect any particular
4605 * value in IPS_CTL bit 31 after enabling IPS through the
4606 * mailbox." Moreover, the mailbox may return a bogus state,
4607 * so we need to just enable it and continue on.
4608 */
4609 } else {
4610 I915_WRITE(IPS_CTL, IPS_ENABLE);
4611 /* The bit only becomes 1 in the next vblank, so this wait here
4612 * is essentially intel_wait_for_vblank. If we don't have this
4613 * and don't wait for vblanks until the end of crtc_enable, then
4614 * the HW state readout code will complain that the expected
4615 * IPS_CTL value is not the one we read. */
4616 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4617 DRM_ERROR("Timed out waiting for IPS enable\n");
4618 }
4619 }
4620
4621 void hsw_disable_ips(struct intel_crtc *crtc)
4622 {
4623 struct drm_device *dev = crtc->base.dev;
4624 struct drm_i915_private *dev_priv = dev->dev_private;
4625
4626 if (!crtc->config->ips_enabled)
4627 return;
4628
4629 assert_plane_enabled(dev_priv, crtc->plane);
4630 if (IS_BROADWELL(dev)) {
4631 mutex_lock(&dev_priv->rps.hw_lock);
4632 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4633 mutex_unlock(&dev_priv->rps.hw_lock);
4634 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4635 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4636 DRM_ERROR("Timed out waiting for IPS disable\n");
4637 } else {
4638 I915_WRITE(IPS_CTL, 0);
4639 POSTING_READ(IPS_CTL);
4640 }
4641
4642 /* We need to wait for a vblank before we can disable the plane. */
4643 intel_wait_for_vblank(dev, crtc->pipe);
4644 }
4645
4646 /** Loads the palette/gamma unit for the CRTC with the prepared values */
4647 static void intel_crtc_load_lut(struct drm_crtc *crtc)
4648 {
4649 struct drm_device *dev = crtc->dev;
4650 struct drm_i915_private *dev_priv = dev->dev_private;
4651 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4652 enum pipe pipe = intel_crtc->pipe;
4653 int i;
4654 bool reenable_ips = false;
4655
4656 /* The clocks have to be on to load the palette. */
4657 if (!crtc->state->active)
4658 return;
4659
4660 if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
4661 if (intel_crtc->config->has_dsi_encoder)
4662 assert_dsi_pll_enabled(dev_priv);
4663 else
4664 assert_pll_enabled(dev_priv, pipe);
4665 }
4666
4667 /* Workaround: Do not read or write the pipe palette/gamma data while
4668 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
4669 */
4670 if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
4671 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
4672 GAMMA_MODE_MODE_SPLIT)) {
4673 hsw_disable_ips(intel_crtc);
4674 reenable_ips = true;
4675 }
4676
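/* Each legacy palette entry packs the 8-bit components as
 * 0x00RRGGBB, which is what the shifts below construct. */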
4677 for (i = 0; i < 256; i++) {
4678 i915_reg_t palreg;
4679
4680 if (HAS_GMCH_DISPLAY(dev))
4681 palreg = PALETTE(pipe, i);
4682 else
4683 palreg = LGC_PALETTE(pipe, i);
4684
4685 I915_WRITE(palreg,
4686 (intel_crtc->lut_r[i] << 16) |
4687 (intel_crtc->lut_g[i] << 8) |
4688 intel_crtc->lut_b[i]);
4689 }
4690
4691 if (reenable_ips)
4692 hsw_enable_ips(intel_crtc);
4693 }
4694
4695 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4696 {
4697 if (intel_crtc->overlay) {
4698 struct drm_device *dev = intel_crtc->base.dev;
4699 struct drm_i915_private *dev_priv = dev->dev_private;
4700
4701 mutex_lock(&dev->struct_mutex);
4702 dev_priv->mm.interruptible = false;
4703 (void) intel_overlay_switch_off(intel_crtc->overlay);
4704 dev_priv->mm.interruptible = true;
4705 mutex_unlock(&dev->struct_mutex);
4706 }
4707
4708 /* Let userspace switch the overlay on again. In most cases userspace
4709 * has to recompute where to put it anyway.
4710 */
4711 }
4712
4713 /**
4714 * intel_post_enable_primary - Perform operations after enabling primary plane
4715 * @crtc: the CRTC whose primary plane was just enabled
4716 *
4717 * Performs potentially sleeping operations that must be done after the primary
4718 * plane is enabled, such as updating FBC and IPS. Note that this may be
4719 * called due to an explicit primary plane update, or due to an implicit
4720 * re-enable that is caused when a sprite plane is updated to no longer
4721 * completely hide the primary plane.
4722 */
4723 static void
4724 intel_post_enable_primary(struct drm_crtc *crtc)
4725 {
4726 struct drm_device *dev = crtc->dev;
4727 struct drm_i915_private *dev_priv = dev->dev_private;
4728 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4729 int pipe = intel_crtc->pipe;
4730
4731 /*
4732 * FIXME IPS should be fine as long as one plane is
4733 * enabled, but in practice it seems to have problems
4734 * when going from primary only to sprite only and vice
4735 * versa.
4736 */
4737 hsw_enable_ips(intel_crtc);
4738
4739 /*
4740 * Gen2 reports pipe underruns whenever all planes are disabled.
4741 * So don't enable underrun reporting before at least some planes
4742 * are enabled.
4743 * FIXME: Need to fix the logic to work when we turn off all planes
4744 * but leave the pipe running.
4745 */
4746 if (IS_GEN2(dev))
4747 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4748
4749 /* Underruns don't always raise interrupts, so check manually. */
4750 intel_check_cpu_fifo_underruns(dev_priv);
4751 intel_check_pch_fifo_underruns(dev_priv);
4752 }
4753
4754 /**
4755 * intel_pre_disable_primary - Perform operations before disabling primary plane
4756 * @crtc: the CRTC whose primary plane is to be disabled
4757 *
4758 * Performs potentially sleeping operations that must be done before the
4759 * primary plane is disabled, such as updating FBC and IPS. Note that this may
4760 * be called due to an explicit primary plane update, or due to an implicit
4761 * disable that is caused when a sprite plane completely hides the primary
4762 * plane.
4763 */
4764 static void
4765 intel_pre_disable_primary(struct drm_crtc *crtc)
4766 {
4767 struct drm_device *dev = crtc->dev;
4768 struct drm_i915_private *dev_priv = dev->dev_private;
4769 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4770 int pipe = intel_crtc->pipe;
4771
4772 /*
4773 * Gen2 reports pipe underruns whenever all planes are disabled.
4774 * So disable underrun reporting before all the planes get disabled.
4775 * FIXME: Need to fix the logic to work when we turn off all planes
4776 * but leave the pipe running.
4777 */
4778 if (IS_GEN2(dev))
4779 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4780
4781 /*
4782 * Vblank time updates from the shadow to live plane control register
4783 * are blocked if the memory self-refresh mode is active at that
4784 * moment. So to make sure the plane gets truly disabled, disable
4785 * first the self-refresh mode. The self-refresh enable bit in turn
4786 * will be checked/applied by the HW only at the next frame start
4787 * event which is after the vblank start event, so we need to have a
4788 * wait-for-vblank between disabling the plane and the pipe.
4789 */
4790 if (HAS_GMCH_DISPLAY(dev)) {
4791 intel_set_memory_cxsr(dev_priv, false);
4792 dev_priv->wm.vlv.cxsr = false;
4793 intel_wait_for_vblank(dev, pipe);
4794 }
4795
4796 /*
4797 * FIXME IPS should be fine as long as one plane is
4798 * enabled, but in practice it seems to have problems
4799 * when going from primary only to sprite only and vice
4800 * versa.
4801 */
4802 hsw_disable_ips(intel_crtc);
4803 }
4804
4805 static void intel_post_plane_update(struct intel_crtc *crtc)
4806 {
4807 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4808 struct intel_crtc_state *pipe_config =
4809 to_intel_crtc_state(crtc->base.state);
4810 struct drm_device *dev = crtc->base.dev;
4811
4812 if (atomic->wait_vblank)
4813 intel_wait_for_vblank(dev, crtc->pipe);
4814
4815 intel_frontbuffer_flip(dev, atomic->fb_bits);
4816
4817 crtc->wm.cxsr_allowed = true;
4818
4819 if (pipe_config->wm_changed && pipe_config->base.active)
4820 intel_update_watermarks(&crtc->base);
4821
4822 if (atomic->update_fbc)
4823 intel_fbc_update(crtc);
4824
4825 if (atomic->post_enable_primary)
4826 intel_post_enable_primary(&crtc->base);
4827
4828 memset(atomic, 0, sizeof(*atomic));
4829 }
4830
4831 static void intel_pre_plane_update(struct intel_crtc *crtc)
4832 {
4833 struct drm_device *dev = crtc->base.dev;
4834 struct drm_i915_private *dev_priv = dev->dev_private;
4835 struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
4836 struct intel_crtc_state *pipe_config =
4837 to_intel_crtc_state(crtc->base.state);
4838
4839 if (atomic->disable_fbc)
4840 intel_fbc_deactivate(crtc);
4841
4842 if (crtc->atomic.disable_ips)
4843 hsw_disable_ips(crtc);
4844
4845 if (atomic->pre_disable_primary)
4846 intel_pre_disable_primary(&crtc->base);
4847
4848 if (pipe_config->disable_cxsr) {
4849 crtc->wm.cxsr_allowed = false;
4850 intel_set_memory_cxsr(dev_priv, false);
4851 }
4852
4853 if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
4854 intel_update_watermarks(&crtc->base);
4855 }
4856
4857 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4858 {
4859 struct drm_device *dev = crtc->dev;
4860 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4861 struct drm_plane *p;
4862 int pipe = intel_crtc->pipe;
4863
4864 intel_crtc_dpms_overlay_disable(intel_crtc);
4865
4866 drm_for_each_plane_mask(p, dev, plane_mask)
4867 to_intel_plane(p)->disable_plane(p, crtc);
4868
4869 /*
4870 * FIXME: Once we grow proper nuclear flip support out of this we need
4871 * to compute the mask of flip planes precisely. For the time being
4872 * consider this a flip to a NULL plane.
4873 */
4874 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4875 }
4876
4877 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4878 {
4879 struct drm_device *dev = crtc->dev;
4880 struct drm_i915_private *dev_priv = dev->dev_private;
4881 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4882 struct intel_encoder *encoder;
4883 int pipe = intel_crtc->pipe;
4884
4885 if (WARN_ON(intel_crtc->active))
4886 return;
4887
4888 if (intel_crtc->config->has_pch_encoder)
4889 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4890
4891 if (intel_crtc->config->has_pch_encoder)
4892 intel_prepare_shared_dpll(intel_crtc);
4893
4894 if (intel_crtc->config->has_dp_encoder)
4895 intel_dp_set_m_n(intel_crtc, M1_N1);
4896
4897 intel_set_pipe_timings(intel_crtc);
4898
4899 if (intel_crtc->config->has_pch_encoder) {
4900 intel_cpu_transcoder_set_m_n(intel_crtc,
4901 &intel_crtc->config->fdi_m_n, NULL);
4902 }
4903
4904 ironlake_set_pipeconf(crtc);
4905
4906 intel_crtc->active = true;
4907
4908 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4909
4910 for_each_encoder_on_crtc(dev, crtc, encoder)
4911 if (encoder->pre_enable)
4912 encoder->pre_enable(encoder);
4913
4914 if (intel_crtc->config->has_pch_encoder) {
4915 /* Note: FDI PLL enabling _must_ be done before we enable the
4916 * cpu pipes, hence this is separate from all the other fdi/pch
4917 * enabling. */
4918 ironlake_fdi_pll_enable(intel_crtc);
4919 } else {
4920 assert_fdi_tx_disabled(dev_priv, pipe);
4921 assert_fdi_rx_disabled(dev_priv, pipe);
4922 }
4923
4924 ironlake_pfit_enable(intel_crtc);
4925
4926 /*
4927 * On ILK+ LUT must be loaded before the pipe is running but with
4928 * clocks enabled
4929 */
4930 intel_crtc_load_lut(crtc);
4931
4932 intel_update_watermarks(crtc);
4933 intel_enable_pipe(intel_crtc);
4934
4935 if (intel_crtc->config->has_pch_encoder)
4936 ironlake_pch_enable(crtc);
4937
4938 assert_vblank_disabled(crtc);
4939 drm_crtc_vblank_on(crtc);
4940
4941 for_each_encoder_on_crtc(dev, crtc, encoder)
4942 encoder->enable(encoder);
4943
4944 if (HAS_PCH_CPT(dev))
4945 cpt_verify_modeset(dev, intel_crtc->pipe);
4946
4947 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4948 if (intel_crtc->config->has_pch_encoder)
4949 intel_wait_for_vblank(dev, pipe);
4950 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4951
4952 intel_fbc_enable(intel_crtc);
4953 }
4954
4955 /* IPS only exists on ULT machines and is tied to pipe A. */
4956 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4957 {
4958 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4959 }
4960
4961 static void haswell_crtc_enable(struct drm_crtc *crtc)
4962 {
4963 struct drm_device *dev = crtc->dev;
4964 struct drm_i915_private *dev_priv = dev->dev_private;
4965 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4966 struct intel_encoder *encoder;
4967 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4968 struct intel_crtc_state *pipe_config =
4969 to_intel_crtc_state(crtc->state);
4970
4971 if (WARN_ON(intel_crtc->active))
4972 return;
4973
4974 if (intel_crtc->config->has_pch_encoder)
4975 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4976 false);
4977
4978 if (intel_crtc_to_shared_dpll(intel_crtc))
4979 intel_enable_shared_dpll(intel_crtc);
4980
4981 if (intel_crtc->config->has_dp_encoder)
4982 intel_dp_set_m_n(intel_crtc, M1_N1);
4983
4984 intel_set_pipe_timings(intel_crtc);
4985
4986 if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
4987 I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
4988 intel_crtc->config->pixel_multiplier - 1);
4989 }
4990
4991 if (intel_crtc->config->has_pch_encoder) {
4992 intel_cpu_transcoder_set_m_n(intel_crtc,
4993 &intel_crtc->config->fdi_m_n, NULL);
4994 }
4995
4996 haswell_set_pipeconf(crtc);
4997
4998 intel_set_pipe_csc(crtc);
4999
5000 intel_crtc->active = true;
5001
5002 if (intel_crtc->config->has_pch_encoder)
5003 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5004 else
5005 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5006
5007 for_each_encoder_on_crtc(dev, crtc, encoder) {
5008 if (encoder->pre_enable)
5009 encoder->pre_enable(encoder);
5010 }
5011
5012 if (intel_crtc->config->has_pch_encoder)
5013 dev_priv->display.fdi_link_train(crtc);
5014
5015 if (!intel_crtc->config->has_dsi_encoder)
5016 intel_ddi_enable_pipe_clock(intel_crtc);
5017
5018 if (INTEL_INFO(dev)->gen >= 9)
5019 skylake_pfit_enable(intel_crtc);
5020 else
5021 ironlake_pfit_enable(intel_crtc);
5022
5023 /*
5024 * On ILK+ LUT must be loaded before the pipe is running but with
5025 * clocks enabled
5026 */
5027 intel_crtc_load_lut(crtc);
5028
5029 intel_ddi_set_pipe_settings(crtc);
5030 if (!intel_crtc->config->has_dsi_encoder)
5031 intel_ddi_enable_transcoder_func(crtc);
5032
5033 intel_update_watermarks(crtc);
5034 intel_enable_pipe(intel_crtc);
5035
5036 if (intel_crtc->config->has_pch_encoder)
5037 lpt_pch_enable(crtc);
5038
5039 if (intel_crtc->config->dp_encoder_is_mst)
5040 intel_ddi_set_vc_payload_alloc(crtc, true);
5041
5042 assert_vblank_disabled(crtc);
5043 drm_crtc_vblank_on(crtc);
5044
5045 for_each_encoder_on_crtc(dev, crtc, encoder) {
5046 encoder->enable(encoder);
5047 intel_opregion_notify_encoder(encoder, true);
5048 }
5049
5050 if (intel_crtc->config->has_pch_encoder) {
5051 intel_wait_for_vblank(dev, pipe);
5052 intel_wait_for_vblank(dev, pipe);
5053 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5054 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5055 true);
5056 }
5057
5058 /* If we change the relative order between pipe/planes enabling, we need
5059 * to change the workaround. */
5060 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
5061 if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
5062 intel_wait_for_vblank(dev, hsw_workaround_pipe);
5063 intel_wait_for_vblank(dev, hsw_workaround_pipe);
5064 }
5065
5066 intel_fbc_enable(intel_crtc);
5067 }
5068
5069 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
5070 {
5071 struct drm_device *dev = crtc->base.dev;
5072 struct drm_i915_private *dev_priv = dev->dev_private;
5073 int pipe = crtc->pipe;
5074
5075 /* To avoid upsetting the power well on haswell only disable the pfit if
5076 * it's in use. The hw state code will make sure we get this right. */
5077 if (force || crtc->config->pch_pfit.enabled) {
5078 I915_WRITE(PF_CTL(pipe), 0);
5079 I915_WRITE(PF_WIN_POS(pipe), 0);
5080 I915_WRITE(PF_WIN_SZ(pipe), 0);
5081 }
5082 }
5083
5084 static void ironlake_crtc_disable(struct drm_crtc *crtc)
5085 {
5086 struct drm_device *dev = crtc->dev;
5087 struct drm_i915_private *dev_priv = dev->dev_private;
5088 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5089 struct intel_encoder *encoder;
5090 int pipe = intel_crtc->pipe;
5091
5092 if (intel_crtc->config->has_pch_encoder)
5093 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
5094
5095 for_each_encoder_on_crtc(dev, crtc, encoder)
5096 encoder->disable(encoder);
5097
5098 drm_crtc_vblank_off(crtc);
5099 assert_vblank_disabled(crtc);
5100
5101 /*
5102 * Sometimes spurious CPU pipe underruns happen when the
5103 * pipe is already disabled, but FDI RX/TX is still enabled.
5104 * Happens at least with VGA+HDMI cloning. Suppress them.
5105 */
5106 if (intel_crtc->config->has_pch_encoder)
5107 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
5108
5109 intel_disable_pipe(intel_crtc);
5110
5111 ironlake_pfit_disable(intel_crtc, false);
5112
5113 if (intel_crtc->config->has_pch_encoder) {
5114 ironlake_fdi_disable(crtc);
5115 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5116 }
5117
5118 for_each_encoder_on_crtc(dev, crtc, encoder)
5119 if (encoder->post_disable)
5120 encoder->post_disable(encoder);
5121
5122 if (intel_crtc->config->has_pch_encoder) {
5123 ironlake_disable_pch_transcoder(dev_priv, pipe);
5124
5125 if (HAS_PCH_CPT(dev)) {
5126 i915_reg_t reg;
5127 u32 temp;
5128
5129 /* disable TRANS_DP_CTL */
5130 reg = TRANS_DP_CTL(pipe);
5131 temp = I915_READ(reg);
5132 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5133 TRANS_DP_PORT_SEL_MASK);
5134 temp |= TRANS_DP_PORT_SEL_NONE;
5135 I915_WRITE(reg, temp);
5136
5137 /* disable DPLL_SEL */
5138 temp = I915_READ(PCH_DPLL_SEL);
5139 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
5140 I915_WRITE(PCH_DPLL_SEL, temp);
5141 }
5142
5143 ironlake_fdi_pll_disable(intel_crtc);
5144 }
5145
5146 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
5147
5148 intel_fbc_disable_crtc(intel_crtc);
5149 }
5150
5151 static void haswell_crtc_disable(struct drm_crtc *crtc)
5152 {
5153 struct drm_device *dev = crtc->dev;
5154 struct drm_i915_private *dev_priv = dev->dev_private;
5155 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5156 struct intel_encoder *encoder;
5157 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
5158
5159 if (intel_crtc->config->has_pch_encoder)
5160 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5161 false);
5162
5163 for_each_encoder_on_crtc(dev, crtc, encoder) {
5164 intel_opregion_notify_encoder(encoder, false);
5165 encoder->disable(encoder);
5166 }
5167
5168 drm_crtc_vblank_off(crtc);
5169 assert_vblank_disabled(crtc);
5170
5171 intel_disable_pipe(intel_crtc);
5172
5173 if (intel_crtc->config->dp_encoder_is_mst)
5174 intel_ddi_set_vc_payload_alloc(crtc, false);
5175
5176 if (!intel_crtc->config->has_dsi_encoder)
5177 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5178
5179 if (INTEL_INFO(dev)->gen >= 9)
5180 skylake_scaler_disable(intel_crtc);
5181 else
5182 ironlake_pfit_disable(intel_crtc, false);
5183
5184 if (!intel_crtc->config->has_dsi_encoder)
5185 intel_ddi_disable_pipe_clock(intel_crtc);
5186
5187 for_each_encoder_on_crtc(dev, crtc, encoder)
5188 if (encoder->post_disable)
5189 encoder->post_disable(encoder);
5190
5191 if (intel_crtc->config->has_pch_encoder) {
5192 lpt_disable_pch_transcoder(dev_priv);
5193 lpt_disable_iclkip(dev_priv);
5194 intel_ddi_fdi_disable(crtc);
5195
5196 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5197 true);
5198 }
5199
5200 intel_fbc_disable_crtc(intel_crtc);
5201 }
5202
5203 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5204 {
5205 struct drm_device *dev = crtc->base.dev;
5206 struct drm_i915_private *dev_priv = dev->dev_private;
5207 struct intel_crtc_state *pipe_config = crtc->config;
5208
5209 if (!pipe_config->gmch_pfit.control)
5210 return;
5211
5212 /*
5213 * The panel fitter should only be adjusted whilst the pipe is disabled,
5214 * according to register description and PRM.
5215 */
5216 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5217 assert_pipe_disabled(dev_priv, crtc->pipe);
5218
5219 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5220 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5221
5222 /* Border color in case we don't scale up to the full screen. Black by
5223 * default, change to something else for debugging. */
5224 I915_WRITE(BCLRPAT(crtc->pipe), 0);
5225 }
5226
5227 static enum intel_display_power_domain port_to_power_domain(enum port port)
5228 {
5229 switch (port) {
5230 case PORT_A:
5231 return POWER_DOMAIN_PORT_DDI_A_LANES;
5232 case PORT_B:
5233 return POWER_DOMAIN_PORT_DDI_B_LANES;
5234 case PORT_C:
5235 return POWER_DOMAIN_PORT_DDI_C_LANES;
5236 case PORT_D:
5237 return POWER_DOMAIN_PORT_DDI_D_LANES;
5238 case PORT_E:
5239 return POWER_DOMAIN_PORT_DDI_E_LANES;
5240 default:
5241 MISSING_CASE(port);
5242 return POWER_DOMAIN_PORT_OTHER;
5243 }
5244 }
5245
5246 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5247 {
5248 switch (port) {
5249 case PORT_A:
5250 return POWER_DOMAIN_AUX_A;
5251 case PORT_B:
5252 return POWER_DOMAIN_AUX_B;
5253 case PORT_C:
5254 return POWER_DOMAIN_AUX_C;
5255 case PORT_D:
5256 return POWER_DOMAIN_AUX_D;
5257 case PORT_E:
5258 /* FIXME: Check VBT for actual wiring of PORT E */
5259 return POWER_DOMAIN_AUX_D;
5260 default:
5261 MISSING_CASE(port);
5262 return POWER_DOMAIN_AUX_A;
5263 }
5264 }
5265
5266 enum intel_display_power_domain
5267 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5268 {
5269 struct drm_device *dev = intel_encoder->base.dev;
5270 struct intel_digital_port *intel_dig_port;
5271
5272 switch (intel_encoder->type) {
5273 case INTEL_OUTPUT_UNKNOWN:
5274 /* Only DDI platforms should ever use this output type */
5275 WARN_ON_ONCE(!HAS_DDI(dev));
5276 case INTEL_OUTPUT_DISPLAYPORT:
5277 case INTEL_OUTPUT_HDMI:
5278 case INTEL_OUTPUT_EDP:
5279 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5280 return port_to_power_domain(intel_dig_port->port);
5281 case INTEL_OUTPUT_DP_MST:
5282 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5283 return port_to_power_domain(intel_dig_port->port);
5284 case INTEL_OUTPUT_ANALOG:
5285 return POWER_DOMAIN_PORT_CRT;
5286 case INTEL_OUTPUT_DSI:
5287 return POWER_DOMAIN_PORT_DSI;
5288 default:
5289 return POWER_DOMAIN_PORT_OTHER;
5290 }
5291 }
5292
5293 enum intel_display_power_domain
5294 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5295 {
5296 struct drm_device *dev = intel_encoder->base.dev;
5297 struct intel_digital_port *intel_dig_port;
5298
5299 switch (intel_encoder->type) {
5300 case INTEL_OUTPUT_UNKNOWN:
5301 case INTEL_OUTPUT_HDMI:
5302 /*
5303 * Only DDI platforms should ever use these output types.
5304 * We can get here after the HDMI detect code has already set
5305 * the type of the shared encoder. Since we can't be sure
5306 * what the status of the given connectors is, play it safe and
5307 * run the DP detection too.
5308 */
5309 WARN_ON_ONCE(!HAS_DDI(dev));
5310 case INTEL_OUTPUT_DISPLAYPORT:
5311 case INTEL_OUTPUT_EDP:
5312 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5313 return port_to_aux_power_domain(intel_dig_port->port);
5314 case INTEL_OUTPUT_DP_MST:
5315 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5316 return port_to_aux_power_domain(intel_dig_port->port);
5317 default:
5318 MISSING_CASE(intel_encoder->type);
5319 return POWER_DOMAIN_AUX_A;
5320 }
5321 }
5322
5323 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
5324 {
5325 struct drm_device *dev = crtc->dev;
5326 struct intel_encoder *intel_encoder;
5327 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5328 enum pipe pipe = intel_crtc->pipe;
5329 unsigned long mask;
5330 enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
5331
5332 if (!crtc->state->active)
5333 return 0;
5334
5335 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5336 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5337 if (intel_crtc->config->pch_pfit.enabled ||
5338 intel_crtc->config->pch_pfit.force_thru)
5339 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5340
5341 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5342 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5343
5344 return mask;
5345 }
5346
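/*
 * Grab references on the power domains the CRTC needs in its new
 * state and return the mask of domains it no longer uses; the caller
 * is expected to drop those once the new state has taken effect.
 */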
5347 static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
5348 {
5349 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5350 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5351 enum intel_display_power_domain domain;
5352 unsigned long domains, new_domains, old_domains;
5353
5354 old_domains = intel_crtc->enabled_power_domains;
5355 intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
5356
5357 domains = new_domains & ~old_domains;
5358
5359 for_each_power_domain(domain, domains)
5360 intel_display_power_get(dev_priv, domain);
5361
5362 return old_domains & ~new_domains;
5363 }
5364
5365 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5366 unsigned long domains)
5367 {
5368 enum intel_display_power_domain domain;
5369
5370 for_each_power_domain(domain, domains)
5371 intel_display_power_put(dev_priv, domain);
5372 }
5373
5374 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
5375 {
5376 struct drm_device *dev = state->dev;
5377 struct drm_i915_private *dev_priv = dev->dev_private;
5378 unsigned long put_domains[I915_MAX_PIPES] = {};
5379 struct drm_crtc_state *crtc_state;
5380 struct drm_crtc *crtc;
5381 int i;
5382
5383 for_each_crtc_in_state(state, crtc, crtc_state, i) {
5384 if (needs_modeset(crtc->state))
5385 put_domains[to_intel_crtc(crtc)->pipe] =
5386 modeset_get_crtc_power_domains(crtc);
5387 }
5388
5389 if (dev_priv->display.modeset_commit_cdclk) {
5390 unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
5391
5392 if (cdclk != dev_priv->cdclk_freq &&
5393 !WARN_ON(!state->allow_modeset))
5394 dev_priv->display.modeset_commit_cdclk(state);
5395 }
5396
5397 for (i = 0; i < I915_MAX_PIPES; i++)
5398 if (put_domains[i])
5399 modeset_put_power_domains(dev_priv, put_domains[i]);
5400 }
5401
5402 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5403 {
5404 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5405
5406 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5407 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5408 return max_cdclk_freq;
5409 else if (IS_CHERRYVIEW(dev_priv))
5410 return max_cdclk_freq*95/100;
5411 else if (INTEL_INFO(dev_priv)->gen < 4)
5412 return 2*max_cdclk_freq*90/100;
5413 else
5414 return max_cdclk_freq*90/100;
5415 }
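
/*
 * Worked example of the guardbands above (illustration only): a CHV part
 * with a 320000 kHz max cdclk may drive a 320000 * 95 / 100 = 304000 kHz
 * dotclock, while a hypothetical gen3 part with the same cdclk could reach
 * 2 * 320000 * 90 / 100 = 576000 kHz thanks to double wide mode.
 */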
5416
5417 static void intel_update_max_cdclk(struct drm_device *dev)
5418 {
5419 struct drm_i915_private *dev_priv = dev->dev_private;
5420
5421 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5422 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5423
5424 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5425 dev_priv->max_cdclk_freq = 675000;
5426 else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5427 dev_priv->max_cdclk_freq = 540000;
5428 else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5429 dev_priv->max_cdclk_freq = 450000;
5430 else
5431 dev_priv->max_cdclk_freq = 337500;
5432 } else if (IS_BROADWELL(dev)) {
5433 /*
5434 * FIXME with extra cooling we can allow
5435 		 * 540 MHz for ULX and 675 MHz for ULT.
5436 * How can we know if extra cooling is
5437 * available? PCI ID, VTB, something else?
5438 */
5439 if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5440 dev_priv->max_cdclk_freq = 450000;
5441 else if (IS_BDW_ULX(dev))
5442 dev_priv->max_cdclk_freq = 450000;
5443 else if (IS_BDW_ULT(dev))
5444 dev_priv->max_cdclk_freq = 540000;
5445 else
5446 dev_priv->max_cdclk_freq = 675000;
5447 } else if (IS_CHERRYVIEW(dev)) {
5448 dev_priv->max_cdclk_freq = 320000;
5449 } else if (IS_VALLEYVIEW(dev)) {
5450 dev_priv->max_cdclk_freq = 400000;
5451 } else {
5452 /* otherwise assume cdclk is fixed */
5453 dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5454 }
5455
5456 dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5457
5458 DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5459 dev_priv->max_cdclk_freq);
5460
5461 DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5462 dev_priv->max_dotclk_freq);
5463 }
5464
5465 static void intel_update_cdclk(struct drm_device *dev)
5466 {
5467 struct drm_i915_private *dev_priv = dev->dev_private;
5468
5469 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5470 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5471 dev_priv->cdclk_freq);
5472
5478 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5479 /*
5480 * Program the gmbus_freq based on the cdclk frequency.
5481 * BSpec erroneously claims we should aim for 4MHz, but
5482 * in fact 1MHz is the correct frequency.
5483 */
5484 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5485 }
5486
5487 if (dev_priv->max_cdclk_freq == 0)
5488 intel_update_max_cdclk(dev);
5489 }
5490
5491 static void broxton_set_cdclk(struct drm_device *dev, int frequency)
5492 {
5493 struct drm_i915_private *dev_priv = dev->dev_private;
5494 uint32_t divider;
5495 uint32_t ratio;
5496 uint32_t current_freq;
5497 int ret;
5498
5499 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
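	/* e.g. 624000 kHz = 19200 * 65 / 2 / 1 and 288000 kHz = 19200 * 60 / 2 / 2 */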
5500 switch (frequency) {
5501 case 144000:
5502 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5503 ratio = BXT_DE_PLL_RATIO(60);
5504 break;
5505 case 288000:
5506 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5507 ratio = BXT_DE_PLL_RATIO(60);
5508 break;
5509 case 384000:
5510 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5511 ratio = BXT_DE_PLL_RATIO(60);
5512 break;
5513 case 576000:
5514 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5515 ratio = BXT_DE_PLL_RATIO(60);
5516 break;
5517 case 624000:
5518 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5519 ratio = BXT_DE_PLL_RATIO(65);
5520 break;
5521 case 19200:
5522 /*
5523 * Bypass frequency with DE PLL disabled. Init ratio, divider
5524 * to suppress GCC warning.
5525 */
5526 ratio = 0;
5527 divider = 0;
5528 break;
5529 default:
5530 		DRM_ERROR("unsupported CDCLK freq %d\n", frequency);
5531
5532 return;
5533 }
5534
5535 mutex_lock(&dev_priv->rps.hw_lock);
5536 /* Inform power controller of upcoming frequency change */
5537 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5538 0x80000000);
5539 mutex_unlock(&dev_priv->rps.hw_lock);
5540
5541 if (ret) {
5542 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5543 ret, frequency);
5544 return;
5545 }
5546
5547 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5548 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5549 current_freq = current_freq * 500 + 1000;
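	/* e.g. a raw value of 1246 decodes to 1246 * 500 + 1000 = 624000 kHz */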
5550
5551 	/*
5552 	 * The DE PLL has to be disabled when:
5553 	 * - setting the 19.2MHz bypass frequency (the PLL isn't used)
5554 	 * - changing to 624MHz (the PLL ratio needs reprogramming)
5555 	 * - changing away from 624MHz (the PLL ratio needs reprogramming)
5556 	 */
5557 if (frequency == 19200 || frequency == 624000 ||
5558 current_freq == 624000) {
5559 		I915_WRITE(BXT_DE_PLL_ENABLE, I915_READ(BXT_DE_PLL_ENABLE) & ~BXT_DE_PLL_PLL_ENABLE);
5560 /* Timeout 200us */
5561 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5562 1))
5563 			DRM_ERROR("timeout waiting for DE PLL unlock\n");
5564 }
5565
5566 if (frequency != 19200) {
5567 uint32_t val;
5568
5569 val = I915_READ(BXT_DE_PLL_CTL);
5570 val &= ~BXT_DE_PLL_RATIO_MASK;
5571 val |= ratio;
5572 I915_WRITE(BXT_DE_PLL_CTL, val);
5573
5574 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5575 /* Timeout 200us */
5576 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5577 DRM_ERROR("timeout waiting for DE PLL lock\n");
5578
5579 val = I915_READ(CDCLK_CTL);
5580 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5581 val |= divider;
5582 /*
5583 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5584 * enable otherwise.
5585 */
5586 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5587 if (frequency >= 500000)
5588 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5589
5590 val &= ~CDCLK_FREQ_DECIMAL_MASK;
5591 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5592 val |= (frequency - 1000) / 500;
5593 I915_WRITE(CDCLK_CTL, val);
5594 }
5595
5596 mutex_lock(&dev_priv->rps.hw_lock);
5597 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5598 DIV_ROUND_UP(frequency, 25000));
5599 mutex_unlock(&dev_priv->rps.hw_lock);
5600
5601 if (ret) {
5602 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5603 ret, frequency);
5604 return;
5605 }
5606
5607 intel_update_cdclk(dev);
5608 }
5609
5610 void broxton_init_cdclk(struct drm_device *dev)
5611 {
5612 struct drm_i915_private *dev_priv = dev->dev_private;
5613 uint32_t val;
5614
5615 /*
5616 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5617 * or else the reset will hang because there is no PCH to respond.
5618 * Move the handshake programming to initialization sequence.
5619 	 * Previously this was left up to the BIOS.
5620 */
5621 val = I915_READ(HSW_NDE_RSTWRN_OPT);
5622 val &= ~RESET_PCH_HANDSHAKE_ENABLE;
5623 I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
5624
5625 /* Enable PG1 for cdclk */
5626 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5627
5628 /* check if cd clock is enabled */
5629 if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
5630 DRM_DEBUG_KMS("Display already initialized\n");
5631 return;
5632 }
5633
5634 /*
5635 * FIXME:
5636 * - The initial CDCLK needs to be read from VBT.
5637 * Need to make this change after VBT has changes for BXT.
5638 * - check if setting the max (or any) cdclk freq is really necessary
5639 * here, it belongs to modeset time
5640 */
5641 broxton_set_cdclk(dev, 624000);
5642
5643 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5644 POSTING_READ(DBUF_CTL);
5645
5646 udelay(10);
5647
5648 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5649 DRM_ERROR("DBuf power enable timeout!\n");
5650 }
5651
5652 void broxton_uninit_cdclk(struct drm_device *dev)
5653 {
5654 struct drm_i915_private *dev_priv = dev->dev_private;
5655
5656 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5657 POSTING_READ(DBUF_CTL);
5658
5659 udelay(10);
5660
5661 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5662 DRM_ERROR("DBuf power disable timeout!\n");
5663
5664 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5665 broxton_set_cdclk(dev, 19200);
5666
5667 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5668 }
5669
5670 static const struct skl_cdclk_entry {
5671 unsigned int freq;
5672 unsigned int vco;
5673 } skl_cdclk_frequencies[] = {
5674 { .freq = 308570, .vco = 8640 },
5675 { .freq = 337500, .vco = 8100 },
5676 { .freq = 432000, .vco = 8640 },
5677 { .freq = 450000, .vco = 8100 },
5678 { .freq = 540000, .vco = 8100 },
5679 { .freq = 617140, .vco = 8640 },
5680 { .freq = 675000, .vco = 8100 },
5681 };
5682
5683 static unsigned int skl_cdclk_decimal(unsigned int freq)
5684 {
5685 return (freq - 1000) / 500;
5686 }
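
/*
 * Example of the encoding above (illustration only): 337500 kHz encodes to
 * (337500 - 1000) / 500 = 673, i.e. 336.5 MHz in .1 binary fixpoint,
 * matching the hardware's -1 MHz offset.
 */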
5687
5688 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5689 {
5690 unsigned int i;
5691
5692 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5693 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5694
5695 if (e->freq == freq)
5696 return e->vco;
5697 }
5698
5699 return 8100;
5700 }
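
/*
 * Each cdclk in the table above is an (approximate) integer divide of its
 * VCO: e.g. 540000 kHz = 8100000 / 15, while 617140 kHz approximates
 * 8640000 / 14 (truncated in the table). That is why every frequency is
 * tied to exactly one VCO.
 */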
5701
5702 static void
5703 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5704 {
5705 unsigned int min_freq;
5706 u32 val;
5707
5708 	/* select the minimum CDCLK before enabling DPLL 0 */
5713 if (required_vco == 8640)
5714 min_freq = 308570;
5715 else
5716 min_freq = 337500;
5717
5718 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5719
5720 I915_WRITE(CDCLK_CTL, val);
5721 POSTING_READ(CDCLK_CTL);
5722
5723 /*
5724 * We always enable DPLL0 with the lowest link rate possible, but still
5725 * taking into account the VCO required to operate the eDP panel at the
5726 * desired frequency. The usual DP link rates operate with a VCO of
5727 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5728 * The modeset code is responsible for the selection of the exact link
5729 * rate later on, with the constraint of choosing a frequency that
5730 * works with required_vco.
5731 */
5732 val = I915_READ(DPLL_CTRL1);
5733
5734 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5735 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5736 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5737 if (required_vco == 8640)
5738 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5739 SKL_DPLL0);
5740 else
5741 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5742 SKL_DPLL0);
5743
5744 I915_WRITE(DPLL_CTRL1, val);
5745 POSTING_READ(DPLL_CTRL1);
5746
5747 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5748
5749 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5750 DRM_ERROR("DPLL0 not locked\n");
5751 }
5752
5753 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5754 {
5755 int ret;
5756 u32 val;
5757
5758 /* inform PCU we want to change CDCLK */
5759 val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5760 mutex_lock(&dev_priv->rps.hw_lock);
5761 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5762 mutex_unlock(&dev_priv->rps.hw_lock);
5763
5764 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5765 }
5766
5767 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5768 {
5769 unsigned int i;
5770
5771 for (i = 0; i < 15; i++) {
5772 if (skl_cdclk_pcu_ready(dev_priv))
5773 return true;
5774 udelay(10);
5775 }
5776
5777 return false;
5778 }
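
/*
 * Note: the poll above gives the PCU roughly 15 * 10us = 150us to signal
 * readiness before we give up on the cdclk change.
 */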
5779
5780 static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5781 {
5782 struct drm_device *dev = dev_priv->dev;
5783 u32 freq_select, pcu_ack;
5784
5785 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz\n", freq);
5786
5787 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5788 DRM_ERROR("failed to inform PCU about cdclk change\n");
5789 return;
5790 }
5791
5792 /* set CDCLK_CTL */
5793 	switch (freq) {
5794 case 450000:
5795 case 432000:
5796 freq_select = CDCLK_FREQ_450_432;
5797 pcu_ack = 1;
5798 break;
5799 case 540000:
5800 freq_select = CDCLK_FREQ_540;
5801 pcu_ack = 2;
5802 break;
5803 case 308570:
5804 case 337500:
5805 default:
5806 freq_select = CDCLK_FREQ_337_308;
5807 pcu_ack = 0;
5808 break;
5809 case 617140:
5810 case 675000:
5811 freq_select = CDCLK_FREQ_675_617;
5812 pcu_ack = 3;
5813 break;
5814 }
5815
5816 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5817 POSTING_READ(CDCLK_CTL);
5818
5819 /* inform PCU of the change */
5820 mutex_lock(&dev_priv->rps.hw_lock);
5821 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5822 mutex_unlock(&dev_priv->rps.hw_lock);
5823
5824 intel_update_cdclk(dev);
5825 }
5826
5827 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5828 {
5829 /* disable DBUF power */
5830 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5831 POSTING_READ(DBUF_CTL);
5832
5833 udelay(10);
5834
5835 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5836 DRM_ERROR("DBuf power disable timeout\n");
5837
5838 /* disable DPLL0 */
5839 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5840 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5841 DRM_ERROR("Couldn't disable DPLL0\n");
5842 }
5843
5844 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5845 {
5846 unsigned int required_vco;
5847
5848 /* DPLL0 not enabled (happens on early BIOS versions) */
5849 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5850 /* enable DPLL0 */
5851 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5852 skl_dpll0_enable(dev_priv, required_vco);
5853 }
5854
5855 /* set CDCLK to the frequency the BIOS chose */
5856 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5857
5858 /* enable DBUF power */
5859 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5860 POSTING_READ(DBUF_CTL);
5861
5862 udelay(10);
5863
5864 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5865 DRM_ERROR("DBuf power enable timeout\n");
5866 }
5867
5868 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5869 {
5870 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5871 uint32_t cdctl = I915_READ(CDCLK_CTL);
5872 int freq = dev_priv->skl_boot_cdclk;
5873
5874 /*
5875 	 * Check if the pre-OS initialized the display.
5876 	 * There is a SWF18 scratchpad register, set by the pre-OS,
5877 	 * which OS drivers can use to check that status.
5878 */
5879 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5880 goto sanitize;
5881
5882 	/* Is the PLL enabled and locked? */
5883 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5884 goto sanitize;
5885
5886 	/* DPLL okay; verify the cdclk.
5887 	 *
5888 	 * In some instances the frequency selection is correct but the
5889 	 * decimal part is programmed wrong by the BIOS when the pre-OS
5890 	 * does not enable the display. Verify that case as well.
5891 */
5892 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5893 /* All well; nothing to sanitize */
5894 return false;
5895 sanitize:
5896 /*
5897 	 * For now initialize with the max cdclk until
5898 	 * we get dynamic cdclk support.
5899 	 */
5900 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5901 skl_init_cdclk(dev_priv);
5902
5903 /* we did have to sanitize */
5904 return true;
5905 }
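
/*
 * Worked example of the sanitize check above (illustration only): with a
 * 450000 kHz boot cdclk the CDCLK_CTL decimal field must read
 * (450000 - 1000) / 500 = 898; any other value means the BIOS left the
 * register inconsistent and we reprogram it.
 */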
5906
5907 /* Adjust CDclk dividers to allow high res or save power if possible */
5908 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5909 {
5910 struct drm_i915_private *dev_priv = dev->dev_private;
5911 u32 val, cmd;
5912
5913 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5914 != dev_priv->cdclk_freq);
5915
5916 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5917 cmd = 2;
5918 else if (cdclk == 266667)
5919 cmd = 1;
5920 else
5921 cmd = 0;
5922
5923 mutex_lock(&dev_priv->rps.hw_lock);
5924 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5925 val &= ~DSPFREQGUAR_MASK;
5926 val |= (cmd << DSPFREQGUAR_SHIFT);
5927 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5928 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5929 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5930 50)) {
5931 DRM_ERROR("timed out waiting for CDclk change\n");
5932 }
5933 mutex_unlock(&dev_priv->rps.hw_lock);
5934
5935 mutex_lock(&dev_priv->sb_lock);
5936
5937 if (cdclk == 400000) {
5938 u32 divider;
5939
5940 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5941
5942 /* adjust cdclk divider */
5943 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5944 val &= ~CCK_FREQUENCY_VALUES;
5945 val |= divider;
5946 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5947
5948 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5949 CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5950 50))
5951 DRM_ERROR("timed out waiting for CDclk change\n");
5952 }
5953
5954 /* adjust self-refresh exit latency value */
5955 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5956 val &= ~0x7f;
5957
5958 /*
5959 * For high bandwidth configs, we set a higher latency in the bunit
5960 * so that the core display fetch happens in time to avoid underruns.
5961 */
5962 if (cdclk == 400000)
5963 val |= 4500 / 250; /* 4.5 usec */
5964 else
5965 val |= 3000 / 250; /* 3.0 usec */
5966 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5967
5968 mutex_unlock(&dev_priv->sb_lock);
5969
5970 intel_update_cdclk(dev);
5971 }
5972
5973 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5974 {
5975 struct drm_i915_private *dev_priv = dev->dev_private;
5976 u32 val, cmd;
5977
5978 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5979 != dev_priv->cdclk_freq);
5980
5981 switch (cdclk) {
5982 case 333333:
5983 case 320000:
5984 case 266667:
5985 case 200000:
5986 break;
5987 default:
5988 MISSING_CASE(cdclk);
5989 return;
5990 }
5991
5992 /*
5993 * Specs are full of misinformation, but testing on actual
5994 * hardware has shown that we just need to write the desired
5995 * CCK divider into the Punit register.
5996 */
5997 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
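	/* e.g. a 1600000 kHz HPLL targeting 320000 kHz gives cmd = 3200000 / 320000 - 1 = 9 */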
5998
5999 mutex_lock(&dev_priv->rps.hw_lock);
6000 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6001 val &= ~DSPFREQGUAR_MASK_CHV;
6002 val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
6003 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
6004 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
6005 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
6006 50)) {
6007 DRM_ERROR("timed out waiting for CDclk change\n");
6008 }
6009 mutex_unlock(&dev_priv->rps.hw_lock);
6010
6011 intel_update_cdclk(dev);
6012 }
6013
6014 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
6015 int max_pixclk)
6016 {
6017 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
6018 int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
6019
6020 /*
6021 * Really only a few cases to deal with, as only 4 CDclks are supported:
6022 * 200MHz
6023 * 267MHz
6024 * 320/333MHz (depends on HPLL freq)
6025 * 400MHz (VLV only)
6026 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
6027 * of the lower bin and adjust if needed.
6028 *
6029 * We seem to get an unstable or solid color picture at 200MHz.
6030 * Not sure what's wrong. For now use 200MHz only when all pipes
6031 * are off.
6032 */
6033 if (!IS_CHERRYVIEW(dev_priv) &&
6034 max_pixclk > freq_320*limit/100)
6035 return 400000;
6036 else if (max_pixclk > 266667*limit/100)
6037 return freq_320;
6038 else if (max_pixclk > 0)
6039 return 266667;
6040 else
6041 return 200000;
6042 }
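
/*
 * Worked example (illustration only): on VLV (limit 90) a 250000 kHz max
 * pixclk exceeds 266667 * 90 / 100 = 240000 kHz, so the 320/333 MHz bin
 * is selected; 230000 kHz would still fit the 266667 kHz bin.
 */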
6043
6044 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
6045 int max_pixclk)
6046 {
6047 /*
6048 * FIXME:
6049 * - remove the guardband, it's not needed on BXT
6050 * - set 19.2MHz bypass frequency if there are no active pipes
6051 */
6052 if (max_pixclk > 576000*9/10)
6053 return 624000;
6054 else if (max_pixclk > 384000*9/10)
6055 return 576000;
6056 else if (max_pixclk > 288000*9/10)
6057 return 384000;
6058 else if (max_pixclk > 144000*9/10)
6059 return 288000;
6060 else
6061 return 144000;
6062 }
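
/*
 * e.g. a 520000 kHz max pixclk exceeds 576000 * 9 / 10 = 518400 kHz and
 * therefore selects 624000 kHz, while 500000 kHz would fit the 576000 kHz
 * step.
 */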
6063
6064 /* Compute the max pixel clock for the new configuration. Uses the atomic
6065  * state if that's non-NULL, looks at the current state otherwise. */
6066 static int intel_mode_max_pixclk(struct drm_device *dev,
6067 struct drm_atomic_state *state)
6068 {
6069 struct intel_crtc *intel_crtc;
6070 struct intel_crtc_state *crtc_state;
6071 int max_pixclk = 0;
6072
6073 for_each_intel_crtc(dev, intel_crtc) {
6074 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6075 if (IS_ERR(crtc_state))
6076 return PTR_ERR(crtc_state);
6077
6078 if (!crtc_state->base.enable)
6079 continue;
6080
6081 max_pixclk = max(max_pixclk,
6082 crtc_state->base.adjusted_mode.crtc_clock);
6083 }
6084
6085 return max_pixclk;
6086 }
6087
6088 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
6089 {
6090 struct drm_device *dev = state->dev;
6091 struct drm_i915_private *dev_priv = dev->dev_private;
6092 int max_pixclk = intel_mode_max_pixclk(dev, state);
6093
6094 if (max_pixclk < 0)
6095 return max_pixclk;
6096
6097 to_intel_atomic_state(state)->cdclk =
6098 valleyview_calc_cdclk(dev_priv, max_pixclk);
6099
6100 return 0;
6101 }
6102
6103 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6104 {
6105 struct drm_device *dev = state->dev;
6106 struct drm_i915_private *dev_priv = dev->dev_private;
6107 int max_pixclk = intel_mode_max_pixclk(dev, state);
6108
6109 if (max_pixclk < 0)
6110 return max_pixclk;
6111
6112 to_intel_atomic_state(state)->cdclk =
6113 broxton_calc_cdclk(dev_priv, max_pixclk);
6114
6115 return 0;
6116 }
6117
6118 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6119 {
6120 unsigned int credits, default_credits;
6121
6122 if (IS_CHERRYVIEW(dev_priv))
6123 default_credits = PFI_CREDIT(12);
6124 else
6125 default_credits = PFI_CREDIT(8);
6126
6127 if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
6128 /* CHV suggested value is 31 or 63 */
6129 if (IS_CHERRYVIEW(dev_priv))
6130 credits = PFI_CREDIT_63;
6131 else
6132 credits = PFI_CREDIT(15);
6133 } else {
6134 credits = default_credits;
6135 }
6136
6137 /*
6138 * WA - write default credits before re-programming
6139 * FIXME: should we also set the resend bit here?
6140 */
6141 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6142 default_credits);
6143
6144 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6145 credits | PFI_CREDIT_RESEND);
6146
6147 /*
6148 * FIXME is this guaranteed to clear
6149 * immediately or should we poll for it?
6150 */
6151 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6152 }
6153
6154 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6155 {
6156 struct drm_device *dev = old_state->dev;
6157 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
6158 struct drm_i915_private *dev_priv = dev->dev_private;
6159
6160 /*
6161 * FIXME: We can end up here with all power domains off, yet
6162 * with a CDCLK frequency other than the minimum. To account
6163 * for this take the PIPE-A power domain, which covers the HW
6164 * blocks needed for the following programming. This can be
6165 * removed once it's guaranteed that we get here either with
6166 * the minimum CDCLK set, or the required power domains
6167 * enabled.
6168 */
6169 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
6170
6171 if (IS_CHERRYVIEW(dev))
6172 cherryview_set_cdclk(dev, req_cdclk);
6173 else
6174 valleyview_set_cdclk(dev, req_cdclk);
6175
6176 vlv_program_pfi_credits(dev_priv);
6177
6178 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
6179 }
6180
6181 static void valleyview_crtc_enable(struct drm_crtc *crtc)
6182 {
6183 struct drm_device *dev = crtc->dev;
6184 struct drm_i915_private *dev_priv = to_i915(dev);
6185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6186 struct intel_encoder *encoder;
6187 int pipe = intel_crtc->pipe;
6188
6189 if (WARN_ON(intel_crtc->active))
6190 return;
6191
6192 if (intel_crtc->config->has_dp_encoder)
6193 intel_dp_set_m_n(intel_crtc, M1_N1);
6194
6195 intel_set_pipe_timings(intel_crtc);
6196
6197 	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6200 		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6201 		I915_WRITE(CHV_CANVAS(pipe), 0);
6202 	}
6203
6204 i9xx_set_pipeconf(intel_crtc);
6205
6206 intel_crtc->active = true;
6207
6208 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6209
6210 for_each_encoder_on_crtc(dev, crtc, encoder)
6211 if (encoder->pre_pll_enable)
6212 encoder->pre_pll_enable(encoder);
6213
6214 if (!intel_crtc->config->has_dsi_encoder) {
6215 if (IS_CHERRYVIEW(dev)) {
6216 chv_prepare_pll(intel_crtc, intel_crtc->config);
6217 chv_enable_pll(intel_crtc, intel_crtc->config);
6218 } else {
6219 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6220 vlv_enable_pll(intel_crtc, intel_crtc->config);
6221 }
6222 }
6223
6224 for_each_encoder_on_crtc(dev, crtc, encoder)
6225 if (encoder->pre_enable)
6226 encoder->pre_enable(encoder);
6227
6228 i9xx_pfit_enable(intel_crtc);
6229
6230 intel_crtc_load_lut(crtc);
6231
6232 intel_enable_pipe(intel_crtc);
6233
6234 assert_vblank_disabled(crtc);
6235 drm_crtc_vblank_on(crtc);
6236
6237 for_each_encoder_on_crtc(dev, crtc, encoder)
6238 encoder->enable(encoder);
6239 }
6240
6241 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6242 {
6243 struct drm_device *dev = crtc->base.dev;
6244 struct drm_i915_private *dev_priv = dev->dev_private;
6245
6246 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6247 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6248 }
6249
6250 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6251 {
6252 struct drm_device *dev = crtc->dev;
6253 struct drm_i915_private *dev_priv = to_i915(dev);
6254 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6255 struct intel_encoder *encoder;
6256 int pipe = intel_crtc->pipe;
6257
6258 if (WARN_ON(intel_crtc->active))
6259 return;
6260
6261 i9xx_set_pll_dividers(intel_crtc);
6262
6263 if (intel_crtc->config->has_dp_encoder)
6264 intel_dp_set_m_n(intel_crtc, M1_N1);
6265
6266 intel_set_pipe_timings(intel_crtc);
6267
6268 i9xx_set_pipeconf(intel_crtc);
6269
6270 intel_crtc->active = true;
6271
6272 if (!IS_GEN2(dev))
6273 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6274
6275 for_each_encoder_on_crtc(dev, crtc, encoder)
6276 if (encoder->pre_enable)
6277 encoder->pre_enable(encoder);
6278
6279 i9xx_enable_pll(intel_crtc);
6280
6281 i9xx_pfit_enable(intel_crtc);
6282
6283 intel_crtc_load_lut(crtc);
6284
6285 intel_update_watermarks(crtc);
6286 intel_enable_pipe(intel_crtc);
6287
6288 assert_vblank_disabled(crtc);
6289 drm_crtc_vblank_on(crtc);
6290
6291 for_each_encoder_on_crtc(dev, crtc, encoder)
6292 encoder->enable(encoder);
6293
6294 intel_fbc_enable(intel_crtc);
6295 }
6296
6297 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6298 {
6299 struct drm_device *dev = crtc->base.dev;
6300 struct drm_i915_private *dev_priv = dev->dev_private;
6301
6302 if (!crtc->config->gmch_pfit.control)
6303 return;
6304
6305 assert_pipe_disabled(dev_priv, crtc->pipe);
6306
6307 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6308 I915_READ(PFIT_CONTROL));
6309 I915_WRITE(PFIT_CONTROL, 0);
6310 }
6311
6312 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6313 {
6314 struct drm_device *dev = crtc->dev;
6315 struct drm_i915_private *dev_priv = dev->dev_private;
6316 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6317 struct intel_encoder *encoder;
6318 int pipe = intel_crtc->pipe;
6319
6320 /*
6321 * On gen2 planes are double buffered but the pipe isn't, so we must
6322 * wait for planes to fully turn off before disabling the pipe.
6323 * We also need to wait on all gmch platforms because of the
6324 * self-refresh mode constraint explained above.
6325 */
6326 intel_wait_for_vblank(dev, pipe);
6327
6328 for_each_encoder_on_crtc(dev, crtc, encoder)
6329 encoder->disable(encoder);
6330
6331 drm_crtc_vblank_off(crtc);
6332 assert_vblank_disabled(crtc);
6333
6334 intel_disable_pipe(intel_crtc);
6335
6336 i9xx_pfit_disable(intel_crtc);
6337
6338 for_each_encoder_on_crtc(dev, crtc, encoder)
6339 if (encoder->post_disable)
6340 encoder->post_disable(encoder);
6341
6342 if (!intel_crtc->config->has_dsi_encoder) {
6343 if (IS_CHERRYVIEW(dev))
6344 chv_disable_pll(dev_priv, pipe);
6345 else if (IS_VALLEYVIEW(dev))
6346 vlv_disable_pll(dev_priv, pipe);
6347 else
6348 i9xx_disable_pll(intel_crtc);
6349 }
6350
6351 for_each_encoder_on_crtc(dev, crtc, encoder)
6352 if (encoder->post_pll_disable)
6353 encoder->post_pll_disable(encoder);
6354
6355 if (!IS_GEN2(dev))
6356 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6357
6358 intel_fbc_disable_crtc(intel_crtc);
6359 }
6360
6361 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6362 {
6363 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6364 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6365 enum intel_display_power_domain domain;
6366 unsigned long domains;
6367
6368 if (!intel_crtc->active)
6369 return;
6370
6371 if (to_intel_plane_state(crtc->primary->state)->visible) {
6372 WARN_ON(intel_crtc->unpin_work);
6373
6374 intel_pre_disable_primary(crtc);
6375
6376 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6377 to_intel_plane_state(crtc->primary->state)->visible = false;
6378 }
6379
6380 dev_priv->display.crtc_disable(crtc);
6381 intel_crtc->active = false;
6382 intel_update_watermarks(crtc);
6383 intel_disable_shared_dpll(intel_crtc);
6384
6385 domains = intel_crtc->enabled_power_domains;
6386 for_each_power_domain(domain, domains)
6387 intel_display_power_put(dev_priv, domain);
6388 intel_crtc->enabled_power_domains = 0;
6389 }
6390
6391 /*
6392  * Turn all CRTCs off, but do not adjust state.
6393 * This has to be paired with a call to intel_modeset_setup_hw_state.
6394 */
6395 int intel_display_suspend(struct drm_device *dev)
6396 {
6397 struct drm_mode_config *config = &dev->mode_config;
6398 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
6399 struct drm_atomic_state *state;
6400 struct drm_crtc *crtc;
6401 unsigned crtc_mask = 0;
6402 int ret = 0;
6403
6404 if (WARN_ON(!ctx))
6405 return 0;
6406
6407 lockdep_assert_held(&ctx->ww_ctx);
6408 state = drm_atomic_state_alloc(dev);
6409 if (WARN_ON(!state))
6410 return -ENOMEM;
6411
6412 state->acquire_ctx = ctx;
6413 state->allow_modeset = true;
6414
6415 for_each_crtc(dev, crtc) {
6416 struct drm_crtc_state *crtc_state =
6417 drm_atomic_get_crtc_state(state, crtc);
6418
6419 ret = PTR_ERR_OR_ZERO(crtc_state);
6420 if (ret)
6421 goto free;
6422
6423 if (!crtc_state->active)
6424 continue;
6425
6426 crtc_state->active = false;
6427 crtc_mask |= 1 << drm_crtc_index(crtc);
6428 }
6429
6430 if (crtc_mask) {
6431 ret = drm_atomic_commit(state);
6432
6433 if (!ret) {
6434 for_each_crtc(dev, crtc)
6435 if (crtc_mask & (1 << drm_crtc_index(crtc)))
6436 crtc->state->active = true;
6437
6438 return ret;
6439 }
6440 }
6441
6442 free:
6443 if (ret)
6444 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6445 drm_atomic_state_free(state);
6446 return ret;
6447 }
6448
6449 void intel_encoder_destroy(struct drm_encoder *encoder)
6450 {
6451 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6452
6453 drm_encoder_cleanup(encoder);
6454 kfree(intel_encoder);
6455 }
6456
6457 /* Cross check the actual hw state with our own modeset state tracking (and its
6458 * internal consistency). */
6459 static void intel_connector_check_state(struct intel_connector *connector)
6460 {
6461 struct drm_crtc *crtc = connector->base.state->crtc;
6462
6463 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6464 connector->base.base.id,
6465 connector->base.name);
6466
6467 if (connector->get_hw_state(connector)) {
6468 struct intel_encoder *encoder = connector->encoder;
6469 struct drm_connector_state *conn_state = connector->base.state;
6470
6471 I915_STATE_WARN(!crtc,
6472 "connector enabled without attached crtc\n");
6473
6474 if (!crtc)
6475 return;
6476
6477 I915_STATE_WARN(!crtc->state->active,
6478 "connector is active, but attached crtc isn't\n");
6479
6480 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6481 return;
6482
6483 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6484 "atomic encoder doesn't match attached encoder\n");
6485
6486 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6487 "attached encoder crtc differs from connector crtc\n");
6488 } else {
6489 I915_STATE_WARN(crtc && crtc->state->active,
6490 "attached crtc is active, but connector isn't\n");
6491 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6492 "best encoder set without crtc!\n");
6493 }
6494 }
6495
6496 int intel_connector_init(struct intel_connector *connector)
6497 {
6498 drm_atomic_helper_connector_reset(&connector->base);
6499
6500 if (!connector->base.state)
6501 return -ENOMEM;
6502
6503 return 0;
6504 }
6505
6506 struct intel_connector *intel_connector_alloc(void)
6507 {
6508 struct intel_connector *connector;
6509
6510 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6511 if (!connector)
6512 return NULL;
6513
6514 if (intel_connector_init(connector) < 0) {
6515 kfree(connector);
6516 return NULL;
6517 }
6518
6519 return connector;
6520 }
6521
6522 /* Simple connector->get_hw_state implementation for encoders that support only
6523 * one connector and no cloning and hence the encoder state determines the state
6524 * of the connector. */
6525 bool intel_connector_get_hw_state(struct intel_connector *connector)
6526 {
6527 enum pipe pipe = 0;
6528 struct intel_encoder *encoder = connector->encoder;
6529
6530 return encoder->get_hw_state(encoder, &pipe);
6531 }
6532
6533 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6534 {
6535 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6536 return crtc_state->fdi_lanes;
6537
6538 return 0;
6539 }
6540
6541 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6542 struct intel_crtc_state *pipe_config)
6543 {
6544 struct drm_atomic_state *state = pipe_config->base.state;
6545 struct intel_crtc *other_crtc;
6546 struct intel_crtc_state *other_crtc_state;
6547
6548 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6549 pipe_name(pipe), pipe_config->fdi_lanes);
6550 if (pipe_config->fdi_lanes > 4) {
6551 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6552 pipe_name(pipe), pipe_config->fdi_lanes);
6553 return -EINVAL;
6554 }
6555
6556 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6557 if (pipe_config->fdi_lanes > 2) {
6558 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6559 pipe_config->fdi_lanes);
6560 return -EINVAL;
6561 } else {
6562 return 0;
6563 }
6564 }
6565
6566 if (INTEL_INFO(dev)->num_pipes == 2)
6567 return 0;
6568
6569 /* Ivybridge 3 pipe is really complicated */
6570 switch (pipe) {
6571 case PIPE_A:
6572 return 0;
6573 case PIPE_B:
6574 if (pipe_config->fdi_lanes <= 2)
6575 return 0;
6576
6577 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6578 other_crtc_state =
6579 intel_atomic_get_crtc_state(state, other_crtc);
6580 if (IS_ERR(other_crtc_state))
6581 return PTR_ERR(other_crtc_state);
6582
6583 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6584 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6585 pipe_name(pipe), pipe_config->fdi_lanes);
6586 return -EINVAL;
6587 }
6588 return 0;
6589 case PIPE_C:
6590 if (pipe_config->fdi_lanes > 2) {
6591 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6592 pipe_name(pipe), pipe_config->fdi_lanes);
6593 return -EINVAL;
6594 }
6595
6596 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6597 other_crtc_state =
6598 intel_atomic_get_crtc_state(state, other_crtc);
6599 if (IS_ERR(other_crtc_state))
6600 return PTR_ERR(other_crtc_state);
6601
6602 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6603 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6604 return -EINVAL;
6605 }
6606 return 0;
6607 default:
6608 BUG();
6609 }
6610 }
6611
6612 #define RETRY 1
6613 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6614 struct intel_crtc_state *pipe_config)
6615 {
6616 struct drm_device *dev = intel_crtc->base.dev;
6617 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6618 int lane, link_bw, fdi_dotclock, ret;
6619 bool needs_recompute = false;
6620
6621 retry:
6622 /* FDI is a binary signal running at ~2.7GHz, encoding
6623 * each output octet as 10 bits. The actual frequency
6624 * is stored as a divider into a 100MHz clock, and the
6625 * mode pixel clock is stored in units of 1KHz.
6626 * Hence the bw of each lane in terms of the mode signal
6627 * is:
6628 */
6629 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
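	/* e.g. a divider value of 27 yields 27 * MHz(100) / KHz(1) / 10 = 270000 kHz per lane */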
6630
6631 fdi_dotclock = adjusted_mode->crtc_clock;
6632
6633 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6634 pipe_config->pipe_bpp);
6635
6636 pipe_config->fdi_lanes = lane;
6637
6638 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6639 link_bw, &pipe_config->fdi_m_n);
6640
6641 ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
6642 intel_crtc->pipe, pipe_config);
6643 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6644 pipe_config->pipe_bpp -= 2*3;
6645 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6646 pipe_config->pipe_bpp);
6647 needs_recompute = true;
6648 pipe_config->bw_constrained = true;
6649
6650 goto retry;
6651 }
6652
6653 if (needs_recompute)
6654 return RETRY;
6655
6656 return ret;
6657 }
6658
6659 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6660 struct intel_crtc_state *pipe_config)
6661 {
6662 if (pipe_config->pipe_bpp > 24)
6663 return false;
6664
6665 /* HSW can handle pixel rate up to cdclk? */
6666 if (IS_HASWELL(dev_priv->dev))
6667 return true;
6668
6669 /*
6670 * We compare against max which means we must take
6671 * the increased cdclk requirement into account when
6672 * calculating the new cdclk.
6673 *
6674 	 * Should measure whether using a lower cdclk w/o IPS would save more power.
6675 */
6676 return ilk_pipe_pixel_rate(pipe_config) <=
6677 dev_priv->max_cdclk_freq * 95 / 100;
6678 }
6679
6680 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6681 struct intel_crtc_state *pipe_config)
6682 {
6683 struct drm_device *dev = crtc->base.dev;
6684 struct drm_i915_private *dev_priv = dev->dev_private;
6685
6686 pipe_config->ips_enabled = i915.enable_ips &&
6687 hsw_crtc_supports_ips(crtc) &&
6688 pipe_config_supports_ips(dev_priv, pipe_config);
6689 }
6690
6691 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6692 {
6693 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6694
6695 /* GDG double wide on either pipe, otherwise pipe A only */
6696 return INTEL_INFO(dev_priv)->gen < 4 &&
6697 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6698 }
6699
6700 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6701 struct intel_crtc_state *pipe_config)
6702 {
6703 struct drm_device *dev = crtc->base.dev;
6704 struct drm_i915_private *dev_priv = dev->dev_private;
6705 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6706
6707 /* FIXME should check pixel clock limits on all platforms */
6708 if (INTEL_INFO(dev)->gen < 4) {
6709 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6710
6711 /*
6712 * Enable double wide mode when the dot clock
6713 * is > 90% of the (display) core speed.
6714 */
6715 if (intel_crtc_supports_double_wide(crtc) &&
6716 adjusted_mode->crtc_clock > clock_limit) {
6717 clock_limit *= 2;
6718 pipe_config->double_wide = true;
6719 }
6720
6721 if (adjusted_mode->crtc_clock > clock_limit) {
6722 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6723 adjusted_mode->crtc_clock, clock_limit,
6724 yesno(pipe_config->double_wide));
6725 return -EINVAL;
6726 }
6727 }
6728
6729 /*
6730 * Pipe horizontal size must be even in:
6731 * - DVO ganged mode
6732 * - LVDS dual channel mode
6733 * - Double wide pipe
6734 */
6735 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6736 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6737 pipe_config->pipe_src_w &= ~1;
6738
6739 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6740 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6741 */
6742 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6743 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6744 return -EINVAL;
6745
6746 if (HAS_IPS(dev))
6747 hsw_compute_ips_config(crtc, pipe_config);
6748
6749 if (pipe_config->has_pch_encoder)
6750 return ironlake_fdi_compute_config(crtc, pipe_config);
6751
6752 return 0;
6753 }
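
/*
 * Double wide example (illustration only): a gen3 part with a 266667 kHz
 * cdclk has a 240000 kHz single wide limit (266667 * 9 / 10); a 320000 kHz
 * mode therefore requires double wide, which doubles the limit to
 * 480000 kHz.
 */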
6754
6755 static int skylake_get_display_clock_speed(struct drm_device *dev)
6756 {
6757 struct drm_i915_private *dev_priv = to_i915(dev);
6758 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6759 uint32_t cdctl = I915_READ(CDCLK_CTL);
6760 uint32_t linkrate;
6761
6762 if (!(lcpll1 & LCPLL_PLL_ENABLE))
6763 return 24000; /* 24MHz is the cd freq with NSSC ref */
6764
6765 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6766 return 540000;
6767
6768 linkrate = (I915_READ(DPLL_CTRL1) &
6769 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6770
6771 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6772 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6773 /* vco 8640 */
6774 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6775 case CDCLK_FREQ_450_432:
6776 return 432000;
6777 case CDCLK_FREQ_337_308:
6778 return 308570;
6779 case CDCLK_FREQ_675_617:
6780 return 617140;
6781 default:
6782 WARN(1, "Unknown cd freq selection\n");
6783 }
6784 } else {
6785 /* vco 8100 */
6786 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6787 case CDCLK_FREQ_450_432:
6788 return 450000;
6789 case CDCLK_FREQ_337_308:
6790 return 337500;
6791 case CDCLK_FREQ_675_617:
6792 return 675000;
6793 default:
6794 WARN(1, "Unknown cd freq selection\n");
6795 }
6796 }
6797
6798 /* error case, do as if DPLL0 isn't enabled */
6799 return 24000;
6800 }
6801
6802 static int broxton_get_display_clock_speed(struct drm_device *dev)
6803 {
6804 struct drm_i915_private *dev_priv = to_i915(dev);
6805 uint32_t cdctl = I915_READ(CDCLK_CTL);
6806 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6807 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6808 int cdclk;
6809
6810 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6811 return 19200;
6812
6813 cdclk = 19200 * pll_ratio / 2;
6814
6815 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6816 case BXT_CDCLK_CD2X_DIV_SEL_1:
6817 return cdclk; /* 576MHz or 624MHz */
6818 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6819 return cdclk * 2 / 3; /* 384MHz */
6820 case BXT_CDCLK_CD2X_DIV_SEL_2:
6821 return cdclk / 2; /* 288MHz */
6822 case BXT_CDCLK_CD2X_DIV_SEL_4:
6823 return cdclk / 4; /* 144MHz */
6824 }
6825
6826 /* error case, do as if DE PLL isn't enabled */
6827 return 19200;
6828 }
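
/*
 * e.g. a DE PLL ratio of 60 gives 19200 * 60 / 2 = 576000 kHz before the
 * CD2X divider, which the 1.5 divider then turns into 384000 kHz.
 */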
6829
6830 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6831 {
6832 struct drm_i915_private *dev_priv = dev->dev_private;
6833 uint32_t lcpll = I915_READ(LCPLL_CTL);
6834 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6835
6836 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6837 return 800000;
6838 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6839 return 450000;
6840 else if (freq == LCPLL_CLK_FREQ_450)
6841 return 450000;
6842 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6843 return 540000;
6844 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6845 return 337500;
6846 else
6847 return 675000;
6848 }
6849
6850 static int haswell_get_display_clock_speed(struct drm_device *dev)
6851 {
6852 struct drm_i915_private *dev_priv = dev->dev_private;
6853 uint32_t lcpll = I915_READ(LCPLL_CTL);
6854 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6855
6856 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6857 return 800000;
6858 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6859 return 450000;
6860 else if (freq == LCPLL_CLK_FREQ_450)
6861 return 450000;
6862 else if (IS_HSW_ULT(dev))
6863 return 337500;
6864 else
6865 return 540000;
6866 }
6867
6868 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6869 {
6870 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6871 CCK_DISPLAY_CLOCK_CONTROL);
6872 }
6873
6874 static int ilk_get_display_clock_speed(struct drm_device *dev)
6875 {
6876 return 450000;
6877 }
6878
6879 static int i945_get_display_clock_speed(struct drm_device *dev)
6880 {
6881 return 400000;
6882 }
6883
6884 static int i915_get_display_clock_speed(struct drm_device *dev)
6885 {
6886 return 333333;
6887 }
6888
6889 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6890 {
6891 return 200000;
6892 }
6893
6894 static int pnv_get_display_clock_speed(struct drm_device *dev)
6895 {
6896 u16 gcfgc = 0;
6897
6898 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6899
6900 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6901 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6902 return 266667;
6903 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6904 return 333333;
6905 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6906 return 444444;
6907 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6908 return 200000;
6909 default:
6910 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6911 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6912 return 133333;
6913 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6914 return 166667;
6915 }
6916 }
6917
6918 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6919 {
6920 u16 gcfgc = 0;
6921
6922 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6923
6924 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6925 return 133333;
6926 else {
6927 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6928 case GC_DISPLAY_CLOCK_333_MHZ:
6929 return 333333;
6930 default:
6931 case GC_DISPLAY_CLOCK_190_200_MHZ:
6932 return 190000;
6933 }
6934 }
6935 }
6936
6937 static int i865_get_display_clock_speed(struct drm_device *dev)
6938 {
6939 return 266667;
6940 }
6941
6942 static int i85x_get_display_clock_speed(struct drm_device *dev)
6943 {
6944 u16 hpllcc = 0;
6945
6946 /*
6947 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6948 * encoding is different :(
6949 * FIXME is this the right way to detect 852GM/852GMV?
6950 */
6951 if (dev->pdev->revision == 0x1)
6952 return 133333;
6953
6954 pci_bus_read_config_word(dev->pdev->bus,
6955 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6956
6957 /* Assume that the hardware is in the high speed state. This
6958 * should be the default.
6959 */
6960 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6961 case GC_CLOCK_133_200:
6962 case GC_CLOCK_133_200_2:
6963 case GC_CLOCK_100_200:
6964 return 200000;
6965 case GC_CLOCK_166_250:
6966 return 250000;
6967 case GC_CLOCK_100_133:
6968 return 133333;
6969 case GC_CLOCK_133_266:
6970 case GC_CLOCK_133_266_2:
6971 case GC_CLOCK_166_266:
6972 return 266667;
6973 }
6974
6975 /* Shouldn't happen */
6976 return 0;
6977 }
6978
6979 static int i830_get_display_clock_speed(struct drm_device *dev)
6980 {
6981 return 133333;
6982 }
6983
6984 static unsigned int intel_hpll_vco(struct drm_device *dev)
6985 {
6986 struct drm_i915_private *dev_priv = dev->dev_private;
6987 static const unsigned int blb_vco[8] = {
6988 [0] = 3200000,
6989 [1] = 4000000,
6990 [2] = 5333333,
6991 [3] = 4800000,
6992 [4] = 6400000,
6993 };
6994 static const unsigned int pnv_vco[8] = {
6995 [0] = 3200000,
6996 [1] = 4000000,
6997 [2] = 5333333,
6998 [3] = 4800000,
6999 [4] = 2666667,
7000 };
7001 static const unsigned int cl_vco[8] = {
7002 [0] = 3200000,
7003 [1] = 4000000,
7004 [2] = 5333333,
7005 [3] = 6400000,
7006 [4] = 3333333,
7007 [5] = 3566667,
7008 [6] = 4266667,
7009 };
7010 static const unsigned int elk_vco[8] = {
7011 [0] = 3200000,
7012 [1] = 4000000,
7013 [2] = 5333333,
7014 [3] = 4800000,
7015 };
7016 static const unsigned int ctg_vco[8] = {
7017 [0] = 3200000,
7018 [1] = 4000000,
7019 [2] = 5333333,
7020 [3] = 6400000,
7021 [4] = 2666667,
7022 [5] = 4266667,
7023 };
7024 const unsigned int *vco_table;
7025 unsigned int vco;
7026 uint8_t tmp = 0;
7027
7028 /* FIXME other chipsets? */
7029 if (IS_GM45(dev))
7030 vco_table = ctg_vco;
7031 else if (IS_G4X(dev))
7032 vco_table = elk_vco;
7033 else if (IS_CRESTLINE(dev))
7034 vco_table = cl_vco;
7035 else if (IS_PINEVIEW(dev))
7036 vco_table = pnv_vco;
7037 else if (IS_G33(dev))
7038 vco_table = blb_vco;
7039 else
7040 return 0;
7041
7042 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
7043
7044 vco = vco_table[tmp & 0x7];
7045 if (vco == 0)
7046 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
7047 else
7048 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
7049
7050 return vco;
7051 }
7052
7053 static int gm45_get_display_clock_speed(struct drm_device *dev)
7054 {
7055 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7056 uint16_t tmp = 0;
7057
7058 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7059
7060 cdclk_sel = (tmp >> 12) & 0x1;
7061
7062 switch (vco) {
7063 case 2666667:
7064 case 4000000:
7065 case 5333333:
7066 return cdclk_sel ? 333333 : 222222;
7067 case 3200000:
7068 return cdclk_sel ? 320000 : 228571;
7069 default:
7070 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7071 return 222222;
7072 }
7073 }
7074
7075 static int i965gm_get_display_clock_speed(struct drm_device *dev)
7076 {
7077 static const uint8_t div_3200[] = { 16, 10, 8 };
7078 static const uint8_t div_4000[] = { 20, 12, 10 };
7079 static const uint8_t div_5333[] = { 24, 16, 14 };
7080 const uint8_t *div_table;
7081 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7082 uint16_t tmp = 0;
7083
7084 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7085
7086 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7087
7088 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7089 goto fail;
7090
7091 switch (vco) {
7092 case 3200000:
7093 div_table = div_3200;
7094 break;
7095 case 4000000:
7096 div_table = div_4000;
7097 break;
7098 case 5333333:
7099 div_table = div_5333;
7100 break;
7101 default:
7102 goto fail;
7103 }
7104
7105 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7106
7107 fail:
7108 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7109 return 200000;
7110 }
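
/*
 * e.g. a 5333333 kHz HPLL VCO with divider select 0 yields
 * DIV_ROUND_CLOSEST(5333333, 24) = 222222 kHz.
 */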
7111
7112 static int g33_get_display_clock_speed(struct drm_device *dev)
7113 {
7114 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
7115 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
7116 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7117 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7118 const uint8_t *div_table;
7119 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7120 uint16_t tmp = 0;
7121
7122 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7123
7124 cdclk_sel = (tmp >> 4) & 0x7;
7125
7126 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7127 goto fail;
7128
7129 switch (vco) {
7130 case 3200000:
7131 div_table = div_3200;
7132 break;
7133 case 4000000:
7134 div_table = div_4000;
7135 break;
7136 case 4800000:
7137 div_table = div_4800;
7138 break;
7139 case 5333333:
7140 div_table = div_5333;
7141 break;
7142 default:
7143 goto fail;
7144 }
7145
7146 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7147
7148 fail:
7149 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7150 return 190476;
7151 }
7152
7153 static void
7154 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
7155 {
7156 while (*num > DATA_LINK_M_N_MASK ||
7157 *den > DATA_LINK_M_N_MASK) {
7158 *num >>= 1;
7159 *den >>= 1;
7160 }
7161 }
7162
7163 static void compute_m_n(unsigned int m, unsigned int n,
7164 uint32_t *ret_m, uint32_t *ret_n)
7165 {
7166 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7167 *ret_m = div_u64((uint64_t) m * *ret_n, n);
7168 intel_reduce_m_n_ratio(ret_m, ret_n);
7169 }
7170
7171 void
7172 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7173 int pixel_clock, int link_clock,
7174 struct intel_link_m_n *m_n)
7175 {
7176 m_n->tu = 64;
7177
7178 compute_m_n(bits_per_pixel * pixel_clock,
7179 link_clock * nlanes * 8,
7180 &m_n->gmch_m, &m_n->gmch_n);
7181
7182 compute_m_n(pixel_clock, link_clock,
7183 &m_n->link_m, &m_n->link_n);
7184 }
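
/*
 * Worked example of the helpers above (illustration only, assuming
 * DATA_LINK_N_MAX is 2^23): a 148500 kHz pixel clock at 24 bpp over 4
 * lanes at a 270000 kHz link rate has a data ratio of
 * 24 * 148500 / (270000 * 4 * 8) = 0.4125, giving gmch_m = 3460300 with
 * gmch_n = 8388608; the link ratio 148500 / 270000 = 0.55 gives
 * link_m = 288358 with link_n = 524288.
 */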
7185
7186 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7187 {
7188 if (i915.panel_use_ssc >= 0)
7189 return i915.panel_use_ssc != 0;
7190 return dev_priv->vbt.lvds_use_ssc
7191 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
7192 }
7193
7194 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7195 int num_connectors)
7196 {
7197 struct drm_device *dev = crtc_state->base.crtc->dev;
7198 struct drm_i915_private *dev_priv = dev->dev_private;
7199 int refclk;
7200
7201 WARN_ON(!crtc_state->base.state);
7202
7203 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
7204 refclk = 100000;
7205 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7206 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
7207 refclk = dev_priv->vbt.lvds_ssc_freq;
7208 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7209 } else if (!IS_GEN2(dev)) {
7210 refclk = 96000;
7211 } else {
7212 refclk = 48000;
7213 }
7214
7215 return refclk;
7216 }
7217
7218 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
7219 {
7220 return (1 << dpll->n) << 16 | dpll->m2;
7221 }
7222
7223 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7224 {
7225 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
7226 }
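
/* e.g. n = 2, m1 = 10, m2 = 8 packs to (2 << 16) | (10 << 8) | 8 = 0x20a08 */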
7227
7228 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7229 struct intel_crtc_state *crtc_state,
7230 intel_clock_t *reduced_clock)
7231 {
7232 struct drm_device *dev = crtc->base.dev;
7233 u32 fp, fp2 = 0;
7234
7235 if (IS_PINEVIEW(dev)) {
7236 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7237 if (reduced_clock)
7238 fp2 = pnv_dpll_compute_fp(reduced_clock);
7239 } else {
7240 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7241 if (reduced_clock)
7242 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7243 }
7244
7245 crtc_state->dpll_hw_state.fp0 = fp;
7246
7247 crtc->lowfreq_avail = false;
7248 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7249 reduced_clock) {
7250 crtc_state->dpll_hw_state.fp1 = fp2;
7251 crtc->lowfreq_avail = true;
7252 } else {
7253 crtc_state->dpll_hw_state.fp1 = fp;
7254 }
7255 }
7256
7257 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
7258 				 enum pipe pipe)
7259 {
7260 u32 reg_val;
7261
7262 /*
7263 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7264 * and set it to a reasonable value instead.
7265 */
7266 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7267 reg_val &= 0xffffff00;
7268 reg_val |= 0x00000030;
7269 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7270
7271 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7272 reg_val &= 0x8cffffff;
7273 	reg_val |= 0x8c000000;
7274 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7275
7276 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7277 reg_val &= 0xffffff00;
7278 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7279
7280 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7281 reg_val &= 0x00ffffff;
7282 reg_val |= 0xb0000000;
7283 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7284 }
7285
7286 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7287 struct intel_link_m_n *m_n)
7288 {
7289 struct drm_device *dev = crtc->base.dev;
7290 struct drm_i915_private *dev_priv = dev->dev_private;
7291 int pipe = crtc->pipe;
7292
7293 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7294 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7295 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7296 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7297 }
7298
7299 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7300 struct intel_link_m_n *m_n,
7301 struct intel_link_m_n *m2_n2)
7302 {
7303 struct drm_device *dev = crtc->base.dev;
7304 struct drm_i915_private *dev_priv = dev->dev_private;
7305 int pipe = crtc->pipe;
7306 enum transcoder transcoder = crtc->config->cpu_transcoder;
7307
7308 if (INTEL_INFO(dev)->gen >= 5) {
7309 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7310 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7311 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7312 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7313 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7314 * for gen < 8) and if DRRS is supported (to make sure the
7315 * registers are not unnecessarily accessed).
7316 */
7317 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7318 crtc->config->has_drrs) {
7319 I915_WRITE(PIPE_DATA_M2(transcoder),
7320 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7321 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7322 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7323 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7324 }
7325 } else {
7326 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7327 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7328 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7329 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7330 }
7331 }
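
/*
 * Background for the M/N values programmed above (a sketch; the actual
 * computation lives in intel_link_compute_m_n()): the data M/N pair
 * encodes the ratio of pixel payload to total link bandwidth, and the
 * link M/N pair the ratio of pixel clock to link clock, roughly
 *
 *	data_m / data_n = (pixel_clock * bpp) / (link_clock * nlanes * 8)
 *	link_m / link_n = pixel_clock / link_clock
 *
 * TU_SIZE() folds the transfer unit size into the DATA register
 * alongside the gmch_m value.
 */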
7332
7333 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7334 {
7335 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7336
7337 if (m_n == M1_N1) {
7338 dp_m_n = &crtc->config->dp_m_n;
7339 dp_m2_n2 = &crtc->config->dp_m2_n2;
7340 } else if (m_n == M2_N2) {
7341
7342 /*
7343 * M2_N2 registers are not supported on this platform, so the
7344 * m2_n2 divider values need to be programmed into M1_N1 instead.
7345 */
7346 dp_m_n = &crtc->config->dp_m2_n2;
7347 } else {
7348 DRM_ERROR("Unsupported divider value\n");
7349 return;
7350 }
7351
7352 if (crtc->config->has_pch_encoder)
7353 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7354 else
7355 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7356 }
7357
7358 static void vlv_compute_dpll(struct intel_crtc *crtc,
7359 struct intel_crtc_state *pipe_config)
7360 {
7361 u32 dpll, dpll_md;
7362
7363 /*
7364 * Enable DPIO clock input. We should never disable the reference
7365 * clock for pipe B, since VGA hotplug / manual detection depends
7366 * on it.
7367 */
7368 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7369 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
7370 /* We should never disable this, set it here for state tracking */
7371 if (crtc->pipe == PIPE_B)
7372 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7373 dpll |= DPLL_VCO_ENABLE;
7374 pipe_config->dpll_hw_state.dpll = dpll;
7375
7376 dpll_md = (pipe_config->pixel_multiplier - 1)
7377 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7378 pipe_config->dpll_hw_state.dpll_md = dpll_md;
7379 }
7380
7381 static void vlv_prepare_pll(struct intel_crtc *crtc,
7382 const struct intel_crtc_state *pipe_config)
7383 {
7384 struct drm_device *dev = crtc->base.dev;
7385 struct drm_i915_private *dev_priv = dev->dev_private;
7386 int pipe = crtc->pipe;
7387 u32 mdiv;
7388 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7389 u32 coreclk, reg_val;
7390
7391 mutex_lock(&dev_priv->sb_lock);
7392
7393 bestn = pipe_config->dpll.n;
7394 bestm1 = pipe_config->dpll.m1;
7395 bestm2 = pipe_config->dpll.m2;
7396 bestp1 = pipe_config->dpll.p1;
7397 bestp2 = pipe_config->dpll.p2;
7398
7399 /* See eDP HDMI DPIO driver vbios notes doc */
7400
7401 /* PLL B needs special handling */
7402 if (pipe == PIPE_B)
7403 vlv_pllb_recal_opamp(dev_priv, pipe);
7404
7405 /* Set up Tx target for periodic Rcomp update */
7406 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7407
7408 /* Disable target IRef on PLL */
7409 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7410 reg_val &= 0x00ffffff;
7411 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7412
7413 /* Disable fast lock */
7414 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7415
7416 /* Set idtafcrecal before PLL is enabled */
7417 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7418 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7419 mdiv |= ((bestn << DPIO_N_SHIFT));
7420 mdiv |= (1 << DPIO_K_SHIFT);
7421
7422 /*
7423 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7424 * but we don't support that).
7425 * Note: don't use the DAC post divider as it seems unstable.
7426 */
7427 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7428 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7429
7430 mdiv |= DPIO_ENABLE_CALIBRATION;
7431 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7432
7433 /* Set HBR and RBR LPF coefficients */
7434 if (pipe_config->port_clock == 162000 ||
7435 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7436 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7437 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7438 0x009f0003);
7439 else
7440 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7441 0x00d0000f);
7442
7443 if (pipe_config->has_dp_encoder) {
7444 /* Use SSC source */
7445 if (pipe == PIPE_A)
7446 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7447 0x0df40000);
7448 else
7449 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7450 0x0df70000);
7451 } else { /* HDMI or VGA */
7452 /* Use bend source */
7453 if (pipe == PIPE_A)
7454 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7455 0x0df70000);
7456 else
7457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7458 0x0df40000);
7459 }
7460
7461 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7462 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7463 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7464 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7465 coreclk |= 0x01000000;
7466 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7467
7468 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7469 mutex_unlock(&dev_priv->sb_lock);
7470 }
7471
7472 static void chv_compute_dpll(struct intel_crtc *crtc,
7473 struct intel_crtc_state *pipe_config)
7474 {
7475 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7476 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
7477 DPLL_VCO_ENABLE;
7478 if (crtc->pipe != PIPE_A)
7479 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7480
7481 pipe_config->dpll_hw_state.dpll_md =
7482 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7483 }
7484
7485 static void chv_prepare_pll(struct intel_crtc *crtc,
7486 const struct intel_crtc_state *pipe_config)
7487 {
7488 struct drm_device *dev = crtc->base.dev;
7489 struct drm_i915_private *dev_priv = dev->dev_private;
7490 int pipe = crtc->pipe;
7491 i915_reg_t dpll_reg = DPLL(crtc->pipe);
7492 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7493 u32 loopfilter, tribuf_calcntr;
7494 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7495 u32 dpio_val;
7496 int vco;
7497
7498 bestn = pipe_config->dpll.n;
7499 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7500 bestm1 = pipe_config->dpll.m1;
7501 bestm2 = pipe_config->dpll.m2 >> 22;
7502 bestp1 = pipe_config->dpll.p1;
7503 bestp2 = pipe_config->dpll.p2;
7504 vco = pipe_config->dpll.vco;
7505 dpio_val = 0;
7506 loopfilter = 0;
7507
7508 /*
7509 * Enable Refclk and SSC
7510 */
7511 I915_WRITE(dpll_reg,
7512 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7513
7514 mutex_lock(&dev_priv->sb_lock);
7515
7516 /* p1 and p2 divider */
7517 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7518 5 << DPIO_CHV_S1_DIV_SHIFT |
7519 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7520 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7521 1 << DPIO_CHV_K_DIV_SHIFT);
7522
7523 /* Feedback post-divider - m2 */
7524 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7525
7526 /* Feedback refclk divider - n and m1 */
7527 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7528 DPIO_CHV_M1_DIV_BY_2 |
7529 1 << DPIO_CHV_N_DIV_SHIFT);
7530
7531 /* M2 fraction division */
7532 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7533
7534 /* M2 fraction division enable */
7535 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7536 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7537 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7538 if (bestm2_frac)
7539 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7540 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7541
7542 /* Program digital lock detect threshold */
7543 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7544 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7545 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7546 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7547 if (!bestm2_frac)
7548 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7549 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7550
7551 /* Loop filter */
7552 if (vco == 5400000) {
7553 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7554 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7555 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7556 tribuf_calcntr = 0x9;
7557 } else if (vco <= 6200000) {
7558 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7559 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7560 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7561 tribuf_calcntr = 0x9;
7562 } else if (vco <= 6480000) {
7563 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7564 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7565 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7566 tribuf_calcntr = 0x8;
7567 } else {
7568 /* Not supported. Apply the same limits as in the max case */
7569 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7570 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7571 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7572 tribuf_calcntr = 0;
7573 }
7574 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7575
7576 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7577 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7578 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7579 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7580
7581 /* AFC Recal */
7582 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7583 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7584 DPIO_AFC_RECAL);
7585
7586 mutex_unlock(&dev_priv->sb_lock);
7587 }
7588
7589 /**
7590 * vlv_force_pll_on - forcibly enable just the PLL
7591 * @dev: drm device
7592 * @pipe: pipe PLL to enable
7593 * @dpll: PLL configuration
7594 *
7595 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7596 * in cases where we need the PLL enabled even when @pipe is not going to
7597 * be enabled.
7598 */
7599 void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7600 const struct dpll *dpll)
7601 {
7602 struct intel_crtc *crtc =
7603 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7604 struct intel_crtc_state pipe_config = {
7605 .base.crtc = &crtc->base,
7606 .pixel_multiplier = 1,
7607 .dpll = *dpll,
7608 };
7609
7610 if (IS_CHERRYVIEW(dev)) {
7611 chv_compute_dpll(crtc, &pipe_config);
7612 chv_prepare_pll(crtc, &pipe_config);
7613 chv_enable_pll(crtc, &pipe_config);
7614 } else {
7615 vlv_compute_dpll(crtc, &pipe_config);
7616 vlv_prepare_pll(crtc, &pipe_config);
7617 vlv_enable_pll(crtc, &pipe_config);
7618 }
7619 }
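
/*
 * Minimal usage sketch (the divider values below are placeholders, not
 * validated settings): force the pipe A PLL on without enabling the
 * pipe itself, then release it with vlv_force_pll_off():
 *
 *	const struct dpll clk = {
 *		.n = 1, .m1 = 2, .m2 = 0x1a, .p1 = 2, .p2 = 20,
 *	};
 *
 *	vlv_force_pll_on(dev, PIPE_A, &clk);
 *	...
 *	vlv_force_pll_off(dev, PIPE_A);
 */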
7620
7621 /**
7622 * vlv_force_pll_off - forcibly disable just the PLL
7623 * @dev: drm device
7624 * @pipe: pipe PLL to disable
7625 *
7626 * Disable the PLL for @pipe. To be used in cases where the PLL was
7627 * force-enabled via vlv_force_pll_on() but @pipe was never enabled.
7628 */
7629 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7630 {
7631 if (IS_CHERRYVIEW(dev))
7632 chv_disable_pll(to_i915(dev), pipe);
7633 else
7634 vlv_disable_pll(to_i915(dev), pipe);
7635 }
7636
7637 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7638 struct intel_crtc_state *crtc_state,
7639 intel_clock_t *reduced_clock,
7640 int num_connectors)
7641 {
7642 struct drm_device *dev = crtc->base.dev;
7643 struct drm_i915_private *dev_priv = dev->dev_private;
7644 u32 dpll;
7645 bool is_sdvo;
7646 struct dpll *clock = &crtc_state->dpll;
7647
7648 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7649
7650 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7651 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7652
7653 dpll = DPLL_VGA_MODE_DIS;
7654
7655 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7656 dpll |= DPLLB_MODE_LVDS;
7657 else
7658 dpll |= DPLLB_MODE_DAC_SERIAL;
7659
7660 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7661 dpll |= (crtc_state->pixel_multiplier - 1)
7662 << SDVO_MULTIPLIER_SHIFT_HIRES;
7663 }
7664
7665 if (is_sdvo)
7666 dpll |= DPLL_SDVO_HIGH_SPEED;
7667
7668 if (crtc_state->has_dp_encoder)
7669 dpll |= DPLL_SDVO_HIGH_SPEED;
7670
7671 /* compute bitmask from p1 value */
7672 if (IS_PINEVIEW(dev))
7673 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7674 else {
7675 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7676 if (IS_G4X(dev) && reduced_clock)
7677 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7678 }
7679 switch (clock->p2) {
7680 case 5:
7681 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7682 break;
7683 case 7:
7684 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7685 break;
7686 case 10:
7687 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7688 break;
7689 case 14:
7690 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7691 break;
7692 }
7693 if (INTEL_INFO(dev)->gen >= 4)
7694 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7695
7696 if (crtc_state->sdvo_tv_clock)
7697 dpll |= PLL_REF_INPUT_TVCLKINBC;
7698 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7699 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7700 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7701 else
7702 dpll |= PLL_REF_INPUT_DREFCLK;
7703
7704 dpll |= DPLL_VCO_ENABLE;
7705 crtc_state->dpll_hw_state.dpll = dpll;
7706
7707 if (INTEL_INFO(dev)->gen >= 4) {
7708 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7709 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7710 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7711 }
7712 }
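
/*
 * Note on the P1 encoding above (illustrative): the P1 post divider is
 * programmed as a one-hot bitmask rather than a plain number, so for
 * p1 == 2 the driver effectively writes
 *
 *	dpll |= (1 << (2 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
 *
 * i.e. the field holds 1 << (p1 - 1), not p1 itself.
 */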
7713
7714 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7715 struct intel_crtc_state *crtc_state,
7716 intel_clock_t *reduced_clock,
7717 int num_connectors)
7718 {
7719 struct drm_device *dev = crtc->base.dev;
7720 struct drm_i915_private *dev_priv = dev->dev_private;
7721 u32 dpll;
7722 struct dpll *clock = &crtc_state->dpll;
7723
7724 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7725
7726 dpll = DPLL_VGA_MODE_DIS;
7727
7728 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7729 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7730 } else {
7731 if (clock->p1 == 2)
7732 dpll |= PLL_P1_DIVIDE_BY_TWO;
7733 else
7734 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7735 if (clock->p2 == 4)
7736 dpll |= PLL_P2_DIVIDE_BY_4;
7737 }
7738
7739 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7740 dpll |= DPLL_DVO_2X_MODE;
7741
7742 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7743 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
7744 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7745 else
7746 dpll |= PLL_REF_INPUT_DREFCLK;
7747
7748 dpll |= DPLL_VCO_ENABLE;
7749 crtc_state->dpll_hw_state.dpll = dpll;
7750 }
7751
7752 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7753 {
7754 struct drm_device *dev = intel_crtc->base.dev;
7755 struct drm_i915_private *dev_priv = dev->dev_private;
7756 enum pipe pipe = intel_crtc->pipe;
7757 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7758 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7759 uint32_t crtc_vtotal, crtc_vblank_end;
7760 int vsyncshift = 0;
7761
7762 /* We need to be careful not to change the adjusted mode, for otherwise
7763 * the hw state checker will get angry at the mismatch. */
7764 crtc_vtotal = adjusted_mode->crtc_vtotal;
7765 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7766
7767 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7768 /* the chip adds 2 halflines automatically */
7769 crtc_vtotal -= 1;
7770 crtc_vblank_end -= 1;
7771
7772 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7773 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7774 else
7775 vsyncshift = adjusted_mode->crtc_hsync_start -
7776 adjusted_mode->crtc_htotal / 2;
7777 if (vsyncshift < 0)
7778 vsyncshift += adjusted_mode->crtc_htotal;
7779 }
7780
7781 if (INTEL_INFO(dev)->gen > 3)
7782 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7783
7784 I915_WRITE(HTOTAL(cpu_transcoder),
7785 (adjusted_mode->crtc_hdisplay - 1) |
7786 ((adjusted_mode->crtc_htotal - 1) << 16));
7787 I915_WRITE(HBLANK(cpu_transcoder),
7788 (adjusted_mode->crtc_hblank_start - 1) |
7789 ((adjusted_mode->crtc_hblank_end - 1) << 16));
7790 I915_WRITE(HSYNC(cpu_transcoder),
7791 (adjusted_mode->crtc_hsync_start - 1) |
7792 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7793
7794 I915_WRITE(VTOTAL(cpu_transcoder),
7795 (adjusted_mode->crtc_vdisplay - 1) |
7796 ((crtc_vtotal - 1) << 16));
7797 I915_WRITE(VBLANK(cpu_transcoder),
7798 (adjusted_mode->crtc_vblank_start - 1) |
7799 ((crtc_vblank_end - 1) << 16));
7800 I915_WRITE(VSYNC(cpu_transcoder),
7801 (adjusted_mode->crtc_vsync_start - 1) |
7802 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7803
7804 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7805 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7806 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7807 * bits. */
7808 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7809 (pipe == PIPE_B || pipe == PIPE_C))
7810 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7811
7812 /* pipesrc controls the size that is scaled from, which should
7813 * always be the user's requested size.
7814 */
7815 I915_WRITE(PIPESRC(pipe),
7816 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7817 (intel_crtc->config->pipe_src_h - 1));
7818 }
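
/*
 * Illustrative example of the register layout used above: each timing
 * register packs two zero-based 16-bit fields as
 * (start - 1) | ((end - 1) << 16). For a 1920-wide mode with an htotal
 * of 2200:
 *
 *	HTOTAL = (1920 - 1) | ((2200 - 1) << 16);	-> 0x0897077f
 *
 * The hardware counts from zero, hence the "- 1" on every field.
 */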
7819
7820 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7821 struct intel_crtc_state *pipe_config)
7822 {
7823 struct drm_device *dev = crtc->base.dev;
7824 struct drm_i915_private *dev_priv = dev->dev_private;
7825 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7826 uint32_t tmp;
7827
7828 tmp = I915_READ(HTOTAL(cpu_transcoder));
7829 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7830 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7831 tmp = I915_READ(HBLANK(cpu_transcoder));
7832 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7833 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7834 tmp = I915_READ(HSYNC(cpu_transcoder));
7835 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7836 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7837
7838 tmp = I915_READ(VTOTAL(cpu_transcoder));
7839 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7840 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7841 tmp = I915_READ(VBLANK(cpu_transcoder));
7842 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7843 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7844 tmp = I915_READ(VSYNC(cpu_transcoder));
7845 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7846 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7847
7848 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7849 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7850 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7851 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7852 }
7853
7854 tmp = I915_READ(PIPESRC(crtc->pipe));
7855 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7856 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7857
7858 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7859 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7860 }
7861
7862 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7863 struct intel_crtc_state *pipe_config)
7864 {
7865 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7866 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7867 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7868 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7869
7870 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7871 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7872 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7873 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7874
7875 mode->flags = pipe_config->base.adjusted_mode.flags;
7876 mode->type = DRM_MODE_TYPE_DRIVER;
7877
7878 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7880
7881 mode->hsync = drm_mode_hsync(mode);
7882 mode->vrefresh = drm_mode_vrefresh(mode);
7883 drm_mode_set_name(mode);
7884 }
7885
7886 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7887 {
7888 struct drm_device *dev = intel_crtc->base.dev;
7889 struct drm_i915_private *dev_priv = dev->dev_private;
7890 uint32_t pipeconf;
7891
7892 pipeconf = 0;
7893
7894 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7895 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7896 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7897
7898 if (intel_crtc->config->double_wide)
7899 pipeconf |= PIPECONF_DOUBLE_WIDE;
7900
7901 /* only g4x and later have fancy bpc/dither controls */
7902 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7903 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7904 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7905 pipeconf |= PIPECONF_DITHER_EN |
7906 PIPECONF_DITHER_TYPE_SP;
7907
7908 switch (intel_crtc->config->pipe_bpp) {
7909 case 18:
7910 pipeconf |= PIPECONF_6BPC;
7911 break;
7912 case 24:
7913 pipeconf |= PIPECONF_8BPC;
7914 break;
7915 case 30:
7916 pipeconf |= PIPECONF_10BPC;
7917 break;
7918 default:
7919 /* Case prevented by intel_choose_pipe_bpp_dither. */
7920 BUG();
7921 }
7922 }
7923
7924 if (HAS_PIPE_CXSR(dev)) {
7925 if (intel_crtc->lowfreq_avail) {
7926 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7927 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7928 } else {
7929 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7930 }
7931 }
7932
7933 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7934 if (INTEL_INFO(dev)->gen < 4 ||
7935 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7936 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7937 else
7938 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7939 } else
7940 pipeconf |= PIPECONF_PROGRESSIVE;
7941
7942 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7943 intel_crtc->config->limited_color_range)
7944 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7945
7946 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7947 POSTING_READ(PIPECONF(intel_crtc->pipe));
7948 }
7949
7950 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7951 struct intel_crtc_state *crtc_state)
7952 {
7953 struct drm_device *dev = crtc->base.dev;
7954 struct drm_i915_private *dev_priv = dev->dev_private;
7955 int refclk, num_connectors = 0;
7956 intel_clock_t clock;
7957 bool ok;
7958 const intel_limit_t *limit;
7959 struct drm_atomic_state *state = crtc_state->base.state;
7960 struct drm_connector *connector;
7961 struct drm_connector_state *connector_state;
7962 int i;
7963
7964 memset(&crtc_state->dpll_hw_state, 0,
7965 sizeof(crtc_state->dpll_hw_state));
7966
7967 if (crtc_state->has_dsi_encoder)
7968 return 0;
7969
7970 for_each_connector_in_state(state, connector, connector_state, i) {
7971 if (connector_state->crtc == &crtc->base)
7972 num_connectors++;
7973 }
7974
7975 if (!crtc_state->clock_set) {
7976 refclk = i9xx_get_refclk(crtc_state, num_connectors);
7977
7978 /*
7979 * Returns a set of divisors for the desired target clock with
7980 * the given refclk, or FALSE. The returned values represent
7981 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
7982 * 2) / p1 / p2.
7983 */
7984 limit = intel_limit(crtc_state, refclk);
7985 ok = dev_priv->display.find_dpll(limit, crtc_state,
7986 crtc_state->port_clock,
7987 refclk, NULL, &clock);
7988 if (!ok) {
7989 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7990 return -EINVAL;
7991 }
7992
7993 /* Compat-code for transition, will disappear. */
7994 crtc_state->dpll.n = clock.n;
7995 crtc_state->dpll.m1 = clock.m1;
7996 crtc_state->dpll.m2 = clock.m2;
7997 crtc_state->dpll.p1 = clock.p1;
7998 crtc_state->dpll.p2 = clock.p2;
7999 }
8000
8001 if (IS_GEN2(dev)) {
8002 i8xx_compute_dpll(crtc, crtc_state, NULL,
8003 num_connectors);
8004 } else if (IS_CHERRYVIEW(dev)) {
8005 chv_compute_dpll(crtc, crtc_state);
8006 } else if (IS_VALLEYVIEW(dev)) {
8007 vlv_compute_dpll(crtc, crtc_state);
8008 } else {
8009 i9xx_compute_dpll(crtc, crtc_state, NULL,
8010 num_connectors);
8011 }
8012
8013 return 0;
8014 }
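
/*
 * Worked example of the clock equation quoted above (made-up dividers,
 * assuming a 96 MHz refclk): for n = 4, m1 = 18, m2 = 7, p1 = 2, p2 = 10
 *
 *	dot = 96000 * (5 * (18 + 2) + (7 + 2)) / (4 + 2) / 2 / 10
 *	    = 96000 * 109 / 6 / 20
 *	    = 87200 kHz
 *
 * find_dpll() walks the platform limits looking for the divider set
 * whose result lands closest to the requested port clock.
 */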
8015
8016 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8017 struct intel_crtc_state *pipe_config)
8018 {
8019 struct drm_device *dev = crtc->base.dev;
8020 struct drm_i915_private *dev_priv = dev->dev_private;
8021 uint32_t tmp;
8022
8023 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8024 return;
8025
8026 tmp = I915_READ(PFIT_CONTROL);
8027 if (!(tmp & PFIT_ENABLE))
8028 return;
8029
8030 /* Check whether the pfit is attached to our pipe. */
8031 if (INTEL_INFO(dev)->gen < 4) {
8032 if (crtc->pipe != PIPE_B)
8033 return;
8034 } else {
8035 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8036 return;
8037 }
8038
8039 pipe_config->gmch_pfit.control = tmp;
8040 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
8041 if (INTEL_INFO(dev)->gen < 5)
8042 pipe_config->gmch_pfit.lvds_border_bits =
8043 I915_READ(LVDS) & LVDS_BORDER_ENABLE;
8044 }
8045
8046 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8047 struct intel_crtc_state *pipe_config)
8048 {
8049 struct drm_device *dev = crtc->base.dev;
8050 struct drm_i915_private *dev_priv = dev->dev_private;
8051 int pipe = pipe_config->cpu_transcoder;
8052 intel_clock_t clock;
8053 u32 mdiv;
8054 int refclk = 100000;
8055
8056 /* In the case of MIPI (DSI), the DPLL will not even be used */
8057 if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
8058 return;
8059
8060 mutex_lock(&dev_priv->sb_lock);
8061 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
8062 mutex_unlock(&dev_priv->sb_lock);
8063
8064 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8065 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8066 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8067 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8068 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8069
8070 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
8071 }
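
/*
 * Note on the decode above (a sketch): the fields mirror what
 * vlv_prepare_pll() packed into VLV_PLL_DW3, and vlv_calc_dpll_params()
 * evaluates the PLL equation against the 100 MHz reference, roughly
 *
 *	vco = refclk * m1 * m2 / n;
 *	dot = vco / (p1 * p2);
 *
 * with a further fixed divide applied when converting the dot clock
 * into the reported port clock.
 */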
8072
8073 static void
8074 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8075 struct intel_initial_plane_config *plane_config)
8076 {
8077 struct drm_device *dev = crtc->base.dev;
8078 struct drm_i915_private *dev_priv = dev->dev_private;
8079 u32 val, base, offset;
8080 int pipe = crtc->pipe, plane = crtc->plane;
8081 int fourcc, pixel_format;
8082 unsigned int aligned_height;
8083 struct drm_framebuffer *fb;
8084 struct intel_framebuffer *intel_fb;
8085
8086 val = I915_READ(DSPCNTR(plane));
8087 if (!(val & DISPLAY_PLANE_ENABLE))
8088 return;
8089
8090 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8091 if (!intel_fb) {
8092 DRM_DEBUG_KMS("failed to alloc fb\n");
8093 return;
8094 }
8095
8096 fb = &intel_fb->base;
8097
8098 if (INTEL_INFO(dev)->gen >= 4) {
8099 if (val & DISPPLANE_TILED) {
8100 plane_config->tiling = I915_TILING_X;
8101 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8102 }
8103 }
8104
8105 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8106 fourcc = i9xx_format_to_fourcc(pixel_format);
8107 fb->pixel_format = fourcc;
8108 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8109
8110 if (INTEL_INFO(dev)->gen >= 4) {
8111 if (plane_config->tiling)
8112 offset = I915_READ(DSPTILEOFF(plane));
8113 else
8114 offset = I915_READ(DSPLINOFF(plane));
8115 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8116 } else {
8117 base = I915_READ(DSPADDR(plane));
8118 }
8119 plane_config->base = base;
8120
8121 val = I915_READ(PIPESRC(pipe));
8122 fb->width = ((val >> 16) & 0xfff) + 1;
8123 fb->height = ((val >> 0) & 0xfff) + 1;
8124
8125 val = I915_READ(DSPSTRIDE(pipe));
8126 fb->pitches[0] = val & 0xffffffc0;
8127
8128 aligned_height = intel_fb_align_height(dev, fb->height,
8129 fb->pixel_format,
8130 fb->modifier[0]);
8131
8132 plane_config->size = fb->pitches[0] * aligned_height;
8133
8134 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8135 pipe_name(pipe), plane, fb->width, fb->height,
8136 fb->bits_per_pixel, base, fb->pitches[0],
8137 plane_config->size);
8138
8139 plane_config->fb = intel_fb;
8140 }
8141
8142 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8143 struct intel_crtc_state *pipe_config)
8144 {
8145 struct drm_device *dev = crtc->base.dev;
8146 struct drm_i915_private *dev_priv = dev->dev_private;
8147 int pipe = pipe_config->cpu_transcoder;
8148 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8149 intel_clock_t clock;
8150 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8151 int refclk = 100000;
8152
8153 mutex_lock(&dev_priv->sb_lock);
8154 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8155 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8156 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8157 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8158 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8159 mutex_unlock(&dev_priv->sb_lock);
8160
8161 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8162 clock.m2 = (pll_dw0 & 0xff) << 22;
8163 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8164 clock.m2 |= pll_dw2 & 0x3fffff;
8165 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8166 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8167 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8168
8169 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8170 }
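
/*
 * Note on the CHV M2 format decoded above: the m2 divider travels
 * through pipe_config->dpll.m2 as a fixed-point value with a 22-bit
 * fraction. chv_prepare_pll() splits it and this function reassembles
 * it:
 *
 *	integer part = pll_dw0 & 0xff;		(CHV_PLL_DW0)
 *	fraction     = pll_dw2 & 0x3fffff;	(CHV_PLL_DW2, if enabled)
 *	m2           = (integer part << 22) | fraction;
 */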
8171
8172 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8173 struct intel_crtc_state *pipe_config)
8174 {
8175 struct drm_device *dev = crtc->base.dev;
8176 struct drm_i915_private *dev_priv = dev->dev_private;
8177 enum intel_display_power_domain power_domain;
8178 uint32_t tmp;
8179 bool ret;
8180
8181 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8182 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8183 return false;
8184
8185 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8186 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
8187
8188 ret = false;
8189
8190 tmp = I915_READ(PIPECONF(crtc->pipe));
8191 if (!(tmp & PIPECONF_ENABLE))
8192 goto out;
8193
8194 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8195 switch (tmp & PIPECONF_BPC_MASK) {
8196 case PIPECONF_6BPC:
8197 pipe_config->pipe_bpp = 18;
8198 break;
8199 case PIPECONF_8BPC:
8200 pipe_config->pipe_bpp = 24;
8201 break;
8202 case PIPECONF_10BPC:
8203 pipe_config->pipe_bpp = 30;
8204 break;
8205 default:
8206 break;
8207 }
8208 }
8209
8210 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8211 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8212 pipe_config->limited_color_range = true;
8213
8214 if (INTEL_INFO(dev)->gen < 4)
8215 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8216
8217 intel_get_pipe_timings(crtc, pipe_config);
8218
8219 i9xx_get_pfit_config(crtc, pipe_config);
8220
8221 if (INTEL_INFO(dev)->gen >= 4) {
8222 tmp = I915_READ(DPLL_MD(crtc->pipe));
8223 pipe_config->pixel_multiplier =
8224 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8225 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8226 pipe_config->dpll_hw_state.dpll_md = tmp;
8227 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8228 tmp = I915_READ(DPLL(crtc->pipe));
8229 pipe_config->pixel_multiplier =
8230 ((tmp & SDVO_MULTIPLIER_MASK)
8231 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8232 } else {
8233 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8234 * port and will be fixed up in the encoder->get_config
8235 * function. */
8236 pipe_config->pixel_multiplier = 1;
8237 }
8238 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8239 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8240 /*
8241 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8242 * on 830. Filter it out here so that we don't
8243 * report errors due to that.
8244 */
8245 if (IS_I830(dev))
8246 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8247
8248 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8249 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8250 } else {
8251 /* Mask out read-only status bits. */
8252 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8253 DPLL_PORTC_READY_MASK |
8254 DPLL_PORTB_READY_MASK);
8255 }
8256
8257 if (IS_CHERRYVIEW(dev))
8258 chv_crtc_clock_get(crtc, pipe_config);
8259 else if (IS_VALLEYVIEW(dev))
8260 vlv_crtc_clock_get(crtc, pipe_config);
8261 else
8262 i9xx_crtc_clock_get(crtc, pipe_config);
8263
8264 /*
8265 * Normally the dotclock is filled in by the encoder .get_config()
8266 * but in case the pipe is enabled w/o any ports we need a sane
8267 * default.
8268 */
8269 pipe_config->base.adjusted_mode.crtc_clock =
8270 pipe_config->port_clock / pipe_config->pixel_multiplier;
8271
8272 ret = true;
8273
8274 out:
8275 intel_display_power_put(dev_priv, power_domain);
8276
8277 return ret;
8278 }
8279
8280 static void ironlake_init_pch_refclk(struct drm_device *dev)
8281 {
8282 struct drm_i915_private *dev_priv = dev->dev_private;
8283 struct intel_encoder *encoder;
8284 u32 val, final;
8285 bool has_lvds = false;
8286 bool has_cpu_edp = false;
8287 bool has_panel = false;
8288 bool has_ck505 = false;
8289 bool can_ssc = false;
8290
8291 /* We need to take the global config into account */
8292 for_each_intel_encoder(dev, encoder) {
8293 switch (encoder->type) {
8294 case INTEL_OUTPUT_LVDS:
8295 has_panel = true;
8296 has_lvds = true;
8297 break;
8298 case INTEL_OUTPUT_EDP:
8299 has_panel = true;
8300 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8301 has_cpu_edp = true;
8302 break;
8303 default:
8304 break;
8305 }
8306 }
8307
8308 if (HAS_PCH_IBX(dev)) {
8309 has_ck505 = dev_priv->vbt.display_clock_mode;
8310 can_ssc = has_ck505;
8311 } else {
8312 has_ck505 = false;
8313 can_ssc = true;
8314 }
8315
8316 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
8317 has_panel, has_lvds, has_ck505);
8318
8319 /* Ironlake: try to set up the display reference clock before
8320 * enabling the DPLL. This is only under the driver's control
8321 * after the PCH B stepping; earlier steppings ignore this
8322 * setting.
8323 */
8324 val = I915_READ(PCH_DREF_CONTROL);
8325
8326 /* As we must carefully and slowly disable/enable each source in turn,
8327 * compute the final state we want first and check if we need to
8328 * make any changes at all.
8329 */
8330 final = val;
8331 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8332 if (has_ck505)
8333 final |= DREF_NONSPREAD_CK505_ENABLE;
8334 else
8335 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8336
8337 final &= ~DREF_SSC_SOURCE_MASK;
8338 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8339 final &= ~DREF_SSC1_ENABLE;
8340
8341 if (has_panel) {
8342 final |= DREF_SSC_SOURCE_ENABLE;
8343
8344 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8345 final |= DREF_SSC1_ENABLE;
8346
8347 if (has_cpu_edp) {
8348 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8349 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8350 else
8351 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8352 } else
8353 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8354 } else {
8355 final |= DREF_SSC_SOURCE_DISABLE;
8356 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8357 }
8358
8359 if (final == val)
8360 return;
8361
8362 /* Always enable nonspread source */
8363 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8364
8365 if (has_ck505)
8366 val |= DREF_NONSPREAD_CK505_ENABLE;
8367 else
8368 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8369
8370 if (has_panel) {
8371 val &= ~DREF_SSC_SOURCE_MASK;
8372 val |= DREF_SSC_SOURCE_ENABLE;
8373
8374 /* SSC must be turned on before enabling the CPU output */
8375 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8376 DRM_DEBUG_KMS("Using SSC on panel\n");
8377 val |= DREF_SSC1_ENABLE;
8378 } else
8379 val &= ~DREF_SSC1_ENABLE;
8380
8381 /* Get SSC going before enabling the outputs */
8382 I915_WRITE(PCH_DREF_CONTROL, val);
8383 POSTING_READ(PCH_DREF_CONTROL);
8384 udelay(200);
8385
8386 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8387
8388 /* Enable CPU source on CPU attached eDP */
8389 if (has_cpu_edp) {
8390 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8391 DRM_DEBUG_KMS("Using SSC on eDP\n");
8392 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8393 } else
8394 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8395 } else
8396 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8397
8398 I915_WRITE(PCH_DREF_CONTROL, val);
8399 POSTING_READ(PCH_DREF_CONTROL);
8400 udelay(200);
8401 } else {
8402 DRM_DEBUG_KMS("Disabling SSC entirely\n");
8403
8404 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8405
8406 /* Turn off CPU output */
8407 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8408
8409 I915_WRITE(PCH_DREF_CONTROL, val);
8410 POSTING_READ(PCH_DREF_CONTROL);
8411 udelay(200);
8412
8413 /* Turn off the SSC source */
8414 val &= ~DREF_SSC_SOURCE_MASK;
8415 val |= DREF_SSC_SOURCE_DISABLE;
8416
8417 /* Turn off SSC1 */
8418 val &= ~DREF_SSC1_ENABLE;
8419
8420 I915_WRITE(PCH_DREF_CONTROL, val);
8421 POSTING_READ(PCH_DREF_CONTROL);
8422 udelay(200);
8423 }
8424
8425 BUG_ON(val != final);
8426 }
8427
8428 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8429 {
8430 uint32_t tmp;
8431
8432 tmp = I915_READ(SOUTH_CHICKEN2);
8433 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8434 I915_WRITE(SOUTH_CHICKEN2, tmp);
8435
8436 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8437 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8438 DRM_ERROR("FDI mPHY reset assert timeout\n");
8439
8440 tmp = I915_READ(SOUTH_CHICKEN2);
8441 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8442 I915_WRITE(SOUTH_CHICKEN2, tmp);
8443
8444 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8445 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8446 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8447 }
8448
8449 /* WaMPhyProgramming:hsw */
8450 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8451 {
8452 uint32_t tmp;
8453
8454 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8455 tmp &= ~(0xFF << 24);
8456 tmp |= (0x12 << 24);
8457 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8458
8459 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8460 tmp |= (1 << 11);
8461 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8462
8463 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8464 tmp |= (1 << 11);
8465 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8466
8467 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8468 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8469 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8470
8471 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8472 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8473 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8474
8475 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8476 tmp &= ~(7 << 13);
8477 tmp |= (5 << 13);
8478 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8479
8480 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8481 tmp &= ~(7 << 13);
8482 tmp |= (5 << 13);
8483 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8484
8485 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8486 tmp &= ~0xFF;
8487 tmp |= 0x1C;
8488 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8489
8490 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8491 tmp &= ~0xFF;
8492 tmp |= 0x1C;
8493 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8494
8495 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8496 tmp &= ~(0xFF << 16);
8497 tmp |= (0x1C << 16);
8498 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8499
8500 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8501 tmp &= ~(0xFF << 16);
8502 tmp |= (0x1C << 16);
8503 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8504
8505 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8506 tmp |= (1 << 27);
8507 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8508
8509 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8510 tmp |= (1 << 27);
8511 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8512
8513 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8514 tmp &= ~(0xF << 28);
8515 tmp |= (4 << 28);
8516 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8517
8518 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8519 tmp &= ~(0xF << 28);
8520 tmp |= (4 << 28);
8521 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8522 }
8523
8524 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8525 * Programming" based on the parameters passed:
8526 * - Sequence to enable CLKOUT_DP
8527 * - Sequence to enable CLKOUT_DP without spread
8528 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8529 */
8530 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8531 bool with_fdi)
8532 {
8533 struct drm_i915_private *dev_priv = dev->dev_private;
8534 uint32_t reg, tmp;
8535
8536 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8537 with_spread = true;
8538 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8539 with_fdi = false;
8540
8541 mutex_lock(&dev_priv->sb_lock);
8542
8543 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8544 tmp &= ~SBI_SSCCTL_DISABLE;
8545 tmp |= SBI_SSCCTL_PATHALT;
8546 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8547
8548 udelay(24);
8549
8550 if (with_spread) {
8551 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8552 tmp &= ~SBI_SSCCTL_PATHALT;
8553 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8554
8555 if (with_fdi) {
8556 lpt_reset_fdi_mphy(dev_priv);
8557 lpt_program_fdi_mphy(dev_priv);
8558 }
8559 }
8560
8561 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8562 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8563 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8564 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8565
8566 mutex_unlock(&dev_priv->sb_lock);
8567 }
8568
8569 /* Sequence to disable CLKOUT_DP */
8570 static void lpt_disable_clkout_dp(struct drm_device *dev)
8571 {
8572 struct drm_i915_private *dev_priv = dev->dev_private;
8573 uint32_t reg, tmp;
8574
8575 mutex_lock(&dev_priv->sb_lock);
8576
8577 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8578 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8579 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8580 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8581
8582 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8583 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8584 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8585 tmp |= SBI_SSCCTL_PATHALT;
8586 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8587 udelay(32);
8588 }
8589 tmp |= SBI_SSCCTL_DISABLE;
8590 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8591 }
8592
8593 mutex_unlock(&dev_priv->sb_lock);
8594 }
8595
8596 #define BEND_IDX(steps) ((50 + (steps)) / 5)
8597
8598 static const uint16_t sscdivintphase[] = {
8599 [BEND_IDX( 50)] = 0x3B23,
8600 [BEND_IDX( 45)] = 0x3B23,
8601 [BEND_IDX( 40)] = 0x3C23,
8602 [BEND_IDX( 35)] = 0x3C23,
8603 [BEND_IDX( 30)] = 0x3D23,
8604 [BEND_IDX( 25)] = 0x3D23,
8605 [BEND_IDX( 20)] = 0x3E23,
8606 [BEND_IDX( 15)] = 0x3E23,
8607 [BEND_IDX( 10)] = 0x3F23,
8608 [BEND_IDX( 5)] = 0x3F23,
8609 [BEND_IDX( 0)] = 0x0025,
8610 [BEND_IDX( -5)] = 0x0025,
8611 [BEND_IDX(-10)] = 0x0125,
8612 [BEND_IDX(-15)] = 0x0125,
8613 [BEND_IDX(-20)] = 0x0225,
8614 [BEND_IDX(-25)] = 0x0225,
8615 [BEND_IDX(-30)] = 0x0325,
8616 [BEND_IDX(-35)] = 0x0325,
8617 [BEND_IDX(-40)] = 0x0425,
8618 [BEND_IDX(-45)] = 0x0425,
8619 [BEND_IDX(-50)] = 0x0525,
8620 };
8621
8622 /*
8623 * Bend CLKOUT_DP
8624 * steps -50 to 50 inclusive, in steps of 5
8625 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8626 * change in clock period = -(steps / 10) * 5.787 ps
8627 */
8628 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8629 {
8630 uint32_t tmp;
8631 int idx = BEND_IDX(steps);
8632
8633 if (WARN_ON(steps % 5 != 0))
8634 return;
8635
8636 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8637 return;
8638
8639 mutex_lock(&dev_priv->sb_lock);
8640
8641 if (steps % 10 != 0)
8642 tmp = 0xAAAAAAAB;
8643 else
8644 tmp = 0x00000000;
8645 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8646
8647 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8648 tmp &= 0xffff0000;
8649 tmp |= sscdivintphase[idx];
8650 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8651
8652 mutex_unlock(&dev_priv->sb_lock);
8653 }
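
/*
 * Worked example for the table and formula above: steps = -20 gives
 * BEND_IDX(-20) == (50 - 20) / 5 == 6, so sscdivintphase[6] == 0x0225
 * is programmed, and the clock period changes by
 * -(-20 / 10) * 5.787 ps == +11.574 ps, i.e. a slightly slower clock,
 * matching "< 0 slow down the clock".
 */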
8654
8655 #undef BEND_IDX
8656
8657 static void lpt_init_pch_refclk(struct drm_device *dev)
8658 {
8659 struct intel_encoder *encoder;
8660 bool has_vga = false;
8661
8662 for_each_intel_encoder(dev, encoder) {
8663 switch (encoder->type) {
8664 case INTEL_OUTPUT_ANALOG:
8665 has_vga = true;
8666 break;
8667 default:
8668 break;
8669 }
8670 }
8671
8672 if (has_vga) {
8673 lpt_bend_clkout_dp(to_i915(dev), 0);
8674 lpt_enable_clkout_dp(dev, true, true);
8675 } else {
8676 lpt_disable_clkout_dp(dev);
8677 }
8678 }
8679
8680 /*
8681 * Initialize reference clocks when the driver loads
8682 */
8683 void intel_init_pch_refclk(struct drm_device *dev)
8684 {
8685 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8686 ironlake_init_pch_refclk(dev);
8687 else if (HAS_PCH_LPT(dev))
8688 lpt_init_pch_refclk(dev);
8689 }
8690
8691 static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
8692 {
8693 struct drm_device *dev = crtc_state->base.crtc->dev;
8694 struct drm_i915_private *dev_priv = dev->dev_private;
8695 struct drm_atomic_state *state = crtc_state->base.state;
8696 struct drm_connector *connector;
8697 struct drm_connector_state *connector_state;
8698 struct intel_encoder *encoder;
8699 int num_connectors = 0, i;
8700 bool is_lvds = false;
8701
8702 for_each_connector_in_state(state, connector, connector_state, i) {
8703 if (connector_state->crtc != crtc_state->base.crtc)
8704 continue;
8705
8706 encoder = to_intel_encoder(connector_state->best_encoder);
8707
8708 switch (encoder->type) {
8709 case INTEL_OUTPUT_LVDS:
8710 is_lvds = true;
8711 break;
8712 default:
8713 break;
8714 }
8715 num_connectors++;
8716 }
8717
8718 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
8719 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8720 dev_priv->vbt.lvds_ssc_freq);
8721 return dev_priv->vbt.lvds_ssc_freq;
8722 }
8723
8724 return 120000;
8725 }
8726
8727 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8728 {
8729 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8730 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8731 int pipe = intel_crtc->pipe;
8732 uint32_t val;
8733
8734 val = 0;
8735
8736 switch (intel_crtc->config->pipe_bpp) {
8737 case 18:
8738 val |= PIPECONF_6BPC;
8739 break;
8740 case 24:
8741 val |= PIPECONF_8BPC;
8742 break;
8743 case 30:
8744 val |= PIPECONF_10BPC;
8745 break;
8746 case 36:
8747 val |= PIPECONF_12BPC;
8748 break;
8749 default:
8750 /* Case prevented by intel_choose_pipe_bpp_dither. */
8751 BUG();
8752 }
8753
8754 if (intel_crtc->config->dither)
8755 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8756
8757 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8758 val |= PIPECONF_INTERLACED_ILK;
8759 else
8760 val |= PIPECONF_PROGRESSIVE;
8761
8762 if (intel_crtc->config->limited_color_range)
8763 val |= PIPECONF_COLOR_RANGE_SELECT;
8764
8765 I915_WRITE(PIPECONF(pipe), val);
8766 POSTING_READ(PIPECONF(pipe));
8767 }
8768
8769 /*
8770 * Set up the pipe CSC unit.
8771 *
8772 * Currently only full range RGB to limited range RGB conversion
8773 * is supported, but eventually this should handle various
8774 * RGB<->YCbCr scenarios as well.
8775 */
8776 static void intel_set_pipe_csc(struct drm_crtc *crtc)
8777 {
8778 struct drm_device *dev = crtc->dev;
8779 struct drm_i915_private *dev_priv = dev->dev_private;
8780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8781 int pipe = intel_crtc->pipe;
8782 uint16_t coeff = 0x7800; /* 1.0 */
8783
8784 /*
8785 * TODO: Check what kind of values actually come out of the pipe
8786 * with these coeff/postoff values and adjust to get the best
8787 * accuracy. Perhaps we even need to take the bpc value into
8788 * consideration.
8789 */
8790
8791 if (intel_crtc->config->limited_color_range)
8792 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
8793
8794 /*
8795 * GY/GU and RY/RU should be the other way around according
8796 * to BSpec, but reality doesn't agree. Just set them up in
8797 * a way that results in the correct picture.
8798 */
8799 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8800 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8801
8802 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8803 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8804
8805 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8806 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8807
8808 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8809 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8810 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8811
8812 if (INTEL_INFO(dev)->gen > 6) {
8813 uint16_t postoff = 0;
8814
8815 if (intel_crtc->config->limited_color_range)
8816 postoff = (16 * (1 << 12) / 255) & 0x1fff;
8817
8818 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8819 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8820 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8821
8822 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8823 } else {
8824 uint32_t mode = CSC_MODE_YUV_TO_RGB;
8825
8826 if (intel_crtc->config->limited_color_range)
8827 mode |= CSC_BLACK_SCREEN_OFFSET;
8828
8829 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8830 }
8831 }
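
/*
 * Worked example of the fixed-point arithmetic above (a sketch of the
 * numbers only): for limited range output the diagonal coefficient is
 * scaled by 219/255,
 *
 *	coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8;	-> 0xdb8
 *
 * i.e. 3517 (~0.859 * 4096) truncated to 0xdb8 by the mask, while the
 * full-range value 0x7800 stands for 1.0 per the comment above.
 */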
8832
8833 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8834 {
8835 struct drm_device *dev = crtc->dev;
8836 struct drm_i915_private *dev_priv = dev->dev_private;
8837 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8838 enum pipe pipe = intel_crtc->pipe;
8839 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8840 uint32_t val;
8841
8842 val = 0;
8843
8844 if (IS_HASWELL(dev) && intel_crtc->config->dither)
8845 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8846
8847 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8848 val |= PIPECONF_INTERLACED_ILK;
8849 else
8850 val |= PIPECONF_PROGRESSIVE;
8851
8852 I915_WRITE(PIPECONF(cpu_transcoder), val);
8853 POSTING_READ(PIPECONF(cpu_transcoder));
8854
8855 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8856 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
8857
8858 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
8859 val = 0;
8860
8861 switch (intel_crtc->config->pipe_bpp) {
8862 case 18:
8863 val |= PIPEMISC_DITHER_6_BPC;
8864 break;
8865 case 24:
8866 val |= PIPEMISC_DITHER_8_BPC;
8867 break;
8868 case 30:
8869 val |= PIPEMISC_DITHER_10_BPC;
8870 break;
8871 case 36:
8872 val |= PIPEMISC_DITHER_12_BPC;
8873 break;
8874 default:
8875 /* Case prevented by pipe_config_set_bpp. */
8876 BUG();
8877 }
8878
8879 if (intel_crtc->config->dither)
8880 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8881
8882 I915_WRITE(PIPEMISC(pipe), val);
8883 }
8884 }
8885
8886 static bool ironlake_compute_clocks(struct drm_crtc *crtc,
8887 struct intel_crtc_state *crtc_state,
8888 intel_clock_t *clock,
8889 bool *has_reduced_clock,
8890 intel_clock_t *reduced_clock)
8891 {
8892 struct drm_device *dev = crtc->dev;
8893 struct drm_i915_private *dev_priv = dev->dev_private;
8894 int refclk;
8895 const intel_limit_t *limit;
8896 bool ret;
8897
8898 refclk = ironlake_get_refclk(crtc_state);
8899
8900 /*
8901 * Returns a set of divisors for the desired target clock with the given
8902 * refclk, or FALSE. The returned values represent the clock equation:
8903 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8904 */
8905 limit = intel_limit(crtc_state, refclk);
8906 ret = dev_priv->display.find_dpll(limit, crtc_state,
8907 crtc_state->port_clock,
8908 refclk, NULL, clock);
8909 if (!ret)
8910 return false;
8911
8912 return true;
8913 }
8914
8915 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8916 {
8917 /*
8918 * Account for spread spectrum to avoid
8919 * oversubscribing the link. Max center spread
8920 * is 2.5%; use 5% for safety's sake.
8921 */
8922 u32 bps = target_clock * bpp * 21 / 20;
8923 return DIV_ROUND_UP(bps, link_bw * 8);
8924 }
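
/*
 * Worked example (sketch): a 148500 kHz dotclock at bpp = 24 over an
 * example per-lane link_bw of 270000 needs
 *
 *	bps   = 148500 * 24 * 21 / 20;			== 3742200
 *	lanes = DIV_ROUND_UP(3742200, 270000 * 8);	== 2
 *
 * i.e. two FDI lanes once the 5% spread spectrum margin is included.
 */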
8925
8926 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8927 {
8928 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8929 }
8930
8931 static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8932 struct intel_crtc_state *crtc_state,
8933 u32 *fp,
8934 intel_clock_t *reduced_clock, u32 *fp2)
8935 {
8936 struct drm_crtc *crtc = &intel_crtc->base;
8937 struct drm_device *dev = crtc->dev;
8938 struct drm_i915_private *dev_priv = dev->dev_private;
8939 struct drm_atomic_state *state = crtc_state->base.state;
8940 struct drm_connector *connector;
8941 struct drm_connector_state *connector_state;
8942 struct intel_encoder *encoder;
8943 uint32_t dpll;
8944 int factor, num_connectors = 0, i;
8945 bool is_lvds = false, is_sdvo = false;
8946
8947 for_each_connector_in_state(state, connector, connector_state, i) {
8948 if (connector_state->crtc != crtc_state->base.crtc)
8949 continue;
8950
8951 encoder = to_intel_encoder(connector_state->best_encoder);
8952
8953 switch (encoder->type) {
8954 case INTEL_OUTPUT_LVDS:
8955 is_lvds = true;
8956 break;
8957 case INTEL_OUTPUT_SDVO:
8958 case INTEL_OUTPUT_HDMI:
8959 is_sdvo = true;
8960 break;
8961 default:
8962 break;
8963 }
8964
8965 num_connectors++;
8966 }
8967
8968 /* Enable autotuning of the PLL clock (if permissible) */
8969 factor = 21;
8970 if (is_lvds) {
8971 if ((intel_panel_use_ssc(dev_priv) &&
8972 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8973 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8974 factor = 25;
8975 } else if (crtc_state->sdvo_tv_clock)
8976 factor = 20;
8977
8978 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8979 *fp |= FP_CB_TUNE;
8980
8981 if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8982 *fp2 |= FP_CB_TUNE;
8983
8984 dpll = 0;
8985
8986 if (is_lvds)
8987 dpll |= DPLLB_MODE_LVDS;
8988 else
8989 dpll |= DPLLB_MODE_DAC_SERIAL;
8990
8991 dpll |= (crtc_state->pixel_multiplier - 1)
8992 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8993
8994 if (is_sdvo)
8995 dpll |= DPLL_SDVO_HIGH_SPEED;
8996 if (crtc_state->has_dp_encoder)
8997 dpll |= DPLL_SDVO_HIGH_SPEED;
8998
8999 /* compute bitmask from p1 value */
9000 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9001 /* also FPA1 */
9002 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
9003
9004 switch (crtc_state->dpll.p2) {
9005 case 5:
9006 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9007 break;
9008 case 7:
9009 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9010 break;
9011 case 10:
9012 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9013 break;
9014 case 14:
9015 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9016 break;
9017 }
9018
9019 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
9020 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
9021 else
9022 dpll |= PLL_REF_INPUT_DREFCLK;
9023
9024 return dpll | DPLL_VCO_ENABLE;
9025 }
9026
9027 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9028 struct intel_crtc_state *crtc_state)
9029 {
9030 struct drm_device *dev = crtc->base.dev;
9031 intel_clock_t clock, reduced_clock;
9032 u32 dpll = 0, fp = 0, fp2 = 0;
9033 bool ok, has_reduced_clock = false;
9034 bool is_lvds = false;
9035 struct intel_shared_dpll *pll;
9036
9037 memset(&crtc_state->dpll_hw_state, 0,
9038 sizeof(crtc_state->dpll_hw_state));
9039
9040 is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
9041
9042 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
9043 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
9044
9045 ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
9046 &has_reduced_clock, &reduced_clock);
9047 if (!ok && !crtc_state->clock_set) {
9048 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9049 return -EINVAL;
9050 }
9051 /* Compat-code for transition, will disappear. */
9052 if (!crtc_state->clock_set) {
9053 crtc_state->dpll.n = clock.n;
9054 crtc_state->dpll.m1 = clock.m1;
9055 crtc_state->dpll.m2 = clock.m2;
9056 crtc_state->dpll.p1 = clock.p1;
9057 crtc_state->dpll.p2 = clock.p2;
9058 }
9059
9060 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9061 if (crtc_state->has_pch_encoder) {
9062 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9063 if (has_reduced_clock)
9064 fp2 = i9xx_dpll_compute_fp(&reduced_clock);
9065
9066 dpll = ironlake_compute_dpll(crtc, crtc_state,
9067 &fp, &reduced_clock,
9068 has_reduced_clock ? &fp2 : NULL);
9069
9070 crtc_state->dpll_hw_state.dpll = dpll;
9071 crtc_state->dpll_hw_state.fp0 = fp;
9072 if (has_reduced_clock)
9073 crtc_state->dpll_hw_state.fp1 = fp2;
9074 else
9075 crtc_state->dpll_hw_state.fp1 = fp;
9076
9077 pll = intel_get_shared_dpll(crtc, crtc_state);
9078 if (pll == NULL) {
9079 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
9080 pipe_name(crtc->pipe));
9081 return -EINVAL;
9082 }
9083 }
9084
9085 if (is_lvds && has_reduced_clock)
9086 crtc->lowfreq_avail = true;
9087 else
9088 crtc->lowfreq_avail = false;
9089
9090 return 0;
9091 }
9092
9093 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9094 struct intel_link_m_n *m_n)
9095 {
9096 struct drm_device *dev = crtc->base.dev;
9097 struct drm_i915_private *dev_priv = dev->dev_private;
9098 enum pipe pipe = crtc->pipe;
9099
9100 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9101 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9102 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9103 & ~TU_SIZE_MASK;
9104 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9105 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9106 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9107 }
9108
9109 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9110 enum transcoder transcoder,
9111 struct intel_link_m_n *m_n,
9112 struct intel_link_m_n *m2_n2)
9113 {
9114 struct drm_device *dev = crtc->base.dev;
9115 struct drm_i915_private *dev_priv = dev->dev_private;
9116 enum pipe pipe = crtc->pipe;
9117
9118 if (INTEL_INFO(dev)->gen >= 5) {
9119 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9120 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9121 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9122 & ~TU_SIZE_MASK;
9123 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9124 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9125 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9126 /* Read the M2_N2 registers only on gen < 8, where they exist,
9127 * and only if DRRS is supported, so that the registers are not
9128 * read unnecessarily.
9129 */
9130 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
9131 crtc->config->has_drrs) {
9132 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9133 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9134 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9135 & ~TU_SIZE_MASK;
9136 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9137 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9138 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9139 }
9140 } else {
9141 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9142 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9143 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9144 & ~TU_SIZE_MASK;
9145 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9146 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9147 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9148 }
9149 }
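/*
 * Illustrative sketch, not driver code: the DATA_M registers read above
 * pack the TU size (stored as tu - 1) in the bits covered by TU_SIZE_MASK,
 * with the M value in the remaining low bits. A hypothetical decode of one
 * raw register value, mirroring the mask/shift arithmetic above:
 */
#if 0
static void example_decode_data_m(unsigned int raw,
unsigned int tu_mask, unsigned int tu_shift,
unsigned int *gmch_m, unsigned int *tu)
{
*gmch_m = raw & ~tu_mask; /* low bits hold the M value */
*tu = ((raw & tu_mask) >> tu_shift) + 1; /* TU size is stored as tu - 1 */
}
#endif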
9150
9151 void intel_dp_get_m_n(struct intel_crtc *crtc,
9152 struct intel_crtc_state *pipe_config)
9153 {
9154 if (pipe_config->has_pch_encoder)
9155 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9156 else
9157 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9158 &pipe_config->dp_m_n,
9159 &pipe_config->dp_m2_n2);
9160 }
9161
9162 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
9163 struct intel_crtc_state *pipe_config)
9164 {
9165 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
9166 &pipe_config->fdi_m_n, NULL);
9167 }
9168
9169 static void skylake_get_pfit_config(struct intel_crtc *crtc,
9170 struct intel_crtc_state *pipe_config)
9171 {
9172 struct drm_device *dev = crtc->base.dev;
9173 struct drm_i915_private *dev_priv = dev->dev_private;
9174 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9175 uint32_t ps_ctrl = 0;
9176 int id = -1;
9177 int i;
9178
9179 /* find scaler attached to this pipe */
9180 for (i = 0; i < crtc->num_scalers; i++) {
9181 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9182 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9183 id = i;
9184 pipe_config->pch_pfit.enabled = true;
9185 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9186 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9187 break;
9188 }
9189 }
9190
9191 scaler_state->scaler_id = id;
9192 if (id >= 0) {
9193 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9194 } else {
9195 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9196 }
9197 }
9198
9199 static void
9200 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9201 struct intel_initial_plane_config *plane_config)
9202 {
9203 struct drm_device *dev = crtc->base.dev;
9204 struct drm_i915_private *dev_priv = dev->dev_private;
9205 u32 val, base, offset, stride_mult, tiling;
9206 int pipe = crtc->pipe;
9207 int fourcc, pixel_format;
9208 unsigned int aligned_height;
9209 struct drm_framebuffer *fb;
9210 struct intel_framebuffer *intel_fb;
9211
9212 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9213 if (!intel_fb) {
9214 DRM_DEBUG_KMS("failed to alloc fb\n");
9215 return;
9216 }
9217
9218 fb = &intel_fb->base;
9219
9220 val = I915_READ(PLANE_CTL(pipe, 0));
9221 if (!(val & PLANE_CTL_ENABLE))
9222 goto error;
9223
9224 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9225 fourcc = skl_format_to_fourcc(pixel_format,
9226 val & PLANE_CTL_ORDER_RGBX,
9227 val & PLANE_CTL_ALPHA_MASK);
9228 fb->pixel_format = fourcc;
9229 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9230
9231 tiling = val & PLANE_CTL_TILED_MASK;
9232 switch (tiling) {
9233 case PLANE_CTL_TILED_LINEAR:
9234 fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9235 break;
9236 case PLANE_CTL_TILED_X:
9237 plane_config->tiling = I915_TILING_X;
9238 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9239 break;
9240 case PLANE_CTL_TILED_Y:
9241 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9242 break;
9243 case PLANE_CTL_TILED_YF:
9244 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9245 break;
9246 default:
9247 MISSING_CASE(tiling);
9248 goto error;
9249 }
9250
9251 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9252 plane_config->base = base;
9253
9254 offset = I915_READ(PLANE_OFFSET(pipe, 0));
9255
9256 val = I915_READ(PLANE_SIZE(pipe, 0));
9257 fb->height = ((val >> 16) & 0xfff) + 1;
9258 fb->width = ((val >> 0) & 0x1fff) + 1;
9259
9260 val = I915_READ(PLANE_STRIDE(pipe, 0));
9261 stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
9262 fb->pixel_format);
9263 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9264
9265 aligned_height = intel_fb_align_height(dev, fb->height,
9266 fb->pixel_format,
9267 fb->modifier[0]);
9268
9269 plane_config->size = fb->pitches[0] * aligned_height;
9270
9271 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9272 pipe_name(pipe), fb->width, fb->height,
9273 fb->bits_per_pixel, base, fb->pitches[0],
9274 plane_config->size);
9275
9276 plane_config->fb = intel_fb;
9277 return;
9278
9279 error:
9280 kfree(fb);
9281 }
9282
9283 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9284 struct intel_crtc_state *pipe_config)
9285 {
9286 struct drm_device *dev = crtc->base.dev;
9287 struct drm_i915_private *dev_priv = dev->dev_private;
9288 uint32_t tmp;
9289
9290 tmp = I915_READ(PF_CTL(crtc->pipe));
9291
9292 if (tmp & PF_ENABLE) {
9293 pipe_config->pch_pfit.enabled = true;
9294 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9295 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9296
9297 /* We currently do not free assignments of panel fitters on
9298 * ivb/hsw (since we don't use the higher upscaling modes which
9299 * differentiate them) so just WARN about this case for now. */
9300 if (IS_GEN7(dev)) {
9301 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9302 PF_PIPE_SEL_IVB(crtc->pipe));
9303 }
9304 }
9305 }
9306
9307 static void
9308 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9309 struct intel_initial_plane_config *plane_config)
9310 {
9311 struct drm_device *dev = crtc->base.dev;
9312 struct drm_i915_private *dev_priv = dev->dev_private;
9313 u32 val, base, offset;
9314 int pipe = crtc->pipe;
9315 int fourcc, pixel_format;
9316 unsigned int aligned_height;
9317 struct drm_framebuffer *fb;
9318 struct intel_framebuffer *intel_fb;
9319
9320 val = I915_READ(DSPCNTR(pipe));
9321 if (!(val & DISPLAY_PLANE_ENABLE))
9322 return;
9323
9324 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9325 if (!intel_fb) {
9326 DRM_DEBUG_KMS("failed to alloc fb\n");
9327 return;
9328 }
9329
9330 fb = &intel_fb->base;
9331
9332 if (INTEL_INFO(dev)->gen >= 4) {
9333 if (val & DISPPLANE_TILED) {
9334 plane_config->tiling = I915_TILING_X;
9335 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9336 }
9337 }
9338
9339 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9340 fourcc = i9xx_format_to_fourcc(pixel_format);
9341 fb->pixel_format = fourcc;
9342 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9343
9344 base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9345 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9346 offset = I915_READ(DSPOFFSET(pipe));
9347 } else {
9348 if (plane_config->tiling)
9349 offset = I915_READ(DSPTILEOFF(pipe));
9350 else
9351 offset = I915_READ(DSPLINOFF(pipe));
9352 }
9353 plane_config->base = base;
9354
9355 val = I915_READ(PIPESRC(pipe));
9356 fb->width = ((val >> 16) & 0xfff) + 1;
9357 fb->height = ((val >> 0) & 0xfff) + 1;
9358
9359 val = I915_READ(DSPSTRIDE(pipe));
9360 fb->pitches[0] = val & 0xffffffc0;
9361
9362 aligned_height = intel_fb_align_height(dev, fb->height,
9363 fb->pixel_format,
9364 fb->modifier[0]);
9365
9366 plane_config->size = fb->pitches[0] * aligned_height;
9367
9368 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9369 pipe_name(pipe), fb->width, fb->height,
9370 fb->bits_per_pixel, base, fb->pitches[0],
9371 plane_config->size);
9372
9373 plane_config->fb = intel_fb;
9374 }
9375
9376 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9377 struct intel_crtc_state *pipe_config)
9378 {
9379 struct drm_device *dev = crtc->base.dev;
9380 struct drm_i915_private *dev_priv = dev->dev_private;
9381 enum intel_display_power_domain power_domain;
9382 uint32_t tmp;
9383 bool ret;
9384
9385 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9386 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9387 return false;
9388
9389 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9390 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9391
9392 ret = false;
9393 tmp = I915_READ(PIPECONF(crtc->pipe));
9394 if (!(tmp & PIPECONF_ENABLE))
9395 goto out;
9396
9397 switch (tmp & PIPECONF_BPC_MASK) {
9398 case PIPECONF_6BPC:
9399 pipe_config->pipe_bpp = 18;
9400 break;
9401 case PIPECONF_8BPC:
9402 pipe_config->pipe_bpp = 24;
9403 break;
9404 case PIPECONF_10BPC:
9405 pipe_config->pipe_bpp = 30;
9406 break;
9407 case PIPECONF_12BPC:
9408 pipe_config->pipe_bpp = 36;
9409 break;
9410 default:
9411 break;
9412 }
9413
9414 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9415 pipe_config->limited_color_range = true;
9416
9417 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9418 struct intel_shared_dpll *pll;
9419
9420 pipe_config->has_pch_encoder = true;
9421
9422 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9423 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9424 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9425
9426 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9427
9428 if (HAS_PCH_IBX(dev_priv->dev)) {
9429 pipe_config->shared_dpll =
9430 (enum intel_dpll_id) crtc->pipe;
9431 } else {
9432 tmp = I915_READ(PCH_DPLL_SEL);
9433 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9434 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9435 else
9436 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9437 }
9438
9439 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9440
9441 WARN_ON(!pll->get_hw_state(dev_priv, pll,
9442 &pipe_config->dpll_hw_state));
9443
9444 tmp = pipe_config->dpll_hw_state.dpll;
9445 pipe_config->pixel_multiplier =
9446 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9447 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9448
9449 ironlake_pch_clock_get(crtc, pipe_config);
9450 } else {
9451 pipe_config->pixel_multiplier = 1;
9452 }
9453
9454 intel_get_pipe_timings(crtc, pipe_config);
9455
9456 ironlake_get_pfit_config(crtc, pipe_config);
9457
9458 ret = true;
9459
9460 out:
9461 intel_display_power_put(dev_priv, power_domain);
9462
9463 return ret;
9464 }
9465
9466 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9467 {
9468 struct drm_device *dev = dev_priv->dev;
9469 struct intel_crtc *crtc;
9470
9471 for_each_intel_crtc(dev, crtc)
9472 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9473 pipe_name(crtc->pipe));
9474
9475 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9476 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9477 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9478 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9479 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9480 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9481 "CPU PWM1 enabled\n");
9482 if (IS_HASWELL(dev))
9483 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9484 "CPU PWM2 enabled\n");
9485 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9486 "PCH PWM1 enabled\n");
9487 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9488 "Utility pin enabled\n");
9489 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9490
9491 /*
9492 * In theory we can still leave IRQs enabled, as long as only the HPD
9493 * interrupts remain enabled. We used to check for that, but since it's
9494 * gen-specific and since we only disable LCPLL after we fully disable
9495 * the interrupts, the check below should be enough.
9496 */
9497 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9498 }
9499
9500 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9501 {
9502 struct drm_device *dev = dev_priv->dev;
9503
9504 if (IS_HASWELL(dev))
9505 return I915_READ(D_COMP_HSW);
9506 else
9507 return I915_READ(D_COMP_BDW);
9508 }
9509
9510 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9511 {
9512 struct drm_device *dev = dev_priv->dev;
9513
9514 if (IS_HASWELL(dev)) {
9515 mutex_lock(&dev_priv->rps.hw_lock);
9516 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9517 val))
9518 DRM_ERROR("Failed to write to D_COMP\n");
9519 mutex_unlock(&dev_priv->rps.hw_lock);
9520 } else {
9521 I915_WRITE(D_COMP_BDW, val);
9522 POSTING_READ(D_COMP_BDW);
9523 }
9524 }
9525
9526 /*
9527 * This function implements pieces of two sequences from BSpec:
9528 * - Sequence for display software to disable LCPLL
9529 * - Sequence for display software to allow package C8+
9530 * The steps implemented here are just the steps that actually touch the LCPLL
9531 * register. Callers should take care of disabling all the display engine
9532 * functions, doing the mode unset, fixing interrupts, etc.
9533 */
9534 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9535 bool switch_to_fclk, bool allow_power_down)
9536 {
9537 uint32_t val;
9538
9539 assert_can_disable_lcpll(dev_priv);
9540
9541 val = I915_READ(LCPLL_CTL);
9542
9543 if (switch_to_fclk) {
9544 val |= LCPLL_CD_SOURCE_FCLK;
9545 I915_WRITE(LCPLL_CTL, val);
9546
9547 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9548 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9549 DRM_ERROR("Switching to FCLK failed\n");
9550
9551 val = I915_READ(LCPLL_CTL);
9552 }
9553
9554 val |= LCPLL_PLL_DISABLE;
9555 I915_WRITE(LCPLL_CTL, val);
9556 POSTING_READ(LCPLL_CTL);
9557
9558 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9559 DRM_ERROR("LCPLL still locked\n");
9560
9561 val = hsw_read_dcomp(dev_priv);
9562 val |= D_COMP_COMP_DISABLE;
9563 hsw_write_dcomp(dev_priv, val);
9564 ndelay(100);
9565
9566 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9567 1))
9568 DRM_ERROR("D_COMP RCOMP still in progress\n");
9569
9570 if (allow_power_down) {
9571 val = I915_READ(LCPLL_CTL);
9572 val |= LCPLL_POWER_DOWN_ALLOW;
9573 I915_WRITE(LCPLL_CTL, val);
9574 POSTING_READ(LCPLL_CTL);
9575 }
9576 }
9577
9578 /*
9579 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9580 * source.
9581 */
9582 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9583 {
9584 uint32_t val;
9585
9586 val = I915_READ(LCPLL_CTL);
9587
9588 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9589 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9590 return;
9591
9592 /*
9593 * Make sure we're not in the PC8 state before disabling PC8, otherwise
9594 * we'll hang the machine. To prevent entering PC8, just enable force_wake.
9595 */
9596 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9597
9598 if (val & LCPLL_POWER_DOWN_ALLOW) {
9599 val &= ~LCPLL_POWER_DOWN_ALLOW;
9600 I915_WRITE(LCPLL_CTL, val);
9601 POSTING_READ(LCPLL_CTL);
9602 }
9603
9604 val = hsw_read_dcomp(dev_priv);
9605 val |= D_COMP_COMP_FORCE;
9606 val &= ~D_COMP_COMP_DISABLE;
9607 hsw_write_dcomp(dev_priv, val);
9608
9609 val = I915_READ(LCPLL_CTL);
9610 val &= ~LCPLL_PLL_DISABLE;
9611 I915_WRITE(LCPLL_CTL, val);
9612
9613 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9614 DRM_ERROR("LCPLL not locked yet\n");
9615
9616 if (val & LCPLL_CD_SOURCE_FCLK) {
9617 val = I915_READ(LCPLL_CTL);
9618 val &= ~LCPLL_CD_SOURCE_FCLK;
9619 I915_WRITE(LCPLL_CTL, val);
9620
9621 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9622 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9623 DRM_ERROR("Switching back to LCPLL failed\n");
9624 }
9625
9626 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9627 intel_update_cdclk(dev_priv->dev);
9628 }
9629
9630 /*
9631 * Package states C8 and deeper are really deep PC states that can only be
9632 * reached when all the devices on the system allow it, so even if the graphics
9633 * device allows PC8+, it doesn't mean the system will actually get to these
9634 * states. Our driver only allows PC8+ when going into runtime PM.
9635 *
9636 * The requirements for PC8+ are that all the outputs are disabled, the power
9637 * well is disabled and most interrupts are disabled, and these are also
9638 * requirements for runtime PM. When these conditions are met, we manually do
9639 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9640 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9641 * hang the machine.
9642 *
9643 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9644 * the state of some registers, so when we come back from PC8+ we need to
9645 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9646 * need to take care of the registers kept by RC6. Notice that this happens even
9647 * if we don't put the device in PCI D3 state (which is what currently happens
9648 * because of the runtime PM support).
9649 *
9650 * For more, read "Display Sequences for Package C8" on the hardware
9651 * documentation.
9652 */
9653 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9654 {
9655 struct drm_device *dev = dev_priv->dev;
9656 uint32_t val;
9657
9658 DRM_DEBUG_KMS("Enabling package C8+\n");
9659
9660 if (HAS_PCH_LPT_LP(dev)) {
9661 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9662 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9663 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9664 }
9665
9666 lpt_disable_clkout_dp(dev);
9667 hsw_disable_lcpll(dev_priv, true, true);
9668 }
9669
9670 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9671 {
9672 struct drm_device *dev = dev_priv->dev;
9673 uint32_t val;
9674
9675 DRM_DEBUG_KMS("Disabling package C8+\n");
9676
9677 hsw_restore_lcpll(dev_priv);
9678 lpt_init_pch_refclk(dev);
9679
9680 if (HAS_PCH_LPT_LP(dev)) {
9681 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9682 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9683 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9684 }
9685
9686 intel_prepare_ddi(dev);
9687 }
9688
9689 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9690 {
9691 struct drm_device *dev = old_state->dev;
9692 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9693
9694 broxton_set_cdclk(dev, req_cdclk);
9695 }
9696
9697 /* compute the max rate for new configuration */
9698 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9699 {
9700 struct intel_crtc *intel_crtc;
9701 struct intel_crtc_state *crtc_state;
9702 int max_pixel_rate = 0;
9703
9704 for_each_intel_crtc(state->dev, intel_crtc) {
9705 int pixel_rate;
9706
9707 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
9708 if (IS_ERR(crtc_state))
9709 return PTR_ERR(crtc_state);
9710
9711 if (!crtc_state->base.enable)
9712 continue;
9713
9714 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9715
9716 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9717 if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
9718 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9719
9720 max_pixel_rate = max(max_pixel_rate, pixel_rate);
9721 }
9722
9723 return max_pixel_rate;
9724 }
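/*
 * Worked example (illustrative): with IPS enabled on BDW, a mode with a
 * 450000 kHz pixel rate is scaled to DIV_ROUND_UP(450000 * 100, 95) =
 * 473685 kHz, which in turn forces the cdclk selection in
 * broadwell_modeset_calc_cdclk() up from the 450 MHz step to 540 MHz.
 */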
9725
9726 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9727 {
9728 struct drm_i915_private *dev_priv = dev->dev_private;
9729 uint32_t val, data;
9730 int ret;
9731
9732 if (WARN((I915_READ(LCPLL_CTL) &
9733 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9734 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9735 LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9736 LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9737 "trying to change cdclk frequency with cdclk not enabled\n"))
9738 return;
9739
9740 mutex_lock(&dev_priv->rps.hw_lock);
9741 ret = sandybridge_pcode_write(dev_priv,
9742 BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9743 mutex_unlock(&dev_priv->rps.hw_lock);
9744 if (ret) {
9745 DRM_ERROR("failed to inform pcode about cdclk change\n");
9746 return;
9747 }
9748
9749 val = I915_READ(LCPLL_CTL);
9750 val |= LCPLL_CD_SOURCE_FCLK;
9751 I915_WRITE(LCPLL_CTL, val);
9752
9753 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9754 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9755 DRM_ERROR("Switching to FCLK failed\n");
9756
9757 val = I915_READ(LCPLL_CTL);
9758 val &= ~LCPLL_CLK_FREQ_MASK;
9759
9760 switch (cdclk) {
9761 case 450000:
9762 val |= LCPLL_CLK_FREQ_450;
9763 data = 0;
9764 break;
9765 case 540000:
9766 val |= LCPLL_CLK_FREQ_54O_BDW;
9767 data = 1;
9768 break;
9769 case 337500:
9770 val |= LCPLL_CLK_FREQ_337_5_BDW;
9771 data = 2;
9772 break;
9773 case 675000:
9774 val |= LCPLL_CLK_FREQ_675_BDW;
9775 data = 3;
9776 break;
9777 default:
9778 WARN(1, "invalid cdclk frequency\n");
9779 return;
9780 }
9781
9782 I915_WRITE(LCPLL_CTL, val);
9783
9784 val = I915_READ(LCPLL_CTL);
9785 val &= ~LCPLL_CD_SOURCE_FCLK;
9786 I915_WRITE(LCPLL_CTL, val);
9787
9788 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9789 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9790 DRM_ERROR("Switching back to LCPLL failed\n");
9791
9792 mutex_lock(&dev_priv->rps.hw_lock);
9793 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9794 mutex_unlock(&dev_priv->rps.hw_lock);
9795
9796 intel_update_cdclk(dev);
9797
9798 WARN(cdclk != dev_priv->cdclk_freq,
9799 "cdclk requested %d kHz but got %d kHz\n",
9800 cdclk, dev_priv->cdclk_freq);
9801 }
9802
9803 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9804 {
9805 struct drm_i915_private *dev_priv = to_i915(state->dev);
9806 int max_pixclk = ilk_max_pixel_rate(state);
9807 int cdclk;
9808
9809 /*
9810 * FIXME should also account for plane ratio
9811 * once 64bpp pixel formats are supported.
9812 */
9813 if (max_pixclk > 540000)
9814 cdclk = 675000;
9815 else if (max_pixclk > 450000)
9816 cdclk = 540000;
9817 else if (max_pixclk > 337500)
9818 cdclk = 450000;
9819 else
9820 cdclk = 337500;
9821
9822 if (cdclk > dev_priv->max_cdclk_freq) {
9823 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9824 cdclk, dev_priv->max_cdclk_freq);
9825 return -EINVAL;
9826 }
9827
9828 to_intel_atomic_state(state)->cdclk = cdclk;
9829
9830 return 0;
9831 }
9832
9833 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9834 {
9835 struct drm_device *dev = old_state->dev;
9836 unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
9837
9838 broadwell_set_cdclk(dev, req_cdclk);
9839 }
9840
9841 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9842 struct intel_crtc_state *crtc_state)
9843 {
9844 if (!intel_ddi_pll_select(crtc, crtc_state))
9845 return -EINVAL;
9846
9847 crtc->lowfreq_avail = false;
9848
9849 return 0;
9850 }
9851
9852 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9853 enum port port,
9854 struct intel_crtc_state *pipe_config)
9855 {
9856 switch (port) {
9857 case PORT_A:
9858 pipe_config->ddi_pll_sel = SKL_DPLL0;
9859 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9860 break;
9861 case PORT_B:
9862 pipe_config->ddi_pll_sel = SKL_DPLL1;
9863 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9864 break;
9865 case PORT_C:
9866 pipe_config->ddi_pll_sel = SKL_DPLL2;
9867 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9868 break;
9869 default:
9870 DRM_ERROR("Incorrect port type\n");
9871 }
9872 }
9873
9874 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9875 enum port port,
9876 struct intel_crtc_state *pipe_config)
9877 {
9878 u32 temp, dpll_ctl1;
9879
9880 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9881 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9882
9883 switch (pipe_config->ddi_pll_sel) {
9884 case SKL_DPLL0:
9885 /*
9886 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9887 * of the shared DPLL framework and thus needs to be read out
9888 * separately
9889 */
9890 dpll_ctl1 = I915_READ(DPLL_CTRL1);
9891 pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9892 break;
9893 case SKL_DPLL1:
9894 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9895 break;
9896 case SKL_DPLL2:
9897 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9898 break;
9899 case SKL_DPLL3:
9900 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9901 break;
9902 }
9903 }
9904
9905 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9906 enum port port,
9907 struct intel_crtc_state *pipe_config)
9908 {
9909 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9910
9911 switch (pipe_config->ddi_pll_sel) {
9912 case PORT_CLK_SEL_WRPLL1:
9913 pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9914 break;
9915 case PORT_CLK_SEL_WRPLL2:
9916 pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9917 break;
9918 case PORT_CLK_SEL_SPLL:
9919 pipe_config->shared_dpll = DPLL_ID_SPLL;
9920 break;
9921 }
9922 }
9923
9924 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9925 struct intel_crtc_state *pipe_config)
9926 {
9927 struct drm_device *dev = crtc->base.dev;
9928 struct drm_i915_private *dev_priv = dev->dev_private;
9929 struct intel_shared_dpll *pll;
9930 enum port port;
9931 uint32_t tmp;
9932
9933 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9934
9935 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9936
9937 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9938 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9939 else if (IS_BROXTON(dev))
9940 bxt_get_ddi_pll(dev_priv, port, pipe_config);
9941 else
9942 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9943
9944 if (pipe_config->shared_dpll >= 0) {
9945 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9946
9947 WARN_ON(!pll->get_hw_state(dev_priv, pll,
9948 &pipe_config->dpll_hw_state));
9949 }
9950
9951 /*
9952 * Haswell has only FDI/PCH transcoder A, which is connected to
9953 * DDI E. So just check whether this pipe is wired to DDI E and whether
9954 * the PCH transcoder is on.
9955 */
9956 if (INTEL_INFO(dev)->gen < 9 &&
9957 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9958 pipe_config->has_pch_encoder = true;
9959
9960 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9961 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9962 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9963
9964 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9965 }
9966 }
9967
9968 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9969 struct intel_crtc_state *pipe_config)
9970 {
9971 struct drm_device *dev = crtc->base.dev;
9972 struct drm_i915_private *dev_priv = dev->dev_private;
9973 enum intel_display_power_domain power_domain;
9974 unsigned long power_domain_mask;
9975 uint32_t tmp;
9976 bool ret;
9977
9978 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9979 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9980 return false;
9981 power_domain_mask = BIT(power_domain);
9982
9983 ret = false;
9984
9985 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9986 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9987
9988 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9989 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9990 enum pipe trans_edp_pipe;
9991 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9992 default:
9993 WARN(1, "unknown pipe linked to edp transcoder\n");
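/* fall through: an unknown input setting is treated as pipe A */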
9994 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9995 case TRANS_DDI_EDP_INPUT_A_ON:
9996 trans_edp_pipe = PIPE_A;
9997 break;
9998 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9999 trans_edp_pipe = PIPE_B;
10000 break;
10001 case TRANS_DDI_EDP_INPUT_C_ONOFF:
10002 trans_edp_pipe = PIPE_C;
10003 break;
10004 }
10005
10006 if (trans_edp_pipe == crtc->pipe)
10007 pipe_config->cpu_transcoder = TRANSCODER_EDP;
10008 }
10009
10010 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
10011 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
10012 goto out;
10013 power_domain_mask |= BIT(power_domain);
10014
10015 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10016 if (!(tmp & PIPECONF_ENABLE))
10017 goto out;
10018
10019 haswell_get_ddi_port_state(crtc, pipe_config);
10020
10021 intel_get_pipe_timings(crtc, pipe_config);
10022
10023 if (INTEL_INFO(dev)->gen >= 9) {
10024 skl_init_scalers(dev, crtc, pipe_config);
10025 pipe_config->scaler_state.scaler_id = -1;
10026 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10027 }
10031
10032 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10033 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10034 power_domain_mask |= BIT(power_domain);
10035 if (INTEL_INFO(dev)->gen >= 9)
10036 skylake_get_pfit_config(crtc, pipe_config);
10037 else
10038 ironlake_get_pfit_config(crtc, pipe_config);
10039 }
10040
10041 if (IS_HASWELL(dev))
10042 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10043 (I915_READ(IPS_CTL) & IPS_ENABLE);
10044
10045 if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
10046 pipe_config->pixel_multiplier =
10047 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10048 } else {
10049 pipe_config->pixel_multiplier = 1;
10050 }
10051
10052 ret = true;
10053
10054 out:
10055 for_each_power_domain(power_domain, power_domain_mask)
10056 intel_display_power_put(dev_priv, power_domain);
10057
10058 return ret;
10059 }
10060
10061 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10062 {
10063 struct drm_device *dev = crtc->dev;
10064 struct drm_i915_private *dev_priv = dev->dev_private;
10065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10066 uint32_t cntl = 0, size = 0;
10067
10068 if (on) {
10069 unsigned int width = intel_crtc->base.cursor->state->crtc_w;
10070 unsigned int height = intel_crtc->base.cursor->state->crtc_h;
10071 unsigned int stride = roundup_pow_of_two(width) * 4;
10072
10073 switch (stride) {
10074 default:
10075 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10076 width, stride);
10077 stride = 256;
10078 /* fallthrough */
10079 case 256:
10080 case 512:
10081 case 1024:
10082 case 2048:
10083 break;
10084 }
10085
10086 cntl |= CURSOR_ENABLE |
10087 CURSOR_GAMMA_ENABLE |
10088 CURSOR_FORMAT_ARGB |
10089 CURSOR_STRIDE(stride);
10090
10091 size = (height << 12) | width;
10092 }
10093
10094 if (intel_crtc->cursor_cntl != 0 &&
10095 (intel_crtc->cursor_base != base ||
10096 intel_crtc->cursor_size != size ||
10097 intel_crtc->cursor_cntl != cntl)) {
10098 /* On these chipsets we can only modify the base/size/stride
10099 * whilst the cursor is disabled.
10100 */
10101 I915_WRITE(CURCNTR(PIPE_A), 0);
10102 POSTING_READ(CURCNTR(PIPE_A));
10103 intel_crtc->cursor_cntl = 0;
10104 }
10105
10106 if (intel_crtc->cursor_base != base) {
10107 I915_WRITE(CURBASE(PIPE_A), base);
10108 intel_crtc->cursor_base = base;
10109 }
10110
10111 if (intel_crtc->cursor_size != size) {
10112 I915_WRITE(CURSIZE, size);
10113 intel_crtc->cursor_size = size;
10114 }
10115
10116 if (intel_crtc->cursor_cntl != cntl) {
10117 I915_WRITE(CURCNTR(PIPE_A), cntl);
10118 POSTING_READ(CURCNTR(PIPE_A));
10119 intel_crtc->cursor_cntl = cntl;
10120 }
10121 }
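/*
 * Worked example (illustrative): for an ARGB cursor 100 pixels wide, the
 * stride computed above is roundup_pow_of_two(100) * 4 = 128 * 4 = 512
 * bytes, which lands on one of the valid 256/512/1024/2048 strides.
 */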
10122
10123 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
10124 {
10125 struct drm_device *dev = crtc->dev;
10126 struct drm_i915_private *dev_priv = dev->dev_private;
10127 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10128 int pipe = intel_crtc->pipe;
10129 uint32_t cntl = 0;
10130
10131 if (on) {
10132 cntl = MCURSOR_GAMMA_ENABLE;
10133 switch (intel_crtc->base.cursor->state->crtc_w) {
10134 case 64:
10135 cntl |= CURSOR_MODE_64_ARGB_AX;
10136 break;
10137 case 128:
10138 cntl |= CURSOR_MODE_128_ARGB_AX;
10139 break;
10140 case 256:
10141 cntl |= CURSOR_MODE_256_ARGB_AX;
10142 break;
10143 default:
10144 MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
10145 return;
10146 }
10147 cntl |= pipe << 28; /* Connect to correct pipe */
10148
10149 if (HAS_DDI(dev))
10150 cntl |= CURSOR_PIPE_CSC_ENABLE;
10151 }
10152
10153 if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
10154 cntl |= CURSOR_ROTATE_180;
10155
10156 if (intel_crtc->cursor_cntl != cntl) {
10157 I915_WRITE(CURCNTR(pipe), cntl);
10158 POSTING_READ(CURCNTR(pipe));
10159 intel_crtc->cursor_cntl = cntl;
10160 }
10161
10162 /* and commit changes on next vblank */
10163 I915_WRITE(CURBASE(pipe), base);
10164 POSTING_READ(CURBASE(pipe));
10165
10166 intel_crtc->cursor_base = base;
10167 }
10168
10169 /* If no part of the cursor is visible on the framebuffer, the GPU may hang... */
10170 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10171 bool on)
10172 {
10173 struct drm_device *dev = crtc->dev;
10174 struct drm_i915_private *dev_priv = dev->dev_private;
10175 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10176 int pipe = intel_crtc->pipe;
10177 struct drm_plane_state *cursor_state = crtc->cursor->state;
10178 int x = cursor_state->crtc_x;
10179 int y = cursor_state->crtc_y;
10180 u32 base = 0, pos = 0;
10181
10182 base = intel_crtc->cursor_addr;
10183
10184 if (x >= intel_crtc->config->pipe_src_w)
10185 on = false;
10186
10187 if (y >= intel_crtc->config->pipe_src_h)
10188 on = false;
10189
10190 if (x < 0) {
10191 if (x + cursor_state->crtc_w <= 0)
10192 on = false;
10193
10194 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10195 x = -x;
10196 }
10197 pos |= x << CURSOR_X_SHIFT;
10198
10199 if (y < 0) {
10200 if (y + cursor_state->crtc_h <= 0)
10201 on = false;
10202
10203 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10204 y = -y;
10205 }
10206 pos |= y << CURSOR_Y_SHIFT;
10207
10208 I915_WRITE(CURPOS(pipe), pos);
10209
10210 /* ILK+ do this automagically */
10211 if (HAS_GMCH_DISPLAY(dev) &&
10212 crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
10213 base += (cursor_state->crtc_h *
10214 cursor_state->crtc_w - 1) * 4;
10215 }
10216
10217 if (IS_845G(dev) || IS_I865G(dev))
10218 i845_update_cursor(crtc, base, on);
10219 else
10220 i9xx_update_cursor(crtc, base, on);
10221 }
10222
10223 static bool cursor_size_ok(struct drm_device *dev,
10224 uint32_t width, uint32_t height)
10225 {
10226 if (width == 0 || height == 0)
10227 return false;
10228
10229 /*
10230 * 845g/865g are special in that they are only limited by
10231 * the width of their cursors, the height is arbitrary up to
10232 * the precision of the register. Everything else requires
10233 * square cursors, limited to a few power-of-two sizes.
10234 */
10235 if (IS_845G(dev) || IS_I865G(dev)) {
10236 if ((width & 63) != 0)
10237 return false;
10238
10239 if (width > (IS_845G(dev) ? 64 : 512))
10240 return false;
10241
10242 if (height > 1023)
10243 return false;
10244 } else {
10245 switch (width | height) {
10246 case 256:
10247 case 128:
10248 if (IS_GEN2(dev))
10249 return false;
10250 case 64:
10251 break;
10252 default:
10253 return false;
10254 }
10255 }
10256
10257 return true;
10258 }
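/*
 * Illustrative note on the (width | height) switch above: since
 * non-845g/865g cursors must be square powers of two, width | height
 * equals width only when width == height. E.g. 128x128 -> 128 | 128 == 128
 * (valid on gen3+), while 128x64 -> 128 | 64 == 192 falls into the default
 * case and is rejected.
 */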
10259
10260 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
10261 u16 *blue, uint32_t start, uint32_t size)
10262 {
10263 int end = (start + size > 256) ? 256 : start + size, i;
10264 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10265
10266 for (i = start; i < end; i++) {
10267 intel_crtc->lut_r[i] = red[i] >> 8;
10268 intel_crtc->lut_g[i] = green[i] >> 8;
10269 intel_crtc->lut_b[i] = blue[i] >> 8;
10270 }
10271
10272 intel_crtc_load_lut(crtc);
10273 }
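/*
 * Note (illustrative): the >> 8 above converts the 16-bit per-channel
 * values DRM hands in to the hardware's 8-bit LUT entries, e.g.
 * 0xffff -> 0xff and 0x8000 -> 0x80.
 */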
10274
10275 /* VESA 640x480x72Hz mode to set on the pipe */
10276 static struct drm_display_mode load_detect_mode = {
10277 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10278 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10279 };
10280
10281 struct drm_framebuffer *
10282 __intel_framebuffer_create(struct drm_device *dev,
10283 struct drm_mode_fb_cmd2 *mode_cmd,
10284 struct drm_i915_gem_object *obj)
10285 {
10286 struct intel_framebuffer *intel_fb;
10287 int ret;
10288
10289 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10290 if (!intel_fb)
10291 return ERR_PTR(-ENOMEM);
10292
10293 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10294 if (ret)
10295 goto err;
10296
10297 return &intel_fb->base;
10298
10299 err:
10300 kfree(intel_fb);
10301 return ERR_PTR(ret);
10302 }
10303
10304 static struct drm_framebuffer *
10305 intel_framebuffer_create(struct drm_device *dev,
10306 struct drm_mode_fb_cmd2 *mode_cmd,
10307 struct drm_i915_gem_object *obj)
10308 {
10309 struct drm_framebuffer *fb;
10310 int ret;
10311
10312 ret = i915_mutex_lock_interruptible(dev);
10313 if (ret)
10314 return ERR_PTR(ret);
10315 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10316 mutex_unlock(&dev->struct_mutex);
10317
10318 return fb;
10319 }
10320
10321 static u32
10322 intel_framebuffer_pitch_for_width(int width, int bpp)
10323 {
10324 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10325 return ALIGN(pitch, 64);
10326 }
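/*
 * Worked example (illustrative): a 1366 pixel wide, 32 bpp framebuffer
 * needs DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes per line, which the
 * ALIGN() above rounds up to the next multiple of 64, i.e. 5504 bytes.
 */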
10327
10328 static u32
10329 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10330 {
10331 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10332 return PAGE_ALIGN(pitch * mode->vdisplay);
10333 }
10334
10335 static struct drm_framebuffer *
10336 intel_framebuffer_create_for_mode(struct drm_device *dev,
10337 struct drm_display_mode *mode,
10338 int depth, int bpp)
10339 {
10340 struct drm_framebuffer *fb;
10341 struct drm_i915_gem_object *obj;
10342 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10343
10344 obj = i915_gem_alloc_object(dev,
10345 intel_framebuffer_size_for_mode(mode, bpp));
10346 if (obj == NULL)
10347 return ERR_PTR(-ENOMEM);
10348
10349 mode_cmd.width = mode->hdisplay;
10350 mode_cmd.height = mode->vdisplay;
10351 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10352 bpp);
10353 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10354
10355 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10356 if (IS_ERR(fb))
10357 drm_gem_object_unreference_unlocked(&obj->base);
10358
10359 return fb;
10360 }
10361
10362 static struct drm_framebuffer *
10363 mode_fits_in_fbdev(struct drm_device *dev,
10364 struct drm_display_mode *mode)
10365 {
10366 #ifdef CONFIG_DRM_FBDEV_EMULATION
10367 struct drm_i915_private *dev_priv = dev->dev_private;
10368 struct drm_i915_gem_object *obj;
10369 struct drm_framebuffer *fb;
10370
10371 if (!dev_priv->fbdev)
10372 return NULL;
10373
10374 if (!dev_priv->fbdev->fb)
10375 return NULL;
10376
10377 obj = dev_priv->fbdev->fb->obj;
10378 BUG_ON(!obj);
10379
10380 fb = &dev_priv->fbdev->fb->base;
10381 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10382 fb->bits_per_pixel))
10383 return NULL;
10384
10385 if (obj->base.size < mode->vdisplay * fb->pitches[0])
10386 return NULL;
10387
10388 return fb;
10389 #else
10390 return NULL;
10391 #endif
10392 }
10393
10394 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10395 struct drm_crtc *crtc,
10396 struct drm_display_mode *mode,
10397 struct drm_framebuffer *fb,
10398 int x, int y)
10399 {
10400 struct drm_plane_state *plane_state;
10401 int hdisplay, vdisplay;
10402 int ret;
10403
10404 plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10405 if (IS_ERR(plane_state))
10406 return PTR_ERR(plane_state);
10407
10408 if (mode)
10409 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10410 else
10411 hdisplay = vdisplay = 0;
10412
10413 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10414 if (ret)
10415 return ret;
10416 drm_atomic_set_fb_for_plane(plane_state, fb);
10417 plane_state->crtc_x = 0;
10418 plane_state->crtc_y = 0;
10419 plane_state->crtc_w = hdisplay;
10420 plane_state->crtc_h = vdisplay;
10421 plane_state->src_x = x << 16;
10422 plane_state->src_y = y << 16;
10423 plane_state->src_w = hdisplay << 16;
10424 plane_state->src_h = vdisplay << 16;
10425
10426 return 0;
10427 }
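/*
 * Note (illustrative): the src_* fields are in 16.16 fixed point, which is
 * why the source rectangle above is built from the integer x/y/hdisplay/
 * vdisplay values shifted left by 16; src_w = hdisplay << 16 describes a
 * source width of exactly hdisplay pixels.
 */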
10428
10429 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10430 struct drm_display_mode *mode,
10431 struct intel_load_detect_pipe *old,
10432 struct drm_modeset_acquire_ctx *ctx)
10433 {
10434 struct intel_crtc *intel_crtc;
10435 struct intel_encoder *intel_encoder =
10436 intel_attached_encoder(connector);
10437 struct drm_crtc *possible_crtc;
10438 struct drm_encoder *encoder = &intel_encoder->base;
10439 struct drm_crtc *crtc = NULL;
10440 struct drm_device *dev = encoder->dev;
10441 struct drm_framebuffer *fb;
10442 struct drm_mode_config *config = &dev->mode_config;
10443 struct drm_atomic_state *state = NULL;
10444 struct drm_connector_state *connector_state;
10445 struct intel_crtc_state *crtc_state;
10446 int ret, i = -1;
10447
10448 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10449 connector->base.id, connector->name,
10450 encoder->base.id, encoder->name);
10451
10452 retry:
10453 ret = drm_modeset_lock(&config->connection_mutex, ctx);
10454 if (ret)
10455 goto fail;
10456
10457 /*
10458 * Algorithm gets a little messy:
10459 *
10460 * - if the connector already has an assigned crtc, use it (but make
10461 * sure it's on first)
10462 *
10463 * - try to find the first unused crtc that can drive this connector,
10464 * and use that if we find one
10465 */
10466
10467 /* See if we already have a CRTC for this connector */
10468 if (encoder->crtc) {
10469 crtc = encoder->crtc;
10470
10471 ret = drm_modeset_lock(&crtc->mutex, ctx);
10472 if (ret)
10473 goto fail;
10474 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10475 if (ret)
10476 goto fail;
10477
10478 old->dpms_mode = connector->dpms;
10479 old->load_detect_temp = false;
10480
10481 /* Make sure the crtc and connector are running */
10482 if (connector->dpms != DRM_MODE_DPMS_ON)
10483 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);
10484
10485 return true;
10486 }
10487
10488 /* Find an unused one (if possible) */
10489 for_each_crtc(dev, possible_crtc) {
10490 i++;
10491 if (!(encoder->possible_crtcs & (1 << i)))
10492 continue;
10493 if (possible_crtc->state->enable)
10494 continue;
10495
10496 crtc = possible_crtc;
10497 break;
10498 }
10499
10500 /*
10501 * If we didn't find an unused CRTC, don't use any.
10502 */
10503 if (!crtc) {
10504 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10505 goto fail;
10506 }
10507
10508 ret = drm_modeset_lock(&crtc->mutex, ctx);
10509 if (ret)
10510 goto fail;
10511 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10512 if (ret)
10513 goto fail;
10514
10515 intel_crtc = to_intel_crtc(crtc);
10516 old->dpms_mode = connector->dpms;
10517 old->load_detect_temp = true;
10518 old->release_fb = NULL;
10519
10520 state = drm_atomic_state_alloc(dev);
10521 if (!state)
10522 return false;
10523
10524 state->acquire_ctx = ctx;
10525
10526 connector_state = drm_atomic_get_connector_state(state, connector);
10527 if (IS_ERR(connector_state)) {
10528 ret = PTR_ERR(connector_state);
10529 goto fail;
10530 }
10531
10532 connector_state->crtc = crtc;
10533 connector_state->best_encoder = &intel_encoder->base;
10534
10535 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10536 if (IS_ERR(crtc_state)) {
10537 ret = PTR_ERR(crtc_state);
10538 goto fail;
10539 }
10540
10541 crtc_state->base.active = crtc_state->base.enable = true;
10542
10543 if (!mode)
10544 mode = &load_detect_mode;
10545
10546 /* We need a framebuffer large enough to accommodate all accesses
10547 * that the plane may generate whilst we perform load detection.
10548 * We cannot rely on the fbcon being present (we get called during
10549 * its initialisation to detect all boot displays, and it may not
10550 * even exist) or on it being large enough to satisfy the
10551 * requested mode.
10552 */
10553 fb = mode_fits_in_fbdev(dev, mode);
10554 if (fb == NULL) {
10555 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10556 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10557 old->release_fb = fb;
10558 } else
10559 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10560 if (IS_ERR(fb)) {
10561 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10562 goto fail;
10563 }
10564
10565 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10566 if (ret)
10567 goto fail;
10568
10569 drm_mode_copy(&crtc_state->base.mode, mode);
10570
10571 if (drm_atomic_commit(state)) {
10572 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10573 if (old->release_fb)
10574 old->release_fb->funcs->destroy(old->release_fb);
10575 goto fail;
10576 }
10577 crtc->primary->crtc = crtc;
10578
10579 /* let the connector get through one full cycle before testing */
10580 intel_wait_for_vblank(dev, intel_crtc->pipe);
10581 return true;
10582
10583 fail:
10584 drm_atomic_state_free(state);
10585 state = NULL;
10586
10587 if (ret == -EDEADLK) {
10588 drm_modeset_backoff(ctx);
10589 goto retry;
10590 }
10591
10592 return false;
10593 }
10594
10595 void intel_release_load_detect_pipe(struct drm_connector *connector,
10596 struct intel_load_detect_pipe *old,
10597 struct drm_modeset_acquire_ctx *ctx)
10598 {
10599 struct drm_device *dev = connector->dev;
10600 struct intel_encoder *intel_encoder =
10601 intel_attached_encoder(connector);
10602 struct drm_encoder *encoder = &intel_encoder->base;
10603 struct drm_crtc *crtc = encoder->crtc;
10604 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10605 struct drm_atomic_state *state;
10606 struct drm_connector_state *connector_state;
10607 struct intel_crtc_state *crtc_state;
10608 int ret;
10609
10610 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10611 connector->base.id, connector->name,
10612 encoder->base.id, encoder->name);
10613
10614 if (old->load_detect_temp) {
10615 state = drm_atomic_state_alloc(dev);
10616 if (!state)
10617 goto fail;
10618
10619 state->acquire_ctx = ctx;
10620
10621 connector_state = drm_atomic_get_connector_state(state, connector);
10622 if (IS_ERR(connector_state))
10623 goto fail;
10624
10625 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10626 if (IS_ERR(crtc_state))
10627 goto fail;
10628
10629 connector_state->best_encoder = NULL;
10630 connector_state->crtc = NULL;
10631
10632 crtc_state->base.enable = crtc_state->base.active = false;
10633
10634 ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
10635 0, 0);
10636 if (ret)
10637 goto fail;
10638
10639 ret = drm_atomic_commit(state);
10640 if (ret)
10641 goto fail;
10642
10643 if (old->release_fb) {
10644 drm_framebuffer_unregister_private(old->release_fb);
10645 drm_framebuffer_unreference(old->release_fb);
10646 }
10647
10648 return;
10649 }
10650
10651 /* Switch crtc and encoder back off if necessary */
10652 if (old->dpms_mode != DRM_MODE_DPMS_ON)
10653 connector->funcs->dpms(connector, old->dpms_mode);
10654
10655 return;
10656 fail:
10657 DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
10658 drm_atomic_state_free(state);
10659 }
10660
10661 static int i9xx_pll_refclk(struct drm_device *dev,
10662 const struct intel_crtc_state *pipe_config)
10663 {
10664 struct drm_i915_private *dev_priv = dev->dev_private;
10665 u32 dpll = pipe_config->dpll_hw_state.dpll;
10666
10667 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10668 return dev_priv->vbt.lvds_ssc_freq;
10669 else if (HAS_PCH_SPLIT(dev))
10670 return 120000;
10671 else if (!IS_GEN2(dev))
10672 return 96000;
10673 else
10674 return 48000;
10675 }
10676
10677 /* Returns the clock of the currently programmed mode of the given pipe. */
10678 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10679 struct intel_crtc_state *pipe_config)
10680 {
10681 struct drm_device *dev = crtc->base.dev;
10682 struct drm_i915_private *dev_priv = dev->dev_private;
10683 int pipe = pipe_config->cpu_transcoder;
10684 u32 dpll = pipe_config->dpll_hw_state.dpll;
10685 u32 fp;
10686 intel_clock_t clock;
10687 int port_clock;
10688 int refclk = i9xx_pll_refclk(dev, pipe_config);
10689
10690 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10691 fp = pipe_config->dpll_hw_state.fp0;
10692 else
10693 fp = pipe_config->dpll_hw_state.fp1;
10694
10695 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10696 if (IS_PINEVIEW(dev)) {
10697 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10698 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10699 } else {
10700 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10701 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10702 }
10703
10704 if (!IS_GEN2(dev)) {
10705 if (IS_PINEVIEW(dev))
10706 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10707 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10708 else
10709 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10710 DPLL_FPA01_P1_POST_DIV_SHIFT);
10711
10712 switch (dpll & DPLL_MODE_MASK) {
10713 case DPLLB_MODE_DAC_SERIAL:
10714 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10715 5 : 10;
10716 break;
10717 case DPLLB_MODE_LVDS:
10718 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10719 7 : 14;
10720 break;
10721 default:
10722 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10723 "mode\n", (int)(dpll & DPLL_MODE_MASK));
10724 return;
10725 }
10726
10727 if (IS_PINEVIEW(dev))
10728 port_clock = pnv_calc_dpll_params(refclk, &clock);
10729 else
10730 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10731 } else {
10732 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10733 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10734
10735 if (is_lvds) {
10736 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10737 DPLL_FPA01_P1_POST_DIV_SHIFT);
10738
10739 if (lvds & LVDS_CLKB_POWER_UP)
10740 clock.p2 = 7;
10741 else
10742 clock.p2 = 14;
10743 } else {
10744 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10745 clock.p1 = 2;
10746 else {
10747 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10748 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10749 }
10750 if (dpll & PLL_P2_DIVIDE_BY_4)
10751 clock.p2 = 4;
10752 else
10753 clock.p2 = 2;
10754 }
10755
10756 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10757 }
10758
10759 /*
10760 * This value includes pixel_multiplier. We will use
10761 * port_clock to compute adjusted_mode.crtc_clock in the
10762 * encoder's get_config() function.
10763 */
10764 pipe_config->port_clock = port_clock;
10765 }
10766
10767 int intel_dotclock_calculate(int link_freq,
10768 const struct intel_link_m_n *m_n)
10769 {
10770 /*
10771 * The calculation for the data clock is:
10772 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10773 * But we want to avoid losing precision if possible, so:
10774 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10775 *
10776 * and the dot clock we derive here from the link M/N is simpler:
10777 * dot_clock = (link_m * link_clock) / link_n
10778 */
10779
10780 if (!m_n->link_n)
10781 return 0;
10782
10783 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10784 }
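/*
 * Worked example (illustrative): with a 270000 kHz DP link and a
 * programmed link M/N whose ratio is equivalent to 22/40, the dot clock
 * recovered here is 22 * 270000 / 40 = 148500 kHz, i.e. the standard
 * 1080p pixel clock.
 */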
10785
10786 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10787 struct intel_crtc_state *pipe_config)
10788 {
10789 struct drm_device *dev = crtc->base.dev;
10790
10791 /* read out port_clock from the DPLL */
10792 i9xx_crtc_clock_get(crtc, pipe_config);
10793
10794 /*
10795 * This value does not include pixel_multiplier.
10796 * We will check that port_clock and adjusted_mode.crtc_clock
10797 * agree once we know their relationship in the encoder's
10798 * get_config() function.
10799 */
10800 pipe_config->base.adjusted_mode.crtc_clock =
10801 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10802 &pipe_config->fdi_m_n);
10803 }
10804
10805 /** Returns the currently programmed mode of the given pipe. */
10806 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10807 struct drm_crtc *crtc)
10808 {
10809 struct drm_i915_private *dev_priv = dev->dev_private;
10810 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10811 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10812 struct drm_display_mode *mode;
10813 struct intel_crtc_state pipe_config;
10814 int htot = I915_READ(HTOTAL(cpu_transcoder));
10815 int hsync = I915_READ(HSYNC(cpu_transcoder));
10816 int vtot = I915_READ(VTOTAL(cpu_transcoder));
10817 int vsync = I915_READ(VSYNC(cpu_transcoder));
10818 enum pipe pipe = intel_crtc->pipe;
10819
10820 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10821 if (!mode)
10822 return NULL;
10823
10824 /*
10825 * Construct a pipe_config sufficient for getting the clock info
10826 * back out of crtc_clock_get.
10827 *
10828 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10829 * to use a real value here instead.
10830 */
10831 pipe_config.cpu_transcoder = (enum transcoder) pipe;
10832 pipe_config.pixel_multiplier = 1;
10833 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10834 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10835 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10836 i9xx_crtc_clock_get(intel_crtc, &pipe_config);
10837
10838 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
10839 mode->hdisplay = (htot & 0xffff) + 1;
10840 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10841 mode->hsync_start = (hsync & 0xffff) + 1;
10842 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10843 mode->vdisplay = (vtot & 0xffff) + 1;
10844 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10845 mode->vsync_start = (vsync & 0xffff) + 1;
10846 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10847
10848 drm_mode_set_name(mode);
10849
10850 return mode;
10851 }
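/*
 * Worked example (illustrative): for a 640x480 mode with an 800 pixel
 * horizontal total, the HTOTAL register would read 0x031f027f, so the
 * decode above yields hdisplay = (0x027f & 0xffff) + 1 = 640 and
 * htotal = (0x031f0000 >> 16) + 1 = 800.
 */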
10852
10853 void intel_mark_busy(struct drm_device *dev)
10854 {
10855 struct drm_i915_private *dev_priv = dev->dev_private;
10856
10857 if (dev_priv->mm.busy)
10858 return;
10859
10860 intel_runtime_pm_get(dev_priv);
10861 i915_update_gfx_val(dev_priv);
10862 if (INTEL_INFO(dev)->gen >= 6)
10863 gen6_rps_busy(dev_priv);
10864 dev_priv->mm.busy = true;
10865 }
10866
10867 void intel_mark_idle(struct drm_device *dev)
10868 {
10869 struct drm_i915_private *dev_priv = dev->dev_private;
10870
10871 if (!dev_priv->mm.busy)
10872 return;
10873
10874 dev_priv->mm.busy = false;
10875
10876 if (INTEL_INFO(dev)->gen >= 6)
10877 gen6_rps_idle(dev_priv);
10878
10879 intel_runtime_pm_put(dev_priv);
10880 }
10881
10882 static void intel_crtc_destroy(struct drm_crtc *crtc)
10883 {
10884 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10885 struct drm_device *dev = crtc->dev;
10886 struct intel_unpin_work *work;
10887
10888 spin_lock_irq(&dev->event_lock);
10889 work = intel_crtc->unpin_work;
10890 intel_crtc->unpin_work = NULL;
10891 spin_unlock_irq(&dev->event_lock);
10892
10893 if (work) {
10894 cancel_work_sync(&work->work);
10895 kfree(work);
10896 }
10897
10898 drm_crtc_cleanup(crtc);
10899
10900 kfree(intel_crtc);
10901 }
10902
10903 static void intel_unpin_work_fn(struct work_struct *__work)
10904 {
10905 struct intel_unpin_work *work =
10906 container_of(__work, struct intel_unpin_work, work);
10907 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10908 struct drm_device *dev = crtc->base.dev;
10909 struct drm_plane *primary = crtc->base.primary;
10910
10911 mutex_lock(&dev->struct_mutex);
10912 intel_unpin_fb_obj(work->old_fb, primary->state);
10913 drm_gem_object_unreference(&work->pending_flip_obj->base);
10914
10915 if (work->flip_queued_req)
10916 i915_gem_request_assign(&work->flip_queued_req, NULL);
10917 mutex_unlock(&dev->struct_mutex);
10918
10919 intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
10920 drm_framebuffer_unreference(work->old_fb);
10921
10922 BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
10923 atomic_dec(&crtc->unpin_work_count);
10924
10925 kfree(work);
10926 }
10927
10928 static void do_intel_finish_page_flip(struct drm_device *dev,
10929 struct drm_crtc *crtc)
10930 {
10931 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10932 struct intel_unpin_work *work;
10933 unsigned long flags;
10934
10935 /* Ignore early vblank irqs */
10936 if (intel_crtc == NULL)
10937 return;
10938
10939 /*
10940 * This is called both by irq handlers and the reset code (to complete
10941 * lost pageflips) so needs the full irqsave spinlocks.
10942 */
10943 spin_lock_irqsave(&dev->event_lock, flags);
10944 work = intel_crtc->unpin_work;
10945
10946 /* Ensure we don't miss a work->pending update ... */
10947 smp_rmb();
10948
10949 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10950 spin_unlock_irqrestore(&dev->event_lock, flags);
10951 return;
10952 }
10953
10954 page_flip_completed(intel_crtc);
10955
10956 spin_unlock_irqrestore(&dev->event_lock, flags);
10957 }
10958
10959 void intel_finish_page_flip(struct drm_device *dev, int pipe)
10960 {
10961 struct drm_i915_private *dev_priv = dev->dev_private;
10962 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10963
10964 do_intel_finish_page_flip(dev, crtc);
10965 }
10966
10967 void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10968 {
10969 struct drm_i915_private *dev_priv = dev->dev_private;
10970 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10971
10972 do_intel_finish_page_flip(dev, crtc);
10973 }
10974
10975 /* Is 'a' after or equal to 'b'? */
10976 static bool g4x_flip_count_after_eq(u32 a, u32 b)
10977 {
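	/*
	 * The hardware flip counter is a free-running u32, so test the
	 * sign bit of the unsigned difference instead of comparing
	 * directly: 'a' counts as after 'b' for as long as it is less
	 * than 2^31 counts ahead, which makes the check wraparound-safe.
	 */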
10978 return !((a - b) & 0x80000000);
10979 }
10980
10981 static bool page_flip_finished(struct intel_crtc *crtc)
10982 {
10983 struct drm_device *dev = crtc->base.dev;
10984 struct drm_i915_private *dev_priv = dev->dev_private;
10985
10986 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
10987 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
10988 return true;
10989
10990 /*
10991 	 * The relevant registers don't exist on pre-ctg.
10992 * As the flip done interrupt doesn't trigger for mmio
10993 * flips on gmch platforms, a flip count check isn't
10994 * really needed there. But since ctg has the registers,
10995 * include it in the check anyway.
10996 */
10997 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
10998 return true;
10999
11000 /*
11001 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
11002 * used the same base address. In that case the mmio flip might
11003 * have completed, but the CS hasn't even executed the flip yet.
11004 *
11005 * A flip count check isn't enough as the CS might have updated
11006 * the base address just after start of vblank, but before we
11007 * managed to process the interrupt. This means we'd complete the
11008 * CS flip too soon.
11009 *
11010 * Combining both checks should get us a good enough result. It may
11011 * still happen that the CS flip has been executed, but has not
11012 * yet actually completed. But in case the base address is the same
11013 * anyway, we don't really care.
11014 */
11015 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11016 crtc->unpin_work->gtt_offset &&
11017 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11018 crtc->unpin_work->flip_count);
11019 }
11020
11021 void intel_prepare_page_flip(struct drm_device *dev, int plane)
11022 {
11023 struct drm_i915_private *dev_priv = dev->dev_private;
11024 struct intel_crtc *intel_crtc =
11025 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
11026 unsigned long flags;
11027
11028
11029 /*
11030 * This is called both by irq handlers and the reset code (to complete
11031 * lost pageflips) so needs the full irqsave spinlocks.
11032 *
11033 * NB: An MMIO update of the plane base pointer will also
11034 * generate a page-flip completion irq, i.e. every modeset
11035 * is also accompanied by a spurious intel_prepare_page_flip().
11036 */
11037 spin_lock_irqsave(&dev->event_lock, flags);
11038 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
11039 atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
11040 spin_unlock_irqrestore(&dev->event_lock, flags);
11041 }
11042
11043 static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
11044 {
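	/*
	 * The write barriers here pair with the smp_rmb() in
	 * do_intel_finish_page_flip(): any reader that observes
	 * work->pending >= INTEL_FLIP_PENDING is guaranteed to also see
	 * a fully initialized work item.
	 */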
11045 /* Ensure that the work item is consistent when activating it ... */
11046 smp_wmb();
11047 atomic_set(&work->pending, INTEL_FLIP_PENDING);
11048 /* and that it is marked active as soon as the irq could fire. */
11049 smp_wmb();
11050 }
11051
11052 static int intel_gen2_queue_flip(struct drm_device *dev,
11053 struct drm_crtc *crtc,
11054 struct drm_framebuffer *fb,
11055 struct drm_i915_gem_object *obj,
11056 struct drm_i915_gem_request *req,
11057 uint32_t flags)
11058 {
11059 struct intel_engine_cs *ring = req->ring;
11060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11061 u32 flip_mask;
11062 int ret;
11063
11064 ret = intel_ring_begin(req, 6);
11065 if (ret)
11066 return ret;
11067
11068 /* Can't queue multiple flips, so wait for the previous
11069 * one to finish before executing the next.
11070 */
11071 if (intel_crtc->plane)
11072 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11073 else
11074 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11075 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11076 intel_ring_emit(ring, MI_NOOP);
11077 intel_ring_emit(ring, MI_DISPLAY_FLIP |
11078 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11079 intel_ring_emit(ring, fb->pitches[0]);
11080 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11081 intel_ring_emit(ring, 0); /* aux display base address, unused */
11082
11083 intel_mark_page_flip_active(intel_crtc->unpin_work);
11084 return 0;
11085 }
11086
11087 static int intel_gen3_queue_flip(struct drm_device *dev,
11088 struct drm_crtc *crtc,
11089 struct drm_framebuffer *fb,
11090 struct drm_i915_gem_object *obj,
11091 struct drm_i915_gem_request *req,
11092 uint32_t flags)
11093 {
11094 struct intel_engine_cs *ring = req->ring;
11095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11096 u32 flip_mask;
11097 int ret;
11098
11099 ret = intel_ring_begin(req, 6);
11100 if (ret)
11101 return ret;
11102
11103 if (intel_crtc->plane)
11104 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11105 else
11106 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11107 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
11108 intel_ring_emit(ring, MI_NOOP);
11109 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
11110 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11111 intel_ring_emit(ring, fb->pitches[0]);
11112 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11113 intel_ring_emit(ring, MI_NOOP);
11114
11115 intel_mark_page_flip_active(intel_crtc->unpin_work);
11116 return 0;
11117 }
11118
11119 static int intel_gen4_queue_flip(struct drm_device *dev,
11120 struct drm_crtc *crtc,
11121 struct drm_framebuffer *fb,
11122 struct drm_i915_gem_object *obj,
11123 struct drm_i915_gem_request *req,
11124 uint32_t flags)
11125 {
11126 struct intel_engine_cs *ring = req->ring;
11127 struct drm_i915_private *dev_priv = dev->dev_private;
11128 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11129 uint32_t pf, pipesrc;
11130 int ret;
11131
11132 ret = intel_ring_begin(req, 4);
11133 if (ret)
11134 return ret;
11135
11136 /* i965+ uses the linear or tiled offsets from the
11137 * Display Registers (which do not change across a page-flip)
11138 * so we need only reprogram the base address.
11139 */
11140 intel_ring_emit(ring, MI_DISPLAY_FLIP |
11141 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11142 intel_ring_emit(ring, fb->pitches[0]);
11143 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
11144 obj->tiling_mode);
11145
11146 /* XXX Enabling the panel-fitter across page-flip is so far
11147 * untested on non-native modes, so ignore it for now.
11148 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11149 */
11150 pf = 0;
11151 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11152 intel_ring_emit(ring, pf | pipesrc);
11153
11154 intel_mark_page_flip_active(intel_crtc->unpin_work);
11155 return 0;
11156 }
11157
11158 static int intel_gen6_queue_flip(struct drm_device *dev,
11159 struct drm_crtc *crtc,
11160 struct drm_framebuffer *fb,
11161 struct drm_i915_gem_object *obj,
11162 struct drm_i915_gem_request *req,
11163 uint32_t flags)
11164 {
11165 struct intel_engine_cs *ring = req->ring;
11166 struct drm_i915_private *dev_priv = dev->dev_private;
11167 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11168 uint32_t pf, pipesrc;
11169 int ret;
11170
11171 ret = intel_ring_begin(req, 4);
11172 if (ret)
11173 return ret;
11174
11175 intel_ring_emit(ring, MI_DISPLAY_FLIP |
11176 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11177 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
11178 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11179
11180 /* Contrary to the suggestions in the documentation,
11181 * "Enable Panel Fitter" does not seem to be required when page
11182 	 * flipping with a non-native mode, and worse, causes a normal
11183 * modeset to fail.
11184 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11185 */
11186 pf = 0;
11187 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11188 intel_ring_emit(ring, pf | pipesrc);
11189
11190 intel_mark_page_flip_active(intel_crtc->unpin_work);
11191 return 0;
11192 }
11193
11194 static int intel_gen7_queue_flip(struct drm_device *dev,
11195 struct drm_crtc *crtc,
11196 struct drm_framebuffer *fb,
11197 struct drm_i915_gem_object *obj,
11198 struct drm_i915_gem_request *req,
11199 uint32_t flags)
11200 {
11201 struct intel_engine_cs *ring = req->ring;
11202 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11203 uint32_t plane_bit = 0;
11204 int len, ret;
11205
11206 switch (intel_crtc->plane) {
11207 case PLANE_A:
11208 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11209 break;
11210 case PLANE_B:
11211 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11212 break;
11213 case PLANE_C:
11214 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11215 break;
11216 default:
11217 WARN_ONCE(1, "unknown plane in flip command\n");
11218 return -ENODEV;
11219 }
11220
11221 len = 4;
11222 if (ring->id == RCS) {
11223 len += 6;
11224 /*
11225 	 * On Gen 8, SRM takes an extra dword to accommodate
11226 	 * 48-bit addresses, and we need a NOOP for the batch size to
11227 * stay even.
11228 */
11229 if (IS_GEN8(dev))
11230 len += 2;
11231 }
11232
11233 /*
11234 * BSpec MI_DISPLAY_FLIP for IVB:
11235 * "The full packet must be contained within the same cache line."
11236 *
11237 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11238 * cacheline, if we ever start emitting more commands before
11239 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11240 * then do the cacheline alignment, and finally emit the
11241 * MI_DISPLAY_FLIP.
11242 */
11243 ret = intel_ring_cacheline_align(req);
11244 if (ret)
11245 return ret;
11246
11247 ret = intel_ring_begin(req, len);
11248 if (ret)
11249 return ret;
11250
11251 /* Unmask the flip-done completion message. Note that the bspec says that
11252 * we should do this for both the BCS and RCS, and that we must not unmask
11253 * more than one flip event at any time (or ensure that one flip message
11254 * can be sent by waiting for flip-done prior to queueing new flips).
11255 * Experimentation says that BCS works despite DERRMR masking all
11256 * flip-done completion events and that unmasking all planes at once
11257 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11258 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11259 */
11260 if (ring->id == RCS) {
11261 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
11262 intel_ring_emit_reg(ring, DERRMR);
11263 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11264 DERRMR_PIPEB_PRI_FLIP_DONE |
11265 DERRMR_PIPEC_PRI_FLIP_DONE));
11266 if (IS_GEN8(dev))
11267 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
11268 MI_SRM_LRM_GLOBAL_GTT);
11269 else
11270 intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
11271 MI_SRM_LRM_GLOBAL_GTT);
11272 intel_ring_emit_reg(ring, DERRMR);
11273 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
11274 if (IS_GEN8(dev)) {
11275 intel_ring_emit(ring, 0);
11276 intel_ring_emit(ring, MI_NOOP);
11277 }
11278 }
11279
11280 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
11281 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
11282 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
11283 intel_ring_emit(ring, (MI_NOOP));
11284
11285 intel_mark_page_flip_active(intel_crtc->unpin_work);
11286 return 0;
11287 }
11288
11289 static bool use_mmio_flip(struct intel_engine_cs *ring,
11290 struct drm_i915_gem_object *obj)
11291 {
11292 /*
11293 	 * This is not used on older platforms because the lack of a
11294 	 * flip done interrupt there forces us to use
11295 * CS flips. Older platforms derive flip done using some clever
11296 * tricks involving the flip_pending status bits and vblank irqs.
11297 * So using MMIO flips there would disrupt this mechanism.
11298 */
11299
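	/*
	 * Decision order: a NULL ring forces an MMIO flip, pre-ilk
	 * platforms always use CS flips, and the i915.use_mmio_flip
	 * module parameter overrides the rest (< 0 forces CS, > 0
	 * forces MMIO). Otherwise prefer MMIO when execlists are
	 * enabled, when a dma-buf fence is still unsignaled, or when
	 * the object was last written by a different ring.
	 */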
11300 if (ring == NULL)
11301 return true;
11302
11303 if (INTEL_INFO(ring->dev)->gen < 5)
11304 return false;
11305
11306 if (i915.use_mmio_flip < 0)
11307 return false;
11308 else if (i915.use_mmio_flip > 0)
11309 return true;
11310 else if (i915.enable_execlists)
11311 return true;
11312 else if (obj->base.dma_buf &&
11313 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
11314 false))
11315 return true;
11316 else
11317 return ring != i915_gem_request_get_ring(obj->last_write_req);
11318 }
11319
11320 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11321 unsigned int rotation,
11322 struct intel_unpin_work *work)
11323 {
11324 struct drm_device *dev = intel_crtc->base.dev;
11325 struct drm_i915_private *dev_priv = dev->dev_private;
11326 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11327 const enum pipe pipe = intel_crtc->pipe;
11328 u32 ctl, stride, tile_height;
11329
11330 ctl = I915_READ(PLANE_CTL(pipe, 0));
11331 ctl &= ~PLANE_CTL_TILED_MASK;
11332 switch (fb->modifier[0]) {
11333 case DRM_FORMAT_MOD_NONE:
11334 break;
11335 case I915_FORMAT_MOD_X_TILED:
11336 ctl |= PLANE_CTL_TILED_X;
11337 break;
11338 case I915_FORMAT_MOD_Y_TILED:
11339 ctl |= PLANE_CTL_TILED_Y;
11340 break;
11341 case I915_FORMAT_MOD_Yf_TILED:
11342 ctl |= PLANE_CTL_TILED_YF;
11343 break;
11344 default:
11345 MISSING_CASE(fb->modifier[0]);
11346 }
11347
11348 /*
11349 	 * The stride is expressed either as a multiple of 64 byte chunks for
11350 	 * linear buffers or as a number of tiles for tiled buffers.
11351 */
11352 if (intel_rotation_90_or_270(rotation)) {
11353 /* stride = Surface height in tiles */
11354 tile_height = intel_tile_height(dev, fb->pixel_format,
11355 fb->modifier[0], 0);
11356 stride = DIV_ROUND_UP(fb->height, tile_height);
11357 } else {
11358 stride = fb->pitches[0] /
11359 intel_fb_stride_alignment(dev, fb->modifier[0],
11360 fb->pixel_format);
11361 }
11362
11363 /*
11364 	 * PLANE_CTL and PLANE_STRIDE are both latched not on vblank but on
11365 	 * PLANE_SURF updates, so the whole update is guaranteed to be atomic.
11366 */
11367 I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11368 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11369
11370 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11371 POSTING_READ(PLANE_SURF(pipe, 0));
11372 }
11373
11374 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11375 struct intel_unpin_work *work)
11376 {
11377 struct drm_device *dev = intel_crtc->base.dev;
11378 struct drm_i915_private *dev_priv = dev->dev_private;
11379 struct intel_framebuffer *intel_fb =
11380 to_intel_framebuffer(intel_crtc->base.primary->fb);
11381 struct drm_i915_gem_object *obj = intel_fb->obj;
11382 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11383 u32 dspcntr;
11384
11385 dspcntr = I915_READ(reg);
11386
11387 if (obj->tiling_mode != I915_TILING_NONE)
11388 dspcntr |= DISPPLANE_TILED;
11389 else
11390 dspcntr &= ~DISPPLANE_TILED;
11391
11392 I915_WRITE(reg, dspcntr);
11393
11394 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11395 POSTING_READ(DSPSURF(intel_crtc->plane));
11396 }
11397
11398 /*
11399 * XXX: This is the temporary way to update the plane registers until we get
11400  * around to using the usual plane update functions for MMIO flips.
11401 */
11402 static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11403 {
11404 struct intel_crtc *crtc = mmio_flip->crtc;
11405 struct intel_unpin_work *work;
11406
11407 spin_lock_irq(&crtc->base.dev->event_lock);
11408 work = crtc->unpin_work;
11409 spin_unlock_irq(&crtc->base.dev->event_lock);
11410 if (work == NULL)
11411 return;
11412
11413 intel_mark_page_flip_active(work);
11414
11415 intel_pipe_update_start(crtc);
11416
11417 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11418 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11419 else
11420 		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11421 ilk_do_mmio_flip(crtc, work);
11422
11423 intel_pipe_update_end(crtc);
11424 }
11425
11426 static void intel_mmio_flip_work_func(struct work_struct *work)
11427 {
11428 struct intel_mmio_flip *mmio_flip =
11429 container_of(work, struct intel_mmio_flip, work);
11430 struct intel_framebuffer *intel_fb =
11431 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
11432 struct drm_i915_gem_object *obj = intel_fb->obj;
11433
11434 if (mmio_flip->req) {
11435 WARN_ON(__i915_wait_request(mmio_flip->req,
11436 mmio_flip->crtc->reset_counter,
11437 false, NULL,
11438 &mmio_flip->i915->rps.mmioflips));
11439 i915_gem_request_unreference__unlocked(mmio_flip->req);
11440 }
11441
11442 	/* For framebuffers backed by a dmabuf, wait for the fence */
11443 if (obj->base.dma_buf)
11444 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
11445 false, false,
11446 MAX_SCHEDULE_TIMEOUT) < 0);
11447
11448 intel_do_mmio_flip(mmio_flip);
11449 kfree(mmio_flip);
11450 }
11451
11452 static int intel_queue_mmio_flip(struct drm_device *dev,
11453 struct drm_crtc *crtc,
11454 struct drm_i915_gem_object *obj)
11455 {
11456 struct intel_mmio_flip *mmio_flip;
11457
11458 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11459 if (mmio_flip == NULL)
11460 return -ENOMEM;
11461
11462 mmio_flip->i915 = to_i915(dev);
11463 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11464 mmio_flip->crtc = to_intel_crtc(crtc);
11465 mmio_flip->rotation = crtc->primary->state->rotation;
11466
11467 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11468 schedule_work(&mmio_flip->work);
11469
11470 return 0;
11471 }
11472
11473 static int intel_default_queue_flip(struct drm_device *dev,
11474 struct drm_crtc *crtc,
11475 struct drm_framebuffer *fb,
11476 struct drm_i915_gem_object *obj,
11477 struct drm_i915_gem_request *req,
11478 uint32_t flags)
11479 {
11480 return -ENODEV;
11481 }
11482
11483 static bool __intel_pageflip_stall_check(struct drm_device *dev,
11484 struct drm_crtc *crtc)
11485 {
11486 struct drm_i915_private *dev_priv = dev->dev_private;
11487 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11488 struct intel_unpin_work *work = intel_crtc->unpin_work;
11489 u32 addr;
11490
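	/*
	 * A flip only counts as stuck once it has been ready for at
	 * least three vblanks without the flip-done interrupt arriving;
	 * the live surface base address is then compared against the
	 * queued one to detect a completed flip whose interrupt was
	 * missed.
	 */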
11491 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11492 return true;
11493
11494 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11495 return false;
11496
11497 if (!work->enable_stall_check)
11498 return false;
11499
11500 if (work->flip_ready_vblank == 0) {
11501 if (work->flip_queued_req &&
11502 !i915_gem_request_completed(work->flip_queued_req, true))
11503 return false;
11504
11505 work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
11506 }
11507
11508 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
11509 return false;
11510
11511 /* Potential stall - if we see that the flip has happened,
11512 * assume a missed interrupt. */
11513 if (INTEL_INFO(dev)->gen >= 4)
11514 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11515 else
11516 addr = I915_READ(DSPADDR(intel_crtc->plane));
11517
11518 /* There is a potential issue here with a false positive after a flip
11519 * to the same address. We could address this by checking for a
11520 * non-incrementing frame counter.
11521 */
11522 return addr == work->gtt_offset;
11523 }
11524
11525 void intel_check_page_flip(struct drm_device *dev, int pipe)
11526 {
11527 struct drm_i915_private *dev_priv = dev->dev_private;
11528 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11529 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11530 struct intel_unpin_work *work;
11531
11532 WARN_ON(!in_interrupt());
11533
11534 if (crtc == NULL)
11535 return;
11536
11537 spin_lock(&dev->event_lock);
11538 work = intel_crtc->unpin_work;
11539 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
11540 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
11541 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
11542 page_flip_completed(intel_crtc);
11543 work = NULL;
11544 }
11545 if (work != NULL &&
11546 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11547 intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
11548 spin_unlock(&dev->event_lock);
11549 }
11550
11551 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11552 struct drm_framebuffer *fb,
11553 struct drm_pending_vblank_event *event,
11554 uint32_t page_flip_flags)
11555 {
11556 struct drm_device *dev = crtc->dev;
11557 struct drm_i915_private *dev_priv = dev->dev_private;
11558 struct drm_framebuffer *old_fb = crtc->primary->fb;
11559 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11560 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11561 struct drm_plane *primary = crtc->primary;
11562 enum pipe pipe = intel_crtc->pipe;
11563 struct intel_unpin_work *work;
11564 struct intel_engine_cs *ring;
11565 bool mmio_flip;
11566 struct drm_i915_gem_request *request = NULL;
11567 int ret;
11568
11569 /*
11570 * drm_mode_page_flip_ioctl() should already catch this, but double
11571 * check to be safe. In the future we may enable pageflipping from
11572 * a disabled primary plane.
11573 */
11574 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11575 return -EBUSY;
11576
11577 /* Can't change pixel format via MI display flips. */
11578 if (fb->pixel_format != crtc->primary->fb->pixel_format)
11579 return -EINVAL;
11580
11581 /*
11582 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11583 	 * Note that pitch changes could also affect these registers.
11584 */
11585 if (INTEL_INFO(dev)->gen > 3 &&
11586 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11587 fb->pitches[0] != crtc->primary->fb->pitches[0]))
11588 return -EINVAL;
11589
11590 if (i915_terminally_wedged(&dev_priv->gpu_error))
11591 goto out_hang;
11592
11593 work = kzalloc(sizeof(*work), GFP_KERNEL);
11594 if (work == NULL)
11595 return -ENOMEM;
11596
11597 work->event = event;
11598 work->crtc = crtc;
11599 work->old_fb = old_fb;
11600 INIT_WORK(&work->work, intel_unpin_work_fn);
11601
11602 ret = drm_crtc_vblank_get(crtc);
11603 if (ret)
11604 goto free_work;
11605
11606 /* We borrow the event spin lock for protecting unpin_work */
11607 spin_lock_irq(&dev->event_lock);
11608 if (intel_crtc->unpin_work) {
11609 /* Before declaring the flip queue wedged, check if
11610 * the hardware completed the operation behind our backs.
11611 */
11612 if (__intel_pageflip_stall_check(dev, crtc)) {
11613 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11614 page_flip_completed(intel_crtc);
11615 } else {
11616 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11617 spin_unlock_irq(&dev->event_lock);
11618
11619 drm_crtc_vblank_put(crtc);
11620 kfree(work);
11621 return -EBUSY;
11622 }
11623 }
11624 intel_crtc->unpin_work = work;
11625 spin_unlock_irq(&dev->event_lock);
11626
11627 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11628 flush_workqueue(dev_priv->wq);
11629
11630 /* Reference the objects for the scheduled work. */
11631 drm_framebuffer_reference(work->old_fb);
11632 drm_gem_object_reference(&obj->base);
11633
11634 crtc->primary->fb = fb;
11635 update_state_fb(crtc->primary);
11636
11637 work->pending_flip_obj = obj;
11638
11639 ret = i915_mutex_lock_interruptible(dev);
11640 if (ret)
11641 goto cleanup;
11642
11643 atomic_inc(&intel_crtc->unpin_work_count);
11644 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
11645
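	/*
	 * Sample the flip counter value the hardware must advance past:
	 * page_flip_finished() treats the flip as done once the counter
	 * reaches the value read here plus one.
	 */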
11646 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11647 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11648
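	/*
	 * Pick the engine for a CS flip: the blitter on VLV/CHV (unless
	 * the tiling changes, which DISPLAY_FLIP cannot handle there, so
	 * the ring is cleared and we fall back to an MMIO flip), the
	 * blitter on IVB/HSW, and on other gen7+ whichever ring last
	 * wrote the object, defaulting to the blitter.
	 */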
11649 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11650 ring = &dev_priv->ring[BCS];
11651 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11652 /* vlv: DISPLAY_FLIP fails to change tiling */
11653 ring = NULL;
11654 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11655 ring = &dev_priv->ring[BCS];
11656 } else if (INTEL_INFO(dev)->gen >= 7) {
11657 ring = i915_gem_request_get_ring(obj->last_write_req);
11658 if (ring == NULL || ring->id != RCS)
11659 ring = &dev_priv->ring[BCS];
11660 } else {
11661 ring = &dev_priv->ring[RCS];
11662 }
11663
11664 mmio_flip = use_mmio_flip(ring, obj);
11665
11666 /* When using CS flips, we want to emit semaphores between rings.
11667 * However, when using mmio flips we will create a task to do the
11668 * synchronisation, so all we want here is to pin the framebuffer
11669 * into the display plane and skip any waits.
11670 */
11671 if (!mmio_flip) {
11672 ret = i915_gem_object_sync(obj, ring, &request);
11673 if (ret)
11674 goto cleanup_pending;
11675 }
11676
11677 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
11678 crtc->primary->state);
11679 if (ret)
11680 goto cleanup_pending;
11681
11682 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11683 obj, 0);
11684 work->gtt_offset += intel_crtc->dspaddr_offset;
11685
11686 if (mmio_flip) {
11687 ret = intel_queue_mmio_flip(dev, crtc, obj);
11688 if (ret)
11689 goto cleanup_unpin;
11690
11691 i915_gem_request_assign(&work->flip_queued_req,
11692 obj->last_write_req);
11693 } else {
11694 if (!request) {
11695 ret = i915_gem_request_alloc(ring, ring->default_context, &request);
11696 if (ret)
11697 goto cleanup_unpin;
11698 }
11699
11700 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11701 page_flip_flags);
11702 if (ret)
11703 goto cleanup_unpin;
11704
11705 i915_gem_request_assign(&work->flip_queued_req, request);
11706 }
11707
11708 if (request)
11709 i915_add_request_no_flush(request);
11710
11711 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11712 work->enable_stall_check = true;
11713
11714 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11715 to_intel_plane(primary)->frontbuffer_bit);
11716 mutex_unlock(&dev->struct_mutex);
11717
11718 intel_fbc_deactivate(intel_crtc);
11719 intel_frontbuffer_flip_prepare(dev,
11720 to_intel_plane(primary)->frontbuffer_bit);
11721
11722 trace_i915_flip_request(intel_crtc->plane, obj);
11723
11724 return 0;
11725
11726 cleanup_unpin:
11727 intel_unpin_fb_obj(fb, crtc->primary->state);
11728 cleanup_pending:
11729 if (request)
11730 i915_gem_request_cancel(request);
11731 atomic_dec(&intel_crtc->unpin_work_count);
11732 mutex_unlock(&dev->struct_mutex);
11733 cleanup:
11734 crtc->primary->fb = old_fb;
11735 update_state_fb(crtc->primary);
11736
11737 drm_gem_object_unreference_unlocked(&obj->base);
11738 drm_framebuffer_unreference(work->old_fb);
11739
11740 spin_lock_irq(&dev->event_lock);
11741 intel_crtc->unpin_work = NULL;
11742 spin_unlock_irq(&dev->event_lock);
11743
11744 drm_crtc_vblank_put(crtc);
11745 free_work:
11746 kfree(work);
11747
11748 if (ret == -EIO) {
11749 struct drm_atomic_state *state;
11750 struct drm_plane_state *plane_state;
11751
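	/*
	 * With a terminally wedged GPU a CS flip is impossible, so
	 * emulate the flip with a full atomic plane update and still
	 * deliver the vblank event userspace is waiting for.
	 */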
11752 out_hang:
11753 state = drm_atomic_state_alloc(dev);
11754 if (!state)
11755 return -ENOMEM;
11756 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11757
11758 retry:
11759 plane_state = drm_atomic_get_plane_state(state, primary);
11760 ret = PTR_ERR_OR_ZERO(plane_state);
11761 if (!ret) {
11762 drm_atomic_set_fb_for_plane(plane_state, fb);
11763
11764 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11765 if (!ret)
11766 ret = drm_atomic_commit(state);
11767 }
11768
11769 if (ret == -EDEADLK) {
11770 drm_modeset_backoff(state->acquire_ctx);
11771 drm_atomic_state_clear(state);
11772 goto retry;
11773 }
11774
11775 if (ret)
11776 drm_atomic_state_free(state);
11777
11778 if (ret == 0 && event) {
11779 spin_lock_irq(&dev->event_lock);
11780 drm_send_vblank_event(dev, pipe, event);
11781 spin_unlock_irq(&dev->event_lock);
11782 }
11783 }
11784 return ret;
11785 }
11786
11787
11788 /**
11789 * intel_wm_need_update - Check whether watermarks need updating
11790 * @plane: drm plane
11791 * @state: new plane state
11792 *
11793 * Check current plane state versus the new one to determine whether
11794 * watermarks need to be recalculated.
11795 *
11796 * Returns true or false.
11797 */
11798 static bool intel_wm_need_update(struct drm_plane *plane,
11799 struct drm_plane_state *state)
11800 {
11801 struct intel_plane_state *new = to_intel_plane_state(state);
11802 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11803
11804 /* Update watermarks on tiling or size changes. */
11805 if (new->visible != cur->visible)
11806 return true;
11807
11808 if (!cur->base.fb || !new->base.fb)
11809 return false;
11810
11811 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11812 cur->base.rotation != new->base.rotation ||
11813 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11814 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11815 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11816 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11817 return true;
11818
11819 return false;
11820 }
11821
11822 static bool needs_scaling(struct intel_plane_state *state)
11823 {
11824 int src_w = drm_rect_width(&state->src) >> 16;
11825 int src_h = drm_rect_height(&state->src) >> 16;
11826 int dst_w = drm_rect_width(&state->dst);
11827 int dst_h = drm_rect_height(&state->dst);
11828
11829 return (src_w != dst_w || src_h != dst_h);
11830 }
11831
11832 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11833 struct drm_plane_state *plane_state)
11834 {
11835 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11836 struct drm_crtc *crtc = crtc_state->crtc;
11837 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11838 struct drm_plane *plane = plane_state->plane;
11839 struct drm_device *dev = crtc->dev;
11840 struct drm_i915_private *dev_priv = dev->dev_private;
11841 struct intel_plane_state *old_plane_state =
11842 to_intel_plane_state(plane->state);
11843 int idx = intel_crtc->base.base.id, ret;
11844 int i = drm_plane_index(plane);
11845 bool mode_changed = needs_modeset(crtc_state);
11846 bool was_crtc_enabled = crtc->state->active;
11847 bool is_crtc_enabled = crtc_state->active;
11848 bool turn_off, turn_on, visible, was_visible;
11849 struct drm_framebuffer *fb = plane_state->fb;
11850
11851 if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11852 plane->type != DRM_PLANE_TYPE_CURSOR) {
11853 ret = skl_update_scaler_plane(
11854 to_intel_crtc_state(crtc_state),
11855 to_intel_plane_state(plane_state));
11856 if (ret)
11857 return ret;
11858 }
11859
11860 was_visible = old_plane_state->visible;
11861 visible = to_intel_plane_state(plane_state)->visible;
11862
11863 if (!was_crtc_enabled && WARN_ON(was_visible))
11864 was_visible = false;
11865
11866 if (!is_crtc_enabled && WARN_ON(visible))
11867 visible = false;
11868
11869 if (!was_visible && !visible)
11870 return 0;
11871
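	/*
	 * A modeset makes a still-visible plane count as both turned
	 * off and turned on, since the plane is disabled and re-enabled
	 * around it.
	 */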
11872 turn_off = was_visible && (!visible || mode_changed);
11873 turn_on = visible && (!was_visible || mode_changed);
11874
11875 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11876 plane->base.id, fb ? fb->base.id : -1);
11877
11878 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11879 plane->base.id, was_visible, visible,
11880 turn_off, turn_on, mode_changed);
11881
11882 if (turn_on || turn_off) {
11883 pipe_config->wm_changed = true;
11884
11885 /* must disable cxsr around plane enable/disable */
11886 if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11887 if (is_crtc_enabled)
11888 intel_crtc->atomic.wait_vblank = true;
11889 pipe_config->disable_cxsr = true;
11890 }
11891 } else if (intel_wm_need_update(plane, plane_state)) {
11892 pipe_config->wm_changed = true;
11893 }
11894
11895 if (visible || was_visible)
11896 intel_crtc->atomic.fb_bits |=
11897 to_intel_plane(plane)->frontbuffer_bit;
11898
11899 switch (plane->type) {
11900 case DRM_PLANE_TYPE_PRIMARY:
11901 intel_crtc->atomic.pre_disable_primary = turn_off;
11902 intel_crtc->atomic.post_enable_primary = turn_on;
11903
11904 if (turn_off) {
11905 /*
11906 			 * FIXME: Actually, if any other plane is still
11907 			 * enabled on the pipe we could leave IPS enabled,
11908 			 * but for now assume that IPS needs to be disabled
11909 			 * whenever the primary is made invisible by setting
11910 			 * DSPCNTR to 0 in the update_primary_plane
11911 			 * function.
11912 */
11913 intel_crtc->atomic.disable_ips = true;
11914
11915 intel_crtc->atomic.disable_fbc = true;
11916 }
11917
11918 /*
11919 * FBC does not work on some platforms for rotated
11920 * planes, so disable it when rotation is not 0 and
11921 * update it when rotation is set back to 0.
11922 *
11923 * FIXME: This is redundant with the fbc update done in
11924 			 * the primary plane enable function, except that the
11925 			 * latter is done too late. We eventually need to unify
11926 * this.
11927 */
11928
11929 if (visible &&
11930 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11931 dev_priv->fbc.crtc == intel_crtc &&
11932 plane_state->rotation != BIT(DRM_ROTATE_0))
11933 intel_crtc->atomic.disable_fbc = true;
11934
11935 /*
11936 * BDW signals flip done immediately if the plane
11937 * is disabled, even if the plane enable is already
11938 * armed to occur at the next vblank :(
11939 */
11940 if (turn_on && IS_BROADWELL(dev))
11941 intel_crtc->atomic.wait_vblank = true;
11942
11943 intel_crtc->atomic.update_fbc |= visible || mode_changed;
11944 break;
11945 case DRM_PLANE_TYPE_CURSOR:
11946 break;
11947 case DRM_PLANE_TYPE_OVERLAY:
11948 /*
11949 * WaCxSRDisabledForSpriteScaling:ivb
11950 *
11951 * cstate->update_wm was already set above, so this flag will
11952 * take effect when we commit and program watermarks.
11953 */
11954 if (IS_IVYBRIDGE(dev) &&
11955 needs_scaling(to_intel_plane_state(plane_state)) &&
11956 !needs_scaling(old_plane_state)) {
11957 to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
11958 } else if (turn_off && !mode_changed) {
11959 intel_crtc->atomic.wait_vblank = true;
11960 intel_crtc->atomic.update_sprite_watermarks |=
11961 1 << i;
11962 }
11963
11964 break;
11965 }
11966 return 0;
11967 }
11968
11969 static bool encoders_cloneable(const struct intel_encoder *a,
11970 const struct intel_encoder *b)
11971 {
11972 /* masks could be asymmetric, so check both ways */
11973 return a == b || (a->cloneable & (1 << b->type) &&
11974 b->cloneable & (1 << a->type));
11975 }
11976
11977 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11978 struct intel_crtc *crtc,
11979 struct intel_encoder *encoder)
11980 {
11981 struct intel_encoder *source_encoder;
11982 struct drm_connector *connector;
11983 struct drm_connector_state *connector_state;
11984 int i;
11985
11986 for_each_connector_in_state(state, connector, connector_state, i) {
11987 if (connector_state->crtc != &crtc->base)
11988 continue;
11989
11990 source_encoder =
11991 to_intel_encoder(connector_state->best_encoder);
11992 if (!encoders_cloneable(encoder, source_encoder))
11993 return false;
11994 }
11995
11996 return true;
11997 }
11998
11999 static bool check_encoder_cloning(struct drm_atomic_state *state,
12000 struct intel_crtc *crtc)
12001 {
12002 struct intel_encoder *encoder;
12003 struct drm_connector *connector;
12004 struct drm_connector_state *connector_state;
12005 int i;
12006
12007 for_each_connector_in_state(state, connector, connector_state, i) {
12008 if (connector_state->crtc != &crtc->base)
12009 continue;
12010
12011 encoder = to_intel_encoder(connector_state->best_encoder);
12012 if (!check_single_encoder_cloning(state, crtc, encoder))
12013 return false;
12014 }
12015
12016 return true;
12017 }
12018
12019 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12020 struct drm_crtc_state *crtc_state)
12021 {
12022 struct drm_device *dev = crtc->dev;
12023 struct drm_i915_private *dev_priv = dev->dev_private;
12024 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12025 struct intel_crtc_state *pipe_config =
12026 to_intel_crtc_state(crtc_state);
12027 struct drm_atomic_state *state = crtc_state->state;
12028 int ret;
12029 bool mode_changed = needs_modeset(crtc_state);
12030
12031 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
12032 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12033 return -EINVAL;
12034 }
12035
12036 if (mode_changed && !crtc_state->active)
12037 pipe_config->wm_changed = true;
12038
12039 if (mode_changed && crtc_state->enable &&
12040 dev_priv->display.crtc_compute_clock &&
12041 !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
12042 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
12043 pipe_config);
12044 if (ret)
12045 return ret;
12046 }
12047
12048 ret = 0;
12049 if (dev_priv->display.compute_pipe_wm) {
12050 ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
12051 if (ret)
12052 return ret;
12053 }
12054
12055 if (INTEL_INFO(dev)->gen >= 9) {
12056 if (mode_changed)
12057 ret = skl_update_scaler_crtc(pipe_config);
12058
12059 if (!ret)
12060 ret = intel_atomic_setup_scalers(dev, intel_crtc,
12061 pipe_config);
12062 }
12063
12064 return ret;
12065 }
12066
12067 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
12068 .mode_set_base_atomic = intel_pipe_set_base_atomic,
12069 .load_lut = intel_crtc_load_lut,
12070 .atomic_begin = intel_begin_crtc_commit,
12071 .atomic_flush = intel_finish_crtc_commit,
12072 .atomic_check = intel_crtc_atomic_check,
12073 };
12074
12075 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12076 {
12077 struct intel_connector *connector;
12078
12079 for_each_intel_connector(dev, connector) {
12080 if (connector->base.encoder) {
12081 connector->base.state->best_encoder =
12082 connector->base.encoder;
12083 connector->base.state->crtc =
12084 connector->base.encoder->crtc;
12085 } else {
12086 connector->base.state->best_encoder = NULL;
12087 connector->base.state->crtc = NULL;
12088 }
12089 }
12090 }
12091
12092 static void
12093 connected_sink_compute_bpp(struct intel_connector *connector,
12094 struct intel_crtc_state *pipe_config)
12095 {
12096 int bpp = pipe_config->pipe_bpp;
12097
12098 	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constraints\n",
12099 connector->base.base.id,
12100 connector->base.name);
12101
12102 /* Don't use an invalid EDID bpc value */
12103 if (connector->base.display_info.bpc &&
12104 connector->base.display_info.bpc * 3 < bpp) {
12105 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12106 bpp, connector->base.display_info.bpc*3);
12107 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12108 }
12109
12110 /* Clamp bpp to default limit on screens without EDID 1.4 */
12111 if (connector->base.display_info.bpc == 0) {
12112 int type = connector->base.connector_type;
12113 int clamp_bpp = 24;
12114
12115 /* Fall back to 18 bpp when DP sink capability is unknown. */
12116 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12117 type == DRM_MODE_CONNECTOR_eDP)
12118 clamp_bpp = 18;
12119
12120 if (bpp > clamp_bpp) {
12121 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12122 bpp, clamp_bpp);
12123 pipe_config->pipe_bpp = clamp_bpp;
12124 }
12125 }
12126 }
12127
12128 static int
12129 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
12130 struct intel_crtc_state *pipe_config)
12131 {
12132 struct drm_device *dev = crtc->base.dev;
12133 struct drm_atomic_state *state;
12134 struct drm_connector *connector;
12135 struct drm_connector_state *connector_state;
12136 int bpp, i;
12137
12138 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
12139 bpp = 10*3;
12140 else if (INTEL_INFO(dev)->gen >= 5)
12141 bpp = 12*3;
12142 else
12143 bpp = 8*3;
12144
12145
12146 pipe_config->pipe_bpp = bpp;
12147
12148 state = pipe_config->base.state;
12149
12150 /* Clamp display bpp to EDID value */
12151 for_each_connector_in_state(state, connector, connector_state, i) {
12152 if (connector_state->crtc != &crtc->base)
12153 continue;
12154
12155 connected_sink_compute_bpp(to_intel_connector(connector),
12156 pipe_config);
12157 }
12158
12159 return bpp;
12160 }
12161
12162 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12163 {
12164 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12165 "type: 0x%x flags: 0x%x\n",
12166 mode->crtc_clock,
12167 mode->crtc_hdisplay, mode->crtc_hsync_start,
12168 mode->crtc_hsync_end, mode->crtc_htotal,
12169 mode->crtc_vdisplay, mode->crtc_vsync_start,
12170 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12171 }
12172
12173 static void intel_dump_pipe_config(struct intel_crtc *crtc,
12174 struct intel_crtc_state *pipe_config,
12175 const char *context)
12176 {
12177 struct drm_device *dev = crtc->base.dev;
12178 struct drm_plane *plane;
12179 struct intel_plane *intel_plane;
12180 struct intel_plane_state *state;
12181 struct drm_framebuffer *fb;
12182
12183 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
12184 context, pipe_config, pipe_name(crtc->pipe));
12185
12186 DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
12187 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12188 pipe_config->pipe_bpp, pipe_config->dither);
12189 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12190 pipe_config->has_pch_encoder,
12191 pipe_config->fdi_lanes,
12192 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12193 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12194 pipe_config->fdi_m_n.tu);
12195 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12196 pipe_config->has_dp_encoder,
12197 pipe_config->lane_count,
12198 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12199 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12200 pipe_config->dp_m_n.tu);
12201
12202 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12203 pipe_config->has_dp_encoder,
12204 pipe_config->lane_count,
12205 pipe_config->dp_m2_n2.gmch_m,
12206 pipe_config->dp_m2_n2.gmch_n,
12207 pipe_config->dp_m2_n2.link_m,
12208 pipe_config->dp_m2_n2.link_n,
12209 pipe_config->dp_m2_n2.tu);
12210
12211 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12212 pipe_config->has_audio,
12213 pipe_config->has_infoframe);
12214
12215 DRM_DEBUG_KMS("requested mode:\n");
12216 drm_mode_debug_printmodeline(&pipe_config->base.mode);
12217 DRM_DEBUG_KMS("adjusted mode:\n");
12218 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12219 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
12220 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
12221 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12222 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
12223 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12224 crtc->num_scalers,
12225 pipe_config->scaler_state.scaler_users,
12226 pipe_config->scaler_state.scaler_id);
12227 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12228 pipe_config->gmch_pfit.control,
12229 pipe_config->gmch_pfit.pgm_ratios,
12230 pipe_config->gmch_pfit.lvds_border_bits);
12231 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
12232 pipe_config->pch_pfit.pos,
12233 pipe_config->pch_pfit.size,
12234 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
12235 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
12236 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
12237
12238 if (IS_BROXTON(dev)) {
12239 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
12240 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
12241 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
12242 pipe_config->ddi_pll_sel,
12243 pipe_config->dpll_hw_state.ebb0,
12244 pipe_config->dpll_hw_state.ebb4,
12245 pipe_config->dpll_hw_state.pll0,
12246 pipe_config->dpll_hw_state.pll1,
12247 pipe_config->dpll_hw_state.pll2,
12248 pipe_config->dpll_hw_state.pll3,
12249 pipe_config->dpll_hw_state.pll6,
12250 pipe_config->dpll_hw_state.pll8,
12251 pipe_config->dpll_hw_state.pll9,
12252 pipe_config->dpll_hw_state.pll10,
12253 pipe_config->dpll_hw_state.pcsdw12);
12254 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
12255 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12256 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12257 pipe_config->ddi_pll_sel,
12258 pipe_config->dpll_hw_state.ctrl1,
12259 pipe_config->dpll_hw_state.cfgcr1,
12260 pipe_config->dpll_hw_state.cfgcr2);
12261 } else if (HAS_DDI(dev)) {
12262 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
12263 pipe_config->ddi_pll_sel,
12264 pipe_config->dpll_hw_state.wrpll,
12265 pipe_config->dpll_hw_state.spll);
12266 } else {
12267 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12268 "fp0: 0x%x, fp1: 0x%x\n",
12269 pipe_config->dpll_hw_state.dpll,
12270 pipe_config->dpll_hw_state.dpll_md,
12271 pipe_config->dpll_hw_state.fp0,
12272 pipe_config->dpll_hw_state.fp1);
12273 }
12274
12275 DRM_DEBUG_KMS("planes on this crtc\n");
12276 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12277 intel_plane = to_intel_plane(plane);
12278 if (intel_plane->pipe != crtc->pipe)
12279 continue;
12280
12281 state = to_intel_plane_state(plane->state);
12282 fb = state->base.fb;
12283 if (!fb) {
12284 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
12285 "disabled, scaler_id = %d\n",
12286 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12287 plane->base.id, intel_plane->pipe,
12288 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12289 drm_plane_index(plane), state->scaler_id);
12290 continue;
12291 }
12292
12293 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
12294 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12295 plane->base.id, intel_plane->pipe,
12296 crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
12297 drm_plane_index(plane));
12298 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
12299 fb->base.id, fb->width, fb->height, fb->pixel_format);
12300 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
12301 state->scaler_id,
12302 state->src.x1 >> 16, state->src.y1 >> 16,
12303 drm_rect_width(&state->src) >> 16,
12304 drm_rect_height(&state->src) >> 16,
12305 state->dst.x1, state->dst.y1,
12306 drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12307 }
12308 }
12309
12310 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12311 {
12312 struct drm_device *dev = state->dev;
12313 struct drm_connector *connector;
12314 unsigned int used_ports = 0;
12315
12316 /*
12317 * Walk the connector list instead of the encoder
12318 * list to detect the problem on ddi platforms
12319 * where there's just one encoder per digital port.
12320 */
12321 drm_for_each_connector(connector, dev) {
12322 struct drm_connector_state *connector_state;
12323 struct intel_encoder *encoder;
12324
12325 connector_state = drm_atomic_get_existing_connector_state(state, connector);
12326 if (!connector_state)
12327 connector_state = connector->state;
12328
12329 if (!connector_state->best_encoder)
12330 continue;
12331
12332 encoder = to_intel_encoder(connector_state->best_encoder);
12333
12334 WARN_ON(!connector_state->crtc);
12335
12336 switch (encoder->type) {
12337 unsigned int port_mask;
12338 case INTEL_OUTPUT_UNKNOWN:
12339 if (WARN_ON(!HAS_DDI(dev)))
12340 break;
12341 case INTEL_OUTPUT_DISPLAYPORT:
12342 case INTEL_OUTPUT_HDMI:
12343 case INTEL_OUTPUT_EDP:
12344 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12345
12346 /* the same port mustn't appear more than once */
12347 if (used_ports & port_mask)
12348 return false;
12349
12350 used_ports |= port_mask;
12351 default:
12352 break;
12353 }
12354 }
12355
12356 return true;
12357 }
12358
12359 static void
12360 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12361 {
12362 struct drm_crtc_state tmp_state;
12363 struct intel_crtc_scaler_state scaler_state;
12364 struct intel_dpll_hw_state dpll_hw_state;
12365 enum intel_dpll_id shared_dpll;
12366 uint32_t ddi_pll_sel;
12367 bool force_thru;
12368
12369 /* FIXME: before the switch to atomic started, a new pipe_config was
12370 * kzalloc'd. Code that depends on any field being zero should be
12371 * fixed, so that the crtc_state can be safely duplicated. For now,
12372 	 * only fields that are known not to cause problems are preserved. */
12373
12374 tmp_state = crtc_state->base;
12375 scaler_state = crtc_state->scaler_state;
12376 shared_dpll = crtc_state->shared_dpll;
12377 dpll_hw_state = crtc_state->dpll_hw_state;
12378 ddi_pll_sel = crtc_state->ddi_pll_sel;
12379 force_thru = crtc_state->pch_pfit.force_thru;
12380
12381 	memset(crtc_state, 0, sizeof(*crtc_state));
12382
12383 crtc_state->base = tmp_state;
12384 crtc_state->scaler_state = scaler_state;
12385 crtc_state->shared_dpll = shared_dpll;
12386 crtc_state->dpll_hw_state = dpll_hw_state;
12387 crtc_state->ddi_pll_sel = ddi_pll_sel;
12388 crtc_state->pch_pfit.force_thru = force_thru;
12389 }
12390
12391 static int
12392 intel_modeset_pipe_config(struct drm_crtc *crtc,
12393 struct intel_crtc_state *pipe_config)
12394 {
12395 struct drm_atomic_state *state = pipe_config->base.state;
12396 struct intel_encoder *encoder;
12397 struct drm_connector *connector;
12398 struct drm_connector_state *connector_state;
12399 int base_bpp, ret = -EINVAL;
12400 int i;
12401 bool retry = true;
12402
12403 clear_intel_crtc_state(pipe_config);
12404
12405 pipe_config->cpu_transcoder =
12406 (enum transcoder) to_intel_crtc(crtc)->pipe;
12407
12408 /*
12409 * Sanitize sync polarity flags based on requested ones. If neither
12410 	 * positive nor negative polarity is requested, treat this as meaning
12411 * negative polarity.
12412 */
12413 if (!(pipe_config->base.adjusted_mode.flags &
12414 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
12415 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
12416
12417 if (!(pipe_config->base.adjusted_mode.flags &
12418 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
12419 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
12420
12421 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12422 pipe_config);
12423 if (base_bpp < 0)
12424 goto fail;
12425
12426 /*
12427 * Determine the real pipe dimensions. Note that stereo modes can
12428 * increase the actual pipe size due to the frame doubling and
12429 	 * insertion of additional space for blanks between the frames. This
12430 * is stored in the crtc timings. We use the requested mode to do this
12431 * computation to clearly distinguish it from the adjusted mode, which
12432 	 * can be changed by the connectors in the retry loop below.
12433 */
12434 drm_crtc_get_hv_timing(&pipe_config->base.mode,
12435 &pipe_config->pipe_src_w,
12436 &pipe_config->pipe_src_h);
12437
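	/*
	 * The compute stage below may ask for a single retry (RETRY)
	 * when the pipe turns out to be bandwidth constrained; the
	 * 'retry' flag guarantees we go through the encoders at most
	 * twice.
	 */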
12438 encoder_retry:
12439 /* Ensure the port clock defaults are reset when retrying. */
12440 pipe_config->port_clock = 0;
12441 pipe_config->pixel_multiplier = 1;
12442
12443 /* Fill in default crtc timings, allow encoders to overwrite them. */
12444 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12445 CRTC_STEREO_DOUBLE);
12446
12447 /* Pass our mode to the connectors and the CRTC to give them a chance to
12448 * adjust it according to limitations or connector properties, and also
12449 * a chance to reject the mode entirely.
12450 */
12451 for_each_connector_in_state(state, connector, connector_state, i) {
12452 if (connector_state->crtc != crtc)
12453 continue;
12454
12455 encoder = to_intel_encoder(connector_state->best_encoder);
12456
12457 if (!(encoder->compute_config(encoder, pipe_config))) {
12458 DRM_DEBUG_KMS("Encoder config failure\n");
12459 goto fail;
12460 }
12461 }
12462
12463 /* Set default port clock if not overwritten by the encoder. Needs to be
12464 * done afterwards in case the encoder adjusts the mode. */
12465 if (!pipe_config->port_clock)
12466 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
12467 * pipe_config->pixel_multiplier;
12468
12469 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
12470 if (ret < 0) {
12471 DRM_DEBUG_KMS("CRTC fixup failed\n");
12472 goto fail;
12473 }
12474
12475 if (ret == RETRY) {
12476 if (WARN(!retry, "loop in pipe configuration computation\n")) {
12477 ret = -EINVAL;
12478 goto fail;
12479 }
12480
12481 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12482 retry = false;
12483 goto encoder_retry;
12484 }
12485
12486 	/* Dithering seems not to pass through bits correctly when it should, so
12487 * only enable it on 6bpc panels. */
12488 pipe_config->dither = pipe_config->pipe_bpp == 6*3;
12489 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
12490 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
12491
12492 fail:
12493 return ret;
12494 }
12495
12496 static void
12497 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
12498 {
12499 struct drm_crtc *crtc;
12500 struct drm_crtc_state *crtc_state;
12501 int i;
12502
12503 /* Double check state. */
12504 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12505 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
12506
12507 /* Update hwmode for vblank functions */
12508 if (crtc->state->active)
12509 crtc->hwmode = crtc->state->adjusted_mode;
12510 else
12511 crtc->hwmode.crtc_clock = 0;
12512
12513 /*
12514 * Update legacy state to satisfy fbc code. This can
12515 * be removed when fbc uses the atomic state.
12516 */
12517 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12518 struct drm_plane_state *plane_state = crtc->primary->state;
12519
12520 crtc->primary->fb = plane_state->fb;
12521 crtc->x = plane_state->src_x >> 16;
12522 crtc->y = plane_state->src_y >> 16;
12523 }
12524 }
12525 }
12526
12527 static bool intel_fuzzy_clock_check(int clock1, int clock2)
12528 {
12529 int diff;
12530
12531 if (clock1 == clock2)
12532 return true;
12533
12534 if (!clock1 || !clock2)
12535 return false;
12536
12537 diff = abs(clock1 - clock2);
12538
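	/*
	 * Accept the clocks as equal when they differ by less than 5%
	 * of their sum: (diff + c1 + c2) * 100 / (c1 + c2) < 105
	 * rearranges to diff < 0.05 * (c1 + c2).
	 */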
12539 	if (((diff + clock1 + clock2) * 100) / (clock1 + clock2) < 105)
12540 return true;
12541
12542 return false;
12543 }
12544
12545 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
12546 list_for_each_entry((intel_crtc), \
12547 &(dev)->mode_config.crtc_list, \
12548 base.head) \
12549 for_each_if (mask & (1 <<(intel_crtc)->pipe))
12550
12551 static bool
12552 intel_compare_m_n(unsigned int m, unsigned int n,
12553 unsigned int m2, unsigned int n2,
12554 bool exact)
12555 {
12556 if (m == m2 && n == n2)
12557 return true;
12558
12559 if (exact || !m || !n || !m2 || !n2)
12560 return false;
12561
12562 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12563
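	/*
	 * Scale the smaller m/n pair up by powers of two until the m
	 * values meet; the two ratios are treated as equal only if both
	 * numerator and denominator then match exactly.
	 */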
12564 if (m > m2) {
12565 while (m > m2) {
12566 m2 <<= 1;
12567 n2 <<= 1;
12568 }
12569 } else if (m < m2) {
12570 while (m < m2) {
12571 m <<= 1;
12572 n <<= 1;
12573 }
12574 }
12575
12576 return m == m2 && n == n2;
12577 }
12578
12579 static bool
12580 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12581 struct intel_link_m_n *m2_n2,
12582 bool adjust)
12583 {
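	/*
	 * In adjust mode the comparison is fuzzy (ratios may differ by
	 * a power-of-two scale factor) and, on a match, m2_n2 is
	 * overwritten with m_n so that the two states end up identical.
	 */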
12584 if (m_n->tu == m2_n2->tu &&
12585 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12586 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12587 intel_compare_m_n(m_n->link_m, m_n->link_n,
12588 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12589 if (adjust)
12590 *m2_n2 = *m_n;
12591
12592 return true;
12593 }
12594
12595 return false;
12596 }
12597
12598 static bool
12599 intel_pipe_config_compare(struct drm_device *dev,
12600 struct intel_crtc_state *current_config,
12601 struct intel_crtc_state *pipe_config,
12602 bool adjust)
12603 {
12604 bool ret = true;
12605
12606 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12607 do { \
12608 if (!adjust) \
12609 DRM_ERROR(fmt, ##__VA_ARGS__); \
12610 else \
12611 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12612 } while (0)
12613
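/*
 * The PIPE_CONF_CHECK_* helpers below compare a single field of the
 * sw state (current_config) with the state read back from the hw
 * (pipe_config), logging any mismatch and clearing ret. Mismatches
 * are errors when verifying and debug output when adjusting.
 */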
12614 #define PIPE_CONF_CHECK_X(name) \
12615 if (current_config->name != pipe_config->name) { \
12616 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12617 "(expected 0x%08x, found 0x%08x)\n", \
12618 current_config->name, \
12619 pipe_config->name); \
12620 ret = false; \
12621 }
12622
12623 #define PIPE_CONF_CHECK_I(name) \
12624 if (current_config->name != pipe_config->name) { \
12625 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12626 "(expected %i, found %i)\n", \
12627 current_config->name, \
12628 pipe_config->name); \
12629 ret = false; \
12630 }
12631
12632 #define PIPE_CONF_CHECK_M_N(name) \
12633 if (!intel_compare_link_m_n(&current_config->name, \
12634 &pipe_config->name,\
12635 adjust)) { \
12636 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12637 "(expected tu %i gmch %i/%i link %i/%i, " \
12638 "found tu %i, gmch %i/%i link %i/%i)\n", \
12639 current_config->name.tu, \
12640 current_config->name.gmch_m, \
12641 current_config->name.gmch_n, \
12642 current_config->name.link_m, \
12643 current_config->name.link_n, \
12644 pipe_config->name.tu, \
12645 pipe_config->name.gmch_m, \
12646 pipe_config->name.gmch_n, \
12647 pipe_config->name.link_m, \
12648 pipe_config->name.link_n); \
12649 ret = false; \
12650 }
12651
12652 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12653 if (!intel_compare_link_m_n(&current_config->name, \
12654 &pipe_config->name, adjust) && \
12655 !intel_compare_link_m_n(&current_config->alt_name, \
12656 &pipe_config->name, adjust)) { \
12657 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12658 "(expected tu %i gmch %i/%i link %i/%i, " \
12659 "or tu %i gmch %i/%i link %i/%i, " \
12660 "found tu %i, gmch %i/%i link %i/%i)\n", \
12661 current_config->name.tu, \
12662 current_config->name.gmch_m, \
12663 current_config->name.gmch_n, \
12664 current_config->name.link_m, \
12665 current_config->name.link_n, \
12666 current_config->alt_name.tu, \
12667 current_config->alt_name.gmch_m, \
12668 current_config->alt_name.gmch_n, \
12669 current_config->alt_name.link_m, \
12670 current_config->alt_name.link_n, \
12671 pipe_config->name.tu, \
12672 pipe_config->name.gmch_m, \
12673 pipe_config->name.gmch_n, \
12674 pipe_config->name.link_m, \
12675 pipe_config->name.link_n); \
12676 ret = false; \
12677 }
12678
12679 /* This is required for BDW+ where there is only one set of registers for
12680 * switching between high and low RR.
12681 * This macro can be used whenever a comparison has to be made between one
12682 * hw state and multiple sw state variables.
12683 */
12684 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12685 if ((current_config->name != pipe_config->name) && \
12686 (current_config->alt_name != pipe_config->name)) { \
12687 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12688 "(expected %i or %i, found %i)\n", \
12689 current_config->name, \
12690 current_config->alt_name, \
12691 pipe_config->name); \
12692 ret = false; \
12693 }
12694
12695 #define PIPE_CONF_CHECK_FLAGS(name, mask) \
12696 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12697 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12698 "(expected %i, found %i)\n", \
12699 current_config->name & (mask), \
12700 pipe_config->name & (mask)); \
12701 ret = false; \
12702 }
12703
12704 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12705 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12706 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12707 "(expected %i, found %i)\n", \
12708 current_config->name, \
12709 pipe_config->name); \
12710 ret = false; \
12711 }
12712
12713 #define PIPE_CONF_QUIRK(quirk) \
12714 ((current_config->quirks | pipe_config->quirks) & (quirk))
12715
12716 PIPE_CONF_CHECK_I(cpu_transcoder);
12717
12718 PIPE_CONF_CHECK_I(has_pch_encoder);
12719 PIPE_CONF_CHECK_I(fdi_lanes);
12720 PIPE_CONF_CHECK_M_N(fdi_m_n);
12721
12722 PIPE_CONF_CHECK_I(has_dp_encoder);
12723 PIPE_CONF_CHECK_I(lane_count);
12724
12725 if (INTEL_INFO(dev)->gen < 8) {
12726 PIPE_CONF_CHECK_M_N(dp_m_n);
12727
12728 if (current_config->has_drrs)
12729 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12730 } else
12731 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12732
12733 PIPE_CONF_CHECK_I(has_dsi_encoder);
12734
12735 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12736 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12737 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12738 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12739 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12740 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12741
12742 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12743 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12744 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12745 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12746 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12747 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12748
12749 PIPE_CONF_CHECK_I(pixel_multiplier);
12750 PIPE_CONF_CHECK_I(has_hdmi_sink);
12751 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12752 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12753 PIPE_CONF_CHECK_I(limited_color_range);
12754 PIPE_CONF_CHECK_I(has_infoframe);
12755
12756 PIPE_CONF_CHECK_I(has_audio);
12757
12758 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12759 DRM_MODE_FLAG_INTERLACE);
12760
12761 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12762 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12763 DRM_MODE_FLAG_PHSYNC);
12764 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12765 DRM_MODE_FLAG_NHSYNC);
12766 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12767 DRM_MODE_FLAG_PVSYNC);
12768 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12769 DRM_MODE_FLAG_NVSYNC);
12770 }
12771
12772 PIPE_CONF_CHECK_X(gmch_pfit.control);
12773 /* pfit ratios are autocomputed by the hw on gen4+ */
12774 if (INTEL_INFO(dev)->gen < 4)
12775 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
12776 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12777
12778 if (!adjust) {
12779 PIPE_CONF_CHECK_I(pipe_src_w);
12780 PIPE_CONF_CHECK_I(pipe_src_h);
12781
12782 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12783 if (current_config->pch_pfit.enabled) {
12784 PIPE_CONF_CHECK_X(pch_pfit.pos);
12785 PIPE_CONF_CHECK_X(pch_pfit.size);
12786 }
12787
12788 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12789 }
12790
12791 /* BDW+ don't expose a synchronous way to read the state */
12792 if (IS_HASWELL(dev))
12793 PIPE_CONF_CHECK_I(ips_enabled);
12794
12795 PIPE_CONF_CHECK_I(double_wide);
12796
12797 PIPE_CONF_CHECK_X(ddi_pll_sel);
12798
12799 PIPE_CONF_CHECK_I(shared_dpll);
12800 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12801 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12802 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12803 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12804 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12805 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12806 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12807 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12808 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12809
12810 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12811 PIPE_CONF_CHECK_I(pipe_bpp);
12812
12813 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12814 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12815
12816 #undef PIPE_CONF_CHECK_X
12817 #undef PIPE_CONF_CHECK_I
12818 #undef PIPE_CONF_CHECK_I_ALT
12819 #undef PIPE_CONF_CHECK_FLAGS
12820 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12821 #undef PIPE_CONF_QUIRK
12822 #undef INTEL_ERR_OR_DBG_KMS
12823
12824 return ret;
12825 }
12826
12827 static void check_wm_state(struct drm_device *dev)
12828 {
12829 struct drm_i915_private *dev_priv = dev->dev_private;
12830 struct skl_ddb_allocation hw_ddb, *sw_ddb;
12831 struct intel_crtc *intel_crtc;
12832 int plane;
12833
12834 if (INTEL_INFO(dev)->gen < 9)
12835 return;
12836
12837 skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12838 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12839
12840 for_each_intel_crtc(dev, intel_crtc) {
12841 struct skl_ddb_entry *hw_entry, *sw_entry;
12842 const enum pipe pipe = intel_crtc->pipe;
12843
12844 if (!intel_crtc->active)
12845 continue;
12846
12847 /* planes */
12848 for_each_plane(dev_priv, pipe, plane) {
12849 hw_entry = &hw_ddb.plane[pipe][plane];
12850 sw_entry = &sw_ddb->plane[pipe][plane];
12851
12852 if (skl_ddb_entry_equal(hw_entry, sw_entry))
12853 continue;
12854
12855 DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12856 "(expected (%u,%u), found (%u,%u))\n",
12857 pipe_name(pipe), plane + 1,
12858 sw_entry->start, sw_entry->end,
12859 hw_entry->start, hw_entry->end);
12860 }
12861
12862 /* cursor */
12863 hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12864 sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12865
12866 if (skl_ddb_entry_equal(hw_entry, sw_entry))
12867 continue;
12868
12869 DRM_ERROR("mismatch in DDB state pipe %c cursor "
12870 "(expected (%u,%u), found (%u,%u))\n",
12871 pipe_name(pipe),
12872 sw_entry->start, sw_entry->end,
12873 hw_entry->start, hw_entry->end);
12874 }
12875 }
12876
12877 static void
12878 check_connector_state(struct drm_device *dev,
12879 struct drm_atomic_state *old_state)
12880 {
12881 struct drm_connector_state *old_conn_state;
12882 struct drm_connector *connector;
12883 int i;
12884
12885 for_each_connector_in_state(old_state, connector, old_conn_state, i) {
12886 struct drm_encoder *encoder = connector->encoder;
12887 struct drm_connector_state *state = connector->state;
12888
12889 /* This also checks the encoder/connector hw state with the
12890 * ->get_hw_state callbacks. */
12891 intel_connector_check_state(to_intel_connector(connector));
12892
12893 I915_STATE_WARN(state->best_encoder != encoder,
12894 "connector's atomic encoder doesn't match legacy encoder\n");
12895 }
12896 }
12897
12898 static void
12899 check_encoder_state(struct drm_device *dev)
12900 {
12901 struct intel_encoder *encoder;
12902 struct intel_connector *connector;
12903
12904 for_each_intel_encoder(dev, encoder) {
12905 bool enabled = false;
12906 enum pipe pipe;
12907
12908 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12909 encoder->base.base.id,
12910 encoder->base.name);
12911
12912 for_each_intel_connector(dev, connector) {
12913 if (connector->base.state->best_encoder != &encoder->base)
12914 continue;
12915 enabled = true;
12916
12917 I915_STATE_WARN(connector->base.state->crtc !=
12918 encoder->base.crtc,
12919 "connector's crtc doesn't match encoder crtc\n");
12920 }
12921
12922 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12923 "encoder's enabled state mismatch "
12924 "(expected %i, found %i)\n",
12925 !!encoder->base.crtc, enabled);
12926
12927 if (!encoder->base.crtc) {
12928 bool active;
12929
12930 active = encoder->get_hw_state(encoder, &pipe);
12931 I915_STATE_WARN(active,
12932 "encoder detached but still enabled on pipe %c.\n",
12933 pipe_name(pipe));
12934 }
12935 }
12936 }
12937
12938 static void
12939 check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
12940 {
12941 struct drm_i915_private *dev_priv = dev->dev_private;
12942 struct intel_encoder *encoder;
12943 struct drm_crtc_state *old_crtc_state;
12944 struct drm_crtc *crtc;
12945 int i;
12946
12947 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
12948 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12949 struct intel_crtc_state *pipe_config, *sw_config;
12950 bool active;
12951
12952 if (!needs_modeset(crtc->state) &&
12953 !to_intel_crtc_state(crtc->state)->update_pipe)
12954 continue;
12955
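/*
 * Reuse the storage of the old crtc state as scratch space for the
 * hw state readout: release its internals, zero it and set up the
 * bare minimum again.
 */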
12956 __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12957 pipe_config = to_intel_crtc_state(old_crtc_state);
12958 memset(pipe_config, 0, sizeof(*pipe_config));
12959 pipe_config->base.crtc = crtc;
12960 pipe_config->base.state = old_state;
12961
12962 DRM_DEBUG_KMS("[CRTC:%d]\n",
12963 crtc->base.id);
12964
12965 active = dev_priv->display.get_pipe_config(intel_crtc,
12966 pipe_config);
12967
12968 /* hw state is inconsistent with the pipe quirk */
12969 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12970 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12971 active = crtc->state->active;
12972
12973 I915_STATE_WARN(crtc->state->active != active,
12974 "crtc active state doesn't match hw state "
12975 "(expected %i, found %i)\n", crtc->state->active, active);
12976
12977 I915_STATE_WARN(intel_crtc->active != crtc->state->active,
12978 "transitional active state does not match atomic hw state "
12979 "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);
12980
12981 for_each_encoder_on_crtc(dev, crtc, encoder) {
12982 enum pipe pipe;
12983
12984 active = encoder->get_hw_state(encoder, &pipe);
12985 I915_STATE_WARN(active != crtc->state->active,
12986 "[ENCODER:%i] active %i with crtc active %i\n",
12987 encoder->base.base.id, active, crtc->state->active);
12988
12989 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12990 "Encoder connected to wrong pipe %c\n",
12991 pipe_name(pipe));
12992
12993 if (active)
12994 encoder->get_config(encoder, pipe_config);
12995 }
12996
12997 if (!crtc->state->active)
12998 continue;
12999
13000 sw_config = to_intel_crtc_state(crtc->state);
13001 if (!intel_pipe_config_compare(dev, sw_config,
13002 pipe_config, false)) {
13003 I915_STATE_WARN(1, "pipe state doesn't match!\n");
13004 intel_dump_pipe_config(intel_crtc, pipe_config,
13005 "[hw state]");
13006 intel_dump_pipe_config(intel_crtc, sw_config,
13007 "[sw state]");
13008 }
13009 }
13010 }
13011
13012 static void
13013 check_shared_dpll_state(struct drm_device *dev)
13014 {
13015 struct drm_i915_private *dev_priv = dev->dev_private;
13016 struct intel_crtc *crtc;
13017 struct intel_dpll_hw_state dpll_hw_state;
13018 int i;
13019
13020 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13021 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13022 int enabled_crtcs = 0, active_crtcs = 0;
13023 bool active;
13024
13025 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
13026
13027 DRM_DEBUG_KMS("%s\n", pll->name);
13028
13029 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
13030
13031 I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
13032 "more active pll users than references: %i vs %i\n",
13033 pll->active, hweight32(pll->config.crtc_mask));
13034 I915_STATE_WARN(pll->active && !pll->on,
13035 "pll is in active use but not on in sw tracking\n");
13036 I915_STATE_WARN(pll->on && !pll->active,
13037 "pll is on but not in active use in sw tracking\n");
13038 I915_STATE_WARN(pll->on != active,
13039 "pll on state mismatch (expected %i, found %i)\n",
13040 pll->on, active);
13041
13042 for_each_intel_crtc(dev, crtc) {
13043 if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
13044 enabled_crtcs++;
13045 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
13046 active_crtcs++;
13047 }
13048 I915_STATE_WARN(pll->active != active_crtcs,
13049 "pll active crtcs mismatch (expected %i, found %i)\n",
13050 pll->active, active_crtcs);
13051 I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
13052 "pll enabled crtcs mismatch (expected %i, found %i)\n",
13053 hweight32(pll->config.crtc_mask), enabled_crtcs);
13054
13055 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
13056 sizeof(dpll_hw_state)),
13057 "pll hw state mismatch\n");
13058 }
13059 }
13060
13061 static void
13062 intel_modeset_check_state(struct drm_device *dev,
13063 struct drm_atomic_state *old_state)
13064 {
13065 check_wm_state(dev);
13066 check_connector_state(dev, old_state);
13067 check_encoder_state(dev);
13068 check_crtc_state(dev, old_state);
13069 check_shared_dpll_state(dev);
13070 }
13071
13072 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
13073 int dotclock)
13074 {
13075 /*
13076 * FDI already provided one idea for the dotclock.
13077 * Yell if the encoder disagrees.
13078 */
13079 WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
13080 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
13081 pipe_config->base.adjusted_mode.crtc_clock, dotclock);
13082 }
13083
13084 static void update_scanline_offset(struct intel_crtc *crtc)
13085 {
13086 struct drm_device *dev = crtc->base.dev;
13087
13088 /*
13089 * The scanline counter increments at the leading edge of hsync.
13090 *
13091 * On most platforms it starts counting from vtotal-1 on the
13092 * first active line. That means the scanline counter value is
13093 * always one less than what we would expect. Ie. just after
13094 * start of vblank, which also occurs at start of hsync (on the
13095 * last active line), the scanline counter will read vblank_start-1.
13096 *
13097 * On gen2 the scanline counter starts counting from 1 instead
13098 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13099 * to keep the value positive), instead of adding one.
13100 *
13101 * On HSW+ the behaviour of the scanline counter depends on the output
13102 * type. For DP ports it behaves like most other platforms, but on HDMI
13103 * there's an extra 1 line difference. So we need to add two instead of
13104 * one to the value.
13105 */
13106 if (IS_GEN2(dev)) {
13107 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
13108 int vtotal;
13109
13110 vtotal = adjusted_mode->crtc_vtotal;
13111 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
13112 vtotal /= 2;
13113
13114 crtc->scanline_offset = vtotal - 1;
13115 } else if (HAS_DDI(dev) &&
13116 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
13117 crtc->scanline_offset = 2;
13118 } else
13119 crtc->scanline_offset = 1;
13120 }
13121
13122 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
13123 {
13124 struct drm_device *dev = state->dev;
13125 struct drm_i915_private *dev_priv = to_i915(dev);
13126 struct intel_shared_dpll_config *shared_dpll = NULL;
13127 struct intel_crtc *intel_crtc;
13128 struct intel_crtc_state *intel_crtc_state;
13129 struct drm_crtc *crtc;
13130 struct drm_crtc_state *crtc_state;
13131 int i;
13132
13133 if (!dev_priv->display.crtc_compute_clock)
13134 return;
13135
13136 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13137 int dpll;
13138
13139 intel_crtc = to_intel_crtc(crtc);
13140 intel_crtc_state = to_intel_crtc_state(crtc_state);
13141 dpll = intel_crtc_state->shared_dpll;
13142
13143 if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
13144 continue;
13145
13146 intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
13147
13148 if (!shared_dpll)
13149 shared_dpll = intel_atomic_get_shared_dpll_state(state);
13150
13151 shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
13152 }
13153 }
13154
13155 /*
13156 * This implements the workaround described in the "notes" section of the mode
13157 * set sequence documentation. When going from no pipes or single pipe to
13158 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13159 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
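 * For example, going from pipe A active to pipes A and B active, the
 * newly enabled crtc records the already running pipe as its
 * hsw_workaround_pipe below, and the plane enable path then waits for
 * vblanks on that pipe first.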
13160 */
13161 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
13162 {
13163 struct drm_crtc_state *crtc_state;
13164 struct intel_crtc *intel_crtc;
13165 struct drm_crtc *crtc;
13166 struct intel_crtc_state *first_crtc_state = NULL;
13167 struct intel_crtc_state *other_crtc_state = NULL;
13168 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13169 int i;
13170
13171 /* look at all crtc's that are going to be enabled during modeset */
13172 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13173 intel_crtc = to_intel_crtc(crtc);
13174
13175 if (!crtc_state->active || !needs_modeset(crtc_state))
13176 continue;
13177
13178 if (first_crtc_state) {
13179 other_crtc_state = to_intel_crtc_state(crtc_state);
13180 break;
13181 } else {
13182 first_crtc_state = to_intel_crtc_state(crtc_state);
13183 first_pipe = intel_crtc->pipe;
13184 }
13185 }
13186
13187 /* No workaround needed? */
13188 if (!first_crtc_state)
13189 return 0;
13190
13191 /* w/a possibly needed, check how many crtc's are already enabled. */
13192 for_each_intel_crtc(state->dev, intel_crtc) {
13193 struct intel_crtc_state *pipe_config;
13194
13195 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
13196 if (IS_ERR(pipe_config))
13197 return PTR_ERR(pipe_config);
13198
13199 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
13200
13201 if (!pipe_config->base.active ||
13202 needs_modeset(&pipe_config->base))
13203 continue;
13204
13205 /* 2 or more enabled crtcs means no need for w/a */
13206 if (enabled_pipe != INVALID_PIPE)
13207 return 0;
13208
13209 enabled_pipe = intel_crtc->pipe;
13210 }
13211
13212 if (enabled_pipe != INVALID_PIPE)
13213 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13214 else if (other_crtc_state)
13215 other_crtc_state->hsw_workaround_pipe = first_pipe;
13216
13217 return 0;
13218 }
13219
13220 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13221 {
13222 struct drm_crtc *crtc;
13223 struct drm_crtc_state *crtc_state;
13224 int ret = 0;
13225
13226 /* add all active pipes to the state */
13227 for_each_crtc(state->dev, crtc) {
13228 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13229 if (IS_ERR(crtc_state))
13230 return PTR_ERR(crtc_state);
13231
13232 if (!crtc_state->active || needs_modeset(crtc_state))
13233 continue;
13234
13235 crtc_state->mode_changed = true;
13236
13237 ret = drm_atomic_add_affected_connectors(state, crtc);
13238 if (ret)
13239 break;
13240
13241 ret = drm_atomic_add_affected_planes(state, crtc);
13242 if (ret)
13243 break;
13244 }
13245
13246 return ret;
13247 }
13248
13249 static int intel_modeset_checks(struct drm_atomic_state *state)
13250 {
13251 struct drm_device *dev = state->dev;
13252 struct drm_i915_private *dev_priv = dev->dev_private;
13253 int ret;
13254
13255 if (!check_digital_port_conflicts(state)) {
13256 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13257 return -EINVAL;
13258 }
13259
13260 /*
13261 * See if the config requires any additional preparation, e.g.
13262 * to adjust global state with pipes off. We need to do this
13263 * here so we can get the modeset_pipe updated config for the new
13264 * mode set on this crtc. For other crtcs we need to use the
13265 * adjusted_mode bits in the crtc directly.
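 * Note that when the newly computed cdclk differs from the current
 * one, all active pipes are added to the state below so the change
 * is applied as a full modeset.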
13266 */
13267 if (dev_priv->display.modeset_calc_cdclk) {
13268 unsigned int cdclk;
13269
13270 ret = dev_priv->display.modeset_calc_cdclk(state);
13271
13272 cdclk = to_intel_atomic_state(state)->cdclk;
13273 if (!ret && cdclk != dev_priv->cdclk_freq)
13274 ret = intel_modeset_all_pipes(state);
13275
13276 if (ret < 0)
13277 return ret;
13278 } else
13279 to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
13280
13281 intel_modeset_clear_plls(state);
13282
13283 if (IS_HASWELL(dev))
13284 return haswell_mode_set_planes_workaround(state);
13285
13286 return 0;
13287 }
13288
13289 /*
13290 * Handle calculation of various watermark data at the end of the atomic check
13291 * phase. The code here should be run after the per-crtc and per-plane 'check'
13292 * handlers to ensure that all derived state has been updated.
13293 */
13294 static void calc_watermark_data(struct drm_atomic_state *state)
13295 {
13296 struct drm_device *dev = state->dev;
13297 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13298 struct drm_crtc *crtc;
13299 struct drm_crtc_state *cstate;
13300 struct drm_plane *plane;
13301 struct drm_plane_state *pstate;
13302
13303 /*
13304 * Calculate watermark configuration details now that derived
13305 * plane/crtc state is all properly updated.
13306 */
13307 drm_for_each_crtc(crtc, dev) {
13308 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13309 crtc->state;
13310
13311 if (cstate->active)
13312 intel_state->wm_config.num_pipes_active++;
13313 }
13314 drm_for_each_legacy_plane(plane, dev) {
13315 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13316 plane->state;
13317
13318 if (!to_intel_plane_state(pstate)->visible)
13319 continue;
13320
13321 intel_state->wm_config.sprites_enabled = true;
13322 if (pstate->crtc_w != pstate->src_w >> 16 ||
13323 pstate->crtc_h != pstate->src_h >> 16)
13324 intel_state->wm_config.sprites_scaled = true;
13325 }
13326 }
13327
13328 /**
13329 * intel_atomic_check - validate state object
13330 * @dev: drm device
13331 * @state: state to validate
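 *
 * Runs the drm atomic modeset checks, recomputes the pipe config for
 * every crtc that needs a modeset, downgrades a modeset to a fastset
 * when the new config fuzzily matches the current one, and finally
 * validates the planes and derives the watermark configuration.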
13332 */
13333 static int intel_atomic_check(struct drm_device *dev,
13334 struct drm_atomic_state *state)
13335 {
13336 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13337 struct drm_crtc *crtc;
13338 struct drm_crtc_state *crtc_state;
13339 int ret, i;
13340 bool any_ms = false;
13341
13342 ret = drm_atomic_helper_check_modeset(dev, state);
13343 if (ret)
13344 return ret;
13345
13346 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13347 struct intel_crtc_state *pipe_config =
13348 to_intel_crtc_state(crtc_state);
13349
13350 memset(&to_intel_crtc(crtc)->atomic, 0,
13351 sizeof(struct intel_crtc_atomic_commit));
13352
13353 /* Catch I915_MODE_FLAG_INHERITED */
13354 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13355 crtc_state->mode_changed = true;
13356
13357 if (!crtc_state->enable) {
13358 if (needs_modeset(crtc_state))
13359 any_ms = true;
13360 continue;
13361 }
13362
13363 if (!needs_modeset(crtc_state))
13364 continue;
13365
13366 /* FIXME: For only active_changed we shouldn't need to do any
13367 * state recomputation at all. */
13368
13369 ret = drm_atomic_add_affected_connectors(state, crtc);
13370 if (ret)
13371 return ret;
13372
13373 ret = intel_modeset_pipe_config(crtc, pipe_config);
13374 if (ret)
13375 return ret;
13376
13377 if (i915.fastboot &&
13378 intel_pipe_config_compare(state->dev,
13379 to_intel_crtc_state(crtc->state),
13380 pipe_config, true)) {
13381 crtc_state->mode_changed = false;
13382 to_intel_crtc_state(crtc_state)->update_pipe = true;
13383 }
13384
13385 if (needs_modeset(crtc_state)) {
13386 any_ms = true;
13387
13388 ret = drm_atomic_add_affected_planes(state, crtc);
13389 if (ret)
13390 return ret;
13391 }
13392
13393 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13394 needs_modeset(crtc_state) ?
13395 "[modeset]" : "[fastset]");
13396 }
13397
13398 if (any_ms) {
13399 ret = intel_modeset_checks(state);
13400
13401 if (ret)
13402 return ret;
13403 } else
13404 intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
13405
13406 ret = drm_atomic_helper_check_planes(state->dev, state);
13407 if (ret)
13408 return ret;
13409
13410 calc_watermark_data(state);
13411
13412 return 0;
13413 }
13414
13415 static int intel_atomic_prepare_commit(struct drm_device *dev,
13416 struct drm_atomic_state *state,
13417 bool async)
13418 {
13419 struct drm_i915_private *dev_priv = dev->dev_private;
13420 struct drm_plane_state *plane_state;
13421 struct drm_crtc_state *crtc_state;
13422 struct drm_plane *plane;
13423 struct drm_crtc *crtc;
13424 int i, ret;
13425
13426 if (async) {
13427 DRM_DEBUG_KMS("i915 does not yet support async commit\n");
13428 return -EINVAL;
13429 }
13430
13431 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13432 ret = intel_crtc_wait_for_pending_flips(crtc);
13433 if (ret)
13434 return ret;
13435
13436 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13437 flush_workqueue(dev_priv->wq);
13438 }
13439
13440 ret = mutex_lock_interruptible(&dev->struct_mutex);
13441 if (ret)
13442 return ret;
13443
13444 ret = drm_atomic_helper_prepare_planes(dev, state);
13445 if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
13446 u32 reset_counter;
13447
13448 reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
13449 mutex_unlock(&dev->struct_mutex);
13450
13451 for_each_plane_in_state(state, plane, plane_state, i) {
13452 struct intel_plane_state *intel_plane_state =
13453 to_intel_plane_state(plane_state);
13454
13455 if (!intel_plane_state->wait_req)
13456 continue;
13457
13458 ret = __i915_wait_request(intel_plane_state->wait_req,
13459 reset_counter, true,
13460 NULL, NULL);
13461
13462 /* Swallow -EIO errors to allow updates during hw lockup. */
13463 if (ret == -EIO)
13464 ret = 0;
13465
13466 if (ret)
13467 break;
13468 }
13469
13470 if (!ret)
13471 return 0;
13472
13473 mutex_lock(&dev->struct_mutex);
13474 drm_atomic_helper_cleanup_planes(dev, state);
13475 }
13476
13477 mutex_unlock(&dev->struct_mutex);
13478 return ret;
13479 }
13480
13481 /**
13482 * intel_atomic_commit - commit validated state object
13483 * @dev: DRM device
13484 * @state: the top-level driver state object
13485 * @async: asynchronous commit
13486 *
13487 * This function commits a top-level state object that has been validated
13488 * with drm_atomic_helper_check().
13489 *
13490 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13491 * we can only handle plane-related operations and do not yet support
13492 * asynchronous commit.
13493 *
13494 * RETURNS
13495 * Zero for success or -errno.
13496 */
13497 static int intel_atomic_commit(struct drm_device *dev,
13498 struct drm_atomic_state *state,
13499 bool async)
13500 {
13501 struct drm_i915_private *dev_priv = dev->dev_private;
13502 struct drm_crtc_state *crtc_state;
13503 struct drm_crtc *crtc;
13504 int ret = 0;
13505 int i;
13506 bool any_ms = false;
13507
13508 ret = intel_atomic_prepare_commit(dev, state, async);
13509 if (ret) {
13510 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13511 return ret;
13512 }
13513
13514 drm_atomic_helper_swap_state(dev, state);
13515 dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
13516
13517 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13518 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13519
13520 if (!needs_modeset(crtc->state))
13521 continue;
13522
13523 any_ms = true;
13524 intel_pre_plane_update(intel_crtc);
13525
13526 if (crtc_state->active) {
13527 intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
13528 dev_priv->display.crtc_disable(crtc);
13529 intel_crtc->active = false;
13530 intel_disable_shared_dpll(intel_crtc);
13531
13532 /*
13533 * Underruns don't always raise
13534 * interrupts, so check manually.
13535 */
13536 intel_check_cpu_fifo_underruns(dev_priv);
13537 intel_check_pch_fifo_underruns(dev_priv);
13538
13539 if (!crtc->state->active)
13540 intel_update_watermarks(crtc);
13541 }
13542 }
13543
13544 /* Only after disabling all output pipelines that will be changed can we
13545 * update the output configuration. */
13546 intel_modeset_update_crtc_state(state);
13547
13548 if (any_ms) {
13549 intel_shared_dpll_commit(state);
13550
13551 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13552 modeset_update_crtc_power_domains(state);
13553 }
13554
13555 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13556 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13558 bool modeset = needs_modeset(crtc->state);
13559 bool update_pipe = !modeset &&
13560 to_intel_crtc_state(crtc->state)->update_pipe;
13561 unsigned long put_domains = 0;
13562
13563 if (modeset)
13564 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13565
13566 if (modeset && crtc->state->active) {
13567 update_scanline_offset(to_intel_crtc(crtc));
13568 dev_priv->display.crtc_enable(crtc);
13569 }
13570
13571 if (update_pipe) {
13572 put_domains = modeset_get_crtc_power_domains(crtc);
13573
13574 /* make sure intel_modeset_check_state runs */
13575 any_ms = true;
13576 }
13577
13578 if (!modeset)
13579 intel_pre_plane_update(intel_crtc);
13580
13581 if (crtc->state->active &&
13582 (crtc->state->planes_changed || update_pipe))
13583 drm_atomic_helper_commit_planes_on_crtc(crtc_state);
13584
13585 if (put_domains)
13586 modeset_put_power_domains(dev_priv, put_domains);
13587
13588 intel_post_plane_update(intel_crtc);
13589
13590 if (modeset)
13591 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13592 }
13593
13594 /* FIXME: add subpixel order */
13595
13596 drm_atomic_helper_wait_for_vblanks(dev, state);
13597
13598 mutex_lock(&dev->struct_mutex);
13599 drm_atomic_helper_cleanup_planes(dev, state);
13600 mutex_unlock(&dev->struct_mutex);
13601
13602 if (any_ms)
13603 intel_modeset_check_state(dev, state);
13604
13605 drm_atomic_state_free(state);
13606
13607 return 0;
13608 }
13609
13610 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13611 {
13612 struct drm_device *dev = crtc->dev;
13613 struct drm_atomic_state *state;
13614 struct drm_crtc_state *crtc_state;
13615 int ret;
13616
13617 state = drm_atomic_state_alloc(dev);
13618 if (!state) {
13619 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory\n",
13620 crtc->base.id);
13621 return;
13622 }
13623
13624 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13625
13626 retry:
13627 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13628 ret = PTR_ERR_OR_ZERO(crtc_state);
13629 if (!ret) {
13630 if (!crtc_state->active)
13631 goto out;
13632
13633 crtc_state->mode_changed = true;
13634 ret = drm_atomic_commit(state);
13635 }
13636
13637 if (ret == -EDEADLK) {
13638 drm_atomic_state_clear(state);
13639 drm_modeset_backoff(state->acquire_ctx);
13640 goto retry;
13641 }
13642
13643 if (ret)
13644 out:
13645 drm_atomic_state_free(state);
13646 }
13647
13648 #undef for_each_intel_crtc_masked
13649
13650 static const struct drm_crtc_funcs intel_crtc_funcs = {
13651 .gamma_set = intel_crtc_gamma_set,
13652 .set_config = drm_atomic_helper_set_config,
13653 .destroy = intel_crtc_destroy,
13654 .page_flip = intel_crtc_page_flip,
13655 .atomic_duplicate_state = intel_crtc_duplicate_state,
13656 .atomic_destroy_state = intel_crtc_destroy_state,
13657 };
13658
13659 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
13660 struct intel_shared_dpll *pll,
13661 struct intel_dpll_hw_state *hw_state)
13662 {
13663 uint32_t val;
13664
13665 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
13666 return false;
13667
13668 val = I915_READ(PCH_DPLL(pll->id));
13669 hw_state->dpll = val;
13670 hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
13671 hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
13672
13673 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
13674
13675 return val & DPLL_VCO_ENABLE;
13676 }
13677
13678 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
13679 struct intel_shared_dpll *pll)
13680 {
13681 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
13682 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
13683 }
13684
13685 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
13686 struct intel_shared_dpll *pll)
13687 {
13688 /* PCH refclock must be enabled first */
13689 ibx_assert_pch_refclk_enabled(dev_priv);
13690
13691 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13692
13693 /* Wait for the clocks to stabilize. */
13694 POSTING_READ(PCH_DPLL(pll->id));
13695 udelay(150);
13696
13697 /* The pixel multiplier can only be updated once the
13698 * DPLL is enabled and the clocks are stable.
13699 *
13700 * So write it again.
13701 */
13702 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
13703 POSTING_READ(PCH_DPLL(pll->id));
13704 udelay(200);
13705 }
13706
13707 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
13708 struct intel_shared_dpll *pll)
13709 {
13710 struct drm_device *dev = dev_priv->dev;
13711 struct intel_crtc *crtc;
13712
13713 /* Make sure no transcoder is still depending on us. */
13714 for_each_intel_crtc(dev, crtc) {
13715 if (intel_crtc_to_shared_dpll(crtc) == pll)
13716 assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
13717 }
13718
13719 I915_WRITE(PCH_DPLL(pll->id), 0);
13720 POSTING_READ(PCH_DPLL(pll->id));
13721 udelay(200);
13722 }
13723
13724 static char *ibx_pch_dpll_names[] = {
13725 "PCH DPLL A",
13726 "PCH DPLL B",
13727 };
13728
13729 static void ibx_pch_dpll_init(struct drm_device *dev)
13730 {
13731 struct drm_i915_private *dev_priv = dev->dev_private;
13732 int i;
13733
13734 dev_priv->num_shared_dpll = 2;
13735
13736 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13737 dev_priv->shared_dplls[i].id = i;
13738 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
13739 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
13740 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13741 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
13742 dev_priv->shared_dplls[i].get_hw_state =
13743 ibx_pch_dpll_get_hw_state;
13744 }
13745 }
13746
13747 static void intel_shared_dpll_init(struct drm_device *dev)
13748 {
13749 struct drm_i915_private *dev_priv = dev->dev_private;
13750
13751 if (HAS_DDI(dev))
13752 intel_ddi_pll_init(dev);
13753 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13754 ibx_pch_dpll_init(dev);
13755 else
13756 dev_priv->num_shared_dpll = 0;
13757
13758 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
13759 }
13760
13761 /**
13762 * intel_prepare_plane_fb - Prepare fb for usage on plane
13763 * @plane: drm plane to prepare for
13764 * @new_state: the new plane state with the framebuffer to prepare
13765 *
13766 * Prepares a framebuffer for usage on a display plane. Generally this
13767 * involves pinning the underlying object and updating the frontbuffer tracking
13768 * bits. Some older platforms need special physical address handling for
13769 * cursor planes.
13770 *
13771 * Must be called with struct_mutex held.
13772 *
13773 * Returns 0 on success, negative error code on failure.
13774 */
13775 int
13776 intel_prepare_plane_fb(struct drm_plane *plane,
13777 const struct drm_plane_state *new_state)
13778 {
13779 struct drm_device *dev = plane->dev;
13780 struct drm_framebuffer *fb = new_state->fb;
13781 struct intel_plane *intel_plane = to_intel_plane(plane);
13782 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13783 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13784 int ret = 0;
13785
13786 if (!obj && !old_obj)
13787 return 0;
13788
13789 if (old_obj) {
13790 struct drm_crtc_state *crtc_state =
13791 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13792
13793 /* Big Hammer, we also need to ensure that any pending
13794 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13795 * current scanout is retired before unpinning the old
13796 * framebuffer. Note that we rely on userspace rendering
13797 * into the buffer attached to the pipe they are waiting
13798 * on. If not, userspace generates a GPU hang with IPEHR
13799 * pointing to the MI_WAIT_FOR_EVENT.
13800 *
13801 * This should only fail upon a hung GPU, in which case we
13802 * can safely continue.
13803 */
13804 if (needs_modeset(crtc_state))
13805 ret = i915_gem_object_wait_rendering(old_obj, true);
13806
13807 /* Swallow -EIO errors to allow updates during hw lockup. */
13808 if (ret && ret != -EIO)
13809 return ret;
13810 }
13811
13812 /* For framebuffer backed by dmabuf, wait for fence */
13813 if (obj && obj->base.dma_buf) {
13814 long lret;
13815
13816 lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13817 false, true,
13818 MAX_SCHEDULE_TIMEOUT);
13819 if (lret == -ERESTARTSYS)
13820 return lret;
13821
13822 WARN(lret < 0, "waiting returns %li\n", lret);
13823 }
13824
13825 if (!obj) {
13826 ret = 0;
13827 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13828 INTEL_INFO(dev)->cursor_needs_physical) {
13829 int align = IS_I830(dev) ? 16 * 1024 : 256;
13830 ret = i915_gem_object_attach_phys(obj, align);
13831 if (ret)
13832 DRM_DEBUG_KMS("failed to attach phys object\n");
13833 } else {
13834 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
13835 }
13836
13837 if (ret == 0) {
13838 if (obj) {
13839 struct intel_plane_state *plane_state =
13840 to_intel_plane_state(new_state);
13841
13842 i915_gem_request_assign(&plane_state->wait_req,
13843 obj->last_write_req);
13844 }
13845
13846 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13847 }
13848
13849 return ret;
13850 }
13851
13852 /**
13853 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13854 * @plane: drm plane to clean up for
13855 * @old_state: the old plane state holding the framebuffer being removed
13856 *
13857 * Cleans up a framebuffer that has just been removed from a plane.
13858 *
13859 * Must be called with struct_mutex held.
13860 */
13861 void
13862 intel_cleanup_plane_fb(struct drm_plane *plane,
13863 const struct drm_plane_state *old_state)
13864 {
13865 struct drm_device *dev = plane->dev;
13866 struct intel_plane *intel_plane = to_intel_plane(plane);
13867 struct intel_plane_state *old_intel_state;
13868 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13869 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13870
13871 old_intel_state = to_intel_plane_state(old_state);
13872
13873 if (!obj && !old_obj)
13874 return;
13875
13876 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13877 !INTEL_INFO(dev)->cursor_needs_physical))
13878 intel_unpin_fb_obj(old_state->fb, old_state);
13879
13880 /* prepare_fb aborted? */
13881 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13882 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13883 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13884
13885 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13886
13887 }
13888
13889 int
13890 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13891 {
13892 int max_scale;
13893 struct drm_device *dev;
13894 struct drm_i915_private *dev_priv;
13895 int crtc_clock, cdclk;
13896
13897 if (!intel_crtc || !crtc_state)
13898 return DRM_PLANE_HELPER_NO_SCALING;
13899
13900 dev = intel_crtc->base.dev;
13901 dev_priv = dev->dev_private;
13902 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13903 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13904
13905 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13906 return DRM_PLANE_HELPER_NO_SCALING;
13907
13908 /*
13909 * skl max scale is lower of:
13910 * just under 3 (the -1 keeps the value below exactly 3)
13911 * or
13912 * cdclk/crtc_clock
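 * Both terms are expressed in 16.16 fixed point;
 * (1 << 8) * ((cdclk << 8) / crtc_clock) is cdclk/crtc_clock scaled
 * to that format.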
13913 */
13914 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13915
13916 return max_scale;
13917 }
13918
13919 static int
13920 intel_check_primary_plane(struct drm_plane *plane,
13921 struct intel_crtc_state *crtc_state,
13922 struct intel_plane_state *state)
13923 {
13924 struct drm_crtc *crtc = state->base.crtc;
13925 struct drm_framebuffer *fb = state->base.fb;
13926 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13927 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13928 bool can_position = false;
13929
13930 if (INTEL_INFO(plane->dev)->gen >= 9) {
13931 /* use scaler when colorkey is not required */
13932 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13933 min_scale = 1;
13934 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13935 }
13936 can_position = true;
13937 }
13938
13939 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13940 &state->dst, &state->clip,
13941 min_scale, max_scale,
13942 can_position, true,
13943 &state->visible);
13944 }
13945
13946 static void
13947 intel_commit_primary_plane(struct drm_plane *plane,
13948 struct intel_plane_state *state)
13949 {
13950 struct drm_crtc *crtc = state->base.crtc;
13951 struct drm_framebuffer *fb = state->base.fb;
13952 struct drm_device *dev = plane->dev;
13953 struct drm_i915_private *dev_priv = dev->dev_private;
13954
13955 crtc = crtc ? crtc : plane->crtc;
13956
13957 dev_priv->display.update_primary_plane(crtc, fb,
13958 state->src.x1 >> 16,
13959 state->src.y1 >> 16);
13960 }
13961
13962 static void
13963 intel_disable_primary_plane(struct drm_plane *plane,
13964 struct drm_crtc *crtc)
13965 {
13966 struct drm_device *dev = plane->dev;
13967 struct drm_i915_private *dev_priv = dev->dev_private;
13968
13969 dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
13970 }
13971
13972 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13973 struct drm_crtc_state *old_crtc_state)
13974 {
13975 struct drm_device *dev = crtc->dev;
13976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13977 struct intel_crtc_state *old_intel_state =
13978 to_intel_crtc_state(old_crtc_state);
13979 bool modeset = needs_modeset(crtc->state);
13980
13981 /* Perform vblank evasion around commit operation */
13982 intel_pipe_update_start(intel_crtc);
13983
13984 if (modeset)
13985 return;
13986
13987 if (to_intel_crtc_state(crtc->state)->update_pipe)
13988 intel_update_pipe_config(intel_crtc, old_intel_state);
13989 else if (INTEL_INFO(dev)->gen >= 9)
13990 skl_detach_scalers(intel_crtc);
13991 }
13992
13993 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13994 struct drm_crtc_state *old_crtc_state)
13995 {
13996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13997
13998 intel_pipe_update_end(intel_crtc);
13999 }
14000
14001 /**
14002 * intel_plane_destroy - destroy a plane
14003 * @plane: plane to destroy
14004 *
14005 * Common destruction function for all types of planes (primary, cursor,
14006 * sprite).
14007 */
14008 void intel_plane_destroy(struct drm_plane *plane)
14009 {
14010 struct intel_plane *intel_plane = to_intel_plane(plane);
14011 drm_plane_cleanup(plane);
14012 kfree(intel_plane);
14013 }
14014
14015 const struct drm_plane_funcs intel_plane_funcs = {
14016 .update_plane = drm_atomic_helper_update_plane,
14017 .disable_plane = drm_atomic_helper_disable_plane,
14018 .destroy = intel_plane_destroy,
14019 .set_property = drm_atomic_helper_plane_set_property,
14020 .atomic_get_property = intel_plane_atomic_get_property,
14021 .atomic_set_property = intel_plane_atomic_set_property,
14022 .atomic_duplicate_state = intel_plane_duplicate_state,
14023 .atomic_destroy_state = intel_plane_destroy_state,
14024
14025 };
14026
14027 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14028 int pipe)
14029 {
14030 struct intel_plane *primary;
14031 struct intel_plane_state *state;
14032 const uint32_t *intel_primary_formats;
14033 unsigned int num_formats;
14034
14035 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
14036 if (primary == NULL)
14037 return NULL;
14038
14039 state = intel_create_plane_state(&primary->base);
14040 if (!state) {
14041 kfree(primary);
14042 return NULL;
14043 }
14044 primary->base.state = &state->base;
14045
14046 primary->can_scale = false;
14047 primary->max_downscale = 1;
14048 if (INTEL_INFO(dev)->gen >= 9) {
14049 primary->can_scale = true;
14050 state->scaler_id = -1;
14051 }
14052 primary->pipe = pipe;
14053 primary->plane = pipe;
14054 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
14055 primary->check_plane = intel_check_primary_plane;
14056 primary->commit_plane = intel_commit_primary_plane;
14057 primary->disable_plane = intel_disable_primary_plane;
14058 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14059 primary->plane = !pipe;
14060
14061 if (INTEL_INFO(dev)->gen >= 9) {
14062 intel_primary_formats = skl_primary_formats;
14063 num_formats = ARRAY_SIZE(skl_primary_formats);
14064 } else if (INTEL_INFO(dev)->gen >= 4) {
14065 intel_primary_formats = i965_primary_formats;
14066 num_formats = ARRAY_SIZE(i965_primary_formats);
14067 } else {
14068 intel_primary_formats = i8xx_primary_formats;
14069 num_formats = ARRAY_SIZE(i8xx_primary_formats);
14070 }
14071
14072 drm_universal_plane_init(dev, &primary->base, 0,
14073 &intel_plane_funcs,
14074 intel_primary_formats, num_formats,
14075 DRM_PLANE_TYPE_PRIMARY, NULL);
14076
14077 if (INTEL_INFO(dev)->gen >= 4)
14078 intel_create_rotation_property(dev, primary);
14079
14080 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14081
14082 return &primary->base;
14083 }
14084
14085 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14086 {
14087 if (!dev->mode_config.rotation_property) {
14088 unsigned long flags = BIT(DRM_ROTATE_0) |
14089 BIT(DRM_ROTATE_180);
14090
14091 if (INTEL_INFO(dev)->gen >= 9)
14092 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14093
14094 dev->mode_config.rotation_property =
14095 drm_mode_create_rotation_property(dev, flags);
14096 }
14097 if (dev->mode_config.rotation_property)
14098 drm_object_attach_property(&plane->base.base,
14099 dev->mode_config.rotation_property,
14100 plane->base.state->rotation);
14101 }
14102
14103 static int
14104 intel_check_cursor_plane(struct drm_plane *plane,
14105 struct intel_crtc_state *crtc_state,
14106 struct intel_plane_state *state)
14107 {
14108 struct drm_crtc *crtc = crtc_state->base.crtc;
14109 struct drm_framebuffer *fb = state->base.fb;
14110 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14111 enum pipe pipe = to_intel_plane(plane)->pipe;
14112 unsigned stride;
14113 int ret;
14114
14115 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14116 &state->dst, &state->clip,
14117 DRM_PLANE_HELPER_NO_SCALING,
14118 DRM_PLANE_HELPER_NO_SCALING,
14119 true, true, &state->visible);
14120 if (ret)
14121 return ret;
14122
14123 /* if we want to turn off the cursor, ignore width and height */
14124 if (!obj)
14125 return 0;
14126
14127 /* Check for which cursor types we support */
14128 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
14129 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14130 state->base.crtc_w, state->base.crtc_h);
14131 return -EINVAL;
14132 }
14133
14134 stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14135 if (obj->base.size < stride * state->base.crtc_h) {
14136 DRM_DEBUG_KMS("buffer is too small\n");
14137 return -ENOMEM;
14138 }
14139
14140 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
14141 DRM_DEBUG_KMS("cursor cannot be tiled\n");
14142 return -EINVAL;
14143 }
14144
14145 /*
14146 * There's something wrong with the cursor on CHV pipe C.
14147 * If it straddles the left edge of the screen then
14148 * moving it away from the edge or disabling it often
14149 * results in a pipe underrun, and often that can lead to
14150 * a dead pipe (constant underrun reported, and it scans
14151 * out just a solid color). To recover from that, the
14152 * display power well must be turned off and on again.
14153 * Refuse to put the cursor into that compromised position.
14154 */
14155 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14156 state->visible && state->base.crtc_x < 0) {
14157 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14158 return -EINVAL;
14159 }
14160
14161 return 0;
14162 }
14163
14164 static void
14165 intel_disable_cursor_plane(struct drm_plane *plane,
14166 struct drm_crtc *crtc)
14167 {
14168 intel_crtc_update_cursor(crtc, false);
14169 }
14170
14171 static void
14172 intel_commit_cursor_plane(struct drm_plane *plane,
14173 struct intel_plane_state *state)
14174 {
14175 struct drm_crtc *crtc = state->base.crtc;
14176 struct drm_device *dev = plane->dev;
14177 struct intel_crtc *intel_crtc;
14178 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
14179 uint32_t addr;
14180
14181 crtc = crtc ? crtc : plane->crtc;
14182 intel_crtc = to_intel_crtc(crtc);
14183
14184 if (!obj)
14185 addr = 0;
14186 else if (!INTEL_INFO(dev)->cursor_needs_physical)
14187 addr = i915_gem_obj_ggtt_offset(obj);
14188 else
14189 addr = obj->phys_handle->busaddr;
14190
14191 intel_crtc->cursor_addr = addr;
14192
14193 if (crtc->state->active)
14194 intel_crtc_update_cursor(crtc, state->visible);
14195 }
14196
14197 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14198 int pipe)
14199 {
14200 struct intel_plane *cursor;
14201 struct intel_plane_state *state;
14202
14203 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
14204 if (cursor == NULL)
14205 return NULL;
14206
14207 state = intel_create_plane_state(&cursor->base);
14208 if (!state) {
14209 kfree(cursor);
14210 return NULL;
14211 }
14212 cursor->base.state = &state->base;
14213
14214 cursor->can_scale = false;
14215 cursor->max_downscale = 1;
14216 cursor->pipe = pipe;
14217 cursor->plane = pipe;
14218 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
14219 cursor->check_plane = intel_check_cursor_plane;
14220 cursor->commit_plane = intel_commit_cursor_plane;
14221 cursor->disable_plane = intel_disable_cursor_plane;
14222
14223 drm_universal_plane_init(dev, &cursor->base, 0,
14224 &intel_plane_funcs,
14225 intel_cursor_formats,
14226 ARRAY_SIZE(intel_cursor_formats),
14227 DRM_PLANE_TYPE_CURSOR, NULL);
14228
14229 if (INTEL_INFO(dev)->gen >= 4) {
14230 if (!dev->mode_config.rotation_property)
14231 dev->mode_config.rotation_property =
14232 drm_mode_create_rotation_property(dev,
14233 BIT(DRM_ROTATE_0) |
14234 BIT(DRM_ROTATE_180));
14235 if (dev->mode_config.rotation_property)
14236 drm_object_attach_property(&cursor->base.base,
14237 dev->mode_config.rotation_property,
14238 state->base.rotation);
14239 }
14240
14241 if (INTEL_INFO(dev)->gen >= 9)
14242 state->scaler_id = -1;
14243
14244 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14245
14246 return &cursor->base;
14247 }
14248
14249 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14250 struct intel_crtc_state *crtc_state)
14251 {
14252 int i;
14253 struct intel_scaler *intel_scaler;
14254 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14255
14256 for (i = 0; i < intel_crtc->num_scalers; i++) {
14257 intel_scaler = &scaler_state->scalers[i];
14258 intel_scaler->in_use = 0;
14259 intel_scaler->mode = PS_SCALER_MODE_DYN;
14260 }
14261
14262 scaler_state->scaler_id = -1;
14263 }
14264
14265 static void intel_crtc_init(struct drm_device *dev, int pipe)
14266 {
14267 struct drm_i915_private *dev_priv = dev->dev_private;
14268 struct intel_crtc *intel_crtc;
14269 struct intel_crtc_state *crtc_state = NULL;
14270 struct drm_plane *primary = NULL;
14271 struct drm_plane *cursor = NULL;
14272 int i, ret;
14273
14274 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
14275 if (intel_crtc == NULL)
14276 return;
14277
14278 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14279 if (!crtc_state)
14280 goto fail;
14281 intel_crtc->config = crtc_state;
14282 intel_crtc->base.state = &crtc_state->base;
14283 crtc_state->base.crtc = &intel_crtc->base;
14284
14285 /* initialize shared scalers */
14286 if (INTEL_INFO(dev)->gen >= 9) {
14287 if (pipe == PIPE_C)
14288 intel_crtc->num_scalers = 1;
14289 else
14290 intel_crtc->num_scalers = SKL_NUM_SCALERS;
14291
14292 skl_init_scalers(dev, intel_crtc, crtc_state);
14293 }
14294
14295 primary = intel_primary_plane_create(dev, pipe);
14296 if (!primary)
14297 goto fail;
14298
14299 cursor = intel_cursor_plane_create(dev, pipe);
14300 if (!cursor)
14301 goto fail;
14302
14303 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14304 cursor, &intel_crtc_funcs, NULL);
14305 if (ret)
14306 goto fail;
14307
14308 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
14309 for (i = 0; i < 256; i++) {
14310 intel_crtc->lut_r[i] = i;
14311 intel_crtc->lut_g[i] = i;
14312 intel_crtc->lut_b[i] = i;
14313 }
14314
14315 /*
14316 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
14317 * are hooked to pipe B. Hence we want plane A feeding pipe B.
14318 */
14319 intel_crtc->pipe = pipe;
14320 intel_crtc->plane = pipe;
14321 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
14322 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
14323 intel_crtc->plane = !pipe;
14324 }
14325
14326 intel_crtc->cursor_base = ~0;
14327 intel_crtc->cursor_cntl = ~0;
14328 intel_crtc->cursor_size = ~0;
14329
14330 intel_crtc->wm.cxsr_allowed = true;
14331
14332 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14333 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14334 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14335 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14336
14337 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
14338
14339 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
14340 return;
14341
14342 fail:
14343 if (primary)
14344 drm_plane_cleanup(primary);
14345 if (cursor)
14346 drm_plane_cleanup(cursor);
14347 kfree(crtc_state);
14348 kfree(intel_crtc);
14349 }
14350
14351 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14352 {
14353 struct drm_encoder *encoder = connector->base.encoder;
14354 struct drm_device *dev = connector->base.dev;
14355
14356 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
14357
14358 if (!encoder || WARN_ON(!encoder->crtc))
14359 return INVALID_PIPE;
14360
14361 return to_intel_crtc(encoder->crtc)->pipe;
14362 }
14363
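/* ioctl handler: translate a userspace CRTC id into the hw pipe it drives. */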
14364 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14365 struct drm_file *file)
14366 {
14367 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
14368 struct drm_crtc *drmmode_crtc;
14369 struct intel_crtc *crtc;
14370
14371 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14372
14373 if (!drmmode_crtc) {
14374 DRM_ERROR("no such CRTC id\n");
14375 return -ENOENT;
14376 }
14377
14378 crtc = to_intel_crtc(drmmode_crtc);
14379 pipe_from_crtc_id->pipe = crtc->pipe;
14380
14381 return 0;
14382 }
14383
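/* Build a bitmask of encoder indices that may be cloned with (i.e. share a pipe with) the given encoder. */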
14384 static int intel_encoder_clones(struct intel_encoder *encoder)
14385 {
14386 struct drm_device *dev = encoder->base.dev;
14387 struct intel_encoder *source_encoder;
14388 int index_mask = 0;
14389 int entry = 0;
14390
14391 for_each_intel_encoder(dev, source_encoder) {
14392 if (encoders_cloneable(encoder, source_encoder))
14393 index_mask |= (1 << entry);
14394
14395 entry++;
14396 }
14397
14398 return index_mask;
14399 }
14400
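/* eDP on port A exists only on mobile parts, requires the DP_A detect bit, and can be fused off on gen5. */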
14401 static bool has_edp_a(struct drm_device *dev)
14402 {
14403 struct drm_i915_private *dev_priv = dev->dev_private;
14404
14405 if (!IS_MOBILE(dev))
14406 return false;
14407
14408 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14409 return false;
14410
14411 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
14412 return false;
14413
14414 return true;
14415 }
14416
14417 static bool intel_crt_present(struct drm_device *dev)
14418 {
14419 struct drm_i915_private *dev_priv = dev->dev_private;
14420
14421 if (INTEL_INFO(dev)->gen >= 9)
14422 return false;
14423
14424 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
14425 return false;
14426
14427 if (IS_CHERRYVIEW(dev))
14428 return false;
14429
14430 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14431 return false;
14432
14433 /* DDI E can't be used if DDI A requires 4 lanes */
14434 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14435 return false;
14436
14437 if (!dev_priv->vbt.int_crt_support)
14438 return false;
14439
14440 return true;
14441 }
14442
14443 static void intel_setup_outputs(struct drm_device *dev)
14444 {
14445 struct drm_i915_private *dev_priv = dev->dev_private;
14446 struct intel_encoder *encoder;
14447 bool dpd_is_edp = false;
14448
14449 intel_lvds_init(dev);
14450
14451 if (intel_crt_present(dev))
14452 intel_crt_init(dev);
14453
14454 if (IS_BROXTON(dev)) {
14455 /*
14456 * FIXME: Broxton doesn't support port detection via the
14457 		 * DDI_BUF_CTL_A or SFUSE_STRAP registers; find another way to
14458 * detect the ports.
14459 */
14460 intel_ddi_init(dev, PORT_A);
14461 intel_ddi_init(dev, PORT_B);
14462 intel_ddi_init(dev, PORT_C);
14463 } else if (HAS_DDI(dev)) {
14464 int found;
14465
14466 /*
14467 * Haswell uses DDI functions to detect digital outputs.
14468 * On SKL pre-D0 the strap isn't connected, so we assume
14469 * it's there.
14470 */
14471 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
14472 /* WaIgnoreDDIAStrap: skl */
14473 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14474 intel_ddi_init(dev, PORT_A);
14475
14476 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
14477 * register */
14478 found = I915_READ(SFUSE_STRAP);
14479
14480 if (found & SFUSE_STRAP_DDIB_DETECTED)
14481 intel_ddi_init(dev, PORT_B);
14482 if (found & SFUSE_STRAP_DDIC_DETECTED)
14483 intel_ddi_init(dev, PORT_C);
14484 if (found & SFUSE_STRAP_DDID_DETECTED)
14485 intel_ddi_init(dev, PORT_D);
14486 /*
14487 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14488 */
14489 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
14490 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
14491 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
14492 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
14493 intel_ddi_init(dev, PORT_E);
14494
14495 } else if (HAS_PCH_SPLIT(dev)) {
14496 int found;
14497 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
14498
14499 if (has_edp_a(dev))
14500 intel_dp_init(dev, DP_A, PORT_A);
14501
14502 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
14503 			/* PCH SDVOB is multiplexed with HDMIB */
14504 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
14505 if (!found)
14506 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
14507 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
14508 intel_dp_init(dev, PCH_DP_B, PORT_B);
14509 }
14510
14511 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
14512 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
14513
14514 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
14515 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
14516
14517 if (I915_READ(PCH_DP_C) & DP_DETECTED)
14518 intel_dp_init(dev, PCH_DP_C, PORT_C);
14519
14520 if (I915_READ(PCH_DP_D) & DP_DETECTED)
14521 intel_dp_init(dev, PCH_DP_D, PORT_D);
14522 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14523 /*
14524 * The DP_DETECTED bit is the latched state of the DDC
14525 * SDA pin at boot. However since eDP doesn't require DDC
14526 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14527 * eDP ports may have been muxed to an alternate function.
14528 * Thus we can't rely on the DP_DETECTED bit alone to detect
14529 * eDP ports. Consult the VBT as well as DP_DETECTED to
14530 * detect eDP ports.
14531 */
14532 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
14533 !intel_dp_is_edp(dev, PORT_B))
14534 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14535 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14536 intel_dp_is_edp(dev, PORT_B))
14537 intel_dp_init(dev, VLV_DP_B, PORT_B);
14538
14539 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
14540 !intel_dp_is_edp(dev, PORT_C))
14541 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14542 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14543 intel_dp_is_edp(dev, PORT_C))
14544 intel_dp_init(dev, VLV_DP_C, PORT_C);
14545
14546 if (IS_CHERRYVIEW(dev)) {
14547 /* eDP not supported on port D, so don't check VBT */
14548 if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
14549 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14550 if (I915_READ(CHV_DP_D) & DP_DETECTED)
14551 intel_dp_init(dev, CHV_DP_D, PORT_D);
14552 }
14553
14554 intel_dsi_init(dev);
14555 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14556 bool found = false;
14557
14558 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14559 DRM_DEBUG_KMS("probing SDVOB\n");
14560 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14561 if (!found && IS_G4X(dev)) {
14562 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14563 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14564 }
14565
14566 if (!found && IS_G4X(dev))
14567 intel_dp_init(dev, DP_B, PORT_B);
14568 }
14569
14570 		/* Before G4X, SDVOC doesn't have its own detect register */
14571
14572 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14573 DRM_DEBUG_KMS("probing SDVOC\n");
14574 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14575 }
14576
14577 		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14579 if (IS_G4X(dev)) {
14580 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14581 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14582 }
14583 if (IS_G4X(dev))
14584 intel_dp_init(dev, DP_C, PORT_C);
14585 }
14586
14587 if (IS_G4X(dev) &&
14588 (I915_READ(DP_D) & DP_DETECTED))
14589 intel_dp_init(dev, DP_D, PORT_D);
14590 } else if (IS_GEN2(dev))
14591 intel_dvo_init(dev);
14592
14593 if (SUPPORTS_TV(dev))
14594 intel_tv_init(dev);
14595
14596 intel_psr_init(dev);
14597
14598 for_each_intel_encoder(dev, encoder) {
14599 encoder->base.possible_crtcs = encoder->crtc_mask;
14600 encoder->base.possible_clones =
14601 intel_encoder_clones(encoder);
14602 }
14603
14604 intel_init_pch_refclk(dev);
14605
14606 drm_helper_move_panel_connectors_to_head(dev);
14607 }
14608
14609 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14610 {
14611 struct drm_device *dev = fb->dev;
14612 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14613
14614 drm_framebuffer_cleanup(fb);
14615 mutex_lock(&dev->struct_mutex);
14616 WARN_ON(!intel_fb->obj->framebuffer_references--);
14617 drm_gem_object_unreference(&intel_fb->obj->base);
14618 mutex_unlock(&dev->struct_mutex);
14619 kfree(intel_fb);
14620 }
14621
14622 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14623 struct drm_file *file,
14624 unsigned int *handle)
14625 {
14626 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14627 struct drm_i915_gem_object *obj = intel_fb->obj;
14628
14629 if (obj->userptr.mm) {
14630 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14631 return -EINVAL;
14632 }
14633
14634 return drm_gem_handle_create(file, &obj->base, handle);
14635 }
14636
14637 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14638 struct drm_file *file,
14639 unsigned flags, unsigned color,
14640 struct drm_clip_rect *clips,
14641 unsigned num_clips)
14642 {
14643 struct drm_device *dev = fb->dev;
14644 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14645 struct drm_i915_gem_object *obj = intel_fb->obj;
14646
14647 mutex_lock(&dev->struct_mutex);
14648 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14649 mutex_unlock(&dev->struct_mutex);
14650
14651 return 0;
14652 }
14653
14654 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14655 .destroy = intel_user_framebuffer_destroy,
14656 .create_handle = intel_user_framebuffer_create_handle,
14657 .dirty = intel_user_framebuffer_dirty,
14658 };
14659
14660 static
14661 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14662 uint32_t pixel_format)
14663 {
14664 u32 gen = INTEL_INFO(dev)->gen;
14665
14666 if (gen >= 9) {
14667 /* "The stride in bytes must not exceed the of the size of 8K
14668 * pixels and 32K bytes."
14669 */
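/* e.g. 32bpp XRGB8888: min(8192 * 4, 32768) = 32768 bytes */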
14670 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14671 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14672 return 32*1024;
14673 } else if (gen >= 4) {
14674 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14675 return 16*1024;
14676 else
14677 return 32*1024;
14678 } else if (gen >= 3) {
14679 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14680 return 8*1024;
14681 else
14682 return 16*1024;
14683 } else {
14684 /* XXX DSPC is limited to 4k tiled */
14685 return 8*1024;
14686 }
14687 }
14688
14689 static int intel_framebuffer_init(struct drm_device *dev,
14690 struct intel_framebuffer *intel_fb,
14691 struct drm_mode_fb_cmd2 *mode_cmd,
14692 struct drm_i915_gem_object *obj)
14693 {
14694 unsigned int aligned_height;
14695 int ret;
14696 u32 pitch_limit, stride_alignment;
14697
14698 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14699
14700 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14701 /* Enforce that fb modifier and tiling mode match, but only for
14702 * X-tiled. This is needed for FBC. */
14703 if (!!(obj->tiling_mode == I915_TILING_X) !=
14704 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14705 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14706 return -EINVAL;
14707 }
14708 } else {
14709 if (obj->tiling_mode == I915_TILING_X)
14710 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14711 else if (obj->tiling_mode == I915_TILING_Y) {
14712 DRM_DEBUG("No Y tiling for legacy addfb\n");
14713 return -EINVAL;
14714 }
14715 }
14716
14717 	/* Sanity-check the modifier that was passed in. */
14718 switch (mode_cmd->modifier[0]) {
14719 case I915_FORMAT_MOD_Y_TILED:
14720 case I915_FORMAT_MOD_Yf_TILED:
14721 if (INTEL_INFO(dev)->gen < 9) {
14722 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14723 mode_cmd->modifier[0]);
14724 return -EINVAL;
14725 }
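/* fall through */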
14726 case DRM_FORMAT_MOD_NONE:
14727 case I915_FORMAT_MOD_X_TILED:
14728 break;
14729 default:
14730 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14731 mode_cmd->modifier[0]);
14732 return -EINVAL;
14733 }
14734
14735 stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
14736 mode_cmd->pixel_format);
14737 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14738 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14739 mode_cmd->pitches[0], stride_alignment);
14740 return -EINVAL;
14741 }
14742
14743 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14744 mode_cmd->pixel_format);
14745 if (mode_cmd->pitches[0] > pitch_limit) {
14746 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14747 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14748 "tiled" : "linear",
14749 mode_cmd->pitches[0], pitch_limit);
14750 return -EINVAL;
14751 }
14752
14753 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14754 mode_cmd->pitches[0] != obj->stride) {
14755 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14756 mode_cmd->pitches[0], obj->stride);
14757 return -EINVAL;
14758 }
14759
14760 /* Reject formats not supported by any plane early. */
14761 switch (mode_cmd->pixel_format) {
14762 case DRM_FORMAT_C8:
14763 case DRM_FORMAT_RGB565:
14764 case DRM_FORMAT_XRGB8888:
14765 case DRM_FORMAT_ARGB8888:
14766 break;
14767 case DRM_FORMAT_XRGB1555:
14768 if (INTEL_INFO(dev)->gen > 3) {
14769 DRM_DEBUG("unsupported pixel format: %s\n",
14770 drm_get_format_name(mode_cmd->pixel_format));
14771 return -EINVAL;
14772 }
14773 break;
14774 case DRM_FORMAT_ABGR8888:
14775 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14776 INTEL_INFO(dev)->gen < 9) {
14777 DRM_DEBUG("unsupported pixel format: %s\n",
14778 drm_get_format_name(mode_cmd->pixel_format));
14779 return -EINVAL;
14780 }
14781 break;
14782 case DRM_FORMAT_XBGR8888:
14783 case DRM_FORMAT_XRGB2101010:
14784 case DRM_FORMAT_XBGR2101010:
14785 if (INTEL_INFO(dev)->gen < 4) {
14786 DRM_DEBUG("unsupported pixel format: %s\n",
14787 drm_get_format_name(mode_cmd->pixel_format));
14788 return -EINVAL;
14789 }
14790 break;
14791 case DRM_FORMAT_ABGR2101010:
14792 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14793 DRM_DEBUG("unsupported pixel format: %s\n",
14794 drm_get_format_name(mode_cmd->pixel_format));
14795 return -EINVAL;
14796 }
14797 break;
14798 case DRM_FORMAT_YUYV:
14799 case DRM_FORMAT_UYVY:
14800 case DRM_FORMAT_YVYU:
14801 case DRM_FORMAT_VYUY:
14802 if (INTEL_INFO(dev)->gen < 5) {
14803 DRM_DEBUG("unsupported pixel format: %s\n",
14804 drm_get_format_name(mode_cmd->pixel_format));
14805 return -EINVAL;
14806 }
14807 break;
14808 default:
14809 DRM_DEBUG("unsupported pixel format: %s\n",
14810 drm_get_format_name(mode_cmd->pixel_format));
14811 return -EINVAL;
14812 }
14813
14814 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14815 if (mode_cmd->offsets[0] != 0)
14816 return -EINVAL;
14817
14818 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14819 mode_cmd->pixel_format,
14820 mode_cmd->modifier[0]);
14821 /* FIXME drm helper for size checks (especially planar formats)? */
14822 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14823 return -EINVAL;
14824
14825 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14826 intel_fb->obj = obj;
14827 intel_fb->obj->framebuffer_references++;
14828
14829 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14830 if (ret) {
14831 DRM_ERROR("framebuffer init failed %d\n", ret);
14832 return ret;
14833 }
14834
14835 return 0;
14836 }
14837
14838 static struct drm_framebuffer *
14839 intel_user_framebuffer_create(struct drm_device *dev,
14840 struct drm_file *filp,
14841 const struct drm_mode_fb_cmd2 *user_mode_cmd)
14842 {
14843 struct drm_framebuffer *fb;
14844 struct drm_i915_gem_object *obj;
14845 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14846
14847 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14848 mode_cmd.handles[0]));
14849 if (&obj->base == NULL)
14850 return ERR_PTR(-ENOENT);
14851
14852 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14853 if (IS_ERR(fb))
14854 drm_gem_object_unreference_unlocked(&obj->base);
14855
14856 return fb;
14857 }
14858
14859 #ifndef CONFIG_DRM_FBDEV_EMULATION
14860 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14861 {
14862 }
14863 #endif
14864
14865 static const struct drm_mode_config_funcs intel_mode_funcs = {
14866 .fb_create = intel_user_framebuffer_create,
14867 .output_poll_changed = intel_fbdev_output_poll_changed,
14868 .atomic_check = intel_atomic_check,
14869 .atomic_commit = intel_atomic_commit,
14870 .atomic_state_alloc = intel_atomic_state_alloc,
14871 .atomic_state_clear = intel_atomic_state_clear,
14872 };
14873
14874 /* Set up chip-specific display functions */
14875 static void intel_init_display(struct drm_device *dev)
14876 {
14877 struct drm_i915_private *dev_priv = dev->dev_private;
14878
14879 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14880 dev_priv->display.find_dpll = g4x_find_best_dpll;
14881 else if (IS_CHERRYVIEW(dev))
14882 dev_priv->display.find_dpll = chv_find_best_dpll;
14883 else if (IS_VALLEYVIEW(dev))
14884 dev_priv->display.find_dpll = vlv_find_best_dpll;
14885 else if (IS_PINEVIEW(dev))
14886 dev_priv->display.find_dpll = pnv_find_best_dpll;
14887 else
14888 dev_priv->display.find_dpll = i9xx_find_best_dpll;
14889
14890 if (INTEL_INFO(dev)->gen >= 9) {
14891 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14892 dev_priv->display.get_initial_plane_config =
14893 skylake_get_initial_plane_config;
14894 dev_priv->display.crtc_compute_clock =
14895 haswell_crtc_compute_clock;
14896 dev_priv->display.crtc_enable = haswell_crtc_enable;
14897 dev_priv->display.crtc_disable = haswell_crtc_disable;
14898 dev_priv->display.update_primary_plane =
14899 skylake_update_primary_plane;
14900 } else if (HAS_DDI(dev)) {
14901 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14902 dev_priv->display.get_initial_plane_config =
14903 ironlake_get_initial_plane_config;
14904 dev_priv->display.crtc_compute_clock =
14905 haswell_crtc_compute_clock;
14906 dev_priv->display.crtc_enable = haswell_crtc_enable;
14907 dev_priv->display.crtc_disable = haswell_crtc_disable;
14908 dev_priv->display.update_primary_plane =
14909 ironlake_update_primary_plane;
14910 } else if (HAS_PCH_SPLIT(dev)) {
14911 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14912 dev_priv->display.get_initial_plane_config =
14913 ironlake_get_initial_plane_config;
14914 dev_priv->display.crtc_compute_clock =
14915 ironlake_crtc_compute_clock;
14916 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14917 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14918 dev_priv->display.update_primary_plane =
14919 ironlake_update_primary_plane;
14920 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14921 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14922 dev_priv->display.get_initial_plane_config =
14923 i9xx_get_initial_plane_config;
14924 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14925 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14926 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14927 dev_priv->display.update_primary_plane =
14928 i9xx_update_primary_plane;
14929 } else {
14930 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14931 dev_priv->display.get_initial_plane_config =
14932 i9xx_get_initial_plane_config;
14933 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14934 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14935 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14936 dev_priv->display.update_primary_plane =
14937 i9xx_update_primary_plane;
14938 }
14939
14940 /* Returns the core display clock speed */
14941 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
14942 dev_priv->display.get_display_clock_speed =
14943 skylake_get_display_clock_speed;
14944 else if (IS_BROXTON(dev))
14945 dev_priv->display.get_display_clock_speed =
14946 broxton_get_display_clock_speed;
14947 else if (IS_BROADWELL(dev))
14948 dev_priv->display.get_display_clock_speed =
14949 broadwell_get_display_clock_speed;
14950 else if (IS_HASWELL(dev))
14951 dev_priv->display.get_display_clock_speed =
14952 haswell_get_display_clock_speed;
14953 else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
14954 dev_priv->display.get_display_clock_speed =
14955 valleyview_get_display_clock_speed;
14956 else if (IS_GEN5(dev))
14957 dev_priv->display.get_display_clock_speed =
14958 ilk_get_display_clock_speed;
14959 else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
14960 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
14961 dev_priv->display.get_display_clock_speed =
14962 i945_get_display_clock_speed;
14963 else if (IS_GM45(dev))
14964 dev_priv->display.get_display_clock_speed =
14965 gm45_get_display_clock_speed;
14966 else if (IS_CRESTLINE(dev))
14967 dev_priv->display.get_display_clock_speed =
14968 i965gm_get_display_clock_speed;
14969 else if (IS_PINEVIEW(dev))
14970 dev_priv->display.get_display_clock_speed =
14971 pnv_get_display_clock_speed;
14972 else if (IS_G33(dev) || IS_G4X(dev))
14973 dev_priv->display.get_display_clock_speed =
14974 g33_get_display_clock_speed;
14975 else if (IS_I915G(dev))
14976 dev_priv->display.get_display_clock_speed =
14977 i915_get_display_clock_speed;
14978 else if (IS_I945GM(dev) || IS_845G(dev))
14979 dev_priv->display.get_display_clock_speed =
14980 i9xx_misc_get_display_clock_speed;
14981 else if (IS_I915GM(dev))
14982 dev_priv->display.get_display_clock_speed =
14983 i915gm_get_display_clock_speed;
14984 else if (IS_I865G(dev))
14985 dev_priv->display.get_display_clock_speed =
14986 i865_get_display_clock_speed;
14987 else if (IS_I85X(dev))
14988 dev_priv->display.get_display_clock_speed =
14989 i85x_get_display_clock_speed;
14990 else { /* 830 */
14991 WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
14992 dev_priv->display.get_display_clock_speed =
14993 i830_get_display_clock_speed;
14994 }
14995
14996 if (IS_GEN5(dev)) {
14997 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14998 } else if (IS_GEN6(dev)) {
14999 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
15000 } else if (IS_IVYBRIDGE(dev)) {
15001 /* FIXME: detect B0+ stepping and use auto training */
15002 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15003 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
15004 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15005 if (IS_BROADWELL(dev)) {
15006 dev_priv->display.modeset_commit_cdclk =
15007 broadwell_modeset_commit_cdclk;
15008 dev_priv->display.modeset_calc_cdclk =
15009 broadwell_modeset_calc_cdclk;
15010 }
15011 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
15012 dev_priv->display.modeset_commit_cdclk =
15013 valleyview_modeset_commit_cdclk;
15014 dev_priv->display.modeset_calc_cdclk =
15015 valleyview_modeset_calc_cdclk;
15016 } else if (IS_BROXTON(dev)) {
15017 dev_priv->display.modeset_commit_cdclk =
15018 broxton_modeset_commit_cdclk;
15019 dev_priv->display.modeset_calc_cdclk =
15020 broxton_modeset_calc_cdclk;
15021 }
15022
15023 switch (INTEL_INFO(dev)->gen) {
15024 case 2:
15025 dev_priv->display.queue_flip = intel_gen2_queue_flip;
15026 break;
15027
15028 case 3:
15029 dev_priv->display.queue_flip = intel_gen3_queue_flip;
15030 break;
15031
15032 case 4:
15033 case 5:
15034 dev_priv->display.queue_flip = intel_gen4_queue_flip;
15035 break;
15036
15037 case 6:
15038 dev_priv->display.queue_flip = intel_gen6_queue_flip;
15039 break;
15040 case 7:
15041 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
15042 dev_priv->display.queue_flip = intel_gen7_queue_flip;
15043 break;
15044 case 9:
15045 		/* Fall through - unsupported since execlist only. */
15046 default:
15047 /* Default just returns -ENODEV to indicate unsupported */
15048 dev_priv->display.queue_flip = intel_default_queue_flip;
15049 }
15050
15051 mutex_init(&dev_priv->pps_mutex);
15052 }
15053
15054 /*
15055 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15056 * resume, or other times. This quirk makes sure that's the case for
15057 * affected systems.
15058 */
15059 static void quirk_pipea_force(struct drm_device *dev)
15060 {
15061 struct drm_i915_private *dev_priv = dev->dev_private;
15062
15063 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15064 DRM_INFO("applying pipe a force quirk\n");
15065 }
15066
15067 static void quirk_pipeb_force(struct drm_device *dev)
15068 {
15069 struct drm_i915_private *dev_priv = dev->dev_private;
15070
15071 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15072 DRM_INFO("applying pipe b force quirk\n");
15073 }
15074
15075 /*
15076 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15077 */
15078 static void quirk_ssc_force_disable(struct drm_device *dev)
15079 {
15080 struct drm_i915_private *dev_priv = dev->dev_private;
15081 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15082 DRM_INFO("applying lvds SSC disable quirk\n");
15083 }
15084
15085 /*
15086 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15087 * brightness value
15088 */
15089 static void quirk_invert_brightness(struct drm_device *dev)
15090 {
15091 struct drm_i915_private *dev_priv = dev->dev_private;
15092 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15093 DRM_INFO("applying inverted panel brightness quirk\n");
15094 }
15095
15096 /* Some VBTs incorrectly indicate no backlight is present */
15097 static void quirk_backlight_present(struct drm_device *dev)
15098 {
15099 struct drm_i915_private *dev_priv = dev->dev_private;
15100 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15101 DRM_INFO("applying backlight present quirk\n");
15102 }
15103
15104 struct intel_quirk {
15105 int device;
15106 int subsystem_vendor;
15107 int subsystem_device;
15108 void (*hook)(struct drm_device *dev);
15109 };
15110
15111 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
15112 struct intel_dmi_quirk {
15113 void (*hook)(struct drm_device *dev);
15114 const struct dmi_system_id (*dmi_id_list)[];
15115 };
15116
15117 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
15118 {
15119 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
15120 return 1;
15121 }
15122
15123 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
15124 {
15125 .dmi_id_list = &(const struct dmi_system_id[]) {
15126 {
15127 .callback = intel_dmi_reverse_brightness,
15128 .ident = "NCR Corporation",
15129 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
15130 DMI_MATCH(DMI_PRODUCT_NAME, ""),
15131 },
15132 },
15133 { } /* terminating entry */
15134 },
15135 .hook = quirk_invert_brightness,
15136 },
15137 };
15138
15139 static struct intel_quirk intel_quirks[] = {
15140 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
15141 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
15142
15143 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
15144 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
15145
15146 /* 830 needs to leave pipe A & dpll A up */
15147 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
15148
15149 /* 830 needs to leave pipe B & dpll B up */
15150 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
15151
15152 /* Lenovo U160 cannot use SSC on LVDS */
15153 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
15154
15155 /* Sony Vaio Y cannot use SSC on LVDS */
15156 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
15157
15158 /* Acer Aspire 5734Z must invert backlight brightness */
15159 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
15160
15161 /* Acer/eMachines G725 */
15162 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
15163
15164 /* Acer/eMachines e725 */
15165 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
15166
15167 /* Acer/Packard Bell NCL20 */
15168 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
15169
15170 /* Acer Aspire 4736Z */
15171 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
15172
15173 /* Acer Aspire 5336 */
15174 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
15175
15176 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
15177 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
15178
15179 /* Acer C720 Chromebook (Core i3 4005U) */
15180 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
15181
15182 /* Apple Macbook 2,1 (Core 2 T7400) */
15183 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
15184
15185 /* Apple Macbook 4,1 */
15186 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
15187
15188 /* Toshiba CB35 Chromebook (Celeron 2955U) */
15189 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
15190
15191 /* HP Chromebook 14 (Celeron 2955U) */
15192 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
15193
15194 /* Dell Chromebook 11 */
15195 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
15196
15197 /* Dell Chromebook 11 (2015 version) */
15198 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
15199 };
15200
15201 static void intel_init_quirks(struct drm_device *dev)
15202 {
15203 struct pci_dev *d = dev->pdev;
15204 int i;
15205
15206 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15207 struct intel_quirk *q = &intel_quirks[i];
15208
15209 if (d->device == q->device &&
15210 (d->subsystem_vendor == q->subsystem_vendor ||
15211 q->subsystem_vendor == PCI_ANY_ID) &&
15212 (d->subsystem_device == q->subsystem_device ||
15213 q->subsystem_device == PCI_ANY_ID))
15214 q->hook(dev);
15215 }
15216 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15217 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15218 intel_dmi_quirks[i].hook(dev);
15219 }
15220 }
15221
15222 /* Disable the VGA plane that we never use */
15223 static void i915_disable_vga(struct drm_device *dev)
15224 {
15225 struct drm_i915_private *dev_priv = dev->dev_private;
15226 u8 sr1;
15227 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15228
15229 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
15230 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
15231 outb(SR01, VGA_SR_INDEX);
15232 sr1 = inb(VGA_SR_DATA);
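/* Setting SR01 bit 5 blanks the screen while we disable the plane. */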
15233 outb(sr1 | 1<<5, VGA_SR_DATA);
15234 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
15235 udelay(300);
15236
15237 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
15238 POSTING_READ(vga_reg);
15239 }
15240
15241 void intel_modeset_init_hw(struct drm_device *dev)
15242 {
15243 intel_update_cdclk(dev);
15244 intel_prepare_ddi(dev);
15245 intel_init_clock_gating(dev);
15246 intel_enable_gt_powersave(dev);
15247 }
15248
15249 void intel_modeset_init(struct drm_device *dev)
15250 {
15251 struct drm_i915_private *dev_priv = dev->dev_private;
15252 int sprite, ret;
15253 enum pipe pipe;
15254 struct intel_crtc *crtc;
15255
15256 drm_mode_config_init(dev);
15257
15258 dev->mode_config.min_width = 0;
15259 dev->mode_config.min_height = 0;
15260
15261 dev->mode_config.preferred_depth = 24;
15262 dev->mode_config.prefer_shadow = 1;
15263
15264 dev->mode_config.allow_fb_modifiers = true;
15265
15266 dev->mode_config.funcs = &intel_mode_funcs;
15267
15268 intel_init_quirks(dev);
15269
15270 intel_init_pm(dev);
15271
15272 if (INTEL_INFO(dev)->num_pipes == 0)
15273 return;
15274
15275 /*
15276 * There may be no VBT; and if the BIOS enabled SSC we can
15277 * just keep using it to avoid unnecessary flicker. Whereas if the
15278 * BIOS isn't using it, don't assume it will work even if the VBT
15279 * indicates as much.
15280 */
15281 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
15282 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15283 DREF_SSC1_ENABLE);
15284
15285 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15286 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15287 bios_lvds_use_ssc ? "en" : "dis",
15288 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15289 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15290 }
15291 }
15292
15293 intel_init_display(dev);
15294 intel_init_audio(dev);
15295
15296 if (IS_GEN2(dev)) {
15297 dev->mode_config.max_width = 2048;
15298 dev->mode_config.max_height = 2048;
15299 } else if (IS_GEN3(dev)) {
15300 dev->mode_config.max_width = 4096;
15301 dev->mode_config.max_height = 4096;
15302 } else {
15303 dev->mode_config.max_width = 8192;
15304 dev->mode_config.max_height = 8192;
15305 }
15306
15307 if (IS_845G(dev) || IS_I865G(dev)) {
15308 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
15309 dev->mode_config.cursor_height = 1023;
15310 } else if (IS_GEN2(dev)) {
15311 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
15312 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
15313 } else {
15314 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
15315 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
15316 }
15317
15318 dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
15319
15320 DRM_DEBUG_KMS("%d display pipe%s available.\n",
15321 INTEL_INFO(dev)->num_pipes,
15322 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
15323
15324 for_each_pipe(dev_priv, pipe) {
15325 intel_crtc_init(dev, pipe);
15326 for_each_sprite(dev_priv, pipe, sprite) {
15327 ret = intel_plane_init(dev, pipe, sprite);
15328 if (ret)
15329 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
15330 pipe_name(pipe), sprite_name(pipe, sprite), ret);
15331 }
15332 }
15333
15334 intel_update_czclk(dev_priv);
15335 intel_update_cdclk(dev);
15336
15337 intel_shared_dpll_init(dev);
15338
15339 /* Just disable it once at startup */
15340 i915_disable_vga(dev);
15341 intel_setup_outputs(dev);
15342
15343 drm_modeset_lock_all(dev);
15344 intel_modeset_setup_hw_state(dev);
15345 drm_modeset_unlock_all(dev);
15346
15347 for_each_intel_crtc(dev, crtc) {
15348 struct intel_initial_plane_config plane_config = {};
15349
15350 if (!crtc->active)
15351 continue;
15352
15353 /*
15354 * Note that reserving the BIOS fb up front prevents us
15355 * from stuffing other stolen allocations like the ring
15356 * on top. This prevents some ugliness at boot time, and
15357 * can even allow for smooth boot transitions if the BIOS
15358 * fb is large enough for the active pipe configuration.
15359 */
15360 dev_priv->display.get_initial_plane_config(crtc,
15361 &plane_config);
15362
15363 /*
15364 * If the fb is shared between multiple heads, we'll
15365 * just get the first one.
15366 */
15367 intel_find_initial_plane_obj(crtc, &plane_config);
15368 }
15369 }
15370
15371 static void intel_enable_pipe_a(struct drm_device *dev)
15372 {
15373 struct intel_connector *connector;
15374 struct drm_connector *crt = NULL;
15375 struct intel_load_detect_pipe load_detect_temp;
15376 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
15377
15378 	/* We can't just switch on pipe A; we need to set things up with a
15379 	 * proper mode and output configuration. As a gross hack, enable pipe A
15380 	 * by enabling the load detect pipe once. */
15381 for_each_intel_connector(dev, connector) {
15382 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
15383 crt = &connector->base;
15384 break;
15385 }
15386 }
15387
15388 if (!crt)
15389 return;
15390
15391 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
15392 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
15393 }
15394
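/* On gen2/3 either plane can be connected to either pipe; return false if the other plane is enabled and selects our pipe. */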
15395 static bool
15396 intel_check_plane_mapping(struct intel_crtc *crtc)
15397 {
15398 struct drm_device *dev = crtc->base.dev;
15399 struct drm_i915_private *dev_priv = dev->dev_private;
15400 u32 val;
15401
15402 if (INTEL_INFO(dev)->num_pipes == 1)
15403 return true;
15404
15405 val = I915_READ(DSPCNTR(!crtc->plane));
15406
15407 if ((val & DISPLAY_PLANE_ENABLE) &&
15408 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15409 return false;
15410
15411 return true;
15412 }
15413
15414 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15415 {
15416 struct drm_device *dev = crtc->base.dev;
15417 struct intel_encoder *encoder;
15418
15419 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15420 return true;
15421
15422 return false;
15423 }
15424
15425 static void intel_sanitize_crtc(struct intel_crtc *crtc)
15426 {
15427 struct drm_device *dev = crtc->base.dev;
15428 struct drm_i915_private *dev_priv = dev->dev_private;
15429 i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);
15430
15431 /* Clear any frame start delays used for debugging left by the BIOS */
15432 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15433
15434 /* restore vblank interrupts to correct state */
15435 drm_crtc_vblank_reset(&crtc->base);
15436 if (crtc->active) {
15437 struct intel_plane *plane;
15438
15439 drm_crtc_vblank_on(&crtc->base);
15440
15441 /* Disable everything but the primary plane */
15442 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15443 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15444 continue;
15445
15446 plane->disable_plane(&plane->base, &crtc->base);
15447 }
15448 }
15449
15450 /* We need to sanitize the plane -> pipe mapping first because this will
15451 * disable the crtc (and hence change the state) if it is wrong. Note
15452 * that gen4+ has a fixed plane -> pipe mapping. */
15453 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15454 bool plane;
15455
15456 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15457 crtc->base.base.id);
15458
15459 /* Pipe has the wrong plane attached and the plane is active.
15460 * Temporarily change the plane mapping and disable everything
15461 * ... */
15462 plane = crtc->plane;
15463 to_intel_plane_state(crtc->base.primary->state)->visible = true;
15464 crtc->plane = !plane;
15465 intel_crtc_disable_noatomic(&crtc->base);
15466 crtc->plane = plane;
15467 }
15468
15469 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15470 crtc->pipe == PIPE_A && !crtc->active) {
15471 		/* BIOS forgot to enable pipe A; this mostly happens after
15472 		 * resume. Force-enable the pipe to fix this; in the update_dpms
15473 		 * call below we restore the pipe to the right state, but leave
15474 		 * the required bits on. */
15475 intel_enable_pipe_a(dev);
15476 }
15477
15478 /* Adjust the state of the output pipe according to whether we
15479 * have active connectors/encoders. */
15480 if (!intel_crtc_has_encoders(crtc))
15481 intel_crtc_disable_noatomic(&crtc->base);
15482
15483 if (crtc->active != crtc->base.state->active) {
15484 struct intel_encoder *encoder;
15485
15486 /* This can happen either due to bugs in the get_hw_state
15487 * functions or because of calls to intel_crtc_disable_noatomic,
15488 * or because the pipe is force-enabled due to the
15489 * pipe A quirk. */
15490 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
15491 crtc->base.base.id,
15492 crtc->base.state->enable ? "enabled" : "disabled",
15493 crtc->active ? "enabled" : "disabled");
15494
15495 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
15496 crtc->base.state->active = crtc->active;
15497 crtc->base.enabled = crtc->active;
15498 crtc->base.state->connector_mask = 0;
15499
15500 /* Because we only establish the connector -> encoder ->
15501 * crtc links if something is active, this means the
15502 * crtc is now deactivated. Break the links. connector
15503 		 * -> encoder links are only established when things are
15504 * actually up, hence no need to break them. */
15505 WARN_ON(crtc->active);
15506
15507 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15508 encoder->base.crtc = NULL;
15509 }
15510
15511 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15512 /*
15513 * We start out with underrun reporting disabled to avoid races.
15514 * For correct bookkeeping mark this on active crtcs.
15515 *
15516 		 * Also on gmch platforms we don't have any hardware bits to
15517 * disable the underrun reporting. Which means we need to start
15518 * out with underrun reporting disabled also on inactive pipes,
15519 * since otherwise we'll complain about the garbage we read when
15520 * e.g. coming up after runtime pm.
15521 *
15522 * No protection against concurrent access is required - at
15523 * worst a fifo underrun happens which also sets this to false.
15524 */
15525 crtc->cpu_fifo_underrun_disabled = true;
15526 crtc->pch_fifo_underrun_disabled = true;
15527 }
15528 }
15529
15530 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15531 {
15532 struct intel_connector *connector;
15533 struct drm_device *dev = encoder->base.dev;
15534 bool active = false;
15535
15536 /* We need to check both for a crtc link (meaning that the
15537 * encoder is active and trying to read from a pipe) and the
15538 * pipe itself being active. */
15539 bool has_active_crtc = encoder->base.crtc &&
15540 to_intel_crtc(encoder->base.crtc)->active;
15541
15542 for_each_intel_connector(dev, connector) {
15543 if (connector->base.encoder != &encoder->base)
15544 continue;
15545
15546 active = true;
15547 break;
15548 }
15549
15550 if (active && !has_active_crtc) {
15551 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15552 encoder->base.base.id,
15553 encoder->base.name);
15554
15555 /* Connector is active, but has no active pipe. This is
15556 * fallout from our resume register restoring. Disable
15557 * the encoder manually again. */
15558 if (encoder->base.crtc) {
15559 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15560 encoder->base.base.id,
15561 encoder->base.name);
15562 encoder->disable(encoder);
15563 if (encoder->post_disable)
15564 encoder->post_disable(encoder);
15565 }
15566 encoder->base.crtc = NULL;
15567
15568 /* Inconsistent output/port/pipe state happens presumably due to
15569 * a bug in one of the get_hw_state functions. Or someplace else
15570 * in our code, like the register restore mess on resume. Clamp
15571 * things to off as a safer default. */
15572 for_each_intel_connector(dev, connector) {
15573 if (connector->encoder != encoder)
15574 continue;
15575 connector->base.dpms = DRM_MODE_DPMS_OFF;
15576 connector->base.encoder = NULL;
15577 }
15578 }
15579 /* Enabled encoders without active connectors will be fixed in
15580 * the crtc fixup. */
15581 }
15582
15583 void i915_redisable_vga_power_on(struct drm_device *dev)
15584 {
15585 struct drm_i915_private *dev_priv = dev->dev_private;
15586 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15587
15588 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15589 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15590 i915_disable_vga(dev);
15591 }
15592 }
15593
15594 void i915_redisable_vga(struct drm_device *dev)
15595 {
15596 struct drm_i915_private *dev_priv = dev->dev_private;
15597
15598 	/* This function can be called either from intel_modeset_setup_hw_state or
15599 * at a very early point in our resume sequence, where the power well
15600 * structures are not yet restored. Since this function is at a very
15601 * paranoid "someone might have enabled VGA while we were not looking"
15602 * level, just check if the power well is enabled instead of trying to
15603 * follow the "don't touch the power well if we don't need it" policy
15604 * the rest of the driver uses. */
15605 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15606 return;
15607
15608 i915_redisable_vga_power_on(dev);
15609
15610 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15611 }
15612
15613 static bool primary_get_hw_state(struct intel_plane *plane)
15614 {
15615 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15616
15617 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15618 }
15619
15620 /* FIXME read out full plane state for all planes */
15621 static void readout_plane_state(struct intel_crtc *crtc)
15622 {
15623 struct drm_plane *primary = crtc->base.primary;
15624 struct intel_plane_state *plane_state =
15625 to_intel_plane_state(primary->state);
15626
15627 plane_state->visible = crtc->active &&
15628 primary_get_hw_state(to_intel_plane(primary));
15629
15630 if (plane_state->visible)
15631 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15632 }
15633
15634 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15635 {
15636 struct drm_i915_private *dev_priv = dev->dev_private;
15637 enum pipe pipe;
15638 struct intel_crtc *crtc;
15639 struct intel_encoder *encoder;
15640 struct intel_connector *connector;
15641 int i;
15642
15643 for_each_intel_crtc(dev, crtc) {
15644 __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
15645 memset(crtc->config, 0, sizeof(*crtc->config));
15646 crtc->config->base.crtc = &crtc->base;
15647
15648 crtc->active = dev_priv->display.get_pipe_config(crtc,
15649 crtc->config);
15650
15651 crtc->base.state->active = crtc->active;
15652 crtc->base.enabled = crtc->active;
15653
15654 readout_plane_state(crtc);
15655
15656 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15657 crtc->base.base.id,
15658 crtc->active ? "enabled" : "disabled");
15659 }
15660
15661 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15662 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15663
15664 pll->on = pll->get_hw_state(dev_priv, pll,
15665 &pll->config.hw_state);
15666 pll->active = 0;
15667 pll->config.crtc_mask = 0;
15668 for_each_intel_crtc(dev, crtc) {
15669 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
15670 pll->active++;
15671 pll->config.crtc_mask |= 1 << crtc->pipe;
15672 }
15673 }
15674
15675 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15676 pll->name, pll->config.crtc_mask, pll->on);
15677
15678 if (pll->config.crtc_mask)
15679 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
15680 }
15681
15682 for_each_intel_encoder(dev, encoder) {
15683 pipe = 0;
15684
15685 if (encoder->get_hw_state(encoder, &pipe)) {
15686 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15687 encoder->base.crtc = &crtc->base;
15688 encoder->get_config(encoder, crtc->config);
15689 } else {
15690 encoder->base.crtc = NULL;
15691 }
15692
15693 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15694 encoder->base.base.id,
15695 encoder->base.name,
15696 encoder->base.crtc ? "enabled" : "disabled",
15697 pipe_name(pipe));
15698 }
15699
15700 for_each_intel_connector(dev, connector) {
15701 if (connector->get_hw_state(connector)) {
15702 connector->base.dpms = DRM_MODE_DPMS_ON;
15703
15704 encoder = connector->encoder;
15705 connector->base.encoder = &encoder->base;
15706
15707 if (encoder->base.crtc &&
15708 encoder->base.crtc->state->active) {
15709 /*
15710 * This has to be done during hardware readout
15711 * because anything calling .crtc_disable may
15712 * rely on the connector_mask being accurate.
15713 */
15714 encoder->base.crtc->state->connector_mask |=
15715 1 << drm_connector_index(&connector->base);
15716 }
15717
15718 } else {
15719 connector->base.dpms = DRM_MODE_DPMS_OFF;
15720 connector->base.encoder = NULL;
15721 }
15722 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15723 connector->base.base.id,
15724 connector->base.name,
15725 connector->base.encoder ? "enabled" : "disabled");
15726 }
15727
15728 for_each_intel_crtc(dev, crtc) {
15729 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15730
15731 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15732 if (crtc->base.state->active) {
15733 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15734 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15735 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15736
15737 /*
15738 * The initial mode needs to be set in order to keep
15739 * the atomic core happy. It wants a valid mode if the
15740 * crtc's enabled, so we do the above call.
15741 *
15742 * At this point some state updated by the connectors
15743 * in their ->detect() callback has not run yet, so
15744 * no recalculation can be done yet.
15745 *
15746 * Even if we could do a recalculation and modeset
15747 * right now it would cause a double modeset if
15748 * fbdev or userspace chooses a different initial mode.
15749 *
15750 * If that happens, someone indicated they wanted a
15751 * mode change, which means it's safe to do a full
15752 * recalculation.
15753 */
15754 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15755
15756 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15757 update_scanline_offset(crtc);
15758 }
15759 }
15760 }
15761
15762 /* Scan out the current hw modeset state,
15763  * and sanitize it to match the current sw state
15764  */
15765 static void
15766 intel_modeset_setup_hw_state(struct drm_device *dev)
15767 {
15768 struct drm_i915_private *dev_priv = dev->dev_private;
15769 enum pipe pipe;
15770 struct intel_crtc *crtc;
15771 struct intel_encoder *encoder;
15772 int i;
15773
15774 intel_modeset_readout_hw_state(dev);
15775
15776 /* HW state is read out, now we need to sanitize this mess. */
15777 for_each_intel_encoder(dev, encoder) {
15778 intel_sanitize_encoder(encoder);
15779 }
15780
15781 for_each_pipe(dev_priv, pipe) {
15782 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15783 intel_sanitize_crtc(crtc);
15784 intel_dump_pipe_config(crtc, crtc->config,
15785 "[setup_hw_state]");
15786 }
15787
15788 intel_modeset_update_connector_atomic_state(dev);
15789
15790 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15791 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15792
15793 if (!pll->on || pll->active)
15794 continue;
15795
15796 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15797
15798 pll->disable(dev_priv, pll);
15799 pll->on = false;
15800 }
15801
15802 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15803 vlv_wm_get_hw_state(dev);
15804 else if (IS_GEN9(dev))
15805 skl_wm_get_hw_state(dev);
15806 else if (HAS_PCH_SPLIT(dev))
15807 ilk_wm_get_hw_state(dev);
15808
15809 for_each_intel_crtc(dev, crtc) {
15810 unsigned long put_domains;
15811
15812 put_domains = modeset_get_crtc_power_domains(&crtc->base);
15813 if (WARN_ON(put_domains))
15814 modeset_put_power_domains(dev_priv, put_domains);
15815 }
15816 intel_display_set_init_power(dev_priv, false);
15817 }
15818
15819 void intel_display_resume(struct drm_device *dev)
15820 {
15821 struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
15822 struct intel_connector *conn;
15823 struct intel_plane *plane;
15824 struct drm_crtc *crtc;
15825 int ret;
15826
15827 if (!state)
15828 return;
15829
15830 state->acquire_ctx = dev->mode_config.acquire_ctx;
15831
15832 /* preserve complete old state, including dpll */
15833 intel_atomic_get_shared_dpll_state(state);
15834
15835 for_each_crtc(dev, crtc) {
15836 struct drm_crtc_state *crtc_state =
15837 drm_atomic_get_crtc_state(state, crtc);
15838
15839 ret = PTR_ERR_OR_ZERO(crtc_state);
15840 if (ret)
15841 goto err;
15842
15843 /* force a restore */
15844 crtc_state->mode_changed = true;
15845 }
15846
15847 for_each_intel_plane(dev, plane) {
15848 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
15849 if (ret)
15850 goto err;
15851 }
15852
15853 for_each_intel_connector(dev, conn) {
15854 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
15855 if (ret)
15856 goto err;
15857 }
15858
15859 intel_modeset_setup_hw_state(dev);
15860
15861 i915_redisable_vga(dev);
15862 ret = drm_atomic_commit(state);
15863 if (!ret)
15864 return;
15865
15866 err:
15867 DRM_ERROR("Restoring old state failed with %i\n", ret);
15868 drm_atomic_state_free(state);
15869 }
15870
15871 void intel_modeset_gem_init(struct drm_device *dev)
15872 {
15873 struct drm_crtc *c;
15874 struct drm_i915_gem_object *obj;
15875 int ret;
15876
15877 mutex_lock(&dev->struct_mutex);
15878 intel_init_gt_powersave(dev);
15879 mutex_unlock(&dev->struct_mutex);
15880
15881 intel_modeset_init_hw(dev);
15882
15883 intel_setup_overlay(dev);
15884
15885 /*
15886 * Make sure any fbs we allocated at startup are properly
15887 * pinned & fenced. When we do the allocation it's too early
15888 * for this.
15889 */
15890 for_each_crtc(dev, c) {
15891 obj = intel_fb_obj(c->primary->fb);
15892 if (obj == NULL)
15893 continue;
15894
15895 mutex_lock(&dev->struct_mutex);
15896 ret = intel_pin_and_fence_fb_obj(c->primary,
15897 c->primary->fb,
15898 c->primary->state);
15899 mutex_unlock(&dev->struct_mutex);
15900 if (ret) {
15901 DRM_ERROR("failed to pin boot fb on pipe %d\n",
15902 to_intel_crtc(c)->pipe);
15903 drm_framebuffer_unreference(c->primary->fb);
15904 c->primary->fb = NULL;
15905 c->primary->crtc = c->primary->state->crtc = NULL;
15906 update_state_fb(c->primary);
15907 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
15908 }
15909 }
15910
15911 intel_backlight_register(dev);
15912 }
15913
15914 void intel_connector_unregister(struct intel_connector *intel_connector)
15915 {
15916 struct drm_connector *connector = &intel_connector->base;
15917
15918 intel_panel_destroy_backlight(connector);
15919 drm_connector_unregister(connector);
15920 }
15921
15922 void intel_modeset_cleanup(struct drm_device *dev)
15923 {
15924 struct drm_i915_private *dev_priv = dev->dev_private;
15925 struct intel_connector *connector;
15926
15927 intel_disable_gt_powersave(dev);
15928
15929 intel_backlight_unregister(dev);
15930
15931 /*
15932 	 * Disable interrupts and polling first to avoid creating havoc.
15933 	 * Too much stuff here (turning off connectors, ...) would
15934 * experience fancy races otherwise.
15935 */
15936 intel_irq_uninstall(dev_priv);
15937
15938 /*
15939 * Due to the hpd irq storm handling the hotplug work can re-arm the
15940 * poll handlers. Hence disable polling after hpd handling is shut down.
15941 */
15942 drm_kms_helper_poll_fini(dev);
15943
15944 intel_unregister_dsm_handler();
15945
15946 intel_fbc_disable(dev_priv);
15947
15948 /* flush any delayed tasks or pending work */
15949 flush_scheduled_work();
15950
15951 /* destroy the backlight and sysfs files before encoders/connectors */
15952 for_each_intel_connector(dev, connector)
15953 connector->unregister(connector);
15954
15955 drm_mode_config_cleanup(dev);
15956
15957 intel_cleanup_overlay(dev);
15958
15959 mutex_lock(&dev->struct_mutex);
15960 intel_cleanup_gt_powersave(dev);
15961 mutex_unlock(&dev->struct_mutex);
15962
15963 intel_teardown_gmbus(dev);
15964 }
15965
15966 /*
15967  * Return which encoder is currently attached to the connector.
15968 */
15969 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
15970 {
15971 return &intel_attached_encoder(connector)->base;
15972 }
15973
15974 void intel_connector_attach_encoder(struct intel_connector *connector,
15975 struct intel_encoder *encoder)
15976 {
15977 connector->encoder = encoder;
15978 drm_mode_connector_attach_encoder(&connector->base,
15979 &encoder->base);
15980 }
15981
15982 /*
15983 * set vga decode state - true == enable VGA decode
15984 */
15985 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15986 {
15987 struct drm_i915_private *dev_priv = dev->dev_private;
15988 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15989 u16 gmch_ctrl;
15990
15991 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15992 DRM_ERROR("failed to read control word\n");
15993 return -EIO;
15994 }
15995
15996 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15997 return 0;
15998
15999 if (state)
16000 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16001 else
16002 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
16003
16004 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16005 DRM_ERROR("failed to write control word\n");
16006 return -EIO;
16007 }
16008
16009 return 0;
16010 }
16011
16012 struct intel_display_error_state {
16013
16014 u32 power_well_driver;
16015
16016 int num_transcoders;
16017
16018 struct intel_cursor_error_state {
16019 u32 control;
16020 u32 position;
16021 u32 base;
16022 u32 size;
16023 } cursor[I915_MAX_PIPES];
16024
16025 struct intel_pipe_error_state {
16026 bool power_domain_on;
16027 u32 source;
16028 u32 stat;
16029 } pipe[I915_MAX_PIPES];
16030
16031 struct intel_plane_error_state {
16032 u32 control;
16033 u32 stride;
16034 u32 size;
16035 u32 pos;
16036 u32 addr;
16037 u32 surface;
16038 u32 tile_offset;
16039 } plane[I915_MAX_PIPES];
16040
16041 struct intel_transcoder_error_state {
16042 bool power_domain_on;
16043 enum transcoder cpu_transcoder;
16044
16045 u32 conf;
16046
16047 u32 htotal;
16048 u32 hblank;
16049 u32 hsync;
16050 u32 vtotal;
16051 u32 vblank;
16052 u32 vsync;
16053 } transcoder[4];
16054 };
16055
16056 struct intel_display_error_state *
16057 intel_display_capture_error_state(struct drm_device *dev)
16058 {
16059 struct drm_i915_private *dev_priv = dev->dev_private;
16060 struct intel_display_error_state *error;
16061 int transcoders[] = {
16062 TRANSCODER_A,
16063 TRANSCODER_B,
16064 TRANSCODER_C,
16065 TRANSCODER_EDP,
16066 };
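/* Indexed in step with error->transcoder[]; TRANSCODER_EDP must stay last since it is only counted on DDI platforms. */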
16067 int i;
16068
16069 if (INTEL_INFO(dev)->num_pipes == 0)
16070 return NULL;
16071
16072 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16073 if (error == NULL)
16074 return NULL;
16075
16076 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16077 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16078
16079 for_each_pipe(dev_priv, i) {
16080 error->pipe[i].power_domain_on =
16081 __intel_display_power_is_enabled(dev_priv,
16082 POWER_DOMAIN_PIPE(i));
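		/*
		 * Skip register reads on a powered-down pipe; touching them
		 * would at best trigger unclaimed register warnings.
		 */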
16083 if (!error->pipe[i].power_domain_on)
16084 continue;
16085
16086 error->cursor[i].control = I915_READ(CURCNTR(i));
16087 error->cursor[i].position = I915_READ(CURPOS(i));
16088 error->cursor[i].base = I915_READ(CURBASE(i));
16089
16090 error->plane[i].control = I915_READ(DSPCNTR(i));
16091 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16092 if (INTEL_INFO(dev)->gen <= 3) {
16093 error->plane[i].size = I915_READ(DSPSIZE(i));
16094 error->plane[i].pos = I915_READ(DSPPOS(i));
16095 }
16096 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16097 error->plane[i].addr = I915_READ(DSPADDR(i));
16098 if (INTEL_INFO(dev)->gen >= 4) {
16099 error->plane[i].surface = I915_READ(DSPSURF(i));
16100 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16101 }
16102
16103 error->pipe[i].source = I915_READ(PIPESRC(i));
16104
16105 if (HAS_GMCH_DISPLAY(dev))
16106 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16107 }
16108
16109 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
16110 if (HAS_DDI(dev_priv->dev))
16111 error->num_transcoders++; /* Account for eDP. */
16112
16113 for (i = 0; i < error->num_transcoders; i++) {
16114 enum transcoder cpu_transcoder = transcoders[i];
16115
16116 error->transcoder[i].power_domain_on =
16117 __intel_display_power_is_enabled(dev_priv,
16118 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
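		/* Same rule as for pipes: don't touch powered-down transcoders. */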
16119 if (!error->transcoder[i].power_domain_on)
16120 continue;
16121
16122 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16123
16124 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16125 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16126 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16127 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16128 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16129 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16130 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
16131 }
16132
16133 return error;
16134 }
16135
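/* Shorthand so the register dump below reads like plain printf calls. */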
16136 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16137
16138 void
16139 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16140 struct drm_device *dev,
16141 struct intel_display_error_state *error)
16142 {
16143 struct drm_i915_private *dev_priv = dev->dev_private;
16144 int i;
16145
16146 if (!error)
16147 return;
16148
16149 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
16150 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
16151 err_printf(m, "PWR_WELL_CTL2: %08x\n",
16152 error->power_well_driver);
16153 for_each_pipe(dev_priv, i) {
16154 err_printf(m, "Pipe [%d]:\n", i);
16155 err_printf(m, " Power: %s\n",
16156 error->pipe[i].power_domain_on ? "on" : "off");
16157 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
16158 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
16159
16160 err_printf(m, "Plane [%d]:\n", i);
16161 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
16162 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
16163 if (INTEL_INFO(dev)->gen <= 3) {
16164 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
16165 err_printf(m, " POS: %08x\n", error->plane[i].pos);
16166 }
16167 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
16168 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
16169 if (INTEL_INFO(dev)->gen >= 4) {
16170 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
16171 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
16172 }
16173
16174 err_printf(m, "Cursor [%d]:\n", i);
16175 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
16176 err_printf(m, " POS: %08x\n", error->cursor[i].position);
16177 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
16178 }
16179
16180 for (i = 0; i < error->num_transcoders; i++) {
16181 err_printf(m, "CPU transcoder: %c\n",
16182 transcoder_name(error->transcoder[i].cpu_transcoder));
16183 err_printf(m, " Power: %s\n",
16184 error->transcoder[i].power_domain_on ? "on" : "off");
16185 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
16186 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
16187 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
16188 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
16189 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
16190 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
16191 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
16192 }
16193 }
16194
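/*
 * Called while a drm file is closing: drop any pending page flip
 * completion events destined for that file, since its event space is
 * about to be torn down with the file itself.
 */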
16195 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
16196 {
16197 struct intel_crtc *crtc;
16198
16199 for_each_intel_crtc(dev, crtc) {
16200 struct intel_unpin_work *work;
16201
16202 spin_lock_irq(&dev->event_lock);
16203
16204 work = crtc->unpin_work;
16205
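		/*
		 * Only the event is dropped; the flip work itself still
		 * runs to completion, it just no longer signals userspace.
		 */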
16206 if (work && work->event &&
16207 work->event->base.file_priv == file) {
16208 kfree(work->event);
16209 work->event = NULL;
16210 }
16211
16212 spin_unlock_irq(&dev->event_lock);
16213 }
16214 }