/*
 * drm/i915/ddi: fix intel_display_port_aux_power_domain() after HDMI detect
 * (from deliverable/linux.git: drivers/gpu/drm/i915/intel_display.c)
 */
/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

618563e3 27#include <linux/dmi.h>
c1c7af60
JB
28#include <linux/module.h>
29#include <linux/input.h>
79e53945 30#include <linux/i2c.h>
7662c8bd 31#include <linux/kernel.h>
5a0e3ad6 32#include <linux/slab.h>
9cce37f4 33#include <linux/vgaarb.h>
e0dac65e 34#include <drm/drm_edid.h>
760285e7 35#include <drm/drmP.h>
79e53945 36#include "intel_drv.h"
760285e7 37#include <drm/i915_drm.h>
79e53945 38#include "i915_drv.h"
e5510fac 39#include "i915_trace.h"
319c1d42 40#include <drm/drm_atomic.h>
c196e1d6 41#include <drm/drm_atomic_helper.h>
760285e7
DH
42#include <drm/drm_dp_helper.h>
43#include <drm/drm_crtc_helper.h>
465c120c
MR
44#include <drm/drm_plane_helper.h>
45#include <drm/drm_rect.h>
c0f372b3 46#include <linux/dma_remapping.h>
79e53945 47
465c120c 48/* Primary plane formats for gen <= 3 */
568db4f2 49static const uint32_t i8xx_primary_formats[] = {
67fe7dc5
DL
50 DRM_FORMAT_C8,
51 DRM_FORMAT_RGB565,
465c120c 52 DRM_FORMAT_XRGB1555,
67fe7dc5 53 DRM_FORMAT_XRGB8888,
465c120c
MR
54};
55
56/* Primary plane formats for gen >= 4 */
568db4f2 57static const uint32_t i965_primary_formats[] = {
6c0fd451
DL
58 DRM_FORMAT_C8,
59 DRM_FORMAT_RGB565,
60 DRM_FORMAT_XRGB8888,
61 DRM_FORMAT_XBGR8888,
62 DRM_FORMAT_XRGB2101010,
63 DRM_FORMAT_XBGR2101010,
64};
65
66static const uint32_t skl_primary_formats[] = {
67fe7dc5
DL
67 DRM_FORMAT_C8,
68 DRM_FORMAT_RGB565,
69 DRM_FORMAT_XRGB8888,
465c120c 70 DRM_FORMAT_XBGR8888,
67fe7dc5 71 DRM_FORMAT_ARGB8888,
465c120c
MR
72 DRM_FORMAT_ABGR8888,
73 DRM_FORMAT_XRGB2101010,
465c120c 74 DRM_FORMAT_XBGR2101010,
ea916ea0
KM
75 DRM_FORMAT_YUYV,
76 DRM_FORMAT_YVYU,
77 DRM_FORMAT_UYVY,
78 DRM_FORMAT_VYUY,
465c120c
MR
79};
80
3d7d6510
MR
81/* Cursor formats */
82static const uint32_t intel_cursor_formats[] = {
83 DRM_FORMAT_ARGB8888,
84};
85
6b383a7f 86static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
79e53945 87
f1f644dc 88static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 89 struct intel_crtc_state *pipe_config);
18442d08 90static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 91 struct intel_crtc_state *pipe_config);
f1f644dc 92
eb1bfe80
JB
93static int intel_framebuffer_init(struct drm_device *dev,
94 struct intel_framebuffer *ifb,
95 struct drm_mode_fb_cmd2 *mode_cmd,
96 struct drm_i915_gem_object *obj);
5b18e57c
DV
97static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
98static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
29407aab 99static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
100 struct intel_link_m_n *m_n,
101 struct intel_link_m_n *m2_n2);
29407aab 102static void ironlake_set_pipeconf(struct drm_crtc *crtc);
229fca97
DV
103static void haswell_set_pipeconf(struct drm_crtc *crtc);
104static void intel_set_pipe_csc(struct drm_crtc *crtc);
d288f65f 105static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 106 const struct intel_crtc_state *pipe_config);
d288f65f 107static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 108 const struct intel_crtc_state *pipe_config);
613d2b27
ML
109static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
110static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
549e2bfb
CK
111static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
112 struct intel_crtc_state *crtc_state);
5ab7b0b7
ID
113static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
114 int num_connectors);
bfd16b2a
ML
115static void skylake_pfit_enable(struct intel_crtc *crtc);
116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc);
043e9bda 118static void intel_modeset_setup_hw_state(struct drm_device *dev);
e7457a9a 119
79e53945 120typedef struct {
0206e353 121 int min, max;
79e53945
JB
122} intel_range_t;
123
124typedef struct {
0206e353
AJ
125 int dot_limit;
126 int p2_slow, p2_fast;
79e53945
JB
127} intel_p2_t;
128
d4906093
ML
129typedef struct intel_limit intel_limit_t;
130struct intel_limit {
0206e353
AJ
131 intel_range_t dot, vco, n, m, m1, m2, p, p1;
132 intel_p2_t p2;
d4906093 133};
79e53945 134
bfa7df01
VS
135/* returns HPLL frequency in kHz */
136static int valleyview_get_vco(struct drm_i915_private *dev_priv)
137{
138 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
139
140 /* Obtain SKU information */
141 mutex_lock(&dev_priv->sb_lock);
142 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
143 CCK_FUSE_HPLL_FREQ_MASK;
144 mutex_unlock(&dev_priv->sb_lock);
145
146 return vco_freq[hpll_freq] * 1000;
147}
148
149static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
150 const char *name, u32 reg)
151{
152 u32 val;
153 int divider;
154
155 if (dev_priv->hpll_freq == 0)
156 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
157
158 mutex_lock(&dev_priv->sb_lock);
159 val = vlv_cck_read(dev_priv, reg);
160 mutex_unlock(&dev_priv->sb_lock);
161
162 divider = val & CCK_FREQUENCY_VALUES;
163
164 WARN((val & CCK_FREQUENCY_STATUS) !=
165 (divider << CCK_FREQUENCY_STATUS_SHIFT),
166 "%s change in progress\n", name);
167
168 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
169}
170
d2acd215
DV
171int
172intel_pch_rawclk(struct drm_device *dev)
173{
174 struct drm_i915_private *dev_priv = dev->dev_private;
175
176 WARN_ON(!HAS_PCH_SPLIT(dev));
177
178 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
179}
180
79e50a4f
JN
181/* hrawclock is 1/4 the FSB frequency */
182int intel_hrawclk(struct drm_device *dev)
183{
184 struct drm_i915_private *dev_priv = dev->dev_private;
185 uint32_t clkcfg;
186
187 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
188 if (IS_VALLEYVIEW(dev))
189 return 200;
190
191 clkcfg = I915_READ(CLKCFG);
192 switch (clkcfg & CLKCFG_FSB_MASK) {
193 case CLKCFG_FSB_400:
194 return 100;
195 case CLKCFG_FSB_533:
196 return 133;
197 case CLKCFG_FSB_667:
198 return 166;
199 case CLKCFG_FSB_800:
200 return 200;
201 case CLKCFG_FSB_1067:
202 return 266;
203 case CLKCFG_FSB_1333:
204 return 333;
205 /* these two are just a guess; one of them might be right */
206 case CLKCFG_FSB_1600:
207 case CLKCFG_FSB_1600_ALT:
208 return 400;
209 default:
210 return 133;
211 }
212}
213
bfa7df01
VS
214static void intel_update_czclk(struct drm_i915_private *dev_priv)
215{
216 if (!IS_VALLEYVIEW(dev_priv))
217 return;
218
219 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
220 CCK_CZ_CLOCK_CONTROL);
221
222 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
223}
224
021357ac
CW
225static inline u32 /* units of 100MHz */
226intel_fdi_link_freq(struct drm_device *dev)
227{
8b99e68c
CW
228 if (IS_GEN5(dev)) {
229 struct drm_i915_private *dev_priv = dev->dev_private;
230 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
231 } else
232 return 27;
021357ac
CW
233}
234
5d536e28 235static const intel_limit_t intel_limits_i8xx_dac = {
0206e353 236 .dot = { .min = 25000, .max = 350000 },
9c333719 237 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 238 .n = { .min = 2, .max = 16 },
0206e353
AJ
239 .m = { .min = 96, .max = 140 },
240 .m1 = { .min = 18, .max = 26 },
241 .m2 = { .min = 6, .max = 16 },
242 .p = { .min = 4, .max = 128 },
243 .p1 = { .min = 2, .max = 33 },
273e27ca
EA
244 .p2 = { .dot_limit = 165000,
245 .p2_slow = 4, .p2_fast = 2 },
e4b36699
KP
246};
247
5d536e28
DV
248static const intel_limit_t intel_limits_i8xx_dvo = {
249 .dot = { .min = 25000, .max = 350000 },
9c333719 250 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 251 .n = { .min = 2, .max = 16 },
5d536e28
DV
252 .m = { .min = 96, .max = 140 },
253 .m1 = { .min = 18, .max = 26 },
254 .m2 = { .min = 6, .max = 16 },
255 .p = { .min = 4, .max = 128 },
256 .p1 = { .min = 2, .max = 33 },
257 .p2 = { .dot_limit = 165000,
258 .p2_slow = 4, .p2_fast = 4 },
259};
260
e4b36699 261static const intel_limit_t intel_limits_i8xx_lvds = {
0206e353 262 .dot = { .min = 25000, .max = 350000 },
9c333719 263 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 264 .n = { .min = 2, .max = 16 },
0206e353
AJ
265 .m = { .min = 96, .max = 140 },
266 .m1 = { .min = 18, .max = 26 },
267 .m2 = { .min = 6, .max = 16 },
268 .p = { .min = 4, .max = 128 },
269 .p1 = { .min = 1, .max = 6 },
273e27ca
EA
270 .p2 = { .dot_limit = 165000,
271 .p2_slow = 14, .p2_fast = 7 },
e4b36699 272};
273e27ca 273
e4b36699 274static const intel_limit_t intel_limits_i9xx_sdvo = {
0206e353
AJ
275 .dot = { .min = 20000, .max = 400000 },
276 .vco = { .min = 1400000, .max = 2800000 },
277 .n = { .min = 1, .max = 6 },
278 .m = { .min = 70, .max = 120 },
4f7dfb67
PJ
279 .m1 = { .min = 8, .max = 18 },
280 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
281 .p = { .min = 5, .max = 80 },
282 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
283 .p2 = { .dot_limit = 200000,
284 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
285};
286
287static const intel_limit_t intel_limits_i9xx_lvds = {
0206e353
AJ
288 .dot = { .min = 20000, .max = 400000 },
289 .vco = { .min = 1400000, .max = 2800000 },
290 .n = { .min = 1, .max = 6 },
291 .m = { .min = 70, .max = 120 },
53a7d2d1
PJ
292 .m1 = { .min = 8, .max = 18 },
293 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
294 .p = { .min = 7, .max = 98 },
295 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
296 .p2 = { .dot_limit = 112000,
297 .p2_slow = 14, .p2_fast = 7 },
e4b36699
KP
298};
299
273e27ca 300
e4b36699 301static const intel_limit_t intel_limits_g4x_sdvo = {
273e27ca
EA
302 .dot = { .min = 25000, .max = 270000 },
303 .vco = { .min = 1750000, .max = 3500000},
304 .n = { .min = 1, .max = 4 },
305 .m = { .min = 104, .max = 138 },
306 .m1 = { .min = 17, .max = 23 },
307 .m2 = { .min = 5, .max = 11 },
308 .p = { .min = 10, .max = 30 },
309 .p1 = { .min = 1, .max = 3},
310 .p2 = { .dot_limit = 270000,
311 .p2_slow = 10,
312 .p2_fast = 10
044c7c41 313 },
e4b36699
KP
314};
315
316static const intel_limit_t intel_limits_g4x_hdmi = {
273e27ca
EA
317 .dot = { .min = 22000, .max = 400000 },
318 .vco = { .min = 1750000, .max = 3500000},
319 .n = { .min = 1, .max = 4 },
320 .m = { .min = 104, .max = 138 },
321 .m1 = { .min = 16, .max = 23 },
322 .m2 = { .min = 5, .max = 11 },
323 .p = { .min = 5, .max = 80 },
324 .p1 = { .min = 1, .max = 8},
325 .p2 = { .dot_limit = 165000,
326 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
327};
328
329static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
273e27ca
EA
330 .dot = { .min = 20000, .max = 115000 },
331 .vco = { .min = 1750000, .max = 3500000 },
332 .n = { .min = 1, .max = 3 },
333 .m = { .min = 104, .max = 138 },
334 .m1 = { .min = 17, .max = 23 },
335 .m2 = { .min = 5, .max = 11 },
336 .p = { .min = 28, .max = 112 },
337 .p1 = { .min = 2, .max = 8 },
338 .p2 = { .dot_limit = 0,
339 .p2_slow = 14, .p2_fast = 14
044c7c41 340 },
e4b36699
KP
341};
342
343static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
273e27ca
EA
344 .dot = { .min = 80000, .max = 224000 },
345 .vco = { .min = 1750000, .max = 3500000 },
346 .n = { .min = 1, .max = 3 },
347 .m = { .min = 104, .max = 138 },
348 .m1 = { .min = 17, .max = 23 },
349 .m2 = { .min = 5, .max = 11 },
350 .p = { .min = 14, .max = 42 },
351 .p1 = { .min = 2, .max = 6 },
352 .p2 = { .dot_limit = 0,
353 .p2_slow = 7, .p2_fast = 7
044c7c41 354 },
e4b36699
KP
355};
356
f2b115e6 357static const intel_limit_t intel_limits_pineview_sdvo = {
0206e353
AJ
358 .dot = { .min = 20000, .max = 400000},
359 .vco = { .min = 1700000, .max = 3500000 },
273e27ca 360 /* Pineview's Ncounter is a ring counter */
0206e353
AJ
361 .n = { .min = 3, .max = 6 },
362 .m = { .min = 2, .max = 256 },
273e27ca 363 /* Pineview only has one combined m divider, which we treat as m2. */
0206e353
AJ
364 .m1 = { .min = 0, .max = 0 },
365 .m2 = { .min = 0, .max = 254 },
366 .p = { .min = 5, .max = 80 },
367 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
368 .p2 = { .dot_limit = 200000,
369 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
370};
371
f2b115e6 372static const intel_limit_t intel_limits_pineview_lvds = {
0206e353
AJ
373 .dot = { .min = 20000, .max = 400000 },
374 .vco = { .min = 1700000, .max = 3500000 },
375 .n = { .min = 3, .max = 6 },
376 .m = { .min = 2, .max = 256 },
377 .m1 = { .min = 0, .max = 0 },
378 .m2 = { .min = 0, .max = 254 },
379 .p = { .min = 7, .max = 112 },
380 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
381 .p2 = { .dot_limit = 112000,
382 .p2_slow = 14, .p2_fast = 14 },
e4b36699
KP
383};
384
273e27ca
EA
385/* Ironlake / Sandybridge
386 *
387 * We calculate clock using (register_value + 2) for N/M1/M2, so here
388 * the range value for them is (actual_value - 2).
389 */
b91ad0ec 390static const intel_limit_t intel_limits_ironlake_dac = {
273e27ca
EA
391 .dot = { .min = 25000, .max = 350000 },
392 .vco = { .min = 1760000, .max = 3510000 },
393 .n = { .min = 1, .max = 5 },
394 .m = { .min = 79, .max = 127 },
395 .m1 = { .min = 12, .max = 22 },
396 .m2 = { .min = 5, .max = 9 },
397 .p = { .min = 5, .max = 80 },
398 .p1 = { .min = 1, .max = 8 },
399 .p2 = { .dot_limit = 225000,
400 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
401};
402
b91ad0ec 403static const intel_limit_t intel_limits_ironlake_single_lvds = {
273e27ca
EA
404 .dot = { .min = 25000, .max = 350000 },
405 .vco = { .min = 1760000, .max = 3510000 },
406 .n = { .min = 1, .max = 3 },
407 .m = { .min = 79, .max = 118 },
408 .m1 = { .min = 12, .max = 22 },
409 .m2 = { .min = 5, .max = 9 },
410 .p = { .min = 28, .max = 112 },
411 .p1 = { .min = 2, .max = 8 },
412 .p2 = { .dot_limit = 225000,
413 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
414};
415
416static const intel_limit_t intel_limits_ironlake_dual_lvds = {
273e27ca
EA
417 .dot = { .min = 25000, .max = 350000 },
418 .vco = { .min = 1760000, .max = 3510000 },
419 .n = { .min = 1, .max = 3 },
420 .m = { .min = 79, .max = 127 },
421 .m1 = { .min = 12, .max = 22 },
422 .m2 = { .min = 5, .max = 9 },
423 .p = { .min = 14, .max = 56 },
424 .p1 = { .min = 2, .max = 8 },
425 .p2 = { .dot_limit = 225000,
426 .p2_slow = 7, .p2_fast = 7 },
b91ad0ec
ZW
427};
428
273e27ca 429/* LVDS 100mhz refclk limits. */
b91ad0ec 430static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
273e27ca
EA
431 .dot = { .min = 25000, .max = 350000 },
432 .vco = { .min = 1760000, .max = 3510000 },
433 .n = { .min = 1, .max = 2 },
434 .m = { .min = 79, .max = 126 },
435 .m1 = { .min = 12, .max = 22 },
436 .m2 = { .min = 5, .max = 9 },
437 .p = { .min = 28, .max = 112 },
0206e353 438 .p1 = { .min = 2, .max = 8 },
273e27ca
EA
439 .p2 = { .dot_limit = 225000,
440 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
441};
442
443static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
273e27ca
EA
444 .dot = { .min = 25000, .max = 350000 },
445 .vco = { .min = 1760000, .max = 3510000 },
446 .n = { .min = 1, .max = 3 },
447 .m = { .min = 79, .max = 126 },
448 .m1 = { .min = 12, .max = 22 },
449 .m2 = { .min = 5, .max = 9 },
450 .p = { .min = 14, .max = 42 },
0206e353 451 .p1 = { .min = 2, .max = 6 },
273e27ca
EA
452 .p2 = { .dot_limit = 225000,
453 .p2_slow = 7, .p2_fast = 7 },
4547668a
ZY
454};
455
dc730512 456static const intel_limit_t intel_limits_vlv = {
f01b7962
VS
457 /*
458 * These are the data rate limits (measured in fast clocks)
459 * since those are the strictest limits we have. The fast
460 * clock and actual rate limits are more relaxed, so checking
461 * them would make no difference.
462 */
463 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
75e53986 464 .vco = { .min = 4000000, .max = 6000000 },
a0c4da24 465 .n = { .min = 1, .max = 7 },
a0c4da24
JB
466 .m1 = { .min = 2, .max = 3 },
467 .m2 = { .min = 11, .max = 156 },
b99ab663 468 .p1 = { .min = 2, .max = 3 },
5fdc9c49 469 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
a0c4da24
JB
470};
471
ef9348c8
CML
472static const intel_limit_t intel_limits_chv = {
473 /*
474 * These are the data rate limits (measured in fast clocks)
475 * since those are the strictest limits we have. The fast
476 * clock and actual rate limits are more relaxed, so checking
477 * them would make no difference.
478 */
479 .dot = { .min = 25000 * 5, .max = 540000 * 5},
17fe1021 480 .vco = { .min = 4800000, .max = 6480000 },
ef9348c8
CML
481 .n = { .min = 1, .max = 1 },
482 .m1 = { .min = 2, .max = 2 },
483 .m2 = { .min = 24 << 22, .max = 175 << 22 },
484 .p1 = { .min = 2, .max = 4 },
485 .p2 = { .p2_slow = 1, .p2_fast = 14 },
486};
487
5ab7b0b7
ID
488static const intel_limit_t intel_limits_bxt = {
489 /* FIXME: find real dot limits */
490 .dot = { .min = 0, .max = INT_MAX },
e6292556 491 .vco = { .min = 4800000, .max = 6700000 },
5ab7b0b7
ID
492 .n = { .min = 1, .max = 1 },
493 .m1 = { .min = 2, .max = 2 },
494 /* FIXME: find real m2 limits */
495 .m2 = { .min = 2 << 22, .max = 255 << 22 },
496 .p1 = { .min = 2, .max = 4 },
497 .p2 = { .p2_slow = 1, .p2_fast = 20 },
498};
499
cdba954e
ACO
500static bool
501needs_modeset(struct drm_crtc_state *state)
502{
fc596660 503 return drm_atomic_crtc_needs_modeset(state);
cdba954e
ACO
504}
505
e0638cdf
PZ
506/**
507 * Returns whether any output on the specified pipe is of the specified type
508 */
4093561b 509bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
e0638cdf 510{
409ee761 511 struct drm_device *dev = crtc->base.dev;
e0638cdf
PZ
512 struct intel_encoder *encoder;
513
409ee761 514 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
e0638cdf
PZ
515 if (encoder->type == type)
516 return true;
517
518 return false;
519}
520
d0737e1d
ACO
521/**
522 * Returns whether any output on the specified pipe will have the specified
523 * type after a staged modeset is complete, i.e., the same as
524 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
525 * encoder->crtc.
526 */
a93e255f
ACO
527static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
528 int type)
d0737e1d 529{
a93e255f 530 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 531 struct drm_connector *connector;
a93e255f 532 struct drm_connector_state *connector_state;
d0737e1d 533 struct intel_encoder *encoder;
a93e255f
ACO
534 int i, num_connectors = 0;
535
da3ced29 536 for_each_connector_in_state(state, connector, connector_state, i) {
a93e255f
ACO
537 if (connector_state->crtc != crtc_state->base.crtc)
538 continue;
539
540 num_connectors++;
d0737e1d 541
a93e255f
ACO
542 encoder = to_intel_encoder(connector_state->best_encoder);
543 if (encoder->type == type)
d0737e1d 544 return true;
a93e255f
ACO
545 }
546
547 WARN_ON(num_connectors == 0);
d0737e1d
ACO
548
549 return false;
550}
551
a93e255f
ACO
552static const intel_limit_t *
553intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
2c07245f 554{
a93e255f 555 struct drm_device *dev = crtc_state->base.crtc->dev;
2c07245f 556 const intel_limit_t *limit;
b91ad0ec 557
a93e255f 558 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1974cad0 559 if (intel_is_dual_link_lvds(dev)) {
1b894b59 560 if (refclk == 100000)
b91ad0ec
ZW
561 limit = &intel_limits_ironlake_dual_lvds_100m;
562 else
563 limit = &intel_limits_ironlake_dual_lvds;
564 } else {
1b894b59 565 if (refclk == 100000)
b91ad0ec
ZW
566 limit = &intel_limits_ironlake_single_lvds_100m;
567 else
568 limit = &intel_limits_ironlake_single_lvds;
569 }
c6bb3538 570 } else
b91ad0ec 571 limit = &intel_limits_ironlake_dac;
2c07245f
ZW
572
573 return limit;
574}
575
a93e255f
ACO
576static const intel_limit_t *
577intel_g4x_limit(struct intel_crtc_state *crtc_state)
044c7c41 578{
a93e255f 579 struct drm_device *dev = crtc_state->base.crtc->dev;
044c7c41
ML
580 const intel_limit_t *limit;
581
a93e255f 582 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
1974cad0 583 if (intel_is_dual_link_lvds(dev))
e4b36699 584 limit = &intel_limits_g4x_dual_channel_lvds;
044c7c41 585 else
e4b36699 586 limit = &intel_limits_g4x_single_channel_lvds;
a93e255f
ACO
587 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
588 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
e4b36699 589 limit = &intel_limits_g4x_hdmi;
a93e255f 590 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
e4b36699 591 limit = &intel_limits_g4x_sdvo;
044c7c41 592 } else /* The option is for other outputs */
e4b36699 593 limit = &intel_limits_i9xx_sdvo;
044c7c41
ML
594
595 return limit;
596}
597
a93e255f
ACO
598static const intel_limit_t *
599intel_limit(struct intel_crtc_state *crtc_state, int refclk)
79e53945 600{
a93e255f 601 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945
JB
602 const intel_limit_t *limit;
603
5ab7b0b7
ID
604 if (IS_BROXTON(dev))
605 limit = &intel_limits_bxt;
606 else if (HAS_PCH_SPLIT(dev))
a93e255f 607 limit = intel_ironlake_limit(crtc_state, refclk);
2c07245f 608 else if (IS_G4X(dev)) {
a93e255f 609 limit = intel_g4x_limit(crtc_state);
f2b115e6 610 } else if (IS_PINEVIEW(dev)) {
a93e255f 611 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
f2b115e6 612 limit = &intel_limits_pineview_lvds;
2177832f 613 else
f2b115e6 614 limit = &intel_limits_pineview_sdvo;
ef9348c8
CML
615 } else if (IS_CHERRYVIEW(dev)) {
616 limit = &intel_limits_chv;
a0c4da24 617 } else if (IS_VALLEYVIEW(dev)) {
dc730512 618 limit = &intel_limits_vlv;
a6c45cf0 619 } else if (!IS_GEN2(dev)) {
a93e255f 620 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
a6c45cf0
CW
621 limit = &intel_limits_i9xx_lvds;
622 else
623 limit = &intel_limits_i9xx_sdvo;
79e53945 624 } else {
a93e255f 625 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
e4b36699 626 limit = &intel_limits_i8xx_lvds;
a93e255f 627 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
e4b36699 628 limit = &intel_limits_i8xx_dvo;
5d536e28
DV
629 else
630 limit = &intel_limits_i8xx_dac;
79e53945
JB
631 }
632 return limit;
633}
634
dccbea3b
ID
635/*
636 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
637 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
638 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
639 * The helpers' return value is the rate of the clock that is fed to the
640 * display engine's pipe which can be the above fast dot clock rate or a
641 * divided-down version of it.
642 */
f2b115e6 643/* m1 is reserved as 0 in Pineview, n is a ring counter */
dccbea3b 644static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock)
79e53945 645{
2177832f
SL
646 clock->m = clock->m2 + 2;
647 clock->p = clock->p1 * clock->p2;
ed5ca77e 648 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 649 return 0;
fb03ac01
VS
650 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
651 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
652
653 return clock->dot;
2177832f
SL
654}
655
7429e9d4
DV
656static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
657{
658 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
659}
660
dccbea3b 661static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
2177832f 662{
7429e9d4 663 clock->m = i9xx_dpll_compute_m(clock);
79e53945 664 clock->p = clock->p1 * clock->p2;
ed5ca77e 665 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
dccbea3b 666 return 0;
fb03ac01
VS
667 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
668 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
669
670 return clock->dot;
79e53945
JB
671}
672
dccbea3b 673static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
589eca67
ID
674{
675 clock->m = clock->m1 * clock->m2;
676 clock->p = clock->p1 * clock->p2;
677 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 678 return 0;
589eca67
ID
679 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
680 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
681
682 return clock->dot / 5;
589eca67
ID
683}
684
dccbea3b 685int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
ef9348c8
CML
686{
687 clock->m = clock->m1 * clock->m2;
688 clock->p = clock->p1 * clock->p2;
689 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 690 return 0;
ef9348c8
CML
691 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
692 clock->n << 22);
693 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
694
695 return clock->dot / 5;
ef9348c8
CML
696}
697
7c04d1d9 698#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
79e53945
JB
699/**
700 * Returns whether the given set of divisors are valid for a given refclk with
701 * the given connectors.
702 */
703
1b894b59
CW
704static bool intel_PLL_is_valid(struct drm_device *dev,
705 const intel_limit_t *limit,
706 const intel_clock_t *clock)
79e53945 707{
f01b7962
VS
708 if (clock->n < limit->n.min || limit->n.max < clock->n)
709 INTELPllInvalid("n out of range\n");
79e53945 710 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
0206e353 711 INTELPllInvalid("p1 out of range\n");
79e53945 712 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
0206e353 713 INTELPllInvalid("m2 out of range\n");
79e53945 714 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
0206e353 715 INTELPllInvalid("m1 out of range\n");
f01b7962 716
5ab7b0b7 717 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
f01b7962
VS
718 if (clock->m1 <= clock->m2)
719 INTELPllInvalid("m1 <= m2\n");
720
5ab7b0b7 721 if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
f01b7962
VS
722 if (clock->p < limit->p.min || limit->p.max < clock->p)
723 INTELPllInvalid("p out of range\n");
724 if (clock->m < limit->m.min || limit->m.max < clock->m)
725 INTELPllInvalid("m out of range\n");
726 }
727
79e53945 728 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
0206e353 729 INTELPllInvalid("vco out of range\n");
79e53945
JB
730 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
731 * connector, etc., rather than just a single range.
732 */
733 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
0206e353 734 INTELPllInvalid("dot out of range\n");
79e53945
JB
735
736 return true;
737}
738
3b1429d9
VS
739static int
740i9xx_select_p2_div(const intel_limit_t *limit,
741 const struct intel_crtc_state *crtc_state,
742 int target)
79e53945 743{
3b1429d9 744 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945 745
a93e255f 746 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
79e53945 747 /*
a210b028
DV
748 * For LVDS just rely on its current settings for dual-channel.
749 * We haven't figured out how to reliably set up different
750 * single/dual channel state, if we even can.
79e53945 751 */
1974cad0 752 if (intel_is_dual_link_lvds(dev))
3b1429d9 753 return limit->p2.p2_fast;
79e53945 754 else
3b1429d9 755 return limit->p2.p2_slow;
79e53945
JB
756 } else {
757 if (target < limit->p2.dot_limit)
3b1429d9 758 return limit->p2.p2_slow;
79e53945 759 else
3b1429d9 760 return limit->p2.p2_fast;
79e53945 761 }
3b1429d9
VS
762}
763
764static bool
765i9xx_find_best_dpll(const intel_limit_t *limit,
766 struct intel_crtc_state *crtc_state,
767 int target, int refclk, intel_clock_t *match_clock,
768 intel_clock_t *best_clock)
769{
770 struct drm_device *dev = crtc_state->base.crtc->dev;
771 intel_clock_t clock;
772 int err = target;
79e53945 773
0206e353 774 memset(best_clock, 0, sizeof(*best_clock));
79e53945 775
3b1429d9
VS
776 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
777
42158660
ZY
778 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
779 clock.m1++) {
780 for (clock.m2 = limit->m2.min;
781 clock.m2 <= limit->m2.max; clock.m2++) {
c0efc387 782 if (clock.m2 >= clock.m1)
42158660
ZY
783 break;
784 for (clock.n = limit->n.min;
785 clock.n <= limit->n.max; clock.n++) {
786 for (clock.p1 = limit->p1.min;
787 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
788 int this_err;
789
dccbea3b 790 i9xx_calc_dpll_params(refclk, &clock);
ac58c3f0
DV
791 if (!intel_PLL_is_valid(dev, limit,
792 &clock))
793 continue;
794 if (match_clock &&
795 clock.p != match_clock->p)
796 continue;
797
798 this_err = abs(clock.dot - target);
799 if (this_err < err) {
800 *best_clock = clock;
801 err = this_err;
802 }
803 }
804 }
805 }
806 }
807
808 return (err != target);
809}
810
811static bool
a93e255f
ACO
812pnv_find_best_dpll(const intel_limit_t *limit,
813 struct intel_crtc_state *crtc_state,
ee9300bb
DV
814 int target, int refclk, intel_clock_t *match_clock,
815 intel_clock_t *best_clock)
79e53945 816{
3b1429d9 817 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945 818 intel_clock_t clock;
79e53945
JB
819 int err = target;
820
0206e353 821 memset(best_clock, 0, sizeof(*best_clock));
79e53945 822
3b1429d9
VS
823 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
824
42158660
ZY
825 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
826 clock.m1++) {
827 for (clock.m2 = limit->m2.min;
828 clock.m2 <= limit->m2.max; clock.m2++) {
42158660
ZY
829 for (clock.n = limit->n.min;
830 clock.n <= limit->n.max; clock.n++) {
831 for (clock.p1 = limit->p1.min;
832 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
833 int this_err;
834
dccbea3b 835 pnv_calc_dpll_params(refclk, &clock);
1b894b59
CW
836 if (!intel_PLL_is_valid(dev, limit,
837 &clock))
79e53945 838 continue;
cec2f356
SP
839 if (match_clock &&
840 clock.p != match_clock->p)
841 continue;
79e53945
JB
842
843 this_err = abs(clock.dot - target);
844 if (this_err < err) {
845 *best_clock = clock;
846 err = this_err;
847 }
848 }
849 }
850 }
851 }
852
853 return (err != target);
854}
855
d4906093 856static bool
a93e255f
ACO
857g4x_find_best_dpll(const intel_limit_t *limit,
858 struct intel_crtc_state *crtc_state,
ee9300bb
DV
859 int target, int refclk, intel_clock_t *match_clock,
860 intel_clock_t *best_clock)
d4906093 861{
3b1429d9 862 struct drm_device *dev = crtc_state->base.crtc->dev;
d4906093
ML
863 intel_clock_t clock;
864 int max_n;
3b1429d9 865 bool found = false;
6ba770dc
AJ
866 /* approximately equals target * 0.00585 */
867 int err_most = (target >> 8) + (target >> 9);
d4906093
ML
868
869 memset(best_clock, 0, sizeof(*best_clock));
3b1429d9
VS
870
871 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
872
d4906093 873 max_n = limit->n.max;
f77f13e2 874 /* based on hardware requirement, prefer smaller n to precision */
d4906093 875 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
f77f13e2 876 /* based on hardware requirement, prefere larger m1,m2 */
d4906093
ML
877 for (clock.m1 = limit->m1.max;
878 clock.m1 >= limit->m1.min; clock.m1--) {
879 for (clock.m2 = limit->m2.max;
880 clock.m2 >= limit->m2.min; clock.m2--) {
881 for (clock.p1 = limit->p1.max;
882 clock.p1 >= limit->p1.min; clock.p1--) {
883 int this_err;
884
dccbea3b 885 i9xx_calc_dpll_params(refclk, &clock);
1b894b59
CW
886 if (!intel_PLL_is_valid(dev, limit,
887 &clock))
d4906093 888 continue;
1b894b59
CW
889
890 this_err = abs(clock.dot - target);
d4906093
ML
891 if (this_err < err_most) {
892 *best_clock = clock;
893 err_most = this_err;
894 max_n = clock.n;
895 found = true;
896 }
897 }
898 }
899 }
900 }
2c07245f
ZW
901 return found;
902}
903
d5dd62bd
ID
904/*
905 * Check if the calculated PLL configuration is more optimal compared to the
906 * best configuration and error found so far. Return the calculated error.
907 */
908static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
909 const intel_clock_t *calculated_clock,
910 const intel_clock_t *best_clock,
911 unsigned int best_error_ppm,
912 unsigned int *error_ppm)
913{
9ca3ba01
ID
914 /*
915 * For CHV ignore the error and consider only the P value.
916 * Prefer a bigger P value based on HW requirements.
917 */
918 if (IS_CHERRYVIEW(dev)) {
919 *error_ppm = 0;
920
921 return calculated_clock->p > best_clock->p;
922 }
923
24be4e46
ID
924 if (WARN_ON_ONCE(!target_freq))
925 return false;
926
d5dd62bd
ID
927 *error_ppm = div_u64(1000000ULL *
928 abs(target_freq - calculated_clock->dot),
929 target_freq);
930 /*
931 * Prefer a better P value over a better (smaller) error if the error
932 * is small. Ensure this preference for future configurations too by
933 * setting the error to 0.
934 */
935 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
936 *error_ppm = 0;
937
938 return true;
939 }
940
941 return *error_ppm + 10 < best_error_ppm;
942}
943
a0c4da24 944static bool
a93e255f
ACO
945vlv_find_best_dpll(const intel_limit_t *limit,
946 struct intel_crtc_state *crtc_state,
ee9300bb
DV
947 int target, int refclk, intel_clock_t *match_clock,
948 intel_clock_t *best_clock)
a0c4da24 949{
a93e255f 950 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 951 struct drm_device *dev = crtc->base.dev;
6b4bf1c4 952 intel_clock_t clock;
69e4f900 953 unsigned int bestppm = 1000000;
27e639bf
VS
954 /* min update 19.2 MHz */
955 int max_n = min(limit->n.max, refclk / 19200);
49e497ef 956 bool found = false;
a0c4da24 957
6b4bf1c4
VS
958 target *= 5; /* fast clock */
959
960 memset(best_clock, 0, sizeof(*best_clock));
a0c4da24
JB
961
962 /* based on hardware requirement, prefer smaller n to precision */
27e639bf 963 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
811bbf05 964 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
889059d8 965 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
c1a9ae43 966 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6b4bf1c4 967 clock.p = clock.p1 * clock.p2;
a0c4da24 968 /* based on hardware requirement, prefer bigger m1,m2 values */
6b4bf1c4 969 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
d5dd62bd 970 unsigned int ppm;
69e4f900 971
6b4bf1c4
VS
972 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
973 refclk * clock.m1);
974
dccbea3b 975 vlv_calc_dpll_params(refclk, &clock);
43b0ac53 976
f01b7962
VS
977 if (!intel_PLL_is_valid(dev, limit,
978 &clock))
43b0ac53
VS
979 continue;
980
d5dd62bd
ID
981 if (!vlv_PLL_is_optimal(dev, target,
982 &clock,
983 best_clock,
984 bestppm, &ppm))
985 continue;
6b4bf1c4 986
d5dd62bd
ID
987 *best_clock = clock;
988 bestppm = ppm;
989 found = true;
a0c4da24
JB
990 }
991 }
992 }
993 }
a0c4da24 994
49e497ef 995 return found;
a0c4da24 996}
a4fc5ed6 997
ef9348c8 998static bool
a93e255f
ACO
999chv_find_best_dpll(const intel_limit_t *limit,
1000 struct intel_crtc_state *crtc_state,
ef9348c8
CML
1001 int target, int refclk, intel_clock_t *match_clock,
1002 intel_clock_t *best_clock)
1003{
a93e255f 1004 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 1005 struct drm_device *dev = crtc->base.dev;
9ca3ba01 1006 unsigned int best_error_ppm;
ef9348c8
CML
1007 intel_clock_t clock;
1008 uint64_t m2;
1009 int found = false;
1010
1011 memset(best_clock, 0, sizeof(*best_clock));
9ca3ba01 1012 best_error_ppm = 1000000;
ef9348c8
CML
1013
1014 /*
1015 * Based on hardware doc, the n always set to 1, and m1 always
1016 * set to 2. If requires to support 200Mhz refclk, we need to
1017 * revisit this because n may not 1 anymore.
1018 */
1019 clock.n = 1, clock.m1 = 2;
1020 target *= 5; /* fast clock */
1021
1022 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1023 for (clock.p2 = limit->p2.p2_fast;
1024 clock.p2 >= limit->p2.p2_slow;
1025 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
9ca3ba01 1026 unsigned int error_ppm;
ef9348c8
CML
1027
1028 clock.p = clock.p1 * clock.p2;
1029
1030 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1031 clock.n) << 22, refclk * clock.m1);
1032
1033 if (m2 > INT_MAX/clock.m1)
1034 continue;
1035
1036 clock.m2 = m2;
1037
dccbea3b 1038 chv_calc_dpll_params(refclk, &clock);
ef9348c8
CML
1039
1040 if (!intel_PLL_is_valid(dev, limit, &clock))
1041 continue;
1042
9ca3ba01
ID
1043 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1044 best_error_ppm, &error_ppm))
1045 continue;
1046
1047 *best_clock = clock;
1048 best_error_ppm = error_ppm;
1049 found = true;
ef9348c8
CML
1050 }
1051 }
1052
1053 return found;
1054}
1055
5ab7b0b7
ID
1056bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1057 intel_clock_t *best_clock)
1058{
1059 int refclk = i9xx_get_refclk(crtc_state, 0);
1060
1061 return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
1062 target_clock, refclk, NULL, best_clock);
1063}
1064
20ddf665
VS
1065bool intel_crtc_active(struct drm_crtc *crtc)
1066{
1067 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1068
1069 /* Be paranoid as we can arrive here with only partial
1070 * state retrieved from the hardware during setup.
1071 *
241bfc38 1072 * We can ditch the adjusted_mode.crtc_clock check as soon
20ddf665
VS
1073 * as Haswell has gained clock readout/fastboot support.
1074 *
66e514c1 1075 * We can ditch the crtc->primary->fb check as soon as we can
20ddf665 1076 * properly reconstruct framebuffers.
c3d1f436
MR
1077 *
1078 * FIXME: The intel_crtc->active here should be switched to
1079 * crtc->state->active once we have proper CRTC states wired up
1080 * for atomic.
20ddf665 1081 */
c3d1f436 1082 return intel_crtc->active && crtc->primary->state->fb &&
6e3c9717 1083 intel_crtc->config->base.adjusted_mode.crtc_clock;
20ddf665
VS
1084}
1085
a5c961d1
PZ
1086enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1087 enum pipe pipe)
1088{
1089 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1090 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1091
6e3c9717 1092 return intel_crtc->config->cpu_transcoder;
a5c961d1
PZ
1093}
1094
fbf49ea2
VS
1095static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1096{
1097 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1098 i915_reg_t reg = PIPEDSL(pipe);
fbf49ea2
VS
1099 u32 line1, line2;
1100 u32 line_mask;
1101
1102 if (IS_GEN2(dev))
1103 line_mask = DSL_LINEMASK_GEN2;
1104 else
1105 line_mask = DSL_LINEMASK_GEN3;
1106
1107 line1 = I915_READ(reg) & line_mask;
6adfb1ef 1108 msleep(5);
fbf49ea2
VS
1109 line2 = I915_READ(reg) & line_mask;
1110
1111 return line1 == line2;
1112}
1113
ab7ad7f6
KP
1114/*
1115 * intel_wait_for_pipe_off - wait for pipe to turn off
575f7ab7 1116 * @crtc: crtc whose pipe to wait for
9d0498a2
JB
1117 *
1118 * After disabling a pipe, we can't wait for vblank in the usual way,
1119 * spinning on the vblank interrupt status bit, since we won't actually
1120 * see an interrupt when the pipe is disabled.
1121 *
ab7ad7f6
KP
1122 * On Gen4 and above:
1123 * wait for the pipe register state bit to turn off
1124 *
1125 * Otherwise:
1126 * wait for the display line value to settle (it usually
1127 * ends up stopping at the start of the next frame).
58e10eb9 1128 *
9d0498a2 1129 */
575f7ab7 1130static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
9d0498a2 1131{
575f7ab7 1132 struct drm_device *dev = crtc->base.dev;
9d0498a2 1133 struct drm_i915_private *dev_priv = dev->dev_private;
6e3c9717 1134 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
575f7ab7 1135 enum pipe pipe = crtc->pipe;
ab7ad7f6
KP
1136
1137 if (INTEL_INFO(dev)->gen >= 4) {
f0f59a00 1138 i915_reg_t reg = PIPECONF(cpu_transcoder);
ab7ad7f6
KP
1139
1140 /* Wait for the Pipe State to go off */
58e10eb9
CW
1141 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1142 100))
284637d9 1143 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1144 } else {
ab7ad7f6 1145 /* Wait for the display line to settle */
fbf49ea2 1146 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
284637d9 1147 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1148 }
79e53945
JB
1149}
1150
/* Human-readable form of an enabled/disabled flag for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";
	return "off";
}
1155
1156/* Only for pre-ILK configs */
55607e8a
DV
1157void assert_pll(struct drm_i915_private *dev_priv,
1158 enum pipe pipe, bool state)
b24e7179 1159{
b24e7179
JB
1160 u32 val;
1161 bool cur_state;
1162
649636ef 1163 val = I915_READ(DPLL(pipe));
b24e7179 1164 cur_state = !!(val & DPLL_VCO_ENABLE);
e2c719b7 1165 I915_STATE_WARN(cur_state != state,
b24e7179
JB
1166 "PLL state assertion failure (expected %s, current %s)\n",
1167 state_string(state), state_string(cur_state));
1168}
b24e7179 1169
23538ef1
JN
1170/* XXX: the dsi pll is shared between MIPI DSI ports */
1171static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
1172{
1173 u32 val;
1174 bool cur_state;
1175
a580516d 1176 mutex_lock(&dev_priv->sb_lock);
23538ef1 1177 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
a580516d 1178 mutex_unlock(&dev_priv->sb_lock);
23538ef1
JN
1179
1180 cur_state = val & DSI_PLL_VCO_EN;
e2c719b7 1181 I915_STATE_WARN(cur_state != state,
23538ef1
JN
1182 "DSI PLL state assertion failure (expected %s, current %s)\n",
1183 state_string(state), state_string(cur_state));
1184}
1185#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
1186#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
1187
55607e8a 1188struct intel_shared_dpll *
e2b78267
DV
1189intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
1190{
1191 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1192
6e3c9717 1193 if (crtc->config->shared_dpll < 0)
e2b78267
DV
1194 return NULL;
1195
6e3c9717 1196 return &dev_priv->shared_dplls[crtc->config->shared_dpll];
e2b78267
DV
1197}
1198
040484af 1199/* For ILK+ */
55607e8a
DV
1200void assert_shared_dpll(struct drm_i915_private *dev_priv,
1201 struct intel_shared_dpll *pll,
1202 bool state)
040484af 1203{
040484af 1204 bool cur_state;
5358901f 1205 struct intel_dpll_hw_state hw_state;
040484af 1206
92b27b08 1207 if (WARN (!pll,
46edb027 1208 "asserting DPLL %s with no DPLL\n", state_string(state)))
ee7b9f93 1209 return;
ee7b9f93 1210
5358901f 1211 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
e2c719b7 1212 I915_STATE_WARN(cur_state != state,
5358901f
DV
1213 "%s assertion failure (expected %s, current %s)\n",
1214 pll->name, state_string(state), state_string(cur_state));
040484af 1215}
040484af
JB
1216
1217static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1218 enum pipe pipe, bool state)
1219{
040484af 1220 bool cur_state;
ad80a810
PZ
1221 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1222 pipe);
040484af 1223
affa9354
PZ
1224 if (HAS_DDI(dev_priv->dev)) {
1225 /* DDI does not have a specific FDI_TX register */
649636ef 1226 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
ad80a810 1227 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
bf507ef7 1228 } else {
649636ef 1229 u32 val = I915_READ(FDI_TX_CTL(pipe));
bf507ef7
ED
1230 cur_state = !!(val & FDI_TX_ENABLE);
1231 }
e2c719b7 1232 I915_STATE_WARN(cur_state != state,
040484af
JB
1233 "FDI TX state assertion failure (expected %s, current %s)\n",
1234 state_string(state), state_string(cur_state));
1235}
1236#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1237#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1238
1239static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1240 enum pipe pipe, bool state)
1241{
040484af
JB
1242 u32 val;
1243 bool cur_state;
1244
649636ef 1245 val = I915_READ(FDI_RX_CTL(pipe));
d63fa0dc 1246 cur_state = !!(val & FDI_RX_ENABLE);
e2c719b7 1247 I915_STATE_WARN(cur_state != state,
040484af
JB
1248 "FDI RX state assertion failure (expected %s, current %s)\n",
1249 state_string(state), state_string(cur_state));
1250}
1251#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1252#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1253
1254static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1255 enum pipe pipe)
1256{
040484af
JB
1257 u32 val;
1258
1259 /* ILK FDI PLL is always enabled */
3d13ef2e 1260 if (INTEL_INFO(dev_priv->dev)->gen == 5)
040484af
JB
1261 return;
1262
bf507ef7 1263 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
affa9354 1264 if (HAS_DDI(dev_priv->dev))
bf507ef7
ED
1265 return;
1266
649636ef 1267 val = I915_READ(FDI_TX_CTL(pipe));
e2c719b7 1268 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
040484af
JB
1269}
1270
55607e8a
DV
1271void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1272 enum pipe pipe, bool state)
040484af 1273{
040484af 1274 u32 val;
55607e8a 1275 bool cur_state;
040484af 1276
649636ef 1277 val = I915_READ(FDI_RX_CTL(pipe));
55607e8a 1278 cur_state = !!(val & FDI_RX_PLL_ENABLE);
e2c719b7 1279 I915_STATE_WARN(cur_state != state,
55607e8a
DV
1280 "FDI RX PLL assertion failure (expected %s, current %s)\n",
1281 state_string(state), state_string(cur_state));
040484af
JB
1282}
1283
b680c37a
DV
1284void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1285 enum pipe pipe)
ea0760cf 1286{
bedd4dba 1287 struct drm_device *dev = dev_priv->dev;
f0f59a00 1288 i915_reg_t pp_reg;
ea0760cf
JB
1289 u32 val;
1290 enum pipe panel_pipe = PIPE_A;
0de3b485 1291 bool locked = true;
ea0760cf 1292
bedd4dba
JN
1293 if (WARN_ON(HAS_DDI(dev)))
1294 return;
1295
1296 if (HAS_PCH_SPLIT(dev)) {
1297 u32 port_sel;
1298
ea0760cf 1299 pp_reg = PCH_PP_CONTROL;
bedd4dba
JN
1300 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1301
1302 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1303 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1304 panel_pipe = PIPE_B;
1305 /* XXX: else fix for eDP */
1306 } else if (IS_VALLEYVIEW(dev)) {
1307 /* presumably write lock depends on pipe, not port select */
1308 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1309 panel_pipe = pipe;
ea0760cf
JB
1310 } else {
1311 pp_reg = PP_CONTROL;
bedd4dba
JN
1312 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1313 panel_pipe = PIPE_B;
ea0760cf
JB
1314 }
1315
1316 val = I915_READ(pp_reg);
1317 if (!(val & PANEL_POWER_ON) ||
ec49ba2d 1318 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
ea0760cf
JB
1319 locked = false;
1320
e2c719b7 1321 I915_STATE_WARN(panel_pipe == pipe && locked,
ea0760cf 1322 "panel assertion failure, pipe %c regs locked\n",
9db4a9c7 1323 pipe_name(pipe));
ea0760cf
JB
1324}
1325
93ce0ba6
JN
1326static void assert_cursor(struct drm_i915_private *dev_priv,
1327 enum pipe pipe, bool state)
1328{
1329 struct drm_device *dev = dev_priv->dev;
1330 bool cur_state;
1331
d9d82081 1332 if (IS_845G(dev) || IS_I865G(dev))
0b87c24e 1333 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
d9d82081 1334 else
5efb3e28 1335 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
93ce0ba6 1336
e2c719b7 1337 I915_STATE_WARN(cur_state != state,
93ce0ba6
JN
1338 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
1339 pipe_name(pipe), state_string(state), state_string(cur_state));
1340}
1341#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1342#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1343
b840d907
JB
1344void assert_pipe(struct drm_i915_private *dev_priv,
1345 enum pipe pipe, bool state)
b24e7179 1346{
63d7bbe9 1347 bool cur_state;
702e7a56
PZ
1348 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1349 pipe);
b24e7179 1350
b6b5d049
VS
1351 /* if we need the pipe quirk it must be always on */
1352 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1353 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
8e636784
DV
1354 state = true;
1355
f458ebbc 1356 if (!intel_display_power_is_enabled(dev_priv,
b97186f0 1357 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
69310161
PZ
1358 cur_state = false;
1359 } else {
649636ef 1360 u32 val = I915_READ(PIPECONF(cpu_transcoder));
69310161
PZ
1361 cur_state = !!(val & PIPECONF_ENABLE);
1362 }
1363
e2c719b7 1364 I915_STATE_WARN(cur_state != state,
63d7bbe9 1365 "pipe %c assertion failure (expected %s, current %s)\n",
9db4a9c7 1366 pipe_name(pipe), state_string(state), state_string(cur_state));
b24e7179
JB
1367}
1368
931872fc
CW
1369static void assert_plane(struct drm_i915_private *dev_priv,
1370 enum plane plane, bool state)
b24e7179 1371{
b24e7179 1372 u32 val;
931872fc 1373 bool cur_state;
b24e7179 1374
649636ef 1375 val = I915_READ(DSPCNTR(plane));
931872fc 1376 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
e2c719b7 1377 I915_STATE_WARN(cur_state != state,
931872fc
CW
1378 "plane %c assertion failure (expected %s, current %s)\n",
1379 plane_name(plane), state_string(state), state_string(cur_state));
b24e7179
JB
1380}
1381
931872fc
CW
1382#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1383#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1384
b24e7179
JB
1385static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1386 enum pipe pipe)
1387{
653e1026 1388 struct drm_device *dev = dev_priv->dev;
649636ef 1389 int i;
b24e7179 1390
653e1026
VS
1391 /* Primary planes are fixed to pipes on gen4+ */
1392 if (INTEL_INFO(dev)->gen >= 4) {
649636ef 1393 u32 val = I915_READ(DSPCNTR(pipe));
e2c719b7 1394 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
28c05794
AJ
1395 "plane %c assertion failure, should be disabled but not\n",
1396 plane_name(pipe));
19ec1358 1397 return;
28c05794 1398 }
19ec1358 1399
b24e7179 1400 /* Need to check both planes against the pipe */
055e393f 1401 for_each_pipe(dev_priv, i) {
649636ef
VS
1402 u32 val = I915_READ(DSPCNTR(i));
1403 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
b24e7179 1404 DISPPLANE_SEL_PIPE_SHIFT;
e2c719b7 1405 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
9db4a9c7
JB
1406 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1407 plane_name(i), pipe_name(pipe));
b24e7179
JB
1408 }
1409}
1410
19332d7a
JB
1411static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1412 enum pipe pipe)
1413{
20674eef 1414 struct drm_device *dev = dev_priv->dev;
649636ef 1415 int sprite;
19332d7a 1416
7feb8b88 1417 if (INTEL_INFO(dev)->gen >= 9) {
3bdcfc0c 1418 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1419 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
e2c719b7 1420 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
7feb8b88
DL
1421 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1422 sprite, pipe_name(pipe));
1423 }
1424 } else if (IS_VALLEYVIEW(dev)) {
3bdcfc0c 1425 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1426 u32 val = I915_READ(SPCNTR(pipe, sprite));
e2c719b7 1427 I915_STATE_WARN(val & SP_ENABLE,
20674eef 1428 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1fe47785 1429 sprite_name(pipe, sprite), pipe_name(pipe));
20674eef
VS
1430 }
1431 } else if (INTEL_INFO(dev)->gen >= 7) {
649636ef 1432 u32 val = I915_READ(SPRCTL(pipe));
e2c719b7 1433 I915_STATE_WARN(val & SPRITE_ENABLE,
06da8da2 1434 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef
VS
1435 plane_name(pipe), pipe_name(pipe));
1436 } else if (INTEL_INFO(dev)->gen >= 5) {
649636ef 1437 u32 val = I915_READ(DVSCNTR(pipe));
e2c719b7 1438 I915_STATE_WARN(val & DVS_ENABLE,
06da8da2 1439 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef 1440 plane_name(pipe), pipe_name(pipe));
19332d7a
JB
1441 }
1442}
1443
/*
 * Warn if vblank interrupts can still be enabled on @crtc; balance the
 * successful get with a put so the reference count stays unchanged.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1449
89eff4be 1450static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
92f2584a
JB
1451{
1452 u32 val;
1453 bool enabled;
1454
e2c719b7 1455 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));
9d82aa17 1456
92f2584a
JB
1457 val = I915_READ(PCH_DREF_CONTROL);
1458 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1459 DREF_SUPERSPREAD_SOURCE_MASK));
e2c719b7 1460 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
92f2584a
JB
1461}
1462
ab9412ba
DV
1463static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1464 enum pipe pipe)
92f2584a 1465{
92f2584a
JB
1466 u32 val;
1467 bool enabled;
1468
649636ef 1469 val = I915_READ(PCH_TRANSCONF(pipe));
92f2584a 1470 enabled = !!(val & TRANS_ENABLE);
e2c719b7 1471 I915_STATE_WARN(enabled,
9db4a9c7
JB
1472 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1473 pipe_name(pipe));
92f2584a
JB
1474}
1475
4e634389
KP
1476static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1477 enum pipe pipe, u32 port_sel, u32 val)
f0575e92
KP
1478{
1479 if ((val & DP_PORT_EN) == 0)
1480 return false;
1481
1482 if (HAS_PCH_CPT(dev_priv->dev)) {
f0f59a00 1483 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
f0575e92
KP
1484 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1485 return false;
44f37d1f
CML
1486 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1487 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1488 return false;
f0575e92
KP
1489 } else {
1490 if ((val & DP_PIPE_MASK) != (pipe << 30))
1491 return false;
1492 }
1493 return true;
1494}
1495
1519b995
KP
1496static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1497 enum pipe pipe, u32 val)
1498{
dc0fa718 1499 if ((val & SDVO_ENABLE) == 0)
1519b995
KP
1500 return false;
1501
1502 if (HAS_PCH_CPT(dev_priv->dev)) {
dc0fa718 1503 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1519b995 1504 return false;
44f37d1f
CML
1505 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1506 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1507 return false;
1519b995 1508 } else {
dc0fa718 1509 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1519b995
KP
1510 return false;
1511 }
1512 return true;
1513}
1514
1515static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1516 enum pipe pipe, u32 val)
1517{
1518 if ((val & LVDS_PORT_EN) == 0)
1519 return false;
1520
1521 if (HAS_PCH_CPT(dev_priv->dev)) {
1522 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1523 return false;
1524 } else {
1525 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1526 return false;
1527 }
1528 return true;
1529}
1530
1531static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1532 enum pipe pipe, u32 val)
1533{
1534 if ((val & ADPA_DAC_ENABLE) == 0)
1535 return false;
1536 if (HAS_PCH_CPT(dev_priv->dev)) {
1537 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1538 return false;
1539 } else {
1540 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1541 return false;
1542 }
1543 return true;
1544}
1545
291906f1 1546static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
f0f59a00
VS
1547 enum pipe pipe, i915_reg_t reg,
1548 u32 port_sel)
291906f1 1549{
47a05eca 1550 u32 val = I915_READ(reg);
e2c719b7 1551 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
291906f1 1552 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1553 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1554
e2c719b7 1555 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
75c5da27 1556 && (val & DP_PIPEB_SELECT),
de9a35ab 1557 "IBX PCH dp port still using transcoder B\n");
291906f1
JB
1558}
1559
1560static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
f0f59a00 1561 enum pipe pipe, i915_reg_t reg)
291906f1 1562{
47a05eca 1563 u32 val = I915_READ(reg);
e2c719b7 1564 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
23c99e77 1565 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1566 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1567
e2c719b7 1568 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
75c5da27 1569 && (val & SDVO_PIPE_B_SELECT),
de9a35ab 1570 "IBX PCH hdmi port still using transcoder B\n");
291906f1
JB
1571}
1572
1573static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1574 enum pipe pipe)
1575{
291906f1 1576 u32 val;
291906f1 1577
f0575e92
KP
1578 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1579 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1580 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
291906f1 1581
649636ef 1582 val = I915_READ(PCH_ADPA);
e2c719b7 1583 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
291906f1 1584 "PCH VGA enabled on transcoder %c, should be disabled\n",
9db4a9c7 1585 pipe_name(pipe));
291906f1 1586
649636ef 1587 val = I915_READ(PCH_LVDS);
e2c719b7 1588 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
291906f1 1589 "PCH LVDS enabled on transcoder %c, should be disabled\n",
9db4a9c7 1590 pipe_name(pipe));
291906f1 1591
e2debe91
PZ
1592 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1593 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1594 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
291906f1
JB
1595}
1596
d288f65f 1597static void vlv_enable_pll(struct intel_crtc *crtc,
5cec258b 1598 const struct intel_crtc_state *pipe_config)
87442f73 1599{
426115cf
DV
1600 struct drm_device *dev = crtc->base.dev;
1601 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1602 i915_reg_t reg = DPLL(crtc->pipe);
d288f65f 1603 u32 dpll = pipe_config->dpll_hw_state.dpll;
87442f73 1604
426115cf 1605 assert_pipe_disabled(dev_priv, crtc->pipe);
87442f73
DV
1606
1607 /* No really, not for ILK+ */
1608 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
1609
1610 /* PLL is protected by panel, make sure we can write it */
6a9e7363 1611 if (IS_MOBILE(dev_priv->dev))
426115cf 1612 assert_panel_unlocked(dev_priv, crtc->pipe);
87442f73 1613
426115cf
DV
1614 I915_WRITE(reg, dpll);
1615 POSTING_READ(reg);
1616 udelay(150);
1617
1618 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1619 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1620
d288f65f 1621 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
426115cf 1622 POSTING_READ(DPLL_MD(crtc->pipe));
87442f73
DV
1623
1624 /* We do this three times for luck */
426115cf 1625 I915_WRITE(reg, dpll);
87442f73
DV
1626 POSTING_READ(reg);
1627 udelay(150); /* wait for warmup */
426115cf 1628 I915_WRITE(reg, dpll);
87442f73
DV
1629 POSTING_READ(reg);
1630 udelay(150); /* wait for warmup */
426115cf 1631 I915_WRITE(reg, dpll);
87442f73
DV
1632 POSTING_READ(reg);
1633 udelay(150); /* wait for warmup */
1634}
1635
d288f65f 1636static void chv_enable_pll(struct intel_crtc *crtc,
5cec258b 1637 const struct intel_crtc_state *pipe_config)
9d556c99
CML
1638{
1639 struct drm_device *dev = crtc->base.dev;
1640 struct drm_i915_private *dev_priv = dev->dev_private;
1641 int pipe = crtc->pipe;
1642 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9d556c99
CML
1643 u32 tmp;
1644
1645 assert_pipe_disabled(dev_priv, crtc->pipe);
1646
1647 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));
1648
a580516d 1649 mutex_lock(&dev_priv->sb_lock);
9d556c99
CML
1650
1651 /* Enable back the 10bit clock to display controller */
1652 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1653 tmp |= DPIO_DCLKP_EN;
1654 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1655
54433e91
VS
1656 mutex_unlock(&dev_priv->sb_lock);
1657
9d556c99
CML
1658 /*
1659 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1660 */
1661 udelay(1);
1662
1663 /* Enable PLL */
d288f65f 1664 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
9d556c99
CML
1665
1666 /* Check PLL is locked */
a11b0703 1667 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
9d556c99
CML
1668 DRM_ERROR("PLL %d failed to lock\n", pipe);
1669
a11b0703 1670 /* not sure when this should be written */
d288f65f 1671 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
a11b0703 1672 POSTING_READ(DPLL_MD(pipe));
9d556c99
CML
1673}
1674
1c4e0274
VS
1675static int intel_num_dvo_pipes(struct drm_device *dev)
1676{
1677 struct intel_crtc *crtc;
1678 int count = 0;
1679
1680 for_each_intel_crtc(dev, crtc)
3538b9df 1681 count += crtc->base.state->active &&
409ee761 1682 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1c4e0274
VS
1683
1684 return count;
1685}
1686
66e3d5c0 1687static void i9xx_enable_pll(struct intel_crtc *crtc)
63d7bbe9 1688{
66e3d5c0
DV
1689 struct drm_device *dev = crtc->base.dev;
1690 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1691 i915_reg_t reg = DPLL(crtc->pipe);
6e3c9717 1692 u32 dpll = crtc->config->dpll_hw_state.dpll;
63d7bbe9 1693
66e3d5c0 1694 assert_pipe_disabled(dev_priv, crtc->pipe);
58c6eaa2 1695
63d7bbe9 1696 /* No really, not for ILK+ */
3d13ef2e 1697 BUG_ON(INTEL_INFO(dev)->gen >= 5);
63d7bbe9
JB
1698
1699 /* PLL is protected by panel, make sure we can write it */
66e3d5c0
DV
1700 if (IS_MOBILE(dev) && !IS_I830(dev))
1701 assert_panel_unlocked(dev_priv, crtc->pipe);
63d7bbe9 1702
1c4e0274
VS
1703 /* Enable DVO 2x clock on both PLLs if necessary */
1704 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1705 /*
1706 * It appears to be important that we don't enable this
1707 * for the current pipe before otherwise configuring the
1708 * PLL. No idea how this should be handled if multiple
1709 * DVO outputs are enabled simultaneosly.
1710 */
1711 dpll |= DPLL_DVO_2X_MODE;
1712 I915_WRITE(DPLL(!crtc->pipe),
1713 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1714 }
66e3d5c0
DV
1715
1716 /* Wait for the clocks to stabilize. */
1717 POSTING_READ(reg);
1718 udelay(150);
1719
1720 if (INTEL_INFO(dev)->gen >= 4) {
1721 I915_WRITE(DPLL_MD(crtc->pipe),
6e3c9717 1722 crtc->config->dpll_hw_state.dpll_md);
66e3d5c0
DV
1723 } else {
1724 /* The pixel multiplier can only be updated once the
1725 * DPLL is enabled and the clocks are stable.
1726 *
1727 * So write it again.
1728 */
1729 I915_WRITE(reg, dpll);
1730 }
63d7bbe9
JB
1731
1732 /* We do this three times for luck */
66e3d5c0 1733 I915_WRITE(reg, dpll);
63d7bbe9
JB
1734 POSTING_READ(reg);
1735 udelay(150); /* wait for warmup */
66e3d5c0 1736 I915_WRITE(reg, dpll);
63d7bbe9
JB
1737 POSTING_READ(reg);
1738 udelay(150); /* wait for warmup */
66e3d5c0 1739 I915_WRITE(reg, dpll);
63d7bbe9
JB
1740 POSTING_READ(reg);
1741 udelay(150); /* wait for warmup */
1742}
1743
1744/**
50b44a44 1745 * i9xx_disable_pll - disable a PLL
63d7bbe9
JB
1746 * @dev_priv: i915 private structure
1747 * @pipe: pipe PLL to disable
1748 *
1749 * Disable the PLL for @pipe, making sure the pipe is off first.
1750 *
1751 * Note! This is for pre-ILK only.
1752 */
1c4e0274 1753static void i9xx_disable_pll(struct intel_crtc *crtc)
63d7bbe9 1754{
1c4e0274
VS
1755 struct drm_device *dev = crtc->base.dev;
1756 struct drm_i915_private *dev_priv = dev->dev_private;
1757 enum pipe pipe = crtc->pipe;
1758
1759 /* Disable DVO 2x clock on both PLLs if necessary */
1760 if (IS_I830(dev) &&
409ee761 1761 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
3538b9df 1762 !intel_num_dvo_pipes(dev)) {
1c4e0274
VS
1763 I915_WRITE(DPLL(PIPE_B),
1764 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
1765 I915_WRITE(DPLL(PIPE_A),
1766 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
1767 }
1768
b6b5d049
VS
1769 /* Don't disable pipe or pipe PLLs if needed */
1770 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1771 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
63d7bbe9
JB
1772 return;
1773
1774 /* Make sure the pipe isn't still relying on us */
1775 assert_pipe_disabled(dev_priv, pipe);
1776
b8afb911 1777 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
50b44a44 1778 POSTING_READ(DPLL(pipe));
63d7bbe9
JB
1779}
1780
f6071166
JB
1781static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1782{
b8afb911 1783 u32 val;
f6071166
JB
1784
1785 /* Make sure the pipe isn't still relying on us */
1786 assert_pipe_disabled(dev_priv, pipe);
1787
e5cbfbfb
ID
1788 /*
1789 * Leave integrated clock source and reference clock enabled for pipe B.
1790 * The latter is needed for VGA hotplug / manual detection.
1791 */
b8afb911 1792 val = DPLL_VGA_MODE_DIS;
f6071166 1793 if (pipe == PIPE_B)
60bfe44f 1794 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
f6071166
JB
1795 I915_WRITE(DPLL(pipe), val);
1796 POSTING_READ(DPLL(pipe));
076ed3b2
CML
1797
1798}
1799
1800static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1801{
d752048d 1802 enum dpio_channel port = vlv_pipe_to_channel(pipe);
076ed3b2
CML
1803 u32 val;
1804
a11b0703
VS
1805 /* Make sure the pipe isn't still relying on us */
1806 assert_pipe_disabled(dev_priv, pipe);
076ed3b2 1807
a11b0703 1808 /* Set PLL en = 0 */
60bfe44f
VS
1809 val = DPLL_SSC_REF_CLK_CHV |
1810 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
a11b0703
VS
1811 if (pipe != PIPE_A)
1812 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1813 I915_WRITE(DPLL(pipe), val);
1814 POSTING_READ(DPLL(pipe));
d752048d 1815
a580516d 1816 mutex_lock(&dev_priv->sb_lock);
d752048d
VS
1817
1818 /* Disable 10bit clock to display controller */
1819 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1820 val &= ~DPIO_DCLKP_EN;
1821 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1822
a580516d 1823 mutex_unlock(&dev_priv->sb_lock);
f6071166
JB
1824}
1825
e4607fcf 1826void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
9b6de0a1
VS
1827 struct intel_digital_port *dport,
1828 unsigned int expected_mask)
89b667f8
JB
1829{
1830 u32 port_mask;
f0f59a00 1831 i915_reg_t dpll_reg;
89b667f8 1832
e4607fcf
CML
1833 switch (dport->port) {
1834 case PORT_B:
89b667f8 1835 port_mask = DPLL_PORTB_READY_MASK;
00fc31b7 1836 dpll_reg = DPLL(0);
e4607fcf
CML
1837 break;
1838 case PORT_C:
89b667f8 1839 port_mask = DPLL_PORTC_READY_MASK;
00fc31b7 1840 dpll_reg = DPLL(0);
9b6de0a1 1841 expected_mask <<= 4;
00fc31b7
CML
1842 break;
1843 case PORT_D:
1844 port_mask = DPLL_PORTD_READY_MASK;
1845 dpll_reg = DPIO_PHY_STATUS;
e4607fcf
CML
1846 break;
1847 default:
1848 BUG();
1849 }
89b667f8 1850
9b6de0a1
VS
1851 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1852 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1853 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
89b667f8
JB
1854}
1855
b14b1055
DV
1856static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1857{
1858 struct drm_device *dev = crtc->base.dev;
1859 struct drm_i915_private *dev_priv = dev->dev_private;
1860 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
1861
be19f0ff
CW
1862 if (WARN_ON(pll == NULL))
1863 return;
1864
3e369b76 1865 WARN_ON(!pll->config.crtc_mask);
b14b1055
DV
1866 if (pll->active == 0) {
1867 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1868 WARN_ON(pll->on);
1869 assert_shared_dpll_disabled(dev_priv, pll);
1870
1871 pll->mode_set(dev_priv, pll);
1872 }
1873}
1874
92f2584a 1875/**
85b3894f 1876 * intel_enable_shared_dpll - enable PCH PLL
92f2584a
JB
1877 * @dev_priv: i915 private structure
1878 * @pipe: pipe PLL to enable
1879 *
1880 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1881 * drives the transcoder clock.
1882 */
85b3894f 1883static void intel_enable_shared_dpll(struct intel_crtc *crtc)
92f2584a 1884{
3d13ef2e
DL
1885 struct drm_device *dev = crtc->base.dev;
1886 struct drm_i915_private *dev_priv = dev->dev_private;
e2b78267 1887 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
92f2584a 1888
87a875bb 1889 if (WARN_ON(pll == NULL))
48da64a8
CW
1890 return;
1891
3e369b76 1892 if (WARN_ON(pll->config.crtc_mask == 0))
48da64a8 1893 return;
ee7b9f93 1894
74dd6928 1895 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
46edb027 1896 pll->name, pll->active, pll->on,
e2b78267 1897 crtc->base.base.id);
92f2584a 1898
cdbd2316
DV
1899 if (pll->active++) {
1900 WARN_ON(!pll->on);
e9d6944e 1901 assert_shared_dpll_enabled(dev_priv, pll);
ee7b9f93
JB
1902 return;
1903 }
f4a091c7 1904 WARN_ON(pll->on);
ee7b9f93 1905
bd2bb1b9
PZ
1906 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
1907
46edb027 1908 DRM_DEBUG_KMS("enabling %s\n", pll->name);
e7b903d2 1909 pll->enable(dev_priv, pll);
ee7b9f93 1910 pll->on = true;
92f2584a
JB
1911}
1912
f6daaec2 1913static void intel_disable_shared_dpll(struct intel_crtc *crtc)
92f2584a 1914{
3d13ef2e
DL
1915 struct drm_device *dev = crtc->base.dev;
1916 struct drm_i915_private *dev_priv = dev->dev_private;
e2b78267 1917 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
4c609cb8 1918
92f2584a 1919 /* PCH only available on ILK+ */
80aa9312
JB
1920 if (INTEL_INFO(dev)->gen < 5)
1921 return;
1922
eddfcbcd
ML
1923 if (pll == NULL)
1924 return;
92f2584a 1925
eddfcbcd 1926 if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
48da64a8 1927 return;
7a419866 1928
46edb027
DV
1929 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
1930 pll->name, pll->active, pll->on,
e2b78267 1931 crtc->base.base.id);
7a419866 1932
48da64a8 1933 if (WARN_ON(pll->active == 0)) {
e9d6944e 1934 assert_shared_dpll_disabled(dev_priv, pll);
48da64a8
CW
1935 return;
1936 }
1937
e9d6944e 1938 assert_shared_dpll_enabled(dev_priv, pll);
f4a091c7 1939 WARN_ON(!pll->on);
cdbd2316 1940 if (--pll->active)
7a419866 1941 return;
ee7b9f93 1942
46edb027 1943 DRM_DEBUG_KMS("disabling %s\n", pll->name);
e7b903d2 1944 pll->disable(dev_priv, pll);
ee7b9f93 1945 pll->on = false;
bd2bb1b9
PZ
1946
1947 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
92f2584a
JB
1948}
1949
b8a4f404
PZ
1950static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1951 enum pipe pipe)
040484af 1952{
23670b32 1953 struct drm_device *dev = dev_priv->dev;
7c26e5c6 1954 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
e2b78267 1955 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
f0f59a00
VS
1956 i915_reg_t reg;
1957 uint32_t val, pipeconf_val;
040484af
JB
1958
1959 /* PCH only available on ILK+ */
55522f37 1960 BUG_ON(!HAS_PCH_SPLIT(dev));
040484af
JB
1961
1962 /* Make sure PCH DPLL is enabled */
e72f9fbf 1963 assert_shared_dpll_enabled(dev_priv,
e9d6944e 1964 intel_crtc_to_shared_dpll(intel_crtc));
040484af
JB
1965
1966 /* FDI must be feeding us bits for PCH ports */
1967 assert_fdi_tx_enabled(dev_priv, pipe);
1968 assert_fdi_rx_enabled(dev_priv, pipe);
1969
23670b32
DV
1970 if (HAS_PCH_CPT(dev)) {
1971 /* Workaround: Set the timing override bit before enabling the
1972 * pch transcoder. */
1973 reg = TRANS_CHICKEN2(pipe);
1974 val = I915_READ(reg);
1975 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1976 I915_WRITE(reg, val);
59c859d6 1977 }
23670b32 1978
ab9412ba 1979 reg = PCH_TRANSCONF(pipe);
040484af 1980 val = I915_READ(reg);
5f7f726d 1981 pipeconf_val = I915_READ(PIPECONF(pipe));
e9bcff5c
JB
1982
1983 if (HAS_PCH_IBX(dev_priv->dev)) {
1984 /*
c5de7c6f
VS
1985 * Make the BPC in transcoder be consistent with
1986 * that in pipeconf reg. For HDMI we must use 8bpc
1987 * here for both 8bpc and 12bpc.
e9bcff5c 1988 */
dfd07d72 1989 val &= ~PIPECONF_BPC_MASK;
c5de7c6f
VS
1990 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1991 val |= PIPECONF_8BPC;
1992 else
1993 val |= pipeconf_val & PIPECONF_BPC_MASK;
e9bcff5c 1994 }
5f7f726d
PZ
1995
1996 val &= ~TRANS_INTERLACE_MASK;
1997 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
7c26e5c6 1998 if (HAS_PCH_IBX(dev_priv->dev) &&
409ee761 1999 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7c26e5c6
PZ
2000 val |= TRANS_LEGACY_INTERLACED_ILK;
2001 else
2002 val |= TRANS_INTERLACED;
5f7f726d
PZ
2003 else
2004 val |= TRANS_PROGRESSIVE;
2005
040484af
JB
2006 I915_WRITE(reg, val | TRANS_ENABLE);
2007 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4bb6f1f3 2008 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
040484af
JB
2009}
2010
8fb033d7 2011static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
937bb610 2012 enum transcoder cpu_transcoder)
040484af 2013{
8fb033d7 2014 u32 val, pipeconf_val;
8fb033d7
PZ
2015
2016 /* PCH only available on ILK+ */
55522f37 2017 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));
8fb033d7 2018
8fb033d7 2019 /* FDI must be feeding us bits for PCH ports */
1a240d4d 2020 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
937bb610 2021 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
8fb033d7 2022
223a6fdf 2023 /* Workaround: set timing override bit. */
36c0d0cf 2024 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 2025 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 2026 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
223a6fdf 2027
25f3ef11 2028 val = TRANS_ENABLE;
937bb610 2029 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
8fb033d7 2030
9a76b1c6
PZ
2031 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
2032 PIPECONF_INTERLACED_ILK)
a35f2679 2033 val |= TRANS_INTERLACED;
8fb033d7
PZ
2034 else
2035 val |= TRANS_PROGRESSIVE;
2036
ab9412ba
DV
2037 I915_WRITE(LPT_TRANSCONF, val);
2038 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
937bb610 2039 DRM_ERROR("Failed to enable PCH transcoder\n");
8fb033d7
PZ
2040}
2041
b8a4f404
PZ
2042static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
2043 enum pipe pipe)
040484af 2044{
23670b32 2045 struct drm_device *dev = dev_priv->dev;
f0f59a00
VS
2046 i915_reg_t reg;
2047 uint32_t val;
040484af
JB
2048
2049 /* FDI relies on the transcoder */
2050 assert_fdi_tx_disabled(dev_priv, pipe);
2051 assert_fdi_rx_disabled(dev_priv, pipe);
2052
291906f1
JB
2053 /* Ports must be off as well */
2054 assert_pch_ports_disabled(dev_priv, pipe);
2055
ab9412ba 2056 reg = PCH_TRANSCONF(pipe);
040484af
JB
2057 val = I915_READ(reg);
2058 val &= ~TRANS_ENABLE;
2059 I915_WRITE(reg, val);
2060 /* wait for PCH transcoder off, transcoder state */
2061 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4bb6f1f3 2062 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
23670b32 2063
c465613b 2064 if (HAS_PCH_CPT(dev)) {
23670b32
DV
2065 /* Workaround: Clear the timing override chicken bit again. */
2066 reg = TRANS_CHICKEN2(pipe);
2067 val = I915_READ(reg);
2068 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
2069 I915_WRITE(reg, val);
2070 }
040484af
JB
2071}
2072
ab4d966c 2073static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
8fb033d7 2074{
8fb033d7
PZ
2075 u32 val;
2076
ab9412ba 2077 val = I915_READ(LPT_TRANSCONF);
8fb033d7 2078 val &= ~TRANS_ENABLE;
ab9412ba 2079 I915_WRITE(LPT_TRANSCONF, val);
8fb033d7 2080 /* wait for PCH transcoder off, transcoder state */
ab9412ba 2081 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
8a52fd9f 2082 DRM_ERROR("Failed to disable PCH transcoder\n");
223a6fdf
PZ
2083
2084 /* Workaround: clear timing override bit. */
36c0d0cf 2085 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 2086 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 2087 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
040484af
JB
2088}
2089
b24e7179 2090/**
309cfea8 2091 * intel_enable_pipe - enable a pipe, asserting requirements
0372264a 2092 * @crtc: crtc responsible for the pipe
b24e7179 2093 *
0372264a 2094 * Enable @crtc's pipe, making sure that various hardware specific requirements
b24e7179 2095 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
b24e7179 2096 */
e1fdc473 2097static void intel_enable_pipe(struct intel_crtc *crtc)
b24e7179 2098{
0372264a
PZ
2099 struct drm_device *dev = crtc->base.dev;
2100 struct drm_i915_private *dev_priv = dev->dev_private;
2101 enum pipe pipe = crtc->pipe;
1a70a728 2102 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1a240d4d 2103 enum pipe pch_transcoder;
f0f59a00 2104 i915_reg_t reg;
b24e7179
JB
2105 u32 val;
2106
9e2ee2dd
VS
2107 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
2108
58c6eaa2 2109 assert_planes_disabled(dev_priv, pipe);
93ce0ba6 2110 assert_cursor_disabled(dev_priv, pipe);
58c6eaa2
DV
2111 assert_sprites_disabled(dev_priv, pipe);
2112
681e5811 2113 if (HAS_PCH_LPT(dev_priv->dev))
cc391bbb
PZ
2114 pch_transcoder = TRANSCODER_A;
2115 else
2116 pch_transcoder = pipe;
2117
b24e7179
JB
2118 /*
2119 * A pipe without a PLL won't actually be able to drive bits from
2120 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
2121 * need the check.
2122 */
50360403 2123 if (HAS_GMCH_DISPLAY(dev_priv->dev))
409ee761 2124 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
23538ef1
JN
2125 assert_dsi_pll_enabled(dev_priv);
2126 else
2127 assert_pll_enabled(dev_priv, pipe);
040484af 2128 else {
6e3c9717 2129 if (crtc->config->has_pch_encoder) {
040484af 2130 /* if driving the PCH, we need FDI enabled */
cc391bbb 2131 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1a240d4d
DV
2132 assert_fdi_tx_pll_enabled(dev_priv,
2133 (enum pipe) cpu_transcoder);
040484af
JB
2134 }
2135 /* FIXME: assert CPU port conditions for SNB+ */
2136 }
b24e7179 2137
702e7a56 2138 reg = PIPECONF(cpu_transcoder);
b24e7179 2139 val = I915_READ(reg);
7ad25d48 2140 if (val & PIPECONF_ENABLE) {
b6b5d049
VS
2141 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2142 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
00d70b15 2143 return;
7ad25d48 2144 }
00d70b15
CW
2145
2146 I915_WRITE(reg, val | PIPECONF_ENABLE);
851855d8 2147 POSTING_READ(reg);
b24e7179
JB
2148}
2149
2150/**
309cfea8 2151 * intel_disable_pipe - disable a pipe, asserting requirements
575f7ab7 2152 * @crtc: crtc whose pipes is to be disabled
b24e7179 2153 *
575f7ab7
VS
2154 * Disable the pipe of @crtc, making sure that various hardware
2155 * specific requirements are met, if applicable, e.g. plane
2156 * disabled, panel fitter off, etc.
b24e7179
JB
2157 *
2158 * Will wait until the pipe has shut down before returning.
2159 */
575f7ab7 2160static void intel_disable_pipe(struct intel_crtc *crtc)
b24e7179 2161{
575f7ab7 2162 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
6e3c9717 2163 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
575f7ab7 2164 enum pipe pipe = crtc->pipe;
f0f59a00 2165 i915_reg_t reg;
b24e7179
JB
2166 u32 val;
2167
9e2ee2dd
VS
2168 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2169
b24e7179
JB
2170 /*
2171 * Make sure planes won't keep trying to pump pixels to us,
2172 * or we might hang the display.
2173 */
2174 assert_planes_disabled(dev_priv, pipe);
93ce0ba6 2175 assert_cursor_disabled(dev_priv, pipe);
19332d7a 2176 assert_sprites_disabled(dev_priv, pipe);
b24e7179 2177
702e7a56 2178 reg = PIPECONF(cpu_transcoder);
b24e7179 2179 val = I915_READ(reg);
00d70b15
CW
2180 if ((val & PIPECONF_ENABLE) == 0)
2181 return;
2182
67adc644
VS
2183 /*
2184 * Double wide has implications for planes
2185 * so best keep it disabled when not needed.
2186 */
6e3c9717 2187 if (crtc->config->double_wide)
67adc644
VS
2188 val &= ~PIPECONF_DOUBLE_WIDE;
2189
2190 /* Don't disable pipe or pipe PLLs if needed */
b6b5d049
VS
2191 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2192 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
67adc644
VS
2193 val &= ~PIPECONF_ENABLE;
2194
2195 I915_WRITE(reg, val);
2196 if ((val & PIPECONF_ENABLE) == 0)
2197 intel_wait_for_pipe_off(crtc);
b24e7179
JB
2198}
2199
693db184
CW
2200static bool need_vtd_wa(struct drm_device *dev)
2201{
2202#ifdef CONFIG_INTEL_IOMMU
2203 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2204 return true;
2205#endif
2206 return false;
2207}
2208
50470bb0 2209unsigned int
6761dd31 2210intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
fe47ea0c 2211 uint64_t fb_format_modifier, unsigned int plane)
a57ce0b2 2212{
6761dd31
TU
2213 unsigned int tile_height;
2214 uint32_t pixel_bytes;
a57ce0b2 2215
b5d0e9bf
DL
2216 switch (fb_format_modifier) {
2217 case DRM_FORMAT_MOD_NONE:
2218 tile_height = 1;
2219 break;
2220 case I915_FORMAT_MOD_X_TILED:
2221 tile_height = IS_GEN2(dev) ? 16 : 8;
2222 break;
2223 case I915_FORMAT_MOD_Y_TILED:
2224 tile_height = 32;
2225 break;
2226 case I915_FORMAT_MOD_Yf_TILED:
fe47ea0c 2227 pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
6761dd31 2228 switch (pixel_bytes) {
b5d0e9bf 2229 default:
6761dd31 2230 case 1:
b5d0e9bf
DL
2231 tile_height = 64;
2232 break;
6761dd31
TU
2233 case 2:
2234 case 4:
b5d0e9bf
DL
2235 tile_height = 32;
2236 break;
6761dd31 2237 case 8:
b5d0e9bf
DL
2238 tile_height = 16;
2239 break;
6761dd31 2240 case 16:
b5d0e9bf
DL
2241 WARN_ONCE(1,
2242 "128-bit pixels are not supported for display!");
2243 tile_height = 16;
2244 break;
2245 }
2246 break;
2247 default:
2248 MISSING_CASE(fb_format_modifier);
2249 tile_height = 1;
2250 break;
2251 }
091df6cb 2252
6761dd31
TU
2253 return tile_height;
2254}
2255
2256unsigned int
2257intel_fb_align_height(struct drm_device *dev, unsigned int height,
2258 uint32_t pixel_format, uint64_t fb_format_modifier)
2259{
2260 return ALIGN(height, intel_tile_height(dev, pixel_format,
fe47ea0c 2261 fb_format_modifier, 0));
a57ce0b2
JB
2262}
2263
f64b98cd
TU
2264static int
2265intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
2266 const struct drm_plane_state *plane_state)
2267{
50470bb0 2268 struct intel_rotation_info *info = &view->rotation_info;
84fe03f7 2269 unsigned int tile_height, tile_pitch;
50470bb0 2270
f64b98cd
TU
2271 *view = i915_ggtt_view_normal;
2272
50470bb0
TU
2273 if (!plane_state)
2274 return 0;
2275
121920fa 2276 if (!intel_rotation_90_or_270(plane_state->rotation))
50470bb0
TU
2277 return 0;
2278
9abc4648 2279 *view = i915_ggtt_view_rotated;
50470bb0
TU
2280
2281 info->height = fb->height;
2282 info->pixel_format = fb->pixel_format;
2283 info->pitch = fb->pitches[0];
89e3e142 2284 info->uv_offset = fb->offsets[1];
50470bb0
TU
2285 info->fb_modifier = fb->modifier[0];
2286
84fe03f7 2287 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fe47ea0c 2288 fb->modifier[0], 0);
84fe03f7
TU
2289 tile_pitch = PAGE_SIZE / tile_height;
2290 info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2291 info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
2292 info->size = info->width_pages * info->height_pages * PAGE_SIZE;
2293
89e3e142
TU
2294 if (info->pixel_format == DRM_FORMAT_NV12) {
2295 tile_height = intel_tile_height(fb->dev, fb->pixel_format,
2296 fb->modifier[0], 1);
2297 tile_pitch = PAGE_SIZE / tile_height;
2298 info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
2299 info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
2300 tile_height);
2301 info->size_uv = info->width_pages_uv * info->height_pages_uv *
2302 PAGE_SIZE;
2303 }
2304
f64b98cd
TU
2305 return 0;
2306}
2307
4e9a86b6
VS
2308static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
2309{
2310 if (INTEL_INFO(dev_priv)->gen >= 9)
2311 return 256 * 1024;
985b8bb4
VS
2312 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2313 IS_VALLEYVIEW(dev_priv))
4e9a86b6
VS
2314 return 128 * 1024;
2315 else if (INTEL_INFO(dev_priv)->gen >= 4)
2316 return 4 * 1024;
2317 else
44c5905e 2318 return 0;
4e9a86b6
VS
2319}
2320
127bd2ac 2321int
850c4cdc
TU
2322intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2323 struct drm_framebuffer *fb,
7580d774 2324 const struct drm_plane_state *plane_state)
6b95a207 2325{
850c4cdc 2326 struct drm_device *dev = fb->dev;
ce453d81 2327 struct drm_i915_private *dev_priv = dev->dev_private;
850c4cdc 2328 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
f64b98cd 2329 struct i915_ggtt_view view;
6b95a207
KH
2330 u32 alignment;
2331 int ret;
2332
ebcdd39e
MR
2333 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2334
7b911adc
TU
2335 switch (fb->modifier[0]) {
2336 case DRM_FORMAT_MOD_NONE:
4e9a86b6 2337 alignment = intel_linear_alignment(dev_priv);
6b95a207 2338 break;
7b911adc 2339 case I915_FORMAT_MOD_X_TILED:
1fada4cc
DL
2340 if (INTEL_INFO(dev)->gen >= 9)
2341 alignment = 256 * 1024;
2342 else {
2343 /* pin() will align the object as required by fence */
2344 alignment = 0;
2345 }
6b95a207 2346 break;
7b911adc 2347 case I915_FORMAT_MOD_Y_TILED:
1327b9a1
DL
2348 case I915_FORMAT_MOD_Yf_TILED:
2349 if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
2350 "Y tiling bo slipped through, driver bug!\n"))
2351 return -EINVAL;
2352 alignment = 1 * 1024 * 1024;
2353 break;
6b95a207 2354 default:
7b911adc
TU
2355 MISSING_CASE(fb->modifier[0]);
2356 return -EINVAL;
6b95a207
KH
2357 }
2358
f64b98cd
TU
2359 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2360 if (ret)
2361 return ret;
2362
693db184
CW
2363 /* Note that the w/a also requires 64 PTE of padding following the
2364 * bo. We currently fill all unused PTE with the shadow page and so
2365 * we should always have valid PTE following the scanout preventing
2366 * the VT-d warning.
2367 */
2368 if (need_vtd_wa(dev) && alignment < 256 * 1024)
2369 alignment = 256 * 1024;
2370
d6dd6843
PZ
2371 /*
2372 * Global gtt pte registers are special registers which actually forward
2373 * writes to a chunk of system memory. Which means that there is no risk
2374 * that the register values disappear as soon as we call
2375 * intel_runtime_pm_put(), so it is correct to wrap only the
2376 * pin/unpin/fence and not more.
2377 */
2378 intel_runtime_pm_get(dev_priv);
2379
7580d774
ML
2380 ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2381 &view);
48b956c5 2382 if (ret)
b26a6b35 2383 goto err_pm;
6b95a207
KH
2384
2385 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2386 * fence, whereas 965+ only requires a fence if using
2387 * framebuffer compression. For simplicity, we always install
2388 * a fence as the cost is not that onerous.
2389 */
06d98131 2390 ret = i915_gem_object_get_fence(obj);
842315ee
ML
2391 if (ret == -EDEADLK) {
2392 /*
2393 * -EDEADLK means there are no free fences
2394 * no pending flips.
2395 *
2396 * This is propagated to atomic, but it uses
2397 * -EDEADLK to force a locking recovery, so
2398 * change the returned error to -EBUSY.
2399 */
2400 ret = -EBUSY;
2401 goto err_unpin;
2402 } else if (ret)
9a5a53b3 2403 goto err_unpin;
1690e1eb 2404
9a5a53b3 2405 i915_gem_object_pin_fence(obj);
6b95a207 2406
d6dd6843 2407 intel_runtime_pm_put(dev_priv);
6b95a207 2408 return 0;
48b956c5
CW
2409
2410err_unpin:
f64b98cd 2411 i915_gem_object_unpin_from_display_plane(obj, &view);
b26a6b35 2412err_pm:
d6dd6843 2413 intel_runtime_pm_put(dev_priv);
48b956c5 2414 return ret;
6b95a207
KH
2415}
2416
82bc3b2d
TU
2417static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
2418 const struct drm_plane_state *plane_state)
1690e1eb 2419{
82bc3b2d 2420 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
f64b98cd
TU
2421 struct i915_ggtt_view view;
2422 int ret;
82bc3b2d 2423
ebcdd39e
MR
2424 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2425
f64b98cd
TU
2426 ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
2427 WARN_ONCE(ret, "Couldn't get view from plane state!");
2428
1690e1eb 2429 i915_gem_object_unpin_fence(obj);
f64b98cd 2430 i915_gem_object_unpin_from_display_plane(obj, &view);
1690e1eb
CW
2431}
2432
c2c75131
DV
2433/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
2434 * is assumed to be a power-of-two. */
4e9a86b6
VS
2435unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
2436 int *x, int *y,
bc752862
CW
2437 unsigned int tiling_mode,
2438 unsigned int cpp,
2439 unsigned int pitch)
c2c75131 2440{
bc752862
CW
2441 if (tiling_mode != I915_TILING_NONE) {
2442 unsigned int tile_rows, tiles;
c2c75131 2443
bc752862
CW
2444 tile_rows = *y / 8;
2445 *y %= 8;
c2c75131 2446
bc752862
CW
2447 tiles = *x / (512/cpp);
2448 *x %= 512/cpp;
2449
2450 return tile_rows * pitch * 8 + tiles * 4096;
2451 } else {
4e9a86b6 2452 unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
bc752862
CW
2453 unsigned int offset;
2454
2455 offset = *y * pitch + *x * cpp;
4e9a86b6
VS
2456 *y = (offset & alignment) / pitch;
2457 *x = ((offset & alignment) - *y * pitch) / cpp;
2458 return offset & ~alignment;
bc752862 2459 }
c2c75131
DV
2460}
2461
b35d63fa 2462static int i9xx_format_to_fourcc(int format)
46f297fb
JB
2463{
2464 switch (format) {
2465 case DISPPLANE_8BPP:
2466 return DRM_FORMAT_C8;
2467 case DISPPLANE_BGRX555:
2468 return DRM_FORMAT_XRGB1555;
2469 case DISPPLANE_BGRX565:
2470 return DRM_FORMAT_RGB565;
2471 default:
2472 case DISPPLANE_BGRX888:
2473 return DRM_FORMAT_XRGB8888;
2474 case DISPPLANE_RGBX888:
2475 return DRM_FORMAT_XBGR8888;
2476 case DISPPLANE_BGRX101010:
2477 return DRM_FORMAT_XRGB2101010;
2478 case DISPPLANE_RGBX101010:
2479 return DRM_FORMAT_XBGR2101010;
2480 }
2481}
2482
bc8d7dff
DL
2483static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2484{
2485 switch (format) {
2486 case PLANE_CTL_FORMAT_RGB_565:
2487 return DRM_FORMAT_RGB565;
2488 default:
2489 case PLANE_CTL_FORMAT_XRGB_8888:
2490 if (rgb_order) {
2491 if (alpha)
2492 return DRM_FORMAT_ABGR8888;
2493 else
2494 return DRM_FORMAT_XBGR8888;
2495 } else {
2496 if (alpha)
2497 return DRM_FORMAT_ARGB8888;
2498 else
2499 return DRM_FORMAT_XRGB8888;
2500 }
2501 case PLANE_CTL_FORMAT_XRGB_2101010:
2502 if (rgb_order)
2503 return DRM_FORMAT_XBGR2101010;
2504 else
2505 return DRM_FORMAT_XRGB2101010;
2506 }
2507}
2508
5724dbd1 2509static bool
f6936e29
DV
2510intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2511 struct intel_initial_plane_config *plane_config)
46f297fb
JB
2512{
2513 struct drm_device *dev = crtc->base.dev;
3badb49f 2514 struct drm_i915_private *dev_priv = to_i915(dev);
46f297fb
JB
2515 struct drm_i915_gem_object *obj = NULL;
2516 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2d14030b 2517 struct drm_framebuffer *fb = &plane_config->fb->base;
f37b5c2b
DV
2518 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2519 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2520 PAGE_SIZE);
2521
2522 size_aligned -= base_aligned;
46f297fb 2523
ff2652ea
CW
2524 if (plane_config->size == 0)
2525 return false;
2526
3badb49f
PZ
2527 /* If the FB is too big, just don't use it since fbdev is not very
2528 * important and we should probably use that space with FBC or other
2529 * features. */
2530 if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
2531 return false;
2532
f37b5c2b
DV
2533 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2534 base_aligned,
2535 base_aligned,
2536 size_aligned);
46f297fb 2537 if (!obj)
484b41dd 2538 return false;
46f297fb 2539
49af449b
DL
2540 obj->tiling_mode = plane_config->tiling;
2541 if (obj->tiling_mode == I915_TILING_X)
6bf129df 2542 obj->stride = fb->pitches[0];
46f297fb 2543
6bf129df
DL
2544 mode_cmd.pixel_format = fb->pixel_format;
2545 mode_cmd.width = fb->width;
2546 mode_cmd.height = fb->height;
2547 mode_cmd.pitches[0] = fb->pitches[0];
18c5247e
DV
2548 mode_cmd.modifier[0] = fb->modifier[0];
2549 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
46f297fb
JB
2550
2551 mutex_lock(&dev->struct_mutex);
6bf129df 2552 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
484b41dd 2553 &mode_cmd, obj)) {
46f297fb
JB
2554 DRM_DEBUG_KMS("intel fb init failed\n");
2555 goto out_unref_obj;
2556 }
46f297fb 2557 mutex_unlock(&dev->struct_mutex);
484b41dd 2558
f6936e29 2559 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
484b41dd 2560 return true;
46f297fb
JB
2561
2562out_unref_obj:
2563 drm_gem_object_unreference(&obj->base);
2564 mutex_unlock(&dev->struct_mutex);
484b41dd
JB
2565 return false;
2566}
2567
afd65eb4
MR
2568/* Update plane->state->fb to match plane->fb after driver-internal updates */
2569static void
2570update_state_fb(struct drm_plane *plane)
2571{
2572 if (plane->fb == plane->state->fb)
2573 return;
2574
2575 if (plane->state->fb)
2576 drm_framebuffer_unreference(plane->state->fb);
2577 plane->state->fb = plane->fb;
2578 if (plane->state->fb)
2579 drm_framebuffer_reference(plane->state->fb);
2580}
2581
5724dbd1 2582static void
f6936e29
DV
2583intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2584 struct intel_initial_plane_config *plane_config)
484b41dd
JB
2585{
2586 struct drm_device *dev = intel_crtc->base.dev;
d9ceb816 2587 struct drm_i915_private *dev_priv = dev->dev_private;
484b41dd
JB
2588 struct drm_crtc *c;
2589 struct intel_crtc *i;
2ff8fde1 2590 struct drm_i915_gem_object *obj;
88595ac9 2591 struct drm_plane *primary = intel_crtc->base.primary;
be5651f2 2592 struct drm_plane_state *plane_state = primary->state;
88595ac9 2593 struct drm_framebuffer *fb;
484b41dd 2594
2d14030b 2595 if (!plane_config->fb)
484b41dd
JB
2596 return;
2597
f6936e29 2598 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
88595ac9
DV
2599 fb = &plane_config->fb->base;
2600 goto valid_fb;
f55548b5 2601 }
484b41dd 2602
2d14030b 2603 kfree(plane_config->fb);
484b41dd
JB
2604
2605 /*
2606 * Failed to alloc the obj, check to see if we should share
2607 * an fb with another CRTC instead
2608 */
70e1e0ec 2609 for_each_crtc(dev, c) {
484b41dd
JB
2610 i = to_intel_crtc(c);
2611
2612 if (c == &intel_crtc->base)
2613 continue;
2614
2ff8fde1
MR
2615 if (!i->active)
2616 continue;
2617
88595ac9
DV
2618 fb = c->primary->fb;
2619 if (!fb)
484b41dd
JB
2620 continue;
2621
88595ac9 2622 obj = intel_fb_obj(fb);
2ff8fde1 2623 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
88595ac9
DV
2624 drm_framebuffer_reference(fb);
2625 goto valid_fb;
484b41dd
JB
2626 }
2627 }
88595ac9
DV
2628
2629 return;
2630
2631valid_fb:
be5651f2
ML
2632 plane_state->src_x = plane_state->src_y = 0;
2633 plane_state->src_w = fb->width << 16;
2634 plane_state->src_h = fb->height << 16;
2635
2636 plane_state->crtc_x = plane_state->src_y = 0;
2637 plane_state->crtc_w = fb->width;
2638 plane_state->crtc_h = fb->height;
2639
88595ac9
DV
2640 obj = intel_fb_obj(fb);
2641 if (obj->tiling_mode != I915_TILING_NONE)
2642 dev_priv->preserve_bios_swizzle = true;
2643
be5651f2
ML
2644 drm_framebuffer_reference(fb);
2645 primary->fb = primary->state->fb = fb;
36750f28 2646 primary->crtc = primary->state->crtc = &intel_crtc->base;
36750f28 2647 intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
a9ff8714 2648 obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
46f297fb
JB
2649}
2650
29b9bde6
DV
/*
 * Program the primary (display) plane registers for gen2-gen4 style
 * hardware (also used on VLV/CHV). Called with the fb already pinned;
 * writes DSPCNTR and the surface/offset registers for @crtc's plane.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	/* Plane disabled or no fb: clear control and base registers. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B has its own size/position/alpha registers. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the DRM fourcc into the hardware pixel-format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-validated, so anything else is a driver bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ takes a page-aligned surface base plus x/y offsets;
		 * compute_page_offset adjusts x/y to be relative to that base. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2780
29b9bde6
DV
/*
 * Program the primary plane registers for ILK/SNB/IVB/HSW/BDW.
 * Same contract as i9xx_update_primary_plane() but using the PCH-split
 * register layout (always DSPSURF-based, HSW/BDW use DSPOFFSET).
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	/* Plane disabled or no fb: clear control and surface registers. */
	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the DRM fourcc into the hardware pixel-format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-validated, so anything else is a driver bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the offset adjustment in hardware, so only
		 * older PCH-split platforms need the manual x/y flip. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2884
b321803d
DL
2885u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
2886 uint32_t pixel_format)
2887{
2888 u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
2889
2890 /*
2891 * The stride is either expressed as a multiple of 64 bytes
2892 * chunks for linear buffers or in number of tiles for tiled
2893 * buffers.
2894 */
2895 switch (fb_modifier) {
2896 case DRM_FORMAT_MOD_NONE:
2897 return 64;
2898 case I915_FORMAT_MOD_X_TILED:
2899 if (INTEL_INFO(dev)->gen == 2)
2900 return 128;
2901 return 512;
2902 case I915_FORMAT_MOD_Y_TILED:
2903 /* No need to check for old gens and Y tiling since this is
2904 * about the display engine and those will be blocked before
2905 * we get here.
2906 */
2907 return 128;
2908 case I915_FORMAT_MOD_Yf_TILED:
2909 if (bits_per_pixel == 8)
2910 return 64;
2911 else
2912 return 128;
2913 default:
2914 MISSING_CASE(fb_modifier);
2915 return 64;
2916 }
2917}
2918
44eb0cb9
MK
/*
 * Look up the GGTT offset of @obj for scanout by @intel_plane. Uses the
 * rotated GGTT view when the plane state requests 90/270 rotation.
 * @plane selects the color plane: 1 means the UV plane of a planar
 * format, whose pages start at uv_start_page within the rotated view.
 * Returns the lower 32 bits of the offset, or -1 (as u32) if no GGTT
 * vma exists for the requested view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
	struct i915_vma *vma;
	u64 offset;

	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
		view = &i915_ggtt_view_rotated;

	vma = i915_gem_obj_to_ggtt_view(obj, view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view->type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.rotation_info.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display registers only take 32-bit offsets. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2946
e435d6e5
ML
2947static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2948{
2949 struct drm_device *dev = intel_crtc->base.dev;
2950 struct drm_i915_private *dev_priv = dev->dev_private;
2951
2952 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2953 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2954 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
e435d6e5
ML
2955}
2956
a1b2278e
CK
2957/*
2958 * This function detaches (aka. unbinds) unused scalers in hardware
2959 */
0583236e 2960static void skl_detach_scalers(struct intel_crtc *intel_crtc)
a1b2278e 2961{
a1b2278e
CK
2962 struct intel_crtc_scaler_state *scaler_state;
2963 int i;
2964
a1b2278e
CK
2965 scaler_state = &intel_crtc->config->scaler_state;
2966
2967 /* loop through and disable scalers that aren't in use */
2968 for (i = 0; i < intel_crtc->num_scalers; i++) {
e435d6e5
ML
2969 if (!scaler_state->scalers[i].in_use)
2970 skl_detach_scaler(intel_crtc, i);
a1b2278e
CK
2971 }
2972}
2973
6156a456 2974u32 skl_plane_ctl_format(uint32_t pixel_format)
70d21f0e 2975{
6156a456 2976 switch (pixel_format) {
d161cf7a 2977 case DRM_FORMAT_C8:
c34ce3d1 2978 return PLANE_CTL_FORMAT_INDEXED;
70d21f0e 2979 case DRM_FORMAT_RGB565:
c34ce3d1 2980 return PLANE_CTL_FORMAT_RGB_565;
70d21f0e 2981 case DRM_FORMAT_XBGR8888:
c34ce3d1 2982 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
6156a456 2983 case DRM_FORMAT_XRGB8888:
c34ce3d1 2984 return PLANE_CTL_FORMAT_XRGB_8888;
6156a456
CK
2985 /*
2986 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
2987 * to be already pre-multiplied. We need to add a knob (or a different
2988 * DRM_FORMAT) for user-space to configure that.
2989 */
f75fb42a 2990 case DRM_FORMAT_ABGR8888:
c34ce3d1 2991 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
6156a456 2992 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
6156a456 2993 case DRM_FORMAT_ARGB8888:
c34ce3d1 2994 return PLANE_CTL_FORMAT_XRGB_8888 |
6156a456 2995 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
70d21f0e 2996 case DRM_FORMAT_XRGB2101010:
c34ce3d1 2997 return PLANE_CTL_FORMAT_XRGB_2101010;
70d21f0e 2998 case DRM_FORMAT_XBGR2101010:
c34ce3d1 2999 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
6156a456 3000 case DRM_FORMAT_YUYV:
c34ce3d1 3001 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
6156a456 3002 case DRM_FORMAT_YVYU:
c34ce3d1 3003 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
6156a456 3004 case DRM_FORMAT_UYVY:
c34ce3d1 3005 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
6156a456 3006 case DRM_FORMAT_VYUY:
c34ce3d1 3007 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
70d21f0e 3008 default:
4249eeef 3009 MISSING_CASE(pixel_format);
70d21f0e 3010 }
8cfcba41 3011
c34ce3d1 3012 return 0;
6156a456 3013}
70d21f0e 3014
6156a456
CK
3015u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
3016{
6156a456 3017 switch (fb_modifier) {
30af77c4 3018 case DRM_FORMAT_MOD_NONE:
70d21f0e 3019 break;
30af77c4 3020 case I915_FORMAT_MOD_X_TILED:
c34ce3d1 3021 return PLANE_CTL_TILED_X;
b321803d 3022 case I915_FORMAT_MOD_Y_TILED:
c34ce3d1 3023 return PLANE_CTL_TILED_Y;
b321803d 3024 case I915_FORMAT_MOD_Yf_TILED:
c34ce3d1 3025 return PLANE_CTL_TILED_YF;
70d21f0e 3026 default:
6156a456 3027 MISSING_CASE(fb_modifier);
70d21f0e 3028 }
8cfcba41 3029
c34ce3d1 3030 return 0;
6156a456 3031}
70d21f0e 3032
6156a456
CK
3033u32 skl_plane_ctl_rotation(unsigned int rotation)
3034{
3b7a5119 3035 switch (rotation) {
6156a456
CK
3036 case BIT(DRM_ROTATE_0):
3037 break;
1e8df167
SJ
3038 /*
3039 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
3040 * while i915 HW rotation is clockwise, thats why this swapping.
3041 */
3b7a5119 3042 case BIT(DRM_ROTATE_90):
1e8df167 3043 return PLANE_CTL_ROTATE_270;
3b7a5119 3044 case BIT(DRM_ROTATE_180):
c34ce3d1 3045 return PLANE_CTL_ROTATE_180;
3b7a5119 3046 case BIT(DRM_ROTATE_270):
1e8df167 3047 return PLANE_CTL_ROTATE_90;
6156a456
CK
3048 default:
3049 MISSING_CASE(rotation);
3050 }
3051
c34ce3d1 3052 return 0;
6156a456
CK
3053}
3054
/*
 * Program the SKL+ universal plane 0 (primary) registers: format/tiling/
 * rotation control, source offset/size, stride, optional pipe scaler,
 * and finally the surface address (which latches the update).
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	/* Plane disabled or no fb: clear control and surface registers. */
	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	/* src coordinates are 16.16 fixed point; dst are integer pixels. */
	scaler_id = plane_state->scaler_id;
	src_x = plane_state->src.x1 >> 16;
	src_y = plane_state->src.y1 >> 16;
	src_w = drm_rect_width(&plane_state->src) >> 16;
	src_h = drm_rect_height(&plane_state->src) >> 16;
	dst_x = plane_state->dst.x1;
	dst_y = plane_state->dst.y1;
	dst_w = drm_rect_width(&plane_state->dst);
	dst_h = drm_rect_height(&plane_state->dst);

	WARN_ON(x != src_x || y != src_y);

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* x/y swap for the rotated GGTT view. */
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Plane is routed through a pipe scaler: program the scaler
		 * window with the dst rect and zero the plane position. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Writing PLANE_SURF last arms the whole update. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3159
17638cd6
JB
3160/* Assume fb object is pinned & idle & fenced and just update base pointers */
3161static int
3162intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3163 int x, int y, enum mode_set_atomic state)
3164{
3165 struct drm_device *dev = crtc->dev;
3166 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 3167
ff2a3117 3168 if (dev_priv->fbc.disable_fbc)
7733b49b 3169 dev_priv->fbc.disable_fbc(dev_priv);
81255565 3170
29b9bde6
DV
3171 dev_priv->display.update_primary_plane(crtc, fb, x, y);
3172
3173 return 0;
81255565
JB
3174}
3175
7514747d 3176static void intel_complete_page_flips(struct drm_device *dev)
96a02917 3177{
96a02917
VS
3178 struct drm_crtc *crtc;
3179
70e1e0ec 3180 for_each_crtc(dev, crtc) {
96a02917
VS
3181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3182 enum plane plane = intel_crtc->plane;
3183
3184 intel_prepare_page_flip(dev, plane);
3185 intel_finish_page_flip_plane(dev, plane);
3186 }
7514747d
VS
3187}
3188
3189static void intel_update_primary_planes(struct drm_device *dev)
3190{
7514747d 3191 struct drm_crtc *crtc;
96a02917 3192
70e1e0ec 3193 for_each_crtc(dev, crtc) {
11c22da6
ML
3194 struct intel_plane *plane = to_intel_plane(crtc->primary);
3195 struct intel_plane_state *plane_state;
96a02917 3196
11c22da6 3197 drm_modeset_lock_crtc(crtc, &plane->base);
11c22da6
ML
3198 plane_state = to_intel_plane_state(plane->base.state);
3199
f029ee82 3200 if (crtc->state->active && plane_state->base.fb)
11c22da6
ML
3201 plane->commit_plane(&plane->base, plane_state);
3202
3203 drm_modeset_unlock_crtc(crtc);
96a02917
VS
3204 }
3205}
3206
7514747d
VS
3207void intel_prepare_reset(struct drm_device *dev)
3208{
3209 /* no reset support for gen2 */
3210 if (IS_GEN2(dev))
3211 return;
3212
3213 /* reset doesn't touch the display */
3214 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
3215 return;
3216
3217 drm_modeset_lock_all(dev);
f98ce92f
VS
3218 /*
3219 * Disabling the crtcs gracefully seems nicer. Also the
3220 * g33 docs say we should at least disable all the planes.
3221 */
6b72d486 3222 intel_display_suspend(dev);
7514747d
VS
3223}
3224
/*
 * Bring the display back after a GPU reset. Counterpart of
 * intel_prepare_reset(); note the gen checks must match it.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	drm_modeset_unlock_all(dev);
}
3275
7d5e3799
CW
3276static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3277{
3278 struct drm_device *dev = crtc->dev;
3279 struct drm_i915_private *dev_priv = dev->dev_private;
3280 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7d5e3799
CW
3281 bool pending;
3282
3283 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
3284 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
3285 return false;
3286
5e2d7afc 3287 spin_lock_irq(&dev->event_lock);
7d5e3799 3288 pending = to_intel_crtc(crtc)->unpin_work != NULL;
5e2d7afc 3289 spin_unlock_irq(&dev->event_lock);
7d5e3799
CW
3290
3291 return pending;
3292}
3293
bfd16b2a
ML
/*
 * Apply a fastboot/flip-path pipe config update: refresh the legacy mode,
 * CSC, pipe source size and panel fitter state without a full modeset.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3338
5e84e1a4
ZW
/*
 * Switch the FDI TX/RX on @crtc's pipe from the training patterns to the
 * normal pixel/idle pattern once link training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different train-select field in FDI_TX_CTL. */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3380
8db9d77b
ZW
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll up to 5 times for bit lock (training pattern 1 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll up to 5 times for symbol lock (training pattern 2 result). */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3474
/* Voltage-swing / pre-emphasis settings tried in order by the SNB/IVB
 * FDI link training loops below. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3481
/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses a different train-pattern field in FDI_RX_CTL. */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Step through the vswing/pre-emphasis table until bit lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same vswing/pre-emphasis sweep, now waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3614
357555c0
JB
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/preemphasis level is attempted twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock; a second IIR read guards
		 * against the bit latching between the read and the test. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3734
/* Enable the FDI RX and TX PLLs for the given crtc's pipe, observing the
 * required warmup delays between each step. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC setting into the FDI RX bpc field (bits 18:16) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3771
88cefb6c
DV
/* Disable the FDI PLLs for the given crtc's pipe: switch RX back to the raw
 * clock, then turn off the TX and RX PLLs, waiting for the clocks to settle. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3801
0fc932b8
JB
/* Disable the CPU FDI transmitter and PCH FDI receiver for this crtc's pipe,
 * leaving both sides parked in training pattern 1. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	/* keep FDI RX bpc (bits 18:16) in sync with the pipe's PIPECONF bpc */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3854
5dce5b93
CW
/* Returns true if any crtc still has framebuffer unpin work outstanding,
 * waiting for a vblank on the first crtc found with a queued unpin. */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}
3878
d6bbafa1
CW
/* Complete a finished page flip: deliver the userspace vblank event (if
 * requested), drop the vblank reference and queue the unpin work.
 * Must be called with the event_lock held (callers take dev->event_lock). */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3901
/* Wait (interruptibly, up to 60s) for any pending page flip on @crtc to
 * finish. On timeout the stuck flip is forcibly completed so the pipe can
 * make progress. Returns 0 on success or a negative error if interrupted. */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		/* Timed out: the flip never completed, clean it up by hand. */
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
3931
e615efe4
ED
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* All sideband (SBI) accesses below are serialised by sb_lock. */
	mutex_lock(&dev_priv->sb_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->sb_lock);
}
4020
275f01b2
DV
/* Copy the CPU transcoder's mode timings into the PCH transcoder registers
 * so both sides of the FDI link agree on the display timings. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4044
/* Set or clear the FDI B/C lane bifurcation bit in SOUTH_CHICKEN1.
 * Both FDI B and C receivers must be disabled while this is toggled. */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* Nothing to do if the bit already matches the requested state. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4065
/* Decide FDI B/C lane bifurcation for this crtc's pipe: pipe B may use the
 * shared lanes only when it needs <= 2 FDI lanes; pipe C always requires
 * bifurcation; pipe A does not use the shared lanes at all. */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}
4088
c48b5305
VS
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is not a declared enum port value; the only
	 * caller in view (ironlake_pch_enable) invokes this when a DP
	 * encoder is expected and BUG()s on unrecognised values. */
	return -1;
}
4104
f67a559d
JB
/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4201
1507e5bd
PZ
/* LPT variant of PCH enable: program the iCLKIP clock, copy the timings to
 * the (single) PCH transcoder and enable it. LPT has no FDI training. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT has only transcoder A */
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4218
190f68c5
ACO
4219struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
4220 struct intel_crtc_state *crtc_state)
ee7b9f93 4221{
e2b78267 4222 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
8bd31e67 4223 struct intel_shared_dpll *pll;
de419ab6 4224 struct intel_shared_dpll_config *shared_dpll;
e2b78267 4225 enum intel_dpll_id i;
ee7b9f93 4226
de419ab6
ML
4227 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
4228
98b6bd99
DV
4229 if (HAS_PCH_IBX(dev_priv->dev)) {
4230 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
d94ab068 4231 i = (enum intel_dpll_id) crtc->pipe;
e72f9fbf 4232 pll = &dev_priv->shared_dplls[i];
98b6bd99 4233
46edb027
DV
4234 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4235 crtc->base.base.id, pll->name);
98b6bd99 4236
de419ab6 4237 WARN_ON(shared_dpll[i].crtc_mask);
f2a69f44 4238
98b6bd99
DV
4239 goto found;
4240 }
4241
bcddf610
S
4242 if (IS_BROXTON(dev_priv->dev)) {
4243 /* PLL is attached to port in bxt */
4244 struct intel_encoder *encoder;
4245 struct intel_digital_port *intel_dig_port;
4246
4247 encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
4248 if (WARN_ON(!encoder))
4249 return NULL;
4250
4251 intel_dig_port = enc_to_dig_port(&encoder->base);
4252 /* 1:1 mapping between ports and PLLs */
4253 i = (enum intel_dpll_id)intel_dig_port->port;
4254 pll = &dev_priv->shared_dplls[i];
4255 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
4256 crtc->base.base.id, pll->name);
de419ab6 4257 WARN_ON(shared_dpll[i].crtc_mask);
bcddf610
S
4258
4259 goto found;
4260 }
4261
e72f9fbf
DV
4262 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4263 pll = &dev_priv->shared_dplls[i];
ee7b9f93
JB
4264
4265 /* Only want to check enabled timings first */
de419ab6 4266 if (shared_dpll[i].crtc_mask == 0)
ee7b9f93
JB
4267 continue;
4268
190f68c5 4269 if (memcmp(&crtc_state->dpll_hw_state,
de419ab6
ML
4270 &shared_dpll[i].hw_state,
4271 sizeof(crtc_state->dpll_hw_state)) == 0) {
8bd31e67 4272 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
1e6f2ddc 4273 crtc->base.base.id, pll->name,
de419ab6 4274 shared_dpll[i].crtc_mask,
8bd31e67 4275 pll->active);
ee7b9f93
JB
4276 goto found;
4277 }
4278 }
4279
4280 /* Ok no matching timings, maybe there's a free one? */
e72f9fbf
DV
4281 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4282 pll = &dev_priv->shared_dplls[i];
de419ab6 4283 if (shared_dpll[i].crtc_mask == 0) {
46edb027
DV
4284 DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
4285 crtc->base.base.id, pll->name);
ee7b9f93
JB
4286 goto found;
4287 }
4288 }
4289
4290 return NULL;
4291
4292found:
de419ab6
ML
4293 if (shared_dpll[i].crtc_mask == 0)
4294 shared_dpll[i].hw_state =
4295 crtc_state->dpll_hw_state;
f2a69f44 4296
190f68c5 4297 crtc_state->shared_dpll = i;
46edb027
DV
4298 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
4299 pipe_name(crtc->pipe));
ee7b9f93 4300
de419ab6 4301 shared_dpll[i].crtc_mask |= 1 << crtc->pipe;
e04c7350 4302
ee7b9f93
JB
4303 return pll;
4304}
4305
/* Copy the staged shared-DPLL configuration from the atomic state into the
 * device-wide PLL bookkeeping. No-op if no DPLL changes were staged. */
static void intel_shared_dpll_commit(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_shared_dpll_config *shared_dpll;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (!to_intel_atomic_state(state)->dpll_set)
		return;

	shared_dpll = to_intel_atomic_state(state)->shared_dpll;
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];
		pll->config = shared_dpll[i];
	}
}
4322
/* Verify the pipe is actually running after a modeset by checking that the
 * scanline counter (PIPEDSL) advances; retry once before reporting failure. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4336
86adf9d7
ML
/* Stage a scaler request (or release) for one scaler user in @crtc_state.
 *
 * @force_detach: free the scaler regardless of whether scaling is needed.
 * @scaler_user: bit index identifying the user (crtc or plane).
 * @scaler_id: in/out; currently assigned scaler, set to -1 when freed.
 * @rotation / src/dst sizes: requested scaling parameters.
 *
 * Returns 0 on success, -EINVAL when the request is outside scaler limits.
 * Only the staged state is updated here; register programming happens later.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* For 90/270 rotation the source is scanned out transposed, so
	 * compare src dimensions against the swapped dst dimensions. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4397
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state containing the staged scaler state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	/* Pipe scaling: pipe source size -> active display size; detach the
	 * scaler when the crtc is inactive. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, DRM_ROTATE_0,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4420
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc state containing the staged scaler state to update
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* No fb or invisible plane means the scaler must be released. */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	/* src rect is in 16.16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4488
e435d6e5
ML
4489static void skylake_scaler_disable(struct intel_crtc *crtc)
4490{
4491 int i;
4492
4493 for (i = 0; i < crtc->num_scalers; i++)
4494 skl_detach_scaler(crtc, i);
4495}
4496
/* Program the SKL panel-fitter (pipe scaler) using the scaler previously
 * assigned in the crtc state; no-op when pch_pfit is not enabled. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been claimed during the atomic check. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4524
b074cec8
JB
/*
 * Program the ILK-BDW panel fitter (PF) for this pipe.
 * No-op when pch_pfit is not enabled in the crtc state.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			/* IVB/HSW additionally need the pipe selected in PF_CTL. */
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4545
20bc8673 4546void hsw_enable_ips(struct intel_crtc *crtc)
d77e4531 4547{
cea165c3
VS
4548 struct drm_device *dev = crtc->base.dev;
4549 struct drm_i915_private *dev_priv = dev->dev_private;
d77e4531 4550
6e3c9717 4551 if (!crtc->config->ips_enabled)
d77e4531
PZ
4552 return;
4553
cea165c3
VS
4554 /* We can only enable IPS after we enable a plane and wait for a vblank */
4555 intel_wait_for_vblank(dev, crtc->pipe);
4556
d77e4531 4557 assert_plane_enabled(dev_priv, crtc->plane);
cea165c3 4558 if (IS_BROADWELL(dev)) {
2a114cc1
BW
4559 mutex_lock(&dev_priv->rps.hw_lock);
4560 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4561 mutex_unlock(&dev_priv->rps.hw_lock);
4562 /* Quoting Art Runyan: "its not safe to expect any particular
4563 * value in IPS_CTL bit 31 after enabling IPS through the
e59150dc
JB
4564 * mailbox." Moreover, the mailbox may return a bogus state,
4565 * so we need to just enable it and continue on.
2a114cc1
BW
4566 */
4567 } else {
4568 I915_WRITE(IPS_CTL, IPS_ENABLE);
4569 /* The bit only becomes 1 in the next vblank, so this wait here
4570 * is essentially intel_wait_for_vblank. If we don't have this
4571 * and don't wait for vblanks until the end of crtc_enable, then
4572 * the HW state readout code will complain that the expected
4573 * IPS_CTL value is not the one we read. */
4574 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4575 DRM_ERROR("Timed out waiting for IPS enable\n");
4576 }
d77e4531
PZ
4577}
4578
20bc8673 4579void hsw_disable_ips(struct intel_crtc *crtc)
d77e4531
PZ
4580{
4581 struct drm_device *dev = crtc->base.dev;
4582 struct drm_i915_private *dev_priv = dev->dev_private;
4583
6e3c9717 4584 if (!crtc->config->ips_enabled)
d77e4531
PZ
4585 return;
4586
4587 assert_plane_enabled(dev_priv, crtc->plane);
23d0b130 4588 if (IS_BROADWELL(dev)) {
2a114cc1
BW
4589 mutex_lock(&dev_priv->rps.hw_lock);
4590 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4591 mutex_unlock(&dev_priv->rps.hw_lock);
23d0b130
BW
4592 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4593 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4594 DRM_ERROR("Timed out waiting for IPS disable\n");
e59150dc 4595 } else {
2a114cc1 4596 I915_WRITE(IPS_CTL, 0);
e59150dc
JB
4597 POSTING_READ(IPS_CTL);
4598 }
d77e4531
PZ
4599
4600 /* We need to wait for a vblank before we can disable the plane. */
4601 intel_wait_for_vblank(dev, crtc->pipe);
4602}
4603
/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	/* On GMCH platforms the relevant PLL must already be running. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 R/G/B entries packed into one register each. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	/* Restore IPS if the workaround above turned it off. */
	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}
4652
7cac945f 4653static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
d3eedb1a 4654{
7cac945f 4655 if (intel_crtc->overlay) {
d3eedb1a
VS
4656 struct drm_device *dev = intel_crtc->base.dev;
4657 struct drm_i915_private *dev_priv = dev->dev_private;
4658
4659 mutex_lock(&dev->struct_mutex);
4660 dev_priv->mm.interruptible = false;
4661 (void) intel_overlay_switch_off(intel_crtc->overlay);
4662 dev_priv->mm.interruptible = true;
4663 mutex_unlock(&dev->struct_mutex);
4664 }
4665
4666 /* Let userspace switch the overlay on again. In most cases userspace
4667 * has to recompute where to put it anyway.
4668 */
4669}
4670
87d4300a
ML
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, pipe);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
4719
87d4300a
ML
/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS. Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
4770
ac21b225
ML
/*
 * Run the deferred work recorded in crtc->atomic after a plane update
 * has been committed (vblank wait, frontbuffer flip notification, cxsr,
 * watermarks, FBC, primary-plane post-enable), then clear the record.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (atomic->wait_vblank)
		intel_wait_for_vblank(dev, crtc->pipe);

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	if (atomic->disable_cxsr)
		/* cxsr was turned off in intel_pre_plane_update(); allow it again. */
		crtc->wm.cxsr_allowed = true;

	if (crtc->atomic.update_wm_post)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_update(dev_priv);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	/* Reset the scratchpad for the next commit. */
	memset(atomic, 0, sizeof(*atomic));
}
4796
/*
 * Run the deferred work recorded in crtc->atomic before a plane update is
 * committed: disable FBC, IPS, the primary plane pre-disable hook and
 * memory self-refresh (cxsr) as requested.
 */
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;

	if (atomic->disable_fbc)
		intel_fbc_disable_crtc(crtc);

	if (crtc->atomic.disable_ips)
		hsw_disable_ips(crtc);

	if (atomic->pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	if (atomic->disable_cxsr) {
		/* Re-enabled in intel_post_plane_update(). */
		crtc->wm.cxsr_allowed = false;
		intel_set_memory_cxsr(dev_priv, false);
	}
}
4817
d032ffa0 4818static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
87d4300a
ML
4819{
4820 struct drm_device *dev = crtc->dev;
4821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
d032ffa0 4822 struct drm_plane *p;
87d4300a
ML
4823 int pipe = intel_crtc->pipe;
4824
7cac945f 4825 intel_crtc_dpms_overlay_disable(intel_crtc);
27321ae8 4826
d032ffa0
ML
4827 drm_for_each_plane_mask(p, dev, plane_mask)
4828 to_intel_plane(p)->disable_plane(p, crtc);
f98551ae 4829
f99d7069
DV
4830 /*
4831 * FIXME: Once we grow proper nuclear flip support out of this we need
4832 * to compute the mask of flip planes precisely. For the time being
4833 * consider this a flip to a NULL plane.
4834 */
4835 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
a5c4d7bc
VS
4836}
4837
f67a559d
JB
/*
 * Full modeset enable sequence for an ILK-class crtc. The step order below
 * (DPLL prep, timings, pipeconf, encoder pre_enable, FDI PLL, pfit, LUT,
 * watermarks, pipe, PCH, vblank, encoder enable) is hardware-mandated;
 * see the inline notes before reordering anything.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	/* Suppress PCH underrun reports until the output is fully up. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4913
42db64ef
PZ
4914/* IPS only exists on ULT machines and is tied to pipe A. */
4915static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4916{
f5adf94e 4917 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
42db64ef
PZ
4918}
4919
4f771f10
PZ
/*
 * Full modeset enable sequence for a HSW+ (DDI) crtc. Like the ILK path
 * but with shared DPLLs, DDI pipe clock/transcoder function enabling, the
 * SKL scaler vs ILK pfit split, MST VC payload allocation and the HSW
 * double-vblank workaround pipe. Step order is hardware-mandated.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (WARN_ON(intel_crtc->active))
		return;

	/* Suppress PCH underrun reports until the output is fully up. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	/* DSI does not use the DDI pipe clock/transcoder function. */
	if (!is_dsi)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!is_dsi)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst && !is_dsi)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
5020
bfd16b2a 5021static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
3f8dce3a
DV
5022{
5023 struct drm_device *dev = crtc->base.dev;
5024 struct drm_i915_private *dev_priv = dev->dev_private;
5025 int pipe = crtc->pipe;
5026
5027 /* To avoid upsetting the power well on haswell only disable the pfit if
5028 * it's in use. The hw state code will make sure we get this right. */
bfd16b2a 5029 if (force || crtc->config->pch_pfit.enabled) {
3f8dce3a
DV
5030 I915_WRITE(PF_CTL(pipe), 0);
5031 I915_WRITE(PF_WIN_POS(pipe), 0);
5032 I915_WRITE(PF_WIN_SZ(pipe), 0);
5033 }
5034}
5035
6be4a607
JB
/*
 * Full modeset disable sequence for an ILK-class crtc: encoders, vblank,
 * pipe, pfit, FDI, encoder post_disable, then the PCH transcoder and
 * (on CPT) the TRANS_DP_CTL/DPLL_SEL cleanup, ending with the FDI PLL.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/* Suppress PCH underrun reports while tearing the output down. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1b3c7a47 5090
4f771f10 5091static void haswell_crtc_disable(struct drm_crtc *crtc)
ee7b9f93 5092{
4f771f10
PZ
5093 struct drm_device *dev = crtc->dev;
5094 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93 5095 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4f771f10 5096 struct intel_encoder *encoder;
6e3c9717 5097 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7d4aefd0 5098 bool is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
ee7b9f93 5099
d2d65408
VS
5100 if (intel_crtc->config->has_pch_encoder)
5101 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5102 false);
5103
8807e55b
JN
5104 for_each_encoder_on_crtc(dev, crtc, encoder) {
5105 intel_opregion_notify_encoder(encoder, false);
4f771f10 5106 encoder->disable(encoder);
8807e55b 5107 }
4f771f10 5108
f9b61ff6
DV
5109 drm_crtc_vblank_off(crtc);
5110 assert_vblank_disabled(crtc);
5111
575f7ab7 5112 intel_disable_pipe(intel_crtc);
4f771f10 5113
6e3c9717 5114 if (intel_crtc->config->dp_encoder_is_mst)
a4bf214f
VS
5115 intel_ddi_set_vc_payload_alloc(crtc, false);
5116
7d4aefd0
SS
5117 if (!is_dsi)
5118 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4f771f10 5119
1c132b44 5120 if (INTEL_INFO(dev)->gen >= 9)
e435d6e5 5121 skylake_scaler_disable(intel_crtc);
ff6d9f55 5122 else
bfd16b2a 5123 ironlake_pfit_disable(intel_crtc, false);
4f771f10 5124
7d4aefd0
SS
5125 if (!is_dsi)
5126 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10 5127
6e3c9717 5128 if (intel_crtc->config->has_pch_encoder) {
ab4d966c 5129 lpt_disable_pch_transcoder(dev_priv);
1ad960f2 5130 intel_ddi_fdi_disable(crtc);
83616634 5131 }
4f771f10 5132
97b040aa
ID
5133 for_each_encoder_on_crtc(dev, crtc, encoder)
5134 if (encoder->post_disable)
5135 encoder->post_disable(encoder);
81b088ca
VS
5136
5137 if (intel_crtc->config->has_pch_encoder)
5138 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5139 true);
4f771f10
PZ
5140}
5141
2dd24552
JB
/*
 * Program the GMCH panel fitter from the precomputed gmch_pfit state.
 * No-op when the state carries no pfit control value.
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5165
d05410f9
DA
/* Map a DDI port to the power domain that feeds its lanes. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		/* Unknown port: warn once and fall back to the generic domain. */
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5184
25f78f58
VS
/* Map a DDI port to the power domain needed for AUX channel transactions. */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		/* Unknown port: warn once and fall back to AUX A. */
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_AUX_A;
	}
}
5204
77d22dca
ID
/* Iterate @domain over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
5208
319be8ae
ID
/*
 * Return the power domain required to drive the port behind
 * @intel_encoder, based on the encoder's output type.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through: treat like the digital port types below */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams share the primary digital port's domain. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5235
25f78f58
VS
/*
 * Return the power domain required for AUX transactions on the port
 * behind @intel_encoder. Unlike intel_display_port_power_domain(), HDMI
 * is grouped with UNKNOWN here: after HDMI detect has claimed a shared
 * DDI encoder we may still need to run DP AUX detection on it.
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST streams use the primary digital port's AUX channel. */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_AUX_A;
	}
}
5265
319be8ae 5266static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
77d22dca 5267{
319be8ae
ID
5268 struct drm_device *dev = crtc->dev;
5269 struct intel_encoder *intel_encoder;
5270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5271 enum pipe pipe = intel_crtc->pipe;
77d22dca 5272 unsigned long mask;
1a70a728 5273 enum transcoder transcoder = intel_crtc->config->cpu_transcoder;
77d22dca 5274
292b990e
ML
5275 if (!crtc->state->active)
5276 return 0;
5277
77d22dca
ID
5278 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5279 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
6e3c9717
ACO
5280 if (intel_crtc->config->pch_pfit.enabled ||
5281 intel_crtc->config->pch_pfit.force_thru)
77d22dca
ID
5282 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5283
319be8ae
ID
5284 for_each_encoder_on_crtc(dev, crtc, intel_encoder)
5285 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5286
77d22dca
ID
5287 return mask;
5288}
5289
292b990e 5290static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
77d22dca 5291{
292b990e
ML
5292 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5293 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5294 enum intel_display_power_domain domain;
5295 unsigned long domains, new_domains, old_domains;
77d22dca 5296
292b990e
ML
5297 old_domains = intel_crtc->enabled_power_domains;
5298 intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);
77d22dca 5299
292b990e
ML
5300 domains = new_domains & ~old_domains;
5301
5302 for_each_power_domain(domain, domains)
5303 intel_display_power_get(dev_priv, domain);
5304
5305 return old_domains & ~new_domains;
5306}
5307
/* Drop a reference on every power domain set in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
77d22dca 5316
292b990e
ML
/*
 * For an atomic @state: acquire the power domains every modeset crtc now
 * needs, commit a cdclk change if one is pending, and only then release
 * the domains the crtcs no longer use.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		/* Changing cdclk is only legal in a full modeset. */
		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Release stale domains only after the new state is committed. */
	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}
5344
adafdc6f
MK
5345static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5346{
5347 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5348
5349 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5350 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5351 return max_cdclk_freq;
5352 else if (IS_CHERRYVIEW(dev_priv))
5353 return max_cdclk_freq*95/100;
5354 else if (INTEL_INFO(dev_priv)->gen < 4)
5355 return 2*max_cdclk_freq*90/100;
5356 else
5357 return max_cdclk_freq*90/100;
5358}
5359
560a7ae4
DL
/*
 * Read back the platform's maximum CD clock frequency into
 * dev_priv->max_cdclk_freq and recompute the derived max dotclock.
 * SKL/KBL read the limit from the SKL_DFSM fuse register, BDW from
 * FUSE_STRAP plus SKU, CHV/VLV use fixed values, and everything else
 * treats the current cdclk as fixed.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5407
/*
 * Re-read the current CD clock from hardware, cache it, reprogram the
 * VLV gmbus clock divider, and lazily initialize max_cdclk_freq.
 */
5408static void intel_update_cdclk(struct drm_device *dev)
5409{
5410	struct drm_i915_private *dev_priv = dev->dev_private;
5411
5412	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5413	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5414			 dev_priv->cdclk_freq);
5415
5421	if (IS_VALLEYVIEW(dev)) {
5422		/*
5423		 * Program the gmbus_freq based on the cdclk frequency.
5424		 * BSpec erroneously claims we should aim for 4MHz, but
5425		 * in fact 1MHz is the correct frequency.
5426		 */
5427		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5428	}
5429
	/* First call: the max has not been determined yet. */
5430	if (dev_priv->max_cdclk_freq == 0)
5431		intel_update_max_cdclk(dev);
5432}
5433
70d0c574 5434static void broxton_set_cdclk(struct drm_device *dev, int frequency)
f8437dd1
VK
5435{
5436 struct drm_i915_private *dev_priv = dev->dev_private;
5437 uint32_t divider;
5438 uint32_t ratio;
5439 uint32_t current_freq;
5440 int ret;
5441
5442 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5443 switch (frequency) {
5444 case 144000:
5445 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5446 ratio = BXT_DE_PLL_RATIO(60);
5447 break;
5448 case 288000:
5449 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5450 ratio = BXT_DE_PLL_RATIO(60);
5451 break;
5452 case 384000:
5453 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5454 ratio = BXT_DE_PLL_RATIO(60);
5455 break;
5456 case 576000:
5457 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5458 ratio = BXT_DE_PLL_RATIO(60);
5459 break;
5460 case 624000:
5461 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5462 ratio = BXT_DE_PLL_RATIO(65);
5463 break;
5464 case 19200:
5465 /*
5466 * Bypass frequency with DE PLL disabled. Init ratio, divider
5467 * to suppress GCC warning.
5468 */
5469 ratio = 0;
5470 divider = 0;
5471 break;
5472 default:
5473 DRM_ERROR("unsupported CDCLK freq %d", frequency);
5474
5475 return;
5476 }
5477
5478 mutex_lock(&dev_priv->rps.hw_lock);
5479 /* Inform power controller of upcoming frequency change */
5480 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5481 0x80000000);
5482 mutex_unlock(&dev_priv->rps.hw_lock);
5483
5484 if (ret) {
5485 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5486 ret, frequency);
5487 return;
5488 }
5489
5490 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5491 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5492 current_freq = current_freq * 500 + 1000;
5493
5494 /*
5495 * DE PLL has to be disabled when
5496 * - setting to 19.2MHz (bypass, PLL isn't used)
5497 * - before setting to 624MHz (PLL needs toggling)
5498 * - before setting to any frequency from 624MHz (PLL needs toggling)
5499 */
5500 if (frequency == 19200 || frequency == 624000 ||
5501 current_freq == 624000) {
5502 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5503 /* Timeout 200us */
5504 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5505 1))
5506 DRM_ERROR("timout waiting for DE PLL unlock\n");
5507 }
5508
5509 if (frequency != 19200) {
5510 uint32_t val;
5511
5512 val = I915_READ(BXT_DE_PLL_CTL);
5513 val &= ~BXT_DE_PLL_RATIO_MASK;
5514 val |= ratio;
5515 I915_WRITE(BXT_DE_PLL_CTL, val);
5516
5517 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5518 /* Timeout 200us */
5519 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5520 DRM_ERROR("timeout waiting for DE PLL lock\n");
5521
5522 val = I915_READ(CDCLK_CTL);
5523 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5524 val |= divider;
5525 /*
5526 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5527 * enable otherwise.
5528 */
5529 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5530 if (frequency >= 500000)
5531 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5532
5533 val &= ~CDCLK_FREQ_DECIMAL_MASK;
5534 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5535 val |= (frequency - 1000) / 500;
5536 I915_WRITE(CDCLK_CTL, val);
5537 }
5538
5539 mutex_lock(&dev_priv->rps.hw_lock);
5540 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5541 DIV_ROUND_UP(frequency, 25000));
5542 mutex_unlock(&dev_priv->rps.hw_lock);
5543
5544 if (ret) {
5545 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5546 ret, frequency);
5547 return;
5548 }
5549
a47871bd 5550 intel_update_cdclk(dev);
f8437dd1
VK
5551}
5552
/*
 * One-time BXT display clock bring-up: disable the PCH reset handshake,
 * take the PLL power domain, program an initial cdclk if the DE PLL is
 * not already running, and power up the DBUF.
 */
5553void broxton_init_cdclk(struct drm_device *dev)
5554{
5555	struct drm_i915_private *dev_priv = dev->dev_private;
5556	uint32_t val;
5557
5558	/*
5559	 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT
5560	 * or else the reset will hang because there is no PCH to respond.
5561	 * Move the handshake programming to initialization sequence.
5562	 * Previously was left up to BIOS.
5563	 */
5564	val = I915_READ(HSW_NDE_RSTWRN_OPT);
5565	val &= ~RESET_PCH_HANDSHAKE_ENABLE;
5566	I915_WRITE(HSW_NDE_RSTWRN_OPT, val);
5567
5568	/* Enable PG1 for cdclk */
5569	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
5570
5571	/* check if cd clock is enabled */
5572	if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) {
5573		DRM_DEBUG_KMS("Display already initialized\n");
5574		return;
5575	}
5576
5577	/*
5578	 * FIXME:
5579	 * - The initial CDCLK needs to be read from VBT.
5580	 *   Need to make this change after VBT has changes for BXT.
5581	 * - check if setting the max (or any) cdclk freq is really necessary
5582	 *   here, it belongs to modeset time
5583	 */
5584	broxton_set_cdclk(dev, 624000);
5585
5586	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
22e02c0b
VS
5587	POSTING_READ(DBUF_CTL);
5588
f8437dd1
VK
5589	udelay(10);
5590
5591	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5592		DRM_ERROR("DBuf power enable timeout!\n");
5593}
5594
/*
 * Reverse of broxton_init_cdclk(): power down the DBUF, drop cdclk to the
 * 19.2MHz bypass (disabling the DE PLL) and release the PLL power domain.
 */
5595void broxton_uninit_cdclk(struct drm_device *dev)
5596{
5597	struct drm_i915_private *dev_priv = dev->dev_private;
5598
5599	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
22e02c0b
VS
5600	POSTING_READ(DBUF_CTL);
5601
f8437dd1
VK
5602	udelay(10);
5603
5604	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5605		DRM_ERROR("DBuf power disable timeout!\n");
5606
5607	/* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5608	broxton_set_cdclk(dev, 19200);
5609
5610	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
5611}
5612
5d96d8af
DL
/*
 * Map of supported SKL cdclk frequencies (kHz) to the DPLL0 VCO they
 * require (presumably MHz — see skl_dpll0_enable()'s 8640/8100 checks).
 */
5613static const struct skl_cdclk_entry {
5614	unsigned int freq;
5615	unsigned int vco;
5616} skl_cdclk_frequencies[] = {
5617	{ .freq = 308570, .vco = 8640 },
5618	{ .freq = 337500, .vco = 8100 },
5619	{ .freq = 432000, .vco = 8640 },
5620	{ .freq = 450000, .vco = 8100 },
5621	{ .freq = 540000, .vco = 8100 },
5622	{ .freq = 617140, .vco = 8640 },
5623	{ .freq = 675000, .vco = 8100 },
5624};
5625
static unsigned int skl_cdclk_decimal(unsigned int freq)
{
	/*
	 * Encode a cdclk frequency in kHz as the CDCLK_CTL decimal field:
	 * .1-MHz fixed point with a -1 MHz offset (1 unit == 500 kHz).
	 */
	unsigned int khz_above_one_mhz = freq - 1000;

	return khz_above_one_mhz / 500;
}
5630
5631static unsigned int skl_cdclk_get_vco(unsigned int freq)
5632{
5633 unsigned int i;
5634
5635 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5636 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5637
5638 if (e->freq == freq)
5639 return e->vco;
5640 }
5641
5642 return 8100;
5643}
5644
5645static void
5646skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5647{
5648 unsigned int min_freq;
5649 u32 val;
5650
5651 /* select the minimum CDCLK before enabling DPLL 0 */
5652 val = I915_READ(CDCLK_CTL);
5653 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5654 val |= CDCLK_FREQ_337_308;
5655
5656 if (required_vco == 8640)
5657 min_freq = 308570;
5658 else
5659 min_freq = 337500;
5660
5661 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5662
5663 I915_WRITE(CDCLK_CTL, val);
5664 POSTING_READ(CDCLK_CTL);
5665
5666 /*
5667 * We always enable DPLL0 with the lowest link rate possible, but still
5668 * taking into account the VCO required to operate the eDP panel at the
5669 * desired frequency. The usual DP link rates operate with a VCO of
5670 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5671 * The modeset code is responsible for the selection of the exact link
5672 * rate later on, with the constraint of choosing a frequency that
5673 * works with required_vco.
5674 */
5675 val = I915_READ(DPLL_CTRL1);
5676
5677 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5678 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5679 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5680 if (required_vco == 8640)
5681 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5682 SKL_DPLL0);
5683 else
5684 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5685 SKL_DPLL0);
5686
5687 I915_WRITE(DPLL_CTRL1, val);
5688 POSTING_READ(DPLL_CTRL1);
5689
5690 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5691
5692 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5693 DRM_ERROR("DPLL0 not locked\n");
5694}
5695
5696static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5697{
5698 int ret;
5699 u32 val;
5700
5701 /* inform PCU we want to change CDCLK */
5702 val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5703 mutex_lock(&dev_priv->rps.hw_lock);
5704 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5705 mutex_unlock(&dev_priv->rps.hw_lock);
5706
5707 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5708}
5709
5710static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5711{
5712 unsigned int i;
5713
5714 for (i = 0; i < 15; i++) {
5715 if (skl_cdclk_pcu_ready(dev_priv))
5716 return true;
5717 udelay(10);
5718 }
5719
5720 return false;
5721}
5722
/*
 * Program the SKL CD clock to @freq (kHz): wait for PCU permission,
 * write CDCLK_CTL, then acknowledge the selected bin to the PCU.
 */
5723static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5724{
560a7ae4 5725	struct drm_device *dev = dev_priv->dev;
5d96d8af
DL
5726	u32 freq_select, pcu_ack;
5727
5728	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);
5729
5730	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5731		DRM_ERROR("failed to inform PCU about cdclk change\n");
5732		return;
5733	}
5734
	/* set CDCLK_CTL — note the 'default' label sits mid-list on
	 * purpose so unknown frequencies fall back to the 337/308 bin. */
5735	/* set CDCLK_CTL */
5736	switch(freq) {
5737	case 450000:
5738	case 432000:
5739		freq_select = CDCLK_FREQ_450_432;
5740		pcu_ack = 1;
5741		break;
5742	case 540000:
5743		freq_select = CDCLK_FREQ_540;
5744		pcu_ack = 2;
5745		break;
5746	case 308570:
5747	case 337500:
5748	default:
5749		freq_select = CDCLK_FREQ_337_308;
5750		pcu_ack = 0;
5751		break;
5752	case 617140:
5753	case 675000:
5754		freq_select = CDCLK_FREQ_675_617;
5755		pcu_ack = 3;
5756		break;
5757	}
5758
5759	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
5760	POSTING_READ(CDCLK_CTL);
5761
5762	/* inform PCU of the change */
5763	mutex_lock(&dev_priv->rps.hw_lock);
5764	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5765	mutex_unlock(&dev_priv->rps.hw_lock);
560a7ae4
DL
5766
5767	intel_update_cdclk(dev);
5d96d8af
DL
5768}
5769
/* Power down the DBUF and disable DPLL0 (SKL display clock teardown). */
5770void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5771{
5772	/* disable DBUF power */
5773	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5774	POSTING_READ(DBUF_CTL);
5775
5776	udelay(10);
5777
5778	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5779		DRM_ERROR("DBuf power disable timeout\n");
5780
ab96c1ee
ID
5781	/* disable DPLL0 */
5782	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5783	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5784		DRM_ERROR("Couldn't disable DPLL0\n");
5d96d8af
DL
5785}
5786
/*
 * SKL display clock bring-up: enable DPLL0 if the BIOS left it off,
 * program the boot cdclk, and power up the DBUF.
 */
5787void skl_init_cdclk(struct drm_i915_private *dev_priv)
5788{
5d96d8af
DL
5789	unsigned int required_vco;
5790
39d9b85a
GW
5791	/* DPLL0 not enabled (happens on early BIOS versions) */
5792	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5793		/* enable DPLL0 */
5794		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5795		skl_dpll0_enable(dev_priv, required_vco);
5d96d8af
DL
5796	}
5797
5d96d8af
DL
5798	/* set CDCLK to the frequency the BIOS chose */
5799	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5800
5801	/* enable DBUF power */
5802	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5803	POSTING_READ(DBUF_CTL);
5804
5805	udelay(10);
5806
5807	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5808		DRM_ERROR("DBuf power enable timeout\n");
5809}
5810
c73666f3
SK
/*
 * Verify the BIOS-programmed cdclk state and reinitialize it if anything
 * looks off.  Returns non-zero (true) when sanitization was performed.
 * NOTE(review): declared int but returns true/false — callers treat it as
 * a boolean.
 */
5811int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5812{
5813	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5814	uint32_t cdctl = I915_READ(CDCLK_CTL);
5815	int freq = dev_priv->skl_boot_cdclk;
5816
f1b391a5
SK
5817	/*
5818	 * check if the pre-os initialized the display
5819	 * There is SWF18 scratchpad register defined which is set by the
5820	 * pre-os which can be used by the OS drivers to check the status
5821	 */
5822	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5823		goto sanitize;
5824
c73666f3
SK
5825	/* Is PLL enabled and locked ? */
5826	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5827		goto sanitize;
5828
5829	/* DPLL okay; verify the cdclock
5830	 *
5831	 * Noticed in some instances that the freq selection is correct but
5832	 * decimal part is programmed wrong from BIOS where pre-os does not
5833	 * enable display. Verify the same as well.
5834	 */
5835	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5836		/* All well; nothing to sanitize */
5837		return false;
5838sanitize:
5839	/*
5840	 * As of now initialize with max cdclk till
5841	 * we get dynamic cdclk support
5842	 * */
5843	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5844	skl_init_cdclk(dev_priv);
5845
5846	/* we did have to sanitize */
5847	return true;
5848}
5849
30a970c6
JB
5850/* Adjust CDclk dividers to allow high res or save power if possible */
/*
 * Change the VLV CD clock via Punit voltage request + CCK divider +
 * Bunit self-refresh latency adjustment.  Order matters: voltage first,
 * then divider, then latency, all under the appropriate sideband locks.
 */
5851static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5852{
5853	struct drm_i915_private *dev_priv = dev->dev_private;
5854	u32 val, cmd;
5855
164dfd28
VK
5856	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5857					!= dev_priv->cdclk_freq);
d60c4473 5858
dfcab17e 5859	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
30a970c6 5860		cmd = 2;
dfcab17e 5861	else if (cdclk == 266667)
30a970c6
JB
5862		cmd = 1;
5863	else
5864		cmd = 0;
5865
5866	mutex_lock(&dev_priv->rps.hw_lock);
5867	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5868	val &= ~DSPFREQGUAR_MASK;
5869	val |= (cmd << DSPFREQGUAR_SHIFT);
5870	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5871	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5872		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5873		     50)) {
5874		DRM_ERROR("timed out waiting for CDclk change\n");
5875	}
5876	mutex_unlock(&dev_priv->rps.hw_lock);
5877
54433e91
VS
5878	mutex_lock(&dev_priv->sb_lock);
5879
dfcab17e 5880	if (cdclk == 400000) {
6bcda4f0 5881		u32 divider;
30a970c6 5882
6bcda4f0 5883		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
30a970c6 5884
30a970c6
JB
5885		/* adjust cdclk divider */
5886		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
87d5d259 5887		val &= ~CCK_FREQUENCY_VALUES;
30a970c6
JB
5888		val |= divider;
5889		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
a877e801
VS
5890
5891		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
87d5d259 5892			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
a877e801
VS
5893			     50))
5894			DRM_ERROR("timed out waiting for CDclk change\n");
30a970c6
JB
5895	}
5896
30a970c6
JB
5897	/* adjust self-refresh exit latency value */
5898	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5899	val &= ~0x7f;
5900
5901	/*
5902	 * For high bandwidth configs, we set a higher latency in the bunit
5903	 * so that the core display fetch happens in time to avoid underruns.
5904	 */
dfcab17e 5905	if (cdclk == 400000)
30a970c6
JB
5906		val |= 4500 / 250; /* 4.5 usec */
5907	else
5908		val |= 3000 / 250; /* 3.0 usec */
5909	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
54433e91 5910
a580516d 5911	mutex_unlock(&dev_priv->sb_lock);
30a970c6 5912
b6283055 5913	intel_update_cdclk(dev);
30a970c6
JB
5914}
5915
383c5a6a
VS
/*
 * Change the CHV CD clock.  Unlike VLV, only the desired CCK divider
 * needs to be written into the Punit register.
 */
5916static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5917{
5918	struct drm_i915_private *dev_priv = dev->dev_private;
5919	u32 val, cmd;
5920
164dfd28
VK
5921	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5922					!= dev_priv->cdclk_freq);
383c5a6a
VS
5923
	/* Reject anything outside the four supported frequencies. */
5924	switch (cdclk) {
383c5a6a
VS
5925	case 333333:
5926	case 320000:
383c5a6a 5927	case 266667:
383c5a6a 5928	case 200000:
383c5a6a
VS
5929		break;
5930	default:
5f77eeb0 5931		MISSING_CASE(cdclk);
383c5a6a
VS
5932		return;
5933	}
5934
9d0d3fda
VS
5935	/*
5936	 * Specs are full of misinformation, but testing on actual
5937	 * hardware has shown that we just need to write the desired
5938	 * CCK divider into the Punit register.
5939	 */
5940	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5941
383c5a6a
VS
5942	mutex_lock(&dev_priv->rps.hw_lock);
5943	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5944	val &= ~DSPFREQGUAR_MASK_CHV;
5945	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5946	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5947	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5948		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5949		     50)) {
5950		DRM_ERROR("timed out waiting for CDclk change\n");
5951	}
5952	mutex_unlock(&dev_priv->rps.hw_lock);
5953
b6283055 5954	intel_update_cdclk(dev);
383c5a6a
VS
5955}
5956
30a970c6
JB
5957static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5958 int max_pixclk)
5959{
6bcda4f0 5960 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
6cca3195 5961 int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
29dc7ef3 5962
30a970c6
JB
5963 /*
5964 * Really only a few cases to deal with, as only 4 CDclks are supported:
5965 * 200MHz
5966 * 267MHz
29dc7ef3 5967 * 320/333MHz (depends on HPLL freq)
6cca3195
VS
5968 * 400MHz (VLV only)
5969 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5970 * of the lower bin and adjust if needed.
e37c67a1
VS
5971 *
5972 * We seem to get an unstable or solid color picture at 200MHz.
5973 * Not sure what's wrong. For now use 200MHz only when all pipes
5974 * are off.
30a970c6 5975 */
6cca3195
VS
5976 if (!IS_CHERRYVIEW(dev_priv) &&
5977 max_pixclk > freq_320*limit/100)
dfcab17e 5978 return 400000;
6cca3195 5979 else if (max_pixclk > 266667*limit/100)
29dc7ef3 5980 return freq_320;
e37c67a1 5981 else if (max_pixclk > 0)
dfcab17e 5982 return 266667;
e37c67a1
VS
5983 else
5984 return 200000;
30a970c6
JB
5985}
5986
f8437dd1
VK
struct drm_i915_private;

static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	/*
	 * Pick the smallest supported BXT cdclk bin whose 90% guardband
	 * still covers the requested pixel clock.
	 * FIXME (carried over from the original):
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	static const int bins[] = { 144000, 288000, 384000, 576000 };
	int i;

	for (i = 0; i < (int)(sizeof(bins) / sizeof(bins[0])); i++) {
		if (max_pixclk <= bins[i] * 9 / 10)
			return bins[i];
	}

	return 624000;
}
6006
a821fc46
ACO
6007/* Compute the max pixel clock for new configuration. Uses atomic state if
6008 * that's non-NULL, look at current state otherwise. */
6009static int intel_mode_max_pixclk(struct drm_device *dev,
6010 struct drm_atomic_state *state)
30a970c6 6011{
30a970c6 6012 struct intel_crtc *intel_crtc;
304603f4 6013 struct intel_crtc_state *crtc_state;
30a970c6
JB
6014 int max_pixclk = 0;
6015
d3fcc808 6016 for_each_intel_crtc(dev, intel_crtc) {
27c329ed 6017 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
304603f4
ACO
6018 if (IS_ERR(crtc_state))
6019 return PTR_ERR(crtc_state);
6020
6021 if (!crtc_state->base.enable)
6022 continue;
6023
6024 max_pixclk = max(max_pixclk,
6025 crtc_state->base.adjusted_mode.crtc_clock);
30a970c6
JB
6026 }
6027
6028 return max_pixclk;
6029}
6030
27c329ed 6031static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
30a970c6 6032{
27c329ed
ML
6033 struct drm_device *dev = state->dev;
6034 struct drm_i915_private *dev_priv = dev->dev_private;
6035 int max_pixclk = intel_mode_max_pixclk(dev, state);
30a970c6 6036
304603f4
ACO
6037 if (max_pixclk < 0)
6038 return max_pixclk;
30a970c6 6039
27c329ed
ML
6040 to_intel_atomic_state(state)->cdclk =
6041 valleyview_calc_cdclk(dev_priv, max_pixclk);
0a9ab303 6042
27c329ed
ML
6043 return 0;
6044}
304603f4 6045
27c329ed
ML
6046static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
6047{
6048 struct drm_device *dev = state->dev;
6049 struct drm_i915_private *dev_priv = dev->dev_private;
6050 int max_pixclk = intel_mode_max_pixclk(dev, state);
85a96e7a 6051
27c329ed
ML
6052 if (max_pixclk < 0)
6053 return max_pixclk;
85a96e7a 6054
27c329ed
ML
6055 to_intel_atomic_state(state)->cdclk =
6056 broxton_calc_cdclk(dev_priv, max_pixclk);
85a96e7a 6057
27c329ed 6058 return 0;
30a970c6
JB
6059}
6060
1e69cd74
VS
/*
 * Program the VLV/CHV PFI credits based on the cdclk/czclk ratio.
 * Workaround: the default credits are written first, then the target
 * credits together with the resend bit.
 */
6061static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6062{
6063	unsigned int credits, default_credits;
6064
6065	if (IS_CHERRYVIEW(dev_priv))
6066		default_credits = PFI_CREDIT(12);
6067	else
6068		default_credits = PFI_CREDIT(8);
6069
bfa7df01 6070	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
1e69cd74
VS
6071		/* CHV suggested value is 31 or 63 */
6072		if (IS_CHERRYVIEW(dev_priv))
fcc0008f 6073			credits = PFI_CREDIT_63;
1e69cd74
VS
6074		else
6075			credits = PFI_CREDIT(15);
6076	} else {
6077		credits = default_credits;
6078	}
6079
6080	/*
6081	 * WA - write default credits before re-programming
6082	 * FIXME: should we also set the resend bit here?
6083	 */
6084	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6085		   default_credits);
6086
6087	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
6088		   credits | PFI_CREDIT_RESEND);
6089
6090	/*
6091	 * FIXME is this guaranteed to clear
6092	 * immediately or should we poll for it?
6093	 */
6094	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
6095}
6096
/*
 * Commit-phase cdclk change for VLV/CHV: apply the frequency computed in
 * valleyview_modeset_calc_cdclk() and reprogram the PFI credits, holding
 * the PIPE-A power domain across the sequence (see FIXME below).
 */
27c329ed 6097static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
30a970c6 6098{
a821fc46 6099	struct drm_device *dev = old_state->dev;
27c329ed 6100	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
30a970c6 6101	struct drm_i915_private *dev_priv = dev->dev_private;
30a970c6 6102
27c329ed
ML
6103	/*
6104	 * FIXME: We can end up here with all power domains off, yet
6105	 * with a CDCLK frequency other than the minimum. To account
6106	 * for this take the PIPE-A power domain, which covers the HW
6107	 * blocks needed for the following programming. This can be
6108	 * removed once it's guaranteed that we get here either with
6109	 * the minimum CDCLK set, or the required power domains
6110	 * enabled.
6111	 */
6112	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
738c05c0 6113
27c329ed
ML
6114	if (IS_CHERRYVIEW(dev))
6115		cherryview_set_cdclk(dev, req_cdclk);
6116	else
6117		valleyview_set_cdclk(dev, req_cdclk);
738c05c0 6118
27c329ed 6119	vlv_program_pfi_credits(dev_priv);
1e69cd74 6120
27c329ed 6121	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
30a970c6
JB
6122}
6123
89b667f8
JB
/*
 * Full VLV/CHV CRTC enable sequence.  The ordering below (timings ->
 * pipeconf -> pre_pll_enable -> PLL -> pre_enable -> pfit -> LUT ->
 * pipe -> vblank -> encoder enable) is hardware-mandated; do not reorder.
 */
6124static void valleyview_crtc_enable(struct drm_crtc *crtc)
6125{
6126	struct drm_device *dev = crtc->dev;
a72e4c9f 6127	struct drm_i915_private *dev_priv = to_i915(dev);
89b667f8
JB
6128	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6129	struct intel_encoder *encoder;
6130	int pipe = intel_crtc->pipe;
23538ef1 6131	bool is_dsi;
89b667f8 6132
53d9f4e9 6133	if (WARN_ON(intel_crtc->active))
89b667f8
JB
6134		return;
6135
409ee761 6136	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
8525a235 6137
6e3c9717 6138	if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6139		intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6140
6141	intel_set_pipe_timings(intel_crtc);
6142
	/* CHV pipe B: force legacy blending and a zeroed canvas. */
c14b0485
VS
6143	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6144		struct drm_i915_private *dev_priv = dev->dev_private;
6145
6146		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6147		I915_WRITE(CHV_CANVAS(pipe), 0);
6148	}
6149
5b18e57c
DV
6150	i9xx_set_pipeconf(intel_crtc);
6151
89b667f8 6152	intel_crtc->active = true;
89b667f8 6153
a72e4c9f 6154	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6155
89b667f8
JB
6156	for_each_encoder_on_crtc(dev, crtc, encoder)
6157		if (encoder->pre_pll_enable)
6158			encoder->pre_pll_enable(encoder);
6159
	/* DSI carries its own PLL handling; skip the display PLLs for it. */
9d556c99 6160	if (!is_dsi) {
c0b4c660
VS
6161		if (IS_CHERRYVIEW(dev)) {
6162			chv_prepare_pll(intel_crtc, intel_crtc->config);
6e3c9717 6163			chv_enable_pll(intel_crtc, intel_crtc->config);
c0b4c660
VS
6164		} else {
6165			vlv_prepare_pll(intel_crtc, intel_crtc->config);
6e3c9717 6166			vlv_enable_pll(intel_crtc, intel_crtc->config);
c0b4c660 6167		}
9d556c99 6168	}
89b667f8
JB
6169
6170	for_each_encoder_on_crtc(dev, crtc, encoder)
6171		if (encoder->pre_enable)
6172			encoder->pre_enable(encoder);
6173
2dd24552
JB
6174	i9xx_pfit_enable(intel_crtc);
6175
63cbb074
VS
6176	intel_crtc_load_lut(crtc);
6177
e1fdc473 6178	intel_enable_pipe(intel_crtc);
be6a6f8e 6179
4b3a9526
VS
6180	assert_vblank_disabled(crtc);
6181	drm_crtc_vblank_on(crtc);
6182
f9b61ff6
DV
6183	for_each_encoder_on_crtc(dev, crtc, encoder)
6184		encoder->enable(encoder);
89b667f8
JB
6185}
6186
f13c2ef3
DV
6187static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6188{
6189 struct drm_device *dev = crtc->base.dev;
6190 struct drm_i915_private *dev_priv = dev->dev_private;
6191
6e3c9717
ACO
6192 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6193 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
f13c2ef3
DV
6194}
6195
/*
 * Full gen2-4 (non-PCH) CRTC enable sequence.  As with the VLV variant,
 * the ordering is hardware-mandated; do not reorder.
 */
0b8765c6 6196static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
6197{
6198	struct drm_device *dev = crtc->dev;
a72e4c9f 6199	struct drm_i915_private *dev_priv = to_i915(dev);
79e53945 6200	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6201	struct intel_encoder *encoder;
79e53945 6202	int pipe = intel_crtc->pipe;
79e53945 6203
53d9f4e9 6204	if (WARN_ON(intel_crtc->active))
f7abfe8b
CW
6205		return;
6206
f13c2ef3
DV
6207	i9xx_set_pll_dividers(intel_crtc);
6208
6e3c9717 6209	if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6210		intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6211
6212	intel_set_pipe_timings(intel_crtc);
6213
5b18e57c
DV
6214	i9xx_set_pipeconf(intel_crtc);
6215
f7abfe8b 6216	intel_crtc->active = true;
6b383a7f 6217
	/* gen2 has no configurable FIFO underrun reporting. */
4a3436e8 6218	if (!IS_GEN2(dev))
a72e4c9f 6219		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6220
9d6d9f19
MK
6221	for_each_encoder_on_crtc(dev, crtc, encoder)
6222		if (encoder->pre_enable)
6223			encoder->pre_enable(encoder);
6224
f6736a1a
DV
6225	i9xx_enable_pll(intel_crtc);
6226
2dd24552
JB
6227	i9xx_pfit_enable(intel_crtc);
6228
63cbb074
VS
6229	intel_crtc_load_lut(crtc);
6230
f37fcc2a 6231	intel_update_watermarks(crtc);
e1fdc473 6232	intel_enable_pipe(intel_crtc);
be6a6f8e 6233
4b3a9526
VS
6234	assert_vblank_disabled(crtc);
6235	drm_crtc_vblank_on(crtc);
6236
f9b61ff6
DV
6237	for_each_encoder_on_crtc(dev, crtc, encoder)
6238		encoder->enable(encoder);
0b8765c6 6239}
79e53945 6240
87476d63
DV
/*
 * Turn off the GMCH panel fitter, if the CRTC state says it was in use.
 * The pipe must already be disabled when the fitter is turned off.
 */
6241static void i9xx_pfit_disable(struct intel_crtc *crtc)
6242{
6243	struct drm_device *dev = crtc->base.dev;
6244	struct drm_i915_private *dev_priv = dev->dev_private;
87476d63 6245
6e3c9717 6246	if (!crtc->config->gmch_pfit.control)
328d8e82 6247		return;
87476d63 6248
328d8e82 6249	assert_pipe_disabled(dev_priv, crtc->pipe);
87476d63 6250
328d8e82
DV
6251	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6252			 I915_READ(PFIT_CONTROL));
6253	I915_WRITE(PFIT_CONTROL, 0);
87476d63
DV
6254}
6255
0b8765c6
JB
/*
 * Full gen2-4/VLV/CHV CRTC disable sequence (reverse order of enable):
 * wait for planes, encoder disable, vblank off, pipe off, pfit off,
 * post_disable, PLL off, post_pll_disable.
 */
6256static void i9xx_crtc_disable(struct drm_crtc *crtc)
6257{
6258	struct drm_device *dev = crtc->dev;
6259	struct drm_i915_private *dev_priv = dev->dev_private;
6260	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6261	struct intel_encoder *encoder;
0b8765c6 6262	int pipe = intel_crtc->pipe;
ef9c3aee 6263
6304cd91
VS
6264	/*
6265	 * On gen2 planes are double buffered but the pipe isn't, so we must
6266	 * wait for planes to fully turn off before disabling the pipe.
564ed191
ID
6267	 * We also need to wait on all gmch platforms because of the
6268	 * self-refresh mode constraint explained above.
	 * NOTE(review): "explained above" refers to a comment elsewhere in
	 * this file, outside this function.
6304cd91 6269	 */
564ed191 6270	intel_wait_for_vblank(dev, pipe);
6304cd91 6271
4b3a9526
VS
6272	for_each_encoder_on_crtc(dev, crtc, encoder)
6273		encoder->disable(encoder);
6274
f9b61ff6
DV
6275	drm_crtc_vblank_off(crtc);
6276	assert_vblank_disabled(crtc);
6277
575f7ab7 6278	intel_disable_pipe(intel_crtc);
24a1f16d 6279
87476d63 6280	i9xx_pfit_disable(intel_crtc);
24a1f16d 6281
89b667f8
JB
6282	for_each_encoder_on_crtc(dev, crtc, encoder)
6283		if (encoder->post_disable)
6284			encoder->post_disable(encoder);
6285
	/* DSI owns its own PLL; only touch the display PLLs otherwise. */
409ee761 6286	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
076ed3b2
CML
6287		if (IS_CHERRYVIEW(dev))
6288			chv_disable_pll(dev_priv, pipe);
6289		else if (IS_VALLEYVIEW(dev))
6290			vlv_disable_pll(dev_priv, pipe);
6291		else
1c4e0274 6292			i9xx_disable_pll(intel_crtc);
076ed3b2 6293	}
0b8765c6 6294
d6db995f
VS
6295	for_each_encoder_on_crtc(dev, crtc, encoder)
6296		if (encoder->post_pll_disable)
6297			encoder->post_pll_disable(encoder);
6298
4a3436e8 6299	if (!IS_GEN2(dev))
a72e4c9f 6300		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
0b8765c6
JB
6301}
6302
b17d48e2
ML
/*
 * Disable a CRTC outside the atomic framework (e.g. during HW state
 * takeover): tear down planes and the pipe, update watermarks and the
 * shared DPLL, and drop every power domain the CRTC held.
 */
6303static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6304{
6305	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6306	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6307	enum intel_display_power_domain domain;
6308	unsigned long domains;
6309
6310	if (!intel_crtc->active)
6311		return;
6312
a539205a 6313	if (to_intel_plane_state(crtc->primary->state)->visible) {
fc32b1fd
ML
6314		WARN_ON(intel_crtc->unpin_work);
6315
a539205a
ML
6316		intel_pre_disable_primary(crtc);
6317	}
6318
d032ffa0 6319	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
b17d48e2 6320	dev_priv->display.crtc_disable(crtc);
37d9078b
MR
6321	intel_crtc->active = false;
6322	intel_update_watermarks(crtc);
1f7457b1 6323	intel_disable_shared_dpll(intel_crtc);
b17d48e2
ML
6324
6325	domains = intel_crtc->enabled_power_domains;
6326	for_each_power_domain(domain, domains)
6327		intel_display_power_put(dev_priv, domain);
6328	intel_crtc->enabled_power_domains = 0;
6329}
6330
6b72d486
ML
6331/*
6332 * turn all crtc's off, but do not adjust state
6333 * This has to be paired with a call to intel_modeset_setup_hw_state.
 *
 * Builds a one-shot atomic state with every active CRTC turned off and
 * commits it; on success the software 'active' flags are restored so the
 * pre-suspend configuration can be re-established later.  Returns 0 on
 * success or a negative error code.
6334 */
70e0bd74 6335int intel_display_suspend(struct drm_device *dev)
ee7b9f93 6336{
70e0bd74
ML
6337	struct drm_mode_config *config = &dev->mode_config;
6338	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
6339	struct drm_atomic_state *state;
6b72d486 6340	struct drm_crtc *crtc;
70e0bd74
ML
6341	unsigned crtc_mask = 0;
6342	int ret = 0;
6343
6344	if (WARN_ON(!ctx))
6345		return 0;
6346
6347	lockdep_assert_held(&ctx->ww_ctx);
6348	state = drm_atomic_state_alloc(dev);
6349	if (WARN_ON(!state))
6350		return -ENOMEM;
6351
6352	state->acquire_ctx = ctx;
6353	state->allow_modeset = true;
6354
	/* Mark every currently-active CRTC inactive and remember which. */
6355	for_each_crtc(dev, crtc) {
6356		struct drm_crtc_state *crtc_state =
6357			drm_atomic_get_crtc_state(state, crtc);
6b72d486 6358
70e0bd74
ML
6359		ret = PTR_ERR_OR_ZERO(crtc_state);
6360		if (ret)
6361			goto free;
6362
6363		if (!crtc_state->active)
6364			continue;
6365
6366		crtc_state->active = false;
6367		crtc_mask |= 1 << drm_crtc_index(crtc);
6368	}
6369
6370	if (crtc_mask) {
74c090b1 6371		ret = drm_atomic_commit(state);
70e0bd74
ML
6372
		/* On success, restore the software 'active' flags so resume
		 * knows which CRTCs to bring back.  The committed state is
		 * now owned by the DRM core; do not free it here. */
6373		if (!ret) {
6374			for_each_crtc(dev, crtc)
6375				if (crtc_mask & (1 << drm_crtc_index(crtc)))
6376					crtc->state->active = true;
6377
6378			return ret;
6379		}
6380	}
6381
6382free:
6383	if (ret)
6384		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6385	drm_atomic_state_free(state);
6386	return ret;
ee7b9f93
JB
6387}
6388
/* Tear down a DRM encoder and free the wrapping intel_encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_enc = to_intel_encoder(encoder);

	/* Detach from the DRM core first, then release our container. */
	drm_encoder_cleanup(encoder);
	kfree(intel_enc);
}
6396
0a91ca29
DV
6397/* Cross check the actual hw state with our own modeset state tracking (and it's
6398 * internal consistency). */
b980514c 6399static void intel_connector_check_state(struct intel_connector *connector)
79e53945 6400{
35dd3c64
ML
6401 struct drm_crtc *crtc = connector->base.state->crtc;
6402
6403 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6404 connector->base.base.id,
6405 connector->base.name);
6406
0a91ca29 6407 if (connector->get_hw_state(connector)) {
e85376cb 6408 struct intel_encoder *encoder = connector->encoder;
35dd3c64 6409 struct drm_connector_state *conn_state = connector->base.state;
0a91ca29 6410
35dd3c64
ML
6411 I915_STATE_WARN(!crtc,
6412 "connector enabled without attached crtc\n");
0a91ca29 6413
35dd3c64
ML
6414 if (!crtc)
6415 return;
6416
6417 I915_STATE_WARN(!crtc->state->active,
6418 "connector is active, but attached crtc isn't\n");
6419
e85376cb 6420 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
35dd3c64
ML
6421 return;
6422
e85376cb 6423 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
35dd3c64
ML
6424 "atomic encoder doesn't match attached encoder\n");
6425
e85376cb 6426 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
35dd3c64
ML
6427 "attached encoder crtc differs from connector crtc\n");
6428 } else {
4d688a2a
ML
6429 I915_STATE_WARN(crtc && crtc->state->active,
6430 "attached crtc is active, but connector isn't\n");
35dd3c64
ML
6431 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6432 "best encoder set without crtc!\n");
0a91ca29 6433 }
79e53945
JB
6434}
6435
08d9bc92
ACO
6436int intel_connector_init(struct intel_connector *connector)
6437{
6438 struct drm_connector_state *connector_state;
6439
6440 connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
6441 if (!connector_state)
6442 return -ENOMEM;
6443
6444 connector->base.state = connector_state;
6445 return 0;
6446}
6447
6448struct intel_connector *intel_connector_alloc(void)
6449{
6450 struct intel_connector *connector;
6451
6452 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6453 if (!connector)
6454 return NULL;
6455
6456 if (intel_connector_init(connector) < 0) {
6457 kfree(connector);
6458 return NULL;
6459 }
6460
6461 return connector;
6462}
6463
f0947c37
DV
6464/* Simple connector->get_hw_state implementation for encoders that support only
6465 * one connector and no cloning and hence the encoder state determines the state
6466 * of the connector. */
6467bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 6468{
24929352 6469 enum pipe pipe = 0;
f0947c37 6470 struct intel_encoder *encoder = connector->encoder;
ea5b213a 6471
f0947c37 6472 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
6473}
6474
6d293983 6475static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
d272ddfa 6476{
6d293983
ACO
6477 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6478 return crtc_state->fdi_lanes;
d272ddfa
VS
6479
6480 return 0;
6481}
6482
6d293983 6483static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5cec258b 6484 struct intel_crtc_state *pipe_config)
1857e1da 6485{
6d293983
ACO
6486 struct drm_atomic_state *state = pipe_config->base.state;
6487 struct intel_crtc *other_crtc;
6488 struct intel_crtc_state *other_crtc_state;
6489
1857e1da
DV
6490 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6491 pipe_name(pipe), pipe_config->fdi_lanes);
6492 if (pipe_config->fdi_lanes > 4) {
6493 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6494 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6495 return -EINVAL;
1857e1da
DV
6496 }
6497
bafb6553 6498 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1857e1da
DV
6499 if (pipe_config->fdi_lanes > 2) {
6500 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6501 pipe_config->fdi_lanes);
6d293983 6502 return -EINVAL;
1857e1da 6503 } else {
6d293983 6504 return 0;
1857e1da
DV
6505 }
6506 }
6507
6508 if (INTEL_INFO(dev)->num_pipes == 2)
6d293983 6509 return 0;
1857e1da
DV
6510
6511 /* Ivybridge 3 pipe is really complicated */
6512 switch (pipe) {
6513 case PIPE_A:
6d293983 6514 return 0;
1857e1da 6515 case PIPE_B:
6d293983
ACO
6516 if (pipe_config->fdi_lanes <= 2)
6517 return 0;
6518
6519 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6520 other_crtc_state =
6521 intel_atomic_get_crtc_state(state, other_crtc);
6522 if (IS_ERR(other_crtc_state))
6523 return PTR_ERR(other_crtc_state);
6524
6525 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
1857e1da
DV
6526 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6527 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6528 return -EINVAL;
1857e1da 6529 }
6d293983 6530 return 0;
1857e1da 6531 case PIPE_C:
251cc67c
VS
6532 if (pipe_config->fdi_lanes > 2) {
6533 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6534 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6535 return -EINVAL;
251cc67c 6536 }
6d293983
ACO
6537
6538 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6539 other_crtc_state =
6540 intel_atomic_get_crtc_state(state, other_crtc);
6541 if (IS_ERR(other_crtc_state))
6542 return PTR_ERR(other_crtc_state);
6543
6544 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
1857e1da 6545 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6d293983 6546 return -EINVAL;
1857e1da 6547 }
6d293983 6548 return 0;
1857e1da
DV
6549 default:
6550 BUG();
6551 }
6552}
6553
e29c22c0
DV
6554#define RETRY 1
6555static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5cec258b 6556 struct intel_crtc_state *pipe_config)
877d48d5 6557{
1857e1da 6558 struct drm_device *dev = intel_crtc->base.dev;
7c5f93b0 6559 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6d293983
ACO
6560 int lane, link_bw, fdi_dotclock, ret;
6561 bool needs_recompute = false;
877d48d5 6562
e29c22c0 6563retry:
877d48d5
DV
6564 /* FDI is a binary signal running at ~2.7GHz, encoding
6565 * each output octet as 10 bits. The actual frequency
6566 * is stored as a divider into a 100MHz clock, and the
6567 * mode pixel clock is stored in units of 1KHz.
6568 * Hence the bw of each lane in terms of the mode signal
6569 * is:
6570 */
6571 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
6572
241bfc38 6573 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 6574
2bd89a07 6575 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
6576 pipe_config->pipe_bpp);
6577
6578 pipe_config->fdi_lanes = lane;
6579
2bd89a07 6580 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
877d48d5 6581 link_bw, &pipe_config->fdi_m_n);
1857e1da 6582
6d293983
ACO
6583 ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
6584 intel_crtc->pipe, pipe_config);
6585 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
e29c22c0
DV
6586 pipe_config->pipe_bpp -= 2*3;
6587 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6588 pipe_config->pipe_bpp);
6589 needs_recompute = true;
6590 pipe_config->bw_constrained = true;
6591
6592 goto retry;
6593 }
6594
6595 if (needs_recompute)
6596 return RETRY;
6597
6d293983 6598 return ret;
877d48d5
DV
6599}
6600
8cfb3407
VS
6601static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6602 struct intel_crtc_state *pipe_config)
6603{
6604 if (pipe_config->pipe_bpp > 24)
6605 return false;
6606
6607 /* HSW can handle pixel rate up to cdclk? */
6608 if (IS_HASWELL(dev_priv->dev))
6609 return true;
6610
6611 /*
b432e5cf
VS
6612 * We compare against max which means we must take
6613 * the increased cdclk requirement into account when
6614 * calculating the new cdclk.
6615 *
6616 * Should measure whether using a lower cdclk w/o IPS
8cfb3407
VS
6617 */
6618 return ilk_pipe_pixel_rate(pipe_config) <=
6619 dev_priv->max_cdclk_freq * 95 / 100;
6620}
6621
42db64ef 6622static void hsw_compute_ips_config(struct intel_crtc *crtc,
5cec258b 6623 struct intel_crtc_state *pipe_config)
42db64ef 6624{
8cfb3407
VS
6625 struct drm_device *dev = crtc->base.dev;
6626 struct drm_i915_private *dev_priv = dev->dev_private;
6627
d330a953 6628 pipe_config->ips_enabled = i915.enable_ips &&
8cfb3407
VS
6629 hsw_crtc_supports_ips(crtc) &&
6630 pipe_config_supports_ips(dev_priv, pipe_config);
42db64ef
PZ
6631}
6632
39acb4aa
VS
6633static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6634{
6635 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6636
6637 /* GDG double wide on either pipe, otherwise pipe A only */
6638 return INTEL_INFO(dev_priv)->gen < 4 &&
6639 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6640}
6641
a43f6e0f 6642static int intel_crtc_compute_config(struct intel_crtc *crtc,
5cec258b 6643 struct intel_crtc_state *pipe_config)
79e53945 6644{
a43f6e0f 6645 struct drm_device *dev = crtc->base.dev;
8bd31e67 6646 struct drm_i915_private *dev_priv = dev->dev_private;
7c5f93b0 6647 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
89749350 6648
ad3a4479 6649 /* FIXME should check pixel clock limits on all platforms */
cf532bb2 6650 if (INTEL_INFO(dev)->gen < 4) {
39acb4aa 6651 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
cf532bb2
VS
6652
6653 /*
39acb4aa 6654 * Enable double wide mode when the dot clock
cf532bb2 6655 * is > 90% of the (display) core speed.
cf532bb2 6656 */
39acb4aa
VS
6657 if (intel_crtc_supports_double_wide(crtc) &&
6658 adjusted_mode->crtc_clock > clock_limit) {
ad3a4479 6659 clock_limit *= 2;
cf532bb2 6660 pipe_config->double_wide = true;
ad3a4479
VS
6661 }
6662
39acb4aa
VS
6663 if (adjusted_mode->crtc_clock > clock_limit) {
6664 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6665 adjusted_mode->crtc_clock, clock_limit,
6666 yesno(pipe_config->double_wide));
e29c22c0 6667 return -EINVAL;
39acb4aa 6668 }
2c07245f 6669 }
89749350 6670
1d1d0e27
VS
6671 /*
6672 * Pipe horizontal size must be even in:
6673 * - DVO ganged mode
6674 * - LVDS dual channel mode
6675 * - Double wide pipe
6676 */
a93e255f 6677 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
1d1d0e27
VS
6678 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6679 pipe_config->pipe_src_w &= ~1;
6680
8693a824
DL
6681 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6682 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
44f46b42
CW
6683 */
6684 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
aad941d5 6685 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
e29c22c0 6686 return -EINVAL;
44f46b42 6687
f5adf94e 6688 if (HAS_IPS(dev))
a43f6e0f
DV
6689 hsw_compute_ips_config(crtc, pipe_config);
6690
877d48d5 6691 if (pipe_config->has_pch_encoder)
a43f6e0f 6692 return ironlake_fdi_compute_config(crtc, pipe_config);
877d48d5 6693
cf5a15be 6694 return 0;
79e53945
JB
6695}
6696
1652d19e
VS
6697static int skylake_get_display_clock_speed(struct drm_device *dev)
6698{
6699 struct drm_i915_private *dev_priv = to_i915(dev);
6700 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6701 uint32_t cdctl = I915_READ(CDCLK_CTL);
6702 uint32_t linkrate;
6703
414355a7 6704 if (!(lcpll1 & LCPLL_PLL_ENABLE))
1652d19e 6705 return 24000; /* 24MHz is the cd freq with NSSC ref */
1652d19e
VS
6706
6707 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6708 return 540000;
6709
6710 linkrate = (I915_READ(DPLL_CTRL1) &
71cd8423 6711 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
1652d19e 6712
71cd8423
DL
6713 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6714 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
1652d19e
VS
6715 /* vco 8640 */
6716 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6717 case CDCLK_FREQ_450_432:
6718 return 432000;
6719 case CDCLK_FREQ_337_308:
6720 return 308570;
6721 case CDCLK_FREQ_675_617:
6722 return 617140;
6723 default:
6724 WARN(1, "Unknown cd freq selection\n");
6725 }
6726 } else {
6727 /* vco 8100 */
6728 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6729 case CDCLK_FREQ_450_432:
6730 return 450000;
6731 case CDCLK_FREQ_337_308:
6732 return 337500;
6733 case CDCLK_FREQ_675_617:
6734 return 675000;
6735 default:
6736 WARN(1, "Unknown cd freq selection\n");
6737 }
6738 }
6739
6740 /* error case, do as if DPLL0 isn't enabled */
6741 return 24000;
6742}
6743
acd3f3d3
BP
6744static int broxton_get_display_clock_speed(struct drm_device *dev)
6745{
6746 struct drm_i915_private *dev_priv = to_i915(dev);
6747 uint32_t cdctl = I915_READ(CDCLK_CTL);
6748 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6749 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6750 int cdclk;
6751
6752 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6753 return 19200;
6754
6755 cdclk = 19200 * pll_ratio / 2;
6756
6757 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6758 case BXT_CDCLK_CD2X_DIV_SEL_1:
6759 return cdclk; /* 576MHz or 624MHz */
6760 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6761 return cdclk * 2 / 3; /* 384MHz */
6762 case BXT_CDCLK_CD2X_DIV_SEL_2:
6763 return cdclk / 2; /* 288MHz */
6764 case BXT_CDCLK_CD2X_DIV_SEL_4:
6765 return cdclk / 4; /* 144MHz */
6766 }
6767
6768 /* error case, do as if DE PLL isn't enabled */
6769 return 19200;
6770}
6771
1652d19e
VS
6772static int broadwell_get_display_clock_speed(struct drm_device *dev)
6773{
6774 struct drm_i915_private *dev_priv = dev->dev_private;
6775 uint32_t lcpll = I915_READ(LCPLL_CTL);
6776 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6777
6778 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6779 return 800000;
6780 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6781 return 450000;
6782 else if (freq == LCPLL_CLK_FREQ_450)
6783 return 450000;
6784 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6785 return 540000;
6786 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6787 return 337500;
6788 else
6789 return 675000;
6790}
6791
6792static int haswell_get_display_clock_speed(struct drm_device *dev)
6793{
6794 struct drm_i915_private *dev_priv = dev->dev_private;
6795 uint32_t lcpll = I915_READ(LCPLL_CTL);
6796 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6797
6798 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6799 return 800000;
6800 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6801 return 450000;
6802 else if (freq == LCPLL_CLK_FREQ_450)
6803 return 450000;
6804 else if (IS_HSW_ULT(dev))
6805 return 337500;
6806 else
6807 return 540000;
79e53945
JB
6808}
6809
25eb05fc
JB
6810static int valleyview_get_display_clock_speed(struct drm_device *dev)
6811{
bfa7df01
VS
6812 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6813 CCK_DISPLAY_CLOCK_CONTROL);
25eb05fc
JB
6814}
6815
b37a6434
VS
/* Ironlake display clock is a fixed 450 MHz. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6820
e70236a8
JB
/* i945 display clock is a fixed 400 MHz. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
79e53945 6825
/* i915 display clock is a fixed 333 MHz. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
79e53945 6830
e70236a8
JB
/* Fallback for 9xx parts without a dedicated lookup: fixed 200 MHz. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
79e53945 6835
257a7ffc
DV
6836static int pnv_get_display_clock_speed(struct drm_device *dev)
6837{
6838 u16 gcfgc = 0;
6839
6840 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6841
6842 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6843 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
e907f170 6844 return 266667;
257a7ffc 6845 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
e907f170 6846 return 333333;
257a7ffc 6847 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
e907f170 6848 return 444444;
257a7ffc
DV
6849 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6850 return 200000;
6851 default:
6852 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6853 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
e907f170 6854 return 133333;
257a7ffc 6855 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
e907f170 6856 return 166667;
257a7ffc
DV
6857 }
6858}
6859
e70236a8
JB
6860static int i915gm_get_display_clock_speed(struct drm_device *dev)
6861{
6862 u16 gcfgc = 0;
79e53945 6863
e70236a8
JB
6864 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6865
6866 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
e907f170 6867 return 133333;
e70236a8
JB
6868 else {
6869 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6870 case GC_DISPLAY_CLOCK_333_MHZ:
e907f170 6871 return 333333;
e70236a8
JB
6872 default:
6873 case GC_DISPLAY_CLOCK_190_200_MHZ:
6874 return 190000;
79e53945 6875 }
e70236a8
JB
6876 }
6877}
6878
/* i865 display clock is a fixed 267 MHz. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6883
1b1d2716 6884static int i85x_get_display_clock_speed(struct drm_device *dev)
e70236a8
JB
6885{
6886 u16 hpllcc = 0;
1b1d2716 6887
65cd2b3f
VS
6888 /*
6889 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6890 * encoding is different :(
6891 * FIXME is this the right way to detect 852GM/852GMV?
6892 */
6893 if (dev->pdev->revision == 0x1)
6894 return 133333;
6895
1b1d2716
VS
6896 pci_bus_read_config_word(dev->pdev->bus,
6897 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6898
e70236a8
JB
6899 /* Assume that the hardware is in the high speed state. This
6900 * should be the default.
6901 */
6902 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6903 case GC_CLOCK_133_200:
1b1d2716 6904 case GC_CLOCK_133_200_2:
e70236a8
JB
6905 case GC_CLOCK_100_200:
6906 return 200000;
6907 case GC_CLOCK_166_250:
6908 return 250000;
6909 case GC_CLOCK_100_133:
e907f170 6910 return 133333;
1b1d2716
VS
6911 case GC_CLOCK_133_266:
6912 case GC_CLOCK_133_266_2:
6913 case GC_CLOCK_166_266:
6914 return 266667;
e70236a8 6915 }
79e53945 6916
e70236a8
JB
6917 /* Shouldn't happen */
6918 return 0;
6919}
79e53945 6920
e70236a8
JB
/* i830 display clock is a fixed 133 MHz. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6925
34edce2f
VS
6926static unsigned int intel_hpll_vco(struct drm_device *dev)
6927{
6928 struct drm_i915_private *dev_priv = dev->dev_private;
6929 static const unsigned int blb_vco[8] = {
6930 [0] = 3200000,
6931 [1] = 4000000,
6932 [2] = 5333333,
6933 [3] = 4800000,
6934 [4] = 6400000,
6935 };
6936 static const unsigned int pnv_vco[8] = {
6937 [0] = 3200000,
6938 [1] = 4000000,
6939 [2] = 5333333,
6940 [3] = 4800000,
6941 [4] = 2666667,
6942 };
6943 static const unsigned int cl_vco[8] = {
6944 [0] = 3200000,
6945 [1] = 4000000,
6946 [2] = 5333333,
6947 [3] = 6400000,
6948 [4] = 3333333,
6949 [5] = 3566667,
6950 [6] = 4266667,
6951 };
6952 static const unsigned int elk_vco[8] = {
6953 [0] = 3200000,
6954 [1] = 4000000,
6955 [2] = 5333333,
6956 [3] = 4800000,
6957 };
6958 static const unsigned int ctg_vco[8] = {
6959 [0] = 3200000,
6960 [1] = 4000000,
6961 [2] = 5333333,
6962 [3] = 6400000,
6963 [4] = 2666667,
6964 [5] = 4266667,
6965 };
6966 const unsigned int *vco_table;
6967 unsigned int vco;
6968 uint8_t tmp = 0;
6969
6970 /* FIXME other chipsets? */
6971 if (IS_GM45(dev))
6972 vco_table = ctg_vco;
6973 else if (IS_G4X(dev))
6974 vco_table = elk_vco;
6975 else if (IS_CRESTLINE(dev))
6976 vco_table = cl_vco;
6977 else if (IS_PINEVIEW(dev))
6978 vco_table = pnv_vco;
6979 else if (IS_G33(dev))
6980 vco_table = blb_vco;
6981 else
6982 return 0;
6983
6984 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6985
6986 vco = vco_table[tmp & 0x7];
6987 if (vco == 0)
6988 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6989 else
6990 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6991
6992 return vco;
6993}
6994
6995static int gm45_get_display_clock_speed(struct drm_device *dev)
6996{
6997 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6998 uint16_t tmp = 0;
6999
7000 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7001
7002 cdclk_sel = (tmp >> 12) & 0x1;
7003
7004 switch (vco) {
7005 case 2666667:
7006 case 4000000:
7007 case 5333333:
7008 return cdclk_sel ? 333333 : 222222;
7009 case 3200000:
7010 return cdclk_sel ? 320000 : 228571;
7011 default:
7012 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7013 return 222222;
7014 }
7015}
7016
7017static int i965gm_get_display_clock_speed(struct drm_device *dev)
7018{
7019 static const uint8_t div_3200[] = { 16, 10, 8 };
7020 static const uint8_t div_4000[] = { 20, 12, 10 };
7021 static const uint8_t div_5333[] = { 24, 16, 14 };
7022 const uint8_t *div_table;
7023 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7024 uint16_t tmp = 0;
7025
7026 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7027
7028 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7029
7030 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7031 goto fail;
7032
7033 switch (vco) {
7034 case 3200000:
7035 div_table = div_3200;
7036 break;
7037 case 4000000:
7038 div_table = div_4000;
7039 break;
7040 case 5333333:
7041 div_table = div_5333;
7042 break;
7043 default:
7044 goto fail;
7045 }
7046
7047 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7048
caf4e252 7049fail:
34edce2f
VS
7050 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7051 return 200000;
7052}
7053
7054static int g33_get_display_clock_speed(struct drm_device *dev)
7055{
7056 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
7057 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
7058 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7059 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7060 const uint8_t *div_table;
7061 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7062 uint16_t tmp = 0;
7063
7064 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7065
7066 cdclk_sel = (tmp >> 4) & 0x7;
7067
7068 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7069 goto fail;
7070
7071 switch (vco) {
7072 case 3200000:
7073 div_table = div_3200;
7074 break;
7075 case 4000000:
7076 div_table = div_4000;
7077 break;
7078 case 4800000:
7079 div_table = div_4800;
7080 break;
7081 case 5333333:
7082 div_table = div_5333;
7083 break;
7084 default:
7085 goto fail;
7086 }
7087
7088 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7089
caf4e252 7090fail:
34edce2f
VS
7091 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7092 return 190476;
7093}
7094
2c07245f 7095static void
a65851af 7096intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2c07245f 7097{
a65851af
VS
7098 while (*num > DATA_LINK_M_N_MASK ||
7099 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
7100 *num >>= 1;
7101 *den >>= 1;
7102 }
7103}
7104
a65851af
VS
7105static void compute_m_n(unsigned int m, unsigned int n,
7106 uint32_t *ret_m, uint32_t *ret_n)
7107{
7108 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7109 *ret_m = div_u64((uint64_t) m * *ret_n, n);
7110 intel_reduce_m_n_ratio(ret_m, ret_n);
7111}
7112
e69d0bc1
DV
7113void
7114intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7115 int pixel_clock, int link_clock,
7116 struct intel_link_m_n *m_n)
2c07245f 7117{
e69d0bc1 7118 m_n->tu = 64;
a65851af
VS
7119
7120 compute_m_n(bits_per_pixel * pixel_clock,
7121 link_clock * nlanes * 8,
7122 &m_n->gmch_m, &m_n->gmch_n);
7123
7124 compute_m_n(pixel_clock, link_clock,
7125 &m_n->link_m, &m_n->link_n);
2c07245f
ZW
7126}
7127
a7615030
CW
7128static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7129{
d330a953
JN
7130 if (i915.panel_use_ssc >= 0)
7131 return i915.panel_use_ssc != 0;
41aa3448 7132 return dev_priv->vbt.lvds_use_ssc
435793df 7133 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
7134}
7135
a93e255f
ACO
7136static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
7137 int num_connectors)
c65d77d8 7138{
a93e255f 7139 struct drm_device *dev = crtc_state->base.crtc->dev;
c65d77d8
JB
7140 struct drm_i915_private *dev_priv = dev->dev_private;
7141 int refclk;
7142
a93e255f
ACO
7143 WARN_ON(!crtc_state->base.state);
7144
5ab7b0b7 7145 if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
9a0ea498 7146 refclk = 100000;
a93e255f 7147 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
c65d77d8 7148 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
e91e941b
VS
7149 refclk = dev_priv->vbt.lvds_ssc_freq;
7150 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
c65d77d8
JB
7151 } else if (!IS_GEN2(dev)) {
7152 refclk = 96000;
7153 } else {
7154 refclk = 48000;
7155 }
7156
7157 return refclk;
7158}
7159
7429e9d4 7160static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 7161{
7df00d7a 7162 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 7163}
f47709a9 7164
7429e9d4
DV
7165static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7166{
7167 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
7168}
7169
f47709a9 7170static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
190f68c5 7171 struct intel_crtc_state *crtc_state,
a7516a05
JB
7172 intel_clock_t *reduced_clock)
7173{
f47709a9 7174 struct drm_device *dev = crtc->base.dev;
a7516a05
JB
7175 u32 fp, fp2 = 0;
7176
7177 if (IS_PINEVIEW(dev)) {
190f68c5 7178 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7179 if (reduced_clock)
7429e9d4 7180 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 7181 } else {
190f68c5 7182 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7183 if (reduced_clock)
7429e9d4 7184 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
7185 }
7186
190f68c5 7187 crtc_state->dpll_hw_state.fp0 = fp;
a7516a05 7188
f47709a9 7189 crtc->lowfreq_avail = false;
a93e255f 7190 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ab585dea 7191 reduced_clock) {
190f68c5 7192 crtc_state->dpll_hw_state.fp1 = fp2;
f47709a9 7193 crtc->lowfreq_avail = true;
a7516a05 7194 } else {
190f68c5 7195 crtc_state->dpll_hw_state.fp1 = fp;
a7516a05
JB
7196 }
7197}
7198
5e69f97f
CML
7199static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7200 pipe)
89b667f8
JB
7201{
7202 u32 reg_val;
7203
7204 /*
7205 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7206 * and set it to a reasonable value instead.
7207 */
ab3c759a 7208 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8
JB
7209 reg_val &= 0xffffff00;
7210 reg_val |= 0x00000030;
ab3c759a 7211 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7212
ab3c759a 7213 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7214 reg_val &= 0x8cffffff;
7215 reg_val = 0x8c000000;
ab3c759a 7216 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8 7217
ab3c759a 7218 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8 7219 reg_val &= 0xffffff00;
ab3c759a 7220 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7221
ab3c759a 7222 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7223 reg_val &= 0x00ffffff;
7224 reg_val |= 0xb0000000;
ab3c759a 7225 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8
JB
7226}
7227
b551842d
DV
7228static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7229 struct intel_link_m_n *m_n)
7230{
7231 struct drm_device *dev = crtc->base.dev;
7232 struct drm_i915_private *dev_priv = dev->dev_private;
7233 int pipe = crtc->pipe;
7234
e3b95f1e
DV
7235 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7236 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7237 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7238 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
7239}
7240
7241static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
7242 struct intel_link_m_n *m_n,
7243 struct intel_link_m_n *m2_n2)
b551842d
DV
7244{
7245 struct drm_device *dev = crtc->base.dev;
7246 struct drm_i915_private *dev_priv = dev->dev_private;
7247 int pipe = crtc->pipe;
6e3c9717 7248 enum transcoder transcoder = crtc->config->cpu_transcoder;
b551842d
DV
7249
7250 if (INTEL_INFO(dev)->gen >= 5) {
7251 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7252 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7253 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7254 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
f769cd24
VK
7255 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7256 * for gen < 8) and if DRRS is supported (to make sure the
7257 * registers are not unnecessarily accessed).
7258 */
44395bfe 7259 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
6e3c9717 7260 crtc->config->has_drrs) {
f769cd24
VK
7261 I915_WRITE(PIPE_DATA_M2(transcoder),
7262 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7263 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7264 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7265 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7266 }
b551842d 7267 } else {
e3b95f1e
DV
7268 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7269 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7270 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7271 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
7272 }
7273}
7274
fe3cd48d 7275void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
03afc4a2 7276{
fe3cd48d
R
7277 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7278
7279 if (m_n == M1_N1) {
7280 dp_m_n = &crtc->config->dp_m_n;
7281 dp_m2_n2 = &crtc->config->dp_m2_n2;
7282 } else if (m_n == M2_N2) {
7283
7284 /*
7285 * M2_N2 registers are not supported. Hence m2_n2 divider value
7286 * needs to be programmed into M1_N1.
7287 */
7288 dp_m_n = &crtc->config->dp_m2_n2;
7289 } else {
7290 DRM_ERROR("Unsupported divider value\n");
7291 return;
7292 }
7293
6e3c9717
ACO
7294 if (crtc->config->has_pch_encoder)
7295 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
03afc4a2 7296 else
fe3cd48d 7297 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
03afc4a2
DV
7298}
7299
251ac862
DV
7300static void vlv_compute_dpll(struct intel_crtc *crtc,
7301 struct intel_crtc_state *pipe_config)
bdd4b6a6
DV
7302{
7303 u32 dpll, dpll_md;
7304
7305 /*
7306 * Enable DPIO clock input. We should never disable the reference
7307 * clock for pipe B, since VGA hotplug / manual detection depends
7308 * on it.
7309 */
60bfe44f
VS
7310 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
7311 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
bdd4b6a6
DV
7312 /* We should never disable this, set it here for state tracking */
7313 if (crtc->pipe == PIPE_B)
7314 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7315 dpll |= DPLL_VCO_ENABLE;
d288f65f 7316 pipe_config->dpll_hw_state.dpll = dpll;
bdd4b6a6 7317
d288f65f 7318 dpll_md = (pipe_config->pixel_multiplier - 1)
bdd4b6a6 7319 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
d288f65f 7320 pipe_config->dpll_hw_state.dpll_md = dpll_md;
bdd4b6a6
DV
7321}
7322
/*
 * Program the VLV DPIO PHY (via the sideband interface) with the divider
 * values from @pipe_config->dpll, plus the fixed analog tuning values
 * documented in the "eDP HDMI DPIO driver vbios notes" doc. All sideband
 * accesses are serialized under dev_priv->sb_lock. Does not enable the PLL
 * itself; see vlv_enable_pll()/vlv_force_pll_on() for that.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	/* Divider values pre-computed by the clock calculation code */
	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL (clear the top byte, keep the rest) */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Dividers are written first, then calibration is enabled on top */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	/* Core clock tuning: preserve bits 8-15, force the rest */
	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}
7413
251ac862
DV
7414static void chv_compute_dpll(struct intel_crtc *crtc,
7415 struct intel_crtc_state *pipe_config)
1ae0d137 7416{
60bfe44f
VS
7417 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7418 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
1ae0d137
VS
7419 DPLL_VCO_ENABLE;
7420 if (crtc->pipe != PIPE_A)
d288f65f 7421 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
1ae0d137 7422
d288f65f
VS
7423 pipe_config->dpll_hw_state.dpll_md =
7424 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
1ae0d137
VS
7425}
7426
/*
 * Program the CHV DPIO PHY (via the sideband interface) with the divider
 * and loop filter values derived from @pipe_config->dpll. The refclk/SSC
 * enable is written to the DPLL register first (with VCO still disabled);
 * all subsequent sideband accesses are serialized under dev_priv->sb_lock.
 * Does not enable the PLL itself.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* m2 is split: low 22 bits are the fractional part, the rest integer */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable (only when there is a fractional part) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold (coarse sel when no frac) */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter coefficients, selected by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}
7530
d288f65f
VS
7531/**
7532 * vlv_force_pll_on - forcibly enable just the PLL
7533 * @dev_priv: i915 private structure
7534 * @pipe: pipe PLL to enable
7535 * @dpll: PLL configuration
7536 *
7537 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7538 * in cases where we need the PLL enabled even when @pipe is not going to
7539 * be enabled.
7540 */
7541void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7542 const struct dpll *dpll)
7543{
7544 struct intel_crtc *crtc =
7545 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
5cec258b 7546 struct intel_crtc_state pipe_config = {
a93e255f 7547 .base.crtc = &crtc->base,
d288f65f
VS
7548 .pixel_multiplier = 1,
7549 .dpll = *dpll,
7550 };
7551
7552 if (IS_CHERRYVIEW(dev)) {
251ac862 7553 chv_compute_dpll(crtc, &pipe_config);
d288f65f
VS
7554 chv_prepare_pll(crtc, &pipe_config);
7555 chv_enable_pll(crtc, &pipe_config);
7556 } else {
251ac862 7557 vlv_compute_dpll(crtc, &pipe_config);
d288f65f
VS
7558 vlv_prepare_pll(crtc, &pipe_config);
7559 vlv_enable_pll(crtc, &pipe_config);
7560 }
7561}
7562
7563/**
7564 * vlv_force_pll_off - forcibly disable just the PLL
7565 * @dev_priv: i915 private structure
7566 * @pipe: pipe PLL to disable
7567 *
7568 * Disable the PLL for @pipe. To be used in cases where we need
7569 * the PLL enabled even when @pipe is not going to be enabled.
7570 */
7571void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7572{
7573 if (IS_CHERRYVIEW(dev))
7574 chv_disable_pll(to_i915(dev), pipe);
7575 else
7576 vlv_disable_pll(to_i915(dev), pipe);
7577}
7578
251ac862
DV
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for gen3+
 * non-VLV/CHV platforms and store them in crtc_state->dpll_hw_state.
 * Also updates the FP dividers via i9xx_update_pll_dividers().
 * No hardware is touched here.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	/* SDVO and HDMI share the high-speed clock enable bit below */
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Only these platforms carry the pixel multiplier in DPLL itself */
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also requires the high-speed clock */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* downclocked p1 only programmed on g4x when a reduced clock exists */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC (single LVDS), or DREF */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7655
251ac862
DV
/*
 * Compute the DPLL register value for gen2 platforms and store it in
 * crtc_state->dpll_hw_state.dpll. Also updates the FP dividers via
 * i9xx_update_pll_dividers(). No hardware is touched here.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	/* LVDS and non-LVDS encode p1/p2 differently on gen2 */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* 2x mode for DVO everywhere except i830 */
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	/* SSC reference only for a single LVDS connector */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}
7693
/*
 * Write the CRTC's adjusted mode timings (H/V total, blank, sync, plus
 * VSYNCSHIFT and PIPESRC) into the transcoder timing registers. All
 * register fields are programmed as (value - 1) per hardware convention.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		/* SDVO uses a different vsyncshift formula than other outputs */
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}
7761
/*
 * Read back the transcoder timing registers into @pipe_config's
 * adjusted mode (inverse of intel_set_pipe_timings()). Register fields
 * store (value - 1), hence the "+ 1" on every decode. Also undoes the
 * interlace halfline adjustment and fills pipe_src_w/h from PIPESRC.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* hw subtracts a halfline for interlace; add it back for state */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}
7803
f6a83288 7804void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5cec258b 7805 struct intel_crtc_state *pipe_config)
babea61d 7806{
2d112de7
ACO
7807 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7808 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7809 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7810 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
babea61d 7811
2d112de7
ACO
7812 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7813 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7814 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7815 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
babea61d 7816
2d112de7 7817 mode->flags = pipe_config->base.adjusted_mode.flags;
cd13f5ab 7818 mode->type = DRM_MODE_TYPE_DRIVER;
babea61d 7819
2d112de7
ACO
7820 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7821 mode->flags |= pipe_config->base.adjusted_mode.flags;
cd13f5ab
ML
7822
7823 mode->hsync = drm_mode_hsync(mode);
7824 mode->vrefresh = drm_mode_vrefresh(mode);
7825 drm_mode_set_name(mode);
babea61d
JB
7826}
7827
84b046f3
DV
/*
 * Assemble and write the PIPECONF register for a gen2-4/VLV pipe:
 * quirk-forced enable bit, double-wide, bpc/dither (g4x+/VLV only),
 * CxSR downclocking, interlace mode and limited color range (VLV).
 * Ends with a posting read to flush the write.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Quirked pipes must stay enabled: carry over the current enable bit */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen<4 and SDVO need the field indication variant */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7890
190f68c5
ACO
/*
 * Compute clock/PLL state for a gen2-4/VLV/CHV crtc: count the connectors
 * bound to this crtc in the atomic state, find PLL dividers (unless the
 * encoder already set them via clock_set, or the output is DSI which does
 * not use this DPLL), then dispatch to the platform's compute helper.
 *
 * Returns 0 on success, -EINVAL if no PLL settings can be found.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	bool is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* Scan the atomic state for connectors using this crtc */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* DSI has its own dedicated PLL; nothing to compute here */
	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.  The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Platform-specific DPLL register value computation */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}
7970
/*
 * Read back the panel fitter (pfit) state into @pipe_config->gmch_pfit,
 * but only if the fitter is enabled and attached to this crtc's pipe
 * (gen<4 hardware hardwires the fitter to pipe B).
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* No pfit on i830 or on gen<=3 desktop parts */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}
8000
/*
 * Read the VLV PLL divider register over the sideband bus and compute the
 * resulting port clock into @pipe_config->port_clock (100 MHz refclk).
 * Bails out early if the DPLL VCO is not enabled (e.g. MIPI, which does
 * not use this DPLL).
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divider fields (inverse of vlv_prepare_pll packing) */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}
5724dbd1
DL
/*
 * Reconstruct the framebuffer configuration that the firmware/BIOS left
 * enabled on this crtc's primary plane, by decoding the plane control,
 * surface/offset, PIPESRC and stride registers into a freshly allocated
 * intel_framebuffer stored in @plane_config->fb (caller takes ownership).
 * Does nothing if the plane is disabled or the allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling bit only exists on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* gen4+ splits surface base and tile/linear offset registers */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	/* PIPESRC holds (width - 1) << 16 | (height - 1) */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
8096
/*
 * Read the CHV PLL divider registers over the sideband bus and compute
 * the resulting port clock into @pipe_config->port_clock (100 MHz refclk).
 * The fractional part of m2 is only included when the fractional divider
 * is enabled in CHV_PLL_DW3.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack dividers (inverse of chv_prepare_pll packing) */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}
8126
0e8ffe1b 8127static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5cec258b 8128 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
8129{
8130 struct drm_device *dev = crtc->base.dev;
8131 struct drm_i915_private *dev_priv = dev->dev_private;
8132 uint32_t tmp;
8133
f458ebbc
DV
8134 if (!intel_display_power_is_enabled(dev_priv,
8135 POWER_DOMAIN_PIPE(crtc->pipe)))
b5482bd0
ID
8136 return false;
8137
e143a21c 8138 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62 8139 pipe_config->shared_dpll = DPLL_ID_PRIVATE;
eccb140b 8140
0e8ffe1b
DV
8141 tmp = I915_READ(PIPECONF(crtc->pipe));
8142 if (!(tmp & PIPECONF_ENABLE))
8143 return false;
8144
42571aef
VS
8145 if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
8146 switch (tmp & PIPECONF_BPC_MASK) {
8147 case PIPECONF_6BPC:
8148 pipe_config->pipe_bpp = 18;
8149 break;
8150 case PIPECONF_8BPC:
8151 pipe_config->pipe_bpp = 24;
8152 break;
8153 case PIPECONF_10BPC:
8154 pipe_config->pipe_bpp = 30;
8155 break;
8156 default:
8157 break;
8158 }
8159 }
8160
b5a9fa09
DV
8161 if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
8162 pipe_config->limited_color_range = true;
8163
282740f7
VS
8164 if (INTEL_INFO(dev)->gen < 4)
8165 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8166
1bd1bd80
DV
8167 intel_get_pipe_timings(crtc, pipe_config);
8168
2fa2fe9a
DV
8169 i9xx_get_pfit_config(crtc, pipe_config);
8170
6c49f241
DV
8171 if (INTEL_INFO(dev)->gen >= 4) {
8172 tmp = I915_READ(DPLL_MD(crtc->pipe));
8173 pipe_config->pixel_multiplier =
8174 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8175 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 8176 pipe_config->dpll_hw_state.dpll_md = tmp;
6c49f241
DV
8177 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8178 tmp = I915_READ(DPLL(crtc->pipe));
8179 pipe_config->pixel_multiplier =
8180 ((tmp & SDVO_MULTIPLIER_MASK)
8181 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8182 } else {
8183 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8184 * port and will be fixed up in the encoder->get_config
8185 * function. */
8186 pipe_config->pixel_multiplier = 1;
8187 }
8bcc2795
DV
8188 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8189 if (!IS_VALLEYVIEW(dev)) {
1c4e0274
VS
8190 /*
8191 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8192 * on 830. Filter it out here so that we don't
8193 * report errors due to that.
8194 */
8195 if (IS_I830(dev))
8196 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8197
8bcc2795
DV
8198 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8199 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
8200 } else {
8201 /* Mask out read-only status bits. */
8202 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8203 DPLL_PORTC_READY_MASK |
8204 DPLL_PORTB_READY_MASK);
8bcc2795 8205 }
6c49f241 8206
70b23a98
VS
8207 if (IS_CHERRYVIEW(dev))
8208 chv_crtc_clock_get(crtc, pipe_config);
8209 else if (IS_VALLEYVIEW(dev))
acbec814
JB
8210 vlv_crtc_clock_get(crtc, pipe_config);
8211 else
8212 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 8213
0f64614d
VS
8214 /*
8215 * Normally the dotclock is filled in by the encoder .get_config()
8216 * but in case the pipe is enabled w/o any ports we need a sane
8217 * default.
8218 */
8219 pipe_config->base.adjusted_mode.crtc_clock =
8220 pipe_config->port_clock / pipe_config->pixel_multiplier;
8221
0e8ffe1b
DV
8222 return true;
8223}
8224
dde86e2d 8225static void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
8226{
8227 struct drm_i915_private *dev_priv = dev->dev_private;
13d83a67 8228 struct intel_encoder *encoder;
74cfd7ac 8229 u32 val, final;
13d83a67 8230 bool has_lvds = false;
199e5d79 8231 bool has_cpu_edp = false;
199e5d79 8232 bool has_panel = false;
99eb6a01
KP
8233 bool has_ck505 = false;
8234 bool can_ssc = false;
13d83a67
JB
8235
8236 /* We need to take the global config into account */
b2784e15 8237 for_each_intel_encoder(dev, encoder) {
199e5d79
KP
8238 switch (encoder->type) {
8239 case INTEL_OUTPUT_LVDS:
8240 has_panel = true;
8241 has_lvds = true;
8242 break;
8243 case INTEL_OUTPUT_EDP:
8244 has_panel = true;
2de6905f 8245 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
199e5d79
KP
8246 has_cpu_edp = true;
8247 break;
6847d71b
PZ
8248 default:
8249 break;
13d83a67
JB
8250 }
8251 }
8252
99eb6a01 8253 if (HAS_PCH_IBX(dev)) {
41aa3448 8254 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
8255 can_ssc = has_ck505;
8256 } else {
8257 has_ck505 = false;
8258 can_ssc = true;
8259 }
8260
2de6905f
ID
8261 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
8262 has_panel, has_lvds, has_ck505);
13d83a67
JB
8263
8264 /* Ironlake: try to setup display ref clock before DPLL
8265 * enabling. This is only under driver's control after
8266 * PCH B stepping, previous chipset stepping should be
8267 * ignoring this setting.
8268 */
74cfd7ac
CW
8269 val = I915_READ(PCH_DREF_CONTROL);
8270
8271 /* As we must carefully and slowly disable/enable each source in turn,
8272 * compute the final state we want first and check if we need to
8273 * make any changes at all.
8274 */
8275 final = val;
8276 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8277 if (has_ck505)
8278 final |= DREF_NONSPREAD_CK505_ENABLE;
8279 else
8280 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8281
8282 final &= ~DREF_SSC_SOURCE_MASK;
8283 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8284 final &= ~DREF_SSC1_ENABLE;
8285
8286 if (has_panel) {
8287 final |= DREF_SSC_SOURCE_ENABLE;
8288
8289 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8290 final |= DREF_SSC1_ENABLE;
8291
8292 if (has_cpu_edp) {
8293 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8294 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8295 else
8296 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8297 } else
8298 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8299 } else {
8300 final |= DREF_SSC_SOURCE_DISABLE;
8301 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8302 }
8303
8304 if (final == val)
8305 return;
8306
13d83a67 8307 /* Always enable nonspread source */
74cfd7ac 8308 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 8309
99eb6a01 8310 if (has_ck505)
74cfd7ac 8311 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 8312 else
74cfd7ac 8313 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 8314
199e5d79 8315 if (has_panel) {
74cfd7ac
CW
8316 val &= ~DREF_SSC_SOURCE_MASK;
8317 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 8318
199e5d79 8319 /* SSC must be turned on before enabling the CPU output */
99eb6a01 8320 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8321 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 8322 val |= DREF_SSC1_ENABLE;
e77166b5 8323 } else
74cfd7ac 8324 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
8325
8326 /* Get SSC going before enabling the outputs */
74cfd7ac 8327 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8328 POSTING_READ(PCH_DREF_CONTROL);
8329 udelay(200);
8330
74cfd7ac 8331 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
8332
8333 /* Enable CPU source on CPU attached eDP */
199e5d79 8334 if (has_cpu_edp) {
99eb6a01 8335 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8336 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 8337 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
eba905b2 8338 } else
74cfd7ac 8339 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 8340 } else
74cfd7ac 8341 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8342
74cfd7ac 8343 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8344 POSTING_READ(PCH_DREF_CONTROL);
8345 udelay(200);
8346 } else {
8347 DRM_DEBUG_KMS("Disabling SSC entirely\n");
8348
74cfd7ac 8349 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
8350
8351 /* Turn off CPU output */
74cfd7ac 8352 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8353
74cfd7ac 8354 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8355 POSTING_READ(PCH_DREF_CONTROL);
8356 udelay(200);
8357
8358 /* Turn off the SSC source */
74cfd7ac
CW
8359 val &= ~DREF_SSC_SOURCE_MASK;
8360 val |= DREF_SSC_SOURCE_DISABLE;
199e5d79
KP
8361
8362 /* Turn off SSC1 */
74cfd7ac 8363 val &= ~DREF_SSC1_ENABLE;
199e5d79 8364
74cfd7ac 8365 I915_WRITE(PCH_DREF_CONTROL, val);
13d83a67
JB
8366 POSTING_READ(PCH_DREF_CONTROL);
8367 udelay(200);
8368 }
74cfd7ac
CW
8369
8370 BUG_ON(val != final);
13d83a67
JB
8371}
8372
f31f2d55 8373static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 8374{
f31f2d55 8375 uint32_t tmp;
dde86e2d 8376
0ff066a9
PZ
8377 tmp = I915_READ(SOUTH_CHICKEN2);
8378 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8379 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8380
0ff066a9
PZ
8381 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8382 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8383 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 8384
0ff066a9
PZ
8385 tmp = I915_READ(SOUTH_CHICKEN2);
8386 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8387 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8388
0ff066a9
PZ
8389 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8390 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8391 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
8392}
8393
8394/* WaMPhyProgramming:hsw */
8395static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8396{
8397 uint32_t tmp;
dde86e2d
PZ
8398
8399 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8400 tmp &= ~(0xFF << 24);
8401 tmp |= (0x12 << 24);
8402 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8403
dde86e2d
PZ
8404 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8405 tmp |= (1 << 11);
8406 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8407
8408 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8409 tmp |= (1 << 11);
8410 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8411
dde86e2d
PZ
8412 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8413 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8414 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8415
8416 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8417 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8418 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8419
0ff066a9
PZ
8420 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8421 tmp &= ~(7 << 13);
8422 tmp |= (5 << 13);
8423 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 8424
0ff066a9
PZ
8425 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8426 tmp &= ~(7 << 13);
8427 tmp |= (5 << 13);
8428 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
8429
8430 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8431 tmp &= ~0xFF;
8432 tmp |= 0x1C;
8433 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8434
8435 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8436 tmp &= ~0xFF;
8437 tmp |= 0x1C;
8438 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8439
8440 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8441 tmp &= ~(0xFF << 16);
8442 tmp |= (0x1C << 16);
8443 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8444
8445 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8446 tmp &= ~(0xFF << 16);
8447 tmp |= (0x1C << 16);
8448 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8449
0ff066a9
PZ
8450 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8451 tmp |= (1 << 27);
8452 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 8453
0ff066a9
PZ
8454 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8455 tmp |= (1 << 27);
8456 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 8457
0ff066a9
PZ
8458 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8459 tmp &= ~(0xF << 28);
8460 tmp |= (4 << 28);
8461 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 8462
0ff066a9
PZ
8463 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8464 tmp &= ~(0xF << 28);
8465 tmp |= (4 << 28);
8466 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
8467}
8468
2fa86a1f
PZ
8469/* Implements 3 different sequences from BSpec chapter "Display iCLK
8470 * Programming" based on the parameters passed:
8471 * - Sequence to enable CLKOUT_DP
8472 * - Sequence to enable CLKOUT_DP without spread
8473 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8474 */
8475static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8476 bool with_fdi)
f31f2d55
PZ
8477{
8478 struct drm_i915_private *dev_priv = dev->dev_private;
2fa86a1f
PZ
8479 uint32_t reg, tmp;
8480
8481 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8482 with_spread = true;
c2699524 8483 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
2fa86a1f 8484 with_fdi = false;
f31f2d55 8485
a580516d 8486 mutex_lock(&dev_priv->sb_lock);
f31f2d55
PZ
8487
8488 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8489 tmp &= ~SBI_SSCCTL_DISABLE;
8490 tmp |= SBI_SSCCTL_PATHALT;
8491 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8492
8493 udelay(24);
8494
2fa86a1f
PZ
8495 if (with_spread) {
8496 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8497 tmp &= ~SBI_SSCCTL_PATHALT;
8498 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 8499
2fa86a1f
PZ
8500 if (with_fdi) {
8501 lpt_reset_fdi_mphy(dev_priv);
8502 lpt_program_fdi_mphy(dev_priv);
8503 }
8504 }
dde86e2d 8505
c2699524 8506 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
2fa86a1f
PZ
8507 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8508 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8509 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246 8510
a580516d 8511 mutex_unlock(&dev_priv->sb_lock);
dde86e2d
PZ
8512}
8513
47701c3b
PZ
8514/* Sequence to disable CLKOUT_DP */
8515static void lpt_disable_clkout_dp(struct drm_device *dev)
8516{
8517 struct drm_i915_private *dev_priv = dev->dev_private;
8518 uint32_t reg, tmp;
8519
a580516d 8520 mutex_lock(&dev_priv->sb_lock);
47701c3b 8521
c2699524 8522 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
47701c3b
PZ
8523 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8524 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8525 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8526
8527 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8528 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8529 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8530 tmp |= SBI_SSCCTL_PATHALT;
8531 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8532 udelay(32);
8533 }
8534 tmp |= SBI_SSCCTL_DISABLE;
8535 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8536 }
8537
a580516d 8538 mutex_unlock(&dev_priv->sb_lock);
47701c3b
PZ
8539}
8540
bf8fa3d3
PZ
8541static void lpt_init_pch_refclk(struct drm_device *dev)
8542{
bf8fa3d3
PZ
8543 struct intel_encoder *encoder;
8544 bool has_vga = false;
8545
b2784e15 8546 for_each_intel_encoder(dev, encoder) {
bf8fa3d3
PZ
8547 switch (encoder->type) {
8548 case INTEL_OUTPUT_ANALOG:
8549 has_vga = true;
8550 break;
6847d71b
PZ
8551 default:
8552 break;
bf8fa3d3
PZ
8553 }
8554 }
8555
47701c3b
PZ
8556 if (has_vga)
8557 lpt_enable_clkout_dp(dev, true, true);
8558 else
8559 lpt_disable_clkout_dp(dev);
bf8fa3d3
PZ
8560}
8561
dde86e2d
PZ
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/* Dispatch on PCH generation; other PCHs need no refclk setup here */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8572
55bb9992 8573static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
d9d444cb 8574{
55bb9992 8575 struct drm_device *dev = crtc_state->base.crtc->dev;
d9d444cb 8576 struct drm_i915_private *dev_priv = dev->dev_private;
55bb9992 8577 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 8578 struct drm_connector *connector;
55bb9992 8579 struct drm_connector_state *connector_state;
d9d444cb 8580 struct intel_encoder *encoder;
55bb9992 8581 int num_connectors = 0, i;
d9d444cb
JB
8582 bool is_lvds = false;
8583
da3ced29 8584 for_each_connector_in_state(state, connector, connector_state, i) {
55bb9992
ACO
8585 if (connector_state->crtc != crtc_state->base.crtc)
8586 continue;
8587
8588 encoder = to_intel_encoder(connector_state->best_encoder);
8589
d9d444cb
JB
8590 switch (encoder->type) {
8591 case INTEL_OUTPUT_LVDS:
8592 is_lvds = true;
8593 break;
6847d71b
PZ
8594 default:
8595 break;
d9d444cb
JB
8596 }
8597 num_connectors++;
8598 }
8599
8600 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
e91e941b 8601 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
41aa3448 8602 dev_priv->vbt.lvds_ssc_freq);
e91e941b 8603 return dev_priv->vbt.lvds_ssc_freq;
d9d444cb
JB
8604 }
8605
8606 return 120000;
8607}
8608
6ff93609 8609static void ironlake_set_pipeconf(struct drm_crtc *crtc)
79e53945 8610{
c8203565 8611 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
79e53945
JB
8612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8613 int pipe = intel_crtc->pipe;
c8203565
PZ
8614 uint32_t val;
8615
78114071 8616 val = 0;
c8203565 8617
6e3c9717 8618 switch (intel_crtc->config->pipe_bpp) {
c8203565 8619 case 18:
dfd07d72 8620 val |= PIPECONF_6BPC;
c8203565
PZ
8621 break;
8622 case 24:
dfd07d72 8623 val |= PIPECONF_8BPC;
c8203565
PZ
8624 break;
8625 case 30:
dfd07d72 8626 val |= PIPECONF_10BPC;
c8203565
PZ
8627 break;
8628 case 36:
dfd07d72 8629 val |= PIPECONF_12BPC;
c8203565
PZ
8630 break;
8631 default:
cc769b62
PZ
8632 /* Case prevented by intel_choose_pipe_bpp_dither. */
8633 BUG();
c8203565
PZ
8634 }
8635
6e3c9717 8636 if (intel_crtc->config->dither)
c8203565
PZ
8637 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8638
6e3c9717 8639 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
8640 val |= PIPECONF_INTERLACED_ILK;
8641 else
8642 val |= PIPECONF_PROGRESSIVE;
8643
6e3c9717 8644 if (intel_crtc->config->limited_color_range)
3685a8f3 8645 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 8646
c8203565
PZ
8647 I915_WRITE(PIPECONF(pipe), val);
8648 POSTING_READ(PIPECONF(pipe));
8649}
8650
86d3efce
VS
8651/*
8652 * Set up the pipe CSC unit.
8653 *
8654 * Currently only full range RGB to limited range RGB conversion
8655 * is supported, but eventually this should handle various
8656 * RGB<->YCbCr scenarios as well.
8657 */
50f3b016 8658static void intel_set_pipe_csc(struct drm_crtc *crtc)
86d3efce
VS
8659{
8660 struct drm_device *dev = crtc->dev;
8661 struct drm_i915_private *dev_priv = dev->dev_private;
8662 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8663 int pipe = intel_crtc->pipe;
8664 uint16_t coeff = 0x7800; /* 1.0 */
8665
8666 /*
8667 * TODO: Check what kind of values actually come out of the pipe
8668 * with these coeff/postoff values and adjust to get the best
8669 * accuracy. Perhaps we even need to take the bpc value into
8670 * consideration.
8671 */
8672
6e3c9717 8673 if (intel_crtc->config->limited_color_range)
86d3efce
VS
8674 coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
8675
8676 /*
8677 * GY/GU and RY/RU should be the other way around according
8678 * to BSpec, but reality doesn't agree. Just set them up in
8679 * a way that results in the correct picture.
8680 */
8681 I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
8682 I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
8683
8684 I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
8685 I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
8686
8687 I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
8688 I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
8689
8690 I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
8691 I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
8692 I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
8693
8694 if (INTEL_INFO(dev)->gen > 6) {
8695 uint16_t postoff = 0;
8696
6e3c9717 8697 if (intel_crtc->config->limited_color_range)
32cf0cb0 8698 postoff = (16 * (1 << 12) / 255) & 0x1fff;
86d3efce
VS
8699
8700 I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
8701 I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
8702 I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
8703
8704 I915_WRITE(PIPE_CSC_MODE(pipe), 0);
8705 } else {
8706 uint32_t mode = CSC_MODE_YUV_TO_RGB;
8707
6e3c9717 8708 if (intel_crtc->config->limited_color_range)
86d3efce
VS
8709 mode |= CSC_BLACK_SCREEN_OFFSET;
8710
8711 I915_WRITE(PIPE_CSC_MODE(pipe), mode);
8712 }
8713}
8714
6ff93609 8715static void haswell_set_pipeconf(struct drm_crtc *crtc)
ee2b0b38 8716{
756f85cf
PZ
8717 struct drm_device *dev = crtc->dev;
8718 struct drm_i915_private *dev_priv = dev->dev_private;
ee2b0b38 8719 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756f85cf 8720 enum pipe pipe = intel_crtc->pipe;
6e3c9717 8721 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
ee2b0b38
PZ
8722 uint32_t val;
8723
3eff4faa 8724 val = 0;
ee2b0b38 8725
6e3c9717 8726 if (IS_HASWELL(dev) && intel_crtc->config->dither)
ee2b0b38
PZ
8727 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8728
6e3c9717 8729 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
8730 val |= PIPECONF_INTERLACED_ILK;
8731 else
8732 val |= PIPECONF_PROGRESSIVE;
8733
702e7a56
PZ
8734 I915_WRITE(PIPECONF(cpu_transcoder), val);
8735 POSTING_READ(PIPECONF(cpu_transcoder));
3eff4faa
DV
8736
8737 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
8738 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
756f85cf 8739
3cdf122c 8740 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
756f85cf
PZ
8741 val = 0;
8742
6e3c9717 8743 switch (intel_crtc->config->pipe_bpp) {
756f85cf
PZ
8744 case 18:
8745 val |= PIPEMISC_DITHER_6_BPC;
8746 break;
8747 case 24:
8748 val |= PIPEMISC_DITHER_8_BPC;
8749 break;
8750 case 30:
8751 val |= PIPEMISC_DITHER_10_BPC;
8752 break;
8753 case 36:
8754 val |= PIPEMISC_DITHER_12_BPC;
8755 break;
8756 default:
8757 /* Case prevented by pipe_config_set_bpp. */
8758 BUG();
8759 }
8760
6e3c9717 8761 if (intel_crtc->config->dither)
756f85cf
PZ
8762 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8763
8764 I915_WRITE(PIPEMISC(pipe), val);
8765 }
ee2b0b38
PZ
8766}
8767
6591c6e4 8768static bool ironlake_compute_clocks(struct drm_crtc *crtc,
190f68c5 8769 struct intel_crtc_state *crtc_state,
6591c6e4
PZ
8770 intel_clock_t *clock,
8771 bool *has_reduced_clock,
8772 intel_clock_t *reduced_clock)
8773{
8774 struct drm_device *dev = crtc->dev;
8775 struct drm_i915_private *dev_priv = dev->dev_private;
6591c6e4 8776 int refclk;
d4906093 8777 const intel_limit_t *limit;
c329a4ec 8778 bool ret;
79e53945 8779
55bb9992 8780 refclk = ironlake_get_refclk(crtc_state);
79e53945 8781
d4906093
ML
8782 /*
8783 * Returns a set of divisors for the desired target clock with the given
8784 * refclk, or FALSE. The returned values represent the clock equation:
8785 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
8786 */
a93e255f
ACO
8787 limit = intel_limit(crtc_state, refclk);
8788 ret = dev_priv->display.find_dpll(limit, crtc_state,
190f68c5 8789 crtc_state->port_clock,
ee9300bb 8790 refclk, NULL, clock);
6591c6e4
PZ
8791 if (!ret)
8792 return false;
cda4b7d3 8793
6591c6e4
PZ
8794 return true;
8795}
8796
d4b1931c
PZ
8797int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8798{
8799 /*
8800 * Account for spread spectrum to avoid
8801 * oversubscribing the link. Max center spread
8802 * is 2.5%; use 5% for safety's sake.
8803 */
8804 u32 bps = target_clock * bpp * 21 / 20;
619d4d04 8805 return DIV_ROUND_UP(bps, link_bw * 8);
d4b1931c
PZ
8806}
8807
7429e9d4 8808static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 8809{
7429e9d4 8810 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
8811}
8812
de13a2e3 8813static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
190f68c5 8814 struct intel_crtc_state *crtc_state,
7429e9d4 8815 u32 *fp,
9a7c7890 8816 intel_clock_t *reduced_clock, u32 *fp2)
79e53945 8817{
de13a2e3 8818 struct drm_crtc *crtc = &intel_crtc->base;
79e53945
JB
8819 struct drm_device *dev = crtc->dev;
8820 struct drm_i915_private *dev_priv = dev->dev_private;
55bb9992 8821 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 8822 struct drm_connector *connector;
55bb9992
ACO
8823 struct drm_connector_state *connector_state;
8824 struct intel_encoder *encoder;
de13a2e3 8825 uint32_t dpll;
55bb9992 8826 int factor, num_connectors = 0, i;
09ede541 8827 bool is_lvds = false, is_sdvo = false;
79e53945 8828
da3ced29 8829 for_each_connector_in_state(state, connector, connector_state, i) {
55bb9992
ACO
8830 if (connector_state->crtc != crtc_state->base.crtc)
8831 continue;
8832
8833 encoder = to_intel_encoder(connector_state->best_encoder);
8834
8835 switch (encoder->type) {
79e53945
JB
8836 case INTEL_OUTPUT_LVDS:
8837 is_lvds = true;
8838 break;
8839 case INTEL_OUTPUT_SDVO:
7d57382e 8840 case INTEL_OUTPUT_HDMI:
79e53945 8841 is_sdvo = true;
79e53945 8842 break;
6847d71b
PZ
8843 default:
8844 break;
79e53945 8845 }
43565a06 8846
c751ce4f 8847 num_connectors++;
79e53945 8848 }
79e53945 8849
c1858123 8850 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
8851 factor = 21;
8852 if (is_lvds) {
8853 if ((intel_panel_use_ssc(dev_priv) &&
e91e941b 8854 dev_priv->vbt.lvds_ssc_freq == 100000) ||
f0b44056 8855 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8febb297 8856 factor = 25;
190f68c5 8857 } else if (crtc_state->sdvo_tv_clock)
8febb297 8858 factor = 20;
c1858123 8859
190f68c5 8860 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
7d0ac5b7 8861 *fp |= FP_CB_TUNE;
2c07245f 8862
9a7c7890
DV
8863 if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
8864 *fp2 |= FP_CB_TUNE;
8865
5eddb70b 8866 dpll = 0;
2c07245f 8867
a07d6787
EA
8868 if (is_lvds)
8869 dpll |= DPLLB_MODE_LVDS;
8870 else
8871 dpll |= DPLLB_MODE_DAC_SERIAL;
198a037f 8872
190f68c5 8873 dpll |= (crtc_state->pixel_multiplier - 1)
ef1b460d 8874 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
198a037f
DV
8875
8876 if (is_sdvo)
4a33e48d 8877 dpll |= DPLL_SDVO_HIGH_SPEED;
190f68c5 8878 if (crtc_state->has_dp_encoder)
4a33e48d 8879 dpll |= DPLL_SDVO_HIGH_SPEED;
79e53945 8880
a07d6787 8881 /* compute bitmask from p1 value */
190f68c5 8882 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 8883 /* also FPA1 */
190f68c5 8884 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
a07d6787 8885
190f68c5 8886 switch (crtc_state->dpll.p2) {
a07d6787
EA
8887 case 5:
8888 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8889 break;
8890 case 7:
8891 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8892 break;
8893 case 10:
8894 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8895 break;
8896 case 14:
8897 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8898 break;
79e53945
JB
8899 }
8900
b4c09f3b 8901 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 8902 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
8903 else
8904 dpll |= PLL_REF_INPUT_DREFCLK;
8905
959e16d6 8906 return dpll | DPLL_VCO_ENABLE;
de13a2e3
PZ
8907}
8908
190f68c5
ACO
8909static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8910 struct intel_crtc_state *crtc_state)
de13a2e3 8911{
c7653199 8912 struct drm_device *dev = crtc->base.dev;
de13a2e3 8913 intel_clock_t clock, reduced_clock;
cbbab5bd 8914 u32 dpll = 0, fp = 0, fp2 = 0;
e2f12b07 8915 bool ok, has_reduced_clock = false;
8b47047b 8916 bool is_lvds = false;
e2b78267 8917 struct intel_shared_dpll *pll;
de13a2e3 8918
dd3cd74a
ACO
8919 memset(&crtc_state->dpll_hw_state, 0,
8920 sizeof(crtc_state->dpll_hw_state));
8921
409ee761 8922 is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
79e53945 8923
5dc5298b
PZ
8924 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
8925 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
a07d6787 8926
190f68c5 8927 ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
de13a2e3 8928 &has_reduced_clock, &reduced_clock);
190f68c5 8929 if (!ok && !crtc_state->clock_set) {
de13a2e3
PZ
8930 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8931 return -EINVAL;
79e53945 8932 }
f47709a9 8933 /* Compat-code for transition, will disappear. */
190f68c5
ACO
8934 if (!crtc_state->clock_set) {
8935 crtc_state->dpll.n = clock.n;
8936 crtc_state->dpll.m1 = clock.m1;
8937 crtc_state->dpll.m2 = clock.m2;
8938 crtc_state->dpll.p1 = clock.p1;
8939 crtc_state->dpll.p2 = clock.p2;
f47709a9 8940 }
79e53945 8941
5dc5298b 8942 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
190f68c5
ACO
8943 if (crtc_state->has_pch_encoder) {
8944 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
cbbab5bd 8945 if (has_reduced_clock)
7429e9d4 8946 fp2 = i9xx_dpll_compute_fp(&reduced_clock);
cbbab5bd 8947
190f68c5 8948 dpll = ironlake_compute_dpll(crtc, crtc_state,
cbbab5bd
DV
8949 &fp, &reduced_clock,
8950 has_reduced_clock ? &fp2 : NULL);
8951
190f68c5
ACO
8952 crtc_state->dpll_hw_state.dpll = dpll;
8953 crtc_state->dpll_hw_state.fp0 = fp;
66e985c0 8954 if (has_reduced_clock)
190f68c5 8955 crtc_state->dpll_hw_state.fp1 = fp2;
66e985c0 8956 else
190f68c5 8957 crtc_state->dpll_hw_state.fp1 = fp;
66e985c0 8958
190f68c5 8959 pll = intel_get_shared_dpll(crtc, crtc_state);
ee7b9f93 8960 if (pll == NULL) {
84f44ce7 8961 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
c7653199 8962 pipe_name(crtc->pipe));
4b645f14
JB
8963 return -EINVAL;
8964 }
3fb37703 8965 }
79e53945 8966
ab585dea 8967 if (is_lvds && has_reduced_clock)
c7653199 8968 crtc->lowfreq_avail = true;
bcd644e0 8969 else
c7653199 8970 crtc->lowfreq_avail = false;
e2b78267 8971
c8f7a0db 8972 return 0;
79e53945
JB
8973}
8974
eb14cb74
VS
8975static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8976 struct intel_link_m_n *m_n)
8977{
8978 struct drm_device *dev = crtc->base.dev;
8979 struct drm_i915_private *dev_priv = dev->dev_private;
8980 enum pipe pipe = crtc->pipe;
8981
8982 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8983 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8984 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8985 & ~TU_SIZE_MASK;
8986 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8987 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8988 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8989}
8990
8991static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8992 enum transcoder transcoder,
b95af8be
VK
8993 struct intel_link_m_n *m_n,
8994 struct intel_link_m_n *m2_n2)
72419203
DV
8995{
8996 struct drm_device *dev = crtc->base.dev;
8997 struct drm_i915_private *dev_priv = dev->dev_private;
eb14cb74 8998 enum pipe pipe = crtc->pipe;
72419203 8999
eb14cb74
VS
9000 if (INTEL_INFO(dev)->gen >= 5) {
9001 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9002 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9003 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9004 & ~TU_SIZE_MASK;
9005 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9006 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9007 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
b95af8be
VK
9008 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
9009 * gen < 8) and if DRRS is supported (to make sure the
9010 * registers are not unnecessarily read).
9011 */
9012 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
6e3c9717 9013 crtc->config->has_drrs) {
b95af8be
VK
9014 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9015 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9016 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9017 & ~TU_SIZE_MASK;
9018 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9019 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9020 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9021 }
eb14cb74
VS
9022 } else {
9023 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9024 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9025 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9026 & ~TU_SIZE_MASK;
9027 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9028 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9029 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9030 }
9031}
9032
9033void intel_dp_get_m_n(struct intel_crtc *crtc,
5cec258b 9034 struct intel_crtc_state *pipe_config)
eb14cb74 9035{
681a8504 9036 if (pipe_config->has_pch_encoder)
eb14cb74
VS
9037 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9038 else
9039 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be
VK
9040 &pipe_config->dp_m_n,
9041 &pipe_config->dp_m2_n2);
eb14cb74 9042}
72419203 9043
eb14cb74 9044static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
5cec258b 9045 struct intel_crtc_state *pipe_config)
eb14cb74
VS
9046{
9047 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be 9048 &pipe_config->fdi_m_n, NULL);
72419203
DV
9049}
9050
/*
 * skylake_get_pfit_config - read back panel fitter state on SKL+.
 *
 * On SKL the panel fitter is implemented with the pipe scalers: scan all
 * scalers of this pipe and pick the first one that is enabled and bound
 * to the pipe (PS_PLANE_SEL_MASK clear, i.e. not a plane scaler).  The
 * found scaler id is recorded in scaler_state and the SKL_CRTC_INDEX bit
 * of scaler_users is set/cleared accordingly.
 */
bd2e244f 9051static void skylake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9052				    struct intel_crtc_state *pipe_config)
bd2e244f
JB
9053{
9054	struct drm_device *dev = crtc->base.dev;
9055	struct drm_i915_private *dev_priv = dev->dev_private;
a1b2278e
CK
9056	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9057	uint32_t ps_ctrl = 0;
9058	int id = -1;
9059	int i;
bd2e244f 9060
a1b2278e
CK
9061	/* find scaler attached to this pipe */
9062	for (i = 0; i < crtc->num_scalers; i++) {
9063		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9064		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9065			id = i;
9066			pipe_config->pch_pfit.enabled = true;
9067			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9068			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9069			break;
9070		}
9071	}
bd2e244f 9072
a1b2278e
CK
	/* id stays -1 when no pipe-bound scaler is enabled */
9073	scaler_state->scaler_id = id;
9074	if (id >= 0) {
9075		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9076	} else {
9077		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
bd2e244f
JB
9078	}
9079}
9080
/*
 * skylake_get_initial_plane_config - read back the framebuffer the BIOS
 * left enabled on primary plane 0 of this pipe (SKL+ universal planes).
 *
 * Allocates an intel_framebuffer, fills width/height/format/tiling/stride
 * from the PLANE_* registers, and stores base address and size in
 * @plane_config so the boot framebuffer can be inherited.  Bails out
 * (freeing the fb) if the plane is disabled or the tiling is unknown.
 */
5724dbd1
DL
9081static void
9082skylake_get_initial_plane_config(struct intel_crtc *crtc,
9083				 struct intel_initial_plane_config *plane_config)
bc8d7dff
DL
9084{
9085	struct drm_device *dev = crtc->base.dev;
9086	struct drm_i915_private *dev_priv = dev->dev_private;
40f46283 9087	u32 val, base, offset, stride_mult, tiling;
bc8d7dff
DL
9088	int pipe = crtc->pipe;
9089	int fourcc, pixel_format;
6761dd31 9090	unsigned int aligned_height;
bc8d7dff 9091	struct drm_framebuffer *fb;
1b842c89 9092	struct intel_framebuffer *intel_fb;
bc8d7dff 9093
d9806c9f 9094	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9095	if (!intel_fb) {
bc8d7dff
DL
9096		DRM_DEBUG_KMS("failed to alloc fb\n");
9097		return;
9098	}
9099
1b842c89
DL
9100	fb = &intel_fb->base;
9101
bc8d7dff 9102	val = I915_READ(PLANE_CTL(pipe, 0));
42a7b088
DL
9103	if (!(val & PLANE_CTL_ENABLE))
9104		goto error;
9105
bc8d7dff
DL
9106	pixel_format = val & PLANE_CTL_FORMAT_MASK;
9107	fourcc = skl_format_to_fourcc(pixel_format,
9108				      val & PLANE_CTL_ORDER_RGBX,
9109				      val & PLANE_CTL_ALPHA_MASK);
9110	fb->pixel_format = fourcc;
9111	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9112
40f46283
DL
9113	tiling = val & PLANE_CTL_TILED_MASK;
9114	switch (tiling) {
9115	case PLANE_CTL_TILED_LINEAR:
9116		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9117		break;
9118	case PLANE_CTL_TILED_X:
9119		plane_config->tiling = I915_TILING_X;
9120		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9121		break;
9122	case PLANE_CTL_TILED_Y:
9123		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9124		break;
9125	case PLANE_CTL_TILED_YF:
9126		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9127		break;
9128	default:
9129		MISSING_CASE(tiling);
9130		goto error;
9131	}
9132
bc8d7dff
DL
	/* surface base is 4k-aligned; low bits of PLANE_SURF are flags */
9133	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9134	plane_config->base = base;
9135
9136	offset = I915_READ(PLANE_OFFSET(pipe, 0));
9137
9138	val = I915_READ(PLANE_SIZE(pipe, 0));
9139	fb->height = ((val >> 16) & 0xfff) + 1;
9140	fb->width = ((val >> 0) & 0x1fff) + 1;
9141
9142	val = I915_READ(PLANE_STRIDE(pipe, 0));
40f46283
DL
9143	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
9144						fb->pixel_format);
bc8d7dff
DL
9145	fb->pitches[0] = (val & 0x3ff) * stride_mult;
9146
9147	aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
9148					       fb->pixel_format,
9149					       fb->modifier[0]);
bc8d7dff 9150
f37b5c2b 9151	plane_config->size = fb->pitches[0] * aligned_height;
bc8d7dff
DL
9152
9153	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9154		      pipe_name(pipe), fb->width, fb->height,
9155		      fb->bits_per_pixel, base, fb->pitches[0],
9156		      plane_config->size);
9157
2d14030b 9158	plane_config->fb = intel_fb;
bc8d7dff
DL
9159	return;
9160
9161error:
	/* fb points at intel_fb->base, so this frees the allocation above */
9162	kfree(fb);
9163}
9164
/*
 * ironlake_get_pfit_config - read back PCH panel fitter state (ILK-BDW).
 *
 * Fills pch_pfit position/size from PF_WIN_* when the fitter is enabled.
 * On gen7 also sanity-checks that the fitter is wired to this pipe, since
 * the driver never reassigns panel fitters there.
 */
2fa2fe9a 9165static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9166				     struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
9167{
9168	struct drm_device *dev = crtc->base.dev;
9169	struct drm_i915_private *dev_priv = dev->dev_private;
9170	uint32_t tmp;
9171
9172	tmp = I915_READ(PF_CTL(crtc->pipe));
9173
9174	if (tmp & PF_ENABLE) {
fd4daa9c 9175		pipe_config->pch_pfit.enabled = true;
2fa2fe9a
DV
9176		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9177		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
cb8b2a30
DV
9178
9179		/* We currently do not free assignements of panel fitters on
9180		 * ivb/hsw (since we don't use the higher upscaling modes which
9181		 * differentiates them) so just WARN about this case for now. */
9182		if (IS_GEN7(dev)) {
9183			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9184				PF_PIPE_SEL_IVB(crtc->pipe));
9185		}
2fa2fe9a 9186	}
79e53945
JB
9187}
9188
/*
 * ironlake_get_initial_plane_config - read back the BIOS framebuffer on
 * the primary plane for ILK..BDW (DSP* register layout).
 *
 * Mirrors skylake_get_initial_plane_config() for the pre-SKL register
 * set: checks DSPCNTR enable first (before allocating), then fills
 * format, tiling, size, stride and base into @plane_config.
 */
5724dbd1
DL
9189static void
9190ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9191				  struct intel_initial_plane_config *plane_config)
4c6baa59
JB
9192{
9193	struct drm_device *dev = crtc->base.dev;
9194	struct drm_i915_private *dev_priv = dev->dev_private;
9195	u32 val, base, offset;
aeee5a49 9196	int pipe = crtc->pipe;
4c6baa59 9197	int fourcc, pixel_format;
6761dd31 9198	unsigned int aligned_height;
b113d5ee 9199	struct drm_framebuffer *fb;
1b842c89 9200	struct intel_framebuffer *intel_fb;
4c6baa59 9201
42a7b088
DL
9202	val = I915_READ(DSPCNTR(pipe));
9203	if (!(val & DISPLAY_PLANE_ENABLE))
9204		return;
9205
d9806c9f 9206	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9207	if (!intel_fb) {
4c6baa59
JB
9208		DRM_DEBUG_KMS("failed to alloc fb\n");
9209		return;
9210	}
9211
1b842c89
DL
9212	fb = &intel_fb->base;
9213
18c5247e
DV
9214	if (INTEL_INFO(dev)->gen >= 4) {
9215		if (val & DISPPLANE_TILED) {
49af449b 9216			plane_config->tiling = I915_TILING_X;
18c5247e
DV
9217			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9218		}
9219	}
4c6baa59
JB
9220
9221	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 9222	fourcc = i9xx_format_to_fourcc(pixel_format);
b113d5ee
DL
9223	fb->pixel_format = fourcc;
9224	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
4c6baa59 9225
aeee5a49 9226	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
4c6baa59 9227	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		/* HSW/BDW use a combined offset register */
aeee5a49 9228		offset = I915_READ(DSPOFFSET(pipe));
4c6baa59 9229	} else {
49af449b 9230		if (plane_config->tiling)
aeee5a49 9231			offset = I915_READ(DSPTILEOFF(pipe));
4c6baa59 9232		else
aeee5a49 9233			offset = I915_READ(DSPLINOFF(pipe));
4c6baa59
JB
9234	}
9235	plane_config->base = base;
9236
	/* plane size is taken from the pipe source size */
9237	val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
9238	fb->width = ((val >> 16) & 0xfff) + 1;
9239	fb->height = ((val >> 0) & 0xfff) + 1;
4c6baa59
JB
9240
9241	val = I915_READ(DSPSTRIDE(pipe));
b113d5ee 9242	fb->pitches[0] = val & 0xffffffc0;
4c6baa59 9243
b113d5ee 9244	aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
9245					       fb->pixel_format,
9246					       fb->modifier[0]);
4c6baa59 9247
f37b5c2b 9248	plane_config->size = fb->pitches[0] * aligned_height;
4c6baa59 9249
2844a921
DL
9250	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9251		      pipe_name(pipe), fb->width, fb->height,
9252		      fb->bits_per_pixel, base, fb->pitches[0],
9253		      plane_config->size);
b113d5ee 9254
2d14030b 9255	plane_config->fb = intel_fb;
4c6baa59
JB
9256}
9257
/*
 * ironlake_get_pipe_config - read back full pipe hw state on ILK-IVB.
 *
 * Returns false if the pipe's power domain is off or PIPECONF is
 * disabled; otherwise fills @pipe_config (bpp, color range, PCH/FDI
 * state, shared DPLL, pixel multiplier, timings, panel fitter) from the
 * hardware and returns true.
 */
0e8ffe1b 9258static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9259				     struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
9260{
9261	struct drm_device *dev = crtc->base.dev;
9262	struct drm_i915_private *dev_priv = dev->dev_private;
9263	uint32_t tmp;
9264
f458ebbc
DV
9265	if (!intel_display_power_is_enabled(dev_priv,
9266					    POWER_DOMAIN_PIPE(crtc->pipe)))
930e8c9e
PZ
9267		return false;
9268
e143a21c 9269	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62 9270	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
eccb140b 9271
0e8ffe1b
DV
9272	tmp = I915_READ(PIPECONF(crtc->pipe));
9273	if (!(tmp & PIPECONF_ENABLE))
9274		return false;
9275
42571aef
VS
9276	switch (tmp & PIPECONF_BPC_MASK) {
9277	case PIPECONF_6BPC:
9278		pipe_config->pipe_bpp = 18;
9279		break;
9280	case PIPECONF_8BPC:
9281		pipe_config->pipe_bpp = 24;
9282		break;
9283	case PIPECONF_10BPC:
9284		pipe_config->pipe_bpp = 30;
9285		break;
9286	case PIPECONF_12BPC:
9287		pipe_config->pipe_bpp = 36;
9288		break;
9289	default:
9290		break;
9291	}
9292
b5a9fa09
DV
9293	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9294		pipe_config->limited_color_range = true;
9295
ab9412ba 9296	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
66e985c0
DV
9297		struct intel_shared_dpll *pll;
9298
88adfff1
DV
9299		pipe_config->has_pch_encoder = true;
9300
627eb5a3
DV
9301		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9302		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9303					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
9304
9305		ironlake_get_fdi_m_n_config(crtc, pipe_config);
6c49f241 9306
c0d43d62 9307		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe -> PLL mapping */
d94ab068
DV
9308			pipe_config->shared_dpll =
9309				(enum intel_dpll_id) crtc->pipe;
c0d43d62
DV
9310		} else {
9311			tmp = I915_READ(PCH_DPLL_SEL);
9312			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9313				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
9314			else
9315				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
9316		}
66e985c0
DV
9317
9318		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9319
9320		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9321					   &pipe_config->dpll_hw_state));
c93f54cf
DV
9322
9323		tmp = pipe_config->dpll_hw_state.dpll;
9324		pipe_config->pixel_multiplier =
9325			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9326			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
18442d08
VS
9327
9328		ironlake_pch_clock_get(crtc, pipe_config);
6c49f241
DV
9329	} else {
9330		pipe_config->pixel_multiplier = 1;
627eb5a3
DV
9331	}
9332
1bd1bd80
DV
9333	intel_get_pipe_timings(crtc, pipe_config);
9334
2fa2fe9a
DV
9335	ironlake_get_pfit_config(crtc, pipe_config);
9336
0e8ffe1b
DV
9337	return true;
9338}
9339
/*
 * assert_can_disable_lcpll - sanity checks before disabling LCPLL.
 *
 * Emits I915_STATE_WARNs if anything that depends on LCPLL is still
 * active: any CRTC, the power well, SPLL/WRPLLs, panel power, CPU/PCH
 * backlight PWMs, the utility pin, PCH GTC, or (see comment below) IRQs.
 */
be256dc7
PZ
9340static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9341{
9342	struct drm_device *dev = dev_priv->dev;
be256dc7 9343	struct intel_crtc *crtc;
be256dc7 9344
d3fcc808 9345	for_each_intel_crtc(dev, crtc)
e2c719b7 9346		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
be256dc7
PZ
9347		     pipe_name(crtc->pipe));
9348
e2c719b7
RC
9349	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9350	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
01403de3
VS
9351	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9352	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
e2c719b7
RC
9353	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9354	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
be256dc7 9355	     "CPU PWM1 enabled\n");
c5107b87 9356	if (IS_HASWELL(dev))
e2c719b7 9357		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
c5107b87 9358		     "CPU PWM2 enabled\n");
e2c719b7 9359	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
be256dc7 9360	     "PCH PWM1 enabled\n");
e2c719b7 9361	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
be256dc7 9362	     "Utility pin enabled\n");
e2c719b7 9363	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
be256dc7 9364
9926ada1
PZ
9365	/*
9366	 * In theory we can still leave IRQs enabled, as long as only the HPD
9367	 * interrupts remain enabled. We used to check for that, but since it's
9368	 * gen-specific and since we only disable LCPLL after we fully disable
9369	 * the interrupts, the check below should be enough.
9370	 */
e2c719b7 9371	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
be256dc7
PZ
9372}
9373
/*
 * hsw_read_dcomp - read the D_COMP register; it lives at a different
 * address on HSW (D_COMP_HSW) than on BDW (D_COMP_BDW).
 */
9ccd5aeb
PZ
9374static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9375{
9376	struct drm_device *dev = dev_priv->dev;
9377
9378	if (IS_HASWELL(dev))
9379		return I915_READ(D_COMP_HSW);
9380	else
9381		return I915_READ(D_COMP_BDW);
9382}
9383
/*
 * hsw_write_dcomp - write the D_COMP register.
 *
 * On HSW the register must be written via the pcode mailbox (under the
 * rps hw_lock); on BDW it is a plain MMIO write with a posting read.
 */
3c4c9b81
PZ
9384static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9385{
9386	struct drm_device *dev = dev_priv->dev;
9387
9388	if (IS_HASWELL(dev)) {
9389		mutex_lock(&dev_priv->rps.hw_lock);
9390		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9391					    val))
f475dadf 9392			DRM_ERROR("Failed to write to D_COMP\n");
3c4c9b81
PZ
9393		mutex_unlock(&dev_priv->rps.hw_lock);
9394	} else {
9ccd5aeb
PZ
9395		I915_WRITE(D_COMP_BDW, val);
9396		POSTING_READ(D_COMP_BDW);
3c4c9b81 9397	}
be256dc7
PZ
9398}
9399
9400/*
9401 * This function implements pieces of two sequences from BSpec:
9402 * - Sequence for display software to disable LCPLL
9403 * - Sequence for display software to allow package C8+
9404 * The steps implemented here are just the steps that actually touch the LCPLL
9405 * register. Callers should take care of disabling all the display engine
9406 * functions, doing the mode unset, fixing interrupts, etc.
9407 */
6ff58d53
PZ
9408static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9409			      bool switch_to_fclk, bool allow_power_down)
be256dc7
PZ
9410{
9411	uint32_t val;
9412
9413	assert_can_disable_lcpll(dev_priv);
9414
9415	val = I915_READ(LCPLL_CTL);
9416
9417	if (switch_to_fclk) {
		/* move cdclk off LCPLL before shutting the PLL down */
9418		val |= LCPLL_CD_SOURCE_FCLK;
9419		I915_WRITE(LCPLL_CTL, val);
9420
9421		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9422				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9423			DRM_ERROR("Switching to FCLK failed\n");
9424
9425		val = I915_READ(LCPLL_CTL);
9426	}
9427
9428	val |= LCPLL_PLL_DISABLE;
9429	I915_WRITE(LCPLL_CTL, val);
9430	POSTING_READ(LCPLL_CTL);
9431
9432	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9433		DRM_ERROR("LCPLL still locked\n");
9434
9ccd5aeb 9435	val = hsw_read_dcomp(dev_priv);
be256dc7 9436	val |= D_COMP_COMP_DISABLE;
3c4c9b81 9437	hsw_write_dcomp(dev_priv, val);
be256dc7
PZ
9438	ndelay(100);
9439
9ccd5aeb
PZ
9440	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9441		     1))
be256dc7
PZ
9442		DRM_ERROR("D_COMP RCOMP still in progress\n");
9443
9444	if (allow_power_down) {
9445		val = I915_READ(LCPLL_CTL);
9446		val |= LCPLL_POWER_DOWN_ALLOW;
9447		I915_WRITE(LCPLL_CTL, val);
9448		POSTING_READ(LCPLL_CTL);
9449	}
9450}
9451
9452/*
9453 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9454 * source.
9455 */
6ff58d53 9456static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
be256dc7
PZ
9457{
9458	uint32_t val;
9459
9460	val = I915_READ(LCPLL_CTL);
9461
	/* nothing to do if LCPLL is already fully up */
9462	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9463		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9464		return;
9465
a8a8bd54
PZ
9466	/*
9467	 * Make sure we're not on PC8 state before disabling PC8, otherwise
9468	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
a8a8bd54 9469	 */
59bad947 9470	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
215733fa 9471
be256dc7
PZ
9472	if (val & LCPLL_POWER_DOWN_ALLOW) {
9473		val &= ~LCPLL_POWER_DOWN_ALLOW;
9474		I915_WRITE(LCPLL_CTL, val);
35d8f2eb 9475		POSTING_READ(LCPLL_CTL);
be256dc7
PZ
9476	}
9477
9ccd5aeb 9478	val = hsw_read_dcomp(dev_priv);
be256dc7
PZ
9479	val |= D_COMP_COMP_FORCE;
9480	val &= ~D_COMP_COMP_DISABLE;
3c4c9b81 9481	hsw_write_dcomp(dev_priv, val);
be256dc7
PZ
9482
9483	val = I915_READ(LCPLL_CTL);
9484	val &= ~LCPLL_PLL_DISABLE;
9485	I915_WRITE(LCPLL_CTL, val);
9486
9487	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9488		DRM_ERROR("LCPLL not locked yet\n");
9489
9490	if (val & LCPLL_CD_SOURCE_FCLK) {
		/* switch cdclk back from FCLK to LCPLL */
9491		val = I915_READ(LCPLL_CTL);
9492		val &= ~LCPLL_CD_SOURCE_FCLK;
9493		I915_WRITE(LCPLL_CTL, val);
9494
9495		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9496					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9497			DRM_ERROR("Switching back to LCPLL failed\n");
9498	}
215733fa 9499
59bad947 9500	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
b6283055 9501	intel_update_cdclk(dev_priv->dev);
be256dc7
PZ
9502}
9503
765dab67
PZ
9504/*
9505 * Package states C8 and deeper are really deep PC states that can only be
9506 * reached when all the devices on the system allow it, so even if the graphics
9507 * device allows PC8+, it doesn't mean the system will actually get to these
9508 * states. Our driver only allows PC8+ when going into runtime PM.
9509 *
9510 * The requirements for PC8+ are that all the outputs are disabled, the power
9511 * well is disabled and most interrupts are disabled, and these are also
9512 * requirements for runtime PM. When these conditions are met, we manually do
9513 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
9514 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
9515 * hang the machine.
9516 *
9517 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9518 * the state of some registers, so when we come back from PC8+ we need to
9519 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9520 * need to take care of the registers kept by RC6. Notice that this happens even
9521 * if we don't put the device in PCI D3 state (which is what currently happens
9522 * because of the runtime PM support).
9523 *
9524 * For more, read "Display Sequences for Package C8" on the hardware
9525 * documentation.
9526 */
a14cb6fc 9527void hsw_enable_pc8(struct drm_i915_private *dev_priv)
c67a470b 9528{
c67a470b
PZ
9529	struct drm_device *dev = dev_priv->dev;
9530	uint32_t val;
9531
c67a470b
PZ
9532	DRM_DEBUG_KMS("Enabling package C8+\n");
9533
9534	if (HAS_PCH_LPT_LP(dev)) {
		/* allow the PCH low-power partition to power down */
c67a470b
PZ
9535		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9536		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9537		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9538	}
9539
9540	lpt_disable_clkout_dp(dev);
c67a470b
PZ
9541	hsw_disable_lcpll(dev_priv, true, true);
9542}
9543
/*
 * hsw_disable_pc8 - undo hsw_enable_pc8(): restore LCPLL, re-init the PCH
 * refclk, disallow the LPT-LP partition power-down and re-prepare DDI.
 */
a14cb6fc 9544void hsw_disable_pc8(struct drm_i915_private *dev_priv)
c67a470b
PZ
9545{
9546	struct drm_device *dev = dev_priv->dev;
9547	uint32_t val;
9548
c67a470b
PZ
9549	DRM_DEBUG_KMS("Disabling package C8+\n");
9550
9551	hsw_restore_lcpll(dev_priv);
c67a470b
PZ
9552	lpt_init_pch_refclk(dev);
9553
c2699524 9554	if (HAS_PCH_LPT_LP(dev)) {
c67a470b
PZ
9555		val = I915_READ(SOUTH_DSPCLK_GATE_D);
9556		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9557		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9558	}
9559
9560	intel_prepare_ddi(dev);
c67a470b
PZ
9561}
9562
/*
 * broxton_modeset_commit_cdclk - commit the cdclk chosen during the atomic
 * check phase (stored in the intel atomic state) to the hardware.
 */
27c329ed 9563static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
f8437dd1 9564{
a821fc46 9565	struct drm_device *dev = old_state->dev;
27c329ed 9566	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
f8437dd1 9567
27c329ed 9568	broxton_set_cdclk(dev, req_cdclk);
f8437dd1
VK
9569}
9570
b432e5cf 9571/* compute the max rate for new configuration */
/*
 * Walks every CRTC, pulling its (possibly new) state from @state, and
 * returns the highest pipe pixel rate among enabled CRTCs.  Returns a
 * negative errno if fetching a crtc state fails.  On BDW with IPS the
 * rate is padded since pixel rate must stay under 95% of cdclk.
 */
27c329ed 9572static int ilk_max_pixel_rate(struct drm_atomic_state *state)
b432e5cf 9573{
b432e5cf 9574	struct intel_crtc *intel_crtc;
27c329ed 9575	struct intel_crtc_state *crtc_state;
b432e5cf 9576	int max_pixel_rate = 0;
b432e5cf 9577
27c329ed
ML
9578	for_each_intel_crtc(state->dev, intel_crtc) {
9579		int pixel_rate;
9580
9581		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
9582		if (IS_ERR(crtc_state))
9583			return PTR_ERR(crtc_state);
9584
9585		if (!crtc_state->base.enable)
b432e5cf
VS
9586			continue;
9587
27c329ed 9588		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
b432e5cf
VS
9589
9590		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
27c329ed 9591		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
b432e5cf
VS
9592			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9593
9594		max_pixel_rate = max(max_pixel_rate, pixel_rate);
9595	}
9596
9597	return max_pixel_rate;
9598}
9599
/*
 * broadwell_set_cdclk - program a new cdclk frequency on BDW.
 *
 * Sequence: notify pcode of the impending change, park the cd clock on
 * FCLK, rewrite LCPLL_CTL's frequency field, switch back off FCLK, then
 * tell pcode the new display frequency.  Only the four BDW-supported
 * frequencies (337.5/450/540/675 MHz) are accepted.
 */
9600static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9601{
9602	struct drm_i915_private *dev_priv = dev->dev_private;
9603	uint32_t val, data;
9604	int ret;
9605
	/* cdclk can only be changed while LCPLL is fully up and running */
9606	if (WARN((I915_READ(LCPLL_CTL) &
9607		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9608		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9609		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9610		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9611		 "trying to change cdclk frequency with cdclk not enabled\n"))
9612		return;
9613
9614	mutex_lock(&dev_priv->rps.hw_lock);
9615	ret = sandybridge_pcode_write(dev_priv,
9616				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9617	mutex_unlock(&dev_priv->rps.hw_lock);
9618	if (ret) {
9619		DRM_ERROR("failed to inform pcode about cdclk change\n");
9620		return;
9621	}
9622
9623	val = I915_READ(LCPLL_CTL);
9624	val |= LCPLL_CD_SOURCE_FCLK;
9625	I915_WRITE(LCPLL_CTL, val);
9626
9627	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9628			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
9629		DRM_ERROR("Switching to FCLK failed\n");
9630
9631	val = I915_READ(LCPLL_CTL);
9632	val &= ~LCPLL_CLK_FREQ_MASK;
9633
	/* data is the matching pcode frequency index for each cdclk */
9634	switch (cdclk) {
9635	case 450000:
9636		val |= LCPLL_CLK_FREQ_450;
9637		data = 0;
9638		break;
9639	case 540000:
9640		val |= LCPLL_CLK_FREQ_54O_BDW;
9641		data = 1;
9642		break;
9643	case 337500:
9644		val |= LCPLL_CLK_FREQ_337_5_BDW;
9645		data = 2;
9646		break;
9647	case 675000:
9648		val |= LCPLL_CLK_FREQ_675_BDW;
9649		data = 3;
9650		break;
9651	default:
9652		WARN(1, "invalid cdclk frequency\n");
9653		return;
9654	}
9655
9656	I915_WRITE(LCPLL_CTL, val);
9657
9658	val = I915_READ(LCPLL_CTL);
9659	val &= ~LCPLL_CD_SOURCE_FCLK;
9660	I915_WRITE(LCPLL_CTL, val);
9661
9662	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9663				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9664		DRM_ERROR("Switching back to LCPLL failed\n");
9665
9666	mutex_lock(&dev_priv->rps.hw_lock);
9667	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9668	mutex_unlock(&dev_priv->rps.hw_lock);
9669
9670	intel_update_cdclk(dev);
9671
9672	WARN(cdclk != dev_priv->cdclk_freq,
9673	     "cdclk requested %d kHz but got %d kHz\n",
9674	     cdclk, dev_priv->cdclk_freq);
9675}
9676
/*
 * broadwell_modeset_calc_cdclk - pick the minimal BDW cdclk that covers
 * the max pixel rate of the new atomic state, clamp it to the platform
 * maximum, and store it in the intel atomic state for the commit phase.
 */
27c329ed 9677static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
b432e5cf 9678{
27c329ed
ML
9679	struct drm_i915_private *dev_priv = to_i915(state->dev);
9680	int max_pixclk = ilk_max_pixel_rate(state);
b432e5cf
VS
9681	int cdclk;
9682
9683	/*
9684	 * FIXME should also account for plane ratio
9685	 * once 64bpp pixel formats are supported.
9686	 */
27c329ed 9687	if (max_pixclk > 540000)
b432e5cf 9688		cdclk = 675000;
27c329ed 9689	else if (max_pixclk > 450000)
b432e5cf 9690		cdclk = 540000;
27c329ed 9691	else if (max_pixclk > 337500)
b432e5cf
VS
9692		cdclk = 450000;
9693	else
9694		cdclk = 337500;
9695
9696	/*
9697	 * FIXME move the cdclk caclulation to
9698	 * compute_config() so we can fail gracegully.
9699	 */
9700	if (cdclk > dev_priv->max_cdclk_freq) {
9701		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9702			  cdclk, dev_priv->max_cdclk_freq);
9703		cdclk = dev_priv->max_cdclk_freq;
9704	}
9705
27c329ed 9706	to_intel_atomic_state(state)->cdclk = cdclk;
b432e5cf
VS
9707
9708	return 0;
9709}
9710
/*
 * broadwell_modeset_commit_cdclk - commit the cdclk computed by
 * broadwell_modeset_calc_cdclk() to the hardware.
 */
27c329ed 9711static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
b432e5cf 9712{
27c329ed
ML
9713	struct drm_device *dev = old_state->dev;
9714	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
b432e5cf 9715
27c329ed 9716	broadwell_set_cdclk(dev, req_cdclk);
b432e5cf
VS
9717}
9718
/*
 * haswell_crtc_compute_clock - clock computation hook for HSW+; the real
 * work is delegated to the DDI PLL selection.  Returns -EINVAL if no
 * suitable PLL could be selected.
 */
190f68c5
ACO
9719static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9720				      struct intel_crtc_state *crtc_state)
09b4ddf9 9721{
190f68c5 9722	if (!intel_ddi_pll_select(crtc, crtc_state))
6441ab5f 9723		return -EINVAL;
716c2e55 9724
c7653199 9725	crtc->lowfreq_avail = false;
644cef34 9726
c8f7a0db 9727	return 0;
79e53945
JB
9728}
9729
/*
 * bxt_get_ddi_pll - read back which PLL drives @port on Broxton.
 *
 * BXT has a fixed port -> PLL mapping (port A/B/C -> PLL 1/2/3), so no
 * register read is needed; unknown ports are logged as an error.
 */
3760b59c
S
9730static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9731				enum port port,
9732				struct intel_crtc_state *pipe_config)
9733{
9734	switch (port) {
9735	case PORT_A:
9736		pipe_config->ddi_pll_sel = SKL_DPLL0;
9737		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9738		break;
9739	case PORT_B:
9740		pipe_config->ddi_pll_sel = SKL_DPLL1;
9741		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9742		break;
9743	case PORT_C:
9744		pipe_config->ddi_pll_sel = SKL_DPLL2;
9745		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9746		break;
9747	default:
9748		DRM_ERROR("Incorrect port type\n");
9749	}
9750}
9751
/*
 * skylake_get_ddi_pll - read back which DPLL drives @port on SKL from
 * DPLL_CTRL2, and record it in @pipe_config.
 */
96b7dfb7
S
9752static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9753				enum port port,
5cec258b 9754				struct intel_crtc_state *pipe_config)
96b7dfb7 9755{
3148ade7 9756	u32 temp, dpll_ctl1;
96b7dfb7
S
9757
9758	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9759	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9760
9761	switch (pipe_config->ddi_pll_sel) {
3148ade7
DL
9762	case SKL_DPLL0:
9763		/*
9764		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
9765		 * of the shared DPLL framework and thus needs to be read out
9766		 * separately
9767		 */
9768		dpll_ctl1 = I915_READ(DPLL_CTRL1);
9769		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
9770		break;
96b7dfb7
S
9771	case SKL_DPLL1:
9772		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
9773		break;
9774	case SKL_DPLL2:
9775		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
9776		break;
9777	case SKL_DPLL3:
9778		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
9779		break;
96b7dfb7
S
9780	}
9781}
9782
/*
 * haswell_get_ddi_pll - read back which clock source feeds @port on
 * HSW/BDW from PORT_CLK_SEL; only the shared WRPLLs map to a shared_dpll
 * id (other selections, e.g. SPLL/LCPLL, leave shared_dpll untouched).
 */
7d2c8175
DL
9783static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9784				enum port port,
5cec258b 9785				struct intel_crtc_state *pipe_config)
7d2c8175
DL
9786{
9787	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9788
9789	switch (pipe_config->ddi_pll_sel) {
9790	case PORT_CLK_SEL_WRPLL1:
9791		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
9792		break;
9793	case PORT_CLK_SEL_WRPLL2:
9794		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
9795		break;
9796	}
9797}
9798
/*
 * haswell_get_ddi_port_state - read back DDI port/PLL state for the pipe.
 *
 * Determines the DDI port from TRANS_DDI_FUNC_CTL, dispatches to the
 * per-platform PLL readout, snapshots the PLL hw state when a shared
 * DPLL is in use, and detects the PCH/FDI path on port E (pre-gen9).
 */
26804afd 9799static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
5cec258b 9800				       struct intel_crtc_state *pipe_config)
26804afd
DV
9801{
9802	struct drm_device *dev = crtc->base.dev;
9803	struct drm_i915_private *dev_priv = dev->dev_private;
d452c5b6 9804	struct intel_shared_dpll *pll;
26804afd
DV
9805	enum port port;
9806	uint32_t tmp;
9807
9808	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9809
9810	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9811
ef11bdb3 9812	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
96b7dfb7 9813		skylake_get_ddi_pll(dev_priv, port, pipe_config);
3760b59c
S
9814	else if (IS_BROXTON(dev))
9815		bxt_get_ddi_pll(dev_priv, port, pipe_config);
96b7dfb7
S
9816	else
9817		haswell_get_ddi_pll(dev_priv, port, pipe_config);
9cd86933 9818
d452c5b6
DV
9819	if (pipe_config->shared_dpll >= 0) {
9820		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
9821
9822		WARN_ON(!pll->get_hw_state(dev_priv, pll,
9823					   &pipe_config->dpll_hw_state));
9824	}
9825
26804afd
DV
9826	/*
9827	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
9828	 * DDI E. So just check whether this pipe is wired to DDI E and whether
9829	 * the PCH transcoder is on.
9830	 */
ca370455
DL
9831	if (INTEL_INFO(dev)->gen < 9 &&
9832	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
26804afd
DV
9833		pipe_config->has_pch_encoder = true;
9834
9835		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9836		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9837					  FDI_DP_PORT_WIDTH_SHIFT) + 1;
9838
9839		ironlake_get_fdi_m_n_config(crtc, pipe_config);
9840	}
9841}
9842
/*
 * haswell_get_pipe_config - read back full pipe hw state on HSW+.
 *
 * Returns false when the pipe's (or its transcoder's) power domain is
 * off or the pipe is disabled.  Handles the special eDP transcoder
 * routing, DDI port/PLL readout, timings, scalers/panel fitter, IPS and
 * pixel multiplier.
 */
0e8ffe1b 9843static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9844				    struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
9845{
9846	struct drm_device *dev = crtc->base.dev;
9847	struct drm_i915_private *dev_priv = dev->dev_private;
2fa2fe9a 9848	enum intel_display_power_domain pfit_domain;
0e8ffe1b
DV
9849	uint32_t tmp;
9850
f458ebbc 9851	if (!intel_display_power_is_enabled(dev_priv,
b5482bd0
ID
9852					 POWER_DOMAIN_PIPE(crtc->pipe)))
9853		return false;
9854
e143a21c 9855	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
c0d43d62
DV
9856	pipe_config->shared_dpll = DPLL_ID_PRIVATE;
9857
	/* the eDP transcoder can be assigned to any pipe; detect that here */
eccb140b
DV
9858	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9859	if (tmp & TRANS_DDI_FUNC_ENABLE) {
9860		enum pipe trans_edp_pipe;
9861		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9862		default:
9863			WARN(1, "unknown pipe linked to edp transcoder\n");
9864		case TRANS_DDI_EDP_INPUT_A_ONOFF:
9865		case TRANS_DDI_EDP_INPUT_A_ON:
9866			trans_edp_pipe = PIPE_A;
9867			break;
9868		case TRANS_DDI_EDP_INPUT_B_ONOFF:
9869			trans_edp_pipe = PIPE_B;
9870			break;
9871		case TRANS_DDI_EDP_INPUT_C_ONOFF:
9872			trans_edp_pipe = PIPE_C;
9873			break;
9874		}
9875
9876		if (trans_edp_pipe == crtc->pipe)
9877			pipe_config->cpu_transcoder = TRANSCODER_EDP;
9878	}
9879
f458ebbc 9880	if (!intel_display_power_is_enabled(dev_priv,
eccb140b 9881			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
2bfce950
PZ
9882		return false;
9883
eccb140b 9884	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
0e8ffe1b
DV
9885	if (!(tmp & PIPECONF_ENABLE))
9886		return false;
9887
26804afd 9888	haswell_get_ddi_port_state(crtc, pipe_config);
627eb5a3 9889
1bd1bd80
DV
9890	intel_get_pipe_timings(crtc, pipe_config);
9891
a1b2278e
CK
9892	if (INTEL_INFO(dev)->gen >= 9) {
9893		skl_init_scalers(dev, crtc, pipe_config);
9894	}
9895
2fa2fe9a 9896	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
af99ceda
CK
9897
9898	if (INTEL_INFO(dev)->gen >= 9) {
9899		pipe_config->scaler_state.scaler_id = -1;
9900		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9901	}
9902
	/* pfit state can only be read when its power domain is on */
bd2e244f 9903	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
1c132b44 9904		if (INTEL_INFO(dev)->gen >= 9)
bd2e244f 9905			skylake_get_pfit_config(crtc, pipe_config);
ff6d9f55 9906		else
1c132b44 9907			ironlake_get_pfit_config(crtc, pipe_config);
bd2e244f 9908	}
88adfff1 9909
e59150dc
JB
9910	if (IS_HASWELL(dev))
9911		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
9912			(I915_READ(IPS_CTL) & IPS_ENABLE);
42db64ef 9913
ebb69c95
CT
9914	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
9915		pipe_config->pixel_multiplier =
9916			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
9917	} else {
9918		pipe_config->pixel_multiplier = 1;
9919	}
6c49f241 9920
0e8ffe1b
DV
9921	return true;
9922}
9923
/*
 * i845_update_cursor - program the (single, pipe A) hardware cursor on
 * 845/865-class hardware.
 *
 * @base == 0 means "disable".  These chips only allow changing
 * base/size/stride while the cursor is off, so the cursor is first
 * disabled when any of those change, then reprogrammed.  Register writes
 * are skipped when the cached values already match.
 */
560b85bb
CW
9924static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
9925{
9926	struct drm_device *dev = crtc->dev;
9927	struct drm_i915_private *dev_priv = dev->dev_private;
9928	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
dc41c154 9929	uint32_t cntl = 0, size = 0;
560b85bb 9930
dc41c154 9931	if (base) {
3dd512fb
MR
9932		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
9933		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
dc41c154
VS
9934		unsigned int stride = roundup_pow_of_two(width) * 4;
9935
		/* hw supports only these power-of-two strides */
9936		switch (stride) {
9937		default:
9938			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
9939				  width, stride);
9940			stride = 256;
9941			/* fallthrough */
9942		case 256:
9943		case 512:
9944		case 1024:
9945		case 2048:
9946			break;
4b0e333e
CW
9947		}
9948
dc41c154
VS
9949		cntl |= CURSOR_ENABLE |
9950			CURSOR_GAMMA_ENABLE |
9951			CURSOR_FORMAT_ARGB |
9952			CURSOR_STRIDE(stride);
9953
9954		size = (height << 12) | width;
4b0e333e 9955	}
560b85bb 9956
dc41c154
VS
9957	if (intel_crtc->cursor_cntl != 0 &&
9958	    (intel_crtc->cursor_base != base ||
9959	     intel_crtc->cursor_size != size ||
9960	     intel_crtc->cursor_cntl != cntl)) {
9961		/* On these chipsets we can only modify the base/size/stride
9962		 * whilst the cursor is disabled.
9963		 */
0b87c24e
VS
9964		I915_WRITE(CURCNTR(PIPE_A), 0);
9965		POSTING_READ(CURCNTR(PIPE_A));
dc41c154 9966		intel_crtc->cursor_cntl = 0;
4b0e333e 9967	}
560b85bb 9968
99d1f387 9969	if (intel_crtc->cursor_base != base) {
0b87c24e 9970		I915_WRITE(CURBASE(PIPE_A), base);
99d1f387
VS
9971		intel_crtc->cursor_base = base;
9972	}
4726e0b0 9973
dc41c154
VS
9974	if (intel_crtc->cursor_size != size) {
9975		I915_WRITE(CURSIZE, size);
9976		intel_crtc->cursor_size = size;
4b0e333e 9977	}
560b85bb 9978
4b0e333e 9979	if (intel_crtc->cursor_cntl != cntl) {
0b87c24e
VS
9980		I915_WRITE(CURCNTR(PIPE_A), cntl);
9981		POSTING_READ(CURCNTR(PIPE_A));
4b0e333e 9982		intel_crtc->cursor_cntl = cntl;
560b85bb 9983	}
560b85bb
CW
9984}
9985
/*
 * i9xx_update_cursor - program the per-pipe hardware cursor (gen2+
 * CURCNTR/CURBASE layout).
 *
 * @base == 0 means "disable".  Only 64/128/256-wide square ARGB cursor
 * modes are supported; CURBASE is written last, as that write latches
 * the new state on the next vblank.
 */
560b85bb 9986static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
65a21cd6
JB
9987{
9988	struct drm_device *dev = crtc->dev;
9989	struct drm_i915_private *dev_priv = dev->dev_private;
9990	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9991	int pipe = intel_crtc->pipe;
4b0e333e
CW
9992	uint32_t cntl;
9993
9994	cntl = 0;
9995	if (base) {
9996		cntl = MCURSOR_GAMMA_ENABLE;
3dd512fb 9997		switch (intel_crtc->base.cursor->state->crtc_w) {
4726e0b0
SK
9998			case 64:
9999				cntl |= CURSOR_MODE_64_ARGB_AX;
10000				break;
10001			case 128:
10002				cntl |= CURSOR_MODE_128_ARGB_AX;
10003				break;
10004			case 256:
10005				cntl |= CURSOR_MODE_256_ARGB_AX;
10006				break;
10007			default:
3dd512fb 10008				MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
4726e0b0 10009				return;
65a21cd6 10010		}
4b0e333e 10011		cntl |= pipe << 28; /* Connect to correct pipe */
47bf17a7 10012
fc6f93bc 10013		if (HAS_DDI(dev))
47bf17a7 10014			cntl |= CURSOR_PIPE_CSC_ENABLE;
4b0e333e 10015	}
65a21cd6 10016
8e7d688b 10017	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
4398ad45
VS
10018		cntl |= CURSOR_ROTATE_180;
10019
4b0e333e
CW
10020	if (intel_crtc->cursor_cntl != cntl) {
10021		I915_WRITE(CURCNTR(pipe), cntl);
10022		POSTING_READ(CURCNTR(pipe));
10023		intel_crtc->cursor_cntl = cntl;
65a21cd6 10024	}
4b0e333e
10025
65a21cd6 10026	/* and commit changes on next vblank */
5efb3e28
VS
10027	I915_WRITE(CURBASE(pipe), base);
10028	POSTING_READ(CURBASE(pipe));
99d1f387
VS
10029
10030	intel_crtc->cursor_base = base;
65a21cd6
JB
10031}
10032
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Compute the cursor position/visibility for @crtc and hand off to the
 * platform-specific update helper.
 *
 * @crtc: crtc to update
 * @on:   true to show the cursor at its current address, false to hide it
 *
 * A cursor that lies entirely outside the pipe source area is programmed
 * with base 0 (disabled) to avoid the hang mentioned above.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_plane_state *cursor_state = crtc->cursor->state;
	int x = cursor_state->crtc_x;
	int y = cursor_state->crtc_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Entirely off the right/bottom edge: treat as disabled. */
	if (x >= intel_crtc->config->pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config->pipe_src_h)
		base = 0;

	if (x < 0) {
		/* Entirely off the left edge: disable. */
		if (x + cursor_state->crtc_w <= 0)
			base = 0;

		/* Negative coordinates use sign + magnitude encoding. */
		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		/* Entirely off the top edge: disable. */
		if (y + cursor_state->crtc_h <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do if the cursor was and remains disabled. */
	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/*
		 * For 180 degree rotation on GMCH platforms, point the base
		 * at the last pixel of the (4 bytes/px) cursor image.
		 */
		base += (cursor_state->crtc_h *
			 cursor_state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
}
10090
/*
 * cursor_size_ok - validate a cursor plane size against hardware limits
 * @dev:    drm device
 * @width:  cursor width in pixels
 * @height: cursor height in pixels
 *
 * Returns true if the hardware can display a cursor of the given size.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/*
		 * width | height only equals one of the listed sizes when
		 * width == height (same power of two), so this also
		 * enforces the square-cursor requirement.
		 */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10127
79e53945 10128static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 10129 u16 *blue, uint32_t start, uint32_t size)
79e53945 10130{
7203425a 10131 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 10132 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 10133
7203425a 10134 for (i = start; i < end; i++) {
79e53945
JB
10135 intel_crtc->lut_r[i] = red[i] >> 8;
10136 intel_crtc->lut_g[i] = green[i] >> 8;
10137 intel_crtc->lut_b[i] = blue[i] >> 8;
10138 }
10139
10140 intel_crtc_load_lut(crtc);
10141}
10142
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by load detection when the caller supplies none. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10148
/*
 * __intel_framebuffer_create - allocate and initialise an intel_framebuffer
 * @dev:      drm device
 * @mode_cmd: framebuffer layout (size, pixel format, pitches)
 * @obj:      GEM object backing the framebuffer
 *
 * Returns the new framebuffer or an ERR_PTR on failure. On failure @obj
 * is left untouched; the caller remains responsible for releasing it
 * (see intel_framebuffer_create_for_mode).
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
10171
/*
 * Locked wrapper around __intel_framebuffer_create: takes struct_mutex
 * (interruptibly) for the duration of framebuffer initialisation.
 * Returns the framebuffer or an ERR_PTR (including -EINTR/-ERESTARTSYS
 * style errors from the interruptible lock).
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}
10188
/*
 * Compute the framebuffer pitch (bytes per scanline) for @width pixels
 * at @bpp bits per pixel, padded to the 64-byte alignment the display
 * hardware requires.
 */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	/* Bytes per scanline, rounding partial bytes up... */
	u32 bytes = DIV_ROUND_UP(width * bpp, 8);

	/* ...then padded to a 64-byte boundary. */
	return ALIGN(bytes, 64);
}
10195
10196static u32
10197intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10198{
10199 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
1267a26b 10200 return PAGE_ALIGN(pitch * mode->vdisplay);
d2dff872
CW
10201}
10202
/*
 * Allocate a GEM object sized for @mode and wrap it in a framebuffer.
 *
 * @dev:   drm device
 * @mode:  display mode the framebuffer must cover
 * @depth: legacy color depth used to pick the pixel format
 * @bpp:   bits per pixel
 *
 * Returns the framebuffer or an ERR_PTR. If framebuffer creation fails,
 * the freshly allocated GEM object is released here before returning.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
10229
/*
 * mode_fits_in_fbdev - try to reuse the fbdev framebuffer for load detect
 * @dev:  drm device
 * @mode: mode the framebuffer must be able to display
 *
 * Returns the fbdev framebuffer if it exists and is large enough (pitch
 * and total size) for @mode, otherwise NULL. Always NULL when fbdev
 * emulation is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* The fbdev pitch must cover a full scanline of @mode... */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ...and the backing object must cover all scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}
10261
/*
 * intel_modeset_setup_plane_state - fill in primary plane state for a modeset
 * @state: atomic state being constructed
 * @crtc:  crtc whose primary plane is configured
 * @mode:  mode providing the plane dimensions, or NULL for 0x0
 * @fb:    framebuffer to scan out; NULL detaches the plane from the crtc
 * @x, @y: source offset in whole pixels (converted to 16.16 fixed point)
 *
 * Returns 0 on success or a negative error code from the atomic helpers.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	/* No fb means the plane is being disabled: detach it from the crtc. */
	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* src_* are in 16.16 fixed point. */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10296
/*
 * intel_get_load_detect_pipe - light up a pipe for connector load detection
 * @connector: connector to probe
 * @mode:      mode to set, or NULL to use the 640x480 load_detect_mode
 * @old:       filled with state needed by intel_release_load_detect_pipe()
 * @ctx:       modeset lock acquire context (caller drops locks/backs off)
 *
 * Either reuses the crtc already driving @connector or grabs an unused
 * one, commits a temporary atomic state on it, and waits one vblank so
 * the connector settles. Returns true if a pipe is now driving the
 * connector; on -EDEADLK the whole sequence retries after backoff.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail;

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->state->enable)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		/*
		 * NOTE(review): returns without running the fail path; the
		 * locks taken above are cleaned up by the caller via @ctx —
		 * confirm no other cleanup is needed here.
		 */
		return false;

	state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	connector_state->crtc = crtc;
	connector_state->best_encoder = &intel_encoder->base;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_mode_copy(&crtc_state->base.mode, mode);

	if (drm_atomic_commit(state)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	/* drm_atomic_state_free() tolerates state == NULL on early failures. */
	drm_atomic_state_free(state);
	state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10462
/*
 * intel_release_load_detect_pipe - undo intel_get_load_detect_pipe()
 * @connector: connector that was probed
 * @old:       state saved by intel_get_load_detect_pipe()
 * @ctx:       same acquire context used for the load-detect modeset
 *
 * If a temporary pipe was lit up, commits an atomic state that shuts it
 * down again and releases any temporary framebuffer; otherwise just
 * restores the connector's previous dpms mode.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		state = drm_atomic_state_alloc(dev);
		if (!state)
			/* NOTE(review): fail path frees state == NULL; relies
			 * on drm_atomic_state_free() accepting NULL. */
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			goto fail;

		/* Detach the connector and shut the crtc down. */
		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		crtc_state->base.enable = crtc_state->base.active = false;

		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
						      0, 0);
		if (ret)
			goto fail;

		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;

		/* Drop the temporary fb created for load detection, if any. */
		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	drm_atomic_state_free(state);
}
10528
da4a1efa 10529static int i9xx_pll_refclk(struct drm_device *dev,
5cec258b 10530 const struct intel_crtc_state *pipe_config)
da4a1efa
VS
10531{
10532 struct drm_i915_private *dev_priv = dev->dev_private;
10533 u32 dpll = pipe_config->dpll_hw_state.dpll;
10534
10535 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
e91e941b 10536 return dev_priv->vbt.lvds_ssc_freq;
da4a1efa
VS
10537 else if (HAS_PCH_SPLIT(dev))
10538 return 120000;
10539 else if (!IS_GEN2(dev))
10540 return 96000;
10541 else
10542 return 48000;
10543}
10544
/* Returns the clock of the currently programmed mode of the given pipe. */
/*
 * Decode the DPLL/FP register values saved in @pipe_config back into
 * m/n/p divisors and compute the resulting port clock. The result (which
 * still includes the pixel multiplier) is stored in
 * pipe_config->port_clock.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* The DPLL selects which of the two FP registers is active. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		/* Pineview encodes N as a one-hot field and uses wider M2. */
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10634
/*
 * intel_dotclock_calculate - derive the pixel clock from link M/N values
 * @link_freq: link clock (same units as the returned dotclock)
 * @m_n:       M/N values programmed for the link
 *
 * Returns the dotclock, or 0 if link_n is zero (link not configured).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	/* 64-bit intermediate to avoid overflow of m * link_freq. */
	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}
f1f644dc 10653
18442d08 10654static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 10655 struct intel_crtc_state *pipe_config)
6878da05
VS
10656{
10657 struct drm_device *dev = crtc->base.dev;
79e53945 10658
18442d08
VS
10659 /* read out port_clock from the DPLL */
10660 i9xx_crtc_clock_get(crtc, pipe_config);
f1f644dc 10661
f1f644dc 10662 /*
18442d08 10663 * This value does not include pixel_multiplier.
241bfc38 10664 * We will check that port_clock and adjusted_mode.crtc_clock
18442d08
VS
10665 * agree once we know their relationship in the encoder's
10666 * get_config() function.
79e53945 10667 */
2d112de7 10668 pipe_config->base.adjusted_mode.crtc_clock =
18442d08
VS
10669 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
10670 &pipe_config->fdi_m_n);
79e53945
JB
10671}
10672
/** Returns the currently programmed mode of the given pipe. */
/*
 * Reconstructs a drm_display_mode from the timing and DPLL registers of
 * @crtc's transcoder. The mode is kzalloc'd; the caller owns it and must
 * kfree() it. Returns NULL on allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* Hardware timing registers store value-minus-one in 16-bit halves. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}
10720
/*
 * Mark the GPU as busy: grab a runtime PM reference, refresh the power
 * tracking, and kick the RPS logic on gen6+. Idempotent while busy.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
10734
/*
 * Mark the GPU as idle: let RPS drop frequencies on gen6+ and release
 * the runtime PM reference taken by intel_mark_busy(). Idempotent while
 * idle.
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	intel_runtime_pm_put(dev_priv);
}
10749
/*
 * Destroy an intel_crtc: cancel and free any pending page-flip unpin
 * work (detached under the event lock so the irq path can't requeue it),
 * then tear down the drm_crtc and free the wrapper.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach the pending work under the lock the irq handlers use. */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
10770
/*
 * Deferred work run after a page flip completes: unpin and drop the old
 * framebuffer/object references, release the queued request, signal
 * frontbuffer flip completion and free the work item.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* GEM unpin/unreference requires struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10795
/*
 * Common page-flip completion path: if a flip is pending and has reached
 * the COMPLETE stage, finish it under the event lock.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10826
1afe3e9d
JB
10827void intel_finish_page_flip(struct drm_device *dev, int pipe)
10828{
fbee40df 10829 struct drm_i915_private *dev_priv = dev->dev_private;
1afe3e9d
JB
10830 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10831
49b14a5c 10832 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
10833}
10834
10835void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10836{
fbee40df 10837 struct drm_i915_private *dev_priv = dev->dev_private;
1afe3e9d
JB
10838 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10839
49b14a5c 10840 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
10841}
10842
/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	/*
	 * Flip counters wrap, so compare modulo 2^32: 'a' is at or after
	 * 'b' iff the unsigned difference is less than half the range
	 * (i.e. the sign bit of a - b is clear).
	 */
	return (a - b) < 0x80000000;
}
10848
/*
 * page_flip_finished - has the pending flip on @crtc actually completed?
 *
 * Returns true when it is safe to treat the flip as done: always during
 * GPU reset (to complete lost flips), always on platforms without the
 * flip count registers, otherwise when both the live surface address and
 * the flip counter confirm completion.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* During/after reset, complete the flip unconditionally. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}
10888
/*
 * Advance a pending flip on @plane's crtc from PENDING to COMPLETE if
 * the hardware reports the flip has finished (see page_flip_finished()).
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10910
6042639c 10911static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
e7d841ca
CW
10912{
10913 /* Ensure that the work item is consistent when activating it ... */
10914 smp_wmb();
6042639c 10915 atomic_set(&work->pending, INTEL_FLIP_PENDING);
e7d841ca
CW
10916 /* and that it is marked active as soon as the irq could fire. */
10917 smp_wmb();
10918}
10919
8c9f3aaf
JB
10920static int intel_gen2_queue_flip(struct drm_device *dev,
10921 struct drm_crtc *crtc,
10922 struct drm_framebuffer *fb,
ed8d1975 10923 struct drm_i915_gem_object *obj,
6258fbe2 10924 struct drm_i915_gem_request *req,
ed8d1975 10925 uint32_t flags)
8c9f3aaf 10926{
6258fbe2 10927 struct intel_engine_cs *ring = req->ring;
8c9f3aaf 10928 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf
JB
10929 u32 flip_mask;
10930 int ret;
10931
5fb9de1a 10932 ret = intel_ring_begin(req, 6);
8c9f3aaf 10933 if (ret)
4fa62c89 10934 return ret;
8c9f3aaf
JB
10935
10936 /* Can't queue multiple flips, so wait for the previous
10937 * one to finish before executing the next.
10938 */
10939 if (intel_crtc->plane)
10940 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
10941 else
10942 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
10943 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
10944 intel_ring_emit(ring, MI_NOOP);
10945 intel_ring_emit(ring, MI_DISPLAY_FLIP |
10946 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
10947 intel_ring_emit(ring, fb->pitches[0]);
75f7f3ec 10948 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
6d90c952 10949 intel_ring_emit(ring, 0); /* aux display base address, unused */
e7d841ca 10950
6042639c 10951 intel_mark_page_flip_active(intel_crtc->unpin_work);
83d4092b 10952 return 0;
8c9f3aaf
JB
10953}
10954
10955static int intel_gen3_queue_flip(struct drm_device *dev,
10956 struct drm_crtc *crtc,
10957 struct drm_framebuffer *fb,
ed8d1975 10958 struct drm_i915_gem_object *obj,
6258fbe2 10959 struct drm_i915_gem_request *req,
ed8d1975 10960 uint32_t flags)
8c9f3aaf 10961{
6258fbe2 10962 struct intel_engine_cs *ring = req->ring;
8c9f3aaf 10963 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8c9f3aaf
JB
10964 u32 flip_mask;
10965 int ret;
10966
5fb9de1a 10967 ret = intel_ring_begin(req, 6);
8c9f3aaf 10968 if (ret)
4fa62c89 10969 return ret;
8c9f3aaf
JB
10970
10971 if (intel_crtc->plane)
10972 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
10973 else
10974 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
10975 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
10976 intel_ring_emit(ring, MI_NOOP);
10977 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
10978 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
10979 intel_ring_emit(ring, fb->pitches[0]);
75f7f3ec 10980 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
6d90c952
DV
10981 intel_ring_emit(ring, MI_NOOP);
10982
6042639c 10983 intel_mark_page_flip_active(intel_crtc->unpin_work);
83d4092b 10984 return 0;
8c9f3aaf
JB
10985}
10986
10987static int intel_gen4_queue_flip(struct drm_device *dev,
10988 struct drm_crtc *crtc,
10989 struct drm_framebuffer *fb,
ed8d1975 10990 struct drm_i915_gem_object *obj,
6258fbe2 10991 struct drm_i915_gem_request *req,
ed8d1975 10992 uint32_t flags)
8c9f3aaf 10993{
6258fbe2 10994 struct intel_engine_cs *ring = req->ring;
8c9f3aaf
JB
10995 struct drm_i915_private *dev_priv = dev->dev_private;
10996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10997 uint32_t pf, pipesrc;
10998 int ret;
10999
5fb9de1a 11000 ret = intel_ring_begin(req, 4);
8c9f3aaf 11001 if (ret)
4fa62c89 11002 return ret;
8c9f3aaf
JB
11003
11004 /* i965+ uses the linear or tiled offsets from the
11005 * Display Registers (which do not change across a page-flip)
11006 * so we need only reprogram the base address.
11007 */
6d90c952
DV
11008 intel_ring_emit(ring, MI_DISPLAY_FLIP |
11009 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11010 intel_ring_emit(ring, fb->pitches[0]);
75f7f3ec 11011 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
c2c75131 11012 obj->tiling_mode);
8c9f3aaf
JB
11013
11014 /* XXX Enabling the panel-fitter across page-flip is so far
11015 * untested on non-native modes, so ignore it for now.
11016 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11017 */
11018 pf = 0;
11019 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952 11020 intel_ring_emit(ring, pf | pipesrc);
e7d841ca 11021
6042639c 11022 intel_mark_page_flip_active(intel_crtc->unpin_work);
83d4092b 11023 return 0;
8c9f3aaf
JB
11024}
11025
11026static int intel_gen6_queue_flip(struct drm_device *dev,
11027 struct drm_crtc *crtc,
11028 struct drm_framebuffer *fb,
ed8d1975 11029 struct drm_i915_gem_object *obj,
6258fbe2 11030 struct drm_i915_gem_request *req,
ed8d1975 11031 uint32_t flags)
8c9f3aaf 11032{
6258fbe2 11033 struct intel_engine_cs *ring = req->ring;
8c9f3aaf
JB
11034 struct drm_i915_private *dev_priv = dev->dev_private;
11035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11036 uint32_t pf, pipesrc;
11037 int ret;
11038
5fb9de1a 11039 ret = intel_ring_begin(req, 4);
8c9f3aaf 11040 if (ret)
4fa62c89 11041 return ret;
8c9f3aaf 11042
6d90c952
DV
11043 intel_ring_emit(ring, MI_DISPLAY_FLIP |
11044 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11045 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
75f7f3ec 11046 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
8c9f3aaf 11047
dc257cf1
DV
11048 /* Contrary to the suggestions in the documentation,
11049 * "Enable Panel Fitter" does not seem to be required when page
11050 * flipping with a non-native mode, and worse causes a normal
11051 * modeset to fail.
11052 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11053 */
11054 pf = 0;
8c9f3aaf 11055 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952 11056 intel_ring_emit(ring, pf | pipesrc);
e7d841ca 11057
6042639c 11058 intel_mark_page_flip_active(intel_crtc->unpin_work);
83d4092b 11059 return 0;
8c9f3aaf
JB
11060}
11061
7c9017e5
JB
11062static int intel_gen7_queue_flip(struct drm_device *dev,
11063 struct drm_crtc *crtc,
11064 struct drm_framebuffer *fb,
ed8d1975 11065 struct drm_i915_gem_object *obj,
6258fbe2 11066 struct drm_i915_gem_request *req,
ed8d1975 11067 uint32_t flags)
7c9017e5 11068{
6258fbe2 11069 struct intel_engine_cs *ring = req->ring;
7c9017e5 11070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
cb05d8de 11071 uint32_t plane_bit = 0;
ffe74d75
CW
11072 int len, ret;
11073
eba905b2 11074 switch (intel_crtc->plane) {
cb05d8de
DV
11075 case PLANE_A:
11076 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11077 break;
11078 case PLANE_B:
11079 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11080 break;
11081 case PLANE_C:
11082 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11083 break;
11084 default:
11085 WARN_ONCE(1, "unknown plane in flip command\n");
4fa62c89 11086 return -ENODEV;
cb05d8de
DV
11087 }
11088
ffe74d75 11089 len = 4;
f476828a 11090 if (ring->id == RCS) {
ffe74d75 11091 len += 6;
f476828a
DL
11092 /*
11093 * On Gen 8, SRM is now taking an extra dword to accommodate
11094 * 48bits addresses, and we need a NOOP for the batch size to
11095 * stay even.
11096 */
11097 if (IS_GEN8(dev))
11098 len += 2;
11099 }
ffe74d75 11100
f66fab8e
VS
11101 /*
11102 * BSpec MI_DISPLAY_FLIP for IVB:
11103 * "The full packet must be contained within the same cache line."
11104 *
11105 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11106 * cacheline, if we ever start emitting more commands before
11107 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11108 * then do the cacheline alignment, and finally emit the
11109 * MI_DISPLAY_FLIP.
11110 */
bba09b12 11111 ret = intel_ring_cacheline_align(req);
f66fab8e 11112 if (ret)
4fa62c89 11113 return ret;
f66fab8e 11114
5fb9de1a 11115 ret = intel_ring_begin(req, len);
7c9017e5 11116 if (ret)
4fa62c89 11117 return ret;
7c9017e5 11118
ffe74d75
CW
11119 /* Unmask the flip-done completion message. Note that the bspec says that
11120 * we should do this for both the BCS and RCS, and that we must not unmask
11121 * more than one flip event at any time (or ensure that one flip message
11122 * can be sent by waiting for flip-done prior to queueing new flips).
11123 * Experimentation says that BCS works despite DERRMR masking all
11124 * flip-done completion events and that unmasking all planes at once
11125 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11126 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11127 */
11128 if (ring->id == RCS) {
11129 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
f92a9162 11130 intel_ring_emit_reg(ring, DERRMR);
ffe74d75
CW
11131 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11132 DERRMR_PIPEB_PRI_FLIP_DONE |
11133 DERRMR_PIPEC_PRI_FLIP_DONE));
f476828a 11134 if (IS_GEN8(dev))
f1afe24f 11135 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
f476828a
DL
11136 MI_SRM_LRM_GLOBAL_GTT);
11137 else
f1afe24f 11138 intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
f476828a 11139 MI_SRM_LRM_GLOBAL_GTT);
f92a9162 11140 intel_ring_emit_reg(ring, DERRMR);
ffe74d75 11141 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
f476828a
DL
11142 if (IS_GEN8(dev)) {
11143 intel_ring_emit(ring, 0);
11144 intel_ring_emit(ring, MI_NOOP);
11145 }
ffe74d75
CW
11146 }
11147
cb05d8de 11148 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
01f2c773 11149 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
75f7f3ec 11150 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
7c9017e5 11151 intel_ring_emit(ring, (MI_NOOP));
e7d841ca 11152
6042639c 11153 intel_mark_page_flip_active(intel_crtc->unpin_work);
83d4092b 11154 return 0;
7c9017e5
JB
11155}
11156
84c33a64
SG
11157static bool use_mmio_flip(struct intel_engine_cs *ring,
11158 struct drm_i915_gem_object *obj)
11159{
11160 /*
11161 * This is not being used for older platforms, because
11162 * non-availability of flip done interrupt forces us to use
11163 * CS flips. Older platforms derive flip done using some clever
11164 * tricks involving the flip_pending status bits and vblank irqs.
11165 * So using MMIO flips there would disrupt this mechanism.
11166 */
11167
8e09bf83
CW
11168 if (ring == NULL)
11169 return true;
11170
84c33a64
SG
11171 if (INTEL_INFO(ring->dev)->gen < 5)
11172 return false;
11173
11174 if (i915.use_mmio_flip < 0)
11175 return false;
11176 else if (i915.use_mmio_flip > 0)
11177 return true;
14bf993e
OM
11178 else if (i915.enable_execlists)
11179 return true;
84c33a64 11180 else
b4716185 11181 return ring != i915_gem_request_get_ring(obj->last_write_req);
84c33a64
SG
11182}
11183
6042639c 11184static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
86efe24a 11185 unsigned int rotation,
6042639c 11186 struct intel_unpin_work *work)
ff944564
DL
11187{
11188 struct drm_device *dev = intel_crtc->base.dev;
11189 struct drm_i915_private *dev_priv = dev->dev_private;
11190 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
ff944564 11191 const enum pipe pipe = intel_crtc->pipe;
86efe24a 11192 u32 ctl, stride, tile_height;
ff944564
DL
11193
11194 ctl = I915_READ(PLANE_CTL(pipe, 0));
11195 ctl &= ~PLANE_CTL_TILED_MASK;
2ebef630
TU
11196 switch (fb->modifier[0]) {
11197 case DRM_FORMAT_MOD_NONE:
11198 break;
11199 case I915_FORMAT_MOD_X_TILED:
ff944564 11200 ctl |= PLANE_CTL_TILED_X;
2ebef630
TU
11201 break;
11202 case I915_FORMAT_MOD_Y_TILED:
11203 ctl |= PLANE_CTL_TILED_Y;
11204 break;
11205 case I915_FORMAT_MOD_Yf_TILED:
11206 ctl |= PLANE_CTL_TILED_YF;
11207 break;
11208 default:
11209 MISSING_CASE(fb->modifier[0]);
11210 }
ff944564
DL
11211
11212 /*
11213 * The stride is either expressed as a multiple of 64 bytes chunks for
11214 * linear buffers or in number of tiles for tiled buffers.
11215 */
86efe24a
TU
11216 if (intel_rotation_90_or_270(rotation)) {
11217 /* stride = Surface height in tiles */
11218 tile_height = intel_tile_height(dev, fb->pixel_format,
11219 fb->modifier[0], 0);
11220 stride = DIV_ROUND_UP(fb->height, tile_height);
11221 } else {
11222 stride = fb->pitches[0] /
11223 intel_fb_stride_alignment(dev, fb->modifier[0],
11224 fb->pixel_format);
11225 }
ff944564
DL
11226
11227 /*
11228 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11229 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11230 */
11231 I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11232 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11233
6042639c 11234 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
ff944564
DL
11235 POSTING_READ(PLANE_SURF(pipe, 0));
11236}
11237
6042639c
CW
11238static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11239 struct intel_unpin_work *work)
84c33a64
SG
11240{
11241 struct drm_device *dev = intel_crtc->base.dev;
11242 struct drm_i915_private *dev_priv = dev->dev_private;
11243 struct intel_framebuffer *intel_fb =
11244 to_intel_framebuffer(intel_crtc->base.primary->fb);
11245 struct drm_i915_gem_object *obj = intel_fb->obj;
f0f59a00 11246 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
84c33a64 11247 u32 dspcntr;
84c33a64 11248
84c33a64
SG
11249 dspcntr = I915_READ(reg);
11250
c5d97472
DL
11251 if (obj->tiling_mode != I915_TILING_NONE)
11252 dspcntr |= DISPPLANE_TILED;
11253 else
11254 dspcntr &= ~DISPPLANE_TILED;
11255
84c33a64
SG
11256 I915_WRITE(reg, dspcntr);
11257
6042639c 11258 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
84c33a64 11259 POSTING_READ(DSPSURF(intel_crtc->plane));
ff944564
DL
11260}
11261
11262/*
11263 * XXX: This is the temporary way to update the plane registers until we get
11264 * around to using the usual plane update functions for MMIO flips
11265 */
6042639c 11266static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
ff944564 11267{
6042639c
CW
11268 struct intel_crtc *crtc = mmio_flip->crtc;
11269 struct intel_unpin_work *work;
11270
11271 spin_lock_irq(&crtc->base.dev->event_lock);
11272 work = crtc->unpin_work;
11273 spin_unlock_irq(&crtc->base.dev->event_lock);
11274 if (work == NULL)
11275 return;
ff944564 11276
6042639c 11277 intel_mark_page_flip_active(work);
ff944564 11278
6042639c 11279 intel_pipe_update_start(crtc);
ff944564 11280
6042639c 11281 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
86efe24a 11282 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
ff944564
DL
11283 else
11284 /* use_mmio_flip() retricts MMIO flips to ilk+ */
6042639c 11285 ilk_do_mmio_flip(crtc, work);
ff944564 11286
6042639c 11287 intel_pipe_update_end(crtc);
84c33a64
SG
11288}
11289
9362c7c5 11290static void intel_mmio_flip_work_func(struct work_struct *work)
84c33a64 11291{
b2cfe0ab
CW
11292 struct intel_mmio_flip *mmio_flip =
11293 container_of(work, struct intel_mmio_flip, work);
84c33a64 11294
6042639c 11295 if (mmio_flip->req) {
eed29a5b 11296 WARN_ON(__i915_wait_request(mmio_flip->req,
b2cfe0ab 11297 mmio_flip->crtc->reset_counter,
bcafc4e3
CW
11298 false, NULL,
11299 &mmio_flip->i915->rps.mmioflips));
6042639c
CW
11300 i915_gem_request_unreference__unlocked(mmio_flip->req);
11301 }
84c33a64 11302
6042639c 11303 intel_do_mmio_flip(mmio_flip);
b2cfe0ab 11304 kfree(mmio_flip);
84c33a64
SG
11305}
11306
11307static int intel_queue_mmio_flip(struct drm_device *dev,
11308 struct drm_crtc *crtc,
86efe24a 11309 struct drm_i915_gem_object *obj)
84c33a64 11310{
b2cfe0ab
CW
11311 struct intel_mmio_flip *mmio_flip;
11312
11313 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11314 if (mmio_flip == NULL)
11315 return -ENOMEM;
84c33a64 11316
bcafc4e3 11317 mmio_flip->i915 = to_i915(dev);
eed29a5b 11318 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
b2cfe0ab 11319 mmio_flip->crtc = to_intel_crtc(crtc);
86efe24a 11320 mmio_flip->rotation = crtc->primary->state->rotation;
536f5b5e 11321
b2cfe0ab
CW
11322 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
11323 schedule_work(&mmio_flip->work);
84c33a64 11324
84c33a64
SG
11325 return 0;
11326}
11327
8c9f3aaf
JB
11328static int intel_default_queue_flip(struct drm_device *dev,
11329 struct drm_crtc *crtc,
11330 struct drm_framebuffer *fb,
ed8d1975 11331 struct drm_i915_gem_object *obj,
6258fbe2 11332 struct drm_i915_gem_request *req,
ed8d1975 11333 uint32_t flags)
8c9f3aaf
JB
11334{
11335 return -ENODEV;
11336}
11337
d6bbafa1
CW
11338static bool __intel_pageflip_stall_check(struct drm_device *dev,
11339 struct drm_crtc *crtc)
11340{
11341 struct drm_i915_private *dev_priv = dev->dev_private;
11342 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11343 struct intel_unpin_work *work = intel_crtc->unpin_work;
11344 u32 addr;
11345
11346 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11347 return true;
11348
908565c2
CW
11349 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
11350 return false;
11351
d6bbafa1
CW
11352 if (!work->enable_stall_check)
11353 return false;
11354
11355 if (work->flip_ready_vblank == 0) {
3a8a946e
DV
11356 if (work->flip_queued_req &&
11357 !i915_gem_request_completed(work->flip_queued_req, true))
d6bbafa1
CW
11358 return false;
11359
1e3feefd 11360 work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
d6bbafa1
CW
11361 }
11362
1e3feefd 11363 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
d6bbafa1
CW
11364 return false;
11365
11366 /* Potential stall - if we see that the flip has happened,
11367 * assume a missed interrupt. */
11368 if (INTEL_INFO(dev)->gen >= 4)
11369 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11370 else
11371 addr = I915_READ(DSPADDR(intel_crtc->plane));
11372
11373 /* There is a potential issue here with a false positive after a flip
11374 * to the same address. We could address this by checking for a
11375 * non-incrementing frame counter.
11376 */
11377 return addr == work->gtt_offset;
11378}
11379
11380void intel_check_page_flip(struct drm_device *dev, int pipe)
11381{
11382 struct drm_i915_private *dev_priv = dev->dev_private;
11383 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6ad790c0 11385 struct intel_unpin_work *work;
f326038a 11386
6c51d46f 11387 WARN_ON(!in_interrupt());
d6bbafa1
CW
11388
11389 if (crtc == NULL)
11390 return;
11391
f326038a 11392 spin_lock(&dev->event_lock);
6ad790c0
CW
11393 work = intel_crtc->unpin_work;
11394 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
d6bbafa1 11395 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
6ad790c0 11396 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
d6bbafa1 11397 page_flip_completed(intel_crtc);
6ad790c0 11398 work = NULL;
d6bbafa1 11399 }
6ad790c0
CW
11400 if (work != NULL &&
11401 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
11402 intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
f326038a 11403 spin_unlock(&dev->event_lock);
d6bbafa1
CW
11404}
11405
6b95a207
KH
11406static int intel_crtc_page_flip(struct drm_crtc *crtc,
11407 struct drm_framebuffer *fb,
ed8d1975
KP
11408 struct drm_pending_vblank_event *event,
11409 uint32_t page_flip_flags)
6b95a207
KH
11410{
11411 struct drm_device *dev = crtc->dev;
11412 struct drm_i915_private *dev_priv = dev->dev_private;
f4510a27 11413 struct drm_framebuffer *old_fb = crtc->primary->fb;
2ff8fde1 11414 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6b95a207 11415 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
455a6808 11416 struct drm_plane *primary = crtc->primary;
a071fa00 11417 enum pipe pipe = intel_crtc->pipe;
6b95a207 11418 struct intel_unpin_work *work;
a4872ba6 11419 struct intel_engine_cs *ring;
cf5d8a46 11420 bool mmio_flip;
91af127f 11421 struct drm_i915_gem_request *request = NULL;
52e68630 11422 int ret;
6b95a207 11423
2ff8fde1
MR
11424 /*
11425 * drm_mode_page_flip_ioctl() should already catch this, but double
11426 * check to be safe. In the future we may enable pageflipping from
11427 * a disabled primary plane.
11428 */
11429 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11430 return -EBUSY;
11431
e6a595d2 11432 /* Can't change pixel format via MI display flips. */
f4510a27 11433 if (fb->pixel_format != crtc->primary->fb->pixel_format)
e6a595d2
VS
11434 return -EINVAL;
11435
11436 /*
11437 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11438 * Note that pitch changes could also affect these register.
11439 */
11440 if (INTEL_INFO(dev)->gen > 3 &&
f4510a27
MR
11441 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11442 fb->pitches[0] != crtc->primary->fb->pitches[0]))
e6a595d2
VS
11443 return -EINVAL;
11444
f900db47
CW
11445 if (i915_terminally_wedged(&dev_priv->gpu_error))
11446 goto out_hang;
11447
b14c5679 11448 work = kzalloc(sizeof(*work), GFP_KERNEL);
6b95a207
KH
11449 if (work == NULL)
11450 return -ENOMEM;
11451
6b95a207 11452 work->event = event;
b4a98e57 11453 work->crtc = crtc;
ab8d6675 11454 work->old_fb = old_fb;
6b95a207
KH
11455 INIT_WORK(&work->work, intel_unpin_work_fn);
11456
87b6b101 11457 ret = drm_crtc_vblank_get(crtc);
7317c75e
JB
11458 if (ret)
11459 goto free_work;
11460
6b95a207 11461 /* We borrow the event spin lock for protecting unpin_work */
5e2d7afc 11462 spin_lock_irq(&dev->event_lock);
6b95a207 11463 if (intel_crtc->unpin_work) {
d6bbafa1
CW
11464 /* Before declaring the flip queue wedged, check if
11465 * the hardware completed the operation behind our backs.
11466 */
11467 if (__intel_pageflip_stall_check(dev, crtc)) {
11468 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11469 page_flip_completed(intel_crtc);
11470 } else {
11471 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
5e2d7afc 11472 spin_unlock_irq(&dev->event_lock);
468f0b44 11473
d6bbafa1
CW
11474 drm_crtc_vblank_put(crtc);
11475 kfree(work);
11476 return -EBUSY;
11477 }
6b95a207
KH
11478 }
11479 intel_crtc->unpin_work = work;
5e2d7afc 11480 spin_unlock_irq(&dev->event_lock);
6b95a207 11481
b4a98e57
CW
11482 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11483 flush_workqueue(dev_priv->wq);
11484
75dfca80 11485 /* Reference the objects for the scheduled work. */
ab8d6675 11486 drm_framebuffer_reference(work->old_fb);
05394f39 11487 drm_gem_object_reference(&obj->base);
6b95a207 11488
f4510a27 11489 crtc->primary->fb = fb;
afd65eb4 11490 update_state_fb(crtc->primary);
1ed1f968 11491
e1f99ce6 11492 work->pending_flip_obj = obj;
e1f99ce6 11493
89ed88ba
CW
11494 ret = i915_mutex_lock_interruptible(dev);
11495 if (ret)
11496 goto cleanup;
11497
b4a98e57 11498 atomic_inc(&intel_crtc->unpin_work_count);
10d83730 11499 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
e1f99ce6 11500
75f7f3ec 11501 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
fd8f507c 11502 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
75f7f3ec 11503
4fa62c89
VS
11504 if (IS_VALLEYVIEW(dev)) {
11505 ring = &dev_priv->ring[BCS];
ab8d6675 11506 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
8e09bf83
CW
11507 /* vlv: DISPLAY_FLIP fails to change tiling */
11508 ring = NULL;
48bf5b2d 11509 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2a92d5bc 11510 ring = &dev_priv->ring[BCS];
4fa62c89 11511 } else if (INTEL_INFO(dev)->gen >= 7) {
b4716185 11512 ring = i915_gem_request_get_ring(obj->last_write_req);
4fa62c89
VS
11513 if (ring == NULL || ring->id != RCS)
11514 ring = &dev_priv->ring[BCS];
11515 } else {
11516 ring = &dev_priv->ring[RCS];
11517 }
11518
cf5d8a46
CW
11519 mmio_flip = use_mmio_flip(ring, obj);
11520
11521 /* When using CS flips, we want to emit semaphores between rings.
11522 * However, when using mmio flips we will create a task to do the
11523 * synchronisation, so all we want here is to pin the framebuffer
11524 * into the display plane and skip any waits.
11525 */
7580d774
ML
11526 if (!mmio_flip) {
11527 ret = i915_gem_object_sync(obj, ring, &request);
11528 if (ret)
11529 goto cleanup_pending;
11530 }
11531
82bc3b2d 11532 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
7580d774 11533 crtc->primary->state);
8c9f3aaf
JB
11534 if (ret)
11535 goto cleanup_pending;
6b95a207 11536
dedf278c
TU
11537 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11538 obj, 0);
11539 work->gtt_offset += intel_crtc->dspaddr_offset;
4fa62c89 11540
cf5d8a46 11541 if (mmio_flip) {
86efe24a 11542 ret = intel_queue_mmio_flip(dev, crtc, obj);
d6bbafa1
CW
11543 if (ret)
11544 goto cleanup_unpin;
11545
f06cc1b9
JH
11546 i915_gem_request_assign(&work->flip_queued_req,
11547 obj->last_write_req);
d6bbafa1 11548 } else {
6258fbe2
JH
11549 if (!request) {
11550 ret = i915_gem_request_alloc(ring, ring->default_context, &request);
11551 if (ret)
11552 goto cleanup_unpin;
11553 }
11554
11555 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
d6bbafa1
CW
11556 page_flip_flags);
11557 if (ret)
11558 goto cleanup_unpin;
11559
6258fbe2 11560 i915_gem_request_assign(&work->flip_queued_req, request);
d6bbafa1
CW
11561 }
11562
91af127f 11563 if (request)
75289874 11564 i915_add_request_no_flush(request);
91af127f 11565
1e3feefd 11566 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
d6bbafa1 11567 work->enable_stall_check = true;
4fa62c89 11568
ab8d6675 11569 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
a9ff8714 11570 to_intel_plane(primary)->frontbuffer_bit);
c80ac854 11571 mutex_unlock(&dev->struct_mutex);
a071fa00 11572
4e1e26f1 11573 intel_fbc_disable_crtc(intel_crtc);
a9ff8714
VS
11574 intel_frontbuffer_flip_prepare(dev,
11575 to_intel_plane(primary)->frontbuffer_bit);
6b95a207 11576
e5510fac
JB
11577 trace_i915_flip_request(intel_crtc->plane, obj);
11578
6b95a207 11579 return 0;
96b099fd 11580
4fa62c89 11581cleanup_unpin:
82bc3b2d 11582 intel_unpin_fb_obj(fb, crtc->primary->state);
8c9f3aaf 11583cleanup_pending:
91af127f
JH
11584 if (request)
11585 i915_gem_request_cancel(request);
b4a98e57 11586 atomic_dec(&intel_crtc->unpin_work_count);
89ed88ba
CW
11587 mutex_unlock(&dev->struct_mutex);
11588cleanup:
f4510a27 11589 crtc->primary->fb = old_fb;
afd65eb4 11590 update_state_fb(crtc->primary);
89ed88ba
CW
11591
11592 drm_gem_object_unreference_unlocked(&obj->base);
ab8d6675 11593 drm_framebuffer_unreference(work->old_fb);
96b099fd 11594
5e2d7afc 11595 spin_lock_irq(&dev->event_lock);
96b099fd 11596 intel_crtc->unpin_work = NULL;
5e2d7afc 11597 spin_unlock_irq(&dev->event_lock);
96b099fd 11598
87b6b101 11599 drm_crtc_vblank_put(crtc);
7317c75e 11600free_work:
96b099fd
CW
11601 kfree(work);
11602
f900db47 11603 if (ret == -EIO) {
02e0efb5
ML
11604 struct drm_atomic_state *state;
11605 struct drm_plane_state *plane_state;
11606
f900db47 11607out_hang:
02e0efb5
ML
11608 state = drm_atomic_state_alloc(dev);
11609 if (!state)
11610 return -ENOMEM;
11611 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11612
11613retry:
11614 plane_state = drm_atomic_get_plane_state(state, primary);
11615 ret = PTR_ERR_OR_ZERO(plane_state);
11616 if (!ret) {
11617 drm_atomic_set_fb_for_plane(plane_state, fb);
11618
11619 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11620 if (!ret)
11621 ret = drm_atomic_commit(state);
11622 }
11623
11624 if (ret == -EDEADLK) {
11625 drm_modeset_backoff(state->acquire_ctx);
11626 drm_atomic_state_clear(state);
11627 goto retry;
11628 }
11629
11630 if (ret)
11631 drm_atomic_state_free(state);
11632
f0d3dad3 11633 if (ret == 0 && event) {
5e2d7afc 11634 spin_lock_irq(&dev->event_lock);
a071fa00 11635 drm_send_vblank_event(dev, pipe, event);
5e2d7afc 11636 spin_unlock_irq(&dev->event_lock);
f0d3dad3 11637 }
f900db47 11638 }
96b099fd 11639 return ret;
6b95a207
KH
11640}
11641
da20eabd
ML
11642
11643/**
11644 * intel_wm_need_update - Check whether watermarks need updating
11645 * @plane: drm plane
11646 * @state: new plane state
11647 *
11648 * Check current plane state versus the new one to determine whether
11649 * watermarks need to be recalculated.
11650 *
11651 * Returns true or false.
11652 */
11653static bool intel_wm_need_update(struct drm_plane *plane,
11654 struct drm_plane_state *state)
11655{
d21fbe87
MR
11656 struct intel_plane_state *new = to_intel_plane_state(state);
11657 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11658
11659 /* Update watermarks on tiling or size changes. */
da20eabd
ML
11660 if (!plane->state->fb || !state->fb ||
11661 plane->state->fb->modifier[0] != state->fb->modifier[0] ||
d21fbe87
MR
11662 plane->state->rotation != state->rotation ||
11663 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11664 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11665 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11666 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
2791a16c 11667 return true;
7809e5ae 11668
2791a16c 11669 return false;
7809e5ae
MR
11670}
11671
d21fbe87
MR
11672static bool needs_scaling(struct intel_plane_state *state)
11673{
11674 int src_w = drm_rect_width(&state->src) >> 16;
11675 int src_h = drm_rect_height(&state->src) >> 16;
11676 int dst_w = drm_rect_width(&state->dst);
11677 int dst_h = drm_rect_height(&state->dst);
11678
11679 return (src_w != dst_w || src_h != dst_h);
11680}
11681
da20eabd
ML
11682int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11683 struct drm_plane_state *plane_state)
11684{
11685 struct drm_crtc *crtc = crtc_state->crtc;
11686 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11687 struct drm_plane *plane = plane_state->plane;
11688 struct drm_device *dev = crtc->dev;
11689 struct drm_i915_private *dev_priv = dev->dev_private;
11690 struct intel_plane_state *old_plane_state =
11691 to_intel_plane_state(plane->state);
11692 int idx = intel_crtc->base.base.id, ret;
11693 int i = drm_plane_index(plane);
11694 bool mode_changed = needs_modeset(crtc_state);
11695 bool was_crtc_enabled = crtc->state->active;
11696 bool is_crtc_enabled = crtc_state->active;
da20eabd
ML
11697 bool turn_off, turn_on, visible, was_visible;
11698 struct drm_framebuffer *fb = plane_state->fb;
11699
11700 if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
11701 plane->type != DRM_PLANE_TYPE_CURSOR) {
11702 ret = skl_update_scaler_plane(
11703 to_intel_crtc_state(crtc_state),
11704 to_intel_plane_state(plane_state));
11705 if (ret)
11706 return ret;
11707 }
11708
da20eabd
ML
11709 was_visible = old_plane_state->visible;
11710 visible = to_intel_plane_state(plane_state)->visible;
11711
11712 if (!was_crtc_enabled && WARN_ON(was_visible))
11713 was_visible = false;
11714
11715 if (!is_crtc_enabled && WARN_ON(visible))
11716 visible = false;
11717
11718 if (!was_visible && !visible)
11719 return 0;
11720
11721 turn_off = was_visible && (!visible || mode_changed);
11722 turn_on = visible && (!was_visible || mode_changed);
11723
11724 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11725 plane->base.id, fb ? fb->base.id : -1);
11726
11727 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11728 plane->base.id, was_visible, visible,
11729 turn_off, turn_on, mode_changed);
11730
852eb00d 11731 if (turn_on) {
f015c551 11732 intel_crtc->atomic.update_wm_pre = true;
852eb00d
VS
11733 /* must disable cxsr around plane enable/disable */
11734 if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11735 intel_crtc->atomic.disable_cxsr = true;
11736 /* to potentially re-enable cxsr */
11737 intel_crtc->atomic.wait_vblank = true;
11738 intel_crtc->atomic.update_wm_post = true;
11739 }
11740 } else if (turn_off) {
f015c551 11741 intel_crtc->atomic.update_wm_post = true;
852eb00d
VS
11742 /* must disable cxsr around plane enable/disable */
11743 if (plane->type != DRM_PLANE_TYPE_CURSOR) {
11744 if (is_crtc_enabled)
11745 intel_crtc->atomic.wait_vblank = true;
11746 intel_crtc->atomic.disable_cxsr = true;
11747 }
11748 } else if (intel_wm_need_update(plane, plane_state)) {
f015c551 11749 intel_crtc->atomic.update_wm_pre = true;
852eb00d 11750 }
da20eabd 11751
8be6ca85 11752 if (visible || was_visible)
a9ff8714
VS
11753 intel_crtc->atomic.fb_bits |=
11754 to_intel_plane(plane)->frontbuffer_bit;
11755
da20eabd
ML
11756 switch (plane->type) {
11757 case DRM_PLANE_TYPE_PRIMARY:
da20eabd
ML
11758 intel_crtc->atomic.pre_disable_primary = turn_off;
11759 intel_crtc->atomic.post_enable_primary = turn_on;
11760
066cf55b
RV
11761 if (turn_off) {
11762 /*
11763 * FIXME: Actually if we will still have any other
11764 * plane enabled on the pipe we could let IPS enabled
11765 * still, but for now lets consider that when we make
11766 * primary invisible by setting DSPCNTR to 0 on
11767 * update_primary_plane function IPS needs to be
11768 * disable.
11769 */
11770 intel_crtc->atomic.disable_ips = true;
11771
da20eabd 11772 intel_crtc->atomic.disable_fbc = true;
066cf55b 11773 }
da20eabd
ML
11774
11775 /*
11776 * FBC does not work on some platforms for rotated
11777 * planes, so disable it when rotation is not 0 and
11778 * update it when rotation is set back to 0.
11779 *
11780 * FIXME: This is redundant with the fbc update done in
11781 * the primary plane enable function except that that
11782 * one is done too late. We eventually need to unify
11783 * this.
11784 */
11785
11786 if (visible &&
11787 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11788 dev_priv->fbc.crtc == intel_crtc &&
11789 plane_state->rotation != BIT(DRM_ROTATE_0))
11790 intel_crtc->atomic.disable_fbc = true;
11791
11792 /*
11793 * BDW signals flip done immediately if the plane
11794 * is disabled, even if the plane enable is already
11795 * armed to occur at the next vblank :(
11796 */
11797 if (turn_on && IS_BROADWELL(dev))
11798 intel_crtc->atomic.wait_vblank = true;
11799
11800 intel_crtc->atomic.update_fbc |= visible || mode_changed;
11801 break;
11802 case DRM_PLANE_TYPE_CURSOR:
da20eabd
ML
11803 break;
11804 case DRM_PLANE_TYPE_OVERLAY:
d21fbe87
MR
11805 /*
11806 * WaCxSRDisabledForSpriteScaling:ivb
11807 *
11808 * cstate->update_wm was already set above, so this flag will
11809 * take effect when we commit and program watermarks.
11810 */
11811 if (IS_IVYBRIDGE(dev) &&
11812 needs_scaling(to_intel_plane_state(plane_state)) &&
11813 !needs_scaling(old_plane_state)) {
11814 to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
11815 } else if (turn_off && !mode_changed) {
da20eabd
ML
11816 intel_crtc->atomic.wait_vblank = true;
11817 intel_crtc->atomic.update_sprite_watermarks |=
11818 1 << i;
11819 }
d21fbe87
MR
11820
11821 break;
da20eabd
ML
11822 }
11823 return 0;
11824}
11825
6d3a1ce7
ML
11826static bool encoders_cloneable(const struct intel_encoder *a,
11827 const struct intel_encoder *b)
11828{
11829 /* masks could be asymmetric, so check both ways */
11830 return a == b || (a->cloneable & (1 << b->type) &&
11831 b->cloneable & (1 << a->type));
11832}
11833
11834static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11835 struct intel_crtc *crtc,
11836 struct intel_encoder *encoder)
11837{
11838 struct intel_encoder *source_encoder;
11839 struct drm_connector *connector;
11840 struct drm_connector_state *connector_state;
11841 int i;
11842
11843 for_each_connector_in_state(state, connector, connector_state, i) {
11844 if (connector_state->crtc != &crtc->base)
11845 continue;
11846
11847 source_encoder =
11848 to_intel_encoder(connector_state->best_encoder);
11849 if (!encoders_cloneable(encoder, source_encoder))
11850 return false;
11851 }
11852
11853 return true;
11854}
11855
11856static bool check_encoder_cloning(struct drm_atomic_state *state,
11857 struct intel_crtc *crtc)
11858{
11859 struct intel_encoder *encoder;
11860 struct drm_connector *connector;
11861 struct drm_connector_state *connector_state;
11862 int i;
11863
11864 for_each_connector_in_state(state, connector, connector_state, i) {
11865 if (connector_state->crtc != &crtc->base)
11866 continue;
11867
11868 encoder = to_intel_encoder(connector_state->best_encoder);
11869 if (!check_single_encoder_cloning(state, crtc, encoder))
11870 return false;
11871 }
11872
11873 return true;
11874}
11875
/*
 * Atomic ->atomic_check hook for intel CRTCs: rejects invalid encoder
 * cloning, then computes the clock, watermark and (gen9+) scaler state
 * needed by the new configuration.
 *
 * Returns 0 on success or a negative error code to fail the atomic check.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	/* Full modesets must not combine encoders that can't be cloned. */
	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* CRTC is being turned off: schedule a post-commit watermark update. */
	if (mode_changed && !crtc_state->active)
		intel_crtc->atomic.update_wm_post = true;

	/*
	 * Compute the new PLL configuration. The shared DPLL must still be
	 * unassigned (DPLL_ID_PRIVATE) at this point; anything else would
	 * indicate stale state, hence the WARN_ON.
	 */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
		if (ret)
			return ret;
	}

	/* Gen9+ pipes have shared scalers that must be (re)assigned here. */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11923
/*
 * CRTC helper vtable: wires the atomic check/begin/flush entry points
 * (plus the legacy set_base/load_lut hooks) into the DRM helper library.
 */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11931
d29b2f9d
ACO
11932static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11933{
11934 struct intel_connector *connector;
11935
11936 for_each_intel_connector(dev, connector) {
11937 if (connector->base.encoder) {
11938 connector->base.state->best_encoder =
11939 connector->base.encoder;
11940 connector->base.state->crtc =
11941 connector->base.encoder->crtc;
11942 } else {
11943 connector->base.state->best_encoder = NULL;
11944 connector->base.state->crtc = NULL;
11945 }
11946 }
11947}
11948
050f7aeb 11949static void
eba905b2 11950connected_sink_compute_bpp(struct intel_connector *connector,
5cec258b 11951 struct intel_crtc_state *pipe_config)
050f7aeb
DV
11952{
11953 int bpp = pipe_config->pipe_bpp;
11954
11955 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
11956 connector->base.base.id,
c23cc417 11957 connector->base.name);
050f7aeb
DV
11958
11959 /* Don't use an invalid EDID bpc value */
11960 if (connector->base.display_info.bpc &&
11961 connector->base.display_info.bpc * 3 < bpp) {
11962 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
11963 bpp, connector->base.display_info.bpc*3);
11964 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
11965 }
11966
11967 /* Clamp bpp to 8 on screens without EDID 1.4 */
11968 if (connector->base.display_info.bpc == 0 && bpp > 24) {
11969 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
11970 bpp);
11971 pipe_config->pipe_bpp = 24;
11972 }
11973}
11974
4e53c2e0 11975static int
050f7aeb 11976compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5cec258b 11977 struct intel_crtc_state *pipe_config)
4e53c2e0 11978{
050f7aeb 11979 struct drm_device *dev = crtc->base.dev;
1486017f 11980 struct drm_atomic_state *state;
da3ced29
ACO
11981 struct drm_connector *connector;
11982 struct drm_connector_state *connector_state;
1486017f 11983 int bpp, i;
4e53c2e0 11984
d328c9d7 11985 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
4e53c2e0 11986 bpp = 10*3;
d328c9d7
DV
11987 else if (INTEL_INFO(dev)->gen >= 5)
11988 bpp = 12*3;
11989 else
11990 bpp = 8*3;
11991
4e53c2e0 11992
4e53c2e0
DV
11993 pipe_config->pipe_bpp = bpp;
11994
1486017f
ACO
11995 state = pipe_config->base.state;
11996
4e53c2e0 11997 /* Clamp display bpp to EDID value */
da3ced29
ACO
11998 for_each_connector_in_state(state, connector, connector_state, i) {
11999 if (connector_state->crtc != &crtc->base)
4e53c2e0
DV
12000 continue;
12001
da3ced29
ACO
12002 connected_sink_compute_bpp(to_intel_connector(connector),
12003 pipe_config);
4e53c2e0
DV
12004 }
12005
12006 return bpp;
12007}
12008
644db711
DV
/* Log the hardware (crtc_*) timing fields of @mode at KMS debug level. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12019
/*
 * Dump the full contents of @pipe_config (plus the planes attached to
 * @crtc) at KMS debug level. @context is a caller-supplied tag printed in
 * the header line. Purely diagnostic: no state is modified.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second M/N set: used for high/low refresh-rate switching on BDW+. */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* PLL state layout differs per platform; pick the matching dump. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	/* Walk every plane on the device and dump those on this pipe. */
	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		/* No framebuffer bound means the plane is disabled. */
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		/* src coordinates are 16.16 fixed point, hence the >> 16. */
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}
12155
/*
 * Reject atomic states that route two digital outputs through the same
 * physical port. Returns false when a port would be used more than once.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	unsigned int used_ports = 0;
	int i;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: UNKNOWN is a digital port on DDI */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fallthrough */
		default:
			break;
		}
	}

	return true;
}
12200
83a57153
ACO
12201static void
12202clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12203{
12204 struct drm_crtc_state tmp_state;
663a3640 12205 struct intel_crtc_scaler_state scaler_state;
4978cc93
ACO
12206 struct intel_dpll_hw_state dpll_hw_state;
12207 enum intel_dpll_id shared_dpll;
8504c74c 12208 uint32_t ddi_pll_sel;
c4e2d043 12209 bool force_thru;
83a57153 12210
7546a384
ACO
12211 /* FIXME: before the switch to atomic started, a new pipe_config was
12212 * kzalloc'd. Code that depends on any field being zero should be
12213 * fixed, so that the crtc_state can be safely duplicated. For now,
12214 * only fields that are know to not cause problems are preserved. */
12215
83a57153 12216 tmp_state = crtc_state->base;
663a3640 12217 scaler_state = crtc_state->scaler_state;
4978cc93
ACO
12218 shared_dpll = crtc_state->shared_dpll;
12219 dpll_hw_state = crtc_state->dpll_hw_state;
8504c74c 12220 ddi_pll_sel = crtc_state->ddi_pll_sel;
c4e2d043 12221 force_thru = crtc_state->pch_pfit.force_thru;
4978cc93 12222
83a57153 12223 memset(crtc_state, 0, sizeof *crtc_state);
4978cc93 12224
83a57153 12225 crtc_state->base = tmp_state;
663a3640 12226 crtc_state->scaler_state = scaler_state;
4978cc93
ACO
12227 crtc_state->shared_dpll = shared_dpll;
12228 crtc_state->dpll_hw_state = dpll_hw_state;
8504c74c 12229 crtc_state->ddi_pll_sel = ddi_pll_sel;
c4e2d043 12230 crtc_state->pch_pfit.force_thru = force_thru;
83a57153
ACO
12231}
12232
/*
 * Compute the full pipe configuration for @crtc from the atomic state in
 * @pipe_config->base.state: clears stale state, derives the baseline bpp,
 * lets every connected encoder adjust the mode (retrying once if an
 * encoder asks via RETRY), and finally settles dithering.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* allow exactly one RETRY round-trip */

	clear_intel_crtc_state(pipe_config);

	/* Default transcoder: same index as the pipe. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* RETRY means bandwidth constraints changed; recompute once. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
47f1c6c9 12337
ea9d758d 12338static void
4740b0f2 12339intel_modeset_update_crtc_state(struct drm_atomic_state *state)
ea9d758d 12340{
0a9ab303
ACO
12341 struct drm_crtc *crtc;
12342 struct drm_crtc_state *crtc_state;
8a75d157 12343 int i;
ea9d758d 12344
7668851f 12345 /* Double check state. */
8a75d157 12346 for_each_crtc_in_state(state, crtc, crtc_state, i) {
3cb480bc 12347 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
fc467a22
ML
12348
12349 /* Update hwmode for vblank functions */
12350 if (crtc->state->active)
12351 crtc->hwmode = crtc->state->adjusted_mode;
12352 else
12353 crtc->hwmode.crtc_clock = 0;
61067a5e
ML
12354
12355 /*
12356 * Update legacy state to satisfy fbc code. This can
12357 * be removed when fbc uses the atomic state.
12358 */
12359 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
12360 struct drm_plane_state *plane_state = crtc->primary->state;
12361
12362 crtc->primary->fb = plane_state->fb;
12363 crtc->x = plane_state->src_x >> 16;
12364 crtc->y = plane_state->src_y >> 16;
12365 }
ea9d758d 12366 }
ea9d758d
DV
12367}
12368
/*
 * Report whether two clock values agree to within roughly 5%.
 * Zero is only considered equal to itself.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;

	/* (|diff| + sum) * 100 / sum < 105  <=>  |diff| within ~5% of sum */
	return (abs(clock1 - clock2) + sum) * 100 / sum < 105;
}
12386
25c5b266
DV
/*
 * for_each_intel_crtc_masked - iterate over the intel_crtcs of @dev whose
 * pipe bit is set in @mask (bit N selects pipe N).
 *
 * NOTE(review): @mask is expanded unparenthesized and the trailing 'if'
 * has no 'else', so callers must pass a simple expression and brace any
 * surrounding if/else — confirm call sites before changing.
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))
25c5b266 12392
cfb23ed6
ML
12393static bool
12394intel_compare_m_n(unsigned int m, unsigned int n,
12395 unsigned int m2, unsigned int n2,
12396 bool exact)
12397{
12398 if (m == m2 && n == n2)
12399 return true;
12400
12401 if (exact || !m || !n || !m2 || !n2)
12402 return false;
12403
12404 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12405
12406 if (m > m2) {
12407 while (m > m2) {
12408 m2 <<= 1;
12409 n2 <<= 1;
12410 }
12411 } else if (m < m2) {
12412 while (m < m2) {
12413 m <<= 1;
12414 n <<= 1;
12415 }
12416 }
12417
12418 return m == m2 && n == n2;
12419}
12420
12421static bool
12422intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12423 struct intel_link_m_n *m2_n2,
12424 bool adjust)
12425{
12426 if (m_n->tu == m2_n2->tu &&
12427 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12428 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12429 intel_compare_m_n(m_n->link_m, m_n->link_n,
12430 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12431 if (adjust)
12432 *m2_n2 = *m_n;
12433
12434 return true;
12435 }
12436
12437 return false;
12438}
12439
0e8ffe1b 12440static bool
2fa2fe9a 12441intel_pipe_config_compare(struct drm_device *dev,
5cec258b 12442 struct intel_crtc_state *current_config,
cfb23ed6
ML
12443 struct intel_crtc_state *pipe_config,
12444 bool adjust)
0e8ffe1b 12445{
cfb23ed6
ML
12446 bool ret = true;
12447
12448#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12449 do { \
12450 if (!adjust) \
12451 DRM_ERROR(fmt, ##__VA_ARGS__); \
12452 else \
12453 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12454 } while (0)
12455
66e985c0
DV
12456#define PIPE_CONF_CHECK_X(name) \
12457 if (current_config->name != pipe_config->name) { \
cfb23ed6 12458 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
66e985c0
DV
12459 "(expected 0x%08x, found 0x%08x)\n", \
12460 current_config->name, \
12461 pipe_config->name); \
cfb23ed6 12462 ret = false; \
66e985c0
DV
12463 }
12464
08a24034
DV
12465#define PIPE_CONF_CHECK_I(name) \
12466 if (current_config->name != pipe_config->name) { \
cfb23ed6 12467 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
08a24034
DV
12468 "(expected %i, found %i)\n", \
12469 current_config->name, \
12470 pipe_config->name); \
cfb23ed6
ML
12471 ret = false; \
12472 }
12473
12474#define PIPE_CONF_CHECK_M_N(name) \
12475 if (!intel_compare_link_m_n(&current_config->name, \
12476 &pipe_config->name,\
12477 adjust)) { \
12478 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12479 "(expected tu %i gmch %i/%i link %i/%i, " \
12480 "found tu %i, gmch %i/%i link %i/%i)\n", \
12481 current_config->name.tu, \
12482 current_config->name.gmch_m, \
12483 current_config->name.gmch_n, \
12484 current_config->name.link_m, \
12485 current_config->name.link_n, \
12486 pipe_config->name.tu, \
12487 pipe_config->name.gmch_m, \
12488 pipe_config->name.gmch_n, \
12489 pipe_config->name.link_m, \
12490 pipe_config->name.link_n); \
12491 ret = false; \
12492 }
12493
12494#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12495 if (!intel_compare_link_m_n(&current_config->name, \
12496 &pipe_config->name, adjust) && \
12497 !intel_compare_link_m_n(&current_config->alt_name, \
12498 &pipe_config->name, adjust)) { \
12499 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12500 "(expected tu %i gmch %i/%i link %i/%i, " \
12501 "or tu %i gmch %i/%i link %i/%i, " \
12502 "found tu %i, gmch %i/%i link %i/%i)\n", \
12503 current_config->name.tu, \
12504 current_config->name.gmch_m, \
12505 current_config->name.gmch_n, \
12506 current_config->name.link_m, \
12507 current_config->name.link_n, \
12508 current_config->alt_name.tu, \
12509 current_config->alt_name.gmch_m, \
12510 current_config->alt_name.gmch_n, \
12511 current_config->alt_name.link_m, \
12512 current_config->alt_name.link_n, \
12513 pipe_config->name.tu, \
12514 pipe_config->name.gmch_m, \
12515 pipe_config->name.gmch_n, \
12516 pipe_config->name.link_m, \
12517 pipe_config->name.link_n); \
12518 ret = false; \
88adfff1
DV
12519 }
12520
b95af8be
VK
12521/* This is required for BDW+ where there is only one set of registers for
12522 * switching between high and low RR.
12523 * This macro can be used whenever a comparison has to be made between one
12524 * hw state and multiple sw state variables.
12525 */
12526#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
12527 if ((current_config->name != pipe_config->name) && \
12528 (current_config->alt_name != pipe_config->name)) { \
cfb23ed6 12529 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
b95af8be
VK
12530 "(expected %i or %i, found %i)\n", \
12531 current_config->name, \
12532 current_config->alt_name, \
12533 pipe_config->name); \
cfb23ed6 12534 ret = false; \
b95af8be
VK
12535 }
12536
1bd1bd80
DV
12537#define PIPE_CONF_CHECK_FLAGS(name, mask) \
12538 if ((current_config->name ^ pipe_config->name) & (mask)) { \
cfb23ed6 12539 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
1bd1bd80
DV
12540 "(expected %i, found %i)\n", \
12541 current_config->name & (mask), \
12542 pipe_config->name & (mask)); \
cfb23ed6 12543 ret = false; \
1bd1bd80
DV
12544 }
12545
5e550656
VS
12546#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12547 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
cfb23ed6 12548 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5e550656
VS
12549 "(expected %i, found %i)\n", \
12550 current_config->name, \
12551 pipe_config->name); \
cfb23ed6 12552 ret = false; \
5e550656
VS
12553 }
12554
bb760063
DV
12555#define PIPE_CONF_QUIRK(quirk) \
12556 ((current_config->quirks | pipe_config->quirks) & (quirk))
12557
eccb140b
DV
12558 PIPE_CONF_CHECK_I(cpu_transcoder);
12559
08a24034
DV
12560 PIPE_CONF_CHECK_I(has_pch_encoder);
12561 PIPE_CONF_CHECK_I(fdi_lanes);
cfb23ed6 12562 PIPE_CONF_CHECK_M_N(fdi_m_n);
08a24034 12563
eb14cb74 12564 PIPE_CONF_CHECK_I(has_dp_encoder);
90a6b7b0 12565 PIPE_CONF_CHECK_I(lane_count);
b95af8be
VK
12566
12567 if (INTEL_INFO(dev)->gen < 8) {
cfb23ed6
ML
12568 PIPE_CONF_CHECK_M_N(dp_m_n);
12569
12570 PIPE_CONF_CHECK_I(has_drrs);
12571 if (current_config->has_drrs)
12572 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12573 } else
12574 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
eb14cb74 12575
2d112de7
ACO
12576 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12577 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12578 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12579 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12580 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12581 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
1bd1bd80 12582
2d112de7
ACO
12583 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12584 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12585 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12586 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12587 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12588 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
1bd1bd80 12589
c93f54cf 12590 PIPE_CONF_CHECK_I(pixel_multiplier);
6897b4b5 12591 PIPE_CONF_CHECK_I(has_hdmi_sink);
b5a9fa09
DV
12592 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12593 IS_VALLEYVIEW(dev))
12594 PIPE_CONF_CHECK_I(limited_color_range);
e43823ec 12595 PIPE_CONF_CHECK_I(has_infoframe);
6c49f241 12596
9ed109a7
DV
12597 PIPE_CONF_CHECK_I(has_audio);
12598
2d112de7 12599 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
1bd1bd80
DV
12600 DRM_MODE_FLAG_INTERLACE);
12601
bb760063 12602 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
2d112de7 12603 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12604 DRM_MODE_FLAG_PHSYNC);
2d112de7 12605 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12606 DRM_MODE_FLAG_NHSYNC);
2d112de7 12607 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12608 DRM_MODE_FLAG_PVSYNC);
2d112de7 12609 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063
DV
12610 DRM_MODE_FLAG_NVSYNC);
12611 }
045ac3b5 12612
333b8ca8 12613 PIPE_CONF_CHECK_X(gmch_pfit.control);
e2ff2d4a
DV
12614 /* pfit ratios are autocomputed by the hw on gen4+ */
12615 if (INTEL_INFO(dev)->gen < 4)
12616 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
333b8ca8 12617 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9953599b 12618
bfd16b2a
ML
12619 if (!adjust) {
12620 PIPE_CONF_CHECK_I(pipe_src_w);
12621 PIPE_CONF_CHECK_I(pipe_src_h);
12622
12623 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12624 if (current_config->pch_pfit.enabled) {
12625 PIPE_CONF_CHECK_X(pch_pfit.pos);
12626 PIPE_CONF_CHECK_X(pch_pfit.size);
12627 }
2fa2fe9a 12628
7aefe2b5
ML
12629 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12630 }
a1b2278e 12631
e59150dc
JB
12632 /* BDW+ don't expose a synchronous way to read the state */
12633 if (IS_HASWELL(dev))
12634 PIPE_CONF_CHECK_I(ips_enabled);
42db64ef 12635
282740f7
VS
12636 PIPE_CONF_CHECK_I(double_wide);
12637
26804afd
DV
12638 PIPE_CONF_CHECK_X(ddi_pll_sel);
12639
c0d43d62 12640 PIPE_CONF_CHECK_I(shared_dpll);
66e985c0 12641 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8bcc2795 12642 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
66e985c0
DV
12643 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12644 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
d452c5b6 12645 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
3f4cd19f
DL
12646 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12647 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12648 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
c0d43d62 12649
42571aef
VS
12650 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12651 PIPE_CONF_CHECK_I(pipe_bpp);
12652
2d112de7 12653 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
a9a7e98a 12654 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
5e550656 12655
66e985c0 12656#undef PIPE_CONF_CHECK_X
08a24034 12657#undef PIPE_CONF_CHECK_I
b95af8be 12658#undef PIPE_CONF_CHECK_I_ALT
1bd1bd80 12659#undef PIPE_CONF_CHECK_FLAGS
5e550656 12660#undef PIPE_CONF_CHECK_CLOCK_FUZZY
bb760063 12661#undef PIPE_CONF_QUIRK
cfb23ed6 12662#undef INTEL_ERR_OR_DBG_KMS
88adfff1 12663
cfb23ed6 12664 return ret;
0e8ffe1b
DV
12665}
12666
/*
 * Cross-check the software-tracked SKL+ DDB (data buffer) allocation
 * against what the hardware actually has programmed.
 *
 * Reads the live DDB state via skl_ddb_get_hw_state() and compares each
 * active pipe's per-plane and cursor entries with dev_priv->wm.skl_hw.ddb,
 * logging a DRM_ERROR for every mismatch. No-op before gen9 (no SKL DDB).
 */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* The SKL-style DDB only exists on gen9+. */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum pipe pipe = intel_crtc->pipe;

		/* Inactive pipes have no meaningful DDB allocation. */
		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			/* "expected" is the sw tracking, "found" the hw. */
			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12716
/*
 * Verify connector state after a commit: for every connector touched by
 * @old_state, run the connector/encoder hw-state checks and make sure the
 * atomic best_encoder agrees with the legacy connector->encoder pointer.
 */
static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		/* Legacy pointer and current atomic state, sampled before
		 * the hw-state check below runs. */
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
/*
 * Verify encoder software state against connectors and hardware:
 * an encoder must have a crtc iff some connector's atomic state points at
 * it, each such connector must agree on the crtc, and a detached encoder
 * must actually be disabled in hardware (via ->get_hw_state).
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Scan all connectors for ones routed to this encoder. */
		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* A detached encoder must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12777
/*
 * Verify crtc state after a commit: read the pipe configuration back from
 * hardware and compare it with the committed software state.
 *
 * Only crtcs that went through a modeset or a fastset (update_pipe) are
 * checked. The old crtc state in @old_state is no longer needed at this
 * point, so its storage is destroyed and reused as scratch space for the
 * hardware readout (pipe_config).
 */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* Recycle the old state's memory to hold the hw readout. */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		/* Every encoder on the crtc must agree on active and pipe,
		 * and contributes its config to the hw readout. */
		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		/* Full sw/hw config comparison only for active crtcs. */
		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}
12851
/*
 * Verify shared DPLL software tracking against hardware: for every shared
 * DPLL, cross-check the on/active refcounts, the set of crtcs using it
 * (both enabled and active), and the cached hw state snapshot.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		/* active users can never exceed the reference mask. */
		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users from the crtc side as a cross-check. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		/* The cached hw state must match the live registers while
		 * the pll is on. */
		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}
12900
/*
 * Run all post-commit state cross-checks (watermarks, connectors,
 * encoders, crtcs, shared DPLLs) to catch sw/hw state divergence.
 */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}
12911
/*
 * Sanity-check an encoder-reported dotclock against the FDI-derived pipe
 * clock in @pipe_config, using a fuzzy comparison to tolerate rounding.
 */
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}
12923
80715b2f
VS
12924static void update_scanline_offset(struct intel_crtc *crtc)
12925{
12926 struct drm_device *dev = crtc->base.dev;
12927
12928 /*
12929 * The scanline counter increments at the leading edge of hsync.
12930 *
12931 * On most platforms it starts counting from vtotal-1 on the
12932 * first active line. That means the scanline counter value is
12933 * always one less than what we would expect. Ie. just after
12934 * start of vblank, which also occurs at start of hsync (on the
12935 * last active line), the scanline counter will read vblank_start-1.
12936 *
12937 * On gen2 the scanline counter starts counting from 1 instead
12938 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12939 * to keep the value positive), instead of adding one.
12940 *
12941 * On HSW+ the behaviour of the scanline counter depends on the output
12942 * type. For DP ports it behaves like most other platforms, but on HDMI
12943 * there's an extra 1 line difference. So we need to add two instead of
12944 * one to the value.
12945 */
12946 if (IS_GEN2(dev)) {
124abe07 12947 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
80715b2f
VS
12948 int vtotal;
12949
124abe07
VS
12950 vtotal = adjusted_mode->crtc_vtotal;
12951 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
80715b2f
VS
12952 vtotal /= 2;
12953
12954 crtc->scanline_offset = vtotal - 1;
12955 } else if (HAS_DDI(dev) &&
409ee761 12956 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
80715b2f
VS
12957 crtc->scanline_offset = 2;
12958 } else
12959 crtc->scanline_offset = 1;
12960}
12961
/*
 * Release shared DPLL references for every crtc in @state that is about
 * to go through a modeset: reset its shared_dpll id to DPLL_ID_PRIVATE
 * and clear its pipe bit from the pll's crtc_mask in the atomic shared
 * dpll state. No-op on platforms without per-crtc clock computation.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *intel_crtc_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int dpll;

		intel_crtc = to_intel_crtc(crtc);
		intel_crtc_state = to_intel_crtc_state(crtc_state);
		dpll = intel_crtc_state->shared_dpll;

		/* Only modeset crtcs with a real shared pll are affected. */
		if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE)
			continue;

		intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;

		/* Fetch the shared dpll state lazily, on first use. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
	}
}
12994
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		/* Remember the first enabled crtc and, if present, one more
		 * (only two are needed to decide who waits for whom). */
		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		/* Note: this pulls every crtc into the atomic state. */
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* With one pipe already running, the first new pipe waits on it;
	 * otherwise the second new pipe waits on the first new pipe. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13059
27c329ed
ML
13060static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13061{
13062 struct drm_crtc *crtc;
13063 struct drm_crtc_state *crtc_state;
13064 int ret = 0;
13065
13066 /* add all active pipes to the state */
13067 for_each_crtc(state->dev, crtc) {
13068 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13069 if (IS_ERR(crtc_state))
13070 return PTR_ERR(crtc_state);
13071
13072 if (!crtc_state->active || needs_modeset(crtc_state))
13073 continue;
13074
13075 crtc_state->mode_changed = true;
13076
13077 ret = drm_atomic_add_affected_connectors(state, crtc);
13078 if (ret)
13079 break;
13080
13081 ret = drm_atomic_add_affected_planes(state, crtc);
13082 if (ret)
13083 break;
13084 }
13085
13086 return ret;
13087}
13088
/*
 * Global (cross-pipe) validation for an atomic modeset: reject digital
 * port conflicts, recompute cdclk (forcing a modeset on all pipes when it
 * changes), drop stale shared-DPLL references, and apply the Haswell
 * plane-enable workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		unsigned int cdclk;

		ret = dev_priv->display.modeset_calc_cdclk(state);

		/* A cdclk change affects every pipe, so modeset them all. */
		cdclk = to_intel_atomic_state(state)->cdclk;
		if (!ret && cdclk != dev_priv->cdclk_freq)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13128
aa363136
MR
13129/*
13130 * Handle calculation of various watermark data at the end of the atomic check
13131 * phase. The code here should be run after the per-crtc and per-plane 'check'
13132 * handlers to ensure that all derived state has been updated.
13133 */
13134static void calc_watermark_data(struct drm_atomic_state *state)
13135{
13136 struct drm_device *dev = state->dev;
13137 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13138 struct drm_crtc *crtc;
13139 struct drm_crtc_state *cstate;
13140 struct drm_plane *plane;
13141 struct drm_plane_state *pstate;
13142
13143 /*
13144 * Calculate watermark configuration details now that derived
13145 * plane/crtc state is all properly updated.
13146 */
13147 drm_for_each_crtc(crtc, dev) {
13148 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13149 crtc->state;
13150
13151 if (cstate->active)
13152 intel_state->wm_config.num_pipes_active++;
13153 }
13154 drm_for_each_legacy_plane(plane, dev) {
13155 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13156 plane->state;
13157
13158 if (!to_intel_plane_state(pstate)->visible)
13159 continue;
13160
13161 intel_state->wm_config.sprites_enabled = true;
13162 if (pstate->crtc_w != pstate->src_w >> 16 ||
13163 pstate->crtc_h != pstate->src_h >> 16)
13164 intel_state->wm_config.sprites_scaled = true;
13165 }
13166}
13167
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Runs the DRM core modeset checks, computes the full pipe config for
 * every crtc that needs a modeset, downgrades a modeset to a fastset
 * (update_pipe) when the new config matches the current one, and finally
 * performs global checks and plane checks.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		/* Disabled crtcs only matter for the any_ms bookkeeping. */
		if (!crtc_state->enable) {
			if (needs_modeset(crtc_state))
				any_ms = true;
			continue;
		}

		if (!needs_modeset(crtc_state))
			continue;

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret)
			return ret;

		/* If the computed config matches the current one, demote
		 * the modeset to a fastset. */
		if (intel_pipe_config_compare(state->dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state)) {
			any_ms = true;

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		intel_state->cdclk = to_i915(state->dev)->cdclk_freq;

	ret = drm_atomic_helper_check_planes(state->dev, state);
	if (ret)
		return ret;

	calc_watermark_data(state);

	return 0;
}
13250
/*
 * Prepare an atomic commit: reject async commits, wait for pending page
 * flips, pin/prepare all planes, and (outside struct_mutex) wait for any
 * outstanding GPU requests the new plane states depend on.
 *
 * On request-wait failure the prepared planes are cleaned up again.
 * Returns 0 on success or a negative error code.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle: drain the unpin work queue if it backs up. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/* Sample the reset counter, then wait for the plane
		 * requests without holding struct_mutex. */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		/* Success path returns with struct_mutex already dropped. */
		if (!ret)
			return 0;

		/* A wait failed: undo the plane preparation. */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}
13316
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret = 0;
	int i;
	bool any_ms = false;

	ret = intel_atomic_prepare_commit(dev, state, async);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* Point of no return: swap in the new state. */
	drm_atomic_helper_swap_state(dev, state);
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;

	/* First pass: disable every crtc that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (!needs_modeset(crtc->state))
			continue;

		any_ms = true;
		intel_pre_plane_update(intel_crtc);

		/* crtc_state here is the old state (post-swap). */
		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_disable_shared_dpll(intel_crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (any_ms) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
		modeset_update_crtc_power_domains(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		/* A fastset: no modeset, but pipe settings changed. */
		bool update_pipe = !modeset &&
			to_intel_crtc_state(crtc->state)->update_pipe;
		unsigned long put_domains = 0;

		if (modeset)
			intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		if (update_pipe) {
			put_domains = modeset_get_crtc_power_domains(crtc);

			/* make sure intel_modeset_check_state runs */
			any_ms = true;
		}

		if (!modeset)
			intel_pre_plane_update(intel_crtc);

		if (crtc->state->active &&
		    (crtc->state->planes_changed || update_pipe))
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (put_domains)
			modeset_put_power_domains(dev_priv, put_domains);

		intel_post_plane_update(intel_crtc);

		if (modeset)
			intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* FIXME: add subpixel order */

	drm_atomic_helper_wait_for_vblanks(dev, state);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* Cross-check sw state against hw after any modeset/fastset. */
	if (any_ms)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	return 0;
}
13435
/*
 * Force a modeset on @crtc with its current mode, by committing an atomic
 * state with only mode_changed set. Retries on -EDEADLK via the standard
 * drm_modeset_backoff() dance; inactive crtcs are left untouched.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc. */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/* Free the state only on failure or via the goto above; a
	 * successful commit takes ownership of it. Note the label sits
	 * inside the if-body on purpose. */
	if (ret)
out:
		drm_atomic_state_free(state);
}
13473
#undef for_each_intel_crtc_masked

/* CRTC vfuncs: atomic helpers for set_config/state handling, i915-specific
 * gamma, destroy and page-flip implementations. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
13484
/*
 * Read back an IBX PCH DPLL's hardware state (DPLL, FP0, FP1 registers)
 * into @hw_state.
 *
 * Returns true iff the PLL's VCO is enabled; returns false without
 * touching the hardware when the PLLS power domain is off.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	return val & DPLL_VCO_ENABLE;
}
13501
15bdd4cf
DV
/*
 * Program the FP0/FP1 divider registers for a PCH shared DPLL from the
 * cached software state. Called before the PLL is enabled; the DPLL
 * control register itself is written by ibx_pch_dpll_enable().
 */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}
13508
e7b903d2
DV
/*
 * Enable a PCH shared DPLL. The write/POSTING_READ/udelay sequence is
 * ordering-critical: the DPLL register is written twice because the pixel
 * multiplier bits only latch once the PLL is already running.
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13530
/*
 * Disable a PCH shared DPLL. Asserts (in debug builds) that no PCH
 * transcoder on a CRTC using this PLL is still enabled before cutting
 * the clock.
 */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
13547
46edb027
DV
/* Human-readable names for the two IBX/CPT PCH DPLLs, indexed by pll->id. */
/* NOTE(review): could be "static const char * const" if the .name field
 * is const char * — verify against struct intel_shared_dpll. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};
13552
7c74ade1 13553static void ibx_pch_dpll_init(struct drm_device *dev)
ee7b9f93 13554{
e7b903d2 13555 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93
JB
13556 int i;
13557
7c74ade1 13558 dev_priv->num_shared_dpll = 2;
ee7b9f93 13559
e72f9fbf 13560 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
46edb027
DV
13561 dev_priv->shared_dplls[i].id = i;
13562 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
15bdd4cf 13563 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
e7b903d2
DV
13564 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
13565 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
5358901f
DV
13566 dev_priv->shared_dplls[i].get_hw_state =
13567 ibx_pch_dpll_get_hw_state;
ee7b9f93
JB
13568 }
13569}
13570
7c74ade1
DV
/*
 * Platform dispatch for shared-DPLL setup: DDI platforms (HSW+) use the
 * DDI PLL code, IBX/CPT PCH platforms use the PCH DPLLs, everything else
 * has no shared PLLs.
 */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	/* The platform hook must not register more PLLs than we can track. */
	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}
13584
6beb8c23
MR
13585/**
13586 * intel_prepare_plane_fb - Prepare fb for usage on plane
13587 * @plane: drm plane to prepare for
13588 * @fb: framebuffer to prepare for presentation
13589 *
13590 * Prepares a framebuffer for usage on a display plane. Generally this
13591 * involves pinning the underlying object and updating the frontbuffer tracking
13592 * bits. Some older platforms need special physical address handling for
13593 * cursor planes.
13594 *
f935675f
ML
13595 * Must be called with struct_mutex held.
13596 *
6beb8c23
MR
13597 * Returns 0 on success, negative error code on failure.
13598 */
13599int
13600intel_prepare_plane_fb(struct drm_plane *plane,
d136dfee 13601 const struct drm_plane_state *new_state)
465c120c
MR
13602{
13603 struct drm_device *dev = plane->dev;
844f9111 13604 struct drm_framebuffer *fb = new_state->fb;
6beb8c23 13605 struct intel_plane *intel_plane = to_intel_plane(plane);
6beb8c23 13606 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1ee49399 13607 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
6beb8c23 13608 int ret = 0;
465c120c 13609
1ee49399 13610 if (!obj && !old_obj)
465c120c
MR
13611 return 0;
13612
5008e874
ML
13613 if (old_obj) {
13614 struct drm_crtc_state *crtc_state =
13615 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13616
13617 /* Big Hammer, we also need to ensure that any pending
13618 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13619 * current scanout is retired before unpinning the old
13620 * framebuffer. Note that we rely on userspace rendering
13621 * into the buffer attached to the pipe they are waiting
13622 * on. If not, userspace generates a GPU hang with IPEHR
13623 * point to the MI_WAIT_FOR_EVENT.
13624 *
13625 * This should only fail upon a hung GPU, in which case we
13626 * can safely continue.
13627 */
13628 if (needs_modeset(crtc_state))
13629 ret = i915_gem_object_wait_rendering(old_obj, true);
13630
13631 /* Swallow -EIO errors to allow updates during hw lockup. */
13632 if (ret && ret != -EIO)
f935675f 13633 return ret;
5008e874
ML
13634 }
13635
1ee49399
ML
13636 if (!obj) {
13637 ret = 0;
13638 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
6beb8c23
MR
13639 INTEL_INFO(dev)->cursor_needs_physical) {
13640 int align = IS_I830(dev) ? 16 * 1024 : 256;
13641 ret = i915_gem_object_attach_phys(obj, align);
13642 if (ret)
13643 DRM_DEBUG_KMS("failed to attach phys object\n");
13644 } else {
7580d774 13645 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
6beb8c23 13646 }
465c120c 13647
7580d774
ML
13648 if (ret == 0) {
13649 if (obj) {
13650 struct intel_plane_state *plane_state =
13651 to_intel_plane_state(new_state);
13652
13653 i915_gem_request_assign(&plane_state->wait_req,
13654 obj->last_write_req);
13655 }
13656
a9ff8714 13657 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
7580d774 13658 }
fdd508a6 13659
6beb8c23
MR
13660 return ret;
13661}
13662
38f3ce3a
MR
13663/**
13664 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13665 * @plane: drm plane to clean up for
13666 * @fb: old framebuffer that was on plane
13667 *
13668 * Cleans up a framebuffer that has just been removed from a plane.
f935675f
ML
13669 *
13670 * Must be called with struct_mutex held.
38f3ce3a
MR
13671 */
13672void
13673intel_cleanup_plane_fb(struct drm_plane *plane,
d136dfee 13674 const struct drm_plane_state *old_state)
38f3ce3a
MR
13675{
13676 struct drm_device *dev = plane->dev;
1ee49399 13677 struct intel_plane *intel_plane = to_intel_plane(plane);
7580d774 13678 struct intel_plane_state *old_intel_state;
1ee49399
ML
13679 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13680 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
38f3ce3a 13681
7580d774
ML
13682 old_intel_state = to_intel_plane_state(old_state);
13683
1ee49399 13684 if (!obj && !old_obj)
38f3ce3a
MR
13685 return;
13686
1ee49399
ML
13687 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13688 !INTEL_INFO(dev)->cursor_needs_physical))
844f9111 13689 intel_unpin_fb_obj(old_state->fb, old_state);
1ee49399
ML
13690
13691 /* prepare_fb aborted? */
13692 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13693 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13694 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
7580d774
ML
13695
13696 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13697
465c120c
MR
13698}
13699
6156a456
CK
13700int
13701skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13702{
13703 int max_scale;
13704 struct drm_device *dev;
13705 struct drm_i915_private *dev_priv;
13706 int crtc_clock, cdclk;
13707
13708 if (!intel_crtc || !crtc_state)
13709 return DRM_PLANE_HELPER_NO_SCALING;
13710
13711 dev = intel_crtc->base.dev;
13712 dev_priv = dev->dev_private;
13713 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
27c329ed 13714 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
6156a456 13715
54bf1ce6 13716 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6156a456
CK
13717 return DRM_PLANE_HELPER_NO_SCALING;
13718
13719 /*
13720 * skl max scale is lower of:
13721 * close to 3 but not 3, -1 is for that purpose
13722 * or
13723 * cdclk/crtc_clock
13724 */
13725 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13726
13727 return max_scale;
13728}
13729
465c120c 13730static int
3c692a41 13731intel_check_primary_plane(struct drm_plane *plane,
061e4b8d 13732 struct intel_crtc_state *crtc_state,
3c692a41
GP
13733 struct intel_plane_state *state)
13734{
2b875c22
MR
13735 struct drm_crtc *crtc = state->base.crtc;
13736 struct drm_framebuffer *fb = state->base.fb;
6156a456 13737 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
061e4b8d
ML
13738 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13739 bool can_position = false;
465c120c 13740
061e4b8d
ML
13741 /* use scaler when colorkey is not required */
13742 if (INTEL_INFO(plane->dev)->gen >= 9 &&
818ed961 13743 state->ckey.flags == I915_SET_COLORKEY_NONE) {
061e4b8d
ML
13744 min_scale = 1;
13745 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
d8106366 13746 can_position = true;
6156a456 13747 }
d8106366 13748
061e4b8d
ML
13749 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13750 &state->dst, &state->clip,
da20eabd
ML
13751 min_scale, max_scale,
13752 can_position, true,
13753 &state->visible);
14af293f
GP
13754}
13755
/*
 * Atomic .commit hook for the primary plane: program the hardware with
 * the new fb and source offset via the platform's update_primary_plane
 * vfunc. The source coordinates are 16.16 fixed point, hence the shifts.
 */
static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Fall back to the plane's current CRTC if the state has none. */
	crtc = crtc ? crtc : plane->crtc;

	dev_priv->display.update_primary_plane(crtc, fb,
					       state->src.x1 >> 16,
					       state->src.y1 >> 16);
}
13771
a8ad0d8e
ML
/*
 * Disable the primary plane by programming it with a NULL framebuffer
 * through the platform update_primary_plane vfunc.
 */
static void
intel_disable_primary_plane(struct drm_plane *plane,
			    struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}
13781
613d2b27
ML
/*
 * Per-CRTC hook run before plane updates are committed. Updates
 * watermarks if needed and starts the vblank-evasion critical section;
 * fastpath pipe-config updates and scaler detach only run when this
 * commit is not a full modeset.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	if (intel_crtc->atomic.update_wm_pre)
		intel_update_watermarks(crtc);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* A full modeset handles pipe programming elsewhere. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
13805
613d2b27
ML
/*
 * Per-CRTC hook run after plane updates are committed: ends the
 * vblank-evasion critical section opened in intel_begin_crtc_commit().
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc);
}
13813
cf4c7c12 13814/**
4a3b8769
MR
13815 * intel_plane_destroy - destroy a plane
13816 * @plane: plane to destroy
cf4c7c12 13817 *
4a3b8769
MR
13818 * Common destruction function for all types of planes (primary, cursor,
13819 * sprite).
cf4c7c12 13820 */
4a3b8769 13821void intel_plane_destroy(struct drm_plane *plane)
465c120c
MR
13822{
13823 struct intel_plane *intel_plane = to_intel_plane(plane);
13824 drm_plane_cleanup(plane);
13825 kfree(intel_plane);
13826}
13827
65a3fea0 13828const struct drm_plane_funcs intel_plane_funcs = {
70a101f8
MR
13829 .update_plane = drm_atomic_helper_update_plane,
13830 .disable_plane = drm_atomic_helper_disable_plane,
3d7d6510 13831 .destroy = intel_plane_destroy,
c196e1d6 13832 .set_property = drm_atomic_helper_plane_set_property,
a98b3431
MR
13833 .atomic_get_property = intel_plane_atomic_get_property,
13834 .atomic_set_property = intel_plane_atomic_set_property,
ea2c67bb
MR
13835 .atomic_duplicate_state = intel_plane_duplicate_state,
13836 .atomic_destroy_state = intel_plane_destroy_state,
13837
465c120c
MR
13838};
13839
/*
 * Allocate and register the primary plane for @pipe: sets up scaling
 * capability (gen9+), the check/commit/disable hooks, the supported
 * pixel format list per generation, and the rotation property (gen4+).
 * Returns the new drm_plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		/* No scaler assigned yet. */
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	primary->disable_plane = intel_disable_primary_plane;
	/* Gen2/3 FBC only works on plane A; see intel_crtc_init(). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick the pixel format table for this hardware generation. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}
13897
3b7a5119
SJ
13898void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
13899{
13900 if (!dev->mode_config.rotation_property) {
13901 unsigned long flags = BIT(DRM_ROTATE_0) |
13902 BIT(DRM_ROTATE_180);
13903
13904 if (INTEL_INFO(dev)->gen >= 9)
13905 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
13906
13907 dev->mode_config.rotation_property =
13908 drm_mode_create_rotation_property(dev, flags);
13909 }
13910 if (dev->mode_config.rotation_property)
13911 drm_object_attach_property(&plane->base.base,
13912 dev->mode_config.rotation_property,
13913 plane->base.state->rotation);
13914}
13915
3d7d6510 13916static int
852e787c 13917intel_check_cursor_plane(struct drm_plane *plane,
061e4b8d 13918 struct intel_crtc_state *crtc_state,
852e787c 13919 struct intel_plane_state *state)
3d7d6510 13920{
061e4b8d 13921 struct drm_crtc *crtc = crtc_state->base.crtc;
2b875c22 13922 struct drm_framebuffer *fb = state->base.fb;
757f9a3e 13923 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
757f9a3e
GP
13924 unsigned stride;
13925 int ret;
3d7d6510 13926
061e4b8d
ML
13927 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13928 &state->dst, &state->clip,
3d7d6510
MR
13929 DRM_PLANE_HELPER_NO_SCALING,
13930 DRM_PLANE_HELPER_NO_SCALING,
852e787c 13931 true, true, &state->visible);
757f9a3e
GP
13932 if (ret)
13933 return ret;
13934
757f9a3e
GP
13935 /* if we want to turn off the cursor ignore width and height */
13936 if (!obj)
da20eabd 13937 return 0;
757f9a3e 13938
757f9a3e 13939 /* Check for which cursor types we support */
061e4b8d 13940 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
ea2c67bb
MR
13941 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13942 state->base.crtc_w, state->base.crtc_h);
757f9a3e
GP
13943 return -EINVAL;
13944 }
13945
ea2c67bb
MR
13946 stride = roundup_pow_of_two(state->base.crtc_w) * 4;
13947 if (obj->base.size < stride * state->base.crtc_h) {
757f9a3e
GP
13948 DRM_DEBUG_KMS("buffer is too small\n");
13949 return -ENOMEM;
13950 }
13951
3a656b54 13952 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
757f9a3e 13953 DRM_DEBUG_KMS("cursor cannot be tiled\n");
da20eabd 13954 return -EINVAL;
32b7eeec
MR
13955 }
13956
da20eabd 13957 return 0;
852e787c 13958}
3d7d6510 13959
a8ad0d8e
ML
/* Atomic disable hook for the cursor plane: hide the hardware cursor. */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	intel_crtc_update_cursor(crtc, false);
}
13966
f4a2cf29 13967static void
852e787c
GP
13968intel_commit_cursor_plane(struct drm_plane *plane,
13969 struct intel_plane_state *state)
13970{
2b875c22 13971 struct drm_crtc *crtc = state->base.crtc;
ea2c67bb
MR
13972 struct drm_device *dev = plane->dev;
13973 struct intel_crtc *intel_crtc;
2b875c22 13974 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
a912f12f 13975 uint32_t addr;
852e787c 13976
ea2c67bb
MR
13977 crtc = crtc ? crtc : plane->crtc;
13978 intel_crtc = to_intel_crtc(crtc);
13979
a912f12f
GP
13980 if (intel_crtc->cursor_bo == obj)
13981 goto update;
4ed91096 13982
f4a2cf29 13983 if (!obj)
a912f12f 13984 addr = 0;
f4a2cf29 13985 else if (!INTEL_INFO(dev)->cursor_needs_physical)
a912f12f 13986 addr = i915_gem_obj_ggtt_offset(obj);
f4a2cf29 13987 else
a912f12f 13988 addr = obj->phys_handle->busaddr;
852e787c 13989
a912f12f
GP
13990 intel_crtc->cursor_addr = addr;
13991 intel_crtc->cursor_bo = obj;
852e787c 13992
302d19ac 13993update:
62852622 13994 intel_crtc_update_cursor(crtc, state->visible);
852e787c
GP
13995}
13996
3d7d6510
MR
/*
 * Allocate and register the cursor plane for @pipe: wires up the
 * check/commit/disable hooks, registers the cursor pixel formats, and
 * attaches the 0/180-degree rotation property on gen4+ (the cursor does
 * not support 90/270). Returns the new drm_plane, or NULL on failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Lazily create the shared property; only 0/180 for cursors. */
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
						   dev->mode_config.rotation_property,
						   state->base.rotation);
	}

	/* No scaler assigned to the cursor yet on gen9+. */
	if (INTEL_INFO(dev)->gen >= 9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}
14048
549e2bfb
CK
14049static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14050 struct intel_crtc_state *crtc_state)
14051{
14052 int i;
14053 struct intel_scaler *intel_scaler;
14054 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14055
14056 for (i = 0; i < intel_crtc->num_scalers; i++) {
14057 intel_scaler = &scaler_state->scalers[i];
14058 intel_scaler->in_use = 0;
549e2bfb
CK
14059 intel_scaler->mode = PS_SCALER_MODE_DYN;
14060 }
14061
14062 scaler_state->scaler_id = -1;
14063}
14064
b358d0a6 14065static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 14066{
fbee40df 14067 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 14068 struct intel_crtc *intel_crtc;
f5de6e07 14069 struct intel_crtc_state *crtc_state = NULL;
3d7d6510
MR
14070 struct drm_plane *primary = NULL;
14071 struct drm_plane *cursor = NULL;
465c120c 14072 int i, ret;
79e53945 14073
955382f3 14074 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
79e53945
JB
14075 if (intel_crtc == NULL)
14076 return;
14077
f5de6e07
ACO
14078 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
14079 if (!crtc_state)
14080 goto fail;
550acefd
ACO
14081 intel_crtc->config = crtc_state;
14082 intel_crtc->base.state = &crtc_state->base;
07878248 14083 crtc_state->base.crtc = &intel_crtc->base;
f5de6e07 14084
549e2bfb
CK
14085 /* initialize shared scalers */
14086 if (INTEL_INFO(dev)->gen >= 9) {
14087 if (pipe == PIPE_C)
14088 intel_crtc->num_scalers = 1;
14089 else
14090 intel_crtc->num_scalers = SKL_NUM_SCALERS;
14091
14092 skl_init_scalers(dev, intel_crtc, crtc_state);
14093 }
14094
465c120c 14095 primary = intel_primary_plane_create(dev, pipe);
3d7d6510
MR
14096 if (!primary)
14097 goto fail;
14098
14099 cursor = intel_cursor_plane_create(dev, pipe);
14100 if (!cursor)
14101 goto fail;
14102
465c120c 14103 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
3d7d6510
MR
14104 cursor, &intel_crtc_funcs);
14105 if (ret)
14106 goto fail;
79e53945
JB
14107
14108 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
14109 for (i = 0; i < 256; i++) {
14110 intel_crtc->lut_r[i] = i;
14111 intel_crtc->lut_g[i] = i;
14112 intel_crtc->lut_b[i] = i;
14113 }
14114
1f1c2e24
VS
14115 /*
14116 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
8c0f92e1 14117 * is hooked to pipe B. Hence we want plane A feeding pipe B.
1f1c2e24 14118 */
80824003
JB
14119 intel_crtc->pipe = pipe;
14120 intel_crtc->plane = pipe;
3a77c4c4 14121 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
28c97730 14122 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 14123 intel_crtc->plane = !pipe;
80824003
JB
14124 }
14125
4b0e333e
CW
14126 intel_crtc->cursor_base = ~0;
14127 intel_crtc->cursor_cntl = ~0;
dc41c154 14128 intel_crtc->cursor_size = ~0;
8d7849db 14129
852eb00d
VS
14130 intel_crtc->wm.cxsr_allowed = true;
14131
22fd0fab
JB
14132 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14133 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
14134 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
14135 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
14136
79e53945 14137 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
87b6b101
DV
14138
14139 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
3d7d6510
MR
14140 return;
14141
14142fail:
14143 if (primary)
14144 drm_plane_cleanup(primary);
14145 if (cursor)
14146 drm_plane_cleanup(cursor);
f5de6e07 14147 kfree(crtc_state);
3d7d6510 14148 kfree(intel_crtc);
79e53945
JB
14149}
14150
752aa88a
JB
/*
 * Return the pipe currently driving @connector, or INVALID_PIPE when the
 * connector has no encoder or the encoder has no CRTC. The caller must
 * hold the connection_mutex (asserted below).
 */
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}
14163
08d7b3d1 14164int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 14165 struct drm_file *file)
08d7b3d1 14166{
08d7b3d1 14167 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7707e653 14168 struct drm_crtc *drmmode_crtc;
c05422d5 14169 struct intel_crtc *crtc;
08d7b3d1 14170
7707e653 14171 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
08d7b3d1 14172
7707e653 14173 if (!drmmode_crtc) {
08d7b3d1 14174 DRM_ERROR("no such CRTC id\n");
3f2c2057 14175 return -ENOENT;
08d7b3d1
CW
14176 }
14177
7707e653 14178 crtc = to_intel_crtc(drmmode_crtc);
c05422d5 14179 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 14180
c05422d5 14181 return 0;
08d7b3d1
CW
14182}
14183
66a9278e 14184static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 14185{
66a9278e
DV
14186 struct drm_device *dev = encoder->base.dev;
14187 struct intel_encoder *source_encoder;
79e53945 14188 int index_mask = 0;
79e53945
JB
14189 int entry = 0;
14190
b2784e15 14191 for_each_intel_encoder(dev, source_encoder) {
bc079e8b 14192 if (encoders_cloneable(encoder, source_encoder))
66a9278e
DV
14193 index_mask |= (1 << entry);
14194
79e53945
JB
14195 entry++;
14196 }
4ef69c7a 14197
79e53945
JB
14198 return index_mask;
14199}
14200
4d302442
CW
14201static bool has_edp_a(struct drm_device *dev)
14202{
14203 struct drm_i915_private *dev_priv = dev->dev_private;
14204
14205 if (!IS_MOBILE(dev))
14206 return false;
14207
14208 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14209 return false;
14210
e3589908 14211 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
4d302442
CW
14212 return false;
14213
14214 return true;
14215}
14216
84b4e042
JB
14217static bool intel_crt_present(struct drm_device *dev)
14218{
14219 struct drm_i915_private *dev_priv = dev->dev_private;
14220
884497ed
DL
14221 if (INTEL_INFO(dev)->gen >= 9)
14222 return false;
14223
cf404ce4 14224 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
84b4e042
JB
14225 return false;
14226
14227 if (IS_CHERRYVIEW(dev))
14228 return false;
14229
14230 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
14231 return false;
14232
14233 return true;
14234}
14235
79e53945
JB
/*
 * Probe and register every display output on the device. Detection is
 * platform specific: BXT registers DDI A-C unconditionally (no strap
 * support), DDI platforms use DDI_BUF_CTL_A/SFUSE_STRAP (plus VBT for
 * DDI-E on SKL/KBL), PCH platforms use the PCH detect bits, VLV/CHV
 * combine detect bits with VBT eDP info, and older gens probe SDVO/
 * HDMI/DP/DVO directly. Finally possible_crtcs/possible_clones are
 * filled in for each registered encoder.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Fill in cloning/CRTC constraints for every registered encoder. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14401
/*
 * Framebuffer destroy hook: tear down the KMS framebuffer and release the
 * GEM backing object reference taken at creation time.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Undo the framebuffer_references++ from intel_framebuffer_init();
	 * the WARN fires if the count was already zero (underflow). */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}
14414
14415static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 14416 struct drm_file *file,
79e53945
JB
14417 unsigned int *handle)
14418{
14419 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 14420 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 14421
05394f39 14422 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
14423}
14424
/*
 * Framebuffer dirty hook: userspace has written to the framebuffer.
 * Flush the backing object so frontbuffer tracking consumers see the
 * new contents. The clip rectangles are ignored; the whole object is
 * flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
14441
/* Framebuffer ops for all i915 framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14447
b321803d
DL
14448static
14449u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14450 uint32_t pixel_format)
14451{
14452 u32 gen = INTEL_INFO(dev)->gen;
14453
14454 if (gen >= 9) {
14455 /* "The stride in bytes must not exceed the of the size of 8K
14456 * pixels and 32K bytes."
14457 */
14458 return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
14459 } else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
14460 return 32*1024;
14461 } else if (gen >= 4) {
14462 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14463 return 16*1024;
14464 else
14465 return 32*1024;
14466 } else if (gen >= 3) {
14467 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14468 return 8*1024;
14469 else
14470 return 16*1024;
14471 } else {
14472 /* XXX DSPC is limited to 4k tiled */
14473 return 8*1024;
14474 }
14475}
14476
/*
 * Validate a framebuffer creation request against the hardware's tiling,
 * stride, pixel-format and size constraints, then initialize @intel_fb
 * around @obj and register it with the DRM core.
 *
 * Returns 0 on success or -EINVAL / drm_framebuffer_init() error code.
 * On success the framebuffer takes a framebuffer_references count on @obj.
 * Caller must hold dev->struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's
		 * tiling mode. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* Y/Yf tiling is a gen9+ feature. */
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* The pitch must be a multiple of the platform's stride alignment. */
	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	/* ... and within the platform's maximum pitch. */
	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout must use the fence stride programmed on the
	 * object; a mismatched pitch would produce garbage. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	/* Dropped again in intel_user_framebuffer_destroy(). */
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}
14624
/*
 * mode_config fb_create hook: look up the GEM object named by userspace
 * and wrap it in an intel framebuffer.
 *
 * On success the framebuffer owns the reference taken by the object
 * lookup; on failure that reference is dropped here.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
14644
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* Without fbdev emulation, output-poll notifications are a no-op. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
14650
/* Top-level mode config hooks; i915 commits modesets via atomic. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
14659
e70236a8
JB
14660/* Set up chip specific display functions */
14661static void intel_init_display(struct drm_device *dev)
14662{
14663 struct drm_i915_private *dev_priv = dev->dev_private;
14664
ee9300bb
DV
14665 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
14666 dev_priv->display.find_dpll = g4x_find_best_dpll;
ef9348c8
CML
14667 else if (IS_CHERRYVIEW(dev))
14668 dev_priv->display.find_dpll = chv_find_best_dpll;
ee9300bb
DV
14669 else if (IS_VALLEYVIEW(dev))
14670 dev_priv->display.find_dpll = vlv_find_best_dpll;
14671 else if (IS_PINEVIEW(dev))
14672 dev_priv->display.find_dpll = pnv_find_best_dpll;
14673 else
14674 dev_priv->display.find_dpll = i9xx_find_best_dpll;
14675
bc8d7dff
DL
14676 if (INTEL_INFO(dev)->gen >= 9) {
14677 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1
DL
14678 dev_priv->display.get_initial_plane_config =
14679 skylake_get_initial_plane_config;
bc8d7dff
DL
14680 dev_priv->display.crtc_compute_clock =
14681 haswell_crtc_compute_clock;
14682 dev_priv->display.crtc_enable = haswell_crtc_enable;
14683 dev_priv->display.crtc_disable = haswell_crtc_disable;
bc8d7dff
DL
14684 dev_priv->display.update_primary_plane =
14685 skylake_update_primary_plane;
14686 } else if (HAS_DDI(dev)) {
0e8ffe1b 14687 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1
DL
14688 dev_priv->display.get_initial_plane_config =
14689 ironlake_get_initial_plane_config;
797d0259
ACO
14690 dev_priv->display.crtc_compute_clock =
14691 haswell_crtc_compute_clock;
4f771f10
PZ
14692 dev_priv->display.crtc_enable = haswell_crtc_enable;
14693 dev_priv->display.crtc_disable = haswell_crtc_disable;
bc8d7dff
DL
14694 dev_priv->display.update_primary_plane =
14695 ironlake_update_primary_plane;
09b4ddf9 14696 } else if (HAS_PCH_SPLIT(dev)) {
0e8ffe1b 14697 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5724dbd1
DL
14698 dev_priv->display.get_initial_plane_config =
14699 ironlake_get_initial_plane_config;
3fb37703
ACO
14700 dev_priv->display.crtc_compute_clock =
14701 ironlake_crtc_compute_clock;
76e5a89c
DV
14702 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14703 dev_priv->display.crtc_disable = ironlake_crtc_disable;
262ca2b0
MR
14704 dev_priv->display.update_primary_plane =
14705 ironlake_update_primary_plane;
89b667f8
JB
14706 } else if (IS_VALLEYVIEW(dev)) {
14707 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
14708 dev_priv->display.get_initial_plane_config =
14709 i9xx_get_initial_plane_config;
d6dfee7a 14710 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
89b667f8
JB
14711 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14712 dev_priv->display.crtc_disable = i9xx_crtc_disable;
262ca2b0
MR
14713 dev_priv->display.update_primary_plane =
14714 i9xx_update_primary_plane;
f564048e 14715 } else {
0e8ffe1b 14716 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
14717 dev_priv->display.get_initial_plane_config =
14718 i9xx_get_initial_plane_config;
d6dfee7a 14719 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
76e5a89c
DV
14720 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14721 dev_priv->display.crtc_disable = i9xx_crtc_disable;
262ca2b0
MR
14722 dev_priv->display.update_primary_plane =
14723 i9xx_update_primary_plane;
f564048e 14724 }
e70236a8 14725
e70236a8 14726 /* Returns the core display clock speed */
ef11bdb3 14727 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
1652d19e
VS
14728 dev_priv->display.get_display_clock_speed =
14729 skylake_get_display_clock_speed;
acd3f3d3
BP
14730 else if (IS_BROXTON(dev))
14731 dev_priv->display.get_display_clock_speed =
14732 broxton_get_display_clock_speed;
1652d19e
VS
14733 else if (IS_BROADWELL(dev))
14734 dev_priv->display.get_display_clock_speed =
14735 broadwell_get_display_clock_speed;
14736 else if (IS_HASWELL(dev))
14737 dev_priv->display.get_display_clock_speed =
14738 haswell_get_display_clock_speed;
14739 else if (IS_VALLEYVIEW(dev))
25eb05fc
JB
14740 dev_priv->display.get_display_clock_speed =
14741 valleyview_get_display_clock_speed;
b37a6434
VS
14742 else if (IS_GEN5(dev))
14743 dev_priv->display.get_display_clock_speed =
14744 ilk_get_display_clock_speed;
a7c66cd8 14745 else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
34edce2f 14746 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
e70236a8
JB
14747 dev_priv->display.get_display_clock_speed =
14748 i945_get_display_clock_speed;
34edce2f
VS
14749 else if (IS_GM45(dev))
14750 dev_priv->display.get_display_clock_speed =
14751 gm45_get_display_clock_speed;
14752 else if (IS_CRESTLINE(dev))
14753 dev_priv->display.get_display_clock_speed =
14754 i965gm_get_display_clock_speed;
14755 else if (IS_PINEVIEW(dev))
14756 dev_priv->display.get_display_clock_speed =
14757 pnv_get_display_clock_speed;
14758 else if (IS_G33(dev) || IS_G4X(dev))
14759 dev_priv->display.get_display_clock_speed =
14760 g33_get_display_clock_speed;
e70236a8
JB
14761 else if (IS_I915G(dev))
14762 dev_priv->display.get_display_clock_speed =
14763 i915_get_display_clock_speed;
257a7ffc 14764 else if (IS_I945GM(dev) || IS_845G(dev))
e70236a8
JB
14765 dev_priv->display.get_display_clock_speed =
14766 i9xx_misc_get_display_clock_speed;
257a7ffc
DV
14767 else if (IS_PINEVIEW(dev))
14768 dev_priv->display.get_display_clock_speed =
14769 pnv_get_display_clock_speed;
e70236a8
JB
14770 else if (IS_I915GM(dev))
14771 dev_priv->display.get_display_clock_speed =
14772 i915gm_get_display_clock_speed;
14773 else if (IS_I865G(dev))
14774 dev_priv->display.get_display_clock_speed =
14775 i865_get_display_clock_speed;
f0f8a9ce 14776 else if (IS_I85X(dev))
e70236a8 14777 dev_priv->display.get_display_clock_speed =
1b1d2716 14778 i85x_get_display_clock_speed;
623e01e5
VS
14779 else { /* 830 */
14780 WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
e70236a8
JB
14781 dev_priv->display.get_display_clock_speed =
14782 i830_get_display_clock_speed;
623e01e5 14783 }
e70236a8 14784
7c10a2b5 14785 if (IS_GEN5(dev)) {
3bb11b53 14786 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
3bb11b53
SJ
14787 } else if (IS_GEN6(dev)) {
14788 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
3bb11b53
SJ
14789 } else if (IS_IVYBRIDGE(dev)) {
14790 /* FIXME: detect B0+ stepping and use auto training */
14791 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
059b2fe9 14792 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
3bb11b53 14793 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
27c329ed
ML
14794 if (IS_BROADWELL(dev)) {
14795 dev_priv->display.modeset_commit_cdclk =
14796 broadwell_modeset_commit_cdclk;
14797 dev_priv->display.modeset_calc_cdclk =
14798 broadwell_modeset_calc_cdclk;
14799 }
30a970c6 14800 } else if (IS_VALLEYVIEW(dev)) {
27c329ed
ML
14801 dev_priv->display.modeset_commit_cdclk =
14802 valleyview_modeset_commit_cdclk;
14803 dev_priv->display.modeset_calc_cdclk =
14804 valleyview_modeset_calc_cdclk;
f8437dd1 14805 } else if (IS_BROXTON(dev)) {
27c329ed
ML
14806 dev_priv->display.modeset_commit_cdclk =
14807 broxton_modeset_commit_cdclk;
14808 dev_priv->display.modeset_calc_cdclk =
14809 broxton_modeset_calc_cdclk;
e70236a8 14810 }
8c9f3aaf 14811
8c9f3aaf
JB
14812 switch (INTEL_INFO(dev)->gen) {
14813 case 2:
14814 dev_priv->display.queue_flip = intel_gen2_queue_flip;
14815 break;
14816
14817 case 3:
14818 dev_priv->display.queue_flip = intel_gen3_queue_flip;
14819 break;
14820
14821 case 4:
14822 case 5:
14823 dev_priv->display.queue_flip = intel_gen4_queue_flip;
14824 break;
14825
14826 case 6:
14827 dev_priv->display.queue_flip = intel_gen6_queue_flip;
14828 break;
7c9017e5 14829 case 7:
4e0bbc31 14830 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
7c9017e5
JB
14831 dev_priv->display.queue_flip = intel_gen7_queue_flip;
14832 break;
830c81db 14833 case 9:
ba343e02
TU
14834 /* Drop through - unsupported since execlist only. */
14835 default:
14836 /* Default just returns -ENODEV to indicate unsupported */
14837 dev_priv->display.queue_flip = intel_default_queue_flip;
8c9f3aaf 14838 }
7bd688cd 14839
e39b999a 14840 mutex_init(&dev_priv->pps_mutex);
e70236a8
JB
14841}
14842
/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Checked by the modeset code to keep pipe A powered up. */
	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
14855
/* Same as quirk_pipea_force, but for pipe B. */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}
14863
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
14873
/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
14884
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}
14892
/* One quirk table entry: match by PCI device/subsystem IDs, then run hook. */
struct intel_quirk {
	int device;		/* PCI device ID */
	int subsystem_vendor;	/* PCI subsystem vendor ID, or PCI_ANY_ID */
	int subsystem_device;	/* PCI subsystem device ID, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);
};
14899
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};
14905
/* DMI match callback: log the affected machine; returning 1 keeps the match. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
14911
/* Quirks matched via DMI strings rather than PCI subsystem IDs. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
14927
/* PCI-ID-matched quirk table, walked by intel_init_quirks(). */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
};
14983
14984static void intel_init_quirks(struct drm_device *dev)
14985{
14986 struct pci_dev *d = dev->pdev;
14987 int i;
14988
14989 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14990 struct intel_quirk *q = &intel_quirks[i];
14991
14992 if (d->device == q->device &&
14993 (d->subsystem_vendor == q->subsystem_vendor ||
14994 q->subsystem_vendor == PCI_ANY_ID) &&
14995 (d->subsystem_device == q->subsystem_device ||
14996 q->subsystem_device == PCI_ANY_ID))
14997 q->hook(dev);
14998 }
5f85f176
EE
14999 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15000 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15001 intel_dmi_quirks[i].hook(dev);
15002 }
b690e96c
JB
15003}
15004
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set SR01 bit 5 (screen off) via the legacy VGA index/data ports. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15023
/* Re-initialize display hardware state, e.g. after resume or GPU reset. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}
15031
/*
 * One-time modeset initialization at driver load: set up the DRM mode
 * config, apply quirks, register CRTCs/planes/outputs, read out the
 * BIOS-programmed hardware state and reserve any BIOS framebuffer.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less SKUs: nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	intel_init_display(dev);
	intel_init_audio(dev);

	/* Maximum framebuffer dimensions, per generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits, per generation. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Register a CRTC per pipe and its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Read out the BIOS-programmed display configuration. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}
}
15153
/*
 * Force pipe A on for the QUIRK_PIPEA_FORCE machines by briefly running
 * load detection on the analog (CRT) connector.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No analog connector registered: nothing to do. */
	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
15177
fa555837
DV
15178static bool
15179intel_check_plane_mapping(struct intel_crtc *crtc)
15180{
7eb552ae
BW
15181 struct drm_device *dev = crtc->base.dev;
15182 struct drm_i915_private *dev_priv = dev->dev_private;
649636ef 15183 u32 val;
fa555837 15184
7eb552ae 15185 if (INTEL_INFO(dev)->num_pipes == 1)
fa555837
DV
15186 return true;
15187
649636ef 15188 val = I915_READ(DSPCNTR(!crtc->plane));
fa555837
DV
15189
15190 if ((val & DISPLAY_PLANE_ENABLE) &&
15191 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15192 return false;
15193
15194 return true;
15195}
15196
/* Return true if at least one encoder is attached to this CRTC. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Entering the loop body at all means an encoder exists. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
15207
24929352
DV
/*
 * Reconcile a CRTC left in an arbitrary state by the BIOS (or a previous
 * driver instance) with our software tracking: clear debug delays, fix
 * vblank bookkeeping, repair the plane->pipe mapping on gen2/3, apply
 * the pipe A quirk, disable pipes without any encoder, and make the
 * atomic state agree with what the hardware reports.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

	/* Clear any frame start delays used for debugging left by the BIOS */
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		/* Mark the primary visible so the disable path actually
		 * touches the plane hardware. */
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15311
/*
 * Disable an encoder that claims active connectors but has no active
 * pipe behind it (typically fallout from register restore on resume),
 * and clamp any connectors still pointing at it to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;
	bool active = false;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	/* Is any connector currently routed through this encoder? */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* disable first, then the optional post_disable hook,
			 * matching the normal modeset teardown order. */
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15364
04098753 15365void i915_redisable_vga_power_on(struct drm_device *dev)
0fde901f
KM
15366{
15367 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 15368 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
0fde901f 15369
04098753
ID
15370 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15371 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15372 i915_disable_vga(dev);
15373 }
15374}
15375
15376void i915_redisable_vga(struct drm_device *dev)
15377{
15378 struct drm_i915_private *dev_priv = dev->dev_private;
15379
8dc8a27c
PZ
15380 /* This function can be called both from intel_modeset_setup_hw_state or
15381 * at a very early point in our resume sequence, where the power well
15382 * structures are not yet restored. Since this function is at a very
15383 * paranoid "someone might have enabled VGA while we were not looking"
15384 * level, just check if the power well is enabled instead of trying to
15385 * follow the "don't touch the power well if we don't need it" policy
15386 * the rest of the driver uses. */
f458ebbc 15387 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
8dc8a27c
PZ
15388 return;
15389
04098753 15390 i915_redisable_vga_power_on(dev);
0fde901f
KM
15391}
15392
f9cd7b88 15393static bool primary_get_hw_state(struct intel_plane *plane)
98ec7739 15394{
f9cd7b88 15395 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
98ec7739 15396
f9cd7b88 15397 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
d032ffa0
ML
15398}
15399
f9cd7b88
VS
15400/* FIXME read out full plane state for all planes */
15401static void readout_plane_state(struct intel_crtc *crtc)
d032ffa0 15402{
b26d3ea3 15403 struct drm_plane *primary = crtc->base.primary;
f9cd7b88 15404 struct intel_plane_state *plane_state =
b26d3ea3 15405 to_intel_plane_state(primary->state);
d032ffa0 15406
19b8d387 15407 plane_state->visible = crtc->active &&
b26d3ea3
ML
15408 primary_get_hw_state(to_intel_plane(primary));
15409
15410 if (plane_state->visible)
15411 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
98ec7739
VS
15412}
15413
/*
 * Read the current hardware display state (pipes, shared DPLLs,
 * encoders, connectors) into the software/atomic state structures so
 * later sanitization can compare the two.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	/* Per-CRTC: wipe the stale config and read it back from hardware. */
	for_each_intel_crtc(dev, crtc) {
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Shared DPLLs: recompute which active CRTCs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		/* Hold the PLLS power domain while a PLL is in use. */
		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	/* Encoders: link each active encoder to its CRTC and read config. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: dpms and encoder link follow the hardware state. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Finally derive the user-visible modes from the read-out config. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}
15527
043e9bda
ML
/*
 * Scan out the current hardware modeset state and sanitize it: encoders
 * first (they may detach from pipes), then each pipe, then connector
 * atomic state, unused-but-enabled shared DPLLs, watermarks, and
 * finally power-domain bookkeeping.
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Switch off shared DPLLs that are on but unused by any CRTC. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back watermarks with the platform-appropriate helper. */
	if (IS_VALLEYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		/* No CRTC should still hold modeset power domains here. */
		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}
7d0bc1ea 15584
043e9bda
ML
/*
 * Restore the display configuration after resume: duplicate the
 * complete old atomic state (including shared DPLLs), re-read and
 * sanitize the hardware state, then commit the saved state back with a
 * forced modeset. On any failure the partially built state is freed.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	/* Pull every plane and connector into the state as well. */
	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	if (!ret)
		return;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}
15636
/*
 * Finish modeset init once GEM is up: enable GT powersave, init display
 * hardware and the overlay, then pin & fence the framebuffers that were
 * taken over from the BIOS (dropping any fb that fails to pin).
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: detach the boot fb from this CRTC
			 * and drop our reference so the state stays sane. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
15679
4932e2c3
ID
15680void intel_connector_unregister(struct intel_connector *intel_connector)
15681{
15682 struct drm_connector *connector = &intel_connector->base;
15683
15684 intel_panel_destroy_backlight(connector);
34ea3d38 15685 drm_connector_unregister(connector);
4932e2c3
ID
15686}
15687
79e53945
JB
/*
 * Tear down the modeset side of the driver. The ordering below is
 * deliberate: interrupts and polling go first to avoid races, backlight
 * and sysfs before encoders/connectors, and GT powersave cleanup last.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}
15733
f1c79df3
ZW
15734/*
15735 * Return which encoder is currently attached for connector.
15736 */
df0e9248 15737struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 15738{
df0e9248
CW
15739 return &intel_attached_encoder(connector)->base;
15740}
f1c79df3 15741
df0e9248
CW
15742void intel_connector_attach_encoder(struct intel_connector *connector,
15743 struct intel_encoder *encoder)
15744{
15745 connector->encoder = encoder;
15746 drm_mode_connector_attach_encoder(&connector->base,
15747 &encoder->base);
79e53945 15748}
28d52043
DA
15749
15750/*
15751 * set vga decode state - true == enable VGA decode
15752 */
15753int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15754{
15755 struct drm_i915_private *dev_priv = dev->dev_private;
a885b3cc 15756 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
28d52043
DA
15757 u16 gmch_ctrl;
15758
75fa041d
CW
15759 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15760 DRM_ERROR("failed to read control word\n");
15761 return -EIO;
15762 }
15763
c0cc8a55
CW
15764 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15765 return 0;
15766
28d52043
DA
15767 if (state)
15768 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15769 else
15770 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
75fa041d
CW
15771
15772 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15773 DRM_ERROR("failed to write control word\n");
15774 return -EIO;
15775 }
15776
28d52043
DA
15777 return 0;
15778}
c4a1d9e4 15779
/*
 * Snapshot of display controller registers captured by
 * intel_display_capture_error_state() and pretty-printed by
 * intel_display_print_error_state() into the error state dump.
 */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;	/* valid entries in transcoder[] below */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers only read when true */
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers only read when true */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
15823
15824struct intel_display_error_state *
15825intel_display_capture_error_state(struct drm_device *dev)
15826{
fbee40df 15827 struct drm_i915_private *dev_priv = dev->dev_private;
c4a1d9e4 15828 struct intel_display_error_state *error;
63b66e5b
CW
15829 int transcoders[] = {
15830 TRANSCODER_A,
15831 TRANSCODER_B,
15832 TRANSCODER_C,
15833 TRANSCODER_EDP,
15834 };
c4a1d9e4
CW
15835 int i;
15836
63b66e5b
CW
15837 if (INTEL_INFO(dev)->num_pipes == 0)
15838 return NULL;
15839
9d1cb914 15840 error = kzalloc(sizeof(*error), GFP_ATOMIC);
c4a1d9e4
CW
15841 if (error == NULL)
15842 return NULL;
15843
190be112 15844 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ff57f1b0
PZ
15845 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
15846
055e393f 15847 for_each_pipe(dev_priv, i) {
ddf9c536 15848 error->pipe[i].power_domain_on =
f458ebbc
DV
15849 __intel_display_power_is_enabled(dev_priv,
15850 POWER_DOMAIN_PIPE(i));
ddf9c536 15851 if (!error->pipe[i].power_domain_on)
9d1cb914
PZ
15852 continue;
15853
5efb3e28
VS
15854 error->cursor[i].control = I915_READ(CURCNTR(i));
15855 error->cursor[i].position = I915_READ(CURPOS(i));
15856 error->cursor[i].base = I915_READ(CURBASE(i));
c4a1d9e4
CW
15857
15858 error->plane[i].control = I915_READ(DSPCNTR(i));
15859 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
80ca378b 15860 if (INTEL_INFO(dev)->gen <= 3) {
51889b35 15861 error->plane[i].size = I915_READ(DSPSIZE(i));
80ca378b
PZ
15862 error->plane[i].pos = I915_READ(DSPPOS(i));
15863 }
ca291363
PZ
15864 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15865 error->plane[i].addr = I915_READ(DSPADDR(i));
c4a1d9e4
CW
15866 if (INTEL_INFO(dev)->gen >= 4) {
15867 error->plane[i].surface = I915_READ(DSPSURF(i));
15868 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15869 }
15870
c4a1d9e4 15871 error->pipe[i].source = I915_READ(PIPESRC(i));
f301b1e1 15872
3abfce77 15873 if (HAS_GMCH_DISPLAY(dev))
f301b1e1 15874 error->pipe[i].stat = I915_READ(PIPESTAT(i));
63b66e5b
CW
15875 }
15876
15877 error->num_transcoders = INTEL_INFO(dev)->num_pipes;
15878 if (HAS_DDI(dev_priv->dev))
15879 error->num_transcoders++; /* Account for eDP. */
15880
15881 for (i = 0; i < error->num_transcoders; i++) {
15882 enum transcoder cpu_transcoder = transcoders[i];
15883
ddf9c536 15884 error->transcoder[i].power_domain_on =
f458ebbc 15885 __intel_display_power_is_enabled(dev_priv,
38cc1daf 15886 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
ddf9c536 15887 if (!error->transcoder[i].power_domain_on)
9d1cb914
PZ
15888 continue;
15889
63b66e5b
CW
15890 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15891
15892 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15893 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15894 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15895 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15896 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15897 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15898 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
c4a1d9e4
CW
15899 }
15900
15901 return error;
15902}
15903
edc3d884
MK
/* Forward printf-style output into the error state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured intel_display_error_state into the
 * error state buffer. A NULL error is a no-op. Registers of pipes and
 * transcoders whose power domain was off at capture time are printed as
 * zeros (they were never read).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
			err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
e2fcdaa9
VS
15962
15963void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
15964{
15965 struct intel_crtc *crtc;
15966
15967 for_each_intel_crtc(dev, crtc) {
15968 struct intel_unpin_work *work;
e2fcdaa9 15969
5e2d7afc 15970 spin_lock_irq(&dev->event_lock);
e2fcdaa9
VS
15971
15972 work = crtc->unpin_work;
15973
15974 if (work && work->event &&
15975 work->event->base.file_priv == file) {
15976 kfree(work->event);
15977 work->event = NULL;
15978 }
15979
5e2d7afc 15980 spin_unlock_irq(&dev->event_lock);
e2fcdaa9
VS
15981 }
15982}
This page took 3.277432 seconds and 5 git commands to generate.