/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include <drm/drm_plane_helper.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>

/**
 * DOC: RC6
 *
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage.  This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6.  Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform.  RC6 is usually the
 * safest one and the one which brings the most power savings; deeper states
 * save more power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
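
/*
 * Illustrative example (editor's addition, not part of the original file):
 * a caller that wants to allow both normal and deep RC6 would OR the flags
 * into a mask, e.g. INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE, which is 0x3.
 */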

static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);

	I915_WRITE(GEN8_CONFIG0,
		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);

	/* WaEnableChickenDCPR:skl,bxt,kbl */
	I915_WRITE(GEN8_CHICKEN_DCPR_1,
		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);

	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
	/* WaFbcWakeMemOn:skl,bxt,kbl */
	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
		   DISP_FBC_WM_DIS |
		   DISP_FBC_MEMORY_WAKE);

	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_DISABLE_DUMMY0);
}

static void bxt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:bxt */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * FIXME:
	 * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ);

	/*
	 * Wa: Backlight PWM may stop in the asserted state, causing backlight
	 * to stay fully on.
	 */
	if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
		I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
			   PWM1_GATING_DIS | PWM2_GATING_DIS);
}

static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}

static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};

static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
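
/*
 * Illustrative example (editor's addition): on a desktop DDR3 system with an
 * 800 MHz FSB and 667 MHz memory, intel_get_cxsr_latency(1, 1, 800, 667)
 * matches the {1, 1, 800, 667, 6420, 36420, 6873, 36873} row above, i.e. a
 * display self-refresh latency of 6420 ns.
 */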

static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
	if (enable)
		val &= ~FORCE_DDR_HIGH_FREQ;
	else
		val |= FORCE_DDR_HIGH_FREQ;
	val &= ~FORCE_DDR_LOW_FREQ;
	val |= FORCE_DDR_FREQ_REQ_ACK;
	vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
		      FORCE_DDR_FREQ_REQ_ACK) == 0, 3))
		DRM_ERROR("timed out waiting for Punit DDR DVFS request\n");

	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
{
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	if (enable)
		val |= DSP_MAXFIFO_PM5_ENABLE;
	else
		val &= ~DSP_MAXFIFO_PM5_ENABLE;
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)
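
/*
 * For illustration (editor's addition): FW_WM(wm, SR) expands to
 * (((wm) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK), i.e. it shifts a watermark
 * value into the SR field of a DSPFW register and masks off any overflow.
 */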

void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 val;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
		POSTING_READ(FW_BLC_SELF_VLV);
		dev_priv->wm.vlv.cxsr = enable;
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
		POSTING_READ(DSPFW3);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
		POSTING_READ(FW_BLC_SELF);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
		POSTING_READ(INSTPM);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;

#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
	((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
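
/*
 * For illustration (editor's addition): with lo_shift = 8 and hi_shift = 4
 * this evaluates to ((dsparb >> 8) & 0xff) | (((dsparb2 >> 4) & 0x1) << 8),
 * i.e. it reassembles a 9-bit FIFO start offset from 8 low bits in DSPARB
 * and 1 high bit in DSPARB2.
 */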

static int vlv_get_fifo_size(struct drm_device *dev,
			     enum pipe pipe, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int sprite0_start, sprite1_start, size;

	switch (pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);
		sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8);
		sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12);
		break;
	case PIPE_C:
		dsparb2 = I915_READ(DSPARB2);
		dsparb3 = I915_READ(DSPARB3);
		sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16);
		sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
		break;
	default:
		return 0;
	}

	switch (plane) {
	case 0:
		size = sprite0_start;
		break;
	case 1:
		size = sprite1_start - sprite0_start;
		break;
	case 2:
		size = 512 - 1 - sprite1_start;
		break;
	default:
		return 0;
	}

	DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n",
		      pipe_name(pipe), plane == 0 ? "primary" : "sprite",
		      plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1),
		      size);

	return size;
}

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};

/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO
 * @cpp: bytes per pixel
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills past the
 * watermark point.  If the FIFO drains completely, a FIFO underrun will
 * occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size, int cpp,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'.  Certainly 830 is quite unhappy with low values.
	 * Let's go for 8 which is the burst size since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
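
/*
 * Worked example (editor's addition, with illustrative numbers): for
 * clock_in_khz = 148500, cpp = 4 and latency_ns = 5000, entries_required =
 * (148 * 4 * 5000) / 1000 = 2960 bytes, or DIV_ROUND_UP(2960, 64) = 47
 * cachelines with a 64-byte cacheline.  With fifo_size = 96 and
 * guard_size = 2 the returned watermark level would be 96 - (47 + 2) = 47.
 */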

static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}

static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		int clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= FW_WM(wm, SR);
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					cpp, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= FW_WM(wm, CURSOR_SR);
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= FW_WM(wm, HPLL_SR);
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					cpp, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= FW_WM(wm, HPLL_CURSOR);
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}

static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, cpp;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * crtc->cursor->state->crtc_w * cpp;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
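
/*
 * Worked example (editor's addition, illustrative numbers): the small buffer
 * method above sizes the plane watermark from the bytes fetched during the
 * latency window.  At clock = 148500 kHz, cpp = 4 and a 5000 ns latency,
 * entries = ((148500 * 4 / 1000) * 5000) / 1000 = 2970 bytes, i.e.
 * DIV_ROUND_UP(2970, 64) = 47 cachelines, so *plane_wm = 47 + guard_size.
 */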

/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}

static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, cpp, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
	cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * cpp;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * cpp / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * cpp * crtc->cursor->state->crtc_w;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}

#define FW_WM_VLV(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV)

static void vlv_write_wm_values(struct intel_crtc *crtc,
				const struct vlv_wm_values *wm)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(VLV_DDL(pipe),
		   (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) |
		   (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) |
		   (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) |
		   (wm->ddl[pipe].primary << DDL_PLANE_SHIFT));

	I915_WRITE(DSPFW1,
		   FW_WM(wm->sr.plane, SR) |
		   FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) |
		   FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) |
		   FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA));
	I915_WRITE(DSPFW2,
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) |
		   FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) |
		   FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA));
	I915_WRITE(DSPFW3,
		   FW_WM(wm->sr.cursor, CURSOR_SR));

	if (IS_CHERRYVIEW(dev_priv)) {
		I915_WRITE(DSPFW7_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPFW8_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) |
			   FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE));
		I915_WRITE(DSPFW9_CHV,
			   FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) |
			   FW_WM(wm->pipe[PIPE_C].cursor, CURSORC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) |
			   FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) |
			   FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	} else {
		I915_WRITE(DSPFW7,
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) |
			   FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC));
		I915_WRITE(DSPHOWM,
			   FW_WM(wm->sr.plane >> 9, SR_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) |
			   FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) |
			   FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) |
			   FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) |
			   FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI));
	}

	/* zero (unused) WM1 watermarks */
	I915_WRITE(DSPFW4, 0);
	I915_WRITE(DSPFW5, 0);
	I915_WRITE(DSPFW6, 0);
	I915_WRITE(DSPHOWM1, 0);

	POSTING_READ(DSPFW1);
}

#undef FW_WM_VLV

enum vlv_wm_level {
	VLV_WM_LEVEL_PM2,
	VLV_WM_LEVEL_PM5,
	VLV_WM_LEVEL_DDR_DVFS,
};

/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
				   unsigned int pipe_htotal,
				   unsigned int horiz_pixels,
				   unsigned int cpp,
				   unsigned int latency)
{
	unsigned int ret;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64);

	return ret;
}
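
/*
 * Worked example (editor's addition, illustrative numbers): for a 1920-wide
 * plane with cpp = 4 on a 148500 kHz, htotal = 2200 mode and a PM2 latency
 * of 3 us (latency = 30 in 0.1us units): (30 * 148500) / (2200 * 10000) = 0
 * whole extra lines, so ret = (0 + 1) * 1920 * 4 = 7680 bytes, which
 * DIV_ROUND_UP(..., 64) turns into a watermark of 120 FIFO lines.
 */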

static void vlv_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* all latencies in usec */
	dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;

	dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
		dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;

		dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
	}
}

static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
				     struct intel_crtc *crtc,
				     const struct intel_plane_state *state,
				     int level)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
	int clock, htotal, cpp, width, wm;

	if (dev_priv->wm.pri_latency[level] == 0)
		return USHRT_MAX;

	if (!state->visible)
		return 0;

	cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
	clock = crtc->config->base.adjusted_mode.crtc_clock;
	htotal = crtc->config->base.adjusted_mode.crtc_htotal;
	width = crtc->config->pipe_src_w;
	if (WARN_ON(htotal == 0))
		htotal = 1;

	if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
		/*
		 * FIXME the formula gives values that are
		 * too big for the cursor FIFO, and hence we
		 * would never be able to use cursors. For
		 * now just hardcode the watermark.
		 */
		wm = 63;
	} else {
		wm = vlv_wm_method2(clock, htotal, width, cpp,
				    dev_priv->wm.pri_latency[level] * 10);
	}

	return min_t(int, wm, USHRT_MAX);
}

static void vlv_compute_fifo(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	unsigned int total_rate = 0;
	const int fifo_size = 512 - 1;
	int fifo_extra, fifo_left = fifo_size;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		if (state->visible) {
			wm_state->num_active_planes++;
			total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);
		unsigned int rate;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			plane->wm.fifo_size = 63;
			continue;
		}

		if (!state->visible) {
			plane->wm.fifo_size = 0;
			continue;
		}

		rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
		plane->wm.fifo_size = fifo_size * rate / total_rate;
		fifo_left -= plane->wm.fifo_size;
	}

	fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);

	/* spread the remainder evenly */
	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		int plane_extra;

		if (fifo_left == 0)
			break;

		if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
			continue;

		/* give it all to the first plane if none are active */
		if (plane->wm.fifo_size == 0 &&
		    wm_state->num_active_planes)
			continue;

		plane_extra = min(fifo_extra, fifo_left);
		plane->wm.fifo_size += plane_extra;
		fifo_left -= plane_extra;
	}

	WARN_ON(fifo_left != 0);
}
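
/*
 * Worked example (editor's addition, illustrative numbers): with a visible
 * primary plane at cpp = 4 and one visible sprite at cpp = 2, total_rate = 6
 * and the 511-entry FIFO is split proportionally: 511 * 4 / 6 = 340 entries
 * for the primary and 511 * 2 / 6 = 170 for the sprite.  The 1 leftover
 * entry is then handed out by the remainder loop above.
 */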

static void vlv_invert_wms(struct intel_crtc *crtc)
{
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	int level;

	for (level = 0; level < wm_state->num_levels; level++) {
		struct drm_device *dev = crtc->base.dev;
		const int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
		struct intel_plane *plane;

		wm_state->sr[level].plane = sr_fifo_size - wm_state->sr[level].plane;
		wm_state->sr[level].cursor = 63 - wm_state->sr[level].cursor;

		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = plane->wm.fifo_size -
					wm_state->wm[level].cursor;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = plane->wm.fifo_size -
					wm_state->wm[level].primary;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = plane->wm.fifo_size -
					wm_state->wm[level].sprite[sprite];
				break;
			}
		}
	}
}

static void vlv_compute_wm(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct vlv_wm_state *wm_state = &crtc->wm_state;
	struct intel_plane *plane;
	int sr_fifo_size = INTEL_INFO(dev)->num_pipes * 512 - 1;
	int level;

	memset(wm_state, 0, sizeof(*wm_state));

	wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
	wm_state->num_levels = to_i915(dev)->wm.max_level + 1;

	wm_state->num_active_planes = 0;

	vlv_compute_fifo(crtc);

	if (wm_state->num_active_planes != 1)
		wm_state->cxsr = false;

	if (wm_state->cxsr) {
		for (level = 0; level < wm_state->num_levels; level++) {
			wm_state->sr[level].plane = sr_fifo_size;
			wm_state->sr[level].cursor = 63;
		}
	}

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		struct intel_plane_state *state =
			to_intel_plane_state(plane->base.state);

		if (!state->visible)
			continue;

		/* normal watermarks */
		for (level = 0; level < wm_state->num_levels; level++) {
			int wm = vlv_compute_wm_level(plane, crtc, state, level);
			int max_wm = plane->base.type == DRM_PLANE_TYPE_CURSOR ? 63 : 511;

			/* hack */
			if (WARN_ON(level == 0 && wm > max_wm))
				wm = max_wm;

			if (wm > plane->wm.fifo_size)
				break;

			switch (plane->base.type) {
				int sprite;
			case DRM_PLANE_TYPE_CURSOR:
				wm_state->wm[level].cursor = wm;
				break;
			case DRM_PLANE_TYPE_PRIMARY:
				wm_state->wm[level].primary = wm;
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				sprite = plane->plane;
				wm_state->wm[level].sprite[sprite] = wm;
				break;
			}
		}

		wm_state->num_levels = level;

		if (!wm_state->cxsr)
			continue;

		/* maxfifo watermarks */
		switch (plane->base.type) {
			int sprite;
		case DRM_PLANE_TYPE_CURSOR:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].cursor =
					wm_state->wm[level].cursor;
			break;
		case DRM_PLANE_TYPE_PRIMARY:
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].primary);
			break;
		case DRM_PLANE_TYPE_OVERLAY:
			sprite = plane->plane;
			for (level = 0; level < wm_state->num_levels; level++)
				wm_state->sr[level].plane =
					min(wm_state->sr[level].plane,
					    wm_state->wm[level].sprite[sprite]);
			break;
		}
	}

	/* clear any (partially) filled invalid levels */
	for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
		memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
		memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
	}

	vlv_invert_wms(crtc);
}

#define VLV_FIFO(plane, value) \
	(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)

static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane;
	int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;

	for_each_intel_plane_on_crtc(dev, crtc, plane) {
		if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
			WARN_ON(plane->wm.fifo_size != 63);
			continue;
		}

		if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			sprite0_start = plane->wm.fifo_size;
		else if (plane->plane == 0)
			sprite1_start = sprite0_start + plane->wm.fifo_size;
		else
			fifo_size = sprite1_start + plane->wm.fifo_size;
	}

	WARN_ON(fifo_size != 512 - 1);

	DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
		      pipe_name(crtc->pipe), sprite0_start,
		      sprite1_start, fifo_size);

	switch (crtc->pipe) {
		uint32_t dsparb, dsparb2, dsparb3;
	case PIPE_A:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
			    VLV_FIFO(SPRITEB, 0xff));
		dsparb |= (VLV_FIFO(SPRITEA, sprite0_start) |
			   VLV_FIFO(SPRITEB, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEA_HI, 0x1) |
			     VLV_FIFO(SPRITEB_HI, 0x1));
		dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_B:
		dsparb = I915_READ(DSPARB);
		dsparb2 = I915_READ(DSPARB2);

		dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
			    VLV_FIFO(SPRITED, 0xff));
		dsparb |= (VLV_FIFO(SPRITEC, sprite0_start) |
			   VLV_FIFO(SPRITED, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEC_HI, 0xff) |
			     VLV_FIFO(SPRITED_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITED_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB, dsparb);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	case PIPE_C:
		dsparb3 = I915_READ(DSPARB3);
		dsparb2 = I915_READ(DSPARB2);

		dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
			     VLV_FIFO(SPRITEF, 0xff));
		dsparb3 |= (VLV_FIFO(SPRITEE, sprite0_start) |
			    VLV_FIFO(SPRITEF, sprite1_start));

		dsparb2 &= ~(VLV_FIFO(SPRITEE_HI, 0xff) |
			     VLV_FIFO(SPRITEF_HI, 0xff));
		dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
			    VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));

		I915_WRITE(DSPARB3, dsparb3);
		I915_WRITE(DSPARB2, dsparb2);
		break;
	default:
		break;
	}
}

#undef VLV_FIFO

static void vlv_merge_wm(struct drm_device *dev,
			 struct vlv_wm_values *wm)
{
	struct intel_crtc *crtc;
	int num_active_crtcs = 0;

	wm->level = to_i915(dev)->wm.max_level;
	wm->cxsr = true;

	for_each_intel_crtc(dev, crtc) {
		const struct vlv_wm_state *wm_state = &crtc->wm_state;

		if (!crtc->active)
			continue;

		if (!wm_state->cxsr)
			wm->cxsr = false;

		num_active_crtcs++;
		wm->level = min_t(int, wm->level, wm_state->num_levels - 1);
	}

	if (num_active_crtcs != 1)
		wm->cxsr = false;

	if (num_active_crtcs > 1)
		wm->level = VLV_WM_LEVEL_PM2;

	for_each_intel_crtc(dev, crtc) {
		struct vlv_wm_state *wm_state = &crtc->wm_state;
		enum pipe pipe = crtc->pipe;

		if (!crtc->active)
			continue;

		wm->pipe[pipe] = wm_state->wm[wm->level];
		if (wm->cxsr)
			wm->sr = wm_state->sr[wm->level];

		wm->ddl[pipe].primary = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[0] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].sprite[1] = DDL_PRECISION_HIGH | 2;
		wm->ddl[pipe].cursor = DDL_PRECISION_HIGH | 2;
	}
}

static void vlv_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct vlv_wm_values wm = {};

	vlv_compute_wm(intel_crtc);
	vlv_merge_wm(dev, &wm);

	if (memcmp(&dev_priv->wm.vlv, &wm, sizeof(wm)) == 0) {
		/* FIXME should be part of crtc atomic commit */
		vlv_pipe_set_fifo_size(intel_crtc);
		return;
	}

	if (wm.level < VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, false);

	if (wm.level < VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level >= VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, false);

	if (!wm.cxsr && dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, false);

	/* FIXME should be part of crtc atomic commit */
	vlv_pipe_set_fifo_size(intel_crtc);

	vlv_write_wm_values(intel_crtc, &wm);

	DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
		      "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
		      pipe_name(pipe), wm.pipe[pipe].primary, wm.pipe[pipe].cursor,
		      wm.pipe[pipe].sprite[0], wm.pipe[pipe].sprite[1],
		      wm.sr.plane, wm.sr.cursor, wm.level, wm.cxsr);

	if (wm.cxsr && !dev_priv->wm.vlv.cxsr)
		intel_set_memory_cxsr(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_PM5 &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_PM5)
		chv_set_memory_pm5(dev_priv, true);

	if (wm.level >= VLV_WM_LEVEL_DDR_DVFS &&
	    dev_priv->wm.vlv.level < VLV_WM_LEVEL_DDR_DVFS)
		chv_set_memory_dvfs(dev_priv, true);

	dev_priv->wm.vlv = wm;
}

#define single_plane_enabled(mask) is_power_of_2(mask)

static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   FW_WM(plane_sr, SR) |
		   FW_WM(cursorb_wm, CURSORB) |
		   FW_WM(planeb_wm, PLANEB) |
		   FW_WM(planea_wm, PLANEA));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   FW_WM(cursora_wm, CURSORA));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * crtc->cursor->state->crtc_w;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, FW_WM(srwm, SR) |
		   FW_WM(8, CURSORB) |
		   FW_WM(8, PLANEB) |
		   FW_WM(8, PLANEA));
	I915_WRITE(DSPFW2, FW_WM(8, CURSORA) |
		   FW_WM(8, PLANEC_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->state->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
		int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			cpp * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}

static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}

uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
{
	uint32_t pixel_rate;

	pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (pipe_config->pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = pipe_config->pch_pfit.size;

		pipe_w = pipe_config->pipe_src_w;
		pipe_h = pipe_config->pipe_src_h;

		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		if (WARN_ON(!pfit_w || !pfit_h))
			return pixel_rate;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * cpp * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}

/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;
	if (WARN_ON(!pipe_htotal))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * cpp;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
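
/*
 * Worked example (editor's addition, illustrative numbers): for pixel_rate =
 * 148500 kHz, cpp = 4 and a 2 us latency (latency = 20), method1 yields
 * DIV_ROUND_UP_ULL(148500 * 4 * 20, 64 * 10000) + 2 = 19 + 2 = 21, while
 * method2 with htotal = 2200 and horiz_pixels = 1920 yields
 * (0 + 1) * 1920 * 4 / 64 + 2 = 122; callers that use both take the minimum.
 */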

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t cpp)
{
	/*
	 * Neither of these should be possible since this function shouldn't be
	 * called if the CRTC is off or the plane is invisible.  But let's be
	 * extra paranoid to avoid a potential divide-by-zero if we screw up
	 * elsewhere in the driver.
	 */
	if (WARN_ON(!cpp))
		return 0;
	if (WARN_ON(!horiz_pixels))
		return 0;

	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
}
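
/*
 * Worked example (editor's addition, illustrative numbers): a primary
 * watermark of pri_val = 21 on a 1920-wide plane with cpp = 4 gives
 * DIV_ROUND_UP(21 * 64, 1920 * 4) + 2 = 1 + 2 = 3, i.e. roughly the same
 * fetch size re-expressed relative to the line size for the FBC watermark.
 */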

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value,
				   bool is_lp)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
	uint32_t method1, method2;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
	method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
				 cstate->base.adjusted_mode.crtc_htotal,
				 drm_rect_width(&pstate->dst),
				 cpp, mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t mem_value)
{
	/*
	 * We treat the cursor plane as always-on for the purposes of watermark
	 * calculation.  Until we have two-stage watermark programming merged,
	 * this is necessary to avoid flickering.
	 */
	int cpp = 4;
	int width = pstate->visible ? pstate->base.crtc_w : 64;

	if (!cstate->base.active)
		return 0;

	return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
			      cstate->base.adjusted_mode.crtc_htotal,
			      width, cpp, mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
				   const struct intel_plane_state *pstate,
				   uint32_t pri_val)
{
	int cpp = pstate->base.fb ?
		drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;

	if (!cstate->base.active || !pstate->visible)
		return 0;

	return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
}

static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}

/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}

static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}

*dev_priv
,
2009 const struct intel_crtc
*intel_crtc
,
2011 struct intel_crtc_state
*cstate
,
2012 struct intel_plane_state
*pristate
,
2013 struct intel_plane_state
*sprstate
,
2014 struct intel_plane_state
*curstate
,
2015 struct intel_wm_level
*result
)
2017 uint16_t pri_latency
= dev_priv
->wm
.pri_latency
[level
];
2018 uint16_t spr_latency
= dev_priv
->wm
.spr_latency
[level
];
2019 uint16_t cur_latency
= dev_priv
->wm
.cur_latency
[level
];
2021 /* WM1+ latency values stored in 0.5us units */
2029 result
->pri_val
= ilk_compute_pri_wm(cstate
, pristate
,
2030 pri_latency
, level
);
2031 result
->fbc_val
= ilk_compute_fbc_wm(cstate
, pristate
, result
->pri_val
);
2035 result
->spr_val
= ilk_compute_spr_wm(cstate
, sprstate
, spr_latency
);
2038 result
->cur_val
= ilk_compute_cur_wm(cstate
, curstate
, cur_latency
);
2040 result
->enable
= true;
static uint32_t
hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
{
	const struct intel_atomic_state *intel_state =
		to_intel_atomic_state(cstate->base.state);
	const struct drm_display_mode *adjusted_mode =
		&cstate->base.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!cstate->base.active)
		return 0;
	if (WARN_ON(adjusted_mode->crtc_clock == 0))
		return 0;
	if (WARN_ON(intel_state->cdclk == 0))
		return 0;

	/*
	 * The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
				     adjusted_mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
					 intel_state->cdclk);

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}

static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_GEN9(dev)) {
		uint32_t val;
		int ret, i;
		int level, max_level = ilk_wm_max_level(dev);

		/* read the first set of memory latencies[0:3] */
		val = 0; /* data0 to be programmed to 0 for first set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);

		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/* read the second set of memory latencies[4:7] */
		val = 1; /* data0 to be programmed to 1 for second set */
		mutex_lock(&dev_priv->rps.hw_lock);
		ret = sandybridge_pcode_read(dev_priv,
					     GEN9_PCODE_READ_MEM_LATENCY,
					     &val);
		mutex_unlock(&dev_priv->rps.hw_lock);
		if (ret) {
			DRM_ERROR("SKL Mailbox read error = %d\n", ret);
			return;
		}

		wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;
		wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
				GEN9_MEM_LATENCY_LEVEL_MASK;

		/*
		 * WaWmMemoryReadLatency:skl
		 *
		 * punit doesn't take into account the read latency so we need
		 * to add 2us to the various latency levels we retrieve from
		 * the punit.
		 *   - W0 is a bit special in that it's the only level that
		 *   can't be disabled if we want to have display working, so
		 *   we always add 2us there.
		 *   - For levels >=1, punit returns 0us latency when they are
		 *   disabled, so we respect that and don't add 2us then
		 *
		 * Additionally, if a level n (n > 1) has a 0us latency, all
		 * levels m (m >= n) need to be disabled. We make sure to
		 * sanitize the values out of the punit to satisfy this
		 * requirement.
		 */
		wm[0] += 2;
		for (level = 1; level <= max_level; level++)
			if (wm[level] != 0)
				wm[level] += 2;
			else {
				for (i = level + 1; i <= max_level; i++)
					wm[i] = 0;

				break;
			}
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}

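/*
 * Worked example of the WaWmMemoryReadLatency sanitization above (punit
 * values assumed for illustration): if the punit reports latencies
 * [3, 5, 9, 0, 4, ...] for levels 0..4, the +2us adjustment yields
 * wm[0] = 5, wm[1] = 7, wm[2] = 11, while wm[3] stays 0 (disabled),
 * which in turn forces wm[4] and every higher level to 0 as well.
 */
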
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (IS_GEN5(dev))
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (INTEL_INFO(dev)->gen >= 9)
		return 7;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}

static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[8])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/*
		 * - latencies are in us on gen9.
		 * - before then, WM1+ latency values are in 0.5us units
		 */
		if (IS_GEN9(dev))
			latency *= 10;
		else if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}

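/*
 * Example of the unit handling above (illustrative values): on gen9 a raw
 * value of 3 means 3 us and prints as "3.0 usec" (latency = 30 tenths);
 * before gen9 a WM1+ raw value of 4 is in 0.5 us units and prints as
 * "2.0 usec" (latency = 20 tenths).
 */
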
static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
				    uint16_t wm[5], uint16_t min)
{
	int level, max_level = ilk_wm_max_level(&dev_priv->drm);

	if (wm[0] >= min)
		return false;

	wm[0] = max(wm[0], min);
	for (level = 1; level <= max_level; level++)
		wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));

	return true;
}

static void snb_wm_latency_quirk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool changed;

	/*
	 * The BIOS provided WM memory latency values are often
	 * inadequate for high resolution displays. Adjust them.
	 */
	changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
		ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);

	if (!changed)
		return;

	DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}

static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);

	if (IS_GEN6(dev))
		snb_wm_latency_quirk(dev);
}

static void skl_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
	intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
}

static bool ilk_validate_pipe_wm(struct drm_device *dev,
				 struct intel_pipe_wm *pipe_wm)
{
	/* LP0 watermark maximums depend on this pipe alone */
	const struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = pipe_wm->sprites_enabled,
		.sprites_scaled = pipe_wm->sprites_scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	/* At least LP0 must be valid */
	if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) {
		DRM_DEBUG_KMS("LP0 watermark invalid\n");
		return false;
	}

	return true;
}

/* Compute new watermarks for the pipe */
static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct intel_pipe_wm *pipe_wm;
	struct drm_device *dev = state->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *intel_plane;
	struct intel_plane_state *pristate = NULL;
	struct intel_plane_state *sprstate = NULL;
	struct intel_plane_state *curstate = NULL;
	int level, max_level = ilk_wm_max_level(dev), usable_level;
	struct ilk_wm_maximums max;

	pipe_wm = &cstate->wm.ilk.optimal;

	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		struct intel_plane_state *ps;

		ps = intel_atomic_get_existing_plane_state(state,
							   intel_plane);
		if (!ps)
			continue;

		if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
			pristate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
			sprstate = ps;
		else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
			curstate = ps;
	}

	pipe_wm->pipe_enabled = cstate->base.active;
	if (sprstate) {
		pipe_wm->sprites_enabled = sprstate->visible;
		pipe_wm->sprites_scaled = sprstate->visible &&
			(drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 ||
			 drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16);
	}

	usable_level = max_level;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled)
		usable_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (pipe_wm->sprites_scaled)
		usable_level = 0;

	ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
			     pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);

	memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
	pipe_wm->wm[0] = pipe_wm->raw_wm[0];

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(cstate);

	if (!ilk_validate_pipe_wm(dev, pipe_wm))
		return -EINVAL;

	ilk_compute_wm_reg_maximums(dev, 1, &max);

	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &pipe_wm->raw_wm[level];

		ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
				     pristate, sprstate, curstate, wm);

		/*
		 * Disable any watermark level that exceeds the
		 * register maximums since such watermarks are
		 * always invalid.
		 */
		if (level > usable_level)
			continue;

		if (ilk_validate_wm_level(level, &max, wm))
			pipe_wm->wm[level] = *wm;
		else
			usable_level = level;
	}

	return 0;
}

/*
 * Build a set of 'intermediate' watermark values that satisfy both the old
 * state and the new state.  These can be programmed to the hardware
 * immediately.
 */
static int ilk_compute_intermediate_wm(struct drm_device *dev,
				       struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *newstate)
{
	struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
	struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
	int level, max_level = ilk_wm_max_level(dev);

	/*
	 * Start with the final, target watermarks, then combine with the
	 * currently active watermarks to get values that are safe both before
	 * and after the vblank.
	 */
	*a = newstate->wm.ilk.optimal;
	a->pipe_enabled |= b->pipe_enabled;
	a->sprites_enabled |= b->sprites_enabled;
	a->sprites_scaled |= b->sprites_scaled;

	for (level = 0; level <= max_level; level++) {
		struct intel_wm_level *a_wm = &a->wm[level];
		const struct intel_wm_level *b_wm = &b->wm[level];

		a_wm->enable &= b_wm->enable;
		a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val);
		a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val);
		a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val);
		a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val);
	}

	/*
	 * We need to make sure that these merged watermark values are
	 * actually a valid configuration themselves.  If they're not,
	 * there's no safe way to transition from the old state to
	 * the new state, so we need to fail the atomic transaction.
	 */
	if (!ilk_validate_pipe_wm(dev, a))
		return -EINVAL;

	/*
	 * If our intermediate WM are identical to the final WM, then we can
	 * omit the post-vblank programming; only update if it's different.
	 */
	if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
		newstate->wm.need_postvbl_update = false;

	return 0;
}

/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	ret_wm->enable = true;

	for_each_intel_crtc(dev, intel_crtc) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		/*
		 * The watermark values may have been used in the past,
		 * so we must maintain them in the registers for some
		 * time even if the level is now disabled.
		 */
		if (!wm->enable)
			ret_wm->enable = false;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}
}

/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int last_enabled_level = max_level;

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		last_enabled_level = 0;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (level > last_enabled_level)
			wm->enable = false;
		else if (!ilk_validate_wm_level(level, max, wm))
			/* make sure all following levels get disabled */
			last_enabled_level = level - 1;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			if (wm->enable)
				merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled &&
	    intel_fbc_is_active(dev_priv)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}

static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}

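/*
 * Example of the LP->level mapping above: with wm[4] enabled (HSW/BDW
 * with all four LP levels valid) LP1/LP2/LP3 map to levels 1/3/4;
 * otherwise they map to levels 1/2/3.
 */
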
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}

static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];

		/*
		 * Maintain the watermark values even if the level is
		 * disabled. Doing otherwise could cause underruns.
		 */
		results->wm_lp[wm_lp - 1] =
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (r->enable)
			results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		/*
		 * Always set WM1S_LP_EN when spr_val != 0, even if the
		 * level is disabled. Doing otherwise could cause underruns.
		 */
		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	for_each_intel_crtc(dev, intel_crtc) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.ilk.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}

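/*
 * Sketch of the WM_LPx packing above with assumed example values: for
 * level 3 on HSW (latency field = 2 * 3 = 6), pri_val = 42 and
 * cur_val = 8, the register word is
 * (6 << WM1_LP_LATENCY_SHIFT) | (42 << WM1_LP_SR_SHIFT) | 8, with
 * WM1_LP_SR_EN OR'ed in only when the merged level is still enabled.
 */
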
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}

/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)

static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(dev_priv, pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}

static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}

/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = &dev_priv->drm;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}

bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}

/*
 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
 * different active planes.
 */

#define SKL_DDB_SIZE		896	/* in blocks */
#define BXT_DDB_SIZE		512
#define SKL_SAGV_BLOCK_TIME	30 /* µs */

/*
 * Return the index of a plane in the SKL DDB and wm result arrays.  Primary
 * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and
 * other universal planes are in indices 1..n.  Note that this may leave unused
 * indices between the top "sprite" plane and the cursor.
 */
static int
skl_wm_plane_id(const struct intel_plane *plane)
{
	switch (plane->base.type) {
	case DRM_PLANE_TYPE_PRIMARY:
		return 0;
	case DRM_PLANE_TYPE_CURSOR:
		return PLANE_CURSOR;
	case DRM_PLANE_TYPE_OVERLAY:
		return plane->plane + 1;
	default:
		MISSING_CASE(plane->base.type);
		return plane->plane;
	}
}

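/*
 * Example of the resulting index layout on a pipe with two sprites:
 * primary -> 0, sprite A (plane->plane == 0) -> 1, sprite B -> 2,
 * cursor -> PLANE_CURSOR (I915_MAX_PLANES - 1); any slots between the
 * last sprite and the cursor stay unused.
 */
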
/*
 * SAGV dynamically adjusts the system agent voltage and clock frequencies
 * depending on power and performance requirements. The display engine access
 * to system memory is blocked during the adjustment time. Because of the
 * blocking time, having this enabled can cause full system hangs and/or pipe
 * underruns if we don't meet all of the following requirements:
 *
 *  - <= 1 pipe enabled
 *  - All planes can enable watermarks for latencies >= SAGV engine block time
 *  - We're not using an interlaced display configuration
 */
int
skl_enable_sagv(struct drm_i915_private *dev_priv)
{
	int ret;

	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
	    dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
		return 0;

	DRM_DEBUG_KMS("Enabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				      GEN9_SAGV_ENABLE);

	/* We don't need to wait for the SAGV when enabling */
	mutex_unlock(&dev_priv->rps.hw_lock);

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (ret == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (ret < 0) {
		DRM_ERROR("Failed to enable the SAGV\n");
		return ret;
	}

	dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
	return 0;
}

static int
skl_do_sagv_disable(struct drm_i915_private *dev_priv)
{
	int ret;
	uint32_t temp = GEN9_SAGV_DISABLE;

	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				     &temp);
	if (ret)
		return ret;
	else
		return temp & GEN9_SAGV_IS_DISABLED;
}

int
skl_disable_sagv(struct drm_i915_private *dev_priv)
{
	int ret, result;

	if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
	    dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
		return 0;

	DRM_DEBUG_KMS("Disabling the SAGV\n");
	mutex_lock(&dev_priv->rps.hw_lock);

	/* bspec says to keep retrying for at least 1 ms */
	ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret == -ETIMEDOUT) {
		DRM_ERROR("Request to disable SAGV timed out\n");
		return -ETIMEDOUT;
	}

	/*
	 * Some skl systems, pre-release machines in particular,
	 * don't actually have an SAGV.
	 */
	if (result == -ENXIO) {
		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
		dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
		return 0;
	} else if (result < 0) {
		DRM_ERROR("Failed to disable the SAGV\n");
		return result;
	}

	dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
	return 0;
}

bool skl_can_enable_sagv(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	enum pipe pipe;
	int level, plane;

	/*
	 * SKL workaround: bspec recommends we disable the SAGV when we have
	 * more then one pipe enabled
	 *
	 * If there are no active CRTCs, no additional checks need be performed
	 */
	if (hweight32(intel_state->active_crtcs) == 0)
		return true;
	else if (hweight32(intel_state->active_crtcs) > 1)
		return false;

	/* Since we're now guaranteed to only have one active CRTC... */
	pipe = ffs(intel_state->active_crtcs) - 1;
	crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
		return false;

	for_each_plane(dev_priv, pipe, plane) {
		/* Skip this plane if it's not enabled */
		if (intel_state->wm_results.plane[pipe][plane][0] == 0)
			continue;

		/* Find the highest enabled wm level for this plane */
		for (level = ilk_wm_max_level(dev);
		     intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
			{ }

		/*
		 * If any of the planes on this pipe don't enable wm levels
		 * that incur memory latencies higher then 30µs we can't enable
		 * the SAGV
		 */
		if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
			return false;
	}

	return true;
}

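/*
 * Illustrative check (example numbers, not a real configuration): with a
 * single active pipe whose highest enabled plane watermark is level 5 and
 * dev_priv->wm.skl_latency[5] = 25 us, 25 < SKL_SAGV_BLOCK_TIME (30 us),
 * so the SAGV must stay disabled; a plane whose highest enabled level
 * covers a 34 us latency would pass the test.
 */
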
static void
skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
				   const struct intel_crtc_state *cstate,
				   struct skl_ddb_entry *alloc, /* out */
				   int *num_active /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *for_crtc = cstate->base.crtc;
	unsigned int pipe_size, ddb_size;
	int nth_active_pipe;
	int pipe = to_intel_crtc(for_crtc)->pipe;

	if (WARN_ON(!state) || !cstate->base.active) {
		alloc->start = 0;
		alloc->end = 0;
		*num_active = hweight32(dev_priv->active_crtcs);
		return;
	}

	if (intel_state->active_pipe_changes)
		*num_active = hweight32(intel_state->active_crtcs);
	else
		*num_active = hweight32(dev_priv->active_crtcs);

	if (IS_BROXTON(dev))
		ddb_size = BXT_DDB_SIZE;
	else
		ddb_size = SKL_DDB_SIZE;

	ddb_size -= 4; /* 4 blocks for bypass path allocation */

	/*
	 * If the state doesn't change the active CRTC's, then there's
	 * no need to recalculate; the existing pipe allocation limits
	 * should remain unchanged.  Note that we're safe from racing
	 * commits since any racing commit that changes the active CRTC
	 * list would need to grab _all_ crtc locks, including the one
	 * we currently hold.
	 */
	if (!intel_state->active_pipe_changes) {
		*alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
		return;
	}

	nth_active_pipe = hweight32(intel_state->active_crtcs &
				    (drm_crtc_mask(for_crtc) - 1));
	pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
	alloc->start = nth_active_pipe * ddb_size / *num_active;
	alloc->end = alloc->start + pipe_size;
}

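/*
 * Worked example of the proportional split above (assumed configuration):
 * on SKL with two active pipes, ddb_size = 896 - 4 = 892 blocks and
 * pipe_size = 892 / 2 = 446, so the first active pipe gets [0, 446) and
 * the second gets [446, 892).
 */
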
static unsigned int skl_cursor_allocation(int num_active)
{
	if (num_active == 1)
		return 32;

	return 8;
}

static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
{
	entry->start = reg & 0x3ff;
	entry->end = (reg >> 16) & 0x3ff;
	if (entry->end)
		entry->end += 1;
}

void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
			  struct skl_ddb_allocation *ddb /* out */)
{
	enum pipe pipe;
	int plane;
	u32 val;

	memset(ddb, 0, sizeof(*ddb));

	for_each_pipe(dev_priv, pipe) {
		enum intel_display_power_domain power_domain;

		power_domain = POWER_DOMAIN_PIPE(pipe);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;

		for_each_plane(dev_priv, pipe, plane) {
			val = I915_READ(PLANE_BUF_CFG(pipe, plane));
			skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
						   val);
		}

		val = I915_READ(CUR_BUF_CFG(pipe));
		skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
					   val);

		intel_display_power_put(dev_priv, power_domain);
	}
}

/*
 * Determines the downscale amount of a plane for the purposes of watermark calculations.
 * The bspec defines downscale amount as:
 *
 * """
 * Horizontal down scale amount = maximum[1, Horizontal source size /
 *                                           Horizontal destination size]
 * Vertical down scale amount = maximum[1, Vertical source size /
 *                                         Vertical destination size]
 * Total down scale amount = Horizontal down scale amount *
 *                           Vertical down scale amount
 * """
 *
 * Return value is provided in 16.16 fixed point form to retain fractional part.
 * Caller should take care of dividing & rounding off the value.
 */
static uint32_t
skl_plane_downscale_amount(const struct intel_plane_state *pstate)
{
	uint32_t downscale_h, downscale_w;
	uint32_t src_w, src_h, dst_w, dst_h;

	if (WARN_ON(!pstate->visible))
		return DRM_PLANE_HELPER_NO_SCALING;

	/* n.b., src is 16.16 fixed point, dst is whole integer */
	src_w = drm_rect_width(&pstate->src);
	src_h = drm_rect_height(&pstate->src);
	dst_w = drm_rect_width(&pstate->dst);
	dst_h = drm_rect_height(&pstate->dst);
	if (intel_rotation_90_or_270(pstate->base.rotation))
		swap(dst_w, dst_h);

	downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
	downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);

	/* Provide result in 16.16 fixed point */
	return (uint64_t)downscale_w * downscale_h >> 16;
}

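/*
 * Worked 16.16 example for the helper above (illustrative numbers): a
 * 3840x2160 source scaled to a 1920x1080 destination gives
 * downscale_w = downscale_h = 2 << 16, so the returned total is
 * ((uint64_t)(2 << 16) * (2 << 16)) >> 16 = 4 << 16, i.e. 4.0.
 */
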
static unsigned int
skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
			     const struct drm_plane_state *pstate,
			     int y)
{
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t down_scale_amount, data_rate;
	uint32_t width = 0, height = 0;
	unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;

	if (!intel_pstate->visible)
		return 0;
	if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
		return 0;
	if (y && format != DRM_FORMAT_NV12)
		return 0;

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	/* for planar format */
	if (format == DRM_FORMAT_NV12) {
		if (y)  /* y-plane data rate */
			data_rate = width * height *
				drm_format_plane_cpp(format, 0);
		else    /* uv-plane data rate */
			data_rate = (width / 2) * (height / 2) *
				drm_format_plane_cpp(format, 1);
	} else {
		/* for packed formats */
		data_rate = width * height * drm_format_plane_cpp(format, 0);
	}

	down_scale_amount = skl_plane_downscale_amount(intel_pstate);

	return (uint64_t)data_rate * down_scale_amount >> 16;
}

/*
 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
 * a 8192x4096@32bpp framebuffer:
 *   3 * 4096 * 8192  * 4 < 2^32
 */
static unsigned int
skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
{
	struct drm_crtc_state *cstate = &intel_cstate->base;
	struct drm_atomic_state *state = cstate->state;
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_plane *plane;
	const struct intel_plane *intel_plane;
	struct drm_plane_state *pstate;
	unsigned int rate, total_data_rate = 0;
	int id;
	int i;

	if (WARN_ON(!state))
		return 0;

	/* Calculate and cache data rate for each plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		id = skl_wm_plane_id(to_intel_plane(plane));
		intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe != intel_crtc->pipe)
			continue;

		/* packed/uv */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 0);
		intel_cstate->wm.skl.plane_data_rate[id] = rate;

		/* y-plane */
		rate = skl_plane_relative_data_rate(intel_cstate,
						    pstate, 1);
		intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
	}

	/* Calculate CRTC's total data rate from cached values */
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		int id = skl_wm_plane_id(intel_plane);

		/* packed/uv */
		total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
		total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
	}

	return total_data_rate;
}

static uint16_t
skl_ddb_min_alloc(const struct drm_plane_state *pstate,
		  int y)
{
	struct drm_framebuffer *fb = pstate->fb;
	struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
	uint32_t src_w, src_h;
	uint32_t min_scanlines = 8;
	uint8_t plane_bpp;

	if (WARN_ON(!fb))
		return 0;

	/* For packed formats, no y-plane, return 0 */
	if (y && fb->pixel_format != DRM_FORMAT_NV12)
		return 0;

	/* For Non Y-tile return 8-blocks */
	if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
	    fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
		return 8;

	src_w = drm_rect_width(&intel_pstate->src) >> 16;
	src_h = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(src_w, src_h);

	/* Halve UV plane width and height for NV12 */
	if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
		src_w /= 2;
		src_h /= 2;
	}

	if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
	else
		plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);

	if (intel_rotation_90_or_270(pstate->rotation)) {
		switch (plane_bpp) {
		case 1:
			min_scanlines = 32;
			break;
		case 2:
			min_scanlines = 16;
			break;
		case 4:
			min_scanlines = 8;
			break;
		case 8:
			min_scanlines = 4;
			break;
		default:
			WARN(1, "Unsupported pixel depth %u for rotation",
			     plane_bpp);
			min_scanlines = 32;
		}
	}

	return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
}

static int
skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
		      struct skl_ddb_allocation *ddb /* out */)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct drm_crtc *crtc = cstate->base.crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *plane;
	struct drm_plane_state *pstate;
	enum pipe pipe = intel_crtc->pipe;
	struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
	uint16_t alloc_size, start, cursor_blocks;
	uint16_t *minimum = cstate->wm.skl.minimum_blocks;
	uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
	unsigned int total_data_rate;
	int num_active;
	int id, i;

	if (WARN_ON(!state))
		return 0;

	if (!cstate->base.active) {
		ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
		return 0;
	}

	skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
	alloc_size = skl_ddb_entry_size(alloc);
	if (alloc_size == 0) {
		memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
		return 0;
	}

	cursor_blocks = skl_cursor_allocation(num_active);
	ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
	ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;

	alloc_size -= cursor_blocks;

	/* 1. Allocate the mininum required blocks for each active plane */
	for_each_plane_in_state(state, plane, pstate, i) {
		intel_plane = to_intel_plane(plane);
		id = skl_wm_plane_id(intel_plane);

		if (intel_plane->pipe != pipe)
			continue;

		if (!to_intel_plane_state(pstate)->visible) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}
		if (plane->type == DRM_PLANE_TYPE_CURSOR) {
			minimum[id] = 0;
			y_minimum[id] = 0;
			continue;
		}

		minimum[id] = skl_ddb_min_alloc(pstate, 0);
		y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
	}

	for (i = 0; i < PLANE_CURSOR; i++) {
		alloc_size -= minimum[i];
		alloc_size -= y_minimum[i];
	}

	/*
	 * 2. Distribute the remaining space in proportion to the amount of
	 * data each plane needs to fetch from memory.
	 *
	 * FIXME: we may not allocate every single block here.
	 */
	total_data_rate = skl_get_total_relative_data_rate(cstate);
	if (total_data_rate == 0)
		return 0;

	start = alloc->start;
	for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
		unsigned int data_rate, y_data_rate;
		uint16_t plane_blocks, y_plane_blocks = 0;
		int id = skl_wm_plane_id(intel_plane);

		data_rate = cstate->wm.skl.plane_data_rate[id];

		/*
		 * allocation for (packed formats) or (uv-plane part of planar format):
		 * promote the expression to 64 bits to avoid overflowing, the
		 * result is < available as data_rate / total_data_rate < 1
		 */
		plane_blocks = minimum[id];
		plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
					total_data_rate);

		/* Leave disabled planes at (0,0) */
		if (data_rate) {
			ddb->plane[pipe][id].start = start;
			ddb->plane[pipe][id].end = start + plane_blocks;
		}

		start += plane_blocks;

		/*
		 * allocation for y_plane part of planar format:
		 */
		y_data_rate = cstate->wm.skl.plane_y_data_rate[id];

		y_plane_blocks = y_minimum[id];
		y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
					  total_data_rate);

		if (y_data_rate) {
			ddb->y_plane[pipe][id].start = start;
			ddb->y_plane[pipe][id].end = start + y_plane_blocks;
		}

		start += y_plane_blocks;
	}

	return 0;
}

static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
{
	/* TODO: Take into account the scalers once we support them */
	return config->base.adjusted_mode.crtc_clock;
}

/*
 * The max latency should be 257 (max the punit can code is 255 and we add 2us
 * for the read latency) and cpp should always be <= 8, so that
 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
	uint32_t wm_intermediate_val, ret;

	if (latency == 0)
		return UINT_MAX;

	wm_intermediate_val = latency * pixel_rate * cpp / 512;
	ret = DIV_ROUND_UP(wm_intermediate_val, 1000);

	return ret;
}

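/*
 * Worked example for skl_wm_method1() (values assumed for illustration):
 * pixel_rate = 148500 kHz, cpp = 4 and latency = 5 us gives
 * wm_intermediate_val = 5 * 148500 * 4 / 512 = 5800 and
 * DIV_ROUND_UP(5800, 1000) = 6 blocks.
 */
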
static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t cpp,
			       uint64_t tiling, uint32_t latency)
{
	uint32_t ret;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t wm_intermediate_val;

	if (latency == 0)
		return UINT_MAX;

	plane_bytes_per_line = horiz_pixels * cpp;

	if (tiling == I915_FORMAT_MOD_Y_TILED ||
	    tiling == I915_FORMAT_MOD_Yf_TILED) {
		plane_bytes_per_line *= 4;
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
		plane_blocks_per_line /= 4;
	} else if (tiling == DRM_FORMAT_MOD_NONE) {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512) + 1;
	} else {
		plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
	}

	wm_intermediate_val = latency * pixel_rate;
	ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
				plane_blocks_per_line;

	return ret;
}

static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
					      struct intel_plane_state *pstate)
{
	uint64_t adjusted_pixel_rate;
	uint64_t downscale_amount;
	uint64_t pixel_rate;

	/* Shouldn't reach here on disabled planes... */
	if (WARN_ON(!pstate->visible))
		return 0;

	/*
	 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
	 * with additional adjustments for plane-specific scaling.
	 */
	adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
	downscale_amount = skl_plane_downscale_amount(pstate);

	pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
	WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));

	return pixel_rate;
}

static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
				struct intel_crtc_state *cstate,
				struct intel_plane_state *intel_pstate,
				uint16_t ddb_allocation,
				int level,
				uint16_t *out_blocks, /* out */
				uint8_t *out_lines, /* out */
				bool *enabled /* out */)
{
	struct drm_plane_state *pstate = &intel_pstate->base;
	struct drm_framebuffer *fb = pstate->fb;
	uint32_t latency = dev_priv->wm.skl_latency[level];
	uint32_t method1, method2;
	uint32_t plane_bytes_per_line, plane_blocks_per_line;
	uint32_t res_blocks, res_lines;
	uint32_t selected_result;
	uint8_t cpp;
	uint32_t width = 0, height = 0;
	uint32_t plane_pixel_rate;

	if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
		*enabled = false;
		return 0;
	}

	width = drm_rect_width(&intel_pstate->src) >> 16;
	height = drm_rect_height(&intel_pstate->src) >> 16;

	if (intel_rotation_90_or_270(pstate->rotation))
		swap(width, height);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

	method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
	method2 = skl_wm_method2(plane_pixel_rate,
				 cstate->base.adjusted_mode.crtc_htotal,
				 width,
				 cpp,
				 fb->modifier[0],
				 latency);

	plane_bytes_per_line = width * cpp;
	plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);

	if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
	    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
		uint32_t min_scanlines = 4;
		uint32_t y_tile_minimum;
		if (intel_rotation_90_or_270(pstate->rotation)) {
			int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
				drm_format_plane_cpp(fb->pixel_format, 1) :
				drm_format_plane_cpp(fb->pixel_format, 0);

			switch (cpp) {
			case 1:
				min_scanlines = 16;
				break;
			case 2:
				min_scanlines = 8;
				break;
			case 8:
				WARN(1, "Unsupported pixel depth for rotation");
			}
		}
		y_tile_minimum = plane_blocks_per_line * min_scanlines;
		selected_result = max(method2, y_tile_minimum);
	} else {
		if ((ddb_allocation / plane_blocks_per_line) >= 1)
			selected_result = min(method1, method2);
		else
			selected_result = method1;
	}

	res_blocks = selected_result + 1;
	res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line);

	if (level >= 1 && level <= 7) {
		if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
		    fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)
			res_lines += 4;
		else
			res_blocks++;
	}

	if (res_blocks >= ddb_allocation || res_lines > 31) {
		*enabled = false;

		/*
		 * If there are no valid level 0 watermarks, then we can't
		 * support this display configuration.
		 */
		if (level) {
			return 0;
		} else {
			DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
			DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
				      to_intel_crtc(cstate->base.crtc)->pipe,
				      skl_wm_plane_id(to_intel_plane(pstate->plane)),
				      res_blocks, ddb_allocation, res_lines);

			return -EINVAL;
		}
	}

	*out_blocks = res_blocks;
	*out_lines = res_lines;
	*enabled = true;

	return 0;
}

static int
skl_compute_wm_level(const struct drm_i915_private *dev_priv,
		     struct skl_ddb_allocation *ddb,
		     struct intel_crtc_state *cstate,
		     int level,
		     struct skl_wm_level *result)
{
	struct drm_atomic_state *state = cstate->base.state;
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *intel_pstate;
	uint16_t ddb_blocks;
	enum pipe pipe = intel_crtc->pipe;
	int ret;

	/*
	 * We'll only calculate watermarks for planes that are actually
	 * enabled, so make sure all other planes are set as disabled.
	 */
	memset(result, 0, sizeof(*result));

	for_each_intel_plane_mask(&dev_priv->drm,
				  intel_plane,
				  cstate->base.plane_mask) {
		int i = skl_wm_plane_id(intel_plane);

		plane = &intel_plane->base;
		intel_pstate = NULL;
		if (state)
			intel_pstate =
				intel_atomic_get_existing_plane_state(state,
								      intel_plane);

		/*
		 * Note: If we start supporting multiple pending atomic commits
		 * against the same planes/CRTC's in the future, plane->state
		 * will no longer be the correct pre-state to use for the
		 * calculations here and we'll need to change where we get the
		 * 'unchanged' plane data from.
		 *
		 * For now this is fine because we only allow one queued commit
		 * against a CRTC.  Even if the plane isn't modified by this
		 * transaction and we don't have a plane lock, we still have
		 * the CRTC's lock, so we know that no other transactions are
		 * racing with us to update it.
		 */
		if (!intel_pstate)
			intel_pstate = to_intel_plane_state(plane->state);

		WARN_ON(!intel_pstate->base.fb);

		ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);

		ret = skl_compute_plane_wm(dev_priv,
					   cstate,
					   intel_pstate,
					   ddb_blocks,
					   level,
					   &result->plane_res_b[i],
					   &result->plane_res_l[i],
					   &result->plane_en[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static uint32_t
skl_compute_linetime_wm(struct intel_crtc_state *cstate)
{
	if (!cstate->base.active)
		return 0;

	if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0))
		return 0;

	return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000,
			    skl_pipe_pixel_rate(cstate));
}

static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
				      struct skl_wm_level *trans_wm /* out */)
{
	struct drm_crtc *crtc = cstate->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;

	if (!cstate->base.active)
		return;

	/* Until we know more, just disable transition WMs */
	for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) {
		int i = skl_wm_plane_id(intel_plane);

		trans_wm->plane_en[i] = false;
	}
}

static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
			     struct skl_ddb_allocation *ddb,
			     struct skl_pipe_wm *pipe_wm)
{
	struct drm_device *dev = cstate->base.crtc->dev;
	const struct drm_i915_private *dev_priv = to_i915(dev);
	int level, max_level = ilk_wm_max_level(dev);
	int ret;

	for (level = 0; level <= max_level; level++) {
		ret = skl_compute_wm_level(dev_priv, ddb, cstate,
					   level, &pipe_wm->wm[level]);
		if (ret)
			return ret;
	}
	pipe_wm->linetime = skl_compute_linetime_wm(cstate);

	skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);

	return 0;
}

static void skl_compute_wm_results(struct drm_device *dev,
				   struct skl_pipe_wm *p_wm,
				   struct skl_wm_values *r,
				   struct intel_crtc *intel_crtc)
{
	int level, max_level = ilk_wm_max_level(dev);
	enum pipe pipe = intel_crtc->pipe;
	uint32_t temp;
	int i;

	for (level = 0; level <= max_level; level++) {
		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
			temp = 0;

			temp |= p_wm->wm[level].plane_res_l[i] <<
					PLANE_WM_LINES_SHIFT;
			temp |= p_wm->wm[level].plane_res_b[i];
			if (p_wm->wm[level].plane_en[i])
				temp |= PLANE_WM_EN;

			r->plane[pipe][i][level] = temp;
		}

		temp = 0;

		temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR];

		if (p_wm->wm[level].plane_en[PLANE_CURSOR])
			temp |= PLANE_WM_EN;

		r->plane[pipe][PLANE_CURSOR][level] = temp;
	}

	/* transition WMs */
	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
		temp = 0;
		temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
		temp |= p_wm->trans_wm.plane_res_b[i];
		if (p_wm->trans_wm.plane_en[i])
			temp |= PLANE_WM_EN;

		r->plane_trans[pipe][i] = temp;
	}

	temp = 0;
	temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT;
	temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR];
	if (p_wm->trans_wm.plane_en[PLANE_CURSOR])
		temp |= PLANE_WM_EN;

	r->plane_trans[pipe][PLANE_CURSOR] = temp;

	r->wm_linetime[pipe] = p_wm->linetime;
}

static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
				i915_reg_t reg,
				const struct skl_ddb_entry *entry)
{
	if (entry->end)
		I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
	else
		I915_WRITE(reg, 0);
}

static void skl_write_wm_values(struct drm_i915_private *dev_priv,
				const struct skl_wm_values *new)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		int i, level, max_level = ilk_wm_max_level(dev);
		enum pipe pipe = crtc->pipe;

		if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
			continue;
		if (!crtc->active)
			continue;

		I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);

		for (level = 0; level <= max_level; level++) {
			for (i = 0; i < intel_num_planes(crtc); i++)
				I915_WRITE(PLANE_WM(pipe, i, level),
					   new->plane[pipe][i][level]);
			I915_WRITE(CUR_WM(pipe, level),
				   new->plane[pipe][PLANE_CURSOR][level]);
		}
		for (i = 0; i < intel_num_planes(crtc); i++)
			I915_WRITE(PLANE_WM_TRANS(pipe, i),
				   new->plane_trans[pipe][i]);
		I915_WRITE(CUR_WM_TRANS(pipe),
			   new->plane_trans[pipe][PLANE_CURSOR]);

		for (i = 0; i < intel_num_planes(crtc); i++) {
			skl_ddb_entry_write(dev_priv,
					    PLANE_BUF_CFG(pipe, i),
					    &new->ddb.plane[pipe][i]);
			skl_ddb_entry_write(dev_priv,
					    PLANE_NV12_BUF_CFG(pipe, i),
					    &new->ddb.y_plane[pipe][i]);
		}

		skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
				    &new->ddb.plane[pipe][PLANE_CURSOR]);
	}
}

/*
 * When setting up a new DDB allocation arrangement, we need to correctly
 * sequence the times at which the new allocations for the pipes are taken into
 * account or we'll have pipes fetching from space previously allocated to
 * another pipe.
 *
 * Roughly the sequence looks like:
 *  1. re-allocate the pipe(s) with the allocation being reduced and not
 *     overlapping with a previous light-up pipe (another way to put it is:
 *     pipes with their new allocation strickly included into their old ones).
 *  2. re-allocate the other pipes that get their allocation reduced
 *  3. allocate the pipes having their allocation increased
 *
 * Steps 1. and 2. are here to take care of the following case:
 * - Initially DDB looks like this:
 *     |   B    |   C    |
 * - enable pipe A.
 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
 *   allocation
 *     |  A  |  B  |  C  |
 *
 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
 */

static void
skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
{
	int plane;

	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);

	for_each_plane(dev_priv, pipe, plane) {
		I915_WRITE(PLANE_SURF(pipe, plane),
			   I915_READ(PLANE_SURF(pipe, plane)));
	}
	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
}

static bool
skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
			    const struct skl_ddb_allocation *new,
			    enum pipe pipe)
{
	uint16_t old_size, new_size;

	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
	new_size = skl_ddb_entry_size(&new->pipe[pipe]);

	return old_size != new_size &&
	       new->pipe[pipe].start >= old->pipe[pipe].start &&
	       new->pipe[pipe].end <= old->pipe[pipe].end;
}

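/*
 * Example of the inclusion test above: an old allocation of [0, 600) and
 * a new one of [100, 500) returns true (shrunk and fully inside the old
 * range), while a new allocation of [100, 700) returns false because it
 * extends past the old end.
 */
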
static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
				struct skl_wm_values *new_values)
{
	struct drm_device *dev = &dev_priv->drm;
	struct skl_ddb_allocation *cur_ddb, *new_ddb;
	bool reallocated[I915_MAX_PIPES] = {};
	struct intel_crtc *crtc;
	enum pipe pipe;

	new_ddb = &new_values->ddb;
	cur_ddb = &dev_priv->wm.skl_hw.ddb;

	/*
	 * First pass: flush the pipes with the new allocation contained into
	 * the old space.
	 *
	 * We'll wait for the vblank on those pipes to ensure we can safely
	 * re-allocate the freed space without this pipe fetching from it.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 1);
		intel_wait_for_vblank(dev, pipe);

		reallocated[pipe] = true;
	}

	/*
	 * Second pass: flush the pipes that are having their allocation
	 * reduced, but overlapping with a previous allocation.
	 *
	 * Here as well we need to wait for the vblank to make sure the freed
	 * space is not used anymore.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		if (reallocated[pipe])
			continue;

		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
			skl_wm_flush_pipe(dev_priv, pipe, 2);
			intel_wait_for_vblank(dev, pipe);
			reallocated[pipe] = true;
		}
	}

	/*
	 * Third pass: flush the pipes that got more space allocated.
	 *
	 * We don't need to actively wait for the update here, next vblank
	 * will just get more DDB space with the correct WM values.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		pipe = crtc->pipe;

		/*
		 * At this point, only the pipes more space than before are
		 * left to re-allocate.
		 */
		if (reallocated[pipe])
			continue;

		skl_wm_flush_pipe(dev_priv, pipe, 3);
	}
}

static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
			      struct skl_ddb_allocation *ddb, /* out */
			      struct skl_pipe_wm *pipe_wm, /* out */
			      bool *changed /* out */)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
	struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
	int ret;

	ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
	if (ret)
		return ret;

	if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
		*changed = false;
	else
		*changed = true;

	return 0;
}

static uint32_t
pipes_modified(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	uint32_t i, ret = 0;

	for_each_crtc_in_state(state, crtc, cstate, i)
		ret |= drm_crtc_mask(crtc);

	return ret;
}

static int
skl_compute_ddb(struct drm_atomic_state *state)
{
    struct drm_device *dev = state->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
    struct intel_crtc *intel_crtc;
    struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
    uint32_t realloc_pipes = pipes_modified(state);
    int ret;

    /*
     * If this is our first atomic update following hardware readout,
     * we can't trust the DDB that the BIOS programmed for us.  Let's
     * pretend that all pipes switched active status so that we'll
     * ensure a full DDB recompute.
     */
    if (dev_priv->wm.distrust_bios_wm) {
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                               state->acquire_ctx);
        if (ret)
            return ret;

        intel_state->active_pipe_changes = ~0;

        /*
         * We usually only initialize intel_state->active_crtcs if
         * we're doing a modeset; make sure this field is always
         * initialized during the sanitization process that happens
         * on the first commit too.
         */
        if (!intel_state->modeset)
            intel_state->active_crtcs = dev_priv->active_crtcs;
    }

    /*
     * If the modeset changes which CRTC's are active, we need to
     * recompute the DDB allocation for *all* active pipes, even
     * those that weren't otherwise being modified in any way by this
     * atomic commit.  Due to the shrinking of the per-pipe allocations
     * when new active CRTC's are added, it's possible for a pipe that
     * we were already using and aren't changing at all here to suddenly
     * become invalid if its DDB needs exceed its new allocation.
     *
     * Note that if we wind up doing a full DDB recompute, we can't let
     * any other display updates race with this transaction, so we need
     * to grab the lock on *all* CRTC's.
     */
    if (intel_state->active_pipe_changes) {
        realloc_pipes = ~0;
        intel_state->wm_results.dirty_pipes = ~0;
    }

    for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
        struct intel_crtc_state *cstate;

        cstate = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(cstate))
            return PTR_ERR(cstate);

        ret = skl_allocate_pipe_ddb(cstate, ddb);
        if (ret)
            return ret;

        ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
        if (ret)
            return ret;
    }

    return 0;
}
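/*
 * Worked flow (a sketch of the intent): on the first commit after readout,
 * distrust_bios_wm forces active_pipe_changes = ~0, so realloc_pipes covers
 * every pipe and each one gets skl_allocate_pipe_ddb() plus its planes added
 * to the state; later commits only touch the pipes from pipes_modified()
 * unless the set of active CRTCs changed.
 */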
static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
                     struct skl_wm_values *src,
                     enum pipe pipe)
{
    dst->wm_linetime[pipe] = src->wm_linetime[pipe];
    memcpy(dst->plane[pipe], src->plane[pipe],
           sizeof(dst->plane[pipe]));
    memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
           sizeof(dst->plane_trans[pipe]));

    dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
    memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
           sizeof(dst->ddb.y_plane[pipe]));
    memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
           sizeof(dst->ddb.plane[pipe]));
}
static int
skl_compute_wm(struct drm_atomic_state *state)
{
    struct drm_crtc *crtc;
    struct drm_crtc_state *cstate;
    struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
    struct skl_wm_values *results = &intel_state->wm_results;
    struct skl_pipe_wm *pipe_wm;
    bool changed = false;
    int ret, i;

    /*
     * If this transaction isn't actually touching any CRTC's, don't
     * bother with watermark calculation.  Note that if we pass this
     * test, we're guaranteed to hold at least one CRTC state mutex,
     * which means we can safely use values like dev_priv->active_crtcs
     * since any racing commits that want to update them would need to
     * hold _all_ CRTC state mutexes.
     */
    for_each_crtc_in_state(state, crtc, cstate, i)
        changed = true;
    if (!changed)
        return 0;

    /* Clear all dirty flags */
    results->dirty_pipes = 0;

    ret = skl_compute_ddb(state);
    if (ret)
        return ret;

    /*
     * Calculate WM's for all pipes that are part of this transaction.
     * Note that the DDB allocation above may have added more CRTC's that
     * weren't otherwise being modified (and set bits in dirty_pipes) if
     * pipe allocations had to change.
     *
     * FIXME: Now that we're doing this in the atomic check phase, we
     * should allow skl_update_pipe_wm() to return failure in cases where
     * no suitable watermark values can be found.
     */
    for_each_crtc_in_state(state, crtc, cstate, i) {
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *intel_cstate =
            to_intel_crtc_state(cstate);

        pipe_wm = &intel_cstate->wm.skl.optimal;
        ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
                                 &changed);
        if (ret)
            return ret;

        if (changed)
            results->dirty_pipes |= drm_crtc_mask(crtc);

        if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
            /* This pipe's WM's did not change */
            continue;

        intel_cstate->update_wm_pre = true;
        skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
    }

    return 0;
}
static void skl_update_wm(struct drm_crtc *crtc)
{
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct skl_wm_values *results = &dev_priv->wm.skl_results;
    struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
    struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
    struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
    enum pipe pipe;

    if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
        return;

    intel_crtc->wm.active.skl = *pipe_wm;

    mutex_lock(&dev_priv->wm.wm_mutex);

    skl_write_wm_values(dev_priv, results);
    skl_flush_wm_values(dev_priv, results);

    /*
     * Store the new configuration (but only for the pipes that have
     * changed; the other values weren't recomputed).
     */
    for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
        skl_copy_wm_for_pipe(hw_vals, results, pipe);

    mutex_unlock(&dev_priv->wm.wm_mutex);
}
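/*
 * Example (illustrative): if dirty_pipes == BIT(PIPE_A) | BIT(PIPE_B), the
 * for_each_pipe_masked() loop above copies only the A and B entries into
 * skl_hw, leaving the pipe C values from the previous hardware readout
 * untouched, since they were never recomputed in this transaction.
 */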
static void ilk_compute_wm_config(struct drm_device *dev,
                                  struct intel_wm_config *config)
{
    struct intel_crtc *crtc;

    /* Compute the currently _active_ config */
    for_each_intel_crtc(dev, crtc) {
        const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;

        if (!wm->pipe_enabled)
            continue;

        config->sprites_enabled |= wm->sprites_enabled;
        config->sprites_scaled |= wm->sprites_scaled;
        config->num_pipes_active++;
    }
}
static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
{
    struct drm_device *dev = &dev_priv->drm;
    struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
    struct ilk_wm_maximums max;
    struct intel_wm_config config = {};
    struct ilk_wm_values results = {};
    enum intel_ddb_partitioning partitioning;

    ilk_compute_wm_config(dev, &config);

    ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
    ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

    /* 5/6 split only in single pipe config on IVB+ */
    if (INTEL_INFO(dev)->gen >= 7 &&
        config.num_pipes_active == 1 && config.sprites_enabled) {
        ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
        ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

        best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
    } else {
        best_lp_wm = &lp_wm_1_2;
    }

    partitioning = (best_lp_wm == &lp_wm_1_2) ?
                   INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

    ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

    ilk_write_wm_values(dev_priv, &results);
}
static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
{
    struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

    mutex_lock(&dev_priv->wm.wm_mutex);
    intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
    ilk_program_watermarks(dev_priv);
    mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
{
    struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);

    mutex_lock(&dev_priv->wm.wm_mutex);
    if (cstate->wm.need_postvbl_update) {
        intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
        ilk_program_watermarks(dev_priv);
    }
    mutex_unlock(&dev_priv->wm.wm_mutex);
}
static void skl_pipe_wm_active_state(uint32_t val,
                                     struct skl_pipe_wm *active,
                                     bool is_transwm,
                                     bool is_cursor,
                                     int i,
                                     int level)
{
    bool is_enabled = (val & PLANE_WM_EN) != 0;

    if (!is_transwm) {
        if (!is_cursor) {
            active->wm[level].plane_en[i] = is_enabled;
            active->wm[level].plane_res_b[i] =
                val & PLANE_WM_BLOCKS_MASK;
            active->wm[level].plane_res_l[i] =
                (val >> PLANE_WM_LINES_SHIFT) &
                    PLANE_WM_LINES_MASK;
        } else {
            active->wm[level].plane_en[PLANE_CURSOR] = is_enabled;
            active->wm[level].plane_res_b[PLANE_CURSOR] =
                val & PLANE_WM_BLOCKS_MASK;
            active->wm[level].plane_res_l[PLANE_CURSOR] =
                (val >> PLANE_WM_LINES_SHIFT) &
                    PLANE_WM_LINES_MASK;
        }
    } else {
        if (!is_cursor) {
            active->trans_wm.plane_en[i] = is_enabled;
            active->trans_wm.plane_res_b[i] =
                val & PLANE_WM_BLOCKS_MASK;
            active->trans_wm.plane_res_l[i] =
                (val >> PLANE_WM_LINES_SHIFT) &
                    PLANE_WM_LINES_MASK;
        } else {
            active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled;
            active->trans_wm.plane_res_b[PLANE_CURSOR] =
                val & PLANE_WM_BLOCKS_MASK;
            active->trans_wm.plane_res_l[PLANE_CURSOR] =
                (val >> PLANE_WM_LINES_SHIFT) &
                    PLANE_WM_LINES_MASK;
        }
    }
}
static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
    struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
    enum pipe pipe = intel_crtc->pipe;
    int level, i, max_level;
    uint32_t temp;

    max_level = ilk_wm_max_level(dev);

    hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

    for (level = 0; level <= max_level; level++) {
        for (i = 0; i < intel_num_planes(intel_crtc); i++)
            hw->plane[pipe][i][level] =
                I915_READ(PLANE_WM(pipe, i, level));
        hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level));
    }

    for (i = 0; i < intel_num_planes(intel_crtc); i++)
        hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
    hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe));

    if (!intel_crtc->active)
        return;

    hw->dirty_pipes |= drm_crtc_mask(crtc);

    active->linetime = hw->wm_linetime[pipe];

    for (level = 0; level <= max_level; level++) {
        for (i = 0; i < intel_num_planes(intel_crtc); i++) {
            temp = hw->plane[pipe][i][level];
            skl_pipe_wm_active_state(temp, active, false,
                                     false, i, level);
        }
        temp = hw->plane[pipe][PLANE_CURSOR][level];
        skl_pipe_wm_active_state(temp, active, false, true, i, level);
    }

    for (i = 0; i < intel_num_planes(intel_crtc); i++) {
        temp = hw->plane_trans[pipe][i];
        skl_pipe_wm_active_state(temp, active, true, false, i, 0);
    }

    temp = hw->plane_trans[pipe][PLANE_CURSOR];
    skl_pipe_wm_active_state(temp, active, true, true, i, 0);

    intel_crtc->wm.active.skl = *active;
}
void skl_wm_get_hw_state(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
    struct drm_crtc *crtc;

    skl_ddb_get_hw_state(dev_priv, ddb);
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
        skl_pipe_wm_get_hw_state(crtc);

    if (dev_priv->active_crtcs) {
        /* Fully recompute DDB on first atomic commit */
        dev_priv->wm.distrust_bios_wm = true;
    } else {
        /* Easy/common case; just sanitize DDB now if everything off */
        memset(ddb, 0, sizeof(*ddb));
    }
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct ilk_wm_values *hw = &dev_priv->wm.hw;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
    struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
    enum pipe pipe = intel_crtc->pipe;
    static const i915_reg_t wm0_pipe_reg[] = {
        [PIPE_A] = WM0_PIPEA_ILK,
        [PIPE_B] = WM0_PIPEB_ILK,
        [PIPE_C] = WM0_PIPEC_IVB,
    };

    hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
    if (IS_HASWELL(dev) || IS_BROADWELL(dev))
        hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

    memset(active, 0, sizeof(*active));

    active->pipe_enabled = intel_crtc->active;

    if (active->pipe_enabled) {
        u32 tmp = hw->wm_pipe[pipe];

        /*
         * For active pipes LP0 watermark is marked as
         * enabled, and LP1+ watermarks as disabled since
         * we can't really reverse compute them in case
         * multiple pipes are active.
         */
        active->wm[0].enable = true;
        active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
        active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
        active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
        active->linetime = hw->wm_linetime[pipe];
    } else {
        int level, max_level = ilk_wm_max_level(dev);

        /*
         * For inactive pipes, all watermark levels
         * should be marked as enabled but zeroed,
         * which is what we'd compute them to.
         */
        for (level = 0; level <= max_level; level++)
            active->wm[level].enable = true;
    }

    intel_crtc->wm.active.ilk = *active;
}
#define _FW_WM(value, plane) \
    (((value) & DSPFW_ ## plane ## _MASK) >> DSPFW_ ## plane ## _SHIFT)
#define _FW_WM_VLV(value, plane) \
    (((value) & DSPFW_ ## plane ## _MASK_VLV) >> DSPFW_ ## plane ## _SHIFT)

static void vlv_read_wm_values(struct drm_i915_private *dev_priv,
                               struct vlv_wm_values *wm)
{
    enum pipe pipe;
    uint32_t tmp;

    for_each_pipe(dev_priv, pipe) {
        tmp = I915_READ(VLV_DDL(pipe));

        wm->ddl[pipe].primary =
            (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
        wm->ddl[pipe].cursor =
            (tmp >> DDL_CURSOR_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
        wm->ddl[pipe].sprite[0] =
            (tmp >> DDL_SPRITE_SHIFT(0)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
        wm->ddl[pipe].sprite[1] =
            (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK);
    }

    tmp = I915_READ(DSPFW1);
    wm->sr.plane = _FW_WM(tmp, SR);
    wm->pipe[PIPE_B].cursor = _FW_WM(tmp, CURSORB);
    wm->pipe[PIPE_B].primary = _FW_WM_VLV(tmp, PLANEB);
    wm->pipe[PIPE_A].primary = _FW_WM_VLV(tmp, PLANEA);

    tmp = I915_READ(DSPFW2);
    wm->pipe[PIPE_A].sprite[1] = _FW_WM_VLV(tmp, SPRITEB);
    wm->pipe[PIPE_A].cursor = _FW_WM(tmp, CURSORA);
    wm->pipe[PIPE_A].sprite[0] = _FW_WM_VLV(tmp, SPRITEA);

    tmp = I915_READ(DSPFW3);
    wm->sr.cursor = _FW_WM(tmp, CURSOR_SR);

    if (IS_CHERRYVIEW(dev_priv)) {
        tmp = I915_READ(DSPFW7_CHV);
        wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
        wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

        tmp = I915_READ(DSPFW8_CHV);
        wm->pipe[PIPE_C].sprite[1] = _FW_WM_VLV(tmp, SPRITEF);
        wm->pipe[PIPE_C].sprite[0] = _FW_WM_VLV(tmp, SPRITEE);

        tmp = I915_READ(DSPFW9_CHV);
        wm->pipe[PIPE_C].primary = _FW_WM_VLV(tmp, PLANEC);
        wm->pipe[PIPE_C].cursor = _FW_WM(tmp, CURSORC);

        tmp = I915_READ(DSPHOWM);
        wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
        wm->pipe[PIPE_C].sprite[1] |= _FW_WM(tmp, SPRITEF_HI) << 8;
        wm->pipe[PIPE_C].sprite[0] |= _FW_WM(tmp, SPRITEE_HI) << 8;
        wm->pipe[PIPE_C].primary |= _FW_WM(tmp, PLANEC_HI) << 8;
        wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
        wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
        wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
        wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
        wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
        wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
    } else {
        tmp = I915_READ(DSPFW7);
        wm->pipe[PIPE_B].sprite[1] = _FW_WM_VLV(tmp, SPRITED);
        wm->pipe[PIPE_B].sprite[0] = _FW_WM_VLV(tmp, SPRITEC);

        tmp = I915_READ(DSPHOWM);
        wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9;
        wm->pipe[PIPE_B].sprite[1] |= _FW_WM(tmp, SPRITED_HI) << 8;
        wm->pipe[PIPE_B].sprite[0] |= _FW_WM(tmp, SPRITEC_HI) << 8;
        wm->pipe[PIPE_B].primary |= _FW_WM(tmp, PLANEB_HI) << 8;
        wm->pipe[PIPE_A].sprite[1] |= _FW_WM(tmp, SPRITEB_HI) << 8;
        wm->pipe[PIPE_A].sprite[0] |= _FW_WM(tmp, SPRITEA_HI) << 8;
        wm->pipe[PIPE_A].primary |= _FW_WM(tmp, PLANEA_HI) << 8;
    }
}

#undef _FW_WM
#undef _FW_WM_VLV
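/*
 * Macro expansion example (illustrative): _FW_WM_VLV(tmp, PLANEA) expands to
 * ((tmp & DSPFW_PLANEA_MASK_VLV) >> DSPFW_PLANEA_SHIFT), i.e. it extracts the
 * low bits of the plane A watermark from DSPFW1; the DSPHOWM reads above then
 * OR in the single high bit shifted by 8 (9 for the SR plane) to rebuild the
 * full watermark value.
 */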
void vlv_wm_get_hw_state(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct vlv_wm_values *wm = &dev_priv->wm.vlv;
    struct intel_plane *plane;
    enum pipe pipe;
    u32 val;

    vlv_read_wm_values(dev_priv, wm);

    for_each_intel_plane(dev, plane) {
        switch (plane->base.type) {
            int sprite;
        case DRM_PLANE_TYPE_CURSOR:
            plane->wm.fifo_size = 63;
            break;
        case DRM_PLANE_TYPE_PRIMARY:
            plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, 0);
            break;
        case DRM_PLANE_TYPE_OVERLAY:
            sprite = plane->plane;
            plane->wm.fifo_size = vlv_get_fifo_size(dev, plane->pipe, sprite + 1);
            break;
        }
    }

    wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
    wm->level = VLV_WM_LEVEL_PM2;

    if (IS_CHERRYVIEW(dev_priv)) {
        mutex_lock(&dev_priv->rps.hw_lock);

        val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
        if (val & DSP_MAXFIFO_PM5_ENABLE)
            wm->level = VLV_WM_LEVEL_PM5;

        /*
         * If DDR DVFS is disabled in the BIOS, Punit
         * will never ack the request. So if that happens
         * assume we don't have to enable/disable DDR DVFS
         * dynamically. To test that just set the REQ_ACK
         * bit to poke the Punit, but don't change the
         * HIGH/LOW bits so that we don't actually change
         * the current state.
         */
        val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
        val |= FORCE_DDR_FREQ_REQ_ACK;
        vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);

        if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
                      FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
            DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
                          "assuming DDR DVFS is disabled\n");
            dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
        } else {
            val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
            if ((val & FORCE_DDR_HIGH_FREQ) == 0)
                wm->level = VLV_WM_LEVEL_DDR_DVFS;
        }

        mutex_unlock(&dev_priv->rps.hw_lock);
    }

    for_each_pipe(dev_priv, pipe)
        DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
                      pipe_name(pipe), wm->pipe[pipe].primary, wm->pipe[pipe].cursor,
                      wm->pipe[pipe].sprite[0], wm->pipe[pipe].sprite[1]);

    DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
                  wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct ilk_wm_values *hw = &dev_priv->wm.hw;
    struct drm_crtc *crtc;

    for_each_crtc(dev, crtc)
        ilk_pipe_wm_get_hw_state(crtc);

    hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
    hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
    hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

    hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
    if (INTEL_INFO(dev)->gen >= 7) {
        hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
        hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
    }

    if (IS_HASWELL(dev) || IS_BROADWELL(dev))
        hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
            INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
    else if (IS_IVYBRIDGE(dev))
        hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
            INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

    hw->enable_fbc_wm =
        !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that.  And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
    struct drm_i915_private *dev_priv = to_i915(crtc->dev);

    if (dev_priv->display.update_wm)
        dev_priv->display.update_wm(crtc);
}
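/*
 * Worked example of the SR formula above (illustrative numbers only): with a
 * 100 MHz dotclock, htotal = 1100 and hdisplay = 1024 at 4 bytes per pixel,
 * line time = 1100 / 100000000 s = 11 us.  Assuming a pessimal 30 us latency,
 * watermark = (trunc(30/11) + 1) * 1024 * 4 = 3 * 4096 = 12288 bytes, rounded
 * up and padded with 2 extra FIFO entries for the clock crossing.
 */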
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
    u16 rgvswctl;

    assert_spin_locked(&mchdev_lock);

    rgvswctl = I915_READ16(MEMSWCTL);
    if (rgvswctl & MEMCTL_CMD_STS) {
        DRM_DEBUG("gpu busy, RCS change rejected\n");
        return false; /* still busy with another command */
    }

    rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
        (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
    I915_WRITE16(MEMSWCTL, rgvswctl);
    POSTING_READ16(MEMSWCTL);

    rgvswctl |= MEMCTL_CMD_STS;
    I915_WRITE16(MEMSWCTL, rgvswctl);

    return true;
}
static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
{
    u32 rgvmodectl;
    u8 fmax, fmin, fstart, vstart;

    spin_lock_irq(&mchdev_lock);

    rgvmodectl = I915_READ(MEMMODECTL);

    /* Enable temp reporting */
    I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
    I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

    /* 100ms RC evaluation intervals */
    I915_WRITE(RCUPEI, 100000);
    I915_WRITE(RCDNEI, 100000);

    /* Set max/min thresholds to 90ms and 80ms respectively */
    I915_WRITE(RCBMAXAVG, 90000);
    I915_WRITE(RCBMINAVG, 80000);

    I915_WRITE(MEMIHYST, 1);

    /* Set up min, max, and cur for interrupt handling */
    fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
    fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
    fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
        MEMMODE_FSTART_SHIFT;

    vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
        PXVFREQ_PX_SHIFT;

    dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
    dev_priv->ips.fstart = fstart;

    dev_priv->ips.max_delay = fstart;
    dev_priv->ips.min_delay = fmin;
    dev_priv->ips.cur_delay = fstart;

    DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
                     fmax, fmin, fstart);

    I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

    /*
     * Interrupts will be enabled in ironlake_irq_postinstall
     */

    I915_WRITE(VIDSTART, vstart);
    POSTING_READ(VIDSTART);

    rgvmodectl |= MEMMODE_SWMODE_EN;
    I915_WRITE(MEMMODECTL, rgvmodectl);

    if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
        DRM_ERROR("stuck trying to change perf mode\n");
    mdelay(1);

    ironlake_set_drps(dev_priv, fstart);

    dev_priv->ips.last_count1 = I915_READ(DMIEC) +
        I915_READ(DDREC) + I915_READ(CSIEC);
    dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
    dev_priv->ips.last_count2 = I915_READ(GFXEC);
    dev_priv->ips.last_time2 = ktime_get_raw_ns();

    spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
{
    u16 rgvswctl;

    spin_lock_irq(&mchdev_lock);

    rgvswctl = I915_READ16(MEMSWCTL);

    /* Ack interrupts, disable EFC interrupt */
    I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
    I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
    I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
    I915_WRITE(DEIIR, DE_PCU_EVENT);
    I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

    /* Go back to the starting frequency */
    ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
    mdelay(1);
    rgvswctl |= MEMCTL_CMD_STS;
    I915_WRITE(MEMSWCTL, rgvswctl);
    mdelay(1);

    spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
    u32 limits;

    /* Only set the down limit when we've reached the lowest level to avoid
     * getting more interrupts, otherwise leave this clear. This prevents a
     * race in the hw when coming out of rc6: There's a tiny window where
     * the hw runs at the minimal clock before selecting the desired
     * frequency, if the down threshold expires in that window we will not
     * receive a down interrupt. */
    if (IS_GEN9(dev_priv)) {
        limits = (dev_priv->rps.max_freq_softlimit) << 23;
        if (val <= dev_priv->rps.min_freq_softlimit)
            limits |= (dev_priv->rps.min_freq_softlimit) << 14;
    } else {
        limits = dev_priv->rps.max_freq_softlimit << 24;
        if (val <= dev_priv->rps.min_freq_softlimit)
            limits |= dev_priv->rps.min_freq_softlimit << 16;
    }

    return limits;
}
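/*
 * Bit-layout example (illustrative values): with max_freq_softlimit = 0x10
 * and min_freq_softlimit = 0x4, gen9 packs the limits as
 * (0x10 << 23) | (0x4 << 14) once val reaches the floor, while earlier gens
 * use the (max << 24) | (min << 16) layout of GEN6_RP_INTERRUPT_LIMITS; only
 * the down limit is conditional, for the rc6-exit race described above.
 */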
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
    int new_power;
    u32 threshold_up = 0, threshold_down = 0; /* in % */
    u32 ei_up = 0, ei_down = 0;

    new_power = dev_priv->rps.power;
    switch (dev_priv->rps.power) {
    case LOW_POWER:
        if (val > dev_priv->rps.efficient_freq + 1 &&
            val > dev_priv->rps.cur_freq)
            new_power = BETWEEN;
        break;

    case BETWEEN:
        if (val <= dev_priv->rps.efficient_freq &&
            val < dev_priv->rps.cur_freq)
            new_power = LOW_POWER;
        else if (val >= dev_priv->rps.rp0_freq &&
                 val > dev_priv->rps.cur_freq)
            new_power = HIGH_POWER;
        break;

    case HIGH_POWER:
        if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
            val < dev_priv->rps.cur_freq)
            new_power = BETWEEN;
        break;
    }
    /* Max/min bins are special */
    if (val <= dev_priv->rps.min_freq_softlimit)
        new_power = LOW_POWER;
    if (val >= dev_priv->rps.max_freq_softlimit)
        new_power = HIGH_POWER;
    if (new_power == dev_priv->rps.power)
        return;

    /* Note the units here are not exactly 1us, but 1280ns. */
    switch (new_power) {
    case LOW_POWER:
        /* Upclock if more than 95% busy over 16ms */
        ei_up = 16000;
        threshold_up = 95;

        /* Downclock if less than 85% busy over 32ms */
        ei_down = 32000;
        threshold_down = 85;
        break;

    case BETWEEN:
        /* Upclock if more than 90% busy over 13ms */
        ei_up = 13000;
        threshold_up = 90;

        /* Downclock if less than 75% busy over 32ms */
        ei_down = 32000;
        threshold_down = 75;
        break;

    case HIGH_POWER:
        /* Upclock if more than 85% busy over 10ms */
        ei_up = 10000;
        threshold_up = 85;

        /* Downclock if less than 60% busy over 32ms */
        ei_down = 32000;
        threshold_down = 60;
        break;
    }

    I915_WRITE(GEN6_RP_UP_EI,
               GT_INTERVAL_FROM_US(dev_priv, ei_up));
    I915_WRITE(GEN6_RP_UP_THRESHOLD,
               GT_INTERVAL_FROM_US(dev_priv,
                                   ei_up * threshold_up / 100));

    I915_WRITE(GEN6_RP_DOWN_EI,
               GT_INTERVAL_FROM_US(dev_priv, ei_down));
    I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
               GT_INTERVAL_FROM_US(dev_priv,
                                   ei_down * threshold_down / 100));

    I915_WRITE(GEN6_RP_CONTROL,
               GEN6_RP_MEDIA_TURBO |
               GEN6_RP_MEDIA_HW_NORMAL_MODE |
               GEN6_RP_MEDIA_IS_GFX |
               GEN6_RP_ENABLE |
               GEN6_RP_UP_BUSY_AVG |
               GEN6_RP_DOWN_IDLE_AVG);

    dev_priv->rps.power = new_power;
    dev_priv->rps.up_threshold = threshold_up;
    dev_priv->rps.down_threshold = threshold_down;
    dev_priv->rps.last_adj = 0;
}
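/*
 * Example of the threshold arithmetic (illustrative): for BETWEEN,
 * ei_up = 13000 us and threshold_up = 90, so GEN6_RP_UP_THRESHOLD is written
 * with GT_INTERVAL_FROM_US(dev_priv, 13000 * 90 / 100) =
 * GT_INTERVAL_FROM_US(dev_priv, 11700), i.e. the hardware upclocks when the
 * GT was busy for more than 11.7 ms of the 13 ms evaluation interval (both
 * programmed in ~1280 ns hardware units).
 */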
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
    u32 mask = 0;

    if (val > dev_priv->rps.min_freq_softlimit)
        mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
    if (val < dev_priv->rps.max_freq_softlimit)
        mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;

    mask &= dev_priv->pm_rps_events;

    return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
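/*
 * Illustrative: at the softlimit floor only the UP events stay unmasked, at
 * the ceiling only the DOWN events, and anywhere in between both sets are
 * live; the result is filtered through pm_rps_events and inverted, since
 * GEN6_PMINTRMSK is a mask register where a set bit disables the interrupt.
 */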
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
    /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
    if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
        return;

    WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
    WARN_ON(val > dev_priv->rps.max_freq);
    WARN_ON(val < dev_priv->rps.min_freq);

    /* min/max delay may still have been modified so be sure to
     * write the limits value.
     */
    if (val != dev_priv->rps.cur_freq) {
        gen6_set_rps_thresholds(dev_priv, val);

        if (IS_GEN9(dev_priv))
            I915_WRITE(GEN6_RPNSWREQ,
                       GEN9_FREQUENCY(val));
        else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
            I915_WRITE(GEN6_RPNSWREQ,
                       HSW_FREQUENCY(val));
        else
            I915_WRITE(GEN6_RPNSWREQ,
                       GEN6_FREQUENCY(val) |
                       GEN6_OFFSET(0) |
                       GEN6_AGGRESSIVE_TURBO);
    }

    /* Make sure we continue to get interrupts
     * until we hit the minimum or maximum frequencies.
     */
    I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
    I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

    POSTING_READ(GEN6_RPNSWREQ);

    dev_priv->rps.cur_freq = val;
    trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
    WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
    WARN_ON(val > dev_priv->rps.max_freq);
    WARN_ON(val < dev_priv->rps.min_freq);

    if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
                  "Odd GPU freq value\n"))
        val &= ~1;

    I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

    if (val != dev_priv->rps.cur_freq) {
        vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
        if (!IS_CHERRYVIEW(dev_priv))
            gen6_set_rps_thresholds(dev_priv, val);
    }

    dev_priv->rps.cur_freq = val;
    trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Forcewake Media well.
 * 2. Request idle freq.
 * 3. Release Forcewake of Media well.
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
    u32 val = dev_priv->rps.idle_freq;

    if (dev_priv->rps.cur_freq <= val)
        return;

    /* Wake up the media well, as that takes a lot less
     * power than the Render well. */
    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
    valleyview_set_rps(dev_priv, val);
    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
    mutex_lock(&dev_priv->rps.hw_lock);
    if (dev_priv->rps.enabled) {
        if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
            gen6_rps_reset_ei(dev_priv);
        I915_WRITE(GEN6_PMINTRMSK,
                   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));

        gen6_enable_rps_interrupts(dev_priv);

        /* Ensure we start at the user's desired frequency */
        intel_set_rps(dev_priv,
                      clamp(dev_priv->rps.cur_freq,
                            dev_priv->rps.min_freq_softlimit,
                            dev_priv->rps.max_freq_softlimit));
    }
    mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
    /* Flush our bottom-half so that it does not race with us
     * setting the idle frequency and so that it is bounded by
     * our rpm wakeref. And then disable the interrupts to stop any
     * further RPS reclocking whilst we are asleep.
     */
    gen6_disable_rps_interrupts(dev_priv);

    mutex_lock(&dev_priv->rps.hw_lock);
    if (dev_priv->rps.enabled) {
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
            vlv_set_rps_idle(dev_priv);
        else
            gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
        dev_priv->rps.last_adj = 0;
        I915_WRITE(GEN6_PMINTRMSK,
                   gen6_sanitize_rps_pm_mask(dev_priv, ~0));
    }
    mutex_unlock(&dev_priv->rps.hw_lock);

    spin_lock(&dev_priv->rps.client_lock);
    while (!list_empty(&dev_priv->rps.clients))
        list_del_init(dev_priv->rps.clients.next);
    spin_unlock(&dev_priv->rps.client_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv,
                    struct intel_rps_client *rps,
                    unsigned long submitted)
{
    /* This is intentionally racy! We peek at the state here, then
     * validate inside the RPS worker.
     */
    if (!(dev_priv->gt.awake &&
          dev_priv->rps.enabled &&
          dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
        return;

    /* Force a RPS boost (and don't count it against the client) if
     * the GPU is severely congested.
     */
    if (rps && time_after(jiffies, submitted + DRM_I915_THROTTLE_JIFFIES))
        rps = NULL;

    spin_lock(&dev_priv->rps.client_lock);
    if (rps == NULL || list_empty(&rps->link)) {
        spin_lock_irq(&dev_priv->irq_lock);
        if (dev_priv->rps.interrupts_enabled) {
            dev_priv->rps.client_boost = true;
            schedule_work(&dev_priv->rps.work);
        }
        spin_unlock_irq(&dev_priv->irq_lock);

        if (rps != NULL) {
            list_add(&rps->link, &dev_priv->rps.clients);
            rps->boosts++;
        } else
            dev_priv->rps.boosts++;
    }
    spin_unlock(&dev_priv->rps.client_lock);
}
void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
        valleyview_set_rps(dev_priv, val);
    else
        gen6_set_rps(dev_priv, val);
}
static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
{
    I915_WRITE(GEN6_RC_CONTROL, 0);
    I915_WRITE(GEN9_PG_ENABLE, 0);
}

static void gen9_disable_rps(struct drm_i915_private *dev_priv)
{
    I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void gen6_disable_rps(struct drm_i915_private *dev_priv)
{
    I915_WRITE(GEN6_RC_CONTROL, 0);
    I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
    I915_WRITE(GEN6_RP_CONTROL, 0);
}

static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
{
    I915_WRITE(GEN6_RC_CONTROL, 0);
}

static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
{
    /* We're doing forcewake before disabling RC6;
     * this is what the BIOS expects when going into suspend */
    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    I915_WRITE(GEN6_RC_CONTROL, 0);

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
{
    if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
        if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
            mode = GEN6_RC_CTL_RC6_ENABLE;
        else
            mode = 0;
    }
    if (HAS_RC6p(dev_priv))
        DRM_DEBUG_DRIVER("Enabling RC6 states: "
                         "RC6 %s RC6p %s RC6pp %s\n",
                         onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
                         onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
                         onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
    else
        DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
                         onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
}
static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
{
    struct i915_ggtt *ggtt = &dev_priv->ggtt;
    bool enable_rc6 = true;
    unsigned long rc6_ctx_base;
    u32 rc_ctl;
    int rc_sw_target;

    rc_ctl = I915_READ(GEN6_RC_CONTROL);
    rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
        RC_SW_TARGET_STATE_SHIFT;
    DRM_DEBUG_DRIVER("BIOS enabled RC states: "
                     "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
                     onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
                     onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
                     rc_sw_target);

    if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
        DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
        enable_rc6 = false;
    }

    /*
     * The exact context size is not known for BXT, so assume a page size
     * for this check.
     */
    rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
    if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
          (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
                                       ggtt->stolen_reserved_size))) {
        DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
        enable_rc6 = false;
    }

    if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
          ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
          ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
          ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
        DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
        enable_rc6 = false;
    }

    if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
        !I915_READ(GEN8_PUSHBUS_ENABLE) ||
        !I915_READ(GEN8_PUSHBUS_SHIFT)) {
        DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
        enable_rc6 = false;
    }

    if (!I915_READ(GEN6_GFXPAUSE)) {
        DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
        enable_rc6 = false;
    }

    if (!I915_READ(GEN8_MISC_CTRL0)) {
        DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
        enable_rc6 = false;
    }

    return enable_rc6;
}
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
{
    /* No RC6 before Ironlake and code is gone for ilk. */
    if (INTEL_INFO(dev_priv)->gen < 6)
        return 0;

    if (!enable_rc6)
        return 0;

    if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
        DRM_INFO("RC6 disabled by BIOS\n");
        return 0;
    }

    /* Respect the kernel parameter if it is set */
    if (enable_rc6 >= 0) {
        int mask;

        if (HAS_RC6p(dev_priv))
            mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
                   INTEL_RC6pp_ENABLE;
        else
            mask = INTEL_RC6_ENABLE;

        if ((enable_rc6 & mask) != enable_rc6)
            DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
                             "(requested %d, valid %d)\n",
                             enable_rc6 & mask, enable_rc6, mask);

        return enable_rc6 & mask;
    }

    if (IS_IVYBRIDGE(dev_priv))
        return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

    return INTEL_RC6_ENABLE;
}
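/*
 * Example (illustrative): booting with i915.enable_rc6=7 on a part without
 * RC6p support gives mask = INTEL_RC6_ENABLE = 1, so (7 & 1) != 7 triggers
 * the debug message and the function returns 1, quietly dropping the deep
 * RC6p/RC6pp bits that the hardware cannot honour.
 */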
static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
{
    uint32_t rp_state_cap;
    u32 ddcc_status = 0;
    int ret;

    /* All of these values are in units of 50MHz */
    dev_priv->rps.cur_freq = 0;
    /* static values from HW: RP0 > RP1 > RPn (min_freq) */
    if (IS_BROXTON(dev_priv)) {
        rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
        dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
        dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
        dev_priv->rps.min_freq = (rp_state_cap >>  0) & 0xff;
    } else {
        rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
        dev_priv->rps.rp0_freq = (rp_state_cap >>  0) & 0xff;
        dev_priv->rps.rp1_freq = (rp_state_cap >>  8) & 0xff;
        dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
    }

    /* hw_max = RP0 until we check for overclocking */
    dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

    dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
    if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
        IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        ret = sandybridge_pcode_read(dev_priv,
                                     HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
                                     &ddcc_status);
        if (ret == 0)
            dev_priv->rps.efficient_freq =
                clamp_t(u8,
                        ((ddcc_status >> 8) & 0xff),
                        dev_priv->rps.min_freq,
                        dev_priv->rps.max_freq);
    }

    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        /* Store the frequency values in 16.66 MHZ units, which is
           the natural hardware unit for SKL */
        dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
        dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER;
        dev_priv->rps.min_freq *= GEN9_FREQ_SCALER;
        dev_priv->rps.max_freq *= GEN9_FREQ_SCALER;
        dev_priv->rps.efficient_freq *= GEN9_FREQ_SCALER;
    }

    dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

    /* Preserve min/max settings in case of re-init */
    if (dev_priv->rps.max_freq_softlimit == 0)
        dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

    if (dev_priv->rps.min_freq_softlimit == 0) {
        if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
            dev_priv->rps.min_freq_softlimit =
                max_t(int, dev_priv->rps.efficient_freq,
                      intel_freq_opcode(dev_priv, 450));
        else
            dev_priv->rps.min_freq_softlimit =
                dev_priv->rps.min_freq;
    }
}
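/*
 * Decode example (illustrative values): GEN6_RP_STATE_CAP = 0x00070e16 gives
 * rp0 = 0x16 (22 * 50 = 1100 MHz), rp1 = 0x0e (700 MHz) and min = 0x07
 * (350 MHz) in 50 MHz units; on SKL/KBL the same numbers are then scaled by
 * GEN9_FREQ_SCALER into 16.66 MHz units (22 * 3 = 66), leaving the MHz value
 * unchanged.
 */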
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    gen6_init_rps_frequencies(dev_priv);

    /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
    if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
        /*
         * BIOS could leave the Hw Turbo enabled, so need to explicitly
         * clear out the Control register just to avoid inconsistency
         * with debugfs interface, which will show Turbo as enabled
         * only and that is not expected by the User after adding the
         * WaGsvDisableTurbo. Apart from this there is no problem even
         * if the Turbo is left enabled in the Control register, as the
         * Up/Down interrupts would remain masked.
         */
        gen9_disable_rps(dev_priv);
        intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
        return;
    }

    /* Program defaults and thresholds for RPS */
    I915_WRITE(GEN6_RC_VIDEO_FREQ,
               GEN9_FREQUENCY(dev_priv->rps.rp1_freq));

    /* 1 second timeout */
    I915_WRITE(GEN6_RP_DOWN_TIMEOUT,
               GT_INTERVAL_FROM_US(dev_priv, 1000000));

    I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa);

    /* Leaning on the below call to gen6_set_rps to program/setup the
     * Up/Down EI & threshold registers, as well as the RP_CONTROL,
     * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
    dev_priv->rps.power = HIGH_POWER; /* force a reset */
    gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
{
    struct intel_engine_cs *engine;
    uint32_t rc6_mask = 0;

    /* 1a: Software RC state - RC0 */
    I915_WRITE(GEN6_RC_STATE, 0);

    /* 1b: Get forcewake during program sequence. Although the driver
     * hasn't enabled a state yet where we need forcewake, BIOS may have. */
    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    /* 2a: Disable RC states. */
    I915_WRITE(GEN6_RC_CONTROL, 0);

    /* 2b: Program RC6 thresholds. */

    /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
    if (IS_SKYLAKE(dev_priv))
        I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
    else
        I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
    I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
    I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
    for_each_engine(engine, dev_priv)
        I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

    if (HAS_GUC(dev_priv))
        I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);

    I915_WRITE(GEN6_RC_SLEEP, 0);

    /* 2c: Program Coarse Power Gating Policies. */
    I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25);
    I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);

    /* 3a: Enable RC6 */
    if (intel_enable_rc6() & INTEL_RC6_ENABLE)
        rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
    DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
    /* WaRsUseTimeoutMode */
    if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
        IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
        I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                   GEN7_RC_CTL_TO_MODE |
                   rc6_mask);
    } else {
        I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                   GEN6_RC_CTL_EI_MODE(1) |
                   rc6_mask);
    }

    /*
     * 3b: Enable Coarse Power Gating only when RC6 is enabled.
     * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
     */
    if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
        I915_WRITE(GEN9_PG_ENABLE, 0);
    else
        I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
                   (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
    struct intel_engine_cs *engine;
    uint32_t rc6_mask = 0;

    /* 1a: Software RC state - RC0 */
    I915_WRITE(GEN6_RC_STATE, 0);

    /* 1c & 1d: Get forcewake during program sequence. Although the driver
     * hasn't enabled a state yet where we need forcewake, BIOS may have. */
    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    /* 2a: Disable RC states. */
    I915_WRITE(GEN6_RC_CONTROL, 0);

    /* Initialize rps frequencies */
    gen6_init_rps_frequencies(dev_priv);

    /* 2b: Program RC6 thresholds. */
    I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
    I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
    I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
    for_each_engine(engine, dev_priv)
        I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
    I915_WRITE(GEN6_RC_SLEEP, 0);
    if (IS_BROADWELL(dev_priv))
        I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
    else
        I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

    /* 3: Enable RC6 */
    if (intel_enable_rc6() & INTEL_RC6_ENABLE)
        rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
    intel_print_rc6_info(dev_priv, rc6_mask);
    if (IS_BROADWELL(dev_priv))
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                   GEN7_RC_CTL_TO_MODE |
                   rc6_mask);
    else
        I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
                   GEN6_RC_CTL_EI_MODE(1) |
                   rc6_mask);

    /* 4: Program defaults and thresholds for RPS */
    I915_WRITE(GEN6_RPNSWREQ,
               HSW_FREQUENCY(dev_priv->rps.rp1_freq));
    I915_WRITE(GEN6_RC_VIDEO_FREQ,
               HSW_FREQUENCY(dev_priv->rps.rp1_freq));
    /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
    I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

    /* Docs recommend 900MHz, and 300 MHz respectively */
    I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
               dev_priv->rps.max_freq_softlimit << 24 |
               dev_priv->rps.min_freq_softlimit << 16);

    I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
    I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70% */
    I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
    I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

    I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

    /* 5: Enable RPS */
    I915_WRITE(GEN6_RP_CONTROL,
               GEN6_RP_MEDIA_TURBO |
               GEN6_RP_MEDIA_HW_NORMAL_MODE |
               GEN6_RP_MEDIA_IS_GFX |
               GEN6_RP_ENABLE |
               GEN6_RP_UP_BUSY_AVG |
               GEN6_RP_DOWN_IDLE_AVG);

    /* 6: Ring frequency + overclocking (our driver does this later) */

    dev_priv->rps.power = HIGH_POWER; /* force a reset */
    gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
    struct intel_engine_cs *engine;
    u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
    u32 gtfifodbg;
    int rc6_mode;
    int ret;

    WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

    /* Here begins a magic sequence of register writes to enable
     * auto-downclocking.
     *
     * Perhaps there might be some value in exposing these to
     * userspace...
     */
    I915_WRITE(GEN6_RC_STATE, 0);

    /* Clear the DBG now so we don't confuse earlier errors */
    gtfifodbg = I915_READ(GTFIFODBG);
    if (gtfifodbg) {
        DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
        I915_WRITE(GTFIFODBG, gtfifodbg);
    }

    intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

    /* Initialize rps frequencies */
    gen6_init_rps_frequencies(dev_priv);

    /* disable the counters and set deterministic thresholds */
    I915_WRITE(GEN6_RC_CONTROL, 0);

    I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
    I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
    I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
    I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
    I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

    for_each_engine(engine, dev_priv)
        I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

    I915_WRITE(GEN6_RC_SLEEP, 0);
    I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
    if (IS_IVYBRIDGE(dev_priv))
        I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
    else
        I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
    I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
    I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

    /* Check if we are enabling RC6 */
    rc6_mode = intel_enable_rc6();
    if (rc6_mode & INTEL_RC6_ENABLE)
        rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

    /* We don't use those on Haswell */
    if (!IS_HASWELL(dev_priv)) {
        if (rc6_mode & INTEL_RC6p_ENABLE)
            rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

        if (rc6_mode & INTEL_RC6pp_ENABLE)
            rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
    }

    intel_print_rc6_info(dev_priv, rc6_mask);

    I915_WRITE(GEN6_RC_CONTROL,
               rc6_mask |
               GEN6_RC_CTL_EI_MODE(1) |
               GEN6_RC_CTL_HW_ENABLE);

    /* Power down if completely idle for over 50ms */
    I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
    I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

    ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
    if (ret)
        DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

    ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
    if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
        DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
                         (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
                         (pcu_mbox & 0xff) * 50);
        dev_priv->rps.max_freq = pcu_mbox & 0xff;
    }

    dev_priv->rps.power = HIGH_POWER; /* force a reset */
    gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);

    rc6vids = 0;
    ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
    if (IS_GEN6(dev_priv) && ret) {
        DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
    } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
        DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
                         GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
        rc6vids &= 0xffff00;
        rc6vids |= GEN6_ENCODE_RC6_VID(450);
        ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
        if (ret)
            DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
    }

    intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
    int min_freq = 15;
    unsigned int gpu_freq;
    unsigned int max_ia_freq, min_ring_freq;
    unsigned int max_gpu_freq, min_gpu_freq;
    int scaling_factor = 180;
    struct cpufreq_policy *policy;

    WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

    policy = cpufreq_cpu_get(0);
    if (policy) {
        max_ia_freq = policy->cpuinfo.max_freq;
        cpufreq_cpu_put(policy);
    } else {
        /*
         * Default to measured freq if none found, PCU will ensure we
         * don't go over
         */
        max_ia_freq = tsc_khz;
    }

    /* Convert from kHz to MHz */
    max_ia_freq /= 1000;

    min_ring_freq = I915_READ(DCLK) & 0xf;
    /* convert DDR frequency from units of 266.6MHz to bandwidth */
    min_ring_freq = mult_frac(min_ring_freq, 8, 3);

    if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
        /* Convert GT frequency to 50 HZ units */
        min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
        max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
    } else {
        min_gpu_freq = dev_priv->rps.min_freq;
        max_gpu_freq = dev_priv->rps.max_freq;
    }

    /*
     * For each potential GPU frequency, load a ring frequency we'd like
     * to use for memory access.  We do this by specifying the IA frequency
     * the PCU should use as a reference to determine the ring frequency.
     */
    for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) {
        int diff = max_gpu_freq - gpu_freq;
        unsigned int ia_freq = 0, ring_freq = 0;

        if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
            /*
             * ring_freq = 2 * GT. ring_freq is in 100MHz units
             * No floor required for ring frequency on SKL.
             */
            ring_freq = gpu_freq;
        } else if (INTEL_INFO(dev_priv)->gen >= 8) {
            /* max(2 * GT, DDR). NB: GT is 50MHz units */
            ring_freq = max(min_ring_freq, gpu_freq);
        } else if (IS_HASWELL(dev_priv)) {
            ring_freq = mult_frac(gpu_freq, 5, 4);
            ring_freq = max(min_ring_freq, ring_freq);
            /* leave ia_freq as the default, chosen by cpufreq */
        } else {
            /* On older processors, there is no separate ring
             * clock domain, so in order to boost the bandwidth
             * of the ring, we need to upclock the CPU (ia_freq).
             *
             * For GPU frequencies less than 750MHz,
             * just use the lowest ring freq.
             */
            if (gpu_freq < min_freq)
                ia_freq = 800;
            else
                ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
            ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
        }

        sandybridge_pcode_write(dev_priv,
                                GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
                                ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
                                ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
                                gpu_freq);
    }
}

void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
{
    if (!HAS_CORE_RING_FREQ(dev_priv))
        return;

    mutex_lock(&dev_priv->rps.hw_lock);
    __gen6_update_ring_freq(dev_priv);
    mutex_unlock(&dev_priv->rps.hw_lock);
}
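/*
 * Table-entry example for the pre-Haswell branch (illustrative numbers):
 * with max_ia_freq = 3400 MHz, scaling_factor = 180 and a gpu_freq three
 * bins below max (diff = 3), ia_freq = 3400 - (3 * 180) / 2 = 3130 MHz,
 * and DIV_ROUND_CLOSEST(3130, 100) = 31 is packed into the
 * GEN6_PCODE_WRITE_MIN_FREQ_TABLE mailbox alongside ring_freq and gpu_freq.
 */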
static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rp0;

    val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);

    switch (INTEL_INFO(dev_priv)->eu_total) {
    case 8:
        /* (2 * 4) config */
        rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
        break;
    case 12:
        /* (2 * 6) config */
        rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT);
        break;
    case 16:
        /* (2 * 8) config */
    default:
        /* Setting (2 * 8) Min RP0 for any other combination */
        rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT);
        break;
    }

    rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK);

    return rp0;
}

static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rpe;

    val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
    rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;

    return rpe;
}

static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rp1;

    val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
    rp1 = (val & FB_GFX_FREQ_FUSE_MASK);

    return rp1;
}

static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rp1;

    val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

    rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;

    return rp1;
}

static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rp0;

    val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

    rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
    /* Clamp to max */
    rp0 = min_t(u32, rp0, 0xea);

    return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
    u32 val, rpe;

    val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
    rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
    val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
    rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

    return rpe;
}

static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
    u32 val;

    val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
    /*
     * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value
     * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on
     * a BYT-M B0 the above register contains 0xbf. Moreover when setting
     * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0
     * to make sure it matches what Punit accepts.
     */
    return max_t(u32, val, 0xc0);
}
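/*
 * Fuse decode example (illustrative): valleyview_rps_rpe_freq() stitches RPe
 * from two fuse registers - the low 5 bits come from FMAX_FUSE_LO and the
 * remaining bits from FMAX_FUSE_HI shifted left by 5 - so lo = 0x15 and
 * hi bits = 0x2 combine to rpe = (0x2 << 5) | 0x15 = 0x55.
 */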
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
    unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

    WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
                         dev_priv->vlv_pctx->stolen->start);
}

/* Check that the pcbr address is not empty. */
static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
{
    unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

    WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
}
static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
{
    struct i915_ggtt *ggtt = &dev_priv->ggtt;
    unsigned long pctx_paddr, paddr;
    u32 pcbr;
    int pctx_size = 32*1024;

    pcbr = I915_READ(VLV_PCBR);
    if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
        DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
        paddr = (dev_priv->mm.stolen_base +
                 (ggtt->stolen_size - pctx_size));

        pctx_paddr = (paddr & (~4095));
        I915_WRITE(VLV_PCBR, pctx_paddr);
    }

    DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
}
static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
{
    struct drm_i915_gem_object *pctx;
    unsigned long pctx_paddr;
    u32 pcbr;
    int pctx_size = 24*1024;

    mutex_lock(&dev_priv->drm.struct_mutex);

    pcbr = I915_READ(VLV_PCBR);
    if (pcbr) {
        /* BIOS set it up already, grab the pre-alloc'd space */
        int pcbr_offset;

        pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
        pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
                                                              pcbr_offset,
                                                              I915_GTT_OFFSET_NONE,
                                                              pctx_size);
        goto out;
    }

    DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");

    /*
     * From the Gunit register HAS:
     * The Gfx driver is expected to program this register and ensure
     * proper allocation within Gfx stolen memory.  For example, this
     * register should be programmed such that the PCBR range does not
     * overlap with other ranges, such as the frame buffer, protected
     * memory, or any other relevant ranges.
     */
    pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
    if (!pctx) {
        DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
        goto out;
    }

    pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
    I915_WRITE(VLV_PCBR, pctx_paddr);

out:
    DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
    dev_priv->vlv_pctx = pctx;
    mutex_unlock(&dev_priv->drm.struct_mutex);
}
static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
{
	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference_unlocked(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
{
	dev_priv->rps.gpll_ref_freq =
		vlv_get_cck_clock(dev_priv, "GPLL ref",
				  CCK_GPLL_CLOCK_CONTROL,
				  dev_priv->czclk_freq);

	DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n",
			 dev_priv->rps.gpll_ref_freq);
}
static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	valleyview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	u32 val;

	cherryview_setup_pctx(dev_priv);

	vlv_init_gpll_ref_freq(dev_priv);

	mutex_lock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
	mutex_unlock(&dev_priv->sb_lock);

	switch ((val >> 2) & 0x7) {
	case 3:
		dev_priv->mem_freq = 2000;
		break;
	default:
		dev_priv->mem_freq = 1600;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);

	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
	DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
			 dev_priv->rps.rp1_freq);

	/* PUnit validated range is only [RPe, RP0] */
	dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	WARN_ONCE((dev_priv->rps.max_freq |
		   dev_priv->rps.efficient_freq |
		   dev_priv->rps.rp1_freq |
		   dev_priv->rps.min_freq) & 1,
		  "Odd GPU freq values\n");

	dev_priv->rps.idle_freq = dev_priv->rps.min_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	valleyview_cleanup_pctx(dev_priv);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0, pcbr;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV |
					     GT_FIFO_FREE_ENTRIES_CHV);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	cherryview_check_pctx(dev_priv);

	/* 1a & 1b: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	/* 2a: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);

	/* TO threshold set to 500 us ( 0x186 * 1.28 us) */
	I915_WRITE(GEN6_RC6_THRESHOLD, 0x186);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	/* For now we assume BIOS is allocating and populating the PCBR */
	pcbr = I915_READ(VLV_PCBR);

	/* 3: Enable RC6 */
	if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
	    (pcbr >> VLV_PCBR_ADDR_SHIFT))
		rc6_mode = GEN7_RC_CTL_TO_MODE;

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      CHV_BIAS_CPU_50_SOC_50;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 gtfifodbg, val, rc6_mode = 0;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	gtfifodbg = I915_READ(GTFIFODBG);
	if (gtfifodbg) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	/* Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_engine(engine, dev_priv)
		I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
				      VLV_RENDER_RC0_COUNT_EN |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));

	if (intel_enable_rc6() & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev_priv, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	/* Setting Fixed Bias */
	val = VLV_OVERRIDE_EN |
	      VLV_SOC_TDP_EN |
	      VLV_BIAS_CPU_125_SOC_875;
	vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	/* RPS code assumes GPLL is used */
	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
			 dev_priv->rps.idle_freq);

	valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
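/*
 * Worked example (illustrative field values, not measured): a PXVFREQ
 * encoding with div = 30, post = 1 and pre = 1 decodes above to
 * freq = (30 * 133333) / ((1 << 1) * 1) = 1999995, i.e. roughly 2 GHz
 * if the 133333 constant is taken to be the 133.333 MHz reference in kHz.
 */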
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};
static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}
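/*
 * Illustrative arithmetic (hypothetical counter values): with the
 * cparams row { 1, 1333, 301, 28664 } selected (m = 301, c = 28664),
 * an energy-counter delta of 1000 over diff1 = 100 ms gives
 * diff = 1000 / 100 = 10, so ret = ((301 * 10) + 28664) / 10 = 3167.
 * The absolute scale only has meaning to the IPS heuristics.
 */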
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
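/*
 * Illustrative arithmetic (hypothetical register values): with a TSFS
 * slope field of m = 100, a TR1 reading of x = 50 and an intercept of
 * b = 10, the formula above yields ((100 * 50) / 127) - 10 = 39 - 10 = 29.
 */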
static int _pxvid_to_vd(u8 pxvid)
{
	if (pxvid == 0)
		return 0;

	if (pxvid >= 8 && pxvid < 31)
		pxvid = 31;

	return (pxvid + 2) * 125;
}
static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	const int vd = _pxvid_to_vd(pxvid);
	const int vm = vd - 1125;

	if (INTEL_INFO(dev_priv)->is_mobile)
		return vm > 0 ? vm : 0;

	return vd;
}
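/*
 * Worked example (hypothetical PXVID value): pxvid = 16 falls in the
 * [8, 31) range, so _pxvid_to_vd() clamps it to 31 and returns
 * (31 + 2) * 125 = 4125. On a mobile part pvid_to_extvid() then reports
 * vm = 4125 - 1125 = 3000, while non-mobile parts return the raw 4125.
 */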
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	u64 now, diff, diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	now = ktime_get_raw_ns();
	diffms = now - dev_priv->ips.last_time2;
	do_div(diffms, NSEC_PER_MSEC);

	/* Don't divide by 0 */
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}
static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long val;

	if (INTEL_INFO(dev_priv)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU busyness to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_engine_cs *engine;
	bool ret = false;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_engine(engine, dev_priv)
		ret |= !list_empty(&engine->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}
void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}
void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
static void intel_init_emon(struct drm_i915_private *dev_priv)
{
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW(i), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW(i), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ(i));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW(i), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL(i), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}
void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
{
	/*
	 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
	 * requirement.
	 */
	if (!i915.enable_rc6) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		intel_runtime_pm_get(dev_priv);
	}

	if (IS_CHERRYVIEW(dev_priv))
		cherryview_init_gt_powersave(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv))
		valleyview_init_gt_powersave(dev_priv);
}
void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv))
		valleyview_cleanup_gt_powersave(dev_priv);

	if (!i915.enable_rc6)
		intel_runtime_pm_put(dev_priv);
}
static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
{
	flush_delayed_work(&dev_priv->rps.delayed_resume_work);

	gen6_disable_rps_interrupts(dev_priv);
}
/**
 * intel_suspend_gt_powersave - suspend PM work and helper threads
 * @dev_priv: i915 device
 *
 * We don't want to disable RC6 or other features here, we just want
 * to make sure any work we've queued has finished and won't bother
 * us while we're suspended.
 */
void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6)
		return;

	gen6_suspend_rps(dev_priv);

	/* Force GPU to min freq during suspend */
	gen6_rps_idle(dev_priv);
}
void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_disable_drps(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		intel_suspend_gt_powersave(dev_priv);

		mutex_lock(&dev_priv->rps.hw_lock);
		if (INTEL_INFO(dev_priv)->gen >= 9) {
			gen9_disable_rc6(dev_priv);
			gen9_disable_rps(dev_priv);
		} else if (IS_CHERRYVIEW(dev_priv))
			cherryview_disable_rps(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_disable_rps(dev_priv);
		else
			gen6_disable_rps(dev_priv);

		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);

	mutex_lock(&dev_priv->rps.hw_lock);

	gen6_reset_rps_interrupts(dev_priv);

	if (IS_CHERRYVIEW(dev_priv)) {
		cherryview_enable_rps(dev_priv);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		valleyview_enable_rps(dev_priv);
	} else if (INTEL_INFO(dev_priv)->gen >= 9) {
		gen9_enable_rc6(dev_priv);
		gen9_enable_rps(dev_priv);
		if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
			__gen6_update_ring_freq(dev_priv);
	} else if (IS_BROADWELL(dev_priv)) {
		gen8_enable_rps(dev_priv);
		__gen6_update_ring_freq(dev_priv);
	} else {
		gen6_enable_rps(dev_priv);
		__gen6_update_ring_freq(dev_priv);
	}

	WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq);

	WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq);
	WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq);

	dev_priv->rps.enabled = true;

	gen6_enable_rps_interrupts(dev_priv);

	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}
void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
{
	/* Powersaving is controlled by the host when inside a VM */
	if (intel_vgpu_active(dev_priv))
		return;

	if (IS_IRONLAKE_M(dev_priv)) {
		ironlake_enable_drps(dev_priv);
		mutex_lock(&dev_priv->drm.struct_mutex);
		intel_init_emon(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	gen6_suspend_rps(dev_priv);
	dev_priv->rps.enabled = false;
}
static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

		I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe)));
		POSTING_READ(DSPSURF(pipe));
	}
}
static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (HAS_PCH_LPT_LP(dev))
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(TRANS_CHICKEN1(PIPE_A),
		   I915_READ(TRANS_CHICKEN1(PIPE_A)) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
				   int general_prio_credits,
				   int high_prio_credits)
{
	u32 misccpctl;

	/* WaTempDisableDOPClkGating:bdw */
	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);

	I915_WRITE(GEN8_L3SQCREG1,
		   L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
		   L3_HIGH_PRIO_CREDITS(high_prio_credits));

	/*
	 * Wait at least 100 clocks before re-enabling clock gating.
	 * See the definition of L3SQCREG1 in BSpec.
	 */
	POSTING_READ(GEN8_L3SQCREG1);
	udelay(1);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
}
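/*
 * Usage sketch: the two callers later in this file split the credits as
 * gen8_set_l3sqc_credits(dev_priv, 30, 2) on bdw and
 * gen8_set_l3sqc_credits(dev_priv, 38, 2) on chv, per
 * WaProgramL3SqcReg1Default and the LSQC setting recommendations.
 */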
static void kabylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WaDisableSDEUnitClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableGamClockGating:kbl */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);

	/* WaFbcNukeOnHostModify:kbl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void skylake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	gen9_init_clock_gating(dev);

	/* WAC6entrylatency:skl */
	I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
		   FBC_LLC_FULLY_OPEN);

	/* WaFbcNukeOnHostModify:skl */
	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	ilk_init_lp_watermarks(dev);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* WaProgramL3SqcReg1Default:bdw */
	gen8_set_l3sqc_credits(dev_priv, 30, 2);

	/*
	 * WaGttCachingOffByDefault:bdw
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);

	/* WaKVMNotificationOnConfigChange:bdw */
	I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
		   | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);

	lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSampleCChickenBitEnable:hsw */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaProgramL3SqcReg1Default:chv
	 * See gfxspecs/Related Documents/Performance Guide/
	 * LSQC Setting Recommendations.
	 */
	gen8_set_l3sqc_credits(dev_priv, 38, 2);

	/*
	 * GTT cache may not work with big pages, so if those
	 * are ever enabled GTT cache may need to be disabled.
	 */
	I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
static void nop_init_clock_gating(struct drm_device *dev)
{
	DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n");
}
/**
 * intel_init_clock_gating_hooks - setup the clock gating hooks
 * @dev_priv: device private
 *
 * Setup the hooks that configure which clocks of a given platform can be
 * gated and also apply various GT and display specific workarounds for these
 * platforms. Note that some GT specific workarounds are applied separately
 * when GPU contexts or batchbuffers start their execution.
 */
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
	if (IS_SKYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
	else if (IS_KABYLAKE(dev_priv))
		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	else if (IS_CHERRYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = cherryview_init_clock_gating;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.init_clock_gating = haswell_init_clock_gating;
	else if (IS_IVYBRIDGE(dev_priv))
		dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
	else if (IS_VALLEYVIEW(dev_priv))
		dev_priv->display.init_clock_gating = valleyview_init_clock_gating;
	else if (IS_GEN6(dev_priv))
		dev_priv->display.init_clock_gating = gen6_init_clock_gating;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
	else if (IS_G4X(dev_priv))
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.init_clock_gating = crestline_init_clock_gating;
	else if (IS_BROADWATER(dev_priv))
		dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	else if (IS_GEN3(dev_priv))
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
	else if (IS_GEN2(dev_priv))
		dev_priv->display.init_clock_gating = i830_init_clock_gating;
	else {
		MISSING_CASE(INTEL_DEVID(dev_priv));
		dev_priv->display.init_clock_gating = nop_init_clock_gating;
	}
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_fbc_init(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.compute_global_watermarks = skl_compute_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
			dev_priv->display.compute_intermediate_wm =
				ilk_compute_intermediate_wm;
			dev_priv->display.initial_watermarks =
				ilk_initial_watermarks;
			dev_priv->display.optimize_watermarks =
				ilk_optimize_watermarks;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}
	} else if (IS_CHERRYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_setup_wm_latency(dev);
		dev_priv->display.update_wm = vlv_update_wm;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
{
	uint32_t flags =
		I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;

	switch (flags) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(flags);
		return 0;
	}
}
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, *val);
	I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ_FW(GEN6_PCODE_DATA);
	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}

int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	int status;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, val);
	I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (intel_wait_for_register_fw(dev_priv,
				       GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
				       500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE_FW(GEN6_PCODE_DATA, 0);

	if (INTEL_GEN(dev_priv) > 6)
		status = gen7_check_mailbox_status(dev_priv);
	else
		status = gen6_check_mailbox_status(dev_priv);

	if (status) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
				 status);
		return status;
	}

	return 0;
}
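
/*
 * Usage sketch for the two mailbox helpers above (illustrative; the command
 * shown is one real example, but any GEN6_PCODE_* command from i915_reg.h
 * follows the same pattern).  Callers must hold rps.hw_lock for the whole
 * transaction:
 *
 *	u32 rc6vids = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
 *				   &rc6vids) == 0)
 *		DRM_DEBUG_DRIVER("RC6 VIDs: 0x%08x\n", rc6vids);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */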

static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val - 0xb7
	 * Slow = Fast = GPLL ref * N
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000);
}

static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7;
}
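
/*
 * Worked example for the two helpers above (a sketch with illustrative
 * numbers; the actual GPLL reference is read from hardware at init).
 * Assuming rps.gpll_ref_freq is kept in kHz, e.g. 5400 kHz:
 *
 *	byt_gpu_freq(dev_priv, 0xf0):  N = 0xf0 - 0xb7 = 57,
 *		DIV_ROUND_CLOSEST(5400 * 57, 1000) = 308 MHz
 *	byt_freq_opcode(dev_priv, 308):
 *		DIV_ROUND_CLOSEST(1000 * 308, 5400) + 0xb7 = 57 + 0xb7 = 0xf0
 *
 * i.e. the two functions are (rounded) inverses of each other.
 */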

static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	/*
	 * N = val / 2
	 * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000);
}

static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2;
}
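
/*
 * Worked example for the CHV helpers (illustrative numbers only): with
 * rps.gpll_ref_freq = 8000 kHz and the even opcode 150:
 *
 *	chv_gpu_freq(dev_priv, 150)    = DIV_ROUND_CLOSEST(8000 * 150, 4000)
 *				       = 300 MHz
 *	chv_freq_opcode(dev_priv, 300) = DIV_ROUND_CLOSEST(600000, 8000) * 2
 *				       = 75 * 2 = 150
 *
 * The trailing "* 2" is what keeps opcodes even, as the comment above notes.
 */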

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
					 GEN9_FREQ_SCALER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_gpu_freq(dev_priv, val);
	else
		return val * GT_FREQUENCY_MULTIPLIER;
}

int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	if (IS_GEN9(dev_priv))
		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
					 GT_FREQUENCY_MULTIPLIER);
	else if (IS_CHERRYVIEW(dev_priv))
		return chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv))
		return byt_freq_opcode(dev_priv, val);
	else
		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
}
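
/*
 * Worked example for intel_gpu_freq()/intel_freq_opcode(), assuming
 * GT_FREQUENCY_MULTIPLIER == 50 and GEN9_FREQ_SCALER == 3 (their values
 * elsewhere in the driver).  On gen9 the RPS opcode is in units of
 * 50/3 MHz (~16.67 MHz); before gen9 it is in units of 50 MHz:
 *
 *	gen9:     intel_gpu_freq(dev_priv, 30)     = DIV_ROUND_CLOSEST(30 * 50, 3)
 *						   = 500 MHz
 *		  intel_freq_opcode(dev_priv, 500) = DIV_ROUND_CLOSEST(500 * 3, 50)
 *						   = 30
 *	pre-gen9: intel_gpu_freq(dev_priv, 10)     = 10 * 50 = 500 MHz
 */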

struct request_boost {
	struct work_struct work;
	struct drm_i915_gem_request *req;
};

static void __intel_rps_boost_work(struct work_struct *work)
{
	struct request_boost *boost = container_of(work, struct request_boost, work);
	struct drm_i915_gem_request *req = boost->req;

	if (!i915_gem_request_completed(req))
		gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);

	i915_gem_request_unreference(req);
	kfree(boost);
}

void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
{
	struct request_boost *boost;

	if (req == NULL || INTEL_GEN(req->i915) < 6)
		return;

	if (i915_gem_request_completed(req))
		return;

	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
	if (boost == NULL)
		return;

	i915_gem_request_reference(req);
	boost->req = req;

	INIT_WORK(&boost->work, __intel_rps_boost_work);
	queue_work(req->i915->wq, &boost->work);
}
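
/*
 * Illustrative call site (hypothetical, simplified): a CPU-side waiter
 * queues a boost before blocking, so RPS can raise the GPU clocks while
 * the CPU sleeps on the request:
 *
 *	intel_queue_rps_boost_for_request(req);
 *	... block until i915_gem_request_completed(req) ...
 */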

void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_init(&dev_priv->rps.hw_lock);
	spin_lock_init(&dev_priv->rps.client_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);
	INIT_LIST_HEAD(&dev_priv->rps.clients);
	INIT_LIST_HEAD(&dev_priv->rps.semaphores.link);
	INIT_LIST_HEAD(&dev_priv->rps.mmioflips.link);

	dev_priv->pm.suspended = false;
	atomic_set(&dev_priv->pm.wakeref_count, 0);
	atomic_set(&dev_priv->pm.atomic_seq, 0);
}