/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * the voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
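/*
 * For example (illustrative, not from the original comment): an RC6 mask of
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) allows the normal and deep RC6
 * states while keeping the deepest RC6pp state disabled.
 */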
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.i915_enable_fbc parameter.
 */
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this parameter on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
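/*
 * Each table entry below lists, in order: is_desktop, is_ddr3, fsb_freq and
 * mem_freq (these four are matched in intel_get_cxsr_latency()), followed by
 * four latency values -- presumably display_sr, display_hpll_disable,
 * cursor_sr and cursor_hpll_disable, judging from the struct cxsr_latency
 * consumers in pineview_update_wm().
 */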
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
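/*
 * Rough illustration of the tradeoff, with made-up numbers: a 100 MHz pixel
 * clock at 4 bytes per pixel drains the FIFO at 400 MB/s, so the assumed
 * 5000 ns of latency corresponds to (100000 / 1000) * 4 * 5000 / 1000 =
 * 2000 bytes that must still be queued when the refill request is issued.
 */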
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO,
	PINEVIEW_MAX_WM,
	PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO,
	PINEVIEW_CURSOR_MAX_WM,
	PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM,
	PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE,
	G4X_MAX_WM,
	G4X_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE,
	VALLEYVIEW_MAX_WM,
	VALLEYVIEW_MAX_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO,
	VALLEYVIEW_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO,
	I965_CURSOR_MAX_WM,
	I965_CURSOR_DFT_WM,
	2,
	I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I855GM_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i845_wm_info = {
	I830_FIFO_SIZE,
	I915_MAX_WM,
	1,
	2,
	I830_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO-line-sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
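/*
 * Worked example with illustrative numbers (not taken from any real
 * platform): clock_in_khz = 100000, pixel_size = 4, latency_ns = 5000,
 * cacheline_size = 64, fifo_size = 96, guard_size = 2 gives
 *   entries_required = (100 * 4 * 5000) / 1000 = 2000 bytes
 *                    = DIV_ROUND_UP(2000, 64)  = 32 cachelines
 *   wm_size          = 96 - (32 + 2)           = 62
 * i.e. the plane starts refilling once the FIFO drops to 62 cachelines.
 */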
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
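/*
 * Illustration of the "large buffer" cursor path above, with made-up
 * numbers: htotal = 2200 at clock = 148500 kHz gives line_time_us =
 * 2200 * 1000 / 148500 = 14 (integer math), so a 5000 ns latency yields
 * line_count = (5000 / 14 + 1000) / 1000 = 1 line. With a 64 pixel wide
 * cursor at 4 bytes per pixel that is 256 bytes = 4 cachelines, plus the
 * guard size.
 */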
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
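/*
 * Illustrative evaluation of the formula above (numbers invented): with
 * clock = 148500 kHz and pixel_size = 4, entries = 148 * 4 = 592 > 256,
 * so the 32-unit precision multiplier is selected and
 * plane_dl = (64 * 32 * 4) / (148 * 4) = 13.
 */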
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
							either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}

#define single_plane_enabled(mask) is_power_of_2(mask)
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct intel_framebuffer *fb;

		fb = to_intel_framebuffer(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (fb->obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}
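/*
 * Comparison of the two methods with illustrative numbers (latency in the
 * required 0.1us units): pixel_rate = 148500, Bpp = 4, latency = 20
 * (i.e. 2 us), htotal = 2200, width = 1920:
 *   method1: DIV_ROUND_UP(148500 * 4 * 20, 64 * 10000) + 2 = 19 + 2 = 21
 *   method2: lines = (20 * 148500) / (2200 * 10000) = 0, so
 *            DIV_ROUND_UP((0 + 1) * 1920 * 4, 64) + 2 = 120 + 2 = 122
 * method1 counts bytes produced during the latency window, method2 rounds
 * the window up to whole lines; callers take the minimum of the two.
 */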
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
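/*
 * E.g. (made-up numbers): pri_val = 21, horiz_pixels = 1920, Bpp = 4 gives
 * DIV_ROUND_UP(21 * 64, 7680) + 2 = 1 + 2 = 3; the value expresses the
 * primary watermark (in 64-byte cachelines) relative to the line size.
 */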
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}
/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}
static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}
static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
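/*
 * E.g. a raw WM1+ latency of 4 read from the hardware (0.5us units) becomes
 * 4 * 5 = 20 in the 0.1us units the ilk_compute_*_wm() helpers expect,
 * i.e. 2.0 us.
 */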
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The WM are computed with base on how long it takes to fill a single
	 * row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
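
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical mode with crtc_htotal = 2200 and crtc_clock = 148500 kHz,
 * linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. one
 * scanline takes roughly 119 / 8 = 14.8 us. The register thus stores the
 * line time in units of 1/8 microsecond.
 */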
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
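
/*
 * Illustrative decode (not from the original source): with a hypothetical
 * HSW MCH_SSKPD value of 0x0500000000000040, the extraction above yields
 * wm[0] = 0x05 (0.5 us, WM0 being in 0.1 us units) and wm[1] = 0x04
 * (2 us, WM1+ being in 0.5 us units); the remaining fields decode to zero
 * and would be flagged as "latency not provided" below.
 */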
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}
static int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
static void intel_print_wm_latency(struct drm_device *dev,
				   const char *name,
				   const uint16_t wm[5])
{
	int level, max_level = ilk_wm_max_level(dev);

	for (level = 0; level <= max_level; level++) {
		unsigned int latency = wm[level];

		if (latency == 0) {
			DRM_ERROR("%s WM%d latency not provided\n",
				  name, level);
			continue;
		}

		/* WM1+ latency values in 0.5us units */
		if (level > 0)
			latency *= 5;

		DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
			      name, level, wm[level],
			      latency / 10, latency % 10);
	}
}
static void ilk_setup_wm_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_read_wm_latency(dev, dev_priv->wm.pri_latency);

	memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));
	memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
	       sizeof(dev_priv->wm.pri_latency));

	intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
	intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);

	intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
	intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
	intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
}
static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
				      struct ilk_pipe_wm_parameters *p)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	struct drm_plane *plane;

	if (!intel_crtc_active(crtc))
		return;

	p->active = true;
	p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
	p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
	p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
	p->cur.bytes_per_pixel = 4;
	p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
	p->cur.horiz_pixels = intel_crtc->cursor_width;
	/* TODO: for now, assume primary and cursor planes are always enabled. */
	p->pri.enabled = true;
	p->cur.enabled = true;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		struct intel_plane *intel_plane = to_intel_plane(plane);

		if (intel_plane->pipe == pipe) {
			p->spr = intel_plane->wm;
			break;
		}
	}
}
static void ilk_compute_wm_config(struct drm_device *dev,
				  struct intel_wm_config *config)
{
	struct intel_crtc *intel_crtc;

	/* Compute the currently _active_ config */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
		const struct intel_pipe_wm *wm = &intel_crtc->wm.active;

		if (!wm->pipe_enabled)
			continue;

		config->sprites_enabled |= wm->sprites_enabled;
		config->sprites_scaled |= wm->sprites_scaled;
		config->num_pipes_active++;
	}
}
/* Compute new watermarks for the pipe */
static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
				  const struct ilk_pipe_wm_parameters *params,
				  struct intel_pipe_wm *pipe_wm)
{
	struct drm_device *dev = crtc->dev;
	const struct drm_i915_private *dev_priv = dev->dev_private;
	int level, max_level = ilk_wm_max_level(dev);
	/* LP0 watermark maximums depend on this pipe alone */
	struct intel_wm_config config = {
		.num_pipes_active = 1,
		.sprites_enabled = params->spr.enabled,
		.sprites_scaled = params->spr.scaled,
	};
	struct ilk_wm_maximums max;

	/* LP0 watermarks always use 1/2 DDB partitioning */
	ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);

	pipe_wm->pipe_enabled = params->active;
	pipe_wm->sprites_enabled = params->spr.enabled;
	pipe_wm->sprites_scaled = params->spr.scaled;

	/* ILK/SNB: LP2+ watermarks only w/o sprites */
	if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
		max_level = 1;

	/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
	if (params->spr.scaled)
		max_level = 0;

	for (level = 0; level <= max_level; level++)
		ilk_compute_wm_level(dev_priv, level, params,
				     &pipe_wm->wm[level]);

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);

	/* At least LP0 must be valid */
	return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
}
/*
 * Merge the watermarks from all active pipes for a specific level.
 */
static void ilk_merge_wm_level(struct drm_device *dev,
			       int level,
			       struct intel_wm_level *ret_wm)
{
	const struct intel_crtc *intel_crtc;

	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
		const struct intel_pipe_wm *active = &intel_crtc->wm.active;
		const struct intel_wm_level *wm = &active->wm[level];

		if (!active->pipe_enabled)
			continue;

		if (!wm->enable)
			return;

		ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
		ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
		ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
		ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
	}

	ret_wm->enable = true;
}
/*
 * Merge all low power watermarks for all active pipes.
 */
static void ilk_wm_merge(struct drm_device *dev,
			 const struct intel_wm_config *config,
			 const struct ilk_wm_maximums *max,
			 struct intel_pipe_wm *merged)
{
	int level, max_level = ilk_wm_max_level(dev);

	/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
	if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
	    config->num_pipes_active > 1)
		return;

	/* ILK: FBC WM must be disabled always */
	merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;

	/* merge each WM1+ level */
	for (level = 1; level <= max_level; level++) {
		struct intel_wm_level *wm = &merged->wm[level];

		ilk_merge_wm_level(dev, level, wm);

		if (!ilk_validate_wm_level(level, max, wm))
			break;

		/*
		 * The spec says it is preferred to disable
		 * FBC WMs instead of disabling a WM level.
		 */
		if (wm->fbc_val > max->fbc) {
			merged->fbc_wm_enabled = false;
			wm->fbc_val = 0;
		}
	}

	/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
	/*
	 * FIXME this is racy. FBC might get enabled later.
	 * What we should check here is whether FBC can be
	 * enabled sometime later.
	 */
	if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
		for (level = 2; level <= max_level; level++) {
			struct intel_wm_level *wm = &merged->wm[level];

			wm->enable = false;
		}
	}
}
static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
{
	/* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
	return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
}
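
/*
 * Illustrative note (not from the original source): when the level-4
 * watermark is usable, LP1/LP2/LP3 map to levels 1/3/4 (skipping 2), e.g.
 * ilk_wm_lp_to_level(2, pipe_wm) returns 2 + 1 = 3; otherwise the mapping
 * is the identity 1/2/3.
 */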
/* The value we need to program into the WM_LPx latency field */
static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 2 * level;
	else
		return dev_priv->wm.pri_latency[level];
}
static void ilk_compute_wm_results(struct drm_device *dev,
				   const struct intel_pipe_wm *merged,
				   enum intel_ddb_partitioning partitioning,
				   struct ilk_wm_values *results)
{
	struct intel_crtc *intel_crtc;
	int level, wm_lp;

	results->enable_fbc_wm = merged->fbc_wm_enabled;
	results->partitioning = partitioning;

	/* LP1+ register values */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		const struct intel_wm_level *r;

		level = ilk_wm_lp_to_level(wm_lp, merged);

		r = &merged->wm[level];
		if (!r->enable)
			break;

		results->wm_lp[wm_lp - 1] = WM3_LP_EN |
			(ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
			(r->pri_val << WM1_LP_SR_SHIFT) |
			r->cur_val;

		if (INTEL_INFO(dev)->gen >= 8)
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
		else
			results->wm_lp[wm_lp - 1] |=
				r->fbc_val << WM1_LP_FBC_SHIFT;

		if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
			WARN_ON(wm_lp != 1);
			results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
		} else
			results->wm_lp_spr[wm_lp - 1] = r->spr_val;
	}

	/* LP0 register values */
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
		enum pipe pipe = intel_crtc->pipe;
		const struct intel_wm_level *r =
			&intel_crtc->wm.active.wm[0];

		if (WARN_ON(!r->enable))
			continue;

		results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;

		results->wm_pipe[pipe] =
			(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
			(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
			r->cur_val;
	}
}
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
 * case both are at the same level. Prefer r1 in case they're the same. */
static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
						  struct intel_pipe_wm *r1,
						  struct intel_pipe_wm *r2)
{
	int level, max_level = ilk_wm_max_level(dev);
	int level1 = 0, level2 = 0;

	for (level = 1; level <= max_level; level++) {
		if (r1->wm[level].enable)
			level1 = level;
		if (r2->wm[level].enable)
			level2 = level;
	}

	if (level1 == level2) {
		if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
			return r2;
		else
			return r1;
	} else if (level1 > level2) {
		return r1;
	} else {
		return r2;
	}
}
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
#define WM_DIRTY_DDB (1 << 25)
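
/*
 * Illustrative layout note (not from the original source): per the macros
 * above, pipes occupy bits 0-2, linetime bits 8-10, LP1-LP3 bits 16-18,
 * FBC bit 24 and DDB bit 25. E.g. a change on pipe B that also invalidates
 * the LP levels yields
 * dirty = WM_DIRTY_PIPE(PIPE_B) | WM_DIRTY_LP_ALL = 0x00070002.
 */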
static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
					 const struct ilk_wm_values *old,
					 const struct ilk_wm_values *new)
{
	unsigned int dirty = 0;
	enum pipe pipe;
	int wm_lp;

	for_each_pipe(pipe) {
		if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
			dirty |= WM_DIRTY_LINETIME(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}

		if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
			dirty |= WM_DIRTY_PIPE(pipe);
			/* Must disable LP1+ watermarks too */
			dirty |= WM_DIRTY_LP_ALL;
		}
	}

	if (old->enable_fbc_wm != new->enable_fbc_wm) {
		dirty |= WM_DIRTY_FBC;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	if (old->partitioning != new->partitioning) {
		dirty |= WM_DIRTY_DDB;
		/* Must disable LP1+ watermarks too */
		dirty |= WM_DIRTY_LP_ALL;
	}

	/* LP1+ watermarks already deemed dirty, no need to continue */
	if (dirty & WM_DIRTY_LP_ALL)
		return dirty;

	/* Find the lowest numbered LP1+ watermark in need of an update... */
	for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
		if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
		    old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
			break;
	}

	/* ...and mark it and all higher numbered LP1+ watermarks as dirty */
	for (; wm_lp <= 3; wm_lp++)
		dirty |= WM_DIRTY_LP(wm_lp);

	return dirty;
}
static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
			       unsigned int dirty)
{
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	bool changed = false;

	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
		previous->wm_lp[2] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
		previous->wm_lp[1] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
		changed = true;
	}
	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
		previous->wm_lp[0] &= ~WM1_LP_SR_EN;
		I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
		changed = true;
	}

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */

	return changed;
}
/*
 * The spec says we shouldn't write when we don't need, because every write
 * causes WMs to be re-evaluated, expending some power.
 */
static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
				struct ilk_wm_values *results)
{
	struct drm_device *dev = dev_priv->dev;
	struct ilk_wm_values *previous = &dev_priv->wm.hw;
	unsigned int dirty;
	uint32_t val;

	dirty = ilk_compute_wm_dirty(dev, previous, results);
	if (!dirty)
		return;

	_ilk_disable_lp_wm(dev_priv, dirty);

	if (dirty & WM_DIRTY_PIPE(PIPE_A))
		I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
	if (dirty & WM_DIRTY_PIPE(PIPE_B))
		I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
	if (dirty & WM_DIRTY_PIPE(PIPE_C))
		I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);

	if (dirty & WM_DIRTY_LINETIME(PIPE_A))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_B))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
	if (dirty & WM_DIRTY_LINETIME(PIPE_C))
		I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);

	if (dirty & WM_DIRTY_DDB) {
		if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
			val = I915_READ(WM_MISC);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~WM_MISC_DATA_PARTITION_5_6;
			else
				val |= WM_MISC_DATA_PARTITION_5_6;
			I915_WRITE(WM_MISC, val);
		} else {
			val = I915_READ(DISP_ARB_CTL2);
			if (results->partitioning == INTEL_DDB_PART_1_2)
				val &= ~DISP_DATA_PARTITION_5_6;
			else
				val |= DISP_DATA_PARTITION_5_6;
			I915_WRITE(DISP_ARB_CTL2, val);
		}
	}

	if (dirty & WM_DIRTY_FBC) {
		val = I915_READ(DISP_ARB_CTL);
		if (results->enable_fbc_wm)
			val &= ~DISP_FBC_WM_DIS;
		else
			val |= DISP_FBC_WM_DIS;
		I915_WRITE(DISP_ARB_CTL, val);
	}

	if (dirty & WM_DIRTY_LP(1) &&
	    previous->wm_lp_spr[0] != results->wm_lp_spr[0])
		I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);

	if (INTEL_INFO(dev)->gen >= 7) {
		if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
			I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
		if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
			I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
	}

	if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
		I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
	if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
		I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
	if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
		I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);

	dev_priv->wm.hw = *results;
}
static bool ilk_disable_lp_wm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
static void ilk_update_wm(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_maximums max;
	struct ilk_pipe_wm_parameters params = {};
	struct ilk_wm_values results = {};
	enum intel_ddb_partitioning partitioning;
	struct intel_pipe_wm pipe_wm = {};
	struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
	struct intel_wm_config config = {};

	ilk_compute_wm_parameters(crtc, &params);

	intel_compute_pipe_wm(crtc, &params, &pipe_wm);

	if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
		return;

	intel_crtc->wm.active = pipe_wm;

	ilk_compute_wm_config(dev, &config);

	ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
	ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);

	/* 5/6 split only in single pipe config on IVB+ */
	if (INTEL_INFO(dev)->gen >= 7 &&
	    config.num_pipes_active == 1 && config.sprites_enabled) {
		ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
		ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);

		best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
	} else {
		best_lp_wm = &lp_wm_1_2;
	}

	partitioning = (best_lp_wm == &lp_wm_1_2) ?
		       INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;

	ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);

	ilk_write_wm_values(dev_priv, &results);
}
static void ilk_update_sprite_wm(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enabled, bool scaled)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);

	intel_plane->wm.enabled = enabled;
	intel_plane->wm.scaled = scaled;
	intel_plane->wm.horiz_pixels = sprite_width;
	intel_plane->wm.bytes_per_pixel = pixel_size;

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
		intel_wait_for_vblank(dev, intel_plane->pipe);

	ilk_update_wm(crtc);
}
static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_pipe_wm *active = &intel_crtc->wm.active;
	enum pipe pipe = intel_crtc->pipe;
	static const unsigned int wm0_pipe_reg[] = {
		[PIPE_A] = WM0_PIPEA_ILK,
		[PIPE_B] = WM0_PIPEB_ILK,
		[PIPE_C] = WM0_PIPEC_IVB,
	};

	hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));

	active->pipe_enabled = intel_crtc_active(crtc);

	if (active->pipe_enabled) {
		u32 tmp = hw->wm_pipe[pipe];

		/*
		 * For active pipes LP0 watermark is marked as
		 * enabled, and LP1+ watermarks as disabled since
		 * we can't really reverse compute them in case
		 * multiple pipes are active.
		 */
		active->wm[0].enable = true;
		active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
		active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
		active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
		active->linetime = hw->wm_linetime[pipe];
	} else {
		int level, max_level = ilk_wm_max_level(dev);

		/*
		 * For inactive pipes, all watermark levels
		 * should be marked as enabled but zeroed,
		 * which is what we'd compute them to.
		 */
		for (level = 0; level <= max_level; level++)
			active->wm[level].enable = true;
	}
}
void ilk_wm_get_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct ilk_wm_values *hw = &dev_priv->wm.hw;
	struct drm_crtc *crtc;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
		ilk_pipe_wm_get_hw_state(crtc);

	hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
	hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
	hw->wm_lp[2] = I915_READ(WM3_LP_ILK);

	hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
	if (INTEL_INFO(dev)->gen >= 7) {
		hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
		hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
	else if (IS_IVYBRIDGE(dev))
		hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
			INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;

	hw->enable_fbc_wm =
		!(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
/**
 * intel_update_watermarks - update FIFO watermark values based on current modes
 *
 * Calculate watermark values for the various WM regs based on current mode
 * and plane configuration.
 *
 * There are several cases to deal with here:
 *   - normal (i.e. non-self-refresh)
 *   - self-refresh (SR) mode
 *   - lines are large relative to FIFO size (buffer can hold up to 2)
 *   - lines are small relative to FIFO size (buffer can hold more than 2
 *     lines), so need to account for TLB latency
 *
 *   The normal calculation is:
 *     watermark = dotclock * bytes per pixel * latency
 *   where latency is platform & configuration dependent (we assume pessimal
 *   values here).
 *
 *   The SR calculation is:
 *     watermark = (trunc(latency/line time)+1) * surface width *
 *       bytes per pixel
 *   where
 *     line time = htotal / dotclock
 *     surface width = hdisplay for normal plane and 64 for cursor
 *   and latency is assumed to be high, as above.
 *
 * The final value programmed to the register should always be rounded up,
 * and include an extra 2 entries to account for clock crossings.
 *
 * We don't use the sprite, so we can ignore that. And on Crestline we have
 * to set the non-SR watermarks to 8.
 */
void intel_update_watermarks(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;

	if (dev_priv->display.update_wm)
		dev_priv->display.update_wm(crtc);
}
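
/*
 * Worked example (illustrative, not from the original source): in SR mode
 * with a hypothetical latency of 12 us, htotal = 2200 and a 148.5 MHz dot
 * clock, line time = 2200 / 148500000 s, about 14.8 us, so
 * trunc(12 / 14.8) + 1 = 1 line must be buffered; with hdisplay = 1920 at
 * 4 bytes per pixel that is 1 * 1920 * 4 = 7680 bytes, plus the extra two
 * FIFO entries for clock crossings, before rounding up to FIFO units.
 */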
void intel_update_sprite_watermarks(struct drm_plane *plane,
				    struct drm_crtc *crtc,
				    uint32_t sprite_width, int pixel_size,
				    bool enabled, bool scaled)
{
	struct drm_i915_private *dev_priv = plane->dev->dev_private;

	if (dev_priv->display.update_sprite_wm)
		dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
						   pixel_size, enabled, scaled);
}
static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
	struct drm_i915_gem_object *ctx;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	ctx = i915_gem_alloc_object(dev, 4096);
	if (!ctx) {
		DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
		return NULL;
	}

	ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
	if (ret) {
		DRM_ERROR("failed to pin power context: %d\n", ret);
		goto err_unref;
	}

	ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
	if (ret) {
		DRM_ERROR("failed to set-domain on power context: %d\n", ret);
		goto err_unpin;
	}

	return ctx;

err_unpin:
	i915_gem_object_ggtt_unpin(ctx);
err_unref:
	drm_gem_object_unreference(&ctx->base);
	return NULL;
}
/*
 * Lock protecting IPS related data structures
 */
DEFINE_SPINLOCK(mchdev_lock);

/* Global for IPS driver to get at the current i915 device. Protected by
 * mchdev_lock. */
static struct drm_i915_private *i915_mch_dev;
bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	assert_spin_locked(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);
	if (rgvswctl & MEMCTL_CMD_STS) {
		DRM_DEBUG("gpu busy, RCS change rejected\n");
		return false; /* still busy with another command */
	}

	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		(val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE16(MEMSWCTL, rgvswctl);
	POSTING_READ16(MEMSWCTL);

	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE16(MEMSWCTL, rgvswctl);

	return true;
}
static void ironlake_enable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvmodectl = I915_READ(MEMMODECTL);
	u8 fmax, fmin, fstart, vstart;

	spin_lock_irq(&mchdev_lock);

	/* Enable temp reporting */
	I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
	I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);

	/* 100ms RC evaluation intervals */
	I915_WRITE(RCUPEI, 100000);
	I915_WRITE(RCDNEI, 100000);

	/* Set max/min thresholds to 90ms and 80ms respectively */
	I915_WRITE(RCBMAXAVG, 90000);
	I915_WRITE(RCBMINAVG, 80000);

	I915_WRITE(MEMIHYST, 1);

	/* Set up min, max, and cur for interrupt handling */
	fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
		MEMMODE_FSTART_SHIFT;

	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
		PXVFREQ_PX_SHIFT;

	dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
	dev_priv->ips.fstart = fstart;

	dev_priv->ips.max_delay = fstart;
	dev_priv->ips.min_delay = fmin;
	dev_priv->ips.cur_delay = fstart;

	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
			 fmax, fmin, fstart);

	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);

	/*
	 * Interrupts will be enabled in ironlake_irq_postinstall
	 */

	I915_WRITE(VIDSTART, vstart);
	POSTING_READ(VIDSTART);

	rgvmodectl |= MEMMODE_SWMODE_EN;
	I915_WRITE(MEMMODECTL, rgvmodectl);

	if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
		DRM_ERROR("stuck trying to change perf mode\n");
	mdelay(1);

	ironlake_set_drps(dev, fstart);

	dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
		I915_READ(0x112e0);
	dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
	dev_priv->ips.last_count2 = I915_READ(0x112f4);
	getrawmonotonic(&dev_priv->ips.last_time2);

	spin_unlock_irq(&mchdev_lock);
}
static void ironlake_disable_drps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 rgvswctl;

	spin_lock_irq(&mchdev_lock);

	rgvswctl = I915_READ16(MEMSWCTL);

	/* Ack interrupts, disable EFC interrupt */
	I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
	I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
	I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
	I915_WRITE(DEIIR, DE_PCU_EVENT);
	I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);

	/* Go back to the starting frequency */
	ironlake_set_drps(dev, dev_priv->ips.fstart);
	mdelay(1);
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	mdelay(1);

	spin_unlock_irq(&mchdev_lock);
}
/* There's a funny hw issue where the hw returns all 0 when reading from
 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
 * ourselves, instead of doing a rmw cycle (which might result in us clearing
 * all limits and the gpu stuck at whatever frequency it is at atm).
 */
static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
{
	u32 limits;

	/* Only set the down limit when we've reached the lowest level to avoid
	 * getting more interrupts, otherwise leave this clear. This prevents a
	 * race in the hw when coming out of rc6: There's a tiny window where
	 * the hw runs at the minimal clock before selecting the desired
	 * frequency, if the down threshold expires in that window we will not
	 * receive a down interrupt. */
	limits = dev_priv->rps.max_freq_softlimit << 24;
	if (val <= dev_priv->rps.min_freq_softlimit)
		limits |= dev_priv->rps.min_freq_softlimit << 16;

	return limits;
}
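
/*
 * Illustrative note (not from the original source): per the shifts above,
 * the up limit lands in bits 31:24 and the down limit in bits 23:16, both
 * in 50 MHz units. E.g. with a hypothetical max softlimit of 22 (1100 MHz)
 * and val at the min softlimit of 7 (350 MHz),
 * limits = 22 << 24 | 7 << 16 = 0x16070000.
 */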
static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
{
	int new_power;

	new_power = dev_priv->rps.power;
	switch (dev_priv->rps.power) {
	case LOW_POWER:
		if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;

	case BETWEEN:
		if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
			new_power = LOW_POWER;
		else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
			new_power = HIGH_POWER;
		break;

	case HIGH_POWER:
		if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
			new_power = BETWEEN;
		break;
	}
	/* Max/min bins are special */
	if (val == dev_priv->rps.min_freq_softlimit)
		new_power = LOW_POWER;
	if (val == dev_priv->rps.max_freq_softlimit)
		new_power = HIGH_POWER;
	if (new_power == dev_priv->rps.power)
		return;

	/* Note the units here are not exactly 1us, but 1280ns. */
	switch (new_power) {
	case LOW_POWER:
		/* Upclock if more than 95% busy over 16ms */
		I915_WRITE(GEN6_RP_UP_EI, 12500);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);

		/* Downclock if less than 85% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case BETWEEN:
		/* Upclock if more than 90% busy over 13ms */
		I915_WRITE(GEN6_RP_UP_EI, 10250);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);

		/* Downclock if less than 75% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;

	case HIGH_POWER:
		/* Upclock if more than 85% busy over 10ms */
		I915_WRITE(GEN6_RP_UP_EI, 8000);
		I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);

		/* Downclock if less than 60% busy over 32ms */
		I915_WRITE(GEN6_RP_DOWN_EI, 25000);
		I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);

		I915_WRITE(GEN6_RP_CONTROL,
			   GEN6_RP_MEDIA_TURBO |
			   GEN6_RP_MEDIA_HW_NORMAL_MODE |
			   GEN6_RP_MEDIA_IS_GFX |
			   GEN6_RP_ENABLE |
			   GEN6_RP_UP_BUSY_AVG |
			   GEN6_RP_DOWN_IDLE_AVG);
		break;
	}

	dev_priv->rps.power = new_power;
	dev_priv->rps.last_adj = 0;
}
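
/*
 * Worked example (illustrative, not from the original source): the EI
 * values above are in 1280 ns units, so GEN6_RP_UP_EI = 12500 gives an
 * evaluation interval of 12500 * 1280 ns = 16 ms, and the up threshold of
 * 11800 corresponds to 11800 / 12500 = 94.4% busyness over that window.
 */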
static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
	u32 mask = 0;

	if (val > dev_priv->rps.min_freq_softlimit)
		mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
	if (val < dev_priv->rps.max_freq_softlimit)
		mask |= GEN6_PM_RP_UP_THRESHOLD;

	/* IVB and SNB hard hangs on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 */
	if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
		mask |= GEN6_PM_RP_UP_EI_EXPIRED;

	return ~mask;
}
/* gen6_set_rps is called to update the frequency request, but should also be
 * called when the range (min_delay and max_delay) is modified so that we can
 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
void gen6_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	/* min/max delay may still have been modified so be sure to
	 * write the limits value.
	 */
	if (val != dev_priv->rps.cur_freq) {
		gen6_set_rps_thresholds(dev_priv, val);

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			I915_WRITE(GEN6_RPNSWREQ,
				   HSW_FREQUENCY(val));
		else
			I915_WRITE(GEN6_RPNSWREQ,
				   GEN6_FREQUENCY(val) |
				   GEN6_OFFSET(0) |
				   GEN6_AGGRESSIVE_TURBO);
	}

	/* Make sure we continue to get interrupts
	 * until we hit the minimum or maximum frequencies.
	 */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	POSTING_READ(GEN6_RPNSWREQ);

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(val * 50);
}
/* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
 *
 * * If Gfx is Idle, then
 * 1. Mask Turbo interrupts
 * 2. Bring up Gfx clock
 * 3. Change the freq to Rpn and wait till P-Unit updates freq
 * 4. Clear the Force GFX CLK ON bit so that Gfx can down
 * 5. Unmask Turbo interrupts
 */
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
	/*
	 * When we are idle. Drop to min voltage state.
	 */

	if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
		return;

	/* Mask turbo interrupt so that they will not come in between */
	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);

	vlv_force_gfx_clock(dev_priv, true);

	dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;

	vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
			dev_priv->rps.min_freq_softlimit);

	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
				& GENFREQSTATUS) == 0, 5))
		DRM_ERROR("timed out waiting for Punit\n");

	vlv_force_gfx_clock(dev_priv, false);

	I915_WRITE(GEN6_PMINTRMSK,
		   gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
}
void gen6_rps_idle(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			vlv_set_rps_idle(dev_priv);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void gen6_rps_boost(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);
	if (dev_priv->rps.enabled) {
		if (IS_VALLEYVIEW(dev))
			valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		else
			gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
		dev_priv->rps.last_adj = 0;
	}
	mutex_unlock(&dev_priv->rps.hw_lock);
}
void valleyview_set_rps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
	WARN_ON(val > dev_priv->rps.max_freq_softlimit);
	WARN_ON(val < dev_priv->rps.min_freq_softlimit);

	DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq,
			 vlv_gpu_freq(dev_priv, val), val);

	if (val != dev_priv->rps.cur_freq)
		vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);

	I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));

	dev_priv->rps.cur_freq = val;
	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
}
static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
				~dev_priv->pm_rps_events);
	/* Complete PM interrupt masking here doesn't race with the rps work
	 * item again unmasking PM interrupts because that is using a different
	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
}
static void gen6_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);
	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);

	gen6_disable_rps_interrupts(dev);
}

static void valleyview_disable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN6_RC_CONTROL, 0);

	gen6_disable_rps_interrupts(dev);
}
static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
{
	if (IS_VALLEYVIEW(dev)) {
		if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
			mode = GEN6_RC_CTL_RC6_ENABLE;
		else
			mode = 0;
	}
	DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
		 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
		 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
		 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
}
static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
{
	/* No RC6 before Ironlake */
	if (INTEL_INFO(dev)->gen < 5)
		return 0;

	/* RC6 is only on Ironlake mobile not on desktop */
	if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
		return 0;

	/* Disable RC6 on Broadwell for now */
	if (IS_BROADWELL(dev))
		return 0;

	/* Respect the kernel parameter if it is set */
	if (enable_rc6 >= 0) {
		int mask;

		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
			       INTEL_RC6pp_ENABLE;
		else
			mask = INTEL_RC6_ENABLE;

		if ((enable_rc6 & mask) != enable_rc6)
			DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
				 enable_rc6 & mask, enable_rc6, mask);

		return enable_rc6 & mask;
	}

	/* Disable RC6 on Ironlake */
	if (INTEL_INFO(dev)->gen == 5)
		return 0;

	if (IS_IVYBRIDGE(dev))
		return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);

	return INTEL_RC6_ENABLE;
}
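
/*
 * Illustrative note (not from the original source): with the module
 * parameter i915.enable_rc6=7 on Haswell, the valid mask above is just
 * INTEL_RC6_ENABLE (1), so the request is adjusted to 7 & 1 = 1 and only
 * plain RC6 is enabled; on SNB/IVB the full mask of 7 would be honoured.
 */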
int intel_enable_rc6(const struct drm_device *dev)
{
	return i915.enable_rc6;
}
static void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}
static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
{
	/* All of these values are in units of 50MHz */
	dev_priv->rps.cur_freq = 0;
	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
	/* XXX: only BYT has a special efficient freq */
	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
	/* hw_max = RP0 until we check for overclocking */
	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
}
static void gen8_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	uint32_t rc6_mask = 0, rp_state_cap;
	int unused;

	/* 1a: Software RC state - RC0 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* 1c & 1d: Get forcewake during program sequence. Although the driver
	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	/* 2a: Disable RC states. */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* 2b: Program RC6 thresholds.*/
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
	for_each_ring(ring, dev_priv, unused)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */

	/* 3: Enable RC6 */
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
	intel_print_rc6_info(dev, rc6_mask);
	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
				    GEN6_RC_CTL_EI_MODE(1) |
				    rc6_mask);

	/* 4 Program defaults and thresholds for RPS*/
	I915_WRITE(GEN6_RPNSWREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	I915_WRITE(GEN6_RC_VIDEO_FREQ,
		   HSW_FREQUENCY(dev_priv->rps.rp1_freq));
	/* NB: Docs say 1s, and 1000000 - which aren't equivalent */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */

	/* Docs recommend 900MHz, and 300 MHz respectively */
	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
		   dev_priv->rps.max_freq_softlimit << 24 |
		   dev_priv->rps.min_freq_softlimit << 16);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
	I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
	I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	/* 5: Enable RPS */
	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_AVG);

	/* 6: Ring frequency + overclocking (our driver does this later */

	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 rp_state_cap;
	u32 gt_perf_status;
	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
	u32 gtfifodbg;
	int rc6_mode;
	int i, ret;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	/* Here begins a magic sequence of register writes to enable
	 * auto-downclocking.
	 *
	 * Perhaps there might be some value in exposing these to
	 * userspace...
	 */
	I915_WRITE(GEN6_RC_STATE, 0);

	/* Clear the DBG now so we don't confuse earlier errors */
	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
	gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);

	parse_rp_state_cap(dev_priv, rp_state_cap);

	/* disable the counters and set deterministic thresholds */
	I915_WRITE(GEN6_RC_CONTROL, 0);

	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC_SLEEP, 0);
	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
	else
		I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
	I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

	/* Check if we are enabling RC6 */
	rc6_mode = intel_enable_rc6(dev_priv->dev);
	if (rc6_mode & INTEL_RC6_ENABLE)
		rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;

	/* We don't use those on Haswell */
	if (!IS_HASWELL(dev)) {
		if (rc6_mode & INTEL_RC6p_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;

		if (rc6_mode & INTEL_RC6pp_ENABLE)
			rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
	}

	intel_print_rc6_info(dev, rc6_mask);

	I915_WRITE(GEN6_RC_CONTROL,
		   rc6_mask |
		   GEN6_RC_CTL_EI_MODE(1) |
		   GEN6_RC_CTL_HW_ENABLE);

	/* Power down if completely idle for over 50ms */
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
	if (ret)
		DRM_DEBUG_DRIVER("Failed to set the min frequency\n");

	ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
	if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
		DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
				 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
				 (pcu_mbox & 0xff) * 50);
		dev_priv->rps.max_freq = pcu_mbox & 0xff;
	}

	dev_priv->rps.power = HIGH_POWER; /* force a reset */
	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);

	gen6_enable_rps_interrupts(dev);

	rc6vids = 0;
	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
	if (IS_GEN6(dev) && ret) {
		DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
	} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
		DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
				 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
		rc6vids &= 0xffff00;
		rc6vids |= GEN6_ENCODE_RC6_VID(450);
		ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
		if (ret)
			DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
	}

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
static void __gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int min_freq = 15;
	unsigned int gpu_freq;
	unsigned int max_ia_freq, min_ring_freq;
	int scaling_factor = 180;
	struct cpufreq_policy *policy;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	policy = cpufreq_cpu_get(0);
	if (policy) {
		max_ia_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	} else {
		/*
		 * Default to measured freq if none found, PCU will ensure we
		 * don't go over
		 */
		max_ia_freq = tsc_khz;
	}

	/* Convert from kHz to MHz */
	max_ia_freq /= 1000;

	min_ring_freq = I915_READ(DCLK) & 0xf;
	/* convert DDR frequency from units of 266.6MHz to bandwidth */
	min_ring_freq = mult_frac(min_ring_freq, 8, 3);

	/*
	 * For each potential GPU frequency, load a ring frequency we'd like
	 * to use for memory access. We do this by specifying the IA frequency
	 * the PCU should use as a reference to determine the ring frequency.
	 */
	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
	     gpu_freq--) {
		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
		unsigned int ia_freq = 0, ring_freq = 0;

		if (INTEL_INFO(dev)->gen >= 8) {
			/* max(2 * GT, DDR). NB: GT is 50MHz units */
			ring_freq = max(min_ring_freq, gpu_freq);
		} else if (IS_HASWELL(dev)) {
			ring_freq = mult_frac(gpu_freq, 5, 4);
			ring_freq = max(min_ring_freq, ring_freq);
			/* leave ia_freq as the default, chosen by cpufreq */
		} else {
			/* On older processors, there is no separate ring
			 * clock domain, so in order to boost the bandwidth
			 * of the ring, we need to upclock the CPU (ia_freq).
			 *
			 * For GPU frequencies less than 750MHz,
			 * just use the lowest ring freq.
			 */
			if (gpu_freq < min_freq)
				ia_freq = 800;
			else
				ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
			ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
		}

		sandybridge_pcode_write(dev_priv,
					GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
					ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
					ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
					gpu_freq);
	}
}
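
/*
 * Worked example (illustrative, not from the original source): on a
 * hypothetical SNB part with max_ia_freq = 3400 MHz, a GPU bin 4 steps
 * below the maximum gets ia_freq = 3400 - (4 * 180) / 2 = 3040 MHz,
 * rounded to 30 in the 100 MHz units the PCU mailbox expects.
 */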
void gen6_update_ring_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	__gen6_update_ring_freq(dev);
	mutex_unlock(&dev_priv->rps.hw_lock);
}
int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rp0;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);

	rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
	/* Clamp to max */
	rp0 = min_t(u32, rp0, 0xea);

	return rp0;
}

static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
{
	u32 val, rpe;

	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
	rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
	val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
	rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;

	return rpe;
}

int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
{
	return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
}
/* Check that the pctx buffer wasn't moved under us. */
static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
{
	unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;

	WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
			     dev_priv->vlv_pctx->stolen->start);
}
static void valleyview_setup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *pctx;
	unsigned long pctx_paddr;
	u32 pcbr;
	int pctx_size = 24*1024;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	pcbr = I915_READ(VLV_PCBR);
	if (pcbr) {
		/* BIOS set it up already, grab the pre-alloc'd space */
		int pcbr_offset;

		pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
		pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
								      pcbr_offset,
								      I915_GTT_OFFSET_NONE,
								      pctx_size);
		goto out;
	}

	/*
	 * From the Gunit register HAS:
	 * The Gfx driver is expected to program this register and ensure
	 * proper allocation within Gfx stolen memory. For example, this
	 * register should be programmed such that the PCBR range does not
	 * overlap with other ranges, such as the frame buffer, protected
	 * memory, or any other relevant ranges.
	 */
	pctx = i915_gem_object_create_stolen(dev, pctx_size);
	if (!pctx) {
		DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
		return;
	}

	pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
	I915_WRITE(VLV_PCBR, pctx_paddr);

out:
	dev_priv->vlv_pctx = pctx;
}
static void valleyview_cleanup_pctx(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (WARN_ON(!dev_priv->vlv_pctx))
		return;

	drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
	dev_priv->vlv_pctx = NULL;
}
static void valleyview_init_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	valleyview_setup_pctx(dev);

	mutex_lock(&dev_priv->rps.hw_lock);

	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
	DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
			 dev_priv->rps.max_freq);

	dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
	DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
	DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
			 dev_priv->rps.min_freq);

	/* Preserve min/max settings in case of re-init */
	if (dev_priv->rps.max_freq_softlimit == 0)
		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;

	if (dev_priv->rps.min_freq_softlimit == 0)
		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;

	mutex_unlock(&dev_priv->rps.hw_lock);
}
static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
{
	valleyview_cleanup_pctx(dev);
}
static void valleyview_enable_rps(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	u32 gtfifodbg, val, rc6_mode = 0;
	int i;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	valleyview_check_pctx(dev_priv);

	if ((gtfifodbg = I915_READ(GTFIFODBG))) {
		DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
				 gtfifodbg);
		I915_WRITE(GTFIFODBG, gtfifodbg);
	}

	/* If VLV, Forcewake all wells, else re-direct to regular path */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

	I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
	I915_WRITE(GEN6_RP_UP_EI, 66000);
	I915_WRITE(GEN6_RP_DOWN_EI, 350000);

	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);

	I915_WRITE(GEN6_RP_CONTROL,
		   GEN6_RP_MEDIA_TURBO |
		   GEN6_RP_MEDIA_HW_NORMAL_MODE |
		   GEN6_RP_MEDIA_IS_GFX |
		   GEN6_RP_ENABLE |
		   GEN6_RP_UP_BUSY_AVG |
		   GEN6_RP_DOWN_IDLE_CONT);

	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);

	for_each_ring(ring, dev_priv, i)
		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);

	I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);

	/* allows RC6 residency counter to work */
	I915_WRITE(VLV_COUNTER_CONTROL,
		   _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
				      VLV_MEDIA_RC6_COUNT_EN |
				      VLV_RENDER_RC6_COUNT_EN));
	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
		rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;

	intel_print_rc6_info(dev, rc6_mode);

	I915_WRITE(GEN6_RC_CONTROL, rc6_mode);

	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);

	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);

	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
	DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
			 dev_priv->rps.cur_freq);

	DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
			 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
			 dev_priv->rps.efficient_freq);

	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);

	gen6_enable_rps_interrupts(dev);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
}
void ironlake_teardown_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
		dev_priv->ips.renderctx = NULL;
	}

	if (dev_priv->ips.pwrctx) {
		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
		dev_priv->ips.pwrctx = NULL;
	}
}
static void ironlake_disable_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (I915_READ(PWRCTXA)) {
		/* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
		wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
			 50);

		I915_WRITE(PWRCTXA, 0);
		POSTING_READ(PWRCTXA);

		I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
		POSTING_READ(RSTDBYCTL);
	}
}
static int ironlake_setup_rc6(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->ips.renderctx == NULL)
		dev_priv->ips.renderctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.renderctx)
		return -ENOMEM;

	if (dev_priv->ips.pwrctx == NULL)
		dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
	if (!dev_priv->ips.pwrctx) {
		ironlake_teardown_rc6(dev);
		return -ENOMEM;
	}

	return 0;
}
3866 static void ironlake_enable_rc6(struct drm_device
*dev
)
3868 struct drm_i915_private
*dev_priv
= dev
->dev_private
;
3869 struct intel_ring_buffer
*ring
= &dev_priv
->ring
[RCS
];
3870 bool was_interruptible
;
3873 /* rc6 disabled by default due to repeated reports of hanging during
3876 if (!intel_enable_rc6(dev
))
3879 WARN_ON(!mutex_is_locked(&dev
->struct_mutex
));
3881 ret
= ironlake_setup_rc6(dev
);
3885 was_interruptible
= dev_priv
->mm
.interruptible
;
3886 dev_priv
->mm
.interruptible
= false;
3889 * GPU can automatically power down the render unit if given a page
3892 ret
= intel_ring_begin(ring
, 6);
3894 ironlake_teardown_rc6(dev
);
3895 dev_priv
->mm
.interruptible
= was_interruptible
;
3899 intel_ring_emit(ring
, MI_SUSPEND_FLUSH
| MI_SUSPEND_FLUSH_EN
);
3900 intel_ring_emit(ring
, MI_SET_CONTEXT
);
3901 intel_ring_emit(ring
, i915_gem_obj_ggtt_offset(dev_priv
->ips
.renderctx
) |
3903 MI_SAVE_EXT_STATE_EN
|
3904 MI_RESTORE_EXT_STATE_EN
|
3905 MI_RESTORE_INHIBIT
);
3906 intel_ring_emit(ring
, MI_SUSPEND_FLUSH
);
3907 intel_ring_emit(ring
, MI_NOOP
);
3908 intel_ring_emit(ring
, MI_FLUSH
);
3909 intel_ring_advance(ring
);
3912 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3913 * does an implicit flush, combined with MI_FLUSH above, it should be
3914 * safe to assume that renderctx is valid
3916 ret
= intel_ring_idle(ring
);
3917 dev_priv
->mm
.interruptible
= was_interruptible
;
3919 DRM_ERROR("failed to enable ironlake power savings\n");
3920 ironlake_teardown_rc6(dev
);
3924 I915_WRITE(PWRCTXA
, i915_gem_obj_ggtt_offset(dev_priv
->ips
.pwrctx
) | PWRCTX_EN
);
3925 I915_WRITE(RSTDBYCTL
, I915_READ(RSTDBYCTL
) & ~RCX_SW_EXIT
);
3927 intel_print_rc6_info(dev
, GEN6_RC_CTL_RC6_ENABLE
);
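/*
 * Decode a PXVFREQ register value into a frequency: a divider times the
 * 133.333 MHz reference, scaled down by the post and pre dividers packed
 * into the same register. (The result appears to be in kHz, given the
 * /1000 scaling at the call site in intel_init_emon() below.)
 */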
static unsigned long intel_pxfreq(u32 vidfreq)
{
	unsigned long freq;
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = (vidfreq & 0x7);

	if (!pre)
		return 0;

	freq = ((div * 133333) / ((1<<post) * pre));

	return freq;
}
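/*
 * Coefficients for estimating chipset power from the energy event
 * counters: each entry is matched against the cached c_m/r_t values read
 * at init time, and m/c act as the slope and intercept of a linear fit.
 * The exact field semantics are inherited from the original IPS code and
 * are not documented here.
 */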
static const struct cparams {
	u16 i;
	u16 t;
	u16 m;
	u16 c;
} cparams[] = {
	{ 1, 1333, 301, 28664 },
	{ 1, 1066, 294, 24460 },
	{ 1, 800, 294, 25192 },
	{ 0, 1333, 276, 27605 },
	{ 0, 1066, 276, 27605 },
	{ 0, 800, 231, 23784 },
};

static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
{
	u64 total_count, diff, ret;
	u32 count1, count2, count3, m = 0, c = 0;
	unsigned long now = jiffies_to_msecs(jiffies), diff1;
	int i;

	assert_spin_locked(&mchdev_lock);

	diff1 = now - dev_priv->ips.last_time1;

	/* Prevent division-by-zero if we are asking too fast.
	 * Also, we don't get interesting results if we are polling
	 * faster than once in 10ms, so just return the saved value
	 * in such cases.
	 */
	if (diff1 <= 10)
		return dev_priv->ips.chipset_power;

	count1 = I915_READ(DMIEC);
	count2 = I915_READ(DDREC);
	count3 = I915_READ(CSIEC);

	total_count = count1 + count2 + count3;

	/* FIXME: handle per-counter overflow */
	if (total_count < dev_priv->ips.last_count1) {
		diff = ~0UL - dev_priv->ips.last_count1;
		diff += total_count;
	} else {
		diff = total_count - dev_priv->ips.last_count1;
	}

	for (i = 0; i < ARRAY_SIZE(cparams); i++) {
		if (cparams[i].i == dev_priv->ips.c_m &&
		    cparams[i].t == dev_priv->ips.r_t) {
			m = cparams[i].m;
			c = cparams[i].c;
			break;
		}
	}

	diff = div_u64(diff, diff1);
	ret = ((m * diff) + c);
	ret = div_u64(ret, 10);

	dev_priv->ips.last_count1 = total_count;
	dev_priv->ips.last_time1 = now;

	dev_priv->ips.chipset_power = ret;

	return ret;
}

unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_chipset_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
	unsigned long m, x, b;
	u32 tsfs;

	tsfs = I915_READ(TSFS);

	m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
	x = I915_READ8(TR1);

	b = tsfs & TSFS_INTR_MASK;

	return ((m * x) / 127) - b;
}
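/*
 * Translate a raw PXVID voltage code into a voltage value, picking the
 * mobile or desktop column of the lookup table for the platform.
 */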
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
	struct drm_device *dev = dev_priv->dev;
	static const struct v_table {
		u16 vd; /* in .1 mil */
		u16 vm; /* in .1 mil */
	} v_table[] = {
		/* ... */
	};

	if (INTEL_INFO(dev)->is_mobile)
		return v_table[pxvid].vm;
	else
		return v_table[pxvid].vd;
}
static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct timespec now, diff1;
	u64 diff;
	unsigned long diffms;
	u32 count;

	assert_spin_locked(&mchdev_lock);

	getrawmonotonic(&now);
	diff1 = timespec_sub(now, dev_priv->ips.last_time2);

	/* Don't divide by 0 */
	diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
	if (!diffms)
		return;

	count = I915_READ(GFXEC);

	if (count < dev_priv->ips.last_count2) {
		diff = ~0UL - dev_priv->ips.last_count2;
		diff += count;
	} else {
		diff = count - dev_priv->ips.last_count2;
	}

	dev_priv->ips.last_count2 = count;
	dev_priv->ips.last_time2 = now;

	/* More magic constants... */
	diff = diff * 1181;
	diff = div_u64(diff, diffms * 10);
	dev_priv->ips.gfx_power = diff;
}

void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (INTEL_INFO(dev)->gen != 5)
		return;

	spin_lock_irq(&mchdev_lock);

	__i915_update_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);
}

static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
{
	unsigned long t, corr, state1, corr2, state2;
	u32 pxvid, ext_v;

	assert_spin_locked(&mchdev_lock);

	pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
	pxvid = (pxvid >> 24) & 0x7f;
	ext_v = pvid_to_extvid(dev_priv, pxvid);

	state1 = ext_v;

	t = i915_mch_val(dev_priv);

	/* Revel in the empirically derived constants */

	/* Correction factor in 1/100000 units */
	if (t > 80)
		corr = ((t * 2349) + 135940);
	else if (t >= 50)
		corr = ((t * 964) + 29317);
	else /* < 50 */
		corr = ((t * 301) + 1004);

	corr = corr * ((150142 * state1) / 10000 - 78642);
	corr /= 100000;
	corr2 = (corr * dev_priv->ips.corr);

	state2 = (corr2 * state1) / 10000;
	state2 /= 100; /* convert to mW */

	__i915_update_gfx_val(dev_priv);

	return dev_priv->ips.gfx_power + state2;
}

unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long val;

	if (INTEL_INFO(dev)->gen != 5)
		return 0;

	spin_lock_irq(&mchdev_lock);

	val = __i915_gfx_val(dev_priv);

	spin_unlock_irq(&mchdev_lock);

	return val;
}

/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
	struct drm_i915_private *dev_priv;
	unsigned long chipset_val, graphics_val, ret = 0;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	chipset_val = __i915_chipset_val(dev_priv);
	graphics_val = __i915_gfx_val(dev_priv);

	ret = chipset_val + graphics_val;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);

/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
		dev_priv->ips.max_delay--;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);

/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
		dev_priv->ips.max_delay++;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);

/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
	struct drm_i915_private *dev_priv;
	struct intel_ring_buffer *ring;
	bool ret = false;
	int i;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev)
		goto out_unlock;
	dev_priv = i915_mch_dev;

	for_each_ring(ring, dev_priv, i)
		ret |= !list_empty(&ring->request_list);

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);

/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
	struct drm_i915_private *dev_priv;
	bool ret = true;

	spin_lock_irq(&mchdev_lock);
	if (!i915_mch_dev) {
		ret = false;
		goto out_unlock;
	}
	dev_priv = i915_mch_dev;

	dev_priv->ips.max_delay = dev_priv->ips.fstart;

	if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
		ret = false;

out_unlock:
	spin_unlock_irq(&mchdev_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);

/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
	void (*link)(void);

	link = symbol_get(ips_link_to_i915_driver);
	if (link) {
		link();
		symbol_put(ips_link_to_i915_driver);
	}
}

void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
{
	/* We only register the i915 ips part with intel-ips once everything is
	 * set up, to avoid intel-ips sneaking in and reading bogus values. */
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = dev_priv;
	spin_unlock_irq(&mchdev_lock);

	ips_ping_for_i915_load();
}

void intel_gpu_ips_teardown(void)
{
	spin_lock_irq(&mchdev_lock);
	i915_mch_dev = NULL;
	spin_unlock_irq(&mchdev_lock);
}
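/*
 * Program the EMON power-metering unit on Ironlake: energy weights for
 * the various event counters and per-P-state weights derived from the
 * PXVFREQ voltage/frequency table, then enable the monitor.
 */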
static void intel_init_emon(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 lcfuse;
	u8 pxw[16];
	int i;

	/* Disable to program */
	I915_WRITE(ECR, 0);
	POSTING_READ(ECR);

	/* Program energy weights for various events */
	I915_WRITE(SDEW, 0x15040d00);
	I915_WRITE(CSIEW0, 0x007f0000);
	I915_WRITE(CSIEW1, 0x1e220004);
	I915_WRITE(CSIEW2, 0x04000004);

	for (i = 0; i < 5; i++)
		I915_WRITE(PEW + (i * 4), 0);
	for (i = 0; i < 3; i++)
		I915_WRITE(DEW + (i * 4), 0);

	/* Program P-state weights to account for frequency power adjustment */
	for (i = 0; i < 16; i++) {
		u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
		unsigned long freq = intel_pxfreq(pxvidfreq);
		unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
			PXVFREQ_PX_SHIFT;
		unsigned long val;

		val = vid * vid;
		val *= (freq / 1000);
		val *= 255;
		val /= (127*127*900);
		if (val > 0xff)
			DRM_ERROR("bad pxval: %ld\n", val);
		pxw[i] = val;
	}
	/* Render standby states get 0 weight */
	pxw[14] = 0;
	pxw[15] = 0;

	for (i = 0; i < 4; i++) {
		u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
			(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
		I915_WRITE(PXW + (i * 4), val);
	}

	/* Adjust magic regs to magic values (more experimental results) */
	I915_WRITE(OGW0, 0);
	I915_WRITE(OGW1, 0);
	I915_WRITE(EG0, 0x00007f00);
	I915_WRITE(EG1, 0x0000000e);
	I915_WRITE(EG2, 0x000e0000);
	I915_WRITE(EG3, 0x68000300);
	I915_WRITE(EG4, 0x42000000);
	I915_WRITE(EG5, 0x00140031);
	I915_WRITE(EG6, 0);
	I915_WRITE(EG7, 0);

	for (i = 0; i < 8; i++)
		I915_WRITE(PXWL + (i * 4), 0);

	/* Enable PMON + select events */
	I915_WRITE(ECR, 0x80000019);

	lcfuse = I915_READ(LCFUSE02);

	dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
}

void intel_init_gt_powersave(struct drm_device *dev)
{
	i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);

	if (IS_VALLEYVIEW(dev))
		valleyview_init_gt_powersave(dev);
}

void intel_cleanup_gt_powersave(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		valleyview_cleanup_gt_powersave(dev);
}

void intel_disable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Interrupts should be disabled already to avoid re-arming. */
	WARN_ON(dev->irq_enabled);

	if (IS_IRONLAKE_M(dev)) {
		ironlake_disable_drps(dev);
		ironlake_disable_rc6(dev);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
		cancel_work_sync(&dev_priv->rps.work);
		mutex_lock(&dev_priv->rps.hw_lock);
		if (IS_VALLEYVIEW(dev))
			valleyview_disable_rps(dev);
		else
			gen6_disable_rps(dev);
		dev_priv->rps.enabled = false;
		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}
static void intel_gen6_powersave_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     rps.delayed_resume_work.work);
	struct drm_device *dev = dev_priv->dev;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (IS_VALLEYVIEW(dev)) {
		valleyview_enable_rps(dev);
	} else if (IS_BROADWELL(dev)) {
		gen8_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	} else {
		gen6_enable_rps(dev);
		__gen6_update_ring_freq(dev);
	}
	dev_priv->rps.enabled = true;
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_runtime_pm_put(dev_priv);
}

void intel_enable_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_IRONLAKE_M(dev)) {
		mutex_lock(&dev->struct_mutex);
		ironlake_enable_drps(dev);
		ironlake_enable_rc6(dev);
		intel_init_emon(dev);
		mutex_unlock(&dev->struct_mutex);
	} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
		/*
		 * PCU communication is slow and this doesn't need to be
		 * done at any specific time, so do this out of our fast path
		 * to make resume and init faster.
		 *
		 * We depend on the HW RC6 power context save/restore
		 * mechanism when entering D3 through runtime PM suspend. So
		 * disable RPM until RPS/RC6 is properly setup. We can only
		 * get here via the driver load/system resume/runtime resume
		 * paths, so the _noresume version is enough (and in case of
		 * runtime resume it's necessary).
		 */
		if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
					  round_jiffies_up_relative(HZ)))
			intel_runtime_pm_get_noresume(dev_priv);
	}
}
void intel_reset_gt_powersave(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->rps.enabled = false;
	intel_enable_gt_powersave(dev);
}

static void ibx_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}

static void g4x_disable_trickle_feed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		I915_WRITE(DSPCNTR(pipe),
			   I915_READ(DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);
		intel_flush_primary_plane(dev_priv, pipe);
	}
}

static void ilk_init_lp_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
	I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);

	/*
	 * Don't touch WM1S_LP_EN here.
	 * Doing so could cause underruns.
	 */
}
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}

static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
		DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
		DRM_INFO("This can cause pipe underruns and display issues.\n");
		DRM_INFO("Please upgrade your BIOS to fix this.\n");
	}
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaSetupGtModeTdRowDispatch:snb */
	if (IS_SNB_GT1(dev))
		I915_WRITE(GEN6_GT_MODE,
			   _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE  |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}

static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}

static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void gen8_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* FIXME(BDW): Check all the w/a, some might only apply to
	 * pre-production hw. */

	/* WaDisablePartialInstShootdown:bdw */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));

	/* WaDisableThreadStallDopClockGating:bdw */
	/* FIXME: Unclear whether we really need this on production bdw. */
	I915_WRITE(GEN8_ROW_CHICKEN,
		   _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));

	/*
	 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
	 * pre-production hardware
	 */
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
	I915_WRITE(HALF_SLICE_CHICKEN3,
		   _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));

	I915_WRITE(_3D_CHICKEN3,
		   _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));

	I915_WRITE(COMMON_SLICE_CHICKEN2,
		   _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));

	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));

	/* WaDisableDopClockGating:bdw May not be needed for production */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* Use Force Non-Coherent whenever executing a 3D context. This is a
	 * workaround for a possible hang in the unlikely event a TLB
	 * invalidation occurs during a PSD flush.
	 */
	I915_WRITE(HDC_CHICKEN0,
		   I915_READ(HDC_CHICKEN0) |
		   _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/* Wa4x4STCOptimizationDisable:bdw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
	mutex_unlock(&dev_priv->rps.hw_lock);
	switch ((val >> 6) & 3) {
	case 0:
	case 1:
		dev_priv->mem_freq = 800;
		break;
	case 2:
		dev_priv->mem_freq = 1066;
		break;
	case 3:
		dev_priv->mem_freq = 1333;
		break;
	}
	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);

	dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
			 dev_priv->vlv_cdclk_freq);

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv */
	I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}

static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}

static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
}

static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
}

static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}

void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
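/*
 * Iterate over the platform's power wells, skipping any well that does
 * not cover a domain in the given mask; the _rev variant walks the table
 * backwards and is used on the disable path.
 */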
#define for_each_power_well(i, power_well, domain_mask, power_domains)	\
	for (i = 0;							\
	     i < (power_domains)->power_well_count &&			\
		 ((power_well) = &(power_domains)->power_wells[i]);	\
	     i++)							\
		if ((power_well)->domains & (domain_mask))

#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
	for (i = (power_domains)->power_well_count - 1;			 \
	     i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
	     i--)							 \
		if ((power_well)->domains & (domain_mask))

/*
 * We should only use the power well if we explicitly asked the hardware to
 * enable it, so check if it's enabled and also check if we've requested it to
 * be enabled.
 */
static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	return I915_READ(HSW_PWR_WELL_DRIVER) ==
		     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
}

bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
				    enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;

	power_domains = &dev_priv->power_domains;

	return power_domains->domain_use_count[domain];
}

bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
				 enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	bool is_enabled;
	int i;

	if (dev_priv->pm.suspended)
		return false;

	power_domains = &dev_priv->power_domains;

	is_enabled = true;

	mutex_lock(&power_domains->lock);
	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		if (power_well->always_on)
			continue;

		if (!power_well->ops->is_enabled(dev_priv, power_well)) {
			is_enabled = false;
			break;
		}
	}
	mutex_unlock(&power_domains->lock);

	return is_enabled;
}
/*
 * Starting with Haswell, we have a "Power Down Well" that can be turned off
 * when not needed anymore. We have 4 registers that can request the power well
 * to be enabled, and it will only be disabled if none of the registers is
 * requesting it to be enabled.
 */
static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	unsigned long irqflags;

	/*
	 * After we re-enable the power well, if we touch VGA register 0x3d5
	 * we'll get unclaimed register interrupts. This stops after we write
	 * anything to the VGA MSR register. The vgacon module uses this
	 * register all the time, so if we unbind our driver and, as a
	 * consequence, bind vgacon, we'll get stuck in an infinite loop at
	 * console_unlock(). So make sure we touch the VGA MSR register here,
	 * making sure vgacon can keep working normally without triggering
	 * interrupts and error messages.
	 */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);

	if (IS_BROADWELL(dev)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
			   dev_priv->de_irq_mask[PIPE_B]);
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
			   ~dev_priv->de_irq_mask[PIPE_B] |
			   GEN8_PIPE_VBLANK);
		I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
			   dev_priv->de_irq_mask[PIPE_C]);
		I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
			   ~dev_priv->de_irq_mask[PIPE_C] |
			   GEN8_PIPE_VBLANK);
		POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
	}
}

static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
{
	assert_spin_locked(&dev->vbl_lock);

	dev->vblank[pipe].last = 0;
}

static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	enum pipe pipe;
	unsigned long irqflags;

	/*
	 * After this, the registers on the pipes that are part of the power
	 * well will become zero, so we have to adjust our counters according to
	 * that.
	 *
	 * FIXME: Should we do this in general in drm_vblank_post_modeset?
	 */
	spin_lock_irqsave(&dev->vbl_lock, irqflags);
	for_each_pipe(pipe)
		if (pipe != PIPE_A)
			reset_vblank_counter(dev, pipe);
	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
static void hsw_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	bool is_enabled, enable_requested;
	uint32_t tmp;

	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;

	if (enable) {
		if (!enable_requested)
			I915_WRITE(HSW_PWR_WELL_DRIVER,
				   HSW_PWR_WELL_ENABLE_REQUEST);

		if (!is_enabled) {
			DRM_DEBUG_KMS("Enabling power well\n");
			if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
				      HSW_PWR_WELL_STATE_ENABLED), 20))
				DRM_ERROR("Timeout enabling power well\n");
		}

		hsw_power_well_post_enable(dev_priv);
	} else {
		if (enable_requested) {
			I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
			POSTING_READ(HSW_PWR_WELL_DRIVER);
			DRM_DEBUG_KMS("Requesting to disable the power well\n");

			hsw_power_well_post_disable(dev_priv);
		}
	}
}

static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, power_well->count > 0);

	/*
	 * We're taking over the BIOS, so clear any requests made by it since
	 * the driver is in charge now.
	 */
	if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
		I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}

static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, true);
}

static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	hsw_set_power_well(dev_priv, power_well, false);
}

static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
}

static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
					      struct i915_power_well *power_well)
{
	return true;
}
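/*
 * Power wells on Valleyview are controlled through the Punit: write the
 * requested state into PUNIT_REG_PWRGT_CTRL, then poll the status
 * register until the firmware reports the well in that state.
 */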
static void vlv_set_power_well(struct drm_i915_private *dev_priv,
			       struct i915_power_well *power_well, bool enable)
{
	enum punit_power_well power_well_id = power_well->data;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
			 PUNIT_PWRGT_PWR_GATE(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

#define COND \
	((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)

	if (COND)
		goto out;

	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
	ctrl &= ~mask;
	ctrl |= state;
	vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);

	if (wait_for(COND, 100))
		DRM_ERROR("timeout setting power well state %08x (%08x)\n",
			  state,
			  vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));

#undef COND

out:
	mutex_unlock(&dev_priv->rps.hw_lock);
}

static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
}

static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
				  struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, true);
}

static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	vlv_set_power_well(dev_priv, power_well, false);
}

static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	int power_well_id = power_well->data;
	bool enabled = false;
	u32 mask;
	u32 state;
	u32 ctrl;

	mask = PUNIT_PWRGT_MASK(power_well_id);
	ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);

	mutex_lock(&dev_priv->rps.hw_lock);

	state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
	/*
	 * We only ever set the power-on and power-gate states, anything
	 * else is unexpected.
	 */
	WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
		state != PUNIT_PWRGT_PWR_GATE(power_well_id));
	if (state == ctrl)
		enabled = true;

	/*
	 * A transient state at this point would mean some unexpected party
	 * is poking at the power controls too.
	 */
	ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
	WARN_ON(ctrl != state);

	mutex_unlock(&dev_priv->rps.hw_lock);

	return enabled;
}
static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
					  struct i915_power_well *power_well)
{
	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	vlv_set_power_well(dev_priv, power_well, true);

	spin_lock_irq(&dev_priv->irq_lock);
	valleyview_enable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * During driver initialization/resume we can avoid restoring the
	 * part of the HW/SW state that will be inited anyway explicitly.
	 */
	if (dev_priv->power_domains.initializing)
		return;

	intel_hpd_init(dev_priv->dev);

	i915_redisable_vga_power_on(dev_priv->dev);
}

static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
					   struct i915_power_well *power_well)
{
	struct drm_device *dev = dev_priv->dev;
	enum pipe pipe;

	WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);

	spin_lock_irq(&dev_priv->irq_lock);
	for_each_pipe(pipe)
		__intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);

	valleyview_disable_display_irqs(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	spin_lock_irq(&dev->vbl_lock);
	for_each_pipe(pipe)
		reset_vblank_counter(dev, pipe);
	spin_unlock_irq(&dev->vbl_lock);

	vlv_set_power_well(dev_priv, power_well, false);
}
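/*
 * Sanity check that the hardware state of a power well matches its
 * software reference count; a mismatch indicates an unbalanced get/put
 * or an unexpected party toggling the well behind our back.
 */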
static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}

void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}

void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}

static struct i915_power_domains *hsw_pwr;

/* Display audio driver power well request */
void i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);

/* Display audio driver power well release */
void i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
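/*
 * The *_POWER_DOMAINS masks below describe, per platform, which display
 * power domains each power well covers; domains outside the always-on
 * set are routed to the switchable "display" well.
 */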
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};

static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};

static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};

static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};

static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};

static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
};
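/*
 * Wire a platform's power well table into the device's power domain
 * state; the well count is derived from the array size.
 */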
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})

int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}

void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}

static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
		power_well->ops->sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}

void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;
	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}

void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make
	 * RC6 a requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}
	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
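/*
 * Illustrative note: intel_init_pm() only populates the display vfunc
 * table; callers dispatch through it and must tolerate hooks left
 * NULL (e.g. update_wm on a Pineview with unknown CxSR latency):
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 */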
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
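/*
 * Worked example (illustrative): with mem_freq == 1333 (mul/div == 16)
 * one Punit frequency step is mem_freq / (4 * 16) ~= 20.8 MHz, so
 * requesting 320 MHz gives
 *
 *	vlv_freq_opcode(dev_priv, 320)
 *		= DIV_ROUND_CLOSEST(4 * 16 * 320, 1333) + 0xbd - 6
 *		= 15 + 183 = 198 (0xc6)
 *
 * and converting back quantizes to the nearest step:
 *
 *	vlv_gpu_freq(dev_priv, 198)
 *		= DIV_ROUND_CLOSEST(1333 * (198 + 6 - 0xbd), 4 * 16)
 *		= DIV_ROUND_CLOSEST(1333 * 15, 64) = 312 MHz
 */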
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm.irqs_disabled = false;
}