/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	case I915_PARAM_HAS_EXEC_SOFTPIN:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4 * 4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Set up MCHBAR if possible; remember whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.can_switch = i915_switcheroo_can_switch,
};
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_bios_init(dev_priv);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	intel_power_domains_init_hw(dev_priv, false);

	intel_csr_ucode_init(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_csr;

	intel_setup_gmbus(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	intel_guc_ucode_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config_async(dev);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	intel_guc_ucode_fini(dev);
	drm_irq_uninstall(dev);
	intel_teardown_gmbus(dev);
cleanup_csr:
	intel_csr_ucode_fini(dev_priv);
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
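/*
 * DEV_INFO_FOR_EACH_FLAG is an X-macro that applies the supplied macro to
 * every device info flag. With PRINT_S it contributes one "%s" per flag to
 * the format string above; with PRINT_FLAG it supplies the matching
 * arguments. A rough, abbreviated sketch of what the preprocessor produces
 * (the flag names here are only examples):
 *
 *	DRM_DEBUG_DRIVER("i915 device info: ... flags=" "%s" "%s" ...,
 *			 info->gen, ...,
 *			 info->is_mobile ? "is_mobile," : "",
 *			 info->has_llc ? "has_llc," : "", ...);
 */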
static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
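/*
 * Worked example with a hypothetical fuse value: if CHV_FUSE_GT reports
 * subslice 0 enabled with two of its eight EUs fused off and subslice 1
 * disabled entirely, then hweight32(eu_dis) == 2 for SS0, so
 * eu_total = 8 - 2 = 6, subslice_total = 1 and eu_per_subslice = 6 / 1 = 6;
 * EU power gating is then reported as available (6 > 2) while subslice
 * power gating is not (only one subslice).
 */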
static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss * 8)) &
						      eu_mask);

			/*
			 * Record which subslice(s) has(have) 7 EUs. we
			 * can tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			      (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}
static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (e.g. read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   HAS_PCH_SPLIT(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D).
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C).
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL)
		goto out_free_dp_wq;

	return 0;

out_free_dp_wq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}

static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}
static int i915_mmio_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int mmio_bar;
	int mmio_size;

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/*
	 * Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (INTEL_INFO(dev)->gen < 5)
		mmio_size = 512 * 1024;
	else
		mmio_size = 2 * 1024 * 1024;
	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (dev_priv->regs == NULL) {
		DRM_ERROR("failed to map registers\n");
		return -EIO;
	}

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);

	return 0;
}

static void i915_mmio_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_teardown_mchbar(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->av_mutex);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		goto out_free_priv;

	intel_pm_setup(dev);

	intel_runtime_pm_get(dev_priv);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto out_runtime_pm_put;
	}

	ret = i915_mmio_setup(dev);
	if (ret < 0)
		goto put_bridge;

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_uncore_fini;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	intel_opregion_setup(dev);

	i915_gem_load_init(dev);
	i915_gem_shrinker_init(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev)) {
		if (pci_enable_msi(dev->pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	intel_runtime_pm_put(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	i915_gem_shrinker_cleanup(dev_priv);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_uncore_fini:
	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
	i915_gem_load_cleanup(dev);
out_runtime_pm_put:
	intel_runtime_pm_put(dev_priv);
	i915_workqueues_cleanup(dev_priv);
out_free_priv:
	kfree(dev_priv);

	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_fbdev_fini(dev);

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	i915_gem_shrinker_cleanup(dev_priv);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	intel_csr_ucode_fini(dev_priv);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	intel_guc_ucode_fini(dev);
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);

	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	i915_mmio_cleanup(dev);

	i915_gem_load_cleanup(dev);
	pci_dev_put(dev_priv->bridge_dev);
	i915_workqueues_cleanup(dev_priv);
	kfree(dev_priv);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any state for the hardware.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);