/**************************************************************************
 *
 * Copyright © 2009-2016 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>
#include <linux/console.h>

#include "vmwgfx_drv.h"
#include "vmwgfx_binding.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

#ifndef VMWGFX_GIT_VERSION
#define VMWGFX_GIT_VERSION "Unknown"
#endif

#define VMWGFX_REPO "In Tree"

/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM, \
		 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF, \
		 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF, \
		struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS, \
		struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM, \
		struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM, \
		struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM, \
		struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT \
	DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT, \
		struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE, \
		 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE, \
		struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE, \
		 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF, \
		struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP, \
		struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT, \
		 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED, \
		 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF, \
		struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT, \
		struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT, \
		struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK, \
		struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT, \
		struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER, \
		 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER, \
		struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE, \
		 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)
#define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \
		 struct drm_vmw_context_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
	[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}

static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
		      vmw_kms_cursor_bypass_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),

	VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
		      vmw_fence_obj_signaled_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),

	/* These allow direct access to the framebuffers; mark as master only. */
	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
		      vmw_present_readback_ioctl,
		      DRM_MASTER | DRM_AUTH),
	VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
		      vmw_kms_update_layout_ioctl,
		      DRM_MASTER | DRM_CONTROL_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_SHADER,
		      vmw_shader_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_UNREF_SHADER,
		      vmw_shader_destroy_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
		      vmw_gb_surface_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_RENDER_ALLOW),
	VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT,
		      vmw_extended_context_define_ioctl,
		      DRM_AUTH | DRM_RENDER_ALLOW),
};

static struct pci_device_id vmw_pci_id_list[] = {
	{0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
	{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);

static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
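
/**
 * vmw_print_capabilities - Log the SVGA capability bits advertised by the
 * device, one DRM_INFO line per set bit in @capabilities.
 */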
static void vmw_print_capabilities(uint32_t capabilities)
{
	DRM_INFO("Capabilities:\n");
	if (capabilities & SVGA_CAP_RECT_COPY)
		DRM_INFO(" Rect copy.\n");
	if (capabilities & SVGA_CAP_CURSOR)
		DRM_INFO(" Cursor.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS)
		DRM_INFO(" Cursor bypass.\n");
	if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
		DRM_INFO(" Cursor bypass 2.\n");
	if (capabilities & SVGA_CAP_8BIT_EMULATION)
		DRM_INFO(" 8bit emulation.\n");
	if (capabilities & SVGA_CAP_ALPHA_CURSOR)
		DRM_INFO(" Alpha cursor.\n");
	if (capabilities & SVGA_CAP_3D)
		DRM_INFO(" 3D.\n");
	if (capabilities & SVGA_CAP_EXTENDED_FIFO)
		DRM_INFO(" Extended Fifo.\n");
	if (capabilities & SVGA_CAP_MULTIMON)
		DRM_INFO(" Multimon.\n");
	if (capabilities & SVGA_CAP_PITCHLOCK)
		DRM_INFO(" Pitchlock.\n");
	if (capabilities & SVGA_CAP_IRQMASK)
		DRM_INFO(" Irq mask.\n");
	if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
		DRM_INFO(" Display Topology.\n");
	if (capabilities & SVGA_CAP_GMR)
		DRM_INFO(" Guest memory regions.\n");
	if (capabilities & SVGA_CAP_TRACES)
		DRM_INFO(" Traces.\n");
	if (capabilities & SVGA_CAP_GMR2)
		DRM_INFO(" GMR2.\n");
	if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
		DRM_INFO(" Screen Object 2.\n");
	if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
		DRM_INFO(" Command Buffers.\n");
	if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
		DRM_INFO(" Command Buffers 2.\n");
	if (capabilities & SVGA_CAP_GBOBJECTS)
		DRM_INFO(" Guest Backed Resources.\n");
	if (capabilities & SVGA_CAP_DX)
		DRM_INFO(" DX Features.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		vmw_dmabuf_unreference(&vbo);
	} else
		dev_priv->dummy_query_bo = vbo;

	return ret;
}

/**
 * vmw_request_device_late - Perform late device setup
 *
 * @dev_priv: Pointer to device private.
 *
 * This function performs setup of otables and enables large command
 * buffer submission. These tasks are split out to a separate function
 * because it reverts vmw_release_device_early and is intended to be used
 * by an error path in the hibernation code.
 */
static int vmw_request_device_late(struct vmw_private *dev_priv)
{
	int ret;

	if (dev_priv->has_mob) {
		ret = vmw_otables_setup(dev_priv);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Unable to initialize "
				  "guest Memory OBjects.\n");
			return ret;
		}
	}

	if (dev_priv->cman) {
		ret = vmw_cmdbuf_set_pool_size(dev_priv->cman,
					       256*4096, 2*4096);
		if (ret) {
			struct vmw_cmdbuf_man *man = dev_priv->cman;

			dev_priv->cman = NULL;
			vmw_cmdbuf_man_destroy(man);
		}
	}

	return 0;
}
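
/**
 * vmw_request_device - Bring up the device for command submission.
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the FIFO, fencing and the command buffer manager, performs
 * the late setup and creates the dummy query buffer object. On failure,
 * everything set up so far is torn down again in reverse order.
 */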
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize FIFO.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		dev_priv->cman = NULL;
		dev_priv->has_dx = false;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_fifo_release(dev_priv, &dev_priv->fifo);
	return ret;
}

/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */

	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_dmabuf_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
		vmw_otables_takedown(dev_priv);
	}
}

/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
	uint32_t width;
	uint32_t height;

	width = vmw_read(dev_priv, SVGA_REG_WIDTH);
	height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

	width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
	height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

	if (width > dev_priv->fb_max_width ||
	    height > dev_priv->fb_max_height) {

		/*
		 * This is a host error and shouldn't occur.
		 */

		width = VMW_MIN_INITIAL_WIDTH;
		height = VMW_MIN_INITIAL_HEIGHT;
	}

	dev_priv->initial_width = width;
	dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
	static const char *names[vmw_dma_map_max] = {
		[vmw_dma_phys] = "Using physical TTM page addresses.",
		[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
		[vmw_dma_map_populate] = "Keeping DMA mappings.",
		[vmw_dma_map_bind] = "Giving up DMA mappings early."};
#ifdef CONFIG_X86
	const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_enabled) {
		dev_priv->map_mode = vmw_dma_map_populate;
		goto out_fixup;
	}
#endif

	if (!(vmw_force_iommu || vmw_force_coherent)) {
		dev_priv->map_mode = vmw_dma_phys;
		DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
		return 0;
	}

	dev_priv->map_mode = vmw_dma_map_populate;

	if (dma_ops->sync_single_for_cpu)
		dev_priv->map_mode = vmw_dma_alloc_coherent;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl() == 0)
		dev_priv->map_mode = vmw_dma_map_populate;
#endif

#ifdef CONFIG_INTEL_IOMMU
out_fixup:
#endif
	if (dev_priv->map_mode == vmw_dma_map_populate &&
	    vmw_restrict_iommu)
		dev_priv->map_mode = vmw_dma_map_bind;

	if (vmw_force_coherent)
		dev_priv->map_mode = vmw_dma_alloc_coherent;

#if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU)
	/*
	 * No coherent page pool
	 */
	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		return -EINVAL;
#endif

#else /* CONFIG_X86 */
	dev_priv->map_mode = vmw_dma_map_populate;
#endif /* CONFIG_X86 */

	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

	return 0;
}

/**
 * vmw_dma_masks - set required page- and dma masks
 *
 * @dev_priv: Pointer to struct vmw_private
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (intel_iommu_enabled &&
	    (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
		DRM_INFO("Restricting DMA addresses to 44 bits.\n");
		return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
	}

	return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
	return 0;
}
#endif
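
/**
 * vmw_driver_load - DRM driver load callback.
 *
 * @dev: Pointer to the struct drm_device being loaded.
 * @chipset: Chipset id from the PCI id table.
 *
 * Allocates the per-device private structure, probes SVGA registers and
 * capabilities, and sets up TTM, KMS, fencing, IRQ handling and the
 * optional fbdev before bringing up the device.
 */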
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->enable_fb = enable_fbdev;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width = vmw_read(dev_priv,
						       SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height = vmw_read(dev_priv,
							SVGA_REG_DEV_CAP);
	} else {
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
					 VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
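
/**
 * vmw_driver_unload - DRM driver unload callback; tears down everything
 * set up by vmw_driver_load() in reverse order.
 */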
static int vmw_driver_unload(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	enum vmw_res_type i;

	unregister_pm_notifier(&dev_priv->pm_nb);

	if (dev_priv->ctx.res_ht_initialized)
		drm_ht_remove(&dev_priv->ctx.res_ht);
	vfree(dev_priv->ctx.cmd_bounce);
	if (dev_priv->enable_fb) {
		vmw_fb_off(dev_priv);
		vmw_fb_close(dev_priv);
		vmw_fifo_resource_dec(dev_priv);
		vmw_svga_disable(dev_priv);
	}

	vmw_kms_close(dev_priv);
	vmw_overlay_close(dev_priv);

	if (dev_priv->has_gmr)
		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

	vmw_release_device_early(dev_priv);
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	(void) ttm_bo_device_release(&dev_priv->bdev);
	vmw_release_device_late(dev_priv);
	vmw_fence_manager_takedown(dev_priv->fman);
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);

	ttm_object_device_release(&dev_priv->tdev);
	memunmap(dev_priv->mmio_virt);
	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	vmw_ttm_global_release(dev_priv);

	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	kfree(dev_priv);

	return 0;
}
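
/**
 * vmw_postclose - DRM file close callback; releases a potentially held
 * locked master and the per-file TTM object file.
 */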
static void vmw_postclose(struct drm_device *dev,
			  struct drm_file *file_priv)
{
	struct vmw_fpriv *vmw_fp;

	vmw_fp = vmw_fpriv(file_priv);

	if (vmw_fp->locked_master) {
		struct vmw_master *vmaster =
			vmw_master(vmw_fp->locked_master);

		ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
		ttm_vt_unlock(&vmaster->lock);
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_object_file_release(&vmw_fp->tfile);
	kfree(vmw_fp);
}
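
/**
 * vmw_driver_open - DRM file open callback; allocates the per-file
 * private structure and its TTM object file.
 */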
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp;
	int ret = -ENOMEM;

	vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
	if (unlikely(vmw_fp == NULL))
		return ret;

	vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
	if (unlikely(vmw_fp->tfile == NULL))
		goto out_no_tfile;

	file_priv->driver_priv = vmw_fp;

	return 0;

out_no_tfile:
	kfree(vmw_fp);
	return ret;
}
static struct vmw_master *vmw_master_check(struct drm_device *dev,
					   struct drm_file *file_priv,
					   unsigned int flags)
{
	int ret;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster;

	if (file_priv->minor->type != DRM_MINOR_LEGACY ||
	    !(flags & DRM_AUTH))
		return NULL;

	ret = mutex_lock_interruptible(&dev->master_mutex);
	if (unlikely(ret != 0))
		return ERR_PTR(-ERESTARTSYS);

	if (file_priv->is_master) {
		mutex_unlock(&dev->master_mutex);
		return NULL;
	}

	/*
	 * Check if we were previously master, but now dropped. In that
	 * case, allow at least render node functionality.
	 */
	if (vmw_fp->locked_master) {
		mutex_unlock(&dev->master_mutex);

		if (flags & DRM_RENDER_ALLOW)
			return NULL;

		DRM_ERROR("Dropped master trying to access ioctl that "
			  "requires authentication.\n");
		return ERR_PTR(-EACCES);
	}
	mutex_unlock(&dev->master_mutex);

	/*
	 * Take the TTM lock. Possibly sleep waiting for the authenticating
	 * master to become master again, or for a SIGTERM if the
	 * authenticating master exits.
	 */
	vmaster = vmw_master(file_priv->master);
	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		vmaster = ERR_PTR(ret);

	return vmaster;
}
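
/*
 * vmw_generic_ioctl - Common ioctl entry point for the native and compat
 * paths. Performs extra permission and encoding checks on the driver
 * private ioctls before handing off to @ioctl_func.
 */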
static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg,
			      long (*ioctl_func)(struct file *, unsigned int,
						 unsigned long))
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	unsigned int nr = DRM_IOCTL_NR(cmd);
	struct vmw_master *vmaster;
	unsigned int flags;
	long ret;

	/*
	 * Do extra checking on driver private ioctls.
	 */

	if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
	    && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
		const struct drm_ioctl_desc *ioctl =
			&vmw_ioctls[nr - DRM_COMMAND_BASE];

		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
			if (unlikely(ret != 0))
				return ret;

			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
				goto out_io_encoding;

			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
							_IOC_SIZE(cmd));
		}

		if (unlikely(ioctl->cmd != cmd))
			goto out_io_encoding;

		flags = ioctl->flags;
	} else if (!drm_ioctl_flags(nr, &flags))
		return -EINVAL;

	vmaster = vmw_master_check(dev, file_priv, flags);
	if (IS_ERR(vmaster)) {
		ret = PTR_ERR(vmaster);

		if (ret != -ERESTARTSYS)
			DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
				 nr, ret);
		return ret;
	}

	ret = ioctl_func(filp, cmd, arg);
	if (vmaster)
		ttm_read_unlock(&vmaster->lock);

	return ret;

out_io_encoding:
	DRM_ERROR("Invalid command format, ioctl %d\n",
		  nr - DRM_COMMAND_BASE);

	return -EINVAL;
}

static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
			       unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_ioctl);
}

#ifdef CONFIG_COMPAT
static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	return vmw_generic_ioctl(filp, cmd, arg, &drm_compat_ioctl);
}
#endif

static void vmw_lastclose(struct drm_device *dev)
{
}

static void vmw_master_init(struct vmw_master *vmaster)
{
	ttm_lock_init(&vmaster->lock);
}

static int vmw_master_create(struct drm_device *dev,
			     struct drm_master *master)
{
	struct vmw_master *vmaster;

	vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
	if (unlikely(vmaster == NULL))
		return -ENOMEM;

	vmw_master_init(vmaster);
	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
	master->driver_priv = vmaster;

	return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
			       struct drm_master *master)
{
	struct vmw_master *vmaster = vmw_master(master);

	master->driver_priv = NULL;
	kfree(vmaster);
}

static int vmw_master_set(struct drm_device *dev,
			  struct drm_file *file_priv,
			  bool from_open)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *active = dev_priv->active_master;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret = 0;

	if (active) {
		BUG_ON(active != &dev_priv->fbdev_master);
		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
		if (unlikely(ret != 0))
			return ret;

		ttm_lock_set_kill(&active->lock, true, SIGTERM);
		dev_priv->active_master = NULL;
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
	if (!from_open) {
		ttm_vt_unlock(&vmaster->lock);
		BUG_ON(vmw_fp->locked_master != file_priv->master);
		drm_master_put(&vmw_fp->locked_master);
	}

	dev_priv->active_master = vmaster;
	drm_sysfs_hotplug_event(dev);

	return 0;
}

static void vmw_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv,
			    bool from_release)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * Make sure the master doesn't disappear while we have
	 * it locked.
	 */

	vmw_fp->locked_master = drm_master_get(file_priv->master);
	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
	vmw_kms_legacy_hotspot_clear(dev_priv);
	if (unlikely((ret != 0))) {
		DRM_ERROR("Unable to lock TTM at VT switch.\n");
		drm_master_put(&vmw_fp->locked_master);
	}

	ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

	if (!dev_priv->enable_fb)
		vmw_svga_disable(dev_priv);

	dev_priv->active_master = &dev_priv->fbdev_master;
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	ttm_vt_unlock(&dev_priv->fbdev_master.lock);

	if (dev_priv->enable_fb)
		vmw_fb_on(dev_priv);
}

/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (!dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = true;
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	ttm_read_lock(&dev_priv->reservation_sem, false);
	__vmw_svga_enable(dev_priv);
	ttm_read_unlock(&dev_priv->reservation_sem);
}

/**
 * __vmw_svga_disable - Disable SVGA mode and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in exclusive mode.
 * Will not empty VRAM. VRAM must be emptied by caller.
 */
static void __vmw_svga_disable(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
	spin_unlock(&dev_priv->svga_lock);
}

/**
 * vmw_svga_disable - Disable SVGA mode and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	ttm_write_lock(&dev_priv->reservation_sem, false);
	spin_lock(&dev_priv->svga_lock);
	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
		spin_unlock(&dev_priv->svga_lock);
		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	} else
		spin_unlock(&dev_priv->svga_lock);
	ttm_write_unlock(&dev_priv->reservation_sem);
}

static void vmw_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	pci_disable_device(pdev);
	drm_put_dev(dev);
}
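
/*
 * vmwgfx_pm_notifier - PM notifier callback; quiesces the device on
 * PM_HIBERNATION_PREPARE and brings it back up on PM_POST_HIBERNATION
 * and PM_POST_RESTORE.
 */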
static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
			      void *ptr)
{
	struct vmw_private *dev_priv =
		container_of(nb, struct vmw_private, pm_nb);

	switch (val) {
	case PM_HIBERNATION_PREPARE:
		if (dev_priv->enable_fb)
			vmw_fb_off(dev_priv);
		ttm_suspend_lock(&dev_priv->reservation_sem);

		/*
		 * This empties VRAM and unbinds all GMR bindings.
		 * Buffer contents is moved to swappable memory.
		 */
		vmw_execbuf_release_pinned_bo(dev_priv);
		vmw_resource_evict_all(dev_priv);
		vmw_release_device_early(dev_priv);
		ttm_bo_swapout_all(&dev_priv->bdev);
		vmw_fence_fifo_down(dev_priv->fman);
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_RESTORE:
		vmw_fence_fifo_up(dev_priv->fman);
		ttm_suspend_unlock(&dev_priv->reservation_sem);
		if (dev_priv->enable_fb)
			vmw_fb_on(dev_priv);
		break;
	case PM_RESTORE_PREPARE:
		break;
	default:
		break;
	}
	return 0;
}
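
/*
 * vmw_pci_suspend - Legacy PCI suspend callback; saves PCI state and puts
 * the device in D3hot unless hibernation is currently being refused.
 */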
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	if (dev_priv->refuse_hibernation)
		return -EBUSY;

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct pm_message dummy;

	dummy.event = 0;

	return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);

	return vmw_pci_resume(pdev);
}
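
/*
 * vmw_pm_freeze - Hibernation freeze callback. Fails with -EBUSY while 3D
 * resources are still active, since device state can't be saved in that
 * case; otherwise disables SVGA and releases the device.
 */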
static int vmw_pm_freeze(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);

	dev_priv->suspended = true;
	if (dev_priv->enable_fb)
		vmw_fifo_resource_dec(dev_priv);

	if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
		DRM_ERROR("Can't hibernate while 3D resources are active.\n");
		if (dev_priv->enable_fb)
			vmw_fifo_resource_inc(dev_priv);
		WARN_ON(vmw_request_device_late(dev_priv));
		dev_priv->suspended = false;
		return -EBUSY;
	}

	if (dev_priv->enable_fb)
		__vmw_svga_disable(dev_priv);

	vmw_release_device_late(dev_priv);

	return 0;
}

static int vmw_pm_restore(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct vmw_private *dev_priv = vmw_priv(dev);
	int ret;

	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	(void) vmw_read(dev_priv, SVGA_REG_ID);

	if (dev_priv->enable_fb)
		vmw_fifo_resource_inc(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		return ret;

	if (dev_priv->enable_fb)
		__vmw_svga_enable(dev_priv);

	dev_priv->suspended = false;

	return 0;
}
= {
1472 .freeze
= vmw_pm_freeze
,
1473 .thaw
= vmw_pm_restore
,
1474 .restore
= vmw_pm_restore
,
1475 .suspend
= vmw_pm_suspend
,
1476 .resume
= vmw_pm_resume
,

static const struct file_operations vmwgfx_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = vmw_unlocked_ioctl,
	.mmap = vmw_mmap,
	.poll = vmw_fops_poll,
	.read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = vmw_compat_ioctl,
#endif
	.llseek = noop_llseek,
};

static struct drm_driver driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
	DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
	.load = vmw_driver_load,
	.unload = vmw_driver_unload,
	.lastclose = vmw_lastclose,
	.irq_preinstall = vmw_irq_preinstall,
	.irq_postinstall = vmw_irq_postinstall,
	.irq_uninstall = vmw_irq_uninstall,
	.irq_handler = vmw_irq_handler,
	.get_vblank_counter = vmw_get_vblank_counter,
	.enable_vblank = vmw_enable_vblank,
	.disable_vblank = vmw_disable_vblank,
	.ioctls = vmw_ioctls,
	.num_ioctls = ARRAY_SIZE(vmw_ioctls),
	.master_create = vmw_master_create,
	.master_destroy = vmw_master_destroy,
	.master_set = vmw_master_set,
	.master_drop = vmw_master_drop,
	.open = vmw_driver_open,
	.postclose = vmw_postclose,
	.set_busid = drm_pci_set_busid,

	.dumb_create = vmw_dumb_create,
	.dumb_map_offset = vmw_dumb_map_offset,
	.dumb_destroy = vmw_dumb_destroy,

	.prime_fd_to_handle = vmw_prime_fd_to_handle,
	.prime_handle_to_fd = vmw_prime_handle_to_fd,

	.fops = &vmwgfx_driver_fops,
	.name = VMWGFX_DRIVER_NAME,
	.desc = VMWGFX_DRIVER_DESC,
	.date = VMWGFX_DRIVER_DATE,
	.major = VMWGFX_DRIVER_MAJOR,
	.minor = VMWGFX_DRIVER_MINOR,
	.patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

static struct pci_driver vmw_pci_driver = {
	.name = VMWGFX_DRIVER_NAME,
	.id_table = vmw_pci_id_list,
	.probe = vmw_probe,
	.remove = vmw_remove,
	.driver = {
		.pm = &vmw_pm_ops
	}
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &driver);
}

static int __init vmwgfx_init(void)
{
	int ret;

	if (vgacon_text_force())
		return -EINVAL;

	ret = drm_pci_init(&driver, &vmw_pci_driver);
	if (ret)
		DRM_ERROR("Failed initializing DRM.\n");
	return ret;
}

static void __exit vmwgfx_exit(void)
{
	drm_pci_exit(&driver, &vmw_pci_driver);
}

module_init(vmwgfx_init);
module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
	       __stringify(VMWGFX_DRIVER_MINOR) "."
	       __stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
	       "0");