drivers/gpu/drm/cirrus/cirrus_main.c
/*
 * Copyright 2012 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License version 2. See the file COPYING in the main
 * directory of this archive for more details.
 *
 * Authors: Matthew Garrett
 *          Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>

#include "cirrus_drv.h"

static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
        struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);

        drm_gem_object_unreference_unlocked(cirrus_fb->obj);
        drm_framebuffer_cleanup(fb);
        kfree(fb);
}

static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
        .destroy = cirrus_user_framebuffer_destroy,
};

int cirrus_framebuffer_init(struct drm_device *dev,
                            struct cirrus_framebuffer *gfb,
                            const struct drm_mode_fb_cmd2 *mode_cmd,
                            struct drm_gem_object *obj)
{
        int ret;

        drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd);
        gfb->obj = obj;
        ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs);
        if (ret) {
                DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct drm_framebuffer *
cirrus_user_framebuffer_create(struct drm_device *dev,
                               struct drm_file *filp,
                               const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct cirrus_device *cdev = dev->dev_private;
        struct drm_gem_object *obj;
        struct cirrus_framebuffer *cirrus_fb;
        int ret;
        u32 bpp, depth;

        drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);

        if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
                                      bpp, mode_cmd->pitches[0]))
                return ERR_PTR(-EINVAL);

        obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
        if (obj == NULL)
                return ERR_PTR(-ENOENT);

        cirrus_fb = kzalloc(sizeof(*cirrus_fb), GFP_KERNEL);
        if (!cirrus_fb) {
                drm_gem_object_unreference_unlocked(obj);
                return ERR_PTR(-ENOMEM);
        }

        ret = cirrus_framebuffer_init(dev, cirrus_fb, mode_cmd, obj);
        if (ret) {
                drm_gem_object_unreference_unlocked(obj);
                kfree(cirrus_fb);
                return ERR_PTR(ret);
        }
        return &cirrus_fb->base;
}

static const struct drm_mode_config_funcs cirrus_mode_funcs = {
        .fb_create = cirrus_user_framebuffer_create,
};

/* Unmap the register MMIO range and release the reserved VRAM region */
static void cirrus_vram_fini(struct cirrus_device *cdev)
{
        iounmap(cdev->rmmio);
        cdev->rmmio = NULL;
        if (cdev->mc.vram_base)
                release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
}

/* Record the VRAM BAR and reserve the region for the driver */
static int cirrus_vram_init(struct cirrus_device *cdev)
{
        /* BAR 0 is VRAM */
        cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
        cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);

        if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
                                "cirrusdrmfb_vram")) {
                DRM_ERROR("can't reserve VRAM\n");
                return -ENXIO;
        }

        return 0;
}

/*
 * Our emulated hardware has two sets of memory. One is video RAM, which can
 * simply be used as a linear framebuffer; the other provides MMIO access to
 * the display registers. The registers can also be reached through I/O
 * ports, but we map the MMIO range and program them through that instead.
 */
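
/*
 * For illustration only (SOME_REG_OFFSET below is a hypothetical name, not a
 * register defined by this driver): once BAR 1 is mapped into cdev->rmmio,
 * programming a display register is a plain MMIO access:
 *
 *	iowrite8(value, cdev->rmmio + SOME_REG_OFFSET);
 *	status = ioread8(cdev->rmmio + SOME_REG_OFFSET);
 */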

int cirrus_device_init(struct cirrus_device *cdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev, uint32_t flags)
{
        int ret;

        cdev->dev = ddev;
        cdev->flags = flags;

        /* Hardcode the number of CRTCs to 1 */
        cdev->num_crtc = 1;

        /* BAR 0 is the framebuffer, BAR 1 contains registers */
        cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
        cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);

        if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
                                "cirrusdrmfb_mmio")) {
                DRM_ERROR("can't reserve mmio registers\n");
                return -ENOMEM;
        }

        cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);

        if (cdev->rmmio == NULL)
                return -ENOMEM;

        ret = cirrus_vram_init(cdev);
        if (ret) {
                release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
                return ret;
        }

        return 0;
}

void cirrus_device_fini(struct cirrus_device *cdev)
{
        release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
        cirrus_vram_fini(cdev);
}

/*
 * Functions here will be called by the core once it has bound the driver to
 * a PCI device.
 */
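
/*
 * A minimal sketch, assumed here for illustration (the real table lives in
 * cirrus_drv.c), of how these entry points are typically wired into the DRM
 * core through struct drm_driver:
 *
 *	static struct drm_driver driver = {
 *		.load			= cirrus_driver_load,
 *		.unload			= cirrus_driver_unload,
 *		.gem_free_object	= cirrus_gem_free_object,
 *		.dumb_create		= cirrus_dumb_create,
 *		.dumb_map_offset	= cirrus_dumb_mmap_offset,
 *	};
 */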

int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct cirrus_device *cdev;
        int r;

        cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
        if (cdev == NULL)
                return -ENOMEM;
        dev->dev_private = (void *)cdev;

        r = cirrus_device_init(cdev, dev, dev->pdev, flags);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
                goto out;
        }

        r = cirrus_mm_init(cdev);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during mm init: %d\n", r);
                goto out;
        }

        /*
         * cirrus_modeset_init() initializes and registers the emulated fbdev,
         * and the DRM core may access/test some of the fields in
         * mode_config->funcs as part of the fbdev registration process.
         * Make sure dev->mode_config.funcs is properly set to avoid
         * dereferencing a NULL pointer.
         * FIXME: mode_config.funcs assignment should probably be done in
         * cirrus_modeset_init() (that's a common pattern seen in other DRM
         * drivers).
         */
        dev->mode_config.funcs = &cirrus_mode_funcs;
        r = cirrus_modeset_init(cdev);
        if (r) {
                dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
                goto out;
        }

        return 0;
out:
        cirrus_driver_unload(dev);
        return r;
}

int cirrus_driver_unload(struct drm_device *dev)
{
        struct cirrus_device *cdev = dev->dev_private;

        if (cdev == NULL)
                return 0;
        cirrus_modeset_fini(cdev);
        cirrus_mm_fini(cdev);
        cirrus_device_fini(cdev);
        kfree(cdev);
        dev->dev_private = NULL;
        return 0;
}

int cirrus_gem_create(struct drm_device *dev,
                      u32 size, bool iskernel,
                      struct drm_gem_object **obj)
{
        struct cirrus_bo *cirrusbo;
        int ret;

        *obj = NULL;

        size = roundup(size, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;

        ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("failed to allocate GEM object\n");
                return ret;
        }
        *obj = &cirrusbo->gem;
        return 0;
}

int cirrus_dumb_create(struct drm_file *file,
                       struct drm_device *dev,
                       struct drm_mode_create_dumb *args)
{
        int ret;
        struct drm_gem_object *gobj;
        u32 handle;

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;
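        /*
         * e.g. (illustrative numbers only): a 1024x768 buffer at 32 bpp gets
         * pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB.
         */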

        ret = cirrus_gem_create(dev, args->size, false,
                                &gobj);
        if (ret)
                return ret;

        ret = drm_gem_handle_create(file, gobj, &handle);
        drm_gem_object_unreference_unlocked(gobj);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

static void cirrus_bo_unref(struct cirrus_bo **bo)
{
        struct ttm_buffer_object *tbo;

        if ((*bo) == NULL)
                return;

        tbo = &((*bo)->bo);
        ttm_bo_unref(&tbo);
        *bo = NULL;
}

void cirrus_gem_free_object(struct drm_gem_object *obj)
{
        struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);

        cirrus_bo_unref(&cirrus_bo);
}

static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
        return drm_vma_node_offset_addr(&bo->bo.vma_node);
}

int
cirrus_dumb_mmap_offset(struct drm_file *file,
                        struct drm_device *dev,
                        uint32_t handle,
                        uint64_t *offset)
{
        struct drm_gem_object *obj;
        struct cirrus_bo *bo;

        obj = drm_gem_object_lookup(file, handle);
        if (obj == NULL)
                return -ENOENT;

        bo = gem_to_cirrus_bo(obj);
        *offset = cirrus_bo_mmap_offset(bo);

        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
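
/*
 * The offset returned above is what userspace passes to mmap() to map the
 * dumb buffer. A rough sketch of the userspace side (generic DRM dumb-buffer
 * usage, not cirrus-specific; error handling omitted):
 *
 *	struct drm_mode_map_dumb map = { .handle = handle };
 *
 *	ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, map.offset);
 */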

bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
                              int bpp, int pitch)
{
        const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
        const int max_size = cdev->mc.vram_size;

        if (bpp > cirrus_bpp)
                return false;
        if (bpp > 32)
                return false;

        if (pitch > max_pitch)
                return false;

        if (pitch * height > max_size)
                return false;

        return true;
}
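
/*
 * For example (numbers derived from the limits above): the 4088-byte pitch
 * cap allows at most 1022 pixels of width at 32 bpp (4088 / 4), and a
 * 1024x768 mode at 16 bpp needs 1024 * 2 * 768 = 1.5 MiB, which must fit in
 * cdev->mc.vram_size.
 */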