gma500/cdv: add the bits that don't need the new code
deliverable/linux.git: drivers/gpu/drm/gma500/framebuffer.c
1 /**************************************************************************
2 * Copyright (c) 2007-2011, Intel Corporation.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 *
18 **************************************************************************/
19
20 #include <linux/module.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/string.h>
24 #include <linux/mm.h>
25 #include <linux/tty.h>
26 #include <linux/slab.h>
27 #include <linux/delay.h>
28 #include <linux/fb.h>
29 #include <linux/init.h>
30 #include <linux/console.h>
31
32 #include <drm/drmP.h>
33 #include <drm/drm.h>
34 #include <drm/drm_crtc.h>
35 #include <drm/drm_fb_helper.h>
36
37 #include "psb_drv.h"
38 #include "psb_intel_reg.h"
39 #include "psb_intel_drv.h"
40 #include "framebuffer.h"
41 #include "gtt.h"
42
43 static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
44 static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
45 struct drm_file *file_priv,
46 unsigned int *handle);
47
48 static const struct drm_framebuffer_funcs psb_fb_funcs = {
49 .destroy = psb_user_framebuffer_destroy,
50 .create_handle = psb_user_framebuffer_create_handle,
51 };
52
53 #define CMAP_TOHW(_val, _width) ((((_val) << (_width)) + 0x7FFF - (_val)) >> 16)
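/*
 * Worked example (illustrative numbers): CMAP_TOHW rescales a 16-bit
 * colormap component to the width of the hardware field, i.e. it computes
 * (val * ((1 << width) - 1) + 0x7FFF) >> 16. For a 5-bit field, 0xFFFF
 * maps to 0x1F, 0x8000 maps to 0xF and 0x0000 maps to 0.
 */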
54
55 static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green,
56 unsigned blue, unsigned transp,
57 struct fb_info *info)
58 {
59 struct psb_fbdev *fbdev = info->par;
60 struct drm_framebuffer *fb = fbdev->psb_fb_helper.fb;
61 uint32_t v;
62
63 if (!fb)
64 return -ENOMEM;
65
66 if (regno > 255)
67 return 1;
68
69 red = CMAP_TOHW(red, info->var.red.length);
70 blue = CMAP_TOHW(blue, info->var.blue.length);
71 green = CMAP_TOHW(green, info->var.green.length);
72 transp = CMAP_TOHW(transp, info->var.transp.length);
73
74 v = (red << info->var.red.offset) |
75 (green << info->var.green.offset) |
76 (blue << info->var.blue.offset) |
77 (transp << info->var.transp.offset);
78
79 if (regno < 16) {
80 switch (fb->bits_per_pixel) {
81 case 16:
82 ((uint32_t *) info->pseudo_palette)[regno] = v;
83 break;
84 case 24:
85 case 32:
86 ((uint32_t *) info->pseudo_palette)[regno] = v;
87 break;
88 }
89 }
90
91 return 0;
92 }
93
94 static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
95 {
96 struct psb_fbdev *fbdev = info->par;
97 struct psb_framebuffer *psbfb = &fbdev->pfb;
98 struct drm_device *dev = psbfb->base.dev;
99
100 /*
101 * We have to poke our nose in here. The core fb code assumes
102 * panning is a hardware operation that can be invoked before
103 * the actual fb is mapped. In our case that isn't quite true.
104 */
105 if (psbfb->gtt->npage) {
106 /* GTT roll shifts in 4K pages, we need to shift the right
107 number of pages */
108 int pages = info->fix.line_length >> 12;
109 psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
110 }
111 return 0;
112 }
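/*
 * Worked example (illustrative numbers): for a 1024x768 console at 32bpp
 * the pitch chosen in psbfb_create() is 4096 bytes, so each scanline is
 * exactly one GTT page and "pages" is 1. A yoffset of 300 lines then rolls
 * the GTT mapping by 300 pages; no pixel data is copied.
 */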
113
114 static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
115 {
116 struct psb_framebuffer *psbfb = vma->vm_private_data;
117 struct drm_device *dev = psbfb->base.dev;
118 struct drm_psb_private *dev_priv = dev->dev_private;
119 int page_num;
120 int i;
121 unsigned long address;
122 int ret;
123 unsigned long pfn;
124 /* FIXME: assumes fb at stolen base which may not be true */
125 unsigned long phys_addr = (unsigned long)dev_priv->stolen_base;
126
127 page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
128 address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
129
130 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
131
132 for (i = 0; i < page_num; i++) {
133 pfn = (phys_addr >> PAGE_SHIFT);
134
135 ret = vm_insert_mixed(vma, address, pfn);
136 if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
137 break;
138 else if (unlikely(ret != 0)) {
139 ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
140 return ret;
141 }
142 address += PAGE_SIZE;
143 phys_addr += PAGE_SIZE;
144 }
145 return VM_FAULT_NOPAGE;
146 }
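/*
 * Worked example (illustrative numbers): for the 1024x768, 32bpp case
 * above (pitch 4096, 3 MiB of stolen memory), page_num is 768 and the
 * first fault inserts all 768 consecutive PFNs starting at
 * stolen_base >> PAGE_SHIFT, so later accesses never fault again.
 */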
147
148 static void psbfb_vm_open(struct vm_area_struct *vma)
149 {
150 }
151
152 static void psbfb_vm_close(struct vm_area_struct *vma)
153 {
154 }
155
156 static const struct vm_operations_struct psbfb_vm_ops = {
157 .fault = psbfb_vm_fault,
158 .open = psbfb_vm_open,
159 .close = psbfb_vm_close
160 };
161
162 static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
163 {
164 struct psb_fbdev *fbdev = info->par;
165 struct psb_framebuffer *psbfb = &fbdev->pfb;
166
167 if (vma->vm_pgoff != 0)
168 return -EINVAL;
169 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
170 return -EINVAL;
171
172 if (!psbfb->addr_space)
173 psbfb->addr_space = vma->vm_file->f_mapping;
174 /*
175 * If this is a GEM object then info->screen_base is the virtual
176 * kernel remapping of the object. FIXME: Review if this is
177 * suitable for our mmap work
178 */
179 vma->vm_ops = &psbfb_vm_ops;
180 vma->vm_private_data = (void *)psbfb;
181 vma->vm_flags |= VM_RESERVED | VM_IO |
182 VM_MIXEDMAP | VM_DONTEXPAND;
183 return 0;
184 }
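/*
 * A minimal userspace sketch of the path above (device node and helper
 * name are placeholders, not taken from this file): open the fbdev node,
 * query the geometry, mmap at offset 0 as psbfb_mmap() requires, then
 * touch the pages so psbfb_vm_fault() populates the mapping from stolen
 * memory.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/fb.h>

static int clear_psb_fb(const char *node)
{
	struct fb_var_screeninfo var;
	struct fb_fix_screeninfo fix;
	void *map;
	int fd = open(node, O_RDWR);		/* e.g. "/dev/fb0" */

	if (fd < 0)
		return -1;
	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) ||
	    ioctl(fd, FBIOGET_FSCREENINFO, &fix)) {
		close(fd);
		return -1;
	}
	map = mmap(NULL, fix.smem_len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, 0);			/* offset must be 0 */
	if (map == MAP_FAILED) {
		close(fd);
		return -1;
	}
	memset(map, 0, var.yres * fix.line_length);
	munmap(map, fix.smem_len);
	close(fd);
	return 0;
}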
185
186 static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
187 unsigned long arg)
188 {
189 return -ENOTTY;
190 }
191
192 static struct fb_ops psbfb_ops = {
193 .owner = THIS_MODULE,
194 .fb_check_var = drm_fb_helper_check_var,
195 .fb_set_par = drm_fb_helper_set_par,
196 .fb_blank = drm_fb_helper_blank,
197 .fb_setcolreg = psbfb_setcolreg,
198 .fb_fillrect = cfb_fillrect,
199 .fb_copyarea = psbfb_copyarea,
200 .fb_imageblit = cfb_imageblit,
201 .fb_mmap = psbfb_mmap,
202 .fb_sync = psbfb_sync,
203 .fb_ioctl = psbfb_ioctl,
204 };
205
206 static struct fb_ops psbfb_roll_ops = {
207 .owner = THIS_MODULE,
208 .fb_check_var = drm_fb_helper_check_var,
209 .fb_set_par = drm_fb_helper_set_par,
210 .fb_blank = drm_fb_helper_blank,
211 .fb_setcolreg = psbfb_setcolreg,
212 .fb_fillrect = cfb_fillrect,
213 .fb_copyarea = cfb_copyarea,
214 .fb_imageblit = cfb_imageblit,
215 .fb_pan_display = psbfb_pan,
216 .fb_mmap = psbfb_mmap,
217 .fb_ioctl = psbfb_ioctl,
218 };
219
220 static struct fb_ops psbfb_unaccel_ops = {
221 .owner = THIS_MODULE,
222 .fb_check_var = drm_fb_helper_check_var,
223 .fb_set_par = drm_fb_helper_set_par,
224 .fb_blank = drm_fb_helper_blank,
225 .fb_setcolreg = psbfb_setcolreg,
226 .fb_fillrect = cfb_fillrect,
227 .fb_copyarea = cfb_copyarea,
228 .fb_imageblit = cfb_imageblit,
229 .fb_mmap = psbfb_mmap,
230 .fb_ioctl = psbfb_ioctl,
231 };
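/*
 * How the three variants above are used (see psbfb_create() below):
 * psbfb_ops when the 2D engine is available and the pitch allows it,
 * psbfb_roll_ops when scrolling can be done by rolling the GTT mapping
 * (FBINFO_HWACCEL_YPAN), and psbfb_unaccel_ops as the plain software
 * fallback.
 */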
232
233 /**
234 * psb_framebuffer_init - initialize a framebuffer
235 * @dev: our DRM device
236 * @fb: framebuffer to set up
237 * @mode_cmd: mode description
238 * @gt: backing object
239 *
240 * Configure and fill in the boilerplate for our frame buffer. Return
241 * 0 on success or an error code if we fail.
242 */
243 static int psb_framebuffer_init(struct drm_device *dev,
244 struct psb_framebuffer *fb,
245 struct drm_mode_fb_cmd2 *mode_cmd,
246 struct gtt_range *gt)
247 {
248 u32 bpp, depth;
249 int ret;
250
251 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
252
253 if (mode_cmd->pitches[0] & 63)
254 return -EINVAL;
255 switch (bpp) {
256 case 8:
257 case 16:
258 case 24:
259 case 32:
260 break;
261 default:
262 return -EINVAL;
263 }
264 ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
265 if (ret) {
266 dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
267 return ret;
268 }
269 drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
270 fb->gtt = gt;
271 return 0;
272 }
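/*
 * Worked example (illustrative numbers): a 1366x768 XRGB8888 buffer has a
 * minimum pitch of 1366 * 4 = 5464 bytes; 5464 & 63 == 24, so the mode is
 * rejected above and userspace must pad the pitch to the next 64-byte
 * multiple, 5504, before the framebuffer can be created.
 */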
273
274 /**
275 * psb_framebuffer_create - create a framebuffer backed by gt
276 * @dev: our DRM device
277 * @mode_cmd: the description of the requested mode
278 * @gt: the backing object
279 *
280 * Create a framebuffer object backed by the gt, and fill in the
281 * boilerplate required
282 *
283 * TODO: review object references
284 */
285
286 static struct drm_framebuffer *psb_framebuffer_create
287 (struct drm_device *dev,
288 struct drm_mode_fb_cmd2 *mode_cmd,
289 struct gtt_range *gt)
290 {
291 struct psb_framebuffer *fb;
292 int ret;
293
294 fb = kzalloc(sizeof(*fb), GFP_KERNEL);
295 if (!fb)
296 return ERR_PTR(-ENOMEM);
297
298 ret = psb_framebuffer_init(dev, fb, mode_cmd, gt);
299 if (ret) {
300 kfree(fb);
301 return ERR_PTR(ret);
302 }
303 return &fb->base;
304 }
305
306 /**
307 * psbfb_alloc - allocate frame buffer memory
308 * @dev: the DRM device
309 * @aligned_size: space needed
310 *
311 * Allocate the frame buffer. In the usual case we get a GTT range that
312 * is stolen memory backed and life is simple. If there isn't sufficient
313 * stolen memory available we fail, as we don't have the virtual mapping
314 * space to vmap a fallback buffer and the kernel console code can't
315 * handle non-linear framebuffers.
316 *
317 * Re-address this as and if the framebuffer layer grows this ability.
318 */
319 static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
320 {
321 struct gtt_range *backing;
322 /* Begin by trying to use stolen memory backing */
323 backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
324 if (backing) {
325 if (drm_gem_private_object_init(dev,
326 &backing->gem, aligned_size) == 0)
327 return backing;
328 psb_gtt_free_range(dev, backing);
329 }
330 return NULL;
331 }
332
333 /**
334 * psbfb_create - create a framebuffer
335 * @fbdev: the framebuffer device
336 * @sizes: specification of the layout
337 *
338 * Create a framebuffer to the specifications provided
339 */
340 static int psbfb_create(struct psb_fbdev *fbdev,
341 struct drm_fb_helper_surface_size *sizes)
342 {
343 struct drm_device *dev = fbdev->psb_fb_helper.dev;
344 struct drm_psb_private *dev_priv = dev->dev_private;
345 struct fb_info *info;
346 struct drm_framebuffer *fb;
347 struct psb_framebuffer *psbfb = &fbdev->pfb;
348 struct drm_mode_fb_cmd2 mode_cmd;
349 struct device *device = &dev->pdev->dev;
350 int size;
351 int ret;
352 struct gtt_range *backing;
353 u32 bpp, depth;
354 int gtt_roll = 0;
355 int pitch_lines = 0;
356
357 mode_cmd.width = sizes->surface_width;
358 mode_cmd.height = sizes->surface_height;
359 bpp = sizes->surface_bpp;
360 depth = sizes->surface_depth;
361
362 /* No 24bit packed */
363 if (bpp == 24)
364 bpp = 32;
365
366 do {
367 /*
368 * Acceleration via the GTT requires pitch to be
369 * power of two aligned. Preferably page but less
370 * is ok with some fonts
371 */
372 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
373
374 size = mode_cmd.pitches[0] * mode_cmd.height;
375 size = ALIGN(size, PAGE_SIZE);
376
377 /* Allocate the fb in the GTT with stolen page backing */
378 backing = psbfb_alloc(dev, size);
379
380 if (pitch_lines)
381 pitch_lines *= 2;
382 else
383 pitch_lines = 1;
384 gtt_roll++;
385 } while (backing == NULL && pitch_lines <= 16);
386
387 /* The final pitch we accepted if we succeeded */
388 pitch_lines /= 2;
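/*
 * Worked example (illustrative numbers): a 1024x768 console at 32bpp
 * first asks for ALIGN(1024 * 4, 4096) = 4096 bytes of pitch, i.e.
 * 4096 * 768 = 3 MiB of stolen memory. If that allocation fails the
 * alignment is progressively relaxed (2048, 1024, 256, 16 bytes) while
 * pitch_lines doubles, before giving up and taking the 64-byte-aligned
 * fallback below.
 */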
389
390 if (backing == NULL) {
391 /*
392 * We couldn't get the space we wanted, so fall back to the
393 * display engine requirement instead. The hardware requires
394 * the pitch to be 64-byte aligned
395 */
396
397 gtt_roll = 0; /* Don't use GTT accelerated scrolling */
398 pitch_lines = 64;
399
400 mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
401
402 size = mode_cmd.pitches[0] * mode_cmd.height;
403 size = ALIGN(size, PAGE_SIZE);
404
405 /* Allocate the framebuffer in the GTT with stolen page backing */
406 backing = psbfb_alloc(dev, size);
407 if (backing == NULL)
408 return -ENOMEM;
409 }
410
411 memset(dev_priv->vram_addr + backing->offset, 0, size);
412
413 mutex_lock(&dev->struct_mutex);
414
415 info = framebuffer_alloc(0, device);
416 if (!info) {
417 ret = -ENOMEM;
418 goto out_err1;
419 }
420 info->par = fbdev;
421
422 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
423
424 ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
425 if (ret)
426 goto out_unref;
427
428 fb = &psbfb->base;
429 psbfb->fbdev = info;
430
431 fbdev->psb_fb_helper.fb = fb;
432 fbdev->psb_fb_helper.fbdev = info;
433
434 drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
435 strcpy(info->fix.id, "psbfb");
436
437 info->flags = FBINFO_DEFAULT;
438 if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
439 info->fbops = &psbfb_ops;
440 else if (gtt_roll) { /* GTT rolling seems best */
441 info->fbops = &psbfb_roll_ops;
442 info->flags |= FBINFO_HWACCEL_YPAN;
443 } else /* Software */
444 info->fbops = &psbfb_unaccel_ops;
445
446 ret = fb_alloc_cmap(&info->cmap, 256, 0);
447 if (ret) {
448 ret = -ENOMEM;
449 goto out_unref;
450 }
451
452 info->fix.smem_start = dev->mode_config.fb_base;
453 info->fix.smem_len = size;
454 info->fix.ywrapstep = gtt_roll;
455 info->fix.ypanstep = 0;
456
457 /* Access the stolen memory directly */
458 info->screen_base = dev_priv->vram_addr + backing->offset;
459 info->screen_size = size;
460
461 if (dev_priv->gtt.stolen_size) {
462 info->apertures = alloc_apertures(1);
463 if (!info->apertures) {
464 ret = -ENOMEM;
465 goto out_unref;
466 }
467 info->apertures->ranges[0].base = dev->mode_config.fb_base;
468 info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
469 }
470
471 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
472 sizes->fb_width, sizes->fb_height);
473
474 info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
475 info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
476
477 /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
478
479 dev_dbg(dev->dev, "allocated %dx%d fb\n",
480 psbfb->base.width, psbfb->base.height);
481
482 mutex_unlock(&dev->struct_mutex);
483 return 0;
484 out_unref:
485 if (backing->stolen)
486 psb_gtt_free_range(dev, backing);
487 else
488 drm_gem_object_unreference(&backing->gem);
489 mutex_unlock(&dev->struct_mutex);
490 return ret; /* backing already released, don't free it again below */
491 out_err1:
492 mutex_unlock(&dev->struct_mutex);
493 psb_gtt_free_range(dev, backing);
494 return ret;
495 }
494
495 /**
496 * psb_user_framebuffer_create - create framebuffer
497 * @dev: our DRM device
498 * @filp: client file
499 * @cmd: mode request
500 *
501 * Create a new framebuffer backed by a userspace GEM object
502 */
503 static struct drm_framebuffer *psb_user_framebuffer_create
504 (struct drm_device *dev, struct drm_file *filp,
505 struct drm_mode_fb_cmd2 *cmd)
506 {
507 struct gtt_range *r;
508 struct drm_gem_object *obj;
509
510 /*
511 * Find the GEM object and thus the gtt range object that is
512 * to back this space
513 */
514 obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
515 if (obj == NULL)
516 return ERR_PTR(-ENOENT);
517
518 /* Let the core code do all the work */
519 r = container_of(obj, struct gtt_range, gem);
520 return psb_framebuffer_create(dev, cmd, r);
521 }
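/*
 * A minimal sketch of the userspace side (names and values here are
 * assumptions, not taken from this file): the ADDFB2 ioctl that lands in
 * psb_user_framebuffer_create() is normally issued through libdrm, with
 * the GEM handle and pitch coming from an earlier DRM_IOCTL_MODE_CREATE_DUMB.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <drm_fourcc.h>

static int add_user_fb(int drm_fd, uint32_t width, uint32_t height,
		       uint32_t bo_handle, uint32_t pitch, uint32_t *fb_id)
{
	uint32_t handles[4] = { bo_handle };
	uint32_t pitches[4] = { pitch };	/* must be 64-byte aligned, see psb_framebuffer_init() */
	uint32_t offsets[4] = { 0 };

	return drmModeAddFB2(drm_fd, width, height, DRM_FORMAT_XRGB8888,
			     handles, pitches, offsets, fb_id, 0);
}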
522
523 static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
524 u16 blue, int regno)
525 {
526 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
527
528 intel_crtc->lut_r[regno] = red >> 8;
529 intel_crtc->lut_g[regno] = green >> 8;
530 intel_crtc->lut_b[regno] = blue >> 8;
531 }
532
533 static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
534 u16 *green, u16 *blue, int regno)
535 {
536 struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
537
538 *red = intel_crtc->lut_r[regno] << 8;
539 *green = intel_crtc->lut_g[regno] << 8;
540 *blue = intel_crtc->lut_b[regno] << 8;
541 }
542
543 static int psbfb_probe(struct drm_fb_helper *helper,
544 struct drm_fb_helper_surface_size *sizes)
545 {
546 struct psb_fbdev *psb_fbdev = (struct psb_fbdev *)helper;
547 struct drm_device *dev = psb_fbdev->psb_fb_helper.dev;
548 struct drm_psb_private *dev_priv = dev->dev_private;
549 int new_fb = 0;
550 int bytespp;
551 int ret;
552
553 bytespp = sizes->surface_bpp / 8;
554 if (bytespp == 3) /* no 24bit packed */
555 bytespp = 4;
556
557 /* If the mode will not fit in 32bit then switch to 16bit to get
558 a console at full resolution. The X mode setting server will
559 allocate its own 32bit GEM framebuffer */
560 if (ALIGN(sizes->fb_width * bytespp, 64) * sizes->fb_height >
561 dev_priv->vram_stolen_size) {
562 sizes->surface_bpp = 16;
563 sizes->surface_depth = 16;
564 }
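/*
 * Worked example (illustrative numbers): with 8 MiB of stolen memory a
 * 2048x1536 console at 32bpp would need ALIGN(2048 * 4, 64) * 1536 =
 * 12 MiB, so the test above drops the console to 16bpp (6 MiB) and
 * leaves the full-depth framebuffer to the X server.
 */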
565
566 if (!helper->fb) {
567 ret = psbfb_create(psb_fbdev, sizes);
568 if (ret)
569 return ret;
570 new_fb = 1;
571 }
572 return new_fb;
573 }
574
575 static struct drm_fb_helper_funcs psb_fb_helper_funcs = {
576 .gamma_set = psbfb_gamma_set,
577 .gamma_get = psbfb_gamma_get,
578 .fb_probe = psbfb_probe,
579 };
580
581 static int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
582 {
583 struct fb_info *info;
584 struct psb_framebuffer *psbfb = &fbdev->pfb;
585
586 if (fbdev->psb_fb_helper.fbdev) {
587 info = fbdev->psb_fb_helper.fbdev;
588 unregister_framebuffer(info);
589 if (info->cmap.len)
590 fb_dealloc_cmap(&info->cmap);
591 framebuffer_release(info);
592 }
593 drm_fb_helper_fini(&fbdev->psb_fb_helper);
594 drm_framebuffer_cleanup(&psbfb->base);
595
596 if (psbfb->gtt)
597 drm_gem_object_unreference(&psbfb->gtt->gem);
598 return 0;
599 }
600
601 int psb_fbdev_init(struct drm_device *dev)
602 {
603 struct psb_fbdev *fbdev;
604 struct drm_psb_private *dev_priv = dev->dev_private;
605
606 fbdev = kzalloc(sizeof(struct psb_fbdev), GFP_KERNEL);
607 if (!fbdev) {
608 dev_err(dev->dev, "no memory\n");
609 return -ENOMEM;
610 }
611
612 dev_priv->fbdev = fbdev;
613 fbdev->psb_fb_helper.funcs = &psb_fb_helper_funcs;
614
615 drm_fb_helper_init(dev, &fbdev->psb_fb_helper, dev_priv->ops->crtcs,
616 INTELFB_CONN_LIMIT);
617
618 drm_fb_helper_single_add_all_connectors(&fbdev->psb_fb_helper);
619 drm_fb_helper_initial_config(&fbdev->psb_fb_helper, 32);
620 return 0;
621 }
622
623 static void psb_fbdev_fini(struct drm_device *dev)
624 {
625 struct drm_psb_private *dev_priv = dev->dev_private;
626
627 if (!dev_priv->fbdev)
628 return;
629
630 psb_fbdev_destroy(dev, dev_priv->fbdev);
631 kfree(dev_priv->fbdev);
632 dev_priv->fbdev = NULL;
633 }
634
635 static void psbfb_output_poll_changed(struct drm_device *dev)
636 {
637 struct drm_psb_private *dev_priv = dev->dev_private;
638 struct psb_fbdev *fbdev = (struct psb_fbdev *)dev_priv->fbdev;
639 drm_fb_helper_hotplug_event(&fbdev->psb_fb_helper);
640 }
641
642 /**
643 * psb_user_framebuffer_create_handle - add a handle to a framebuffer
644 * @fb: framebuffer
645 * @file_priv: our DRM file
646 * @handle: returned handle
647 *
648 * Our framebuffer object is a GTT range which also contains a GEM
649 * object. We need to turn it into a handle for userspace. GEM will do
650 * the work for us
651 */
652 static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
653 struct drm_file *file_priv,
654 unsigned int *handle)
655 {
656 struct psb_framebuffer *psbfb = to_psb_fb(fb);
657 struct gtt_range *r = psbfb->gtt;
658 return drm_gem_handle_create(file_priv, &r->gem, handle);
659 }
660
661 /**
662 * psb_user_framebuffer_destroy - destruct user created fb
663 * @fb: framebuffer
664 *
665 * User framebuffers are backed by GEM objects so all we have to do is
666 * clean up a bit and drop the reference, GEM will handle the fallout
667 */
668 static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb)
669 {
670 struct psb_framebuffer *psbfb = to_psb_fb(fb);
671 struct gtt_range *r = psbfb->gtt;
672 struct drm_device *dev = fb->dev;
673 struct drm_psb_private *dev_priv = dev->dev_private;
674 struct psb_fbdev *fbdev = dev_priv->fbdev;
675 struct drm_crtc *crtc;
676 int reset = 0;
677
678 /* Should never get stolen memory for a user fb */
679 WARN_ON(r->stolen);
680
681 /* Check if we are erroneously live */
682 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
683 if (crtc->fb == fb)
684 reset = 1;
685
686 if (reset)
687 /*
688 * Now force a sane response before we permit the DRM CRTC
689 * layer to do stupid things like blank the display. Instead
690 * we reset this framebuffer as if the user had forced a reset.
691 * We must do this before the cleanup so that the DRM layer
692 * doesn't get a chance to stick its oar in where it isn't
693 * wanted.
694 */
695 drm_fb_helper_restore_fbdev_mode(&fbdev->psb_fb_helper);
696
697 /* Let DRM do its clean up */
698 drm_framebuffer_cleanup(fb);
699 /* We are no longer using the resource in GEM */
700 drm_gem_object_unreference_unlocked(&r->gem);
701 kfree(fb);
702 }
703
704 static const struct drm_mode_config_funcs psb_mode_funcs = {
705 .fb_create = psb_user_framebuffer_create,
706 .output_poll_changed = psbfb_output_poll_changed,
707 };
708
709 static int psb_create_backlight_property(struct drm_device *dev)
710 {
711 struct drm_psb_private *dev_priv = dev->dev_private;
712 struct drm_property *backlight;
713
714 if (dev_priv->backlight_property)
715 return 0;
716
717 backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
718
719 dev_priv->backlight_property = backlight;
720
721 return 0;
722 }
723
724 static void psb_setup_outputs(struct drm_device *dev)
725 {
726 struct drm_psb_private *dev_priv = dev->dev_private;
727 struct drm_connector *connector;
728
729 drm_mode_create_scaling_mode_property(dev);
730 psb_create_backlight_property(dev);
731
732 dev_priv->ops->output_init(dev);
733
734 list_for_each_entry(connector, &dev->mode_config.connector_list,
735 head) {
736 struct psb_intel_encoder *psb_intel_encoder =
737 psb_intel_attached_encoder(connector);
738 struct drm_encoder *encoder = &psb_intel_encoder->base;
739 int crtc_mask = 0, clone_mask = 0;
740
741 /* valid crtcs */
742 switch (psb_intel_encoder->type) {
743 case INTEL_OUTPUT_ANALOG:
744 crtc_mask = (1 << 0);
745 clone_mask = (1 << INTEL_OUTPUT_ANALOG);
746 break;
747 case INTEL_OUTPUT_SDVO:
748 crtc_mask = ((1 << 0) | (1 << 1));
749 clone_mask = (1 << INTEL_OUTPUT_SDVO);
750 break;
751 case INTEL_OUTPUT_LVDS:
752 crtc_mask = dev_priv->ops->lvds_mask;
753 clone_mask = (1 << INTEL_OUTPUT_LVDS);
754 break;
755 case INTEL_OUTPUT_MIPI:
756 crtc_mask = (1 << 0);
757 clone_mask = (1 << INTEL_OUTPUT_MIPI);
758 break;
759 case INTEL_OUTPUT_MIPI2:
760 crtc_mask = (1 << 2);
761 clone_mask = (1 << INTEL_OUTPUT_MIPI2);
762 break;
763 case INTEL_OUTPUT_HDMI:
764 crtc_mask = dev_priv->ops->hdmi_mask;
765 clone_mask = (1 << INTEL_OUTPUT_HDMI);
766 break;
767 case INTEL_OUTPUT_DISPLAYPORT:
768 crtc_mask = (1 << 0) | (1 << 1);
769 clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
770 break;
771 }
772 encoder->possible_crtcs = crtc_mask;
773 encoder->possible_clones =
774 psb_intel_connector_clones(dev, clone_mask);
775 }
776 }
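/*
 * Example of the resulting masks (taken from the switch above): an SDVO
 * encoder gets possible_crtcs = 0x3, so it can be driven by pipe 0 or
 * pipe 1, while a MIPI panel is fixed to pipe 0 (0x1); possible_clones
 * then marks which other output types may share a CRTC with it.
 */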
777
778 void psb_modeset_init(struct drm_device *dev)
779 {
780 struct drm_psb_private *dev_priv = dev->dev_private;
781 struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
782 int i;
783
784 drm_mode_config_init(dev);
785
786 dev->mode_config.min_width = 0;
787 dev->mode_config.min_height = 0;
788
789 dev->mode_config.funcs = &psb_mode_funcs;
790
791 /* set memory base */
792 /* Oaktrail and Poulsbo should use BAR 2 */
793 pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
794 &(dev->mode_config.fb_base));
795
796 /* num pipes is 2 for PSB but 1 for Mrst */
797 for (i = 0; i < dev_priv->num_pipe; i++)
798 psb_intel_crtc_init(dev, i, mode_dev);
799
800 dev->mode_config.max_width = 2048;
801 dev->mode_config.max_height = 2048;
802
803 psb_setup_outputs(dev);
804
805 if (dev_priv->ops->errata)
806 dev_priv->ops->errata(dev);
807
808 dev_priv->modeset = true;
809 }
810
811 void psb_modeset_cleanup(struct drm_device *dev)
812 {
813 struct drm_psb_private *dev_priv = dev->dev_private;
814 if (dev_priv->modeset) {
815 mutex_lock(&dev->struct_mutex);
816
817 drm_kms_helper_poll_fini(dev);
818 psb_fbdev_fini(dev);
819 drm_mode_config_cleanup(dev);
820
821 mutex_unlock(&dev->struct_mutex);
822 }
823 }