drivers/gpu/drm/nouveau/nouveau_fbcon.c

/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>

#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_crtc.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_dma.h"

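/*
 * The fillrect/copyarea/imageblit hooks below share one pattern: when the
 * console is running, acceleration is enabled and the channel mutex can be
 * taken without sleeping, drawing is handed to the per-generation
 * (NV04/NV50/NVC0) accelerated path; otherwise, or on error, it falls back
 * to the generic cfb_* software routines.
 */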
static void
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_fillrect(info, rect);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_fillrect(info, rect);
		else
			ret = nvc0_fbcon_fillrect(info, rect);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	cfb_fillrect(info, rect);
}

static void
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_copyarea(info, image);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_copyarea(info, image);
		else
			ret = nvc0_fbcon_copyarea(info, image);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	cfb_copyarea(info, image);
}

static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&dev_priv->channel->mutex)) {
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_imageblit(info, image);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_imageblit(info, image);
		else
			ret = nvc0_fbcon_imageblit(info, image);
		mutex_unlock(&dev_priv->channel->mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	cfb_imageblit(info, image);
}

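/*
 * Flush outstanding accelerated drawing before the framebuffer is touched
 * directly: submit methods that make the 2D engine write its notifier,
 * store a sentinel in the notifier buffer, kick the ring, then busy-wait
 * until the GPU overwrites the sentinel.  On timeout the console is dropped
 * back to software rendering via nouveau_fbcon_gpu_lockup().
 */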
static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->channel;
	int ret, i;

	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&chan->mutex))
		return 0;

	ret = RING_SPACE(chan, 4);
	if (ret) {
		mutex_unlock(&chan->mutex);
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	if (dev_priv->card_type >= NV_C0) {
		BEGIN_NVC0(chan, 2, NvSub2D, 0x010c, 1);
		OUT_RING  (chan, 0);
		BEGIN_NVC0(chan, 2, NvSub2D, 0x0100, 1);
		OUT_RING  (chan, 0);
	} else {
		BEGIN_RING(chan, 0, 0x0104, 1);
		OUT_RING  (chan, 0);
		BEGIN_RING(chan, 0, 0x0100, 1);
		OUT_RING  (chan, 0);
	}

	nouveau_bo_wr32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3, 0xffffffff);
	FIRE_RING(chan);
	mutex_unlock(&chan->mutex);

	ret = -EBUSY;
	for (i = 0; i < 100000; i++) {
		if (!nouveau_bo_rd32(chan->notifier_bo, chan->m2mf_ntfy/4 + 3)) {
			ret = 0;
			break;
		}
		DRM_UDELAY(1);
	}

	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}

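/*
 * Two fb_ops tables: nouveau_fbcon_ops routes drawing through the
 * accelerated hooks above and is only installed once accel init succeeds in
 * nouveau_fbcon_create(); nouveau_fbcon_sw_ops sticks to the plain cfb_*
 * helpers and is what the console starts out with.
 */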
static struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = nouveau_fbcon_fillrect,
	.fb_copyarea = nouveau_fbcon_copyarea,
	.fb_imageblit = nouveau_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

static struct fb_ops nouveau_fbcon_sw_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = drm_fb_helper_check_var,
	.fb_set_par = drm_fb_helper_set_par,
	.fb_fillrect = cfb_fillrect,
	.fb_copyarea = cfb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_pan_display = drm_fb_helper_pan_display,
	.fb_blank = drm_fb_helper_blank,
	.fb_setcmap = drm_fb_helper_setcmap,
	.fb_debug_enter = drm_fb_helper_debug_enter,
	.fb_debug_leave = drm_fb_helper_debug_leave,
};

static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
				    u16 blue, int regno)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	nv_crtc->lut.r[regno] = red;
	nv_crtc->lut.g[regno] = green;
	nv_crtc->lut.b[regno] = blue;
}

static void nouveau_fbcon_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
				    u16 *blue, int regno)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

	*red = nv_crtc->lut.r[regno];
	*green = nv_crtc->lut.g[regno];
	*blue = nv_crtc->lut.b[regno];
}

static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
	struct fb_info *info = nfbdev->helper.fbdev;
	struct fb_fillrect rect;

	/* Clear the entire fbcon.  The DRM core will program every connector
	 * with its preferred mode.  If the sizes differ, one display will
	 * quite likely have garbage around the console.
	 */
	rect.dx = rect.dy = 0;
	rect.width = info->var.xres_virtual;
	rect.height = info->var.yres_virtual;
	rect.color = 0;
	rect.rop = ROP_COPY;
	info->fbops->fb_fillrect(info, &rect);
}

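/*
 * Build the fbcon framebuffer: allocate and pin a VRAM buffer object sized
 * for the requested surface, CPU-map it, wrap it in a DRM framebuffer, fill
 * in the fb_info (apertures, fix/var, pixmap), and switch to the accelerated
 * fb_ops if a channel is available and the per-generation accel init
 * succeeds.
 */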
static int
nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct drm_device *dev = nfbdev->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_framebuffer *nouveau_fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd mode_cmd;
	struct pci_dev *pdev = dev->pdev;
	struct device *device = &pdev->dev;
	int size, ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.bpp = sizes->surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = sizes->surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = roundup(size, PAGE_SIZE);

	ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "failed to pin fb: %d\n", ret);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(dev, "failed to map fb: %d\n", ret);
		nouveau_bo_unpin(nvbo);
		nouveau_bo_ref(NULL, &nvbo);
		goto out;
	}

	chan = nouveau_nofbaccel ? NULL : dev_priv->channel;
	if (chan && dev_priv->card_type >= NV_50) {
		ret = nouveau_bo_vma_add(nvbo, chan->vm, &nfbdev->nouveau_fb.vma);
		if (ret) {
			NV_ERROR(dev, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	mutex_lock(&dev->struct_mutex);

	info = framebuffer_alloc(0, device);
	if (!info) {
		ret = -ENOMEM;
		goto out_unref;
	}

	ret = fb_alloc_cmap(&info->cmap, 256, 0);
	if (ret) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->par = nfbdev;

	nouveau_framebuffer_init(dev, &nfbdev->nouveau_fb, &mode_cmd, nvbo);

	nouveau_fb = &nfbdev->nouveau_fb;
	fb = &nouveau_fb->base;

	/* setup helper */
	nfbdev->helper.fb = fb;
	nfbdev->helper.fbdev = info;

	strcpy(info->fix.id, "nouveaufb");
	if (nouveau_nofbaccel)
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->flags |= FBINFO_CAN_FORCE_OUTPUT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.mem.bus.base +
			       nvbo->bo.mem.bus.offset;
	info->fix.smem_len = size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
	info->screen_size = size;

	drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
	drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);

	/* Set aperture base/size for vesafb takeover */
	info->apertures = dev_priv->apertures;
	if (!info->apertures) {
		ret = -ENOMEM;
		goto out_unref;
	}

	info->pixmap.size = 64*1024;
	info->pixmap.buf_align = 8;
	info->pixmap.access_align = 32;
	info->pixmap.flags = FB_PIXMAP_SYSTEM;
	info->pixmap.scan_align = 1;

	mutex_unlock(&dev->struct_mutex);

	if (dev_priv->channel && !nouveau_nofbaccel) {
		ret = -ENODEV;
		if (dev_priv->card_type < NV_50)
			ret = nv04_fbcon_accel_init(info);
		else
		if (dev_priv->card_type < NV_C0)
			ret = nv50_fbcon_accel_init(info);
		else
			ret = nvc0_fbcon_accel_init(info);

		if (ret == 0)
			info->fbops = &nouveau_fbcon_ops;
	}

	nouveau_fbcon_zfill(dev, nfbdev);

	/* To allow resizing without swapping buffers */
	NV_INFO(dev, "allocated %dx%d fb: 0x%lx, bo %p\n",
		nouveau_fb->base.width,
		nouveau_fb->base.height,
		nvbo->bo.offset, nvbo);

	vga_switcheroo_client_fb_set(dev->pdev, info);
	return 0;

out_unref:
	mutex_unlock(&dev->struct_mutex);
out:
	return ret;
}

static int
nouveau_fbcon_find_or_create_single(struct drm_fb_helper *helper,
				    struct drm_fb_helper_surface_size *sizes)
{
	struct nouveau_fbdev *nfbdev = (struct nouveau_fbdev *)helper;
	int new_fb = 0;
	int ret;

	if (!helper->fb) {
		ret = nouveau_fbcon_create(nfbdev, sizes);
		if (ret)
			return ret;
		new_fb = 1;
	}
	return new_fb;
}

void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}

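/*
 * Tear down the fbdev: unregister the fb_info and free its cmap, drop the
 * buffer object mapping, VMA and GEM reference, then clean up the fb helper
 * and the DRM framebuffer.
 */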
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
	struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
	struct fb_info *info;

	if (nfbdev->helper.fbdev) {
		info = nfbdev->helper.fbdev;
		unregister_framebuffer(info);
		if (info->cmap.len)
			fb_dealloc_cmap(&info->cmap);
		framebuffer_release(info);
	}

	if (nouveau_fb->nvbo) {
		nouveau_bo_unmap(nouveau_fb->nvbo);
		nouveau_bo_vma_del(nouveau_fb->nvbo, &nouveau_fb->vma);
		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
		nouveau_fb->nvbo = NULL;
	}
	drm_fb_helper_fini(&nfbdev->helper);
	drm_framebuffer_cleanup(&nouveau_fb->base);
	return 0;
}

void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbdev *nfbdev = info->par;
	struct drm_device *dev = nfbdev->dev;

	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}

static struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
	.gamma_set = nouveau_fbcon_gamma_set,
	.gamma_get = nouveau_fbcon_gamma_get,
	.fb_probe = nouveau_fbcon_find_or_create_single,
};

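/*
 * Register the fbdev emulation: allocate the nouveau_fbdev wrapper, set up
 * the DRM fb helper for one or two CRTCs (nv_two_heads()), add all
 * connectors and ask for an initial 32 bpp configuration.
 */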
int nouveau_fbcon_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fbdev *nfbdev;
	int ret;

	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
	if (!nfbdev)
		return -ENOMEM;

	nfbdev->dev = dev;
	dev_priv->nfbdev = nfbdev;
	nfbdev->helper.funcs = &nouveau_fbcon_helper_funcs;

	ret = drm_fb_helper_init(dev, &nfbdev->helper,
				 nv_two_heads(dev) ? 2 : 1, 4);
	if (ret) {
		kfree(nfbdev);
		return ret;
	}

	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
	drm_fb_helper_initial_config(&nfbdev->helper, 32);
	return 0;
}

void nouveau_fbcon_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (!dev_priv->nfbdev)
		return;

	nouveau_fbcon_destroy(dev, dev_priv->nfbdev);
	kfree(dev_priv->nfbdev);
	dev_priv->nfbdev = NULL;
}

void nouveau_fbcon_save_disable_accel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	dev_priv->nfbdev->saved_flags = dev_priv->nfbdev->helper.fbdev->flags;
	dev_priv->nfbdev->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED;
}

void nouveau_fbcon_restore_accel(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	dev_priv->nfbdev->helper.fbdev->flags = dev_priv->nfbdev->saved_flags;
}

void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
}

void nouveau_fbcon_zfill_all(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	nouveau_fbcon_zfill(dev, dev_priv->nfbdev);
}