mmc: sdhci-acpi: Set MMC_CAP_CMD_DURING_TFR for Intel eMMC controllers
[deliverable/linux.git] / drivers / gpu / drm / etnaviv / etnaviv_drv.c
1 /*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
#include <linux/component.h>
#include <linux/of_platform.h>

#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
25
#ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
/* Runtime switch for register access tracing (module param, root-writable). */
static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
/* Without the Kconfig option the tracing code compiles away entirely. */
#define reglog 0
#endif
33
34 void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
35 const char *dbgname)
36 {
37 struct resource *res;
38 void __iomem *ptr;
39
40 if (name)
41 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
42 else
43 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
44
45 ptr = devm_ioremap_resource(&pdev->dev, res);
46 if (IS_ERR(ptr)) {
47 dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
48 PTR_ERR(ptr));
49 return ptr;
50 }
51
52 if (reglog)
53 dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
54 dbgname, ptr, (size_t)resource_size(res));
55
56 return ptr;
57 }
58
/* Write a 32-bit GPU register, tracing the access when reglog is set. */
void etnaviv_writel(u32 data, void __iomem *addr)
{
	if (reglog)
		printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);

	writel(data, addr);
}
66
/* Read a 32-bit GPU register, tracing the value when reglog is set. */
u32 etnaviv_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	if (reglog)
		printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);

	return val;
}
76
77 /*
78 * DRM operations:
79 */
80
81
82 static void load_gpu(struct drm_device *dev)
83 {
84 struct etnaviv_drm_private *priv = dev->dev_private;
85 unsigned int i;
86
87 for (i = 0; i < ETNA_MAX_PIPES; i++) {
88 struct etnaviv_gpu *g = priv->gpu[i];
89
90 if (g) {
91 int ret;
92
93 ret = etnaviv_gpu_init(g);
94 if (ret)
95 priv->gpu[i] = NULL;
96 }
97 }
98 }
99
100 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
101 {
102 struct etnaviv_file_private *ctx;
103
104 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
105 if (!ctx)
106 return -ENOMEM;
107
108 file->driver_priv = ctx;
109
110 return 0;
111 }
112
113 static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
114 {
115 struct etnaviv_drm_private *priv = dev->dev_private;
116 struct etnaviv_file_private *ctx = file->driver_priv;
117 unsigned int i;
118
119 for (i = 0; i < ETNA_MAX_PIPES; i++) {
120 struct etnaviv_gpu *gpu = priv->gpu[i];
121
122 if (gpu) {
123 mutex_lock(&gpu->lock);
124 if (gpu->lastctx == ctx)
125 gpu->lastctx = NULL;
126 mutex_unlock(&gpu->lock);
127 }
128 }
129
130 kfree(ctx);
131 }
132
133 /*
134 * DRM debugfs:
135 */
136
137 #ifdef CONFIG_DEBUG_FS
/* debugfs "gem" entry: describe all GEM objects of this device. */
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
	struct etnaviv_drm_private *priv = dev->dev_private;

	etnaviv_gem_describe_objects(priv, m);

	return 0;
}
146
/*
 * debugfs "mm" entry: dump the mmap offset manager's address space while
 * holding its rwlock for reading.
 */
static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
	int ret;

	read_lock(&dev->vma_offset_manager->vm_lock);
	ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
	read_unlock(&dev->vma_offset_manager->vm_lock);

	return ret;
}
157
/* debugfs "mmu" entry: dump one GPU's MMU space under the MMU lock. */
static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

	mutex_lock(&gpu->mmu->lock);
	drm_mm_dump_table(m, &gpu->mmu->mm);
	mutex_unlock(&gpu->mmu->lock);

	return 0;
}
168
169 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
170 {
171 struct etnaviv_cmdbuf *buf = gpu->buffer;
172 u32 size = buf->size;
173 u32 *ptr = buf->vaddr;
174 u32 i;
175
176 seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
177 buf->vaddr, (u64)buf->paddr, size - buf->user_size);
178
179 for (i = 0; i < size / 4; i++) {
180 if (i && !(i % 4))
181 seq_puts(m, "\n");
182 if (i % 4 == 0)
183 seq_printf(m, "\t0x%p: ", ptr + i);
184 seq_printf(m, "%08x ", *(ptr + i));
185 }
186 seq_puts(m, "\n");
187 }
188
/* debugfs "ring" entry: dump one GPU's ring buffer under the GPU lock. */
static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
	seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

	mutex_lock(&gpu->lock);
	etnaviv_buffer_dump(gpu, m);
	mutex_unlock(&gpu->lock);

	return 0;
}
199
200 static int show_unlocked(struct seq_file *m, void *arg)
201 {
202 struct drm_info_node *node = (struct drm_info_node *) m->private;
203 struct drm_device *dev = node->minor->dev;
204 int (*show)(struct drm_device *dev, struct seq_file *m) =
205 node->info_ent->data;
206
207 return show(dev, m);
208 }
209
210 static int show_each_gpu(struct seq_file *m, void *arg)
211 {
212 struct drm_info_node *node = (struct drm_info_node *) m->private;
213 struct drm_device *dev = node->minor->dev;
214 struct etnaviv_drm_private *priv = dev->dev_private;
215 struct etnaviv_gpu *gpu;
216 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
217 node->info_ent->data;
218 unsigned int i;
219 int ret = 0;
220
221 for (i = 0; i < ETNA_MAX_PIPES; i++) {
222 gpu = priv->gpu[i];
223 if (!gpu)
224 continue;
225
226 ret = show(gpu, m);
227 if (ret < 0)
228 break;
229 }
230
231 return ret;
232 }
233
234 static struct drm_info_list etnaviv_debugfs_list[] = {
235 {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
236 {"gem", show_unlocked, 0, etnaviv_gem_show},
237 { "mm", show_unlocked, 0, etnaviv_mm_show },
238 {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
239 {"ring", show_each_gpu, 0, etnaviv_ring_show},
240 };
241
242 static int etnaviv_debugfs_init(struct drm_minor *minor)
243 {
244 struct drm_device *dev = minor->dev;
245 int ret;
246
247 ret = drm_debugfs_create_files(etnaviv_debugfs_list,
248 ARRAY_SIZE(etnaviv_debugfs_list),
249 minor->debugfs_root, minor);
250
251 if (ret) {
252 dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
253 return ret;
254 }
255
256 return ret;
257 }
258
/* Remove the debugfs files registered by etnaviv_debugfs_init(). */
static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
{
	drm_debugfs_remove_files(etnaviv_debugfs_list,
			ARRAY_SIZE(etnaviv_debugfs_list), minor);
}
264 #endif
265
266 /*
267 * DRM ioctls:
268 */
269
270 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
271 struct drm_file *file)
272 {
273 struct etnaviv_drm_private *priv = dev->dev_private;
274 struct drm_etnaviv_param *args = data;
275 struct etnaviv_gpu *gpu;
276
277 if (args->pipe >= ETNA_MAX_PIPES)
278 return -EINVAL;
279
280 gpu = priv->gpu[args->pipe];
281 if (!gpu)
282 return -ENXIO;
283
284 return etnaviv_gpu_get_param(gpu, args->param, &args->value);
285 }
286
287 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
288 struct drm_file *file)
289 {
290 struct drm_etnaviv_gem_new *args = data;
291
292 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
293 ETNA_BO_FORCE_MMU))
294 return -EINVAL;
295
296 return etnaviv_gem_new_handle(dev, file, args->size,
297 args->flags, &args->handle);
298 }
299
300 #define TS(t) ((struct timespec){ \
301 .tv_sec = (t).tv_sec, \
302 .tv_nsec = (t).tv_nsec \
303 })
304
305 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
306 struct drm_file *file)
307 {
308 struct drm_etnaviv_gem_cpu_prep *args = data;
309 struct drm_gem_object *obj;
310 int ret;
311
312 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
313 return -EINVAL;
314
315 obj = drm_gem_object_lookup(file, args->handle);
316 if (!obj)
317 return -ENOENT;
318
319 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
320
321 drm_gem_object_unreference_unlocked(obj);
322
323 return ret;
324 }
325
326 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
327 struct drm_file *file)
328 {
329 struct drm_etnaviv_gem_cpu_fini *args = data;
330 struct drm_gem_object *obj;
331 int ret;
332
333 if (args->flags)
334 return -EINVAL;
335
336 obj = drm_gem_object_lookup(file, args->handle);
337 if (!obj)
338 return -ENOENT;
339
340 ret = etnaviv_gem_cpu_fini(obj);
341
342 drm_gem_object_unreference_unlocked(obj);
343
344 return ret;
345 }
346
347 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
348 struct drm_file *file)
349 {
350 struct drm_etnaviv_gem_info *args = data;
351 struct drm_gem_object *obj;
352 int ret;
353
354 if (args->pad)
355 return -EINVAL;
356
357 obj = drm_gem_object_lookup(file, args->handle);
358 if (!obj)
359 return -ENOENT;
360
361 ret = etnaviv_gem_mmap_offset(obj, &args->offset);
362 drm_gem_object_unreference_unlocked(obj);
363
364 return ret;
365 }
366
367 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
368 struct drm_file *file)
369 {
370 struct drm_etnaviv_wait_fence *args = data;
371 struct etnaviv_drm_private *priv = dev->dev_private;
372 struct timespec *timeout = &TS(args->timeout);
373 struct etnaviv_gpu *gpu;
374
375 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
376 return -EINVAL;
377
378 if (args->pipe >= ETNA_MAX_PIPES)
379 return -EINVAL;
380
381 gpu = priv->gpu[args->pipe];
382 if (!gpu)
383 return -ENXIO;
384
385 if (args->flags & ETNA_WAIT_NONBLOCK)
386 timeout = NULL;
387
388 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
389 timeout);
390 }
391
392 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
393 struct drm_file *file)
394 {
395 struct drm_etnaviv_gem_userptr *args = data;
396 int access;
397
398 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
399 args->flags == 0)
400 return -EINVAL;
401
402 if (offset_in_page(args->user_ptr | args->user_size) ||
403 (uintptr_t)args->user_ptr != args->user_ptr ||
404 (u32)args->user_size != args->user_size ||
405 args->user_ptr & ~PAGE_MASK)
406 return -EINVAL;
407
408 if (args->flags & ETNA_USERPTR_WRITE)
409 access = VERIFY_WRITE;
410 else
411 access = VERIFY_READ;
412
413 if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
414 args->user_size))
415 return -EFAULT;
416
417 return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
418 args->user_size, args->flags,
419 &args->handle);
420 }
421
422 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
423 struct drm_file *file)
424 {
425 struct etnaviv_drm_private *priv = dev->dev_private;
426 struct drm_etnaviv_gem_wait *args = data;
427 struct timespec *timeout = &TS(args->timeout);
428 struct drm_gem_object *obj;
429 struct etnaviv_gpu *gpu;
430 int ret;
431
432 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
433 return -EINVAL;
434
435 if (args->pipe >= ETNA_MAX_PIPES)
436 return -EINVAL;
437
438 gpu = priv->gpu[args->pipe];
439 if (!gpu)
440 return -ENXIO;
441
442 obj = drm_gem_object_lookup(file, args->handle);
443 if (!obj)
444 return -ENOENT;
445
446 if (args->flags & ETNA_WAIT_NONBLOCK)
447 timeout = NULL;
448
449 ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
450
451 drm_gem_object_unreference_unlocked(obj);
452
453 return ret;
454 }
455
/* ioctl table: every entry requires auth and is render-node capable. */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM, get_param, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW, gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO, gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT, gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE, wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT, gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
};
469
/* GEM mmap VMA ops: etnaviv fault handler plus standard DRM open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
475
/* File ops: stock DRM handlers except for the etnaviv GEM mmap hook. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
489
/*
 * DRM driver description: GEM + PRIME render driver with no modesetting.
 * PRIME export/import uses the generic helpers with etnaviv-specific
 * pin/unpin, sg-table and vmap callbacks.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_HAVE_IRQ |
				DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};
522
523 /*
524 * Platform driver:
525 */
/*
 * Component-master bind: allocate the DRM device and driver-private
 * state, bind all GPU core components, initialize the GPUs and register
 * the device.  Failures unwind in reverse order via the goto chain.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	drm->platformdev = to_platform_device(dev);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	/* Ordered workqueue: at most one work item executes at a time. */
	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	/* drvdata must be set before component bind callbacks may use it. */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
582
/*
 * Component-master unbind: tear down in reverse of etnaviv_bind() —
 * unregister the DRM device, drain and destroy the workqueue, unbind
 * the GPU components, then drop the device reference.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	/* Clear the back-pointer before freeing so stale users fault fast. */
	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}
600
/* Component-master callbacks tying the subsystem device to its GPU cores. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
605
606 static int compare_of(struct device *dev, void *data)
607 {
608 struct device_node *np = data;
609
610 return dev->of_node == np;
611 }
612
/* Component match callback: does this device's name equal the string? */
static int compare_str(struct device *dev, void *data)
{
	return !strcmp(dev_name(dev), data);
}
617
/*
 * Platform probe: build the component match list — from the "cores" DT
 * phandle list, or from a NULL-terminated name array in platform data —
 * and register the component master with it.
 */
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct component_match *match = NULL;

	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	if (node) {
		struct device_node *core_node;
		int i;

		for (i = 0; ; i++) {
			core_node = of_parse_phandle(node, "cores", i);
			if (!core_node)
				break;

			/*
			 * NOTE(review): the node reference is dropped just
			 * below while the match list keeps the raw pointer
			 * for compare_of() — confirm the node outlives the
			 * matching phase.
			 */
			component_match_add(&pdev->dev, &match, compare_of,
					    core_node);
			of_node_put(core_node);
		}
	} else if (dev->platform_data) {
		char **names = dev->platform_data;
		unsigned i;

		for (i = 0; names[i]; i++)
			component_match_add(dev, &match, compare_str, names[i]);
	}

	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}
649
/* Platform remove: drop the component master registered at probe time. */
static int etnaviv_pdev_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &etnaviv_master_ops);

	return 0;
}
656
/* OF match table for the virtual GPU-subsystem node. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
663
/* Platform driver for the GPU-subsystem (component master) device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name   = "etnaviv",
		.of_match_table = dt_match,
	},
};
672
673 static int __init etnaviv_init(void)
674 {
675 int ret;
676
677 etnaviv_validate_init();
678
679 ret = platform_driver_register(&etnaviv_gpu_driver);
680 if (ret != 0)
681 return ret;
682
683 ret = platform_driver_register(&etnaviv_platform_driver);
684 if (ret != 0)
685 platform_driver_unregister(&etnaviv_gpu_driver);
686
687 return ret;
688 }
689 module_init(etnaviv_init);
690
691 static void __exit etnaviv_exit(void)
692 {
693 platform_driver_unregister(&etnaviv_gpu_driver);
694 platform_driver_unregister(&etnaviv_platform_driver);
695 }
696 module_exit(etnaviv_exit);
697
698 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
699 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
700 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
701 MODULE_DESCRIPTION("etnaviv DRM Driver");
702 MODULE_LICENSE("GPL v2");
703 MODULE_ALIAS("platform:etnaviv");
This page took 0.070918 seconds and 5 git commands to generate.