Merge remote-tracking branch 'regulator/for-next'
[deliverable/linux.git] / drivers / gpu / drm / etnaviv / etnaviv_drv.c
1 /*
2 * Copyright (C) 2015 Etnaviv Project
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17 #include <linux/component.h>
18 #include <linux/of_platform.h>
19
20 #include "etnaviv_drv.h"
21 #include "etnaviv_gpu.h"
22 #include "etnaviv_gem.h"
23 #include "etnaviv_mmu.h"
24 #include "etnaviv_gem.h"
25
26 #ifdef CONFIG_DRM_ETNAVIV_REGISTER_LOGGING
27 static bool reglog;
28 MODULE_PARM_DESC(reglog, "Enable register read/write logging");
29 module_param(reglog, bool, 0600);
30 #else
31 #define reglog 0
32 #endif
33
34 void __iomem *etnaviv_ioremap(struct platform_device *pdev, const char *name,
35 const char *dbgname)
36 {
37 struct resource *res;
38 void __iomem *ptr;
39
40 if (name)
41 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
42 else
43 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
44
45 ptr = devm_ioremap_resource(&pdev->dev, res);
46 if (IS_ERR(ptr)) {
47 dev_err(&pdev->dev, "failed to ioremap %s: %ld\n", name,
48 PTR_ERR(ptr));
49 return ptr;
50 }
51
52 if (reglog)
53 dev_printk(KERN_DEBUG, &pdev->dev, "IO:region %s 0x%p %08zx\n",
54 dbgname, ptr, (size_t)resource_size(res));
55
56 return ptr;
57 }
58
59 void etnaviv_writel(u32 data, void __iomem *addr)
60 {
61 if (reglog)
62 printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
63
64 writel(data, addr);
65 }
66
67 u32 etnaviv_readl(const void __iomem *addr)
68 {
69 u32 val = readl(addr);
70
71 if (reglog)
72 printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);
73
74 return val;
75 }
76
77 /*
78 * DRM operations:
79 */
80
81
82 static void load_gpu(struct drm_device *dev)
83 {
84 struct etnaviv_drm_private *priv = dev->dev_private;
85 unsigned int i;
86
87 for (i = 0; i < ETNA_MAX_PIPES; i++) {
88 struct etnaviv_gpu *g = priv->gpu[i];
89
90 if (g) {
91 int ret;
92
93 ret = etnaviv_gpu_init(g);
94 if (ret)
95 priv->gpu[i] = NULL;
96 }
97 }
98 }
99
100 static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
101 {
102 struct etnaviv_file_private *ctx;
103
104 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
105 if (!ctx)
106 return -ENOMEM;
107
108 file->driver_priv = ctx;
109
110 return 0;
111 }
112
113 static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
114 {
115 struct etnaviv_drm_private *priv = dev->dev_private;
116 struct etnaviv_file_private *ctx = file->driver_priv;
117 unsigned int i;
118
119 for (i = 0; i < ETNA_MAX_PIPES; i++) {
120 struct etnaviv_gpu *gpu = priv->gpu[i];
121
122 if (gpu) {
123 mutex_lock(&gpu->lock);
124 if (gpu->lastctx == ctx)
125 gpu->lastctx = NULL;
126 mutex_unlock(&gpu->lock);
127 }
128 }
129
130 kfree(ctx);
131 }
132
133 /*
134 * DRM debugfs:
135 */
136
137 #ifdef CONFIG_DEBUG_FS
138 static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
139 {
140 struct etnaviv_drm_private *priv = dev->dev_private;
141
142 etnaviv_gem_describe_objects(priv, m);
143
144 return 0;
145 }
146
147 static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
148 {
149 int ret;
150
151 read_lock(&dev->vma_offset_manager->vm_lock);
152 ret = drm_mm_dump_table(m, &dev->vma_offset_manager->vm_addr_space_mm);
153 read_unlock(&dev->vma_offset_manager->vm_lock);
154
155 return ret;
156 }
157
158 static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
159 {
160 seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
161
162 mutex_lock(&gpu->mmu->lock);
163 drm_mm_dump_table(m, &gpu->mmu->mm);
164 mutex_unlock(&gpu->mmu->lock);
165
166 return 0;
167 }
168
169 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
170 {
171 struct etnaviv_cmdbuf *buf = gpu->buffer;
172 u32 size = buf->size;
173 u32 *ptr = buf->vaddr;
174 u32 i;
175
176 seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
177 buf->vaddr, (u64)buf->paddr, size - buf->user_size);
178
179 for (i = 0; i < size / 4; i++) {
180 if (i && !(i % 4))
181 seq_puts(m, "\n");
182 if (i % 4 == 0)
183 seq_printf(m, "\t0x%p: ", ptr + i);
184 seq_printf(m, "%08x ", *(ptr + i));
185 }
186 seq_puts(m, "\n");
187 }
188
189 static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
190 {
191 seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));
192
193 mutex_lock(&gpu->lock);
194 etnaviv_buffer_dump(gpu, m);
195 mutex_unlock(&gpu->lock);
196
197 return 0;
198 }
199
200 static int show_unlocked(struct seq_file *m, void *arg)
201 {
202 struct drm_info_node *node = (struct drm_info_node *) m->private;
203 struct drm_device *dev = node->minor->dev;
204 int (*show)(struct drm_device *dev, struct seq_file *m) =
205 node->info_ent->data;
206
207 return show(dev, m);
208 }
209
210 static int show_each_gpu(struct seq_file *m, void *arg)
211 {
212 struct drm_info_node *node = (struct drm_info_node *) m->private;
213 struct drm_device *dev = node->minor->dev;
214 struct etnaviv_drm_private *priv = dev->dev_private;
215 struct etnaviv_gpu *gpu;
216 int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
217 node->info_ent->data;
218 unsigned int i;
219 int ret = 0;
220
221 for (i = 0; i < ETNA_MAX_PIPES; i++) {
222 gpu = priv->gpu[i];
223 if (!gpu)
224 continue;
225
226 ret = show(gpu, m);
227 if (ret < 0)
228 break;
229 }
230
231 return ret;
232 }
233
234 static struct drm_info_list etnaviv_debugfs_list[] = {
235 {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
236 {"gem", show_unlocked, 0, etnaviv_gem_show},
237 { "mm", show_unlocked, 0, etnaviv_mm_show },
238 {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
239 {"ring", show_each_gpu, 0, etnaviv_ring_show},
240 };
241
242 static int etnaviv_debugfs_init(struct drm_minor *minor)
243 {
244 struct drm_device *dev = minor->dev;
245 int ret;
246
247 ret = drm_debugfs_create_files(etnaviv_debugfs_list,
248 ARRAY_SIZE(etnaviv_debugfs_list),
249 minor->debugfs_root, minor);
250
251 if (ret) {
252 dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
253 return ret;
254 }
255
256 return ret;
257 }
258
259 static void etnaviv_debugfs_cleanup(struct drm_minor *minor)
260 {
261 drm_debugfs_remove_files(etnaviv_debugfs_list,
262 ARRAY_SIZE(etnaviv_debugfs_list), minor);
263 }
264 #endif
265
266 /*
267 * DRM ioctls:
268 */
269
270 static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
271 struct drm_file *file)
272 {
273 struct etnaviv_drm_private *priv = dev->dev_private;
274 struct drm_etnaviv_param *args = data;
275 struct etnaviv_gpu *gpu;
276
277 if (args->pipe >= ETNA_MAX_PIPES)
278 return -EINVAL;
279
280 gpu = priv->gpu[args->pipe];
281 if (!gpu)
282 return -ENXIO;
283
284 return etnaviv_gpu_get_param(gpu, args->param, &args->value);
285 }
286
287 static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
288 struct drm_file *file)
289 {
290 struct drm_etnaviv_gem_new *args = data;
291
292 if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
293 ETNA_BO_FORCE_MMU))
294 return -EINVAL;
295
296 return etnaviv_gem_new_handle(dev, file, args->size,
297 args->flags, &args->handle);
298 }
299
/*
 * Build a struct timespec compound literal from a timeout value carried
 * in an ioctl argument struct, so callers can take its address and pass
 * it to the wait helpers.
 */
#define TS(t) ((struct timespec){ \
	.tv_sec = (t).tv_sec, \
	.tv_nsec = (t).tv_nsec \
})
304
305 static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
306 struct drm_file *file)
307 {
308 struct drm_etnaviv_gem_cpu_prep *args = data;
309 struct drm_gem_object *obj;
310 int ret;
311
312 if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
313 return -EINVAL;
314
315 obj = drm_gem_object_lookup(file, args->handle);
316 if (!obj)
317 return -ENOENT;
318
319 ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));
320
321 drm_gem_object_unreference_unlocked(obj);
322
323 return ret;
324 }
325
326 static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
327 struct drm_file *file)
328 {
329 struct drm_etnaviv_gem_cpu_fini *args = data;
330 struct drm_gem_object *obj;
331 int ret;
332
333 if (args->flags)
334 return -EINVAL;
335
336 obj = drm_gem_object_lookup(file, args->handle);
337 if (!obj)
338 return -ENOENT;
339
340 ret = etnaviv_gem_cpu_fini(obj);
341
342 drm_gem_object_unreference_unlocked(obj);
343
344 return ret;
345 }
346
347 static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
348 struct drm_file *file)
349 {
350 struct drm_etnaviv_gem_info *args = data;
351 struct drm_gem_object *obj;
352 int ret;
353
354 if (args->pad)
355 return -EINVAL;
356
357 obj = drm_gem_object_lookup(file, args->handle);
358 if (!obj)
359 return -ENOENT;
360
361 ret = etnaviv_gem_mmap_offset(obj, &args->offset);
362 drm_gem_object_unreference_unlocked(obj);
363
364 return ret;
365 }
366
367 static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
368 struct drm_file *file)
369 {
370 struct drm_etnaviv_wait_fence *args = data;
371 struct etnaviv_drm_private *priv = dev->dev_private;
372 struct timespec *timeout = &TS(args->timeout);
373 struct etnaviv_gpu *gpu;
374
375 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
376 return -EINVAL;
377
378 if (args->pipe >= ETNA_MAX_PIPES)
379 return -EINVAL;
380
381 gpu = priv->gpu[args->pipe];
382 if (!gpu)
383 return -ENXIO;
384
385 if (args->flags & ETNA_WAIT_NONBLOCK)
386 timeout = NULL;
387
388 return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
389 timeout);
390 }
391
392 static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
393 struct drm_file *file)
394 {
395 struct drm_etnaviv_gem_userptr *args = data;
396 int access;
397
398 if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
399 args->flags == 0)
400 return -EINVAL;
401
402 if (offset_in_page(args->user_ptr | args->user_size) ||
403 (uintptr_t)args->user_ptr != args->user_ptr ||
404 (u32)args->user_size != args->user_size ||
405 args->user_ptr & ~PAGE_MASK)
406 return -EINVAL;
407
408 if (args->flags & ETNA_USERPTR_WRITE)
409 access = VERIFY_WRITE;
410 else
411 access = VERIFY_READ;
412
413 if (!access_ok(access, (void __user *)(unsigned long)args->user_ptr,
414 args->user_size))
415 return -EFAULT;
416
417 return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
418 args->user_size, args->flags,
419 &args->handle);
420 }
421
422 static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
423 struct drm_file *file)
424 {
425 struct etnaviv_drm_private *priv = dev->dev_private;
426 struct drm_etnaviv_gem_wait *args = data;
427 struct timespec *timeout = &TS(args->timeout);
428 struct drm_gem_object *obj;
429 struct etnaviv_gpu *gpu;
430 int ret;
431
432 if (args->flags & ~(ETNA_WAIT_NONBLOCK))
433 return -EINVAL;
434
435 if (args->pipe >= ETNA_MAX_PIPES)
436 return -EINVAL;
437
438 gpu = priv->gpu[args->pipe];
439 if (!gpu)
440 return -ENXIO;
441
442 obj = drm_gem_object_lookup(file, args->handle);
443 if (!obj)
444 return -ENOENT;
445
446 if (args->flags & ETNA_WAIT_NONBLOCK)
447 timeout = NULL;
448
449 ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
450
451 drm_gem_object_unreference_unlocked(obj);
452
453 return ret;
454 }
455
/*
 * ioctl table: every entry requires DRM_AUTH and is allowed on render
 * nodes.  ETNA_IOCTL keeps the DRM_IOCTL_DEF_DRV boilerplate in sync
 * with the etnaviv_ioctl_* handler naming above.
 */
static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
	DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
	ETNA_IOCTL(GET_PARAM,   get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_NEW,     gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_INFO,    gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_SUBMIT,  gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(WAIT_FENCE,  wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_USERPTR, gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
	ETNA_IOCTL(GEM_WAIT,    gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
};
469
/* VM operations for mmap'd GEM objects: driver fault, generic open/close. */
static const struct vm_operations_struct vm_ops = {
	.fault = etnaviv_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
475
/* File operations: standard DRM helpers plus the driver's GEM mmap. */
static const struct file_operations fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = drm_compat_ioctl,
#endif
	.poll = drm_poll,
	.read = drm_read,
	.llseek = no_llseek,
	.mmap = etnaviv_gem_mmap,
};
489
/*
 * DRM driver description: GEM render-only driver with PRIME import/
 * export support and the debugfs hooks defined above.
 */
static struct drm_driver etnaviv_drm_driver = {
	.driver_features    = DRIVER_GEM |
				DRIVER_PRIME |
				DRIVER_RENDER,
	.open               = etnaviv_open,
	.preclose           = etnaviv_preclose,
	.gem_free_object_unlocked = etnaviv_gem_free_object,
	.gem_vm_ops         = &vm_ops,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
	.gem_prime_pin      = etnaviv_gem_prime_pin,
	.gem_prime_unpin    = etnaviv_gem_prime_unpin,
	.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
	.gem_prime_vmap     = etnaviv_gem_prime_vmap,
	.gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init       = etnaviv_debugfs_init,
	.debugfs_cleanup    = etnaviv_debugfs_cleanup,
#endif
	.ioctls             = etnaviv_ioctls,
	.num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
	.fops               = &fops,
	.name               = "etnaviv",
	.desc               = "etnaviv DRM",
	.date               = "20151214",
	.major              = 1,
	.minor              = 0,
};
521
522 /*
523 * Platform driver:
524 */
/*
 * Component-master bind: allocate the DRM device and driver-private
 * state, bind all GPU core components, initialise the GPUs and register
 * the device.  The goto chain unwinds in strict reverse order of
 * acquisition, so its labels must stay in this exact sequence.
 */
static int etnaviv_bind(struct device *dev)
{
	struct etnaviv_drm_private *priv;
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(dev, "failed to allocate private data\n");
		ret = -ENOMEM;
		goto out_unref;
	}
	drm->dev_private = priv;

	/* Ordered workqueue; flushed and destroyed again on teardown. */
	priv->wq = alloc_ordered_workqueue("etnaviv", 0);
	if (!priv->wq) {
		ret = -ENOMEM;
		goto out_wq;
	}

	mutex_init(&priv->gem_lock);
	INIT_LIST_HEAD(&priv->gem_list);
	priv->num_gpus = 0;

	/* Must be set before binding: components read it via drvdata. */
	dev_set_drvdata(dev, drm);

	ret = component_bind_all(dev, drm);
	if (ret < 0)
		goto out_bind;

	load_gpu(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto out_register;

	return 0;

out_register:
	component_unbind_all(dev, drm);
out_bind:
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
out_wq:
	kfree(priv);
out_unref:
	drm_dev_unref(drm);

	return ret;
}
579
/*
 * Component-master unbind: mirror of etnaviv_bind().  Unregister first
 * so no new work arrives, drain and destroy the workqueue, unbind the
 * GPU components, then free the private state and drop the device.
 */
static void etnaviv_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct etnaviv_drm_private *priv = drm->dev_private;

	drm_dev_unregister(drm);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	component_unbind_all(dev, drm);

	/* Clear the back-pointer before freeing to avoid a dangling ref. */
	drm->dev_private = NULL;
	kfree(priv);

	drm_put_dev(drm);
}
597
/* Component framework master callbacks. */
static const struct component_master_ops etnaviv_master_ops = {
	.bind = etnaviv_bind,
	.unbind = etnaviv_unbind,
};
602
603 static int compare_of(struct device *dev, void *data)
604 {
605 struct device_node *np = data;
606
607 return dev->of_node == np;
608 }
609
/* Component match: compare a device's name against a platform-data string. */
static int compare_str(struct device *dev, void *data)
{
	const char *name = data;

	return strcmp(dev_name(dev), name) == 0;
}
614
615 static int etnaviv_pdev_probe(struct platform_device *pdev)
616 {
617 struct device *dev = &pdev->dev;
618 struct device_node *node = dev->of_node;
619 struct component_match *match = NULL;
620
621 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
622
623 if (node) {
624 struct device_node *core_node;
625 int i;
626
627 for (i = 0; ; i++) {
628 core_node = of_parse_phandle(node, "cores", i);
629 if (!core_node)
630 break;
631
632 component_match_add(&pdev->dev, &match, compare_of,
633 core_node);
634 of_node_put(core_node);
635 }
636 } else if (dev->platform_data) {
637 char **names = dev->platform_data;
638 unsigned i;
639
640 for (i = 0; names[i]; i++)
641 component_match_add(dev, &match, compare_str, names[i]);
642 }
643
644 return component_master_add_with_match(dev, &etnaviv_master_ops, match);
645 }
646
647 static int etnaviv_pdev_remove(struct platform_device *pdev)
648 {
649 component_master_del(&pdev->dev, &etnaviv_master_ops);
650
651 return 0;
652 }
653
/* Device-tree compatibles this driver binds against. */
static const struct of_device_id dt_match[] = {
	{ .compatible = "fsl,imx-gpu-subsystem" },
	{ .compatible = "marvell,dove-gpu-subsystem" },
	{}
};
MODULE_DEVICE_TABLE(of, dt_match);
660
/* Platform driver for the GPU subsystem master device. */
static struct platform_driver etnaviv_platform_driver = {
	.probe      = etnaviv_pdev_probe,
	.remove     = etnaviv_pdev_remove,
	.driver     = {
		.name           = "etnaviv",
		.of_match_table = dt_match,
	},
};
669
670 static int __init etnaviv_init(void)
671 {
672 int ret;
673
674 etnaviv_validate_init();
675
676 ret = platform_driver_register(&etnaviv_gpu_driver);
677 if (ret != 0)
678 return ret;
679
680 ret = platform_driver_register(&etnaviv_platform_driver);
681 if (ret != 0)
682 platform_driver_unregister(&etnaviv_gpu_driver);
683
684 return ret;
685 }
686 module_init(etnaviv_init);
687
688 static void __exit etnaviv_exit(void)
689 {
690 platform_driver_unregister(&etnaviv_gpu_driver);
691 platform_driver_unregister(&etnaviv_platform_driver);
692 }
693 module_exit(etnaviv_exit);
694
695 MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
696 MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
697 MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
698 MODULE_DESCRIPTION("etnaviv DRM Driver");
699 MODULE_LICENSE("GPL v2");
700 MODULE_ALIAS("platform:etnaviv");
This page took 0.053083 seconds and 5 git commands to generate.