1 /*
2 * drivers/staging/omapdrm/omap_gem.c
3 *
4 * Copyright (C) 2011 Texas Instruments
5 * Author: Rob Clark <rob.clark@linaro.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published by
9 * the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20
21 #include <linux/spinlock.h>
22 #include <linux/shmem_fs.h>
23
24 #include "omap_drv.h"
25 #include "omap_dmm_tiler.h"
26
27 /* remove these once drm core helpers are merged */
28 struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
29 void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
30 bool dirty, bool accessed);
31 int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
32
33 /*
34 * GEM buffer object implementation.
35 */
36
37 #define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
38
39 /* note: we use upper 8 bits of flags for driver-internal flags: */
40 #define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
41 #define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
42 #define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
43
44
45 struct omap_gem_object {
46 struct drm_gem_object base;
47
48 struct list_head mm_list;
49
50 uint32_t flags;
51
52 /** width/height for tiled formats (rounded up to slot boundaries) */
53 uint16_t width, height;
54
55 /** roll applied when mapping to DMM */
56 uint32_t roll;
57
58 /**
59 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
60 * is set and the paddr is valid. Also if the buffer is remapped in
61 * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
62 * the physical address and OMAP_BO_DMA is not set, then you should
63 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
64 * not removed from under your feet.
65 *
66 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA-capable
67 * buffer is requested, but doesn't guarantee that it is. Use the
68 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
69 * physical address.
70 */
71 dma_addr_t paddr;
72
73 /**
74 * # of users of paddr
75 */
76 uint32_t paddr_cnt;
77
78 /**
79 * tiler block used when buffer is remapped in DMM/TILER.
80 */
81 struct tiler_block *block;
82
83 /**
84 * Array of backing pages, if allocated. Note that pages are never
85 * allocated for buffers originally allocated from contiguous memory
86 */
87 struct page **pages;
88
89 /** addresses corresponding to pages in above array */
90 dma_addr_t *addrs;
91
92 /**
93 * Virtual address, if mapped.
94 */
95 void *vaddr;
96
97 /**
98 * sync-object allocated on demand (if needed)
99 *
100 * Per-buffer sync-object for tracking pending and completed hw/dma
101 * read and write operations. The layout in memory is dictated by
102 * the SGX firmware, which uses this information to stall the command
103 * stream if a surface is not ready yet.
104 *
105 * Note that when the buffer is used by SGX, the sync-object needs to be
106 * allocated from a special heap of sync-objects. This way many sync
107 * objects can be packed in a page, and not waste GPU virtual address
108 * space. Because of this we have to have a omap_gem_set_sync_object()
109 * API to allow replacement of the syncobj after it has (potentially)
110 * already been allocated. A bit ugly but I haven't thought of a
111 * better alternative.
112 */
113 struct {
114 uint32_t write_pending;
115 uint32_t write_complete;
116 uint32_t read_pending;
117 uint32_t read_complete;
118 } *sync;
119 };
120
121 static int get_pages(struct drm_gem_object *obj, struct page ***pages);
122 static uint64_t mmap_offset(struct drm_gem_object *obj);
123
124 /* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
125 * not necessarily pinned in TILER all the time, and (b) when they are
126 * they are not necessarily page aligned, we reserve one or more small
127 * regions in each of the 2d containers to use as a user-GART where we
128 * can create a second page-aligned mapping of parts of the buffer
129 * being accessed from userspace.
130 *
131 * Note that we could optimize slightly when we know that multiple
132 * tiler containers are backed by the same PAT.. but I'll leave that
133 * for later..
134 */
135 #define NUM_USERGART_ENTRIES 2
136 struct usergart_entry {
137 struct tiler_block *block; /* the reserved tiler block */
138 dma_addr_t paddr;
139 struct drm_gem_object *obj; /* the current pinned obj */
140 pgoff_t obj_pgoff; /* page offset of obj currently
141 mapped in */
142 };
143 static struct {
144 struct usergart_entry entry[NUM_USERGART_ENTRIES];
145 int height; /* height in rows */
146 int height_shift; /* ilog2(height in rows) */
147 int slot_shift; /* ilog2(width per slot) */
148 int stride_pfn; /* stride in pages */
149 int last; /* index of last used entry */
150 } *usergart;
151
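/* Unmap any CPU mappings of the part of the object currently backed by
 * this usergart entry (handling the sparse layout used when the tiled
 * stride is wider than PAGE_SIZE), so a later fault will re-pin it.
 */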
152 static void evict_entry(struct drm_gem_object *obj,
153 enum tiler_fmt fmt, struct usergart_entry *entry)
154 {
155 if (obj->dev->dev_mapping) {
156 struct omap_gem_object *omap_obj = to_omap_bo(obj);
157 int n = usergart[fmt].height;
158 size_t size = PAGE_SIZE * n;
159 loff_t off = mmap_offset(obj) +
160 (entry->obj_pgoff << PAGE_SHIFT);
161 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
162 if (m > 1) {
163 int i;
164 /* if stride > PAGE_SIZE then sparse mapping: */
165 for (i = n; i > 0; i--) {
166 unmap_mapping_range(obj->dev->dev_mapping,
167 off, PAGE_SIZE, 1);
168 off += PAGE_SIZE * m;
169 }
170 } else {
171 unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
172 }
173 }
174
175 entry->obj = NULL;
176 }
177
178 /* Evict a buffer from usergart, if it is mapped there */
179 static void evict(struct drm_gem_object *obj)
180 {
181 struct omap_gem_object *omap_obj = to_omap_bo(obj);
182
183 if (omap_obj->flags & OMAP_BO_TILED) {
184 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
185 int i;
186
187 if (!usergart)
188 return;
189
190 for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
191 struct usergart_entry *entry = &usergart[fmt].entry[i];
192 if (entry->obj == obj)
193 evict_entry(obj, fmt, entry);
194 }
195 }
196 }
197
198 /* GEM objects can either be allocated from contiguous memory (in which
199 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
200 * contiguous buffers can be remapped in TILER/DMM if they need to be
201 * contiguous... but we don't do this all the time to reduce pressure
202 * on TILER/DMM space when we know at allocation time that the buffer
203 * will need to be scanned out.
204 */
205 static inline bool is_shmem(struct drm_gem_object *obj)
206 {
207 return obj->filp != NULL;
208 }
209
210 /**
211 * shmem buffers that are mapped cached can simulate coherency by using
212 * page faulting to keep track of dirty pages
213 */
214 static inline bool is_cached_coherent(struct drm_gem_object *obj)
215 {
216 struct omap_gem_object *omap_obj = to_omap_bo(obj);
217 return is_shmem(obj) &&
218 ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
219 }
220
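/* protects the per-buffer sync counters, the global waiters list, and
 * sync-object replacement in omap_gem_set_sync_object()
 */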
221 static DEFINE_SPINLOCK(sync_lock);
222
223 /** ensure backing pages are allocated */
224 static int omap_gem_attach_pages(struct drm_gem_object *obj)
225 {
226 struct drm_device *dev = obj->dev;
227 struct omap_gem_object *omap_obj = to_omap_bo(obj);
228 struct page **pages;
229 int npages = obj->size >> PAGE_SHIFT;
230 int i, ret;
231 dma_addr_t *addrs;
232
233 WARN_ON(omap_obj->pages);
234
235 /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
236 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
237 * we actually want CMA memory for it all anyways..
238 */
239 pages = _drm_gem_get_pages(obj, GFP_KERNEL);
240 if (IS_ERR(pages)) {
241 dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
242 return PTR_ERR(pages);
243 }
244
245 /* for non-cached buffers, ensure the new pages are clean because
246 * DSS, GPU, etc. are not cache coherent:
247 */
248 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
249 addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
250 if (!addrs) {
251 ret = -ENOMEM;
252 goto free_pages;
253 }
254
255 for (i = 0; i < npages; i++) {
256 addrs[i] = dma_map_page(dev->dev, pages[i],
257 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
258 }
259 } else {
260 addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
261 if (!addrs) {
262 ret = -ENOMEM;
263 goto free_pages;
264 }
265 }
266
267 omap_obj->addrs = addrs;
268 omap_obj->pages = pages;
269
270 return 0;
271
272 free_pages:
273 _drm_gem_put_pages(obj, pages, true, false);
274
275 return ret;
276 }
277
278 /** release backing pages */
279 static void omap_gem_detach_pages(struct drm_gem_object *obj)
280 {
281 struct omap_gem_object *omap_obj = to_omap_bo(obj);
282
283 /* for non-cached buffers, ensure the new pages are clean because
284 * DSS, GPU, etc. are not cache coherent:
285 */
286 if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
287 int i, npages = obj->size >> PAGE_SHIFT;
288 for (i = 0; i < npages; i++) {
289 dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
290 PAGE_SIZE, DMA_BIDIRECTIONAL);
291 }
292 }
293
294 kfree(omap_obj->addrs);
295 omap_obj->addrs = NULL;
296
297 _drm_gem_put_pages(obj, omap_obj->pages, true, false);
298 omap_obj->pages = NULL;
299 }
300
301 /* get buffer flags */
302 uint32_t omap_gem_flags(struct drm_gem_object *obj)
303 {
304 return to_omap_bo(obj)->flags;
305 }
306
307 /** get mmap offset */
308 static uint64_t mmap_offset(struct drm_gem_object *obj)
309 {
310 struct drm_device *dev = obj->dev;
311
312 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
313
314 if (!obj->map_list.map) {
315 /* Make it mmapable */
316 size_t size = omap_gem_mmap_size(obj);
317 int ret = _drm_gem_create_mmap_offset_size(obj, size);
318
319 if (ret) {
320 dev_err(dev->dev, "could not allocate mmap offset\n");
321 return 0;
322 }
323 }
324
325 return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
326 }
327
328 uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
329 {
330 uint64_t offset;
331 mutex_lock(&obj->dev->struct_mutex);
332 offset = mmap_offset(obj);
333 mutex_unlock(&obj->dev->struct_mutex);
334 return offset;
335 }
336
337 /** get mmap size */
338 size_t omap_gem_mmap_size(struct drm_gem_object *obj)
339 {
340 struct omap_gem_object *omap_obj = to_omap_bo(obj);
341 size_t size = obj->size;
342
343 if (omap_obj->flags & OMAP_BO_TILED) {
344 /* for tiled buffers, the virtual size has stride rounded up
345 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
346 * 32kb later!). But we don't back the entire buffer with
347 * pages, only the valid picture part.. so need to adjust for
348 * this in the size used to mmap and generate mmap offset
349 */
350 size = tiler_vsize(gem2fmt(omap_obj->flags),
351 omap_obj->width, omap_obj->height);
352 }
353
354 return size;
355 }
356
357 /* get tiled size, returns -EINVAL if not tiled buffer */
358 int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
359 {
360 struct omap_gem_object *omap_obj = to_omap_bo(obj);
361 if (omap_obj->flags & OMAP_BO_TILED) {
362 *w = omap_obj->width;
363 *h = omap_obj->height;
364 return 0;
365 }
366 return -EINVAL;
367 }
368
369 /* Normal handling for the case of faulting in non-tiled buffers */
370 static int fault_1d(struct drm_gem_object *obj,
371 struct vm_area_struct *vma, struct vm_fault *vmf)
372 {
373 struct omap_gem_object *omap_obj = to_omap_bo(obj);
374 unsigned long pfn;
375 pgoff_t pgoff;
376
377 /* We don't use vmf->pgoff since that has the fake offset: */
378 pgoff = ((unsigned long)vmf->virtual_address -
379 vma->vm_start) >> PAGE_SHIFT;
380
381 if (omap_obj->pages) {
382 omap_gem_cpu_sync(obj, pgoff);
383 pfn = page_to_pfn(omap_obj->pages[pgoff]);
384 } else {
385 BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
386 pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
387 }
388
389 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
390 pfn, pfn << PAGE_SHIFT);
391
392 return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
393 }
394
395 /* Special handling for the case of faulting in 2d tiled buffers */
396 static int fault_2d(struct drm_gem_object *obj,
397 struct vm_area_struct *vma, struct vm_fault *vmf)
398 {
399 struct omap_gem_object *omap_obj = to_omap_bo(obj);
400 struct usergart_entry *entry;
401 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
402 struct page *pages[64]; /* XXX is this too much to have on stack? */
403 unsigned long pfn;
404 pgoff_t pgoff, base_pgoff;
405 void __user *vaddr;
406 int i, ret, slots;
407
408 /*
409 * Note the height of the slot is also equal to the number of pages
410 * that need to be mapped in to fill a 4kb-wide CPU page. If the slot
411 * height is 64, then 64 pages fill a 4kb-wide by 64-row region.
412 */
413 const int n = usergart[fmt].height;
414 const int n_shift = usergart[fmt].height_shift;
415
416 /*
417 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
418 * rounded up to the next multiple of PAGE_SIZE.. this needs to be taken
419 * into account in some of the math, so figure out virtual stride
420 * in pages
421 */
422 const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
423
424 /* We don't use vmf->pgoff since that has the fake offset: */
425 pgoff = ((unsigned long)vmf->virtual_address -
426 vma->vm_start) >> PAGE_SHIFT;
427
428 /*
429 * Actual address we start mapping at is rounded down to previous slot
430 * boundary in the y direction:
431 */
432 base_pgoff = round_down(pgoff, m << n_shift);
433
434 /* figure out buffer width in slots */
435 slots = omap_obj->width >> usergart[fmt].slot_shift;
436
437 vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
438
439 entry = &usergart[fmt].entry[usergart[fmt].last];
440
441 /* evict previous buffer using this usergart entry, if any: */
442 if (entry->obj)
443 evict_entry(entry->obj, fmt, entry);
444
445 entry->obj = obj;
446 entry->obj_pgoff = base_pgoff;
447
448 /* now convert base_pgoff to phys offset from virt offset: */
449 base_pgoff = (base_pgoff >> n_shift) * slots;
450
451 /* for wider-than 4k.. figure out which part of the slot-row we want: */
452 if (m > 1) {
453 int off = pgoff % m;
454 entry->obj_pgoff += off;
455 base_pgoff /= m;
456 slots = min(slots - (off << n_shift), n);
457 base_pgoff += off << n_shift;
458 vaddr += off << PAGE_SHIFT;
459 }
460
461 /*
462 * Map in pages. Beyond the valid pixel part of the buffer, we set
463 * pages[i] to NULL to get a dummy page mapped in.. if someone
464 * reads/writes it they will get random/undefined content, but at
465 * least it won't be corrupting whatever other random page used to
466 * be mapped in, or other undefined behavior.
467 */
468 memcpy(pages, &omap_obj->pages[base_pgoff],
469 sizeof(struct page *) * slots);
470 memset(pages + slots, 0,
471 sizeof(struct page *) * (n - slots));
472
473 ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
474 if (ret) {
475 dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
476 return ret;
477 }
478
479 pfn = entry->paddr >> PAGE_SHIFT;
480
481 VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
482 pfn, pfn << PAGE_SHIFT);
483
484 for (i = n; i > 0; i--) {
485 vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
486 pfn += usergart[fmt].stride_pfn;
487 vaddr += PAGE_SIZE * m;
488 }
489
490 /* simple round-robin: */
491 usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
492
493 return 0;
494 }
495
496 /**
497 * omap_gem_fault - pagefault handler for GEM objects
498 * @vma: the VMA of the GEM object
499 * @vmf: fault detail
500 *
501 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
502 * does most of the work for us including the actual map/unmap calls
503 * but we need to do the actual page work.
504 *
505 * The VMA was set up by GEM. In doing so it also ensured that the
506 * vma->vm_private_data points to the GEM object that is backing this
507 * mapping.
508 */
509 int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
510 {
511 struct drm_gem_object *obj = vma->vm_private_data;
512 struct omap_gem_object *omap_obj = to_omap_bo(obj);
513 struct drm_device *dev = obj->dev;
514 struct page **pages;
515 int ret;
516
517 /* Make sure we don't parallel update on a fault, nor move or remove
518 * something from beneath our feet
519 */
520 mutex_lock(&dev->struct_mutex);
521
522 /* if a shmem backed object, make sure we have pages attached now */
523 ret = get_pages(obj, &pages);
524 if (ret)
525 goto fail;
526
527 /* where should we do corresponding put_pages().. we are mapping
528 * the original page, rather than thru a GART, so we can't rely
529 * on eviction to trigger this. But munmap() of all mappings should
530 * probably trigger put_pages()?
531 */
532
533 if (omap_obj->flags & OMAP_BO_TILED)
534 ret = fault_2d(obj, vma, vmf);
535 else
536 ret = fault_1d(obj, vma, vmf);
537
538
539 fail:
540 mutex_unlock(&dev->struct_mutex);
541 switch (ret) {
542 case 0:
543 case -ERESTARTSYS:
544 case -EINTR:
545 return VM_FAULT_NOPAGE;
546 case -ENOMEM:
547 return VM_FAULT_OOM;
548 default:
549 return VM_FAULT_SIGBUS;
550 }
551 }
552
553 /** We override mainly to fix up some of the vm mapping flags.. */
554 int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
555 {
556 int ret;
557
558 ret = drm_gem_mmap(filp, vma);
559 if (ret) {
560 DBG("mmap failed: %d", ret);
561 return ret;
562 }
563
564 return omap_gem_mmap_obj(vma->vm_private_data, vma);
565 }
566
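/* Fix up the vma flags and page protection for CPU access according to
 * the buffer's caching flags; omap_gem_mmap() calls this after the core
 * drm_gem_mmap() has done the generic setup.
 */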
567 int omap_gem_mmap_obj(struct drm_gem_object *obj,
568 struct vm_area_struct *vma)
569 {
570 struct omap_gem_object *omap_obj = to_omap_bo(obj);
571
572 vma->vm_flags &= ~VM_PFNMAP;
573 vma->vm_flags |= VM_MIXEDMAP;
574
575 if (omap_obj->flags & OMAP_BO_WC) {
576 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
577 } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
578 vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
579 } else {
580 /*
581 * We do have some private objects, at least for scanout buffers
582 * on hardware without DMM/TILER. But these are allocated write-
583 * combine
584 */
585 if (WARN_ON(!obj->filp))
586 return -EINVAL;
587
588 /*
589 * Shunt off cached objs to shmem file so they have their own
590 * address_space (so unmap_mapping_range does what we want,
591 * in particular in the case of mmap'd dmabufs)
592 */
593 fput(vma->vm_file);
594 vma->vm_pgoff = 0;
595 vma->vm_file = get_file(obj->filp);
596
597 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
598 }
599
600 return 0;
601 }
602
603
604 /**
605 * omap_gem_dumb_create - create a dumb buffer
606 * @file: our client file
607 * @dev: our device
608 * @args: the requested arguments copied from userspace
609 *
610 * Allocate a buffer suitable for use for a frame buffer of the
611 * form described by user space. Give userspace a handle by which
612 * to reference it.
613 */
614 int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
615 struct drm_mode_create_dumb *args)
616 {
617 union omap_gem_size gsize;
618
619 /* in case someone tries to feed us a completely bogus stride: */
620 args->pitch = align_pitch(args->pitch, args->width, args->bpp);
621 args->size = PAGE_ALIGN(args->pitch * args->height);
622
623 gsize = (union omap_gem_size){
624 .bytes = args->size,
625 };
626
627 return omap_gem_new_handle(dev, file, gsize,
628 OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
629 }
630
631 /**
632 * omap_gem_dumb_destroy - destroy a dumb buffer
633 * @file: client file
634 * @dev: our DRM device
635 * @handle: the object handle
636 *
637 * Destroy a handle that was created via omap_gem_dumb_create.
638 */
639 int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
640 uint32_t handle)
641 {
642 /* No special work needed, drop the reference and see what falls out */
643 return drm_gem_handle_delete(file, handle);
644 }
645
646 /**
647 * omap_gem_dumb_map_offset - buffer mapping for dumb interface
648 * @file: our drm client file
649 * @dev: drm device
650 * @handle: GEM handle to the object (from dumb_create)
651 *
652 * Do the necessary setup to allow the mapping of the frame buffer
653 * into user memory. We don't have to do much here at the moment.
654 */
655 int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
656 uint32_t handle, uint64_t *offset)
657 {
658 struct drm_gem_object *obj;
659 int ret = 0;
660
661 /* GEM does all our handle to object mapping */
662 obj = drm_gem_object_lookup(dev, file, handle);
663 if (obj == NULL) {
664 ret = -ENOENT;
665 goto fail;
666 }
667
668 *offset = omap_gem_mmap_offset(obj);
669
670 drm_gem_object_unreference_unlocked(obj);
671
672 fail:
673 return ret;
674 }
675
676 /* Set scrolling position. This allows us to implement fast scrolling
677 * for console.
678 *
679 * Call only from non-atomic contexts.
680 */
681 int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
682 {
683 struct omap_gem_object *omap_obj = to_omap_bo(obj);
684 uint32_t npages = obj->size >> PAGE_SHIFT;
685 int ret = 0;
686
687 if (roll > npages) {
688 dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
689 return -EINVAL;
690 }
691
692 omap_obj->roll = roll;
693
694 mutex_lock(&obj->dev->struct_mutex);
695
696 /* if we aren't mapped yet, we don't need to do anything */
697 if (omap_obj->block) {
698 struct page **pages;
699 ret = get_pages(obj, &pages);
700 if (ret)
701 goto fail;
702 ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
703 if (ret)
704 dev_err(obj->dev->dev, "could not repin: %d\n", ret);
705 }
706
707 fail:
708 mutex_unlock(&obj->dev->struct_mutex);
709
710 return ret;
711 }
712
713 /* Sync the buffer for CPU access.. note pages should already be
714 * attached, i.e. via omap_gem_get_pages()
715 */
716 void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
717 {
718 struct drm_device *dev = obj->dev;
719 struct omap_gem_object *omap_obj = to_omap_bo(obj);
720
721 if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
722 dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
723 PAGE_SIZE, DMA_BIDIRECTIONAL);
724 omap_obj->addrs[pgoff] = 0;
725 }
726 }
727
728 /* sync the buffer for DMA access */
729 void omap_gem_dma_sync(struct drm_gem_object *obj,
730 enum dma_data_direction dir)
731 {
732 struct drm_device *dev = obj->dev;
733 struct omap_gem_object *omap_obj = to_omap_bo(obj);
734
735 if (is_cached_coherent(obj)) {
736 int i, npages = obj->size >> PAGE_SHIFT;
737 struct page **pages = omap_obj->pages;
738 bool dirty = false;
739
740 for (i = 0; i < npages; i++) {
741 if (!omap_obj->addrs[i]) {
742 omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
743 PAGE_SIZE, DMA_BIDIRECTIONAL);
744 dirty = true;
745 }
746 }
747
748 if (dirty) {
749 unmap_mapping_range(obj->filp->f_mapping, 0,
750 omap_gem_mmap_size(obj), 1);
751 }
752 }
753 }
754
755 /* Get physical address for DMA.. if 'remap' is true, and the buffer is not
756 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
757 * map in TILER)
758 */
759 int omap_gem_get_paddr(struct drm_gem_object *obj,
760 dma_addr_t *paddr, bool remap)
761 {
762 struct omap_drm_private *priv = obj->dev->dev_private;
763 struct omap_gem_object *omap_obj = to_omap_bo(obj);
764 int ret = 0;
765
766 mutex_lock(&obj->dev->struct_mutex);
767
768 if (remap && is_shmem(obj) && priv->has_dmm) {
769 if (omap_obj->paddr_cnt == 0) {
770 struct page **pages;
771 uint32_t npages = obj->size >> PAGE_SHIFT;
772 enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
773 struct tiler_block *block;
774
775 BUG_ON(omap_obj->block);
776
777 ret = get_pages(obj, &pages);
778 if (ret)
779 goto fail;
780
781 if (omap_obj->flags & OMAP_BO_TILED) {
782 block = tiler_reserve_2d(fmt,
783 omap_obj->width,
784 omap_obj->height, 0);
785 } else {
786 block = tiler_reserve_1d(obj->size);
787 }
788
789 if (IS_ERR(block)) {
790 ret = PTR_ERR(block);
791 dev_err(obj->dev->dev,
792 "could not remap: %d (%d)\n", ret, fmt);
793 goto fail;
794 }
795
796 /* TODO: enable async refill.. */
797 ret = tiler_pin(block, pages, npages,
798 omap_obj->roll, true);
799 if (ret) {
800 tiler_release(block);
801 dev_err(obj->dev->dev,
802 "could not pin: %d\n", ret);
803 goto fail;
804 }
805
806 omap_obj->paddr = tiler_ssptr(block);
807 omap_obj->block = block;
808
809 DBG("got paddr: %08x", omap_obj->paddr);
810 }
811
812 omap_obj->paddr_cnt++;
813
814 *paddr = omap_obj->paddr;
815 } else if (omap_obj->flags & OMAP_BO_DMA) {
816 *paddr = omap_obj->paddr;
817 } else {
818 ret = -EINVAL;
819 goto fail;
820 }
821
822 fail:
823 mutex_unlock(&obj->dev->struct_mutex);
824
825 return ret;
826 }
827
828 /* Release physical address, when DMA is no longer being performed.. this
829 * could potentially unpin and unmap buffers from TILER
830 */
831 int omap_gem_put_paddr(struct drm_gem_object *obj)
832 {
833 struct omap_gem_object *omap_obj = to_omap_bo(obj);
834 int ret = 0;
835
836 mutex_lock(&obj->dev->struct_mutex);
837 if (omap_obj->paddr_cnt > 0) {
838 omap_obj->paddr_cnt--;
839 if (omap_obj->paddr_cnt == 0) {
840 ret = tiler_unpin(omap_obj->block);
841 if (ret) {
842 dev_err(obj->dev->dev,
843 "could not unpin pages: %d\n", ret);
844 goto fail;
845 }
846 ret = tiler_release(omap_obj->block);
847 if (ret) {
848 dev_err(obj->dev->dev,
849 "could not release unmap: %d\n", ret);
850 }
851 omap_obj->block = NULL;
852 }
853 }
854 fail:
855 mutex_unlock(&obj->dev->struct_mutex);
856 return ret;
857 }
858
859 /* Get rotated scanout address (only valid if already pinned), at the
860 * specified orientation and x,y offset from top-left corner of buffer
861 * (only valid for tiled 2d buffers)
862 */
863 int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
864 int x, int y, dma_addr_t *paddr)
865 {
866 struct omap_gem_object *omap_obj = to_omap_bo(obj);
867 int ret = -EINVAL;
868
869 mutex_lock(&obj->dev->struct_mutex);
870 if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
871 (omap_obj->flags & OMAP_BO_TILED)) {
872 *paddr = tiler_tsptr(omap_obj->block, orient, x, y);
873 ret = 0;
874 }
875 mutex_unlock(&obj->dev->struct_mutex);
876 return ret;
877 }
878
879 /* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
880 int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
881 {
882 struct omap_gem_object *omap_obj = to_omap_bo(obj);
883 int ret = -EINVAL;
884 if (omap_obj->flags & OMAP_BO_TILED)
885 ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
886 return ret;
887 }
888
889 /* acquire pages when needed (for example, for DMA where physically
890 * contiguous buffer is not required)
891 */
892 static int get_pages(struct drm_gem_object *obj, struct page ***pages)
893 {
894 struct omap_gem_object *omap_obj = to_omap_bo(obj);
895 int ret = 0;
896
897 if (is_shmem(obj) && !omap_obj->pages) {
898 ret = omap_gem_attach_pages(obj);
899 if (ret) {
900 dev_err(obj->dev->dev, "could not attach pages\n");
901 return ret;
902 }
903 }
904
905 /* TODO: even phys-contig.. we should have a list of pages? */
906 *pages = omap_obj->pages;
907
908 return 0;
909 }
910
911 /* if !remap, and we don't have pages backing, then fail, rather than
912 * increasing the pin count (which we don't really do yet anyways,
913 * because we don't support swapping pages back out). And 'remap'
914 * might not be quite the right name, but I wanted to keep it working
915 * similarly to omap_gem_get_paddr(). Note though that mutex is not
916 * acquired if !remap (because this can be called in atomic ctxt),
917 * but probably omap_gem_get_paddr() should be changed to work in the
918 * same way. If !remap, a matching omap_gem_put_pages() call is not
919 * required (and should not be made).
920 */
921 int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
922 bool remap)
923 {
924 int ret;
925 if (!remap) {
926 struct omap_gem_object *omap_obj = to_omap_bo(obj);
927 if (!omap_obj->pages)
928 return -ENOMEM;
929 *pages = omap_obj->pages;
930 return 0;
931 }
932 mutex_lock(&obj->dev->struct_mutex);
933 ret = get_pages(obj, pages);
934 mutex_unlock(&obj->dev->struct_mutex);
935 return ret;
936 }
937
938 /* release pages when DMA no longer being performed */
939 int omap_gem_put_pages(struct drm_gem_object *obj)
940 {
941 /* do something here if we dynamically attach/detach pages.. at
942 * least they would no longer need to be pinned if everyone has
943 * released the pages..
944 */
945 return 0;
946 }
947
948 /* Get kernel virtual address for CPU access.. this more or less only
949 * exists for omap_fbdev. This should be called with struct_mutex
950 * held.
951 */
952 void *omap_gem_vaddr(struct drm_gem_object *obj)
953 {
954 struct omap_gem_object *omap_obj = to_omap_bo(obj);
955 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
956 if (!omap_obj->vaddr) {
957 struct page **pages;
958 int ret = get_pages(obj, &pages);
959 if (ret)
960 return ERR_PTR(ret);
961 omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
962 VM_MAP, pgprot_writecombine(PAGE_KERNEL));
963 }
964 return omap_obj->vaddr;
965 }
966
967 #ifdef CONFIG_DEBUG_FS
968 void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
969 {
970 struct drm_device *dev = obj->dev;
971 struct omap_gem_object *omap_obj = to_omap_bo(obj);
972 uint64_t off = 0;
973
974 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
975
976 if (obj->map_list.map)
977 off = (uint64_t)obj->map_list.hash.key;
978
979 seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
980 omap_obj->flags, obj->name, obj->refcount.refcount.counter,
981 off, omap_obj->paddr, omap_obj->paddr_cnt,
982 omap_obj->vaddr, omap_obj->roll);
983
984 if (omap_obj->flags & OMAP_BO_TILED) {
985 seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
986 if (omap_obj->block) {
987 struct tcm_area *area = &omap_obj->block->area;
988 seq_printf(m, " (%dx%d, %dx%d)",
989 area->p0.x, area->p0.y,
990 area->p1.x, area->p1.y);
991 }
992 } else {
993 seq_printf(m, " %d", obj->size);
994 }
995
996 seq_printf(m, "\n");
997 }
998
999 void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
1000 {
1001 struct omap_gem_object *omap_obj;
1002 int count = 0;
1003 size_t size = 0;
1004
1005 list_for_each_entry(omap_obj, list, mm_list) {
1006 struct drm_gem_object *obj = &omap_obj->base;
1007 seq_printf(m, " ");
1008 omap_gem_describe(obj, m);
1009 count++;
1010 size += obj->size;
1011 }
1012
1013 seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
1014 }
1015 #endif
1016
1017 /* Buffer Synchronization:
1018 */
1019
1020 struct omap_gem_sync_waiter {
1021 struct list_head list;
1022 struct omap_gem_object *omap_obj;
1023 enum omap_gem_op op;
1024 uint32_t read_target, write_target;
1025 /* notify called w/ sync_lock held */
1026 void (*notify)(void *arg);
1027 void *arg;
1028 };
1029
1030 /* list of omap_gem_sync_waiter.. the notify fxn gets called back when
1031 * the read and/or write target count is achieved which can call a user
1032 * callback (ex. to kick 3d and/or 2d), wake up a blocked task (prep for
1033 * cpu access), etc.
1034 */
1035 static LIST_HEAD(waiters);
1036
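/* returns true while the waiter's read and/or write completion targets
 * have not yet been reached
 */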
1037 static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
1038 {
1039 struct omap_gem_object *omap_obj = waiter->omap_obj;
1040 if ((waiter->op & OMAP_GEM_READ) &&
1041 (omap_obj->sync->read_complete < waiter->read_target))
1042 return true;
1043 if ((waiter->op & OMAP_GEM_WRITE) &&
1044 (omap_obj->sync->write_complete < waiter->write_target))
1045 return true;
1046 return false;
1047 }
1048
1049 /* macro for sync debug.. */
1050 #define SYNCDBG 0
1051 #define SYNC(fmt, ...) do { if (SYNCDBG) \
1052 printk(KERN_ERR "%s:%d: "fmt"\n", \
1053 __func__, __LINE__, ##__VA_ARGS__); \
1054 } while (0)
1055
1056
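/* Walk the global waiters list and fire the notify callback for any
 * waiter whose read/write targets have now been met.  Caller must hold
 * sync_lock.
 */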
1057 static void sync_op_update(void)
1058 {
1059 struct omap_gem_sync_waiter *waiter, *n;
1060 list_for_each_entry_safe(waiter, n, &waiters, list) {
1061 if (!is_waiting(waiter)) {
1062 list_del(&waiter->list);
1063 SYNC("notify: %p", waiter);
1064 waiter->notify(waiter->arg);
1065 kfree(waiter);
1066 }
1067 }
1068 }
1069
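/* Common helper for omap_gem_op_start()/omap_gem_op_finish(): bump the
 * pending counters when an operation starts, the complete counters when
 * it finishes, and re-check waiters on completion.
 */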
1070 static inline int sync_op(struct drm_gem_object *obj,
1071 enum omap_gem_op op, bool start)
1072 {
1073 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1074 int ret = 0;
1075
1076 spin_lock(&sync_lock);
1077
1078 if (!omap_obj->sync) {
1079 omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
1080 if (!omap_obj->sync) {
1081 ret = -ENOMEM;
1082 goto unlock;
1083 }
1084 }
1085
1086 if (start) {
1087 if (op & OMAP_GEM_READ)
1088 omap_obj->sync->read_pending++;
1089 if (op & OMAP_GEM_WRITE)
1090 omap_obj->sync->write_pending++;
1091 } else {
1092 if (op & OMAP_GEM_READ)
1093 omap_obj->sync->read_complete++;
1094 if (op & OMAP_GEM_WRITE)
1095 omap_obj->sync->write_complete++;
1096 sync_op_update();
1097 }
1098
1099 unlock:
1100 spin_unlock(&sync_lock);
1101
1102 return ret;
1103 }
1104
1105 /* it is a bit lame to handle updates in this sort of polling way, but
1106 * in case of PVR, the GPU can directly update read/write complete
1107 * values, and not really tell us which ones it updated.. this also
1108 * means that sync_lock is not quite sufficient. So we'll need to
1109 * do something a bit better when it comes time to add support for
1110 * separate 2d hw..
1111 */
1112 void omap_gem_op_update(void)
1113 {
1114 spin_lock(&sync_lock);
1115 sync_op_update();
1116 spin_unlock(&sync_lock);
1117 }
1118
1119 /* mark the start of read and/or write operation */
1120 int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
1121 {
1122 return sync_op(obj, op, true);
1123 }
1124
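/* mark the end of read and/or write operation */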
1125 int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
1126 {
1127 return sync_op(obj, op, false);
1128 }
1129
1130 static DECLARE_WAIT_QUEUE_HEAD(sync_event);
1131
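/* notify callback used by omap_gem_op_sync() to wake the blocked task */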
1132 static void sync_notify(void *arg)
1133 {
1134 struct task_struct **waiter_task = arg;
1135 *waiter_task = NULL;
1136 wake_up_all(&sync_event);
1137 }
1138
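/* Block (interruptibly) until all read and/or write operations that were
 * pending against the buffer at the time of the call have completed.
 */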
1139 int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
1140 {
1141 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1142 int ret = 0;
1143 if (omap_obj->sync) {
1144 struct task_struct *waiter_task = current;
1145 struct omap_gem_sync_waiter *waiter =
1146 kzalloc(sizeof(*waiter), GFP_KERNEL);
1147
1148 if (!waiter)
1149 return -ENOMEM;
1150
1151 waiter->omap_obj = omap_obj;
1152 waiter->op = op;
1153 waiter->read_target = omap_obj->sync->read_pending;
1154 waiter->write_target = omap_obj->sync->write_pending;
1155 waiter->notify = sync_notify;
1156 waiter->arg = &waiter_task;
1157
1158 spin_lock(&sync_lock);
1159 if (is_waiting(waiter)) {
1160 SYNC("waited: %p", waiter);
1161 list_add_tail(&waiter->list, &waiters);
1162 spin_unlock(&sync_lock);
1163 ret = wait_event_interruptible(sync_event,
1164 (waiter_task == NULL));
1165 spin_lock(&sync_lock);
1166 if (waiter_task) {
1167 SYNC("interrupted: %p", waiter);
1168 /* we were interrupted */
1169 list_del(&waiter->list);
1170 waiter_task = NULL;
1171 } else {
1172 /* freed in sync_op_update() */
1173 waiter = NULL;
1174 }
1175 }
1176 spin_unlock(&sync_lock);
1177
1178 if (waiter)
1179 kfree(waiter);
1180 }
1181 return ret;
1182 }
1183
1184 /* call fxn(arg), either synchronously or asynchronously if the op
1185 * is currently blocked.. fxn() can be called from any context
1186 *
1187 * (TODO for now fxn is called back from whichever context calls
1188 * omap_gem_op_update().. but this could be better defined later
1189 * if needed)
1190 *
1191 * TODO more code in common w/ _sync()..
1192 */
1193 int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
1194 void (*fxn)(void *arg), void *arg)
1195 {
1196 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1197 if (omap_obj->sync) {
1198 struct omap_gem_sync_waiter *waiter =
1199 kzalloc(sizeof(*waiter), GFP_ATOMIC);
1200
1201 if (!waiter)
1202 return -ENOMEM;
1203
1204 waiter->omap_obj = omap_obj;
1205 waiter->op = op;
1206 waiter->read_target = omap_obj->sync->read_pending;
1207 waiter->write_target = omap_obj->sync->write_pending;
1208 waiter->notify = fxn;
1209 waiter->arg = arg;
1210
1211 spin_lock(&sync_lock);
1212 if (is_waiting(waiter)) {
1213 SYNC("waited: %p", waiter);
1214 list_add_tail(&waiter->list, &waiters);
1215 spin_unlock(&sync_lock);
1216 return 0;
1217 }
1218
1219 spin_unlock(&sync_lock);
1220 }
1221
1222 /* no waiting.. */
1223 fxn(arg);
1224
1225 return 0;
1226 }
1227
1228 /* special API so PVR can update the buffer to use a sync-object allocated
1229 * from its sync-obj heap. Only used for a newly allocated (from PVR's
1230 * perspective) sync-object, so we overwrite the new syncobj w/ values
1231 * from the already allocated syncobj (if there is one)
1232 */
1233 int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
1234 {
1235 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1236 int ret = 0;
1237
1238 spin_lock(&sync_lock);
1239
1240 if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
1241 /* clearing a previously set syncobj */
1242 syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
1243 if (!syncobj) {
1244 ret = -ENOMEM;
1245 goto unlock;
1246 }
1247 memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
1248 omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
1249 omap_obj->sync = syncobj;
1250 } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
1251 /* replacing an existing syncobj */
1252 if (omap_obj->sync) {
1253 memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
1254 kfree(omap_obj->sync);
1255 }
1256 omap_obj->flags |= OMAP_BO_EXT_SYNC;
1257 omap_obj->sync = syncobj;
1258 }
1259
1260 unlock:
1261 spin_unlock(&sync_lock);
1262 return ret;
1263 }
1264
1265 int omap_gem_init_object(struct drm_gem_object *obj)
1266 {
1267 return -EINVAL; /* unused */
1268 }
1269
1270 /* don't call directly.. called from GEM core when it is time to actually
1271 * free the object..
1272 */
1273 void omap_gem_free_object(struct drm_gem_object *obj)
1274 {
1275 struct drm_device *dev = obj->dev;
1276 struct omap_gem_object *omap_obj = to_omap_bo(obj);
1277
1278 evict(obj);
1279
1280 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1281
1282 list_del(&omap_obj->mm_list);
1283
1284 if (obj->map_list.map)
1285 drm_gem_free_mmap_offset(obj);
1286
1287 /* this means the object is still pinned.. which really should
1288 * not happen. I think..
1289 */
1290 WARN_ON(omap_obj->paddr_cnt > 0);
1291
1292 /* don't free externally allocated backing memory */
1293 if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
1294 if (omap_obj->pages)
1295 omap_gem_detach_pages(obj);
1296
1297 if (!is_shmem(obj)) {
1298 dma_free_writecombine(dev->dev, obj->size,
1299 omap_obj->vaddr, omap_obj->paddr);
1300 } else if (omap_obj->vaddr) {
1301 vunmap(omap_obj->vaddr);
1302 }
1303 }
1304
1305 /* don't free externally allocated syncobj */
1306 if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
1307 kfree(omap_obj->sync);
1308
1309 drm_gem_object_release(obj);
1310
1311 kfree(obj);
1312 }
1313
1314 /* convenience method to construct a GEM buffer object, and userspace handle */
1315 int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
1316 union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
1317 {
1318 struct drm_gem_object *obj;
1319 int ret;
1320
1321 obj = omap_gem_new(dev, gsize, flags);
1322 if (!obj)
1323 return -ENOMEM;
1324
1325 ret = drm_gem_handle_create(file, obj, handle);
1326 if (ret) {
1327 drm_gem_object_release(obj);
1328 kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
1329 return ret;
1330 }
1331
1332 /* drop reference from allocate - handle holds it now */
1333 drm_gem_object_unreference_unlocked(obj);
1334
1335 return 0;
1336 }
1337
1338 /* GEM buffer object constructor */
1339 struct drm_gem_object *omap_gem_new(struct drm_device *dev,
1340 union omap_gem_size gsize, uint32_t flags)
1341 {
1342 struct omap_drm_private *priv = dev->dev_private;
1343 struct omap_gem_object *omap_obj;
1344 struct drm_gem_object *obj = NULL;
1345 size_t size;
1346 int ret;
1347
1348 if (flags & OMAP_BO_TILED) {
1349 if (!usergart) {
1350 dev_err(dev->dev, "Tiled buffers require DMM\n");
1351 goto fail;
1352 }
1353
1354 /* tiled buffers are always shmem page-backed.. when they are
1355 * scanned out, they are remapped into DMM/TILER
1356 */
1357 flags &= ~OMAP_BO_SCANOUT;
1358
1359 /* currently don't allow cached buffers.. there is some caching
1360 * stuff that needs to be handled better
1361 */
1362 flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
1363 flags |= OMAP_BO_WC;
1364
1365 /* align dimensions to slot boundaries... */
1366 tiler_align(gem2fmt(flags),
1367 &gsize.tiled.width, &gsize.tiled.height);
1368
1369 /* ...and calculate size based on aligned dimensions */
1370 size = tiler_size(gem2fmt(flags),
1371 gsize.tiled.width, gsize.tiled.height);
1372 } else {
1373 size = PAGE_ALIGN(gsize.bytes);
1374 }
1375
1376 omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
1377 if (!omap_obj) {
1378 dev_err(dev->dev, "could not allocate GEM object\n");
1379 goto fail;
1380 }
1381
1382 list_add(&omap_obj->mm_list, &priv->obj_list);
1383
1384 obj = &omap_obj->base;
1385
1386 if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
1387 /* attempt to allocate contiguous memory if we don't
1388 * have DMM for remapping discontiguous buffers
1389 */
1390 omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
1391 &omap_obj->paddr, GFP_KERNEL);
1392 if (omap_obj->vaddr)
1393 flags |= OMAP_BO_DMA;
1394
1395 }
1396
1397 omap_obj->flags = flags;
1398
1399 if (flags & OMAP_BO_TILED) {
1400 omap_obj->width = gsize.tiled.width;
1401 omap_obj->height = gsize.tiled.height;
1402 }
1403
1404 if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
1405 ret = drm_gem_private_object_init(dev, obj, size);
1406 else
1407 ret = drm_gem_object_init(dev, obj, size);
1408
1409 if (ret)
1410 goto fail;
1411
1412 return obj;
1413
1414 fail:
1415 if (obj)
1416 omap_gem_free_object(obj);
1417
1418 return NULL;
1419 }
1420
1421 /* init/cleanup.. if DMM is used, we need to set some stuff up.. */
1422 void omap_gem_init(struct drm_device *dev)
1423 {
1424 struct omap_drm_private *priv = dev->dev_private;
1425 const enum tiler_fmt fmts[] = {
1426 TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
1427 };
1428 int i, j;
1429
1430 if (!dmm_is_available()) {
1431 /* DMM only supported on OMAP4 and later, so this isn't fatal */
1432 dev_warn(dev->dev, "DMM not available, disabling DMM support\n");
1433 return;
1434 }
1435
1436 usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
1437 if (!usergart) {
1438 dev_warn(dev->dev, "could not allocate usergart\n");
1439 return;
1440 }
1441
1442 /* reserve 4k aligned/wide regions for userspace mappings: */
1443 for (i = 0; i < ARRAY_SIZE(fmts); i++) {
1444 uint16_t h = 1, w = PAGE_SIZE >> i;
1445 tiler_align(fmts[i], &w, &h);
1446 /* note: since each region is one 4kb page wide and the minimum
1447 * number of rows tall, the height ends up being the same as the
1448 * # of pages in the region
1449 */
1450 usergart[i].height = h;
1451 usergart[i].height_shift = ilog2(h);
1452 usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
1453 usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
1454 for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
1455 struct usergart_entry *entry = &usergart[i].entry[j];
1456 struct tiler_block *block =
1457 tiler_reserve_2d(fmts[i], w, h,
1458 PAGE_SIZE);
1459 if (IS_ERR(block)) {
1460 dev_err(dev->dev,
1461 "reserve failed: %d, %d, %ld\n",
1462 i, j, PTR_ERR(block));
1463 return;
1464 }
1465 entry->paddr = tiler_ssptr(block);
1466 entry->block = block;
1467
1468 DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
1469 entry->paddr,
1470 usergart[i].stride_pfn << PAGE_SHIFT);
1471 }
1472 }
1473
1474 priv->has_dmm = true;
1475 }
1476
1477 void omap_gem_deinit(struct drm_device *dev)
1478 {
1479 /* I believe we can rely on there being no more outstanding GEM
1480 * objects which could depend on usergart/dmm at this point.
1481 */
1482 kfree(usergart);
1483 }