/*
 * drivers/gpu/drm/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/shmem_fs.h>
#include <linux/spinlock.h>
#include <linux/pfn_t.h>

#include <drm/drm_vma_manager.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/*
 * GEM buffer object implementation.
 */
/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
struct omap_gem_object {
	struct drm_gem_object base;

	struct list_head mm_list;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
	 * buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/** # of users of paddr */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/** addresses corresponding to pages in above array */
	dma_addr_t *addrs;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have a omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};
#define to_omap_bo(x)	container_of(x, struct omap_gem_object, base)
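/*
 * Illustrative sketch (not part of the driver): a consumer that needs the
 * buffer's physical address for DMA, and cannot assume the buffer was
 * allocated contiguous, is expected to pin it via omap_gem_get_paddr() and
 * drop the pin when the DMA is done, e.g.:
 *
 *	dma_addr_t paddr;
 *	int ret = omap_gem_get_paddr(obj, &paddr, true);
 *	if (!ret) {
 *		... point the DMA engine / DSS at paddr ...
 *		omap_gem_put_paddr(obj);
 *	}
 *
 * Only buffers with OMAP_BO_DMA set may use omap_obj->paddr directly
 * without holding such a pin.
 */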
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct omap_drm_usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					   mapped in */
};

struct omap_drm_usergart {
	struct omap_drm_usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
};
/* -----------------------------------------------------------------------------
 * Helpers
 */
/** get mmap offset */
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	int ret;
	size_t size;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Make it mmapable */
	size = omap_gem_mmap_size(obj);
	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret) {
		dev_err(dev->dev, "could not allocate mmap offset\n");
		return 0;
	}

	return drm_vma_node_offset_addr(&obj->vma_node);
}
/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't do this all the time to reduce pressure
 * on TILER/DMM space when we know at allocation time that the buffer
 * will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
/* -----------------------------------------------------------------------------
 * Eviction
 */
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct omap_drm_usergart_entry *entry)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	int n = priv->usergart[fmt].height;
	size_t size = PAGE_SIZE * n;
	loff_t off = mmap_offset(obj) +
			(entry->obj_pgoff << PAGE_SHIFT);
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	if (m > 1) {
		int i;
		/* if stride > than PAGE_SIZE then sparse mapping: */
		for (i = n; i > 0; i--) {
			unmap_mapping_range(obj->dev->anon_inode->i_mapping,
					    off, PAGE_SIZE, 1);
			off += PAGE_SIZE * m;
		}
	} else {
		unmap_mapping_range(obj->dev->anon_inode->i_mapping,
				    off, size, 1);
	}

	entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct omap_drm_usergart_entry *entry =
				&priv->usergart[fmt].entry[i];

			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* -----------------------------------------------------------------------------
 * Page Management
 */
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;
	int npages = obj->size >> PAGE_SHIFT;
	int i, ret;
	dma_addr_t *addrs;

	WARN_ON(omap_obj->pages);

	pages = drm_gem_get_pages(obj);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}

		for (i = 0; i < npages; i++) {
			addrs[i] = dma_map_page(dev->dev, pages[i],
					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	} else {
		addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
		if (!addrs) {
			ret = -ENOMEM;
			goto free_pages;
		}
	}

	omap_obj->addrs = addrs;
	omap_obj->pages = pages;

	return 0;

free_pages:
	drm_gem_put_pages(obj, pages, true, false);

	return ret;
}
/* acquire pages when needed (for example, for DMA where physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	/* for non-cached buffers, ensure the new pages are clean because
	 * DSS, GPU, etc. are not cache coherent:
	 */
	if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
					PAGE_SIZE, DMA_BIDIRECTIONAL);
		}
	}

	kfree(omap_obj->addrs);
	omap_obj->addrs = NULL;

	drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/* get buffer flags */
uint32_t omap_gem_flags(struct drm_gem_object *obj)
{
	return to_omap_bo(obj)->flags;
}
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	uint64_t offset;

	mutex_lock(&obj->dev->struct_mutex);
	offset = mmap_offset(obj);
	mutex_unlock(&obj->dev->struct_mutex);

	return offset;
}
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
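/*
 * Illustrative example (numbers assumed, not taken from the TILER docs): an
 * 8-bit tiled buffer that is only ~300 bytes wide still gets a 4KB virtual
 * stride from tiler_vsize(), so the mmap'd region advances a full PAGE_SIZE
 * per row even though only the valid picture part of each row is backed by
 * real pixel data.
 */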
/* get tiled size, returns -EINVAL if not tiled buffer */
int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->flags & OMAP_BO_TILED) {
		*w = omap_obj->width;
		*h = omap_obj->height;
		return 0;
	}
	return -EINVAL;
}
/* -----------------------------------------------------------------------------
 * Fault Handling
 */
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		omap_gem_cpu_sync(obj, pgoff);
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			__pfn_to_pfn_t(pfn, PFN_DEV));
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_drm_usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/*
	 * Note the height of the slot is also equal to the number of pages
	 * that need to be mapped in to fill a 4kb wide CPU page.  If the slot
	 * height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 */
	const int n = priv->usergart[fmt].height;
	const int n_shift = priv->usergart[fmt].height_shift;

	/*
	 * If buffer width in bytes > PAGE_SIZE then the virtual stride is
	 * rounded up to next multiple of PAGE_SIZE.. this needs to be taken
	 * into account in some of the math, so figure out virtual stride
	 * in pages
	 */
	const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, m << n_shift);

	/* figure out buffer width in slots */
	slots = omap_obj->width >> priv->usergart[fmt].slot_shift;

	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);

	entry = &priv->usergart[fmt].entry[priv->usergart[fmt].last];

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset: */
	base_pgoff = (base_pgoff >> n_shift) * slots;

	/* for wider-than 4k.. figure out which part of the slot-row we want: */
	if (m > 1) {
		int off = pgoff % m;
		entry->obj_pgoff += off;
		base_pgoff /= m;
		slots = min(slots - (off << n_shift), n);
		base_pgoff += off << n_shift;
		vaddr += off << PAGE_SHIFT;
	}

	/*
	 * Map in pages.  Beyond the valid pixel part of the buffer, we set
	 * pages[i] to NULL to get a dummy page mapped in.. if someone
	 * reads/writes it they will get random/undefined content, but at
	 * least it won't be corrupting whatever other random page used to
	 * be mapped in, or other undefined behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (n - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	for (i = n; i > 0; i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr,
				__pfn_to_pfn_t(pfn, PFN_DEV));
		pfn += priv->usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE * m;
	}

	/* simple round-robin: */
	priv->usergart[fmt].last = (priv->usergart[fmt].last + 1)
				 % NUM_USERGART_ENTRIES;

	return 0;
}
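/*
 * Worked example of the math above (illustrative, assuming an 8-bit format
 * whose usergart slot is 64 rows high, i.e. n = 64, n_shift = 6, and a
 * buffer narrower than 4KB so m = 1): a fault at page offset 200 within the
 * mapping is rounded down to base_pgoff = round_down(200, 64) = 192, so the
 * 64 backing pages starting at page 192 are pinned into the reserved
 * usergart block and inserted into the VMA as one slot-row.
 */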
/**
 * omap_gem_fault		-	pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM. In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);
	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	return omap_gem_mmap_obj(vma->vm_private_data, vma);
}
int omap_gem_mmap_obj(struct drm_gem_object *obj,
		struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		/*
		 * We do have some private objects, at least for scanout buffers
		 * on hardware without DMM/TILER.  But these are allocated write-
		 * combine
		 */
		if (WARN_ON(!obj->filp))
			return -EINVAL;

		/*
		 * Shunt off cached objs to shmem file so they have their own
		 * address_space (so unmap_mapping_range does what we want,
		 * in particular in the case of mmap'd dmabufs)
		 */
		fput(vma->vm_file);
		vma->vm_pgoff = 0;
		vma->vm_file  = get_file(obj->filp);

		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return 0;
}
/* -----------------------------------------------------------------------------
 * Dumb Buffers
 */
/**
 * omap_gem_dumb_create	-	create a dumb buffer
 * @drm_file: our client file
 * @dev: our device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space. Give userspace a handle by which
 * to reference it.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	args->pitch = align_pitch(0, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
/**
 * omap_gem_dumb_map	-	buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: drm device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: memory map offset placed here
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory. We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 *
 * Call only from non-atomic contexts.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	omap_obj->roll = roll;

	mutex_lock(&obj->dev->struct_mutex);

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
#endif
/* -----------------------------------------------------------------------------
 * Memory Management & DMA Sync
 */
/**
 * shmem buffers that are mapped cached can simulate coherency via using
 * page faulting to keep track of dirty pages
 */
static inline bool is_cached_coherent(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	return is_shmem(obj) &&
		((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
}
/* Sync the buffer for CPU access.. note pages should already be
 * attached, ie. omap_gem_get_pages()
 */
void omap_gem_cpu_sync(struct drm_gem_object *obj, int pgoff)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj) && omap_obj->addrs[pgoff]) {
		dma_unmap_page(dev->dev, omap_obj->addrs[pgoff],
				PAGE_SIZE, DMA_BIDIRECTIONAL);
		omap_obj->addrs[pgoff] = 0;
	}
}
/* sync the buffer for DMA access */
void omap_gem_dma_sync(struct drm_gem_object *obj,
		enum dma_data_direction dir)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (is_cached_coherent(obj)) {
		int i, npages = obj->size >> PAGE_SHIFT;
		struct page **pages = omap_obj->pages;
		bool dirty = false;

		for (i = 0; i < npages; i++) {
			if (!omap_obj->addrs[i]) {
				omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
						PAGE_SIZE, DMA_BIDIRECTIONAL);
				dirty = true;
			}
		}

		if (dirty) {
			unmap_mapping_range(obj->filp->f_mapping, 0,
					omap_gem_mmap_size(obj), 1);
		}
	}
}
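/*
 * Usage sketch (illustrative, not part of this file): for a cached shmem
 * buffer, a producer would call omap_gem_dma_sync() before handing the
 * pages to DSS/GPU, while the fault path calls omap_gem_cpu_sync() per page
 * before the CPU touches it again, e.g.:
 *
 *	omap_gem_get_pages(obj, &pages, true);
 *	omap_gem_dma_sync(obj, DMA_TO_DEVICE);
 *	... kick the DMA / display ...
 *	omap_gem_put_pages(obj);
 */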
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %pad", &omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
		goto fail;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
void omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->paddr = 0;
			omap_obj->block = NULL;
		}
	}

	mutex_unlock(&obj->dev->struct_mutex);
}
/* Get rotated scanout address (only valid if already pinned), at the
 * specified orientation and x,y offset from top-left corner of buffer
 * (only valid for tiled 2d buffers)
 */
int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
		int x, int y, dma_addr_t *paddr)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;

	mutex_lock(&obj->dev->struct_mutex);
	if ((omap_obj->paddr_cnt > 0) && omap_obj->block &&
			(omap_obj->flags & OMAP_BO_TILED)) {
		*paddr = tiler_tsptr(omap_obj->block, orient, x, y);
		ret = 0;
	}
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = -EINVAL;
	if (omap_obj->flags & OMAP_BO_TILED)
		ret = tiler_stride(gem2fmt(omap_obj->flags), orient);
	return ret;
}
/* if !remap, and we don't have pages backing, then fail, rather than
 * increasing the pin count (which we don't really do yet anyways,
 * because we don't support swapping pages back out).  And 'remap'
 * might not be quite the right name, but I wanted to keep it working
 * similarly to omap_gem_get_paddr().  Note though that mutex is not
 * acquired if !remap (because this can be called in atomic ctxt),
 * but probably omap_gem_get_paddr() should be changed to work in the
 * same way.  If !remap, a matching omap_gem_put_pages() call is not
 * required (and should not be made).
 */
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
		bool remap)
{
	int ret;
	if (!remap) {
		struct omap_gem_object *omap_obj = to_omap_bo(obj);
		if (!omap_obj->pages)
			return -ENOMEM;
		*pages = omap_obj->pages;
		return 0;
	}
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
#endif
/* -----------------------------------------------------------------------------
 * Power Management
 */

#ifdef CONFIG_PM
/* re-pin objects in DMM in resume path: */
int omap_gem_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct omap_drm_private *priv = drm_dev->dev_private;
	struct omap_gem_object *omap_obj;
	int ret = 0;

	list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
		if (omap_obj->block) {
			struct drm_gem_object *obj = &omap_obj->base;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			WARN_ON(!omap_obj->pages);  /* this can't happen */
			ret = tiler_pin(omap_obj->block,
					omap_obj->pages, npages,
					omap_obj->roll, true);
			if (ret) {
				dev_err(dev, "could not repin: %d\n", ret);
				return ret;
			}
		}
	}

	return 0;
}
#endif
/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint64_t off;

	off = drm_vma_node_start(&obj->vma_node);

	seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d",
			omap_obj->flags, obj->name, obj->refcount.refcount.counter,
			off, &omap_obj->paddr, omap_obj->paddr_cnt,
			omap_obj->vaddr, omap_obj->roll);

	if (omap_obj->flags & OMAP_BO_TILED) {
		seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height);
		if (omap_obj->block) {
			struct tcm_area *area = &omap_obj->block->area;
			seq_printf(m, " (%dx%d, %dx%d)",
					area->p0.x, area->p0.y,
					area->p1.x, area->p1.y);
		}
	} else {
		seq_printf(m, " %d", obj->size);
	}

	seq_printf(m, "\n");
}

void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
{
	struct omap_gem_object *omap_obj;
	int count = 0;
	size_t size = 0;

	list_for_each_entry(omap_obj, list, mm_list) {
		struct drm_gem_object *obj = &omap_obj->base;
		seq_printf(m, "   ");
		omap_gem_describe(obj, m);
		count++;
		size += obj->size;
	}

	seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
}
#endif
/* -----------------------------------------------------------------------------
 * Buffer Synchronization
 */

static DEFINE_SPINLOCK(sync_lock);

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	return false;
}
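/*
 * Illustrative example of the counter protocol (not from the SGX docs): a
 * reader that wants to wait for all writes queued so far records
 * write_target = sync->write_pending at wait time; it stays blocked until
 * the hardware has bumped write_complete up to that value, i.e. until every
 * write that was pending when the wait started has finished.
 */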
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add support for
 * that.
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
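/*
 * Usage sketch (illustrative only): a driver queuing a hw write would
 * bracket it with
 *
 *	omap_gem_op_start(obj, OMAP_GEM_WRITE);
 *	... submit the job; on its completion irq/callback ...
 *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);
 *
 * and a CPU reader that must see the result calls
 * omap_gem_op_sync(obj, OMAP_GEM_READ) before touching the pages.
 */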
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);
		kfree(waiter);
	}
	return ret;
}
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked..  fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
				  GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* -----------------------------------------------------------------------------
 * Constructor & Destructor
 */
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	spin_lock(&priv->list_lock);
	list_del(&omap_obj->mm_list);
	spin_unlock(&priv->list_lock);

	/* this means the object is still pinned.. which really should
	 * not happen.  I think..
	 */
	WARN_ON(omap_obj->paddr_cnt > 0);

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages)
			omap_gem_detach_pages(obj);

		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
		kfree(omap_obj->sync);

	drm_gem_object_release(obj);

	kfree(omap_obj);
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj;
	struct address_space *mapping;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!priv->usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			return NULL;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
		flags |= tiler_get_cpu_cache_flags();

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj)
		return NULL;

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr =  dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (!omap_obj->vaddr) {
			kfree(omap_obj);
			return NULL;
		}

		flags |= OMAP_BO_DMA;
	}

	spin_lock(&priv->list_lock);
	list_add(&omap_obj->mm_list, &priv->obj_list);
	spin_unlock(&priv->list_lock);

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
		if (ret)
			goto fail;

		mapping = file_inode(obj->filp)->i_mapping;
		mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
	}

	return obj;

fail:
	omap_gem_free_object(obj);
	return NULL;
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		omap_gem_free_object(obj);
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* -----------------------------------------------------------------------------
 * Init & Cleanup
 */
/* If DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_drm_usergart *usergart;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j;

	if (!dmm_is_available()) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "DMM not available, disable DMM support\n");
		return;
	}

	usergart = kcalloc(3, sizeof(*usergart), GFP_KERNEL);
	if (!usergart)
		return;

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i], 0) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct omap_drm_usergart_entry *entry;
			struct tiler_block *block;

			entry = &usergart[i].entry[j];
			block = tiler_reserve_2d(fmts[i], w, h, PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h,
					&entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->usergart = usergart;
	priv->has_dmm = true;
}
void omap_gem_deinit(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;

	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(priv->usergart);
}