drm/exynos: stop using sgtable in page fault handler
author    Joonyoung Shim <jy0922.shim@samsung.com>
Tue, 28 Jul 2015 08:53:16 +0000 (17:53 +0900)
committer Inki Dae <inki.dae@samsung.com>
Sun, 16 Aug 2015 04:25:21 +0000 (13:25 +0900)
Struct exynos_drm_gem_buf already holds the pages of the buffer when the
buffer is created, so the page fault handler can simply use those pages;
we no longer have to build an sg table for the buffer. However, this
requires constructing the pages array for a buffer imported via dma-buf PRIME.

Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Signed-off-by: Inki Dae <inki.dae@samsung.com>
drivers/gpu/drm/exynos/exynos_drm_buf.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_gem.c

index 24994ba10e28af4dfc46d14c544edc36bd6f94bf..9260dfb3b7e5a74d91b26f5426016454bbcf29ba 100644 (file)
@@ -90,23 +90,12 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
                }
        }
 
-       buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
-       if (IS_ERR(buf->sgt)) {
-               DRM_ERROR("failed to get sg table.\n");
-               ret = PTR_ERR(buf->sgt);
-               goto err_free_attrs;
-       }
-
        DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
        return ret;
 
-err_free_attrs:
-       dma_free_attrs(dev->dev, buf->size, buf->pages,
-                       (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
-       buf->dma_addr = (dma_addr_t)NULL;
 err_free:
        if (!is_drm_iommu_supported(dev))
                drm_free_large(buf->pages);
@@ -126,11 +115,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
                        (unsigned long)buf->dma_addr,
                        buf->size);
 
-       sg_free_table(buf->sgt);
-
-       kfree(buf->sgt);
-       buf->sgt = NULL;
-
        if (!is_drm_iommu_supported(dev)) {
                dma_free_attrs(dev->dev, buf->size, buf->cookie,
                                (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
index cd485c091b30dcf3219ef1edc778a1aa17c4fdfa..d10f9b602bf720dadf2bef5eaf584898fa057cac 100644 (file)
@@ -203,6 +203,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        struct scatterlist *sgl;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buffer;
+       int npages;
        int ret;
 
        /* is this one of own objects? */
@@ -251,6 +252,20 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        buffer->size = dma_buf->size;
        buffer->dma_addr = sg_dma_address(sgl);
 
+       npages = dma_buf->size >> PAGE_SHIFT;
+       buffer->pages = drm_malloc_ab(npages, sizeof(struct page *));
+       if (!buffer->pages) {
+               ret = -ENOMEM;
+               goto err_free_gem;
+       }
+
+       ret = drm_prime_sg_to_page_addr_arrays(sgt, buffer->pages, NULL,
+                       npages);
+       if (ret < 0) {
+               drm_free_large(buffer->pages);
+               goto err_free_gem;
+       }
+
        if (sgt->nents == 1) {
                /* always physically continuous memory if sgt->nents is 1. */
                exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
@@ -273,6 +288,9 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 
        return &exynos_gem_obj->base;
 
+err_free_gem:
+       drm_gem_object_release(&exynos_gem_obj->base);
+       kfree(exynos_gem_obj);
 err_free_buffer:
        kfree(buffer);
        buffer = NULL;
index 0d5b9698d38402d912304b3915e95e37bd207978..d320acd20986cff9e8763eeb9c59e3001065ea11 100644 (file)
@@ -83,26 +83,14 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-       struct scatterlist *sgl;
        unsigned long pfn;
-       int i;
-
-       if (!buf->sgt)
-               return -EINTR;
 
        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }
 
-       sgl = buf->sgt->sgl;
-       for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
-               if (page_offset < (sgl->length >> PAGE_SHIFT))
-                       break;
-               page_offset -=  (sgl->length >> PAGE_SHIFT);
-       }
-
-       pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
+       pfn = page_to_pfn(buf->pages[page_offset]);
 
        return vm_insert_mixed(vma, f_vaddr, pfn);
 }
This page took 0.031241 seconds and 5 git commands to generate.