Merge remote-tracking branch 'lightnvm/for-next'
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 80bbe43a2e92a5b363d46d784fda9accd6f8d3f9..10265bb356041adc86988568f188f1fe60cb6058 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
  * Authors:
  *     Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -115,7 +119,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
        if (ret)
                return ERR_PTR(ret);
 
-       addr = i915_gem_object_pin_map(obj);
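+       /* Pin the object's backing pages and return a write-back cached (WB) kernel mapping */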
+       addr = i915_gem_object_pin_map(obj, I915_MAP_WB);
        mutex_unlock(&dev->struct_mutex);
 
        return addr;
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+                         struct dma_buf *dma_buf)
+{
+       struct reservation_object *resv = dma_buf->resv;
+       struct drm_i915_gem_request *req;
+       unsigned long active;
+       int idx;
+
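+       /* Snapshot the set of engines on which the object is still active */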
+       active = __I915_BO_ACTIVE(obj);
+       if (!active)
+               return;
+
+       /* Serialise with execbuf to prevent concurrent fence-loops */
+       mutex_lock(&obj->base.dev->struct_mutex);
+
+       /* Mark the object for future fences before racily adding old fences */
+       obj->base.dma_buf = dma_buf;
+
+       ww_mutex_lock(&resv->lock, NULL);
+
+       for_each_active(active, idx) {
+               req = i915_gem_active_get(&obj->last_read[idx],
+                                         &obj->base.dev->struct_mutex);
+               if (!req)
+                       continue;
+
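+               /* Reserve a shared-fence slot first; on allocation failure the fence is simply skipped */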
+               if (reservation_object_reserve_shared(resv) == 0)
+                       reservation_object_add_shared_fence(resv, &req->fence);
+
+               i915_gem_request_put(req);
+       }
+
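+       /* Any outstanding write becomes the exclusive fence on the reservation object */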
+       req = i915_gem_active_get(&obj->last_write,
+                                 &obj->base.dev->struct_mutex);
+       if (req) {
+               reservation_object_add_excl_fence(resv, &req->fence);
+               i915_gem_request_put(req);
+       }
+
+       ww_mutex_unlock(&resv->lock);
+       mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+       struct dma_buf *dma_buf;
 
        exp_info.ops = &i915_dmabuf_ops;
        exp_info.size = gem_obj->size;
        exp_info.flags = flags;
        exp_info.priv = gem_obj;
 
-
        if (obj->ops->dmabuf_export) {
                int ret = obj->ops->dmabuf_export(obj);
                if (ret)
                        return ERR_PTR(ret);
        }
 
-       return dma_buf_export(&exp_info);
+       dma_buf = dma_buf_export(&exp_info);
+       if (IS_ERR(dma_buf))
+               return dma_buf;
+
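+       /* Propagate outstanding GPU activity into the new dma-buf's reservation object */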
+       export_fences(obj, dma_buf);
+       return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -278,8 +330,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                         * Importing dmabuf exported from our own gem increases
                         * refcount on gem itself instead of f_count of dmabuf.
                         */
-                       drm_gem_object_reference(&obj->base);
-                       return &obj->base;
+                       return &i915_gem_object_get(obj)->base;
                }
        }
 
@@ -300,6 +351,16 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
        i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
        obj->base.import_attach = attach;
 
+       /* We use GTT as shorthand for a coherent domain, one that is
+        * neither in the GPU cache nor in the CPU cache, where all
+        * writes are immediately visible in memory. (That's not strictly
+        * true, but it's close! There are internal buffers such as the
+        * write-combined buffer or a delay through the chipset for GTT
+        * writes that do require us to treat GTT as a separate cache domain.)
+        */
+       obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+       obj->base.write_domain = 0;
+
        return &obj->base;
 
 fail_detach: