{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct drm_i915_gem_object *obj;
+ struct i915_gtt *ggtt = &dev_priv->gtt;
+ struct i915_vma *vma;
size_t pinned;
pinned = 0;
mutex_lock(&dev->struct_mutex);
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (i915_gem_obj_is_pinned(obj))
- pinned += i915_gem_obj_ggtt_size(obj);
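+ /*
+ * A vma pinned into the GGTT may sit on either the active or the
+ * inactive list, so walk both when summing the pinned space.
+ */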
+ list_for_each_entry(vma, &ggtt->base.active_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
+ list_for_each_entry(vma, &ggtt->base.inactive_list, mm_list)
+ if (vma->pin_count)
+ pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->gtt.base.total;
i915_gem_chipset_flush(dev);
out:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
}
out_flush:
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
out_unpin:
i915_gem_object_ggtt_unpin(obj);
out:
if (needs_clflush_after)
i915_gem_chipset_flush(dev);
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
return ret;
}
return 0;
}
-/*
- * Compare arbitrary request against outstanding lazy request. Emit on match.
- */
-int
-i915_gem_check_olr(struct drm_i915_gem_request *req)
-{
- WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-
- if (req == req->ring->outstanding_lazy_request)
- i915_add_request(req);
-
- return 0;
-}
-
static void fake_irq(unsigned long data)
{
wake_up_process((struct task_struct *)data);
return ret;
}
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv;
+
+ WARN_ON(!req || !file || req->file_priv);
+
+ if (!req || !file)
+ return -EINVAL;
+
+ if (req->file_priv)
+ return -EINVAL;
+
+ file_priv = file->driver_priv;
+
+ spin_lock(&file_priv->mm.lock);
+ req->file_priv = file_priv;
+ list_add_tail(&req->client_list, &file_priv->mm.request_list);
+ spin_unlock(&file_priv->mm.lock);
+
+ req->pid = get_pid(task_pid(current));
+
+ return 0;
+}
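+
+/*
+ * Illustrative sketch (not part of the patch): a caller with ring, ctx,
+ * file and req/ret locals in scope would pair request allocation with
+ * the client bookkeeping like so:
+ *
+ *	ret = i915_gem_request_alloc(ring, ctx, &req);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = i915_gem_request_add_to_client(req, file);
+ *	if (ret) {
+ *		i915_gem_request_cancel(req);
+ *		return ret;
+ *	}
+ */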
+
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
+
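+ /*
+ * Drop the pid reference taken in i915_gem_request_add_to_client()
+ * together with the client link.
+ */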
+ put_pid(request->pid);
+ request->pid = NULL;
}
static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
- put_pid(request->pid);
-
i915_gem_request_unreference(request);
}
if (ret)
return ret;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
if (req == NULL)
return 0;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
} else {
for (i = 0; i < I915_NUM_RINGS; i++) {
if (req == NULL)
continue;
- ret = i915_gem_check_olr(req);
- if (ret)
- goto err;
-
requests[n++] = i915_gem_request_reference(req);
}
}
NULL, rps);
mutex_lock(&dev->struct_mutex);
-err:
for (i = 0; i < n; i++) {
if (ret == 0)
i915_gem_object_retire_request(obj, requests[i]);
else
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
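+ /*
+ * Frontbuffer tracking is invalidated here, once per set-domain
+ * ioctl, with the origin chosen from the requested write domain;
+ * set_to_gtt_domain()/set_to_cpu_domain() themselves no longer
+ * invalidate (see the hunks below).
+ */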
+ if (write_domain != 0)
+ intel_fb_obj_invalidate(obj,
+ write_domain == I915_GEM_DOMAIN_GTT ?
+ ORIGIN_GTT : ORIGIN_CPU);
+
unref:
drm_gem_object_unreference(&obj->base);
unlock:
RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
i915_gem_request_assign(&obj->last_write_req, NULL);
- intel_fb_obj_flush(obj, true);
+ intel_fb_obj_flush(obj, true, ORIGIN_CS);
}
static void
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
void __i915_add_request(struct drm_i915_gem_request *request,
- struct drm_file *file,
struct drm_i915_gem_object *obj,
bool flush_caches)
{
dev_priv = ring->dev->dev_private;
ringbuf = request->ringbuf;
- WARN_ON(request != ring->outstanding_lazy_request);
-
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
request->emitted_jiffies = jiffies;
list_add_tail(&request->list, &ring->request_list);
- request->file_priv = NULL;
-
- if (file) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
-
- spin_lock(&file_priv->mm.lock);
- request->file_priv = file_priv;
- list_add_tail(&request->client_list,
- &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-
- request->pid = get_pid(task_pid(current));
- }
trace_i915_gem_request_add(request);
- ring->outstanding_lazy_request = NULL;
i915_queue_hangcheck(ring->dev);
typeof(*req), ref);
struct intel_context *ctx = req->ctx;
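+ /*
+ * A request may reach free without ever being retired (e.g. via
+ * i915_gem_request_cancel()), so drop any remaining client
+ * association here as well.
+ */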
+ if (req->file_priv)
+ i915_gem_request_remove_from_client(req);
+
if (ctx) {
if (i915.enable_execlists) {
- struct intel_engine_cs *ring = req->ring;
-
- if (ctx != ring->default_context)
- intel_lr_context_unpin(ring, ctx);
+ if (ctx != req->ring->default_context)
+ intel_lr_context_unpin(req);
}
i915_gem_context_unreference(ctx);
if (!req_out)
return -EINVAL;
- if ((*req_out = ring->outstanding_lazy_request) != NULL)
- return 0;
+ *req_out = NULL;
req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
* i915_add_request() call can't fail. Note that the reserve may need
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
- *
- * Note further that this call merely notes the reserve request. A
- * subsequent call to *_ring_begin() is required to actually ensure
- * that the reservation is available. Without the begin, if the
- * request creator immediately submitted the request without adding
- * any commands to it then there might not actually be sufficient
- * room for the submission commands. Unfortunately, the current
- * *_ring_begin() implementations potentially call back here to
- * i915_gem_request_alloc(). Thus calling _begin() here would lead to
- * infinite recursion! Until that back call path is removed, it is
- * necessary to do a manual _begin() outside.
*/
- intel_ring_reserved_space_reserve(req->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
+ if (i915.enable_execlists)
+ ret = intel_logical_ring_reserve_space(req);
+ else
+ ret = intel_ring_reserve_space(req);
+ if (ret) {
+ /*
+ * At this point, the request is fully allocated even if not
+ * fully prepared. Thus it can be cleaned up using the proper
+ * free code.
+ */
+ i915_gem_request_cancel(req);
+ return ret;
+ }
- *req_out = ring->outstanding_lazy_request = req;
+ *req_out = req;
return 0;
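+/*
+ * The resulting contract, as an illustrative sketch (emit_commands() is
+ * a hypothetical stand-in for whatever the caller emits): a successful
+ * alloc returns a request with its ring space already reserved, so the
+ * closing i915_add_request() cannot fail, while an abandoned request
+ * must be cancelled to release the reservation:
+ *
+ *	ret = i915_gem_request_alloc(ring, ctx, &req);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = emit_commands(req);
+ *	if (ret) {
+ *		i915_gem_request_cancel(req);
+ *		return ret;
+ *	}
+ *
+ *	i915_add_request(req);
+ */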
err:
list_del(&submit_req->execlist_link);
if (submit_req->ctx != ring->default_context)
- intel_lr_context_unpin(ring, submit_req->ctx);
+ intel_lr_context_unpin(submit_req);
i915_gem_request_unreference(submit_req);
}
i915_gem_request_retire(request);
}
-
- /* This may not have been flushed before the reset, so clean it now */
- i915_gem_request_assign(&ring->outstanding_lazy_request, NULL);
}
void i915_gem_restore_fences(struct drm_device *dev)
static int
i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
{
- int ret, i;
+ int i;
if (!obj->active)
return 0;
if (list_empty(&req->list))
goto retire;
- ret = i915_gem_check_olr(req);
- if (ret)
- return ret;
-
if (i915_gem_request_completed(req, true)) {
__i915_gem_request_retire__upto(req);
retire:
if (i915_gem_request_completed(from_req, true))
return 0;
- ret = i915_gem_check_olr(from_req);
- if (ret)
- return ret;
-
if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915_add_request_no_flush(req);
}
- WARN_ON(ring->outstanding_lazy_request);
-
ret = intel_ring_idle(ring);
if (ret)
return ret;
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- unsigned long start =
+ u64 start =
flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
- unsigned long end =
+ u64 end =
flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
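+ /*
+ * The bind range is tracked as u64: an address space (e.g. a 48-bit
+ * ppgtt) can exceed what unsigned long holds on 32-bit builds. The
+ * DRM_DEBUG below prints it with %llu to match.
+ */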
struct i915_vma *vma;
int ret;
* attempt to find space.
*/
if (size > end) {
- DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%llu\n",
ggtt_view ? ggtt_view->type : 0,
size,
flags & PIN_MAPPABLE ? "mappable" : "total",
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_GTT);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
- intel_fb_obj_flush(obj, false);
+ intel_fb_obj_flush(obj, false, ORIGIN_CPU);
trace_i915_gem_object_change_domain(obj,
obj->base.read_domains,
obj->dirty = 1;
}
- if (write)
- intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
- if (write)
- intel_fb_obj_invalidate(obj, ORIGIN_CPU);
-
trace_i915_gem_object_change_domain(obj,
old_read_domains,
old_write_domain);
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
+ /*
+ * Note that the request might not have been submitted yet,
+ * in which case emitted_jiffies will be zero.
+ */
+ if (!request->emitted_jiffies)
+ continue;
+
target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (!HAS_L3_DPF(dev) || !remap_info)
return 0;
- ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
+ ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
if (ret)
return ret;
for_each_ring(ring, dev_priv, i)
dev_priv->gt.cleanup_ring(ring);
+
+ if (i915.enable_execlists)
+ /*
+ * Neither the BIOS, ourselves, nor any other kernel
+ * expects the system to be in execlists mode on startup,
+ * so we need to reset the GPU back to legacy mode.
+ */
+ intel_gpu_reset(dev);
}
static void
return false;
}
-