/*
 * (C) Copyright 2016 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#include <linux/slab.h>
#include <linux/fence.h>
#include <linux/reservation.h>

#include "i915_sw_fence.h"

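/*
 * i915_sw_fence_lock serialises the recursive dependency-graph walk in
 * i915_sw_fence_check_if_after() below, which temporarily marks fences
 * with I915_SW_FENCE_CHECKED_BIT while looking for cycles.
 */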
static DEFINE_SPINLOCK(i915_sw_fence_lock);

static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}

static void i915_sw_fence_free(struct kref *kref)
{
	struct i915_sw_fence *fence = container_of(kref, typeof(*fence), kref);

	WARN_ON(atomic_read(&fence->pending) > 0);

	if (fence->flags & I915_SW_FENCE_MASK)
		__i915_sw_fence_notify(fence, FENCE_FREE);
	else
		kfree(fence);
}

static void i915_sw_fence_put(struct i915_sw_fence *fence)
{
	kref_put(&fence->kref, i915_sw_fence_free);
}

static struct i915_sw_fence *i915_sw_fence_get(struct i915_sw_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

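/*
 * Wake up everyone waiting on @fence.  Dropping the pending count to -1
 * (the "0 -> -1 [done]" transition below) marks the fence as signaled
 * before any of the queued callbacks are invoked.
 */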
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_t *pos, *next;
	unsigned long flags;

	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the task_list from this, the next ready
	 * fence, to the tail of the original fence's task_list
	 * (and so added to the list to be woken).
	 */

	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->task_list, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next,
						 &x->task_list, task_list)
				pos->func(pos, TASK_NORMAL, 0, &extra);

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->task_list);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);
}

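/*
 * Completion: each __i915_sw_fence_complete() drops one count from
 * fence->pending.  Only when the count hits zero is the FENCE_COMPLETE
 * notification sent, and only if the callback returns NOTIFY_DONE (or no
 * callback is installed) are the waiters woken.
 */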
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	if (!atomic_dec_and_test(&fence->pending))
		return;

	if (fence->flags & I915_SW_FENCE_MASK &&
	    __i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	__i915_sw_fence_wake_up_all(fence, continuation);
}

static void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}

static void i915_sw_fence_await(struct i915_sw_fence *fence)
{
	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}

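/*
 * i915_sw_fence_init() stores the notify callback directly in fence->flags;
 * the BUG_ON checks that the function pointer does not spill into the bits
 * outside I915_SW_FENCE_MASK, which are reserved for internal flag bits such
 * as I915_SW_FENCE_CHECKED_BIT.  pending starts at 1 so the fence cannot
 * signal before i915_sw_fence_commit() drops that initial count.
 */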
void i915_sw_fence_init(struct i915_sw_fence *fence, i915_sw_fence_notify_t fn)
{
	BUG_ON((unsigned long)fn & ~I915_SW_FENCE_MASK);

	init_waitqueue_head(&fence->wait);
	kref_init(&fence->kref);
	atomic_set(&fence->pending, 1);
	fence->flags = (unsigned long)fn;
}

void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	i915_sw_fence_complete(fence);
	i915_sw_fence_put(fence);
}

static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
{
	list_del(&wq->task_list);
	__i915_sw_fence_complete(wq->private, key);
	i915_sw_fence_put(wq->private);
	return 0;
}

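/*
 * Cycle detection: before one fence is made to wait upon another, the graph
 * of already-installed waits is walked to make sure the new edge would not
 * introduce a loop.  The walk marks visited fences with
 * I915_SW_FENCE_CHECKED_BIT and clears the marks again afterwards, all under
 * i915_sw_fence_lock.
 */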
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					   const struct i915_sw_fence * const signaler)
{
	wait_queue_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}

static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.task_list, task_list) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}

static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
					 const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	if (!IS_ENABLED(CONFIG_I915_SW_FENCE_CHECK_DAG))
		return false;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}

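/*
 * i915_sw_fence_await_sw_fence() makes @fence wait upon @signaler by adding
 * a wait-queue entry to the signaler's waitqueue and bumping the waiter's
 * pending count.  It returns an error if the new dependency would create a
 * cycle, zero if the signaler has already completed, and otherwise reports
 * that a wait was installed.
 */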
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_t *wq)
{
	unsigned long flags;
	int pending;

	if (i915_sw_fence_done(signaler))
		return 0;

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	INIT_LIST_HEAD(&wq->task_list);
	wq->flags = 0;
	wq->func = i915_sw_fence_wake;
	wq->private = i915_sw_fence_get(fence);

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
		__add_wait_queue_tail(&signaler->wait, wq);
		pending = 1;
	} else {
		i915_sw_fence_wake(wq, 0, 0, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}

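/*
 * Waiting on a dma (struct fence) fence: a dma_fence_cb couples the fence
 * callback with an optional watchdog timer.  If the dma fence does not
 * signal within the requested timeout, the timer fires, warns, and commits
 * the i915_sw_fence anyway so the driver cannot be stalled forever by an
 * unsignaled foreign fence.
 */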
struct dma_fence_cb {
	struct fence_cb base;
	struct i915_sw_fence *fence;
	struct fence *dma;
	struct timer_list timer;
};

static void timer_i915_sw_fence_wake(unsigned long data)
{
	struct dma_fence_cb *cb = (struct dma_fence_cb *)data;

	printk(KERN_WARNING "asynchronous wait on fence %s:%s:%x timed out\n",
	       cb->dma->ops->get_driver_name(cb->dma),
	       cb->dma->ops->get_timeline_name(cb->dma),
	       cb->dma->seqno);
	fence_put(cb->dma);
	cb->dma = NULL;

	i915_sw_fence_commit(cb->fence);
	cb->timer.function = NULL;
}

static void dma_i915_sw_fence_wake(struct fence *dma, struct fence_cb *data)
{
	struct dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	del_timer_sync(&cb->timer);
	if (cb->timer.function)
		i915_sw_fence_commit(cb->fence);
	fence_put(cb->dma);

	i915_sw_fence_put(cb->fence);
	kfree(cb);
}

int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct dma_fence_cb *cb;
	int ret;

	if (fence_is_signaled(dma))
		return 0;

	cb = kmalloc(sizeof(*cb), gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		return fence_wait(dma, false);
	}

	cb->fence = i915_sw_fence_get(fence);
	i915_sw_fence_await(fence);

	cb->dma = NULL;
	__setup_timer(&cb->timer,
		      timer_i915_sw_fence_wake, (unsigned long)cb,
		      TIMER_IRQSAFE);
	if (timeout) {
		cb->dma = fence_get(dma);
		mod_timer(&cb->timer, round_jiffies_up(jiffies + timeout));
	}

	ret = fence_add_callback(dma, &cb->base, dma_i915_sw_fence_wake);
	if (ret == 0) {
		ret = 1;
	} else {
		dma_i915_sw_fence_wake(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}

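/*
 * i915_sw_fence_await_reservation() queues asynchronous waits on the fences
 * tracked by a reservation_object: all shared fences when waiting for a
 * write, otherwise just the exclusive fence, skipping any fence whose ops
 * match @exclude (i.e. fences from the caller's own timeline).
 */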
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct reservation_object *resv,
				    const struct fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct fence *excl;
	int ret = 0, pending;

	if (write) {
		struct fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout, gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			ret |= pending;
		}

		for (i = 0; i < count; i++)
			fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,