/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

#include "gpu_scheduler.h"
30 struct amd_sched_fence
*amd_sched_fence_create(struct amd_sched_entity
*s_entity
, void *owner
)
32 struct amd_sched_fence
*fence
= NULL
;
35 fence
= kmem_cache_zalloc(sched_fence_slab
, GFP_KERNEL
);
39 INIT_LIST_HEAD(&fence
->scheduled_cb
);
41 fence
->sched
= s_entity
->sched
;
42 spin_lock_init(&fence
->lock
);
44 seq
= atomic_inc_return(&s_entity
->fence_seq
);
45 fence_init(&fence
->base
, &amd_sched_fence_ops
, &fence
->lock
,
46 s_entity
->fence_context
, seq
);
51 void amd_sched_fence_signal(struct amd_sched_fence
*fence
)
53 int ret
= fence_signal(&fence
->base
);
55 FENCE_TRACE(&fence
->base
, "signaled from irq context\n");
57 FENCE_TRACE(&fence
->base
, "was already signaled\n");
60 void amd_sched_fence_scheduled(struct amd_sched_fence
*s_fence
)
62 struct fence_cb
*cur
, *tmp
;
64 set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT
, &s_fence
->base
.flags
);
65 list_for_each_entry_safe(cur
, tmp
, &s_fence
->scheduled_cb
, node
) {
66 list_del_init(&cur
->node
);
67 cur
->func(&s_fence
->base
, cur
);
/* fence_ops.get_driver_name: fixed driver identifier for this fence type */
static const char *amd_sched_fence_get_driver_name(struct fence *fence)
{
	return "amd_sched";
}
76 static const char *amd_sched_fence_get_timeline_name(struct fence
*f
)
78 struct amd_sched_fence
*fence
= to_amd_sched_fence(f
);
79 return (const char *)fence
->sched
->name
;
82 static bool amd_sched_fence_enable_signaling(struct fence
*f
)
87 static void amd_sched_fence_release(struct fence
*f
)
89 struct amd_sched_fence
*fence
= to_amd_sched_fence(f
);
90 kmem_cache_free(sched_fence_slab
, fence
);
93 const struct fence_ops amd_sched_fence_ops
= {
94 .get_driver_name
= amd_sched_fence_get_driver_name
,
95 .get_timeline_name
= amd_sched_fence_get_timeline_name
,
96 .enable_signaling
= amd_sched_fence_enable_signaling
,
98 .wait
= fence_default_wait
,
99 .release
= amd_sched_fence_release
,