#ifndef __ASM_PREEMPT_H
#define __ASM_PREEMPT_H

#include <linux/thread_info.h>

#define PREEMPT_ENABLED (0)

static __always_inline int preempt_count(void)
{
	return current_thread_info()->preempt_count;
}

static __always_inline int *preempt_count_ptr(void)
{
	return &current_thread_info()->preempt_count;
}

static __always_inline void preempt_count_set(int pc)
{
	*preempt_count_ptr() = pc;
}

/*
 * must be macros to avoid header recursion hell
 */
#define init_task_preempt_count(p) do { \
	task_thread_info(p)->preempt_count = PREEMPT_DISABLED; \
} while (0)

#define init_idle_preempt_count(p, cpu) do { \
	task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)

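/*
 * PREEMPT_DISABLED comes from linux/sched.h: a freshly forked task
 * starts out non-preemptible, and the count only drops back to
 * PREEMPT_ENABLED once the child has finished scheduling in (around
 * schedule_tail(), if memory serves).
 */
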
static __always_inline void set_preempt_need_resched(void)
{
}

static __always_inline void clear_preempt_need_resched(void)
{
}

static __always_inline bool test_preempt_need_resched(void)
{
	return false;
}

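/*
 * These three are deliberate no-ops here: in the generic
 * implementation the need-resched state lives solely in the
 * TIF_NEED_RESCHED thread flag. Architectures that fold
 * PREEMPT_NEED_RESCHED into the preempt count (x86 keeps it as the
 * inverted top bit of its per-cpu count) implement them for real.
 */
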
/*
 * The various preempt_count add/sub methods
 */

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}

static __always_inline bool __preempt_count_dec_and_test(void)
{
	/*
	 * Because load-store architectures cannot do per-cpu atomic
	 * operations, we cannot use PREEMPT_NEED_RESCHED here because
	 * it might get lost.
	 */
	return !--*preempt_count_ptr() && tif_need_resched();
}

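/*
 * Hence, unlike the folded x86 variant, this cannot be a single
 * decrement-and-test: the count and TIF_NEED_RESCHED live in
 * different words, so the flag must be re-read once the count hits
 * zero.
 */
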
/*
 * Returns true when we need to resched and can (barring IRQ state).
 */
static __always_inline bool should_resched(int preempt_offset)
{
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
}

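/*
 * The caller passes the preempt_count value it expects to hold at
 * this point, e.g. 0 from cond_resched() but PREEMPT_LOCK_OFFSET
 * from cond_resched_lock(), which runs with a spinlock held.
 */
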
#ifdef CONFIG_PREEMPT
extern asmlinkage void preempt_schedule(void);
#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
#define __preempt_schedule_notrace() preempt_schedule_notrace()
#endif /* CONFIG_PREEMPT */

#endif /* __ASM_PREEMPT_H */
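
/*
 * A minimal sketch of how linux/preempt.h layers the public API on
 * top of these primitives (paraphrased, not part of this header; the
 * SKETCH_ONLY guard is hypothetical and never defined):
 * preempt_disable() bumps the count; preempt_enable() drops it and
 * reschedules when the decrement hits zero with a resched pending.
 */
#ifdef SKETCH_ONLY
#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)
#endif /* SKETCH_ONLY */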