Commit | Line | Data |
---|---|---|
d73a3397 WL |
1 | #ifndef _ASM_X86_QSPINLOCK_H |
2 | #define _ASM_X86_QSPINLOCK_H | |
3 | ||
2aa79af6 | 4 | #include <asm/cpufeature.h> |
d73a3397 | 5 | #include <asm-generic/qspinlock_types.h> |
f233f7f1 | 6 | #include <asm/paravirt.h> |
d73a3397 WL |
7 | |
/*
 * Tell asm-generic/qspinlock.h that this architecture provides its own
 * queued_spin_unlock() (defined below, either paravirt or native).
 */
#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * Release the lock by zeroing the locked byte with release
	 * ordering, so all critical-section stores are visible before
	 * the lock appears free.  The (u8 *) cast targets the first
	 * byte of the lock word; this assumes the locked byte sits at
	 * the lowest address of lock->val (true on little-endian x86).
	 */
	smp_store_release((u8 *)lock, 0);
}
19 | ||
f233f7f1 PZI |
#ifdef CONFIG_PARAVIRT_SPINLOCKS
/* Native and paravirt slowpath entry points, defined out of line. */
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);

/*
 * With paravirt spinlocks enabled, route the slowpath through the
 * pv_* indirection so the hypervisor-aware implementation can be
 * selected at runtime.
 */
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
#else
/* No paravirt spinlocks: unlock goes straight to the native store. */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
#endif
41 | ||
#ifdef CONFIG_PARAVIRT
#define virt_spin_lock virt_spin_lock
/*
 * virt_spin_lock - byte-lock fallback for virtualized guests
 * @lock: Pointer to queued spinlock structure
 *
 * Returns true if the lock was taken here (caller must not run the
 * queued-lock path), false to let the normal queued spinlock proceed.
 */
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	/* Bare metal: use the regular fair queued spinlock. */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	/*
	 * Spin with plain reads until the lock looks free, then try to
	 * claim it with a full cmpxchg; re-spin on a lost race.  The
	 * read-before-cmpxchg keeps the cacheline shared while waiting.
	 */
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif /* CONFIG_PARAVIRT */
2aa79af6 | 63 | |
d73a3397 WL |
64 | #include <asm-generic/qspinlock.h> |
65 | ||
66 | #endif /* _ASM_X86_QSPINLOCK_H */ |