#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
#include <asm/bitops.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which support up to 2^16 CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

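/*
 * For reference, the ticket lock layout in asm/spinlock_types.h looks
 * approximately like the sketch below (not a verbatim copy): head and
 * tail share a single word so one xadd can read and update both halves.
 *
 *	typedef struct arch_spinlock {
 *		union {
 *			__ticketpair_t head_tail;
 *			struct __raw_tickets {
 *				__ticket_t head, tail;
 *			} tickets;
 *		};
 *	} arch_spinlock_t;
 */
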
#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
#else
# define LOCK_PTR_REG "D"
#endif

#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PPRO_FENCE)
/*
 * On PPro SMP, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif

/* How long a lock should spin before we consider blocking */
#define SPIN_THRESHOLD	(1 << 15)

extern struct static_key paravirt_ticketlocks_enabled;
static __always_inline bool static_key_false(struct static_key *key);

#ifdef CONFIG_QUEUED_SPINLOCKS
#include <asm/qspinlock.h>
#else

#ifdef CONFIG_PARAVIRT_SPINLOCKS

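/*
 * Mark the lock as being waited on from the slowpath: set bit 0 of
 * tickets.head (TICKET_SLOWPATH_FLAG) so the unlocker knows it must
 * kick a blocked waiter rather than rely on pure spinning.
 */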
static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
{
	set_bit(0, (volatile unsigned long *)&lock->tickets.head);
}

#else	/* !CONFIG_PARAVIRT_SPINLOCKS */
static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
						   __ticket_t ticket)
{
}
static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
					__ticket_t ticket)
{
}

#endif /* CONFIG_PARAVIRT_SPINLOCKS */
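
/*
 * Compare two tickets while ignoring TICKET_SLOWPATH_FLAG, so a value
 * carrying the slowpath bit still matches its "clean" counterpart.
 */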
static inline int __tickets_equal(__ticket_t one, __ticket_t two)
{
	return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
}

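/*
 * The old value handed to cmpxchg() below encodes "we are the only
 * waiter" (tail == clean head + TICKET_LOCK_INC, flag still set), so
 * the flag is cleared only when no other CPU is queued behind us.
 */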
static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
						     __ticket_t head)
{
	if (head & TICKET_SLOWPATH_FLAG) {
		arch_spinlock_t old, new;

		old.tickets.head = head;
		new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
		old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
		new.tickets.tail = old.tickets.tail;

		/* try to clear slowpath flag when there are no contenders */
		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
	}
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return __tickets_equal(lock.tickets.head, lock.tickets.tail);
}

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 */
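/*
 * Worked example (assuming TICKET_LOCK_INC == 1): with the lock free at
 * { head = 2, tail = 2 }, our xadd returns { head = 2, tail = 2 } and
 * leaves { head = 2, tail = 3 } in memory, so head == tail and we own
 * the lock at once. A second CPU's xadd then returns { head = 2,
 * tail = 3 }; it must spin until unlock advances head to 3.
 */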
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };

	inc = xadd(&lock->tickets, inc);
	if (likely(inc.head == inc.tail))
		goto out;

	for (;;) {
		unsigned count = SPIN_THRESHOLD;

		do {
			inc.head = READ_ONCE(lock->tickets.head);
			if (__tickets_equal(inc.head, inc.tail))
				goto clear_slowpath;
			cpu_relax();
		} while (--count);
		__ticket_lock_spinning(lock, inc.tail);
	}
clear_slowpath:
	__ticket_check_and_clear_slowpath(lock, inc.head);
out:
	barrier();	/* make sure nothing creeps before the lock is taken */
}

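/*
 * Trylock: snapshot both tickets; if the lock looks free, try a single
 * cmpxchg that claims it. TICKET_LOCK_INC << TICKET_SHIFT places the
 * increment in the tail half of head_tail, mirroring what xadd does in
 * the lock path, but without queueing when the lock is busy.
 */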
static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	arch_spinlock_t old, new;

	old.tickets = READ_ONCE(lock->tickets);
	if (!__tickets_equal(old.tickets.head, old.tickets.tail))
		return 0;

	new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
	new.head_tail &= ~TICKET_SLOWPATH_FLAG;

	/* cmpxchg is a full barrier, so nothing can move before it */
	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}

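/*
 * Unlock advances head by one ticket. When paravirt ticketlocks are
 * enabled, the xadd also returns the old head so we can observe
 * TICKET_SLOWPATH_FLAG and kick the next waiter out of its halted
 * state; otherwise a plain add (with UNLOCK_LOCK_PREFIX for the PPro
 * errata case) is sufficient.
 */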
static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	if (TICKET_SLOWPATH_FLAG &&
	    static_key_false(&paravirt_ticketlocks_enabled)) {
		__ticket_t head;

		BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);

		head = xadd(&lock->tickets.head, TICKET_LOCK_INC);

		if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
			head &= ~TICKET_SLOWPATH_FLAG;
			__ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
		}
	} else
		__add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	return !__tickets_equal(tmp.tail, tmp.head);
}

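/*
 * "Contended" means more than one ticket is outstanding: tail - head ==
 * TICKET_LOCK_INC is merely "locked, no waiters", so contention is only
 * reported when the gap exceeds one increment.
 */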
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	struct __raw_tickets tmp = READ_ONCE(lock->tickets);

	tmp.head &= ~TICKET_SLOWPATH_FLAG;
	return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}

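/*
 * Wait for the current owner to release: break when the lock becomes
 * free, or when head has moved past our snapshot (ownership changed
 * hands at least once), guarding against a false "unchanged head" due
 * to ticket counter overflow.
 */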
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	__ticket_t head = READ_ONCE(lock->tickets.head);

	for (;;) {
		struct __raw_tickets tmp = READ_ONCE(lock->tickets);
		/*
		 * We need to check "unlocked" in a loop, tmp.head == head
		 * can be a false positive because of overflow.
		 */
		if (__tickets_equal(tmp.head, tmp.tail) ||
		    !__tickets_equal(tmp.head, head))
			break;

		cpu_relax();
	}
}
#endif /* CONFIG_QUEUED_SPINLOCKS */

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks using the generic qrwlock with
 * x86 specific optimization.
 */

#include <asm/qrwlock.h>

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	cpu_relax()
#define arch_read_relax(lock)	cpu_relax()
#define arch_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */