#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * Type of int is used as a full 64b word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#if defined(CONFIG_PPC64)
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#define arch_spin_is_locked(x)		((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	1
#endif
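
/*
 * Note: on 64-bit the token is read straight out of the paca, so
 * e.g. CPU 5 writes 0x80000005 into a lock it holds; one load of the
 * lock word therefore says both "locked" and by which CPU.  On
 * 32-bit a plain 1 is used instead.
 */
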
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
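
/*
 * Note: io_sync is a per-CPU flag set by the MMIO write accessors on
 * 64-bit SMP.  SYNC_IO in the unlock path then issues a full mb() so
 * that device writes done under a lock are visible before the lock
 * is seen as free; CLEAR_IO_SYNC resets the flag at acquisition.
 */
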
/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}
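
/*
 * Note: this is the usual PowerPC load-and-reserve/store-conditional
 * pattern: lwarx (with the EH hint set via PPC_LWARX) loads the lock
 * word and establishes a reservation; if the word was 0 (free),
 * stwcx. attempts to store our token and branches back to 1: if the
 * reservation was lost in the meantime.  PPC_ACQUIRE_BARRIER stops
 * the critical section from being reordered before the acquisition.
 */
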
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif
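
/*
 * Note: the out-of-line yield helpers live in arch/powerpc/lib/locks.c;
 * they pull the holder's CPU number out of the lock word and confer
 * the rest of our timeslice to that virtual processor through a
 * hypervisor call.  Without CONFIG_PPC_SPLPAR they reduce to a
 * compiler barrier and SHARED_PROCESSOR is constant 0.
 */
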
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}
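
/*
 * Note: while waiting, the loop drops the SMT thread priority with
 * HMT_low() and watches the lock word with plain loads, raising the
 * priority back (HMT_medium) only when slock reads 0 and the atomic
 * trylock is worth retrying.  A spinning hardware thread thus gives
 * its issue slots to its siblings.
 */
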
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
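
/*
 * Note: this variant, used on the irqsave paths, restores the
 * caller's interrupt state (flags) while busy-waiting and switches
 * back to the disabled state (flags_dis) before retrying the
 * trylock, which keeps the interrupt-off window bounded even under
 * heavy contention.
 */
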
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}
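
/*
 * Note: releasing the lock needs no atomic sequence, only ordering:
 * PPC_RELEASE_BARRIER (lwsync on modern 64-bit parts) makes every
 * store in the critical section visible before the plain store of 0
 * that frees the lock, and SYNC_IO first flushes any pending MMIO
 * ordering obligation (see the io_sync note above).
 */
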
#ifdef CONFIG_PPC64
extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
#else
#define arch_spin_unlock_wait(lock) \
	do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#define arch_read_can_lock(rw)		((rw)->lock >= 0)
#define arch_write_can_lock(rw)	(!(rw)->lock)
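
/*
 * Note: the whole rwlock state lives in the sign of one word: 0 is
 * free, a positive value counts the current readers, and a negative
 * value (WRLOCK_TOKEN below) marks a writer.  Hence "can read" is
 * >= 0 and "can write" is == 0.
 */
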
#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
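
/*
 * Note: the read path bumps the reader count with addic. and bails
 * out (ble- 2f) when the incremented value is <= 0, i.e. when the
 * word held a writer's negative token.  addic. updates the carry
 * bit, hence the "xer" clobber, and __DO_SIGN_EXTEND keeps the
 * 32-bit lock word correctly signed in a 64-bit register.
 */
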
/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}
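
/*
 * Note: unlike the spin and write unlocks, a read unlock cannot be a
 * plain store, because other readers may be updating the count at
 * the same time; it therefore decrements the word with its own
 * lwarx/stwcx. loop, placed after the release barrier.
 */
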
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */