powerpc/spinlock: Fix spin_unlock_wait()
[deliverable/linux.git] / arch / powerpc / include / asm / spinlock.h
index 523673d7583c349a8ea95ec5c2f85cbf59f71339..fa37fe93bc029eb9965754021c027b2de484b809 100644 (file)
@@ -162,12 +162,38 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
        lock->slock = 0;
 }
 
-#ifdef CONFIG_PPC64
-extern void arch_spin_unlock_wait(arch_spinlock_t *lock);
-#else
-#define arch_spin_unlock_wait(lock) \
-       do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
-#endif
+/*
+ * Spin until the lock is observed free, with full ordering against the
+ * surrounding code (smp_mb() on entry and exit).
+ *
+ * NOTE(review): the load of the lock word is done with a lwarx/stwcx.
+ * read-modify-write that stores the value back unchanged, rather than a
+ * plain load — per the comment below, this makes the observation
+ * participate in the lock word's store-conditional ordering, so it cannot
+ * be satisfied by a stale value while another CPU holds the reservation.
+ */
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+       arch_spinlock_t lock_val;
+
+       /* Order prior accesses before we sample the lock word. */
+       smp_mb();
+
+       /*
+        * Atomically load and store back the lock value (unchanged). This
+        * ensures that our observation of the lock value is ordered with
+        * respect to other lock operations.
+        */
+       __asm__ __volatile__(
+"1:    " PPC_LWARX(%0, 0, %2, 0) "\n"
+"      stwcx. %0, 0, %2\n"
+"      bne- 1b\n"
+       : "=&r" (lock_val), "+m" (*lock)
+       : "r" (lock)
+       : "cr0", "xer");
+
+       /* Fast path: the lock was already free at the sample point. */
+       if (arch_spin_value_unlocked(lock_val))
+               goto out;
+
+       /*
+        * Busy-wait until the holder releases.  HMT_low()/HMT_medium()
+        * adjust SMT thread priority while spinning; on a shared-processor
+        * LPAR, __spin_yield() presumably donates cycles toward the lock
+        * holder's virtual CPU — confirm against the __spin_yield()
+        * definition, which is outside this hunk.
+        */
+       while (lock->slock) {
+               HMT_low();
+               if (SHARED_PROCESSOR)
+                       __spin_yield(lock);
+       }
+       HMT_medium();
+
+out:
+       /* Order the observed release before subsequent accesses. */
+       smp_mb();
+}
 
 /*
  * Read-write spinlocks, allowing multiple readers
This page took 0.026865 seconds and 5 git commands to generate.