/*
 * Spinlock support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <asm/irqflags.h>

/*
 * This file is pulled in for SMP builds.
 * Really need to check all the barrier stuff for "true" SMP
 */

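/*
 * All of the locks below are built from the Hexagon load-locked /
 * store-conditional pair: "Rd = memw_locked(Rs)" loads a word and sets
 * up a reservation, and "memw_locked(Rs,Pd) = Rt" stores only if the
 * reservation still holds, with the predicate Pd reporting success.
 * Lock word conventions used in this file:
 *
 *	spinlock:  0 = unlocked, 1 = locked
 *	rwlock:    0 = free, N > 0 = held by N readers, -1 = write-locked
 */
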
/*
 * Read locks:
 * - load the lock value (load-locked)
 * - if the value is negative, a writer holds the lock: go back and try again
 * - otherwise increment it and attempt the store-conditional
 * - if the store-conditional fails, go back and try again
 * - a successful store of the incremented value means the read lock is held
 */
static inline void arch_read_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1: R6 = memw_locked(%0);\n"
		" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		" { if !P3 jump 1b; }\n"
		" memw_locked(%0,P3) = R6;\n"
		" { if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}
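
/*
 * Illustration only: ignoring the Hexagon packet structure, the loop
 * above behaves roughly like the following C, with the atomicity coming
 * from the memw_locked load-locked/store-conditional pair rather than
 * from a cmpxchg-style primitive:
 *
 *	do {
 *		old = lock->lock;
 *	} while (old < 0 || !store_conditional(&lock->lock, old + 1));
 *
 * store_conditional() is a hypothetical helper standing in for the
 * "memw_locked(%0,P3) = R6" instruction.
 */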

/* Decrement the reader count; retry if the store-conditional fails. */
static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1: R6 = memw_locked(%0);\n"
		" R6 = add(R6,#-1);\n"
		" memw_locked(%0,P3) = R6\n"
		" if !P3 jump 1b;\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}

/*
 * Attempt to take the read lock once: returns 1 if the lock was
 * acquired, 0 if a writer held it or the store-conditional failed.
 */
static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		" R6 = memw_locked(%1);\n"
		" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
		" { if !P3 jump 1f; }\n"
		" memw_locked(%1,P3) = R6;\n"
		" { %0 = P3 }\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
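
/*
 * Sketch of the logic above, for illustration only; load_locked() and
 * store_conditional() are hypothetical helpers standing in for the
 * memw_locked pair:
 *
 *	temp = 0;
 *	old = load_locked(&lock->lock);
 *	if (old >= 0)
 *		temp = store_conditional(&lock->lock, old + 1);
 *	return temp;
 */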

static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
	return rwlock->lock == 0;
}
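
/*
 * Both "can lock" checks report whether the lock word is 0 (no readers
 * and no writer).  They are plain, unsynchronized reads: a hint only,
 * not a guarantee that a subsequent lock attempt will succeed.
 */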

/*
 * Write lock: spin until the lock word is 0 (no readers, no writer),
 * then store -1 to mark it exclusively held.
 */
static inline void arch_write_lock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		"1: R6 = memw_locked(%0)\n"
		" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		" { if !P3 jump 1b; }\n"
		" memw_locked(%0,P3) = R6;\n"
		" { if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
}
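
/*
 * Illustration only: roughly equivalent C, with the atomicity supplied
 * by the memw_locked pair rather than by a cmpxchg-style primitive:
 *
 *	do {
 *		old = lock->lock;
 *	} while (old != 0 || !store_conditional(&lock->lock, -1));
 *
 * store_conditional() is the same hypothetical helper used in the
 * earlier sketches.
 */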

/* Returns 1 if the write lock was acquired, 0 otherwise. */
static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		" R6 = memw_locked(%1)\n"
		" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
		" { if !P3 jump 1f; }\n"
		" memw_locked(%1,P3) = R6;\n"
		" %0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;

}

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_mb();	/* order critical-section accesses before the release */
	lock->lock = 0;
}

/* Spin until the lock word can be changed from 0 (unlocked) to 1 (locked). */
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__asm__ __volatile__(
		"1: R6 = memw_locked(%0);\n"
		" P3 = cmp.eq(R6,#0);\n"
		" { if !P3 jump 1b; R6 = #1; }\n"
		" memw_locked(%0,P3) = R6;\n"
		" { if !P3 jump 1b; }\n"
		:
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);

}
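
/*
 * Illustration only: the acquire loop above behaves roughly like
 *
 *	do {
 *		old = lock->lock;
 *	} while (old != 0 || !store_conditional(&lock->lock, 1));
 *
 * with store_conditional() again a hypothetical stand-in for the
 * memw_locked store.
 */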

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	smp_mb();
	lock->lock = 0;
}

/* Returns 1 if the lock was acquired, 0 otherwise. */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
	int temp;
	__asm__ __volatile__(
		" R6 = memw_locked(%1);\n"
		" P3 = cmp.eq(R6,#0);\n"
		" { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
		" memw_locked(%1,P3) = R6;\n"
		" %0 = P3;\n"
		"1:\n"
		: "=&r" (temp)
		: "r" (&lock->lock)
		: "memory", "r6", "p3"
	);
	return temp;
}
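
/*
 * Typical use of the trylock variant, for illustration only (my_lock is
 * a hypothetical arch_spinlock_t; real code would normally go through
 * the generic spin_trylock() wrappers rather than call this directly):
 *
 *	if (arch_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&my_lock);
 *	}
 */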

/*
 * SMP spinlocks are intended to allow only a single CPU at the lock.
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_unlock_wait(lock) \
	do {while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
#define arch_spin_is_locked(x) ((x)->lock != 0)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif