#ifndef __ARCH_M68KNOMMU_ATOMIC__
#define __ARCH_M68KNOMMU_ATOMIC__

#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

typedef struct { int counter; } atomic_t;
#define ATOMIC_INIT(i)	{ (i) }

#define atomic_read(v)		((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = (i))

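/*
 * Usage sketch (editor's illustration, not part of the original
 * header; the names nr_users and do_something are hypothetical):
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 1);
 *	if (atomic_read(&nr_users) > 0)
 *		do_something();
 */
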
static __inline__ void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	/*
	 * ColdFire cannot use an immediate operand here, so the
	 * value must come from a data register ("d" constraint).
	 */
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("addl %1,%0" : "+m" (*v) : "di" (i));
#endif
}

static __inline__ void atomic_sub(int i, atomic_t *v)
{
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "d" (i));
#else
	__asm__ __volatile__("subl %1,%0" : "+m" (*v) : "di" (i));
#endif
}

/* Atomically subtract @i from @v and return true if the result is zero. */
static __inline__ int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
#ifdef CONFIG_COLDFIRE
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "d" (i));
#else
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : "di" (i));
#endif
	return c != 0;
}

static __inline__ void atomic_inc(volatile atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static __inline__ void atomic_dec(volatile atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}

static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

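/*
 * Sketch of the "resource counting" use mentioned at the top of this
 * file (editor's illustration; the names are hypothetical).
 * example_put() returns true when the last user goes away:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	static void example_get(void)
 *	{
 *		atomic_inc(&nr_users);
 *	}
 *
 *	static int example_put(void)
 *	{
 *		return atomic_dec_and_test(&nr_users);
 *	}
 */
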
/* Atomically clear the bits set in @mask from the word at @v. */
static __inline__ void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : "id" (~(mask)));
}

/* Atomically set the bits in @mask in the word at @v. */
static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

/*
 * There is no SMP here, so an interrupt-disabled read-modify-write
 * is enough to make the update atomic.
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp += i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)

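/*
 * Editor's illustration (hypothetical names): atomic_add_negative()
 * suits counters that may legitimately drop below zero, e.g.
 *
 *	if (atomic_add_negative(-1, &obj->refcnt))
 *		release_object(obj);
 */
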
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long temp, flags;

	local_irq_save(flags);
	temp = *(long *)v;
	temp -= i;
	*(long *)v = temp;
	local_irq_restore(flags);

	return temp;
}

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

/*
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		/* retry the compare-and-swap until it sees an unchanged value */
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

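/*
 * Editor's illustration (hypothetical names): atomic_inc_not_zero()
 * takes a reference only if the count has not already reached zero,
 * which is useful in lookups that race with a final
 * atomic_dec_and_test().  Here the lookup fails if the object is
 * already on its way out:
 *
 *	if (!atomic_inc_not_zero(&obj->refcnt))
 *		return NULL;
 */
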
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

#include <asm-generic/atomic.h>
#endif /* __ARCH_M68KNOMMU_ATOMIC__ */