/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/backoff.h>
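
/* BACKOFF_SETUP() and BACKOFF_SPIN() below come from <asm/backoff.h>.
 * Roughly (a summary of the macros, not their exact text): on SMP they
 * implement an exponential backoff for failed cas/casx attempts, with
 * the spin limit seeded here and grown on each retry; on uniprocessor
 * builds they expand to (almost) nothing.
 */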

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */
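
	/* Every routine below is built around the same compare-and-swap
	 * retry loop.  As a rough C sketch (illustrative only, not the
	 * kernel's actual source):
	 *
	 *	do {
	 *		old = *ptr;			// lduw / ldx
	 *		new = old + i;			// add / sub
	 *	} while (cas(ptr, old, new) != old);	// cas/casx + cmp
	 *
	 * cas writes "new" to *ptr only if *ptr still equals "old", and
	 * always returns the prior memory contents (in %g7 here).  On a
	 * miss we branch to BACKOFF_SPIN, which delays and retries at 1:.
	 */
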
	.globl	atomic_add
	.type	atomic_add,#function
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1		/* load the current 32-bit value */
	add	%g1, %o0, %g7		/* compute old + increment */
	cas	[%o1], %g1, %g7		/* store iff memory still equals %g1 */
	cmp	%g1, %g7		/* %g7 now holds the prior contents */
	bne,pn	%icc, 2f		/* lost the race: back off and retry */
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_add, .-atomic_add

	.globl	atomic_sub
	.type	atomic_sub,#function
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_sub, .-atomic_sub

	/* On SMP we need to use memory barriers to ensure
	 * correct memory operation ordering; nop these out
	 * for uniprocessor.
	 */
#ifdef CONFIG_SMP

#define ATOMIC_PRE_BARRIER	membar #StoreLoad | #LoadLoad;
#define ATOMIC_POST_BARRIER	\
	ba,pt %xcc, 80b;	\
	membar #StoreLoad | #StoreStore

80:	retl
	 nop
#else
#define ATOMIC_PRE_BARRIER
#define ATOMIC_POST_BARRIER
#endif
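
	/* Control-flow note: on SMP, ATOMIC_POST_BARRIER branches to the
	 * shared "80:" return stub above, executing its membar in the
	 * branch delay slot.  The retl/nop that textually follows the
	 * macro in each *_ret routine below is therefore only reached on
	 * uniprocessor builds, where both macros expand to nothing.
	 */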

	.globl	atomic_add_ret
	.type	atomic_add_ret,#function
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	add	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 add	%g7, %o0, %g7	/* delay slot: old value + increment = result */
	sra	%g7, 0, %o0	/* sign-extend the 32-bit result for return */
	ATOMIC_POST_BARRIER
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_add_ret, .-atomic_add_ret

	.globl	atomic_sub_ret
	.type	atomic_sub_ret,#function
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	lduw	[%o1], %g1
	sub	%g1, %o0, %g7
	cas	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%icc, 2f
	 sub	%g7, %o0, %g7
	sra	%g7, 0, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic_sub_ret, .-atomic_sub_ret
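
	/* The 64-bit variants below follow exactly the same pattern,
	 * using ldx/casx on the full 64-bit word and testing %xcc
	 * instead of %icc.  Their *_ret versions can return the result
	 * with a plain mov, since no sign extension is needed.
	 */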

	.globl	atomic64_add
	.type	atomic64_add,#function
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_add, .-atomic64_add

	.globl	atomic64_sub
	.type	atomic64_sub,#function
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 nop
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_sub, .-atomic64_sub

	.globl	atomic64_add_ret
	.type	atomic64_add_ret,#function
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	add	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 add	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_add_ret, .-atomic64_add_ret

	.globl	atomic64_sub_ret
	.type	atomic64_sub_ret,#function
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
	BACKOFF_SETUP(%o2)
	ATOMIC_PRE_BARRIER
1:	ldx	[%o1], %g1
	sub	%g1, %o0, %g7
	casx	[%o1], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, 2f
	 sub	%g7, %o0, %g7
	mov	%g7, %o0
	ATOMIC_POST_BARRIER
	retl
	 nop
2:	BACKOFF_SPIN(%o2, %o3, 1b)
	.size	atomic64_sub_ret, .-atomic64_sub_ret
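
/* For reference, the C-side declarations look something like the
 * following (a sketch of this era's <asm-sparc64/atomic.h>, not a
 * verbatim copy -- the register comments above match %o0 = amount,
 * %o1 = pointer):
 *
 *	extern void atomic_add(int, atomic_t *);
 *	extern int  atomic_add_ret(int, atomic_t *);
 *	extern void atomic64_add(int, atomic64_t *);
 *	extern long atomic64_add_ret(int, atomic64_t *);
 *	(and the corresponding _sub variants)
 */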