arch/sparc/lib/atomic_64.S
/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007, 2012 David S. Miller (davem@davemloft.net)
 */

#include <linux/linkage.h>
#include <asm/asi.h>
#include <asm/backoff.h>

	.text

/* Two versions of the atomic routines, one that
 * does not return a value and does not perform
 * memory barriers, and a second which returns
 * a value and does the barriers.
 */

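/* Illustrative sketch: every routine below is a compare-and-swap retry
 * loop with exponential backoff, roughly equivalent to the following
 * hypothetical C (cas() stands in for the cas/casx instruction, not a
 * real helper):
 *
 *	do {
 *		old = *ptr;			// lduw or ldx
 *		new = old OP increment;		// add/sub/and/or/xor
 *	} while (cas(ptr, old, new) != old);	// BACKOFF_SPIN on failure
 */
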
#define ATOMIC_OP(op)							\
ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */		\
	BACKOFF_SETUP(%o2);						\
1:	lduw	[%o1], %g1;	/* load the old 32-bit value */		\
	op	%g1, %o0, %g7;	/* compute the new value */		\
	cas	[%o1], %g1, %g7; /* store it if still unchanged */	\
	cmp	%g1, %g7;						\
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);				\
	 nop;								\
	retl;								\
	 nop;								\
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic_##op);

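/* For illustration: ATOMIC_OP(add) expands to ENTRY(atomic_add), with
 * "add %g1, %o0, %g7" computing the new value that cas then tries to
 * install, so atomic_add(i, v) loops until its add takes effect.
 */
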
#define ATOMIC_OP_RETURN(op)						\
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
	BACKOFF_SETUP(%o2);						\
1:	lduw	[%o1], %g1;						\
	op	%g1, %o0, %g7;						\
	cas	[%o1], %g1, %g7;					\
	cmp	%g1, %g7;						\
	bne,pn	%icc, BACKOFF_LABEL(2f, 1b);				\
	 op	%g1, %o0, %g1;	/* recompute the result in the delay slot */ \
	retl;								\
	 sra	%g1, 0, %o0;	/* sign-extend the 32-bit result */	\
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic_##op##_return);

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
ATOMIC_OP(and)
ATOMIC_OP(or)
ATOMIC_OP(xor)
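
/* The instantiations above generate atomic_add, atomic_add_return,
 * atomic_sub, atomic_sub_return, atomic_and, atomic_or and atomic_xor;
 * only add and sub get _return variants here.
 */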

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

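/* The 64-bit variants below follow the same pattern, but use ldx/casx
 * and the %xcc condition codes, and return the full 64-bit result with
 * no sign-extending sra.
 */
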
#define ATOMIC64_OP(op)							\
ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */		\
	BACKOFF_SETUP(%o2);						\
1:	ldx	[%o1], %g1;						\
	op	%g1, %o0, %g7;						\
	casx	[%o1], %g1, %g7;					\
	cmp	%g1, %g7;						\
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);				\
	 nop;								\
	retl;								\
	 nop;								\
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic64_##op);

#define ATOMIC64_OP_RETURN(op)						\
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */	\
	BACKOFF_SETUP(%o2);						\
1:	ldx	[%o1], %g1;						\
	op	%g1, %o0, %g7;						\
	casx	[%o1], %g1, %g7;					\
	cmp	%g1, %g7;						\
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b);				\
	 nop;								\
	retl;								\
	 op	%g1, %o0, %o0;	/* compute the result in the delay slot */ \
2:	BACKOFF_SPIN(%o2, %o3, 1b);					\
ENDPROC(atomic64_##op##_return);

#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op)

ATOMIC64_OPS(add)
ATOMIC64_OPS(sub)
ATOMIC64_OP(and)
ATOMIC64_OP(or)
ATOMIC64_OP(xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

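/* Hypothetical C equivalent of the routine below, for clarity only
 * (casx() stands in for the casx instruction):
 *
 *	long atomic64_dec_if_positive(atomic64_t *v)
 *	{
 *		long old;
 *		do {
 *			old = v->counter;	// ldx
 *			if (old <= 0)		// brlez: bail out, no store
 *				break;
 *		} while (casx(&v->counter, old, old - 1) != old);
 *		return old - 1;	// new value, or old - 1 if nothing stored
 *	}
 */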
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
	BACKOFF_SETUP(%o2)
1:	ldx	[%o0], %g1
	brlez,pn %g1, 3f
	 sub	%g1, 1, %g7
	casx	[%o0], %g1, %g7
	cmp	%g1, %g7
	bne,pn	%xcc, BACKOFF_LABEL(2f, 1b)
	 nop
3:	retl
	 sub	%g1, 1, %o0
2:	BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive)