#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
	: "=&z" (tmp)
	: "r" (i), "r" (&v->counter)
	: "t");
}
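
/*
 * Illustrative sketch, not compiled: movli.l/movco.l form a
 * load-linked/store-conditional pair, so the loop above behaves
 * roughly like the C below, where store_conditional() is a
 * hypothetical helper that fails if any other store to v->counter
 * intervened since the linked load:
 *
 *	do {
 *		tmp = v->counter;			! movli.l
 *		tmp += i;				! add
 *	} while (!store_conditional(&v->counter, tmp));	! movco.l + bf
 */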
22 | ||
23 | static inline void atomic_sub(int i, atomic_t *v) | |
24 | { | |
25 | unsigned long tmp; | |
26 | ||
27 | __asm__ __volatile__ ( | |
28 | "1: movli.l @%2, %0 ! atomic_sub \n" | |
29 | " sub %1, %0 \n" | |
30 | " movco.l %0, @%2 \n" | |
31 | " bf 1b \n" | |
32 | : "=&z" (tmp) | |
33 | : "r" (i), "r" (&v->counter) | |
34 | : "t"); | |
35 | } | |
36 | ||
37 | /* | |
38 | * SH-4A note: | |
39 | * | |
40 | * We basically get atomic_xxx_return() for free compared with | |
41 | * atomic_xxx(). movli.l/movco.l require r0 due to the instruction | |
42 | * encoding, so the retval is automatically set without having to | |
43 | * do any special work. | |
44 | */ | |
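
/*
 * Constraint note: in GCC's SH machine constraints, "z" is r0 and "t"
 * is the T bit. "=&z" therefore pins the result to r0 with an
 * earlyclobber, matching the r0 requirement described above, and the
 * "t" clobber covers the T bit that movco.l sets and bf tests.
 */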
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long temp;

	__asm__ __volatile__ (
"1:	movli.l @%2, %0		! atomic_add_return	\n"
"	add	%1, %0				\n"
"	movco.l	%0, @%2				\n"
"	bf	1b				\n"
"	synco					\n"
	: "=&z" (temp)
	: "r" (i), "r" (&v->counter)
	: "t");

	return temp;
}
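
/*
 * A note on the synco above: synco is the SH-4A synchronization
 * barrier instruction. Including it here presumably makes the
 * value-returning variants act as full memory barriers, which is
 * what the Linux atomic_*_return() interfaces are expected to be;
 * the non-returning variants above omit it.
 */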
61 | ||
62 | static inline int atomic_sub_return(int i, atomic_t *v) | |
63 | { | |
64 | unsigned long temp; | |
65 | ||
66 | __asm__ __volatile__ ( | |
67 | "1: movli.l @%2, %0 ! atomic_sub_return \n" | |
68 | " sub %1, %0 \n" | |
69 | " movco.l %0, @%2 \n" | |
70 | " bf 1b \n" | |
71 | " synco \n" | |
72 | : "=&z" (temp) | |
73 | : "r" (i), "r" (&v->counter) | |
74 | : "t"); | |
75 | ||
76 | return temp; | |
77 | } | |
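
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * return value is the counter after the operation, so a typical
 * refcount-style release would look like:
 *
 *	if (atomic_sub_return(1, &obj->refcount) == 0)
 *		release_object(obj);	! hypothetical helper
 */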
78 | ||
79 | static inline void atomic_clear_mask(unsigned int mask, atomic_t *v) | |
80 | { | |
81 | unsigned long tmp; | |
82 | ||
83 | __asm__ __volatile__ ( | |
84 | "1: movli.l @%2, %0 ! atomic_clear_mask \n" | |
85 | " and %1, %0 \n" | |
86 | " movco.l %0, @%2 \n" | |
87 | " bf 1b \n" | |
88 | : "=&z" (tmp) | |
89 | : "r" (~mask), "r" (&v->counter) | |
90 | : "t"); | |
91 | } | |
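
/*
 * Note that atomic_clear_mask() complements the mask once in the C
 * input operand ("r" (~mask)) rather than in the loop, so each retry
 * needs only a single and; atomic_set_mask() below passes the mask
 * through unchanged and uses or.
 */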
92 | ||
93 | static inline void atomic_set_mask(unsigned int mask, atomic_t *v) | |
94 | { | |
95 | unsigned long tmp; | |
96 | ||
97 | __asm__ __volatile__ ( | |
98 | "1: movli.l @%2, %0 ! atomic_set_mask \n" | |
99 | " or %1, %0 \n" | |
100 | " movco.l %0, @%2 \n" | |
101 | " bf 1b \n" | |
102 | : "=&z" (tmp) | |
103 | : "r" (mask), "r" (&v->counter) | |
104 | : "t"); | |
105 | } | |
106 | ||
107 | #endif /* __ASM_SH_ATOMIC_LLSC_H */ |