Commit | Line | Data |
---|---|---|
867e359b CM |
1 | /* |
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * These routines make two important assumptions: | |
15 | * | |
16 | * 1. atomic_t is really an int and can be freely cast back and forth | |
17 | * (validated in __init_atomic_per_cpu). | |
18 | * | |
19 | * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using | |
20 | * the same locking convention that all the kernel atomic routines use. | |
21 | */ | |
22 | ||
23 | #ifndef _ASM_TILE_FUTEX_H | |
24 | #define _ASM_TILE_FUTEX_H | |
25 | ||
26 | #ifndef __ASSEMBLY__ | |
27 | ||
28 | #include <linux/futex.h> | |
29 | #include <linux/uaccess.h> | |
30 | #include <linux/errno.h> | |
47d632f9 | 31 | #include <asm/atomic.h> |
867e359b | 32 | |
47d632f9 CM |
33 | /* |
34 | * Support macros for futex operations. Do not use these macros directly. | |
35 | * They assume "ret", "val", "oparg", and "uaddr" in the lexical context. | |
36 | * __futex_cmpxchg() additionally assumes "oldval". | |
37 | */ | |
38 | ||
#ifdef __tilegx__

/*
 * TILE-Gx: each futex op is one native 32-bit atomic instruction.
 * On success the asm stores 0 into "ret" and the old memory value into
 * "val".  On a fault, the .fixup stanza (reached via the __ex_table
 * entry mapping label 1 to label 0) loads -EFAULT into "ret" and jumps
 * past the instruction to label 9.
 */
#define __futex_asm(OP) \
	asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
	    ".pushsection .fixup,\"ax\"\n" \
	    "0: { movei %0, %5; j 9f }\n" \
	    ".section __ex_table,\"a\"\n" \
	    ".align 8\n" \
	    ".quad 1b, 0b\n" \
	    ".popsection\n" \
	    "9:" \
	    : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
	    : "r" (uaddr), "r" (oparg), "i" (-EFAULT))

#define __futex_set() __futex_asm(exch4)
#define __futex_add() __futex_asm(fetchadd4)
#define __futex_or() __futex_asm(fetchor4)
/* and-not: complement "oparg" in place, then use the fetch-and insn. */
#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
/* cmpexch4 takes its comparison value from SPR_CMPEXCH_VALUE. */
#define __futex_cmpxchg() \
	({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })

/*
 * No native fetch-xor, so emulate it with a cmpxchg loop: read the
 * current value, try to install (oldval ^ n), and retry until the
 * cmpxchg observes the value we read (oldval == val) or faults.
 */
#define __futex_xor() \
	({ \
		u32 oldval, n = oparg; \
		if ((ret = __get_user(oldval, uaddr)) == 0) { \
			do { \
				oparg = oldval ^ n; \
				__futex_cmpxchg(); \
			} while (ret == 0 && oldval != val); \
		} \
	})

/* No need to prefetch, since the atomic ops go to the home cache anyway. */
#define __futex_prolog()

#else

/*
 * 32-bit TILE: delegate to the kernel's hashed-lock atomic helpers.
 * Each helper returns a struct __get_user carrying the old value and
 * an error code, which we unpack into the lexical "val"/"ret".
 */
#define __futex_call(FN) \
	{ \
		struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
		val = gu.val; \
		ret = gu.err; \
	}

#define __futex_set() __futex_call(__atomic_xchg)
#define __futex_add() __futex_call(__atomic_xchg_add)
#define __futex_or() __futex_call(__atomic_or)
#define __futex_andn() __futex_call(__atomic_andn)
#define __futex_xor() __futex_call(__atomic_xor)

/* cmpxchg takes an extra "oldval" argument, so it can't use __futex_call. */
#define __futex_cmpxchg() \
	{ \
		struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
							lock, oldval, oparg); \
		val = gu.val; \
		ret = gu.err; \
	}

/*
 * Find the lock pointer for the atomic calls to use, and issue a
 * prefetch to the user address to bring it into cache.  Similar to
 * __atomic_setup(), but we can't do a read into the L1 since it might
 * fault; instead we do a prefetch into the L2.
 */
#define __futex_prolog() \
	int *lock; \
	__insn_prefetch(uaddr); \
	lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
108 | ||
8d7718aa | 109 | static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) |
867e359b CM |
110 | { |
111 | int op = (encoded_op >> 28) & 7; | |
112 | int cmp = (encoded_op >> 24) & 15; | |
113 | int oparg = (encoded_op << 8) >> 20; | |
114 | int cmparg = (encoded_op << 20) >> 20; | |
47d632f9 CM |
115 | int uninitialized_var(val), ret; |
116 | ||
117 | __futex_prolog(); | |
118 | ||
119 | /* The 32-bit futex code makes this assumption, so validate it here. */ | |
120 | BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int)); | |
867e359b CM |
121 | |
122 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | |
123 | oparg = 1 << oparg; | |
124 | ||
8d7718aa | 125 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
867e359b CM |
126 | return -EFAULT; |
127 | ||
128 | pagefault_disable(); | |
129 | switch (op) { | |
130 | case FUTEX_OP_SET: | |
47d632f9 | 131 | __futex_set(); |
867e359b CM |
132 | break; |
133 | case FUTEX_OP_ADD: | |
47d632f9 | 134 | __futex_add(); |
867e359b CM |
135 | break; |
136 | case FUTEX_OP_OR: | |
47d632f9 | 137 | __futex_or(); |
867e359b CM |
138 | break; |
139 | case FUTEX_OP_ANDN: | |
47d632f9 | 140 | __futex_andn(); |
867e359b CM |
141 | break; |
142 | case FUTEX_OP_XOR: | |
47d632f9 | 143 | __futex_xor(); |
867e359b CM |
144 | break; |
145 | default: | |
47d632f9 CM |
146 | ret = -ENOSYS; |
147 | break; | |
867e359b CM |
148 | } |
149 | pagefault_enable(); | |
150 | ||
867e359b CM |
151 | if (!ret) { |
152 | switch (cmp) { | |
153 | case FUTEX_OP_CMP_EQ: | |
47d632f9 | 154 | ret = (val == cmparg); |
867e359b CM |
155 | break; |
156 | case FUTEX_OP_CMP_NE: | |
47d632f9 | 157 | ret = (val != cmparg); |
867e359b CM |
158 | break; |
159 | case FUTEX_OP_CMP_LT: | |
47d632f9 | 160 | ret = (val < cmparg); |
867e359b CM |
161 | break; |
162 | case FUTEX_OP_CMP_GE: | |
47d632f9 | 163 | ret = (val >= cmparg); |
867e359b CM |
164 | break; |
165 | case FUTEX_OP_CMP_LE: | |
47d632f9 | 166 | ret = (val <= cmparg); |
867e359b CM |
167 | break; |
168 | case FUTEX_OP_CMP_GT: | |
47d632f9 | 169 | ret = (val > cmparg); |
867e359b CM |
170 | break; |
171 | default: | |
172 | ret = -ENOSYS; | |
173 | } | |
174 | } | |
175 | return ret; | |
176 | } | |
177 | ||
8d7718aa | 178 | static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
47d632f9 | 179 | u32 oldval, u32 oparg) |
867e359b | 180 | { |
47d632f9 CM |
181 | int ret, val; |
182 | ||
183 | __futex_prolog(); | |
867e359b | 184 | |
8d7718aa | 185 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
867e359b CM |
186 | return -EFAULT; |
187 | ||
47d632f9 | 188 | __futex_cmpxchg(); |
867e359b | 189 | |
47d632f9 CM |
190 | *uval = val; |
191 | return ret; | |
192 | } | |
0707ad30 | 193 | |
867e359b CM |
194 | #endif /* !__ASSEMBLY__ */ |
195 | ||
196 | #endif /* _ASM_TILE_FUTEX_H */ |