/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H
/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#define __mutex_fastpath_lock(v, fail_fn)				\
do {									\
	unsigned long dummy;						\
									\
	/* Compile-time type checks on both macro arguments. */		\
	typecheck(atomic_t *, v);					\
	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
									\
	/*								\
	 * The "D" constraints pin @v into %rdi, which is also the	\
	 * first-argument register of the x86-64 calling convention,	\
	 * so the out-of-line <fail_fn> receives @v directly when the	\
	 * "call" is taken.  "jns" skips the slowpath call when the	\
	 * locked decrement left a non-negative count.  The clobber	\
	 * list names the remaining caller-saved registers that		\
	 * <fail_fn> is allowed to trash, plus "memory" because the	\
	 * count is modified behind the compiler's back.		\
	 */								\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   decl (%%rdi)	\n"			\
			"   jns 1f		\n"			\
			"   call "#fail_fn"	\n"			\
			"1:"						\
									\
		:"=D" (dummy)						\
		: "D" (v)						\
		: "rax", "rsi", "rdx", "rcx",				\
		  "r8", "r9", "r10", "r11", "memory");			\
} while (0)
37 | ||
38 | /** | |
39 | * __mutex_fastpath_lock_retval - try to take the lock by moving the count | |
40 | * from 1 to a 0 value | |
41 | * @count: pointer of type atomic_t | |
42 | * @fail_fn: function to call if the original value was not 1 | |
43 | * | |
44 | * Change the count from 1 to a value lower than 1, and call <fail_fn> if | |
45 | * it wasn't 1 originally. This function returns 0 if the fastpath succeeds, | |
46 | * or anything the slow path function returns | |
47 | */ | |
48 | static inline int | |
49 | __mutex_fastpath_lock_retval(atomic_t *count, | |
c49c5330 | 50 | int (*fail_fn)(atomic_t *)) |
b8aa0361 IM |
51 | { |
52 | if (unlikely(atomic_dec_return(count) < 0)) | |
53 | return fail_fn(count); | |
54 | else | |
55 | return 0; | |
56 | } | |
57 | ||
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#define __mutex_fastpath_unlock(v, fail_fn)				\
do {									\
	unsigned long dummy;						\
									\
	/* Compile-time type checks on both macro arguments. */		\
	typecheck(atomic_t *, v);					\
	typecheck_fn(void (*)(atomic_t *), fail_fn);			\
									\
	/*								\
	 * Mirror image of __mutex_fastpath_lock: @v is pinned into	\
	 * %rdi (also the first-argument register of the x86-64 ABI,	\
	 * so <fail_fn> receives @v), and "jg" skips the slowpath	\
	 * call when the locked increment produced a strictly		\
	 * positive count.  Caller-saved registers are clobbered by	\
	 * the potential out-of-line call.				\
	 */								\
	__asm__ __volatile__(						\
		LOCK_PREFIX "   incl (%%rdi)	\n"			\
			"   jg 1f		\n"			\
			"   call "#fail_fn"	\n"			\
			"1: "						\
									\
		:"=D" (dummy)						\
		: "D" (v)						\
		: "rax", "rsi", "rdx", "rcx",				\
		  "r8", "r9", "r10", "r11", "memory");			\
} while (0)
83 | ||
/*
 * NOTE(review): non-zero appears to tell the generic mutex code that the
 * unlock slowpath must still restore the count itself (this arch's
 * fastpath does not hand the lock over) -- confirm against kernel/mutex.c.
 */
#define __mutex_slowpath_needs_to_unlock() 1
85 | ||
86 | /** | |
87 | * __mutex_fastpath_trylock - try to acquire the mutex, without waiting | |
88 | * | |
89 | * @count: pointer of type atomic_t | |
90 | * @fail_fn: fallback function | |
91 | * | |
92 | * Change the count from 1 to 0 and return 1 (success), or return 0 (failure) | |
93 | * if it wasn't 1 originally. [the fallback function is never used on | |
94 | * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.] | |
95 | */ | |
96 | static inline int | |
97 | __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *)) | |
98 | { | |
4cec8736 | 99 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) |
b8aa0361 IM |
100 | return 1; |
101 | else | |
102 | return 0; | |
103 | } | |
#endif