#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
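
/* For reference, a sketch of roughly what membar_safe("#StoreLoad") emits,
 * so the shape of the erratum workaround is visible at a glance (the "1:"
 * label is local to the asm statement):
 *
 *	ba,pt	%xcc, 1f	! branch always, predicted taken
 *	 membar	#StoreLoad	! the barrier rides in the delay slot
 *	1:
 */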

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
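
/* A rough illustration of why only mb() needs a real fence under TSO:
 * the sole reordering TSO permits is a store followed by a later load
 * (store-buffering).  X, Y, r0 and r1 below are hypothetical:
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	mb();				mb();
 *	r0 = READ_ONCE(Y);		r1 = READ_ONCE(X);
 *
 * Without the mb()s, TSO allows the outcome r0 == 0 && r1 == 0.  Load-load
 * and store-store order is already preserved by TSO, which is why rmb()
 * and wmb() need only be compiler barriers.
 */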

#define dma_rmb()	rmb()
#define dma_wmb()	wmb()

#define smp_store_mb(__var, __value) \
	do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
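
/* Sketch of the pattern smp_store_mb() is meant for: publish a flag, then
 * read another CPU's state, with the #StoreLoad fence keeping the store
 * ordered before the subsequent load.  "flag", "other" and do_something()
 * are hypothetical:
 *
 *	smp_store_mb(flag, 1);
 *	if (READ_ONCE(other))
 *		do_something();
 */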

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define read_barrier_depends()		do { } while (0)
#define smp_read_barrier_depends()	do { } while (0)

#define smp_store_release(p, v)					\
do {								\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	WRITE_ONCE(*p, v);					\
} while (0)

#define smp_load_acquire(p)					\
({								\
	typeof(*p) ___p1 = READ_ONCE(*p);			\
	compiletime_assert_atomic_type(*p);			\
	barrier();						\
	___p1;							\
})
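
/* A sketch of the message-passing pattern these two are built for; msg,
 * msg_ready, producer() and consumer() are hypothetical:
 *
 *	int msg, msg_ready;
 *
 *	void producer(void)
 *	{
 *		msg = 42;
 *		smp_store_release(&msg_ready, 1);
 *	}
 *
 *	void consumer(void)
 *	{
 *		if (smp_load_acquire(&msg_ready))
 *			BUG_ON(msg != 42);
 *	}
 *
 * The release on msg_ready pairs with the acquire, so a consumer that
 * observes msg_ready == 1 is guaranteed to observe msg == 42.
 */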

#define smp_mb__before_atomic()	barrier()
#define smp_mb__after_atomic()	barrier()
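
/* These are compiler barriers only, which the barrier()-only definitions
 * above suggest is because sparc64's atomic ops already provide the needed
 * ordering under TSO.  Hedged usage sketch; "v" is a hypothetical atomic_t:
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&v);
 *	smp_mb__after_atomic();
 */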

#endif /* !(__SPARC64_BARRIER_H) */