/*
 * include/asm-cris/system.h
 *
 * Low-level system definitions for the CRIS architecture:
 * task switching, memory barriers and the xchg primitive.
 */
1 | #ifndef __ASM_CRIS_SYSTEM_H |
2 | #define __ASM_CRIS_SYSTEM_H | |
3 | ||
4 | #include <asm/arch/system.h> | |
5 | ||
6 | /* the switch_to macro calls resume, an asm function in entry.S which does the actual | |
7 | * task switching. | |
8 | */ | |
9 | ||
10 | extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int); | |
11 | #define prepare_to_switch() do { } while(0) | |
12 | #define switch_to(prev,next,last) last = resume(prev,next, \ | |
13 | (int)&((struct task_struct *)0)->thread) | |

/*
 * Memory barriers.  All of mb()/rmb()/wmb() reduce to a compiler-only
 * barrier here: the asm emits no instructions, only a "memory" clobber
 * that stops the compiler from reordering or caching memory accesses
 * across it.
 */
#define barrier() __asm__ __volatile__("": : :"memory")
#define mb() barrier()
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
/* Store a value, then issue the corresponding barrier. */
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()     read_barrier_depends()
#else
/* On UP, cross-CPU ordering is moot; a compiler barrier is enough. */
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()     do { } while(0)
#endif

/* iret is a no-op on this architecture's C side. */
#define iret()

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

/*
 * __xchg - exchange the value at ptr with x for the given operand size.
 *
 * Etrax has no atomic xchg instruction, so the swap is made atomic with
 * respect to interrupts on the local CPU by disabling irqs around a
 * plain load/store pair.  Returns the previous value stored at ptr.
 * For sizes other than 1, 2 or 4 no exchange is performed and x is
 * returned unchanged.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	/* since Etrax doesn't have any atomic xchg instructions, we need to disable
	   irq's (if enabled) and do it with move.d's */
	unsigned long flags,temp;
	local_save_flags(flags); /* save flags, including irq enable bit */
	local_irq_disable();     /* shut off irq's */
	switch (size) {
	case 1:
		/* bounce x through temp so only byte-wide accesses touch *ptr */
		*((unsigned char *)&temp) = x;
		x = *(unsigned char *)ptr;
		*(unsigned char *)ptr = *((unsigned char *)&temp);
		break;
	case 2:
		*((unsigned short *)&temp) = x;
		x = *(unsigned short *)ptr;
		*(unsigned short *)ptr = *((unsigned short *)&temp);
		break;
	case 4:
		temp = x;
		x = *(unsigned long *)ptr;
		*(unsigned long *)ptr = temp;
		break;
	default:
		/* unsupported size: leave *ptr and x untouched */
		break;
	}
	local_irq_restore(flags); /* restore irq enable bit */
	return x;
}
71 | ||
72 | #define arch_align_stack(x) (x) | |
73 | ||
74 | #endif |