/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_SYSTEM_H
#define _ASM_TILE_SYSTEM_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/irqflags.h>

/* NOTE: we can't include <linux/ptrace.h> due to #include dependencies. */
#include <asm/ptrace.h>

#include <arch/chip.h>
#include <arch/sim_def.h>
#include <arch/spr_def.h>

/*
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than is
 * rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 */

#define read_barrier_depends() do { } while (0)
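
/*
 * Illustrative sketch (the struct and helper names below are
 * hypothetical, not part of this file): the pointer-publication
 * pattern that the comment above describes.  The writer orders its
 * initialization before publishing the pointer; the reader only needs
 * read_barrier_depends() because its second load depends on the value
 * returned by the first.
 *
 *      struct foo { int a; };
 *      struct foo *gp;
 *
 *      void publish(struct foo *p)          // writer side
 *      {
 *              p->a = 42;
 *              smp_wmb();                   // init before pointer store
 *              gp = p;
 *      }
 *
 *      int consume(void)                    // reader side
 *      {
 *              struct foo *q = gp;
 *              read_barrier_depends();      // pointer load before deref
 *              return q ? q->a : -1;
 *      }
 */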
83 | ||
84 | #define __sync() __insn_mf() | |
85 | ||
86 | #if CHIP_HAS_SPLIT_CYCLE() | |
87 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE_LOW) | |
88 | #else | |
89 | #define get_cycles_low() __insn_mfspr(SPR_CYCLE) /* just get all 64 bits */ | |
90 | #endif | |
91 | ||
92 | /* Fence to guarantee visibility of stores to incoherent memory. */ | |
93 | static inline void | |
94 | mb_incoherent(void) | |
95 | { | |
96 | __insn_mf(); | |
97 | ||
98 | #if !CHIP_HAS_MF_WAITS_FOR_VICTIMS() | |
99 | { | |
100 | int __mb_incoherent(void); | |
101 | #if CHIP_HAS_TILE_WRITE_PENDING() | |
102 | const unsigned long WRITE_TIMEOUT_CYCLES = 400; | |
103 | unsigned long start = get_cycles_low(); | |
104 | do { | |
105 | if (__insn_mfspr(SPR_TILE_WRITE_PENDING) == 0) | |
106 | return; | |
107 | } while ((get_cycles_low() - start) < WRITE_TIMEOUT_CYCLES); | |
108 | #endif /* CHIP_HAS_TILE_WRITE_PENDING() */ | |
109 | (void) __mb_incoherent(); | |
110 | } | |
111 | #endif /* CHIP_HAS_MF_WAITS_FOR_VICTIMS() */ | |
112 | } | |
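
/*
 * Illustrative sketch (buf, len and notify_consumer() are hypothetical):
 * a typical use of mb_incoherent() is to make stores to an incoherent
 * buffer visible in memory before telling the consumer about them:
 *
 *      memcpy(buf->data, src, len);    // fill incoherent buffer
 *      buf->len = len;
 *      mb_incoherent();                // wait for stores to reach memory
 *      notify_consumer();              // e.g. ring a device doorbell
 */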
113 | ||
114 | #define fast_wmb() __sync() | |
115 | #define fast_rmb() __sync() | |
116 | #define fast_mb() __sync() | |
117 | #define fast_iob() mb_incoherent() | |
118 | ||
119 | #define wmb() fast_wmb() | |
120 | #define rmb() fast_rmb() | |
121 | #define mb() fast_mb() | |
122 | #define iob() fast_iob() | |
123 | ||
124 | #ifdef CONFIG_SMP | |
125 | #define smp_mb() mb() | |
126 | #define smp_rmb() rmb() | |
127 | #define smp_wmb() wmb() | |
128 | #define smp_read_barrier_depends() read_barrier_depends() | |
129 | #else | |
130 | #define smp_mb() barrier() | |
131 | #define smp_rmb() barrier() | |
132 | #define smp_wmb() barrier() | |
133 | #define smp_read_barrier_depends() do { } while (0) | |
134 | #endif | |
135 | ||
136 | #define set_mb(var, value) \ | |
137 | do { var = value; mb(); } while (0) | |
138 | ||
/*
 * Pause the DMA engine and static network before task switching.
 */
#define prepare_arch_switch(next) _prepare_arch_switch(next)
void _prepare_arch_switch(struct task_struct *next);


/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 * The number of callee-saved registers saved on the kernel stack
 * is defined here for use in copy_thread() and must agree with __switch_to().
 */
#endif /* !__ASSEMBLY__ */
#define CALLEE_SAVED_FIRST_REG 30
#define CALLEE_SAVED_REGS_COUNT 24   /* r30 to r52, plus an empty to align */
#ifndef __ASSEMBLY__
struct task_struct;
#define switch_to(prev, next, last) ((last) = _switch_to((prev), (next)))
extern struct task_struct *_switch_to(struct task_struct *prev,
                                      struct task_struct *next);

/* Helper function for _switch_to(). */
extern struct task_struct *__switch_to(struct task_struct *prev,
                                       struct task_struct *next,
                                       unsigned long new_system_save_1_0);

/* Address that switched-away-from tasks are at. */
extern unsigned long get_switch_to_pc(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

#define arch_align_stack(x) (x)

/*
 * Is the kernel doing fixups of unaligned accesses?  If <0, no kernel
 * intervention occurs and SIGBUS is delivered with no data address
 * info.  If 0, the kernel single-steps the instruction to discover
 * the data address to provide with the SIGBUS.  If 1, the kernel does
 * a fixup.
 */
extern int unaligned_fixup;

/* Is the kernel printing on each unaligned fixup? */
extern int unaligned_printk;

/* Number of unaligned fixups performed */
extern unsigned int unaligned_fixup_count;

/* Init-time routine to do tile-specific per-cpu setup. */
void setup_cpu(int boot);

/* User-level DMA management functions */
void grant_dma_mpls(void);
void restrict_dma_mpls(void);

#ifdef CONFIG_HARDWALL
/* User-level network management functions */
void reset_network_state(void);
void grant_network_mpls(void);
void restrict_network_mpls(void);
int hardwall_deactivate(struct task_struct *task);

/* Hook hardwall code into changes in affinity. */
#define arch_set_cpus_allowed(p, new_mask) do { \
        if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \
                hardwall_deactivate(p); \
} while (0)
#endif

/* Invoke the simulator "syscall" mechanism (see arch/tile/kernel/entry.S). */
extern int _sim_syscall(int syscall_num, ...);
#define sim_syscall(syscall_num, ...) \
        _sim_syscall(SIM_CONTROL_SYSCALL + \
                     ((syscall_num) << _SIM_CONTROL_OPERATOR_BITS), \
                     ## __VA_ARGS__)

/*
 * Kernel threads can check to see if they need to migrate their
 * stack whenever they return from a context switch; for user
 * threads, we defer until they are returning to user-space.
 */
#define finish_arch_switch(prev) do { \
        if (unlikely((prev)->state == TASK_DEAD)) \
                __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT | \
                             ((prev)->pid << _SIM_CONTROL_OPERATOR_BITS)); \
        __insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH | \
                     (current->pid << _SIM_CONTROL_OPERATOR_BITS)); \
        if (current->mm == NULL && !kstack_hash && \
            current_thread_info()->homecache_cpu != smp_processor_id()) \
                homecache_migrate_kthread(); \
} while (0)

/* Support function for forking a new task. */
void ret_from_fork(void);

/* Called from ret_from_fork() when a new process starts up. */
struct task_struct *sim_notify_fork(struct task_struct *prev);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_TILE_SYSTEM_H */