arch/tile/include/asm/irqflags.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_IRQFLAGS_H
#define _ASM_TILE_IRQFLAGS_H

#include <arch/interrupts.h>
#include <arch/chip.h>

/*
 * The set of interrupts we want to allow when interrupts are nominally
 * disabled. The remainder are effectively "NMI" interrupts from
 * the point of view of the generic Linux code. Note that synchronous
 * interrupts (aka "non-queued") are not blocked by the mask in any case.
 */
#define LINUX_MASKABLE_INTERRUPTS \
	(~((_AC(1,ULL) << INT_PERF_COUNT) | (_AC(1,ULL) << INT_AUX_PERF_COUNT)))

#if CHIP_HAS_SPLIT_INTR_MASK()
/* The same macro, but for the two 32-bit SPRs separately. */
#define LINUX_MASKABLE_INTERRUPTS_LO (-1)
#define LINUX_MASKABLE_INTERRUPTS_HI \
	(~((1 << (INT_PERF_COUNT - 32)) | (1 << (INT_AUX_PERF_COUNT - 32))))
#endif

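/*
 * Illustrative example (the real INT_xxx values come from
 * <arch/interrupts.h>): if, say, INT_PERF_COUNT were 33 and
 * INT_AUX_PERF_COUNT were 34, the 64-bit mask above would split as
 * LINUX_MASKABLE_INTERRUPTS_LO == 0xffffffff (all of word 0 maskable)
 * and LINUX_MASKABLE_INTERRUPTS_HI == ~((1 << 1) | (1 << 2)).
 */
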
#ifndef __ASSEMBLY__

/* NOTE: we can't include <linux/percpu.h> due to #include dependencies. */
#include <asm/percpu.h>
#include <arch/spr_def.h>

/*
 * Set and clear kernel interrupt masks.
 *
 * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
 * clobber. We rely on it being equivalent to a compiler barrier in
 * this code since arch_local_irq_save() and friends must act as
 * compiler barriers. This compiler semantic is baked into enough
 * places that the compiler will maintain it going forward.
 */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
#endif
#define interrupt_mask_set(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, __mask); \
} while (0)
#define interrupt_mask_reset(n) do { \
	int __n = (n); \
	int __mask = 1 << (__n & 0x1f); \
	if (__n < 32) \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, __mask); \
	else \
		__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, __mask); \
} while (0)
#define interrupt_mask_check(n) ({ \
	int __n = (n); \
	(((__n < 32) ? \
	  __insn_mfspr(SPR_INTERRUPT_MASK_K_0) : \
	  __insn_mfspr(SPR_INTERRUPT_MASK_K_1)) \
	 >> (__n & 0x1f)) & 1; \
})
#define interrupt_mask_set_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_reset_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
} while (0)
#define interrupt_mask_save_mask() \
	(__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
	 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
#define interrupt_mask_restore_mask(mask) do { \
	unsigned long long __m = (mask); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
	__insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
} while (0)
#else
#define interrupt_mask_set(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
#define interrupt_mask_reset(n) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (1UL << (n)))
#define interrupt_mask_check(n) \
	((__insn_mfspr(SPR_INTERRUPT_MASK_K) >> (n)) & 1)
#define interrupt_mask_set_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
#define interrupt_mask_reset_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
#define interrupt_mask_save_mask() \
	__insn_mfspr(SPR_INTERRUPT_MASK_K)
#define interrupt_mask_restore_mask(mask) \
	__insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
#endif
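
/*
 * Usage sketch (illustrative; INT_TILE_TIMER is one of the INT_xxx
 * numbers from <arch/interrupts.h>): to block and later re-allow
 * timer ticks on this cpu:
 *
 *	interrupt_mask_set(INT_TILE_TIMER);
 *	...
 *	interrupt_mask_reset(INT_TILE_TIMER);
 *
 * interrupt_mask_check(INT_TILE_TIMER) then reports whether that
 * interrupt is currently masked, and the *_mask variants apply a
 * whole 64-bit mask in a single call.
 */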

/*
 * The set of interrupts we want active if irqs are enabled.
 * Note that in particular, the tile timer interrupt comes and goes
 * from this set, since we have no other way to turn off the timer.
 * Likewise, INTCTRL_K is removed and re-added during device
 * interrupts, as is the hardwall UDN_FIREWALL interrupt.
 * We use a low bit (MEM_ERROR) as our sentinel value and make sure it
 * is always claimed as an "active interrupt" so we can query that bit
 * to know our current state.
 */
DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
#define INITIAL_INTERRUPTS_ENABLED (1ULL << INT_MEM_ERROR)
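
/*
 * Concretely, this sentinel is what lets arch_irqs_disabled() below
 * test only the INT_MEM_ERROR bit of the hardware mask: that bit is
 * always set in interrupts_enabled_mask, so it reads as masked
 * exactly when this cpu has disabled its maskable interrupts.
 */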

#ifdef CONFIG_DEBUG_PREEMPT
/* Due to inclusion issues, we can't rely on <linux/smp.h> here. */
extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#endif

/* Disable interrupts. */
#define arch_local_irq_disable() \
	interrupt_mask_set_mask(LINUX_MASKABLE_INTERRUPTS)

/* Disable all interrupts, including NMIs. */
#define arch_local_irq_disable_all() \
	interrupt_mask_set_mask(-1ULL)

/*
 * Read the set of maskable interrupts.
 * We avoid the preemption warning here via raw_cpu_ptr since even
 * if irqs are already enabled, it's harmless to read the wrong cpu's
 * enabled mask.
 */
#define arch_local_irqs_enabled() \
	(*raw_cpu_ptr(&interrupts_enabled_mask))

/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
	interrupt_mask_reset_mask(arch_local_irqs_enabled())

/* Disable or enable interrupts based on flag argument. */
#define arch_local_irq_restore(disabled) do { \
	if (disabled) \
		arch_local_irq_disable(); \
	else \
		arch_local_irq_enable(); \
} while (0)

/* Return true if "flags" argument means interrupts are disabled. */
#define arch_irqs_disabled_flags(flags) ((flags) != 0)

/* Return true if interrupts are currently disabled. */
#define arch_irqs_disabled() interrupt_mask_check(INT_MEM_ERROR)

/* Save whether interrupts are currently disabled. */
#define arch_local_save_flags() arch_irqs_disabled()

/* Save whether interrupts are currently disabled, then disable them. */
#define arch_local_irq_save() ({ \
	unsigned long __flags = arch_local_save_flags(); \
	arch_local_irq_disable(); \
	__flags; })
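
/*
 * Typical usage sketch ("flags" is just a local variable, not a
 * kernel symbol):
 *
 *	unsigned long flags = arch_local_irq_save();
 *	... critical section, maskable interrupts blocked ...
 *	arch_local_irq_restore(flags);
 *
 * Since arch_local_save_flags() is arch_irqs_disabled(), the saved
 * "flags" value here is simply 0 or 1.
 */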

/* Prevent the given interrupt from being enabled next time we enable irqs. */
#define arch_local_irq_mask(interrupt) \
	this_cpu_and(interrupts_enabled_mask, ~(1ULL << (interrupt)))

/* Prevent the given interrupt from being enabled immediately. */
#define arch_local_irq_mask_now(interrupt) do { \
	arch_local_irq_mask(interrupt); \
	interrupt_mask_set(interrupt); \
} while (0)

/* Allow the given interrupt to be enabled next time we enable irqs. */
#define arch_local_irq_unmask(interrupt) \
	this_cpu_or(interrupts_enabled_mask, (1ULL << (interrupt)))

/* Allow the given interrupt to be enabled immediately, if !irqs_disabled. */
#define arch_local_irq_unmask_now(interrupt) do { \
	arch_local_irq_unmask(interrupt); \
	if (!irqs_disabled()) \
		interrupt_mask_reset(interrupt); \
} while (0)
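
/*
 * For example (a sketch; the spelling INT_INTCTRL_K for the INTCTRL_K
 * interrupt named in the comment above is assumed here from
 * <arch/interrupts.h>), a device-interrupt path can defer that
 * interrupt with arch_local_irq_mask(INT_INTCTRL_K) and later make
 * it deliverable again with arch_local_irq_unmask_now(INT_INTCTRL_K).
 */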

#else /* __ASSEMBLY__ */

/* We provide a somewhat more restricted set for assembly. */

#ifdef __tilegx__

#if INT_MEM_ERROR != 0
# error Fix IRQS_DISABLED() macro
#endif

/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
#define IRQS_DISABLED(tmp) \
	mfspr tmp, SPR_INTERRUPT_MASK_K; \
	andi tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli reg, hw2_last(interrupts_enabled_mask); \
	shl16insli reg, reg, hw1(interrupts_enabled_mask); \
	shl16insli reg, reg, hw0(interrupts_enabled_mask); \
	add reg, reg, tp
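
/*
 * The moveli/shl16insli sequence above materializes the 64-bit
 * address of interrupts_enabled_mask 16 bits at a time; adding "tp"
 * (the per-cpu base pointer) then yields this cpu's copy.
 */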

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	moveli tmp0, hw2_last(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw1(LINUX_MASKABLE_INTERRUPTS); \
	shl16insli tmp0, tmp0, hw0(LINUX_MASKABLE_INTERRUPTS); \
	mtspr SPR_INTERRUPT_MASK_SET_K, tmp0

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei tmp, -1; \
	mtspr SPR_INTERRUPT_MASK_SET_K, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	ld tmp0, tmp0
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
	mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0

#else /* !__tilegx__ */

/*
 * Return 0 or 1 to indicate whether interrupts are currently disabled.
 * Note that it's important that we use a bit from the "low" mask word,
 * since when we are enabling, that is the word we write first, so if we
 * are interrupted after only writing half of the mask, the interrupt
 * handler will correctly observe that we have interrupts enabled, and
 * will enable interrupts itself on return from the interrupt handler
 * (making the original code's write of the "high" mask word idempotent).
 */
#define IRQS_DISABLED(tmp) \
	mfspr tmp, SPR_INTERRUPT_MASK_K_0; \
	shri tmp, tmp, INT_MEM_ERROR; \
	andi tmp, tmp, 1

/* Load up a pointer to &interrupts_enabled_mask. */
#define GET_INTERRUPTS_ENABLED_MASK_PTR(reg) \
	moveli reg, lo16(interrupts_enabled_mask); \
	auli reg, reg, ha16(interrupts_enabled_mask); \
	add reg, reg, tp

/* Disable interrupts. */
#define IRQ_DISABLE(tmp0, tmp1) \
	{ \
	 movei tmp0, LINUX_MASKABLE_INTERRUPTS_LO; \
	 moveli tmp1, lo16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	{ \
	 mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp0; \
	 auli tmp1, tmp1, ha16(LINUX_MASKABLE_INTERRUPTS_HI) \
	}; \
	mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp1

/* Disable ALL synchronous interrupts (used by NMI entry). */
#define IRQ_DISABLE_ALL(tmp) \
	movei tmp, -1; \
	mtspr SPR_INTERRUPT_MASK_SET_K_0, tmp; \
	mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp

/* Enable interrupts. */
#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
	GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
	{ \
	 lw tmp0, tmp0; \
	 addi tmp1, tmp0, 4 \
	}; \
	lw tmp1, tmp1
#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
	mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
	mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
#endif

#define IRQ_ENABLE(tmp0, tmp1) \
	IRQ_ENABLE_LOAD(tmp0, tmp1); \
	IRQ_ENABLE_APPLY(tmp0, tmp1)

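/*
 * Usage sketch (register choices are illustrative): interrupt-entry
 * assembly brackets a window as
 *
 *	IRQ_DISABLE(r20, r21)
 *	...
 *	IRQ_ENABLE(r20, r21)
 *
 * The separate _LOAD/_APPLY halves let a caller start the per-cpu
 * mask load early and apply it later, once it is safe to do so.
 */
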
/*
 * Do the CPU's IRQ-state tracing from assembly code. We call a
 * C function, but almost everywhere we do, we don't mind clobbering
 * all the caller-saved registers.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
# define TRACE_IRQS_ON jal trace_hardirqs_on
# define TRACE_IRQS_OFF jal trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_OFF
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_TILE_IRQFLAGS_H */