/*
 * arch/x86/kernel/irq_32.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/mm.h>

#include <asm/apic.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

#ifdef CONFIG_DEBUG_STACKOVERFLOW

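/* toggled via the kernel.panic_on_stackoverflow sysctl */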
int sysctl_panic_on_stackoverflow __read_mostly;

/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

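        /*
         * Mask %esp with THREAD_SIZE - 1: the thread stack is
         * THREAD_SIZE-aligned, so this yields the offset of the stack
         * pointer within the stack, i.e. how many bytes remain before
         * it would run into the thread_info at the bottom.
         */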
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
        if (sysctl_panic_on_stackoverflow)
                panic("low stack detected by irq handler - check messages\n");
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
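/*
 * The THREAD_SIZE alignment below preserves the "thread_info sits at
 * the bottom of the stack" layout of a normal task stack, so
 * current_thread_info(), which masks %esp, keeps working while we are
 * running on an irq stack.
 */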
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
} __attribute__((aligned(THREAD_SIZE)));

static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);

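/*
 * Call func on the given stack: xchgl swaps %esp with the new stack
 * pointer, func is invoked indirectly through %edi, and the old %esp
 * (parked in %ebx) is restored afterwards.  %ebx is declared as both
 * input and output so the compiler knows it is live across the asm.
 */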
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = __this_cpu_read(hardirq_ctx);

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
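        /*
         * Copy the task pointer and record the interrupted %esp in
         * previous_esp, so that the stack unwinder can walk from the
         * irq stack back to the interrupted thread stack.
         */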
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

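        /*
         * Same stack-switch trick as call_on_stack(), but here the
         * handler takes arguments: the 32-bit kernel is built with
         * -mregparm=3, so irq and desc are pinned to %eax and %edx,
         * the first two parameter registers of that convention.
         */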
        asm volatile("xchgl %%ebx,%%esp \n"
                     "call *%%edi \n"
                     "movl %%ebx,%%esp \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     : "0" (irq), "1" (desc), "2" (isp),
                       "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (per_cpu(hardirq_ctx, cpu))
                return;

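        /*
         * Each context is a THREAD_SIZE-aligned block of pages whose
         * bottom holds a thread_info, mirroring a regular task stack.
         * addr_limit is set to 0, so a stray user-space access
         * attempted while on this stack cannot pass access_ok().
         */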
        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        per_cpu(hardirq_ctx, cpu) = irqctx;

        irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
                                               THREADINFO_GFP,
                                               THREAD_SIZE_ORDER));
        memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        per_cpu(softirq_ctx, cpu) = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
}

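/*
 * Run the pending softirqs on the dedicated softirq stack.  Unlike the
 * hardirq path there is no "already on this stack" check: do_softirq()
 * only calls this when we are not in interrupt context.
 */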
void do_softirq_own_stack(void)
{
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        curctx = current_thread_info();
        irqctx = __this_cpu_read(softirq_ctx);
        irqctx->tinfo.task = curctx->task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /* build the stack frame on the softirq stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

        call_on_stack(__do_softirq, isp);
}

bool handle_irq(unsigned irq, struct pt_regs *regs)
{
        struct irq_desc *desc;
        int overflow;

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc))
                return false;

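        /*
         * If the interrupt arrived from user mode we are at the top of
         * an otherwise empty kernel stack, so there is no need to
         * switch; otherwise try the irq stack, and fall back to the
         * current stack when we are already running on it.
         */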
        if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        return true;
}