/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <linux/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/regs.h>

extern void ret_from_fork(void);
extern void ret_from_kernel_thread(void);

struct task_struct *current_set[NR_CPUS] = {&init_task, };

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
        unsigned long cpenable;
        int i;

        /* Make sure we don't switch tasks during this operation. */

        preempt_disable();

        /* Walk through all coprocessor owners and release those owned by ti. */

        cpenable = ti->cpenable;

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (coprocessor_owner[i] == ti) {
                        coprocessor_owner[i] = 0;
                        cpenable &= ~(1 << i);
                }
        }

        ti->cpenable = cpenable;
        coprocessor_clear_cpenable();

        preempt_enable();
}

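/*
 * Flush (save) the context of every coprocessor that is both enabled in
 * ti->cpenable and currently owned by this thread.
 */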
void coprocessor_flush_all(struct thread_info *ti)
{
        unsigned long cpenable;
        int i;

        preempt_disable();

        cpenable = ti->cpenable;

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
                        coprocessor_flush(ti, i);
                cpenable >>= 1;
        }

        preempt_enable();
}

#endif


/*
 * Power management idle function, if any is provided by the platform.
 */

void cpu_idle(void)
{
        local_irq_enable();

        /* endless idle loop with no priority at all */
        while (1) {
                rcu_idle_enter();
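                /* platform_idle() is the platform hook that waits for the
                 * next interrupt (typically a 'waiti 0' on the default
                 * platform). */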
                while (!need_resched())
                        platform_idle();
                rcu_idle_exit();
                schedule_preempt_disabled();
        }
}

/*
 * This is called when the thread calls exit().
 */
void exit_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
        coprocessor_release_all(current_thread_info());
#endif
}

/*
 * Flush thread state. This is called when a thread does an execve().
 * Note that we flush the coprocessor registers in case execve() fails.
 */
void flush_thread(void)
{
#if XTENSA_HAVE_COPROCESSORS
        struct thread_info *ti = current_thread_info();
        coprocessor_flush_all(ti);
        coprocessor_release_all(ti);
#endif
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#if XTENSA_HAVE_COPROCESSORS
        coprocessor_flush_all(task_thread_info(src));
#endif
        *dst = *src;
        return 0;
}

/*
 * Copy thread.
 *
 * There are two modes in which this function is called:
 * 1) Userspace thread creation,
 *    regs != NULL, usp_thread_fn is userspace stack pointer.
 *    It is expected to copy parent regs (in case CLONE_VM is not set
 *    in the clone_flags) and set up passed usp in the childregs.
 * 2) Kernel thread creation,
 *    regs == NULL, usp_thread_fn is the function to run in the new thread
 *    and thread_fn_arg is its parameter.
 *    childregs are not used for the kernel threads.
 *
 * The stack layout for the new thread looks like this:
 *
 *      +------------------------+
 *      |       childregs        |
 *      +------------------------+ <- thread.sp = sp in dummy-frame
 *      |      dummy-frame       |    (saved in dummy-frame spill-area)
 *      +------------------------+
 *
 * We create a dummy frame to return to either ret_from_fork or
 *   ret_from_kernel_thread:
 *   a0 points to ret_from_fork/ret_from_kernel_thread (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused for userspace threads,
 *   a2 points to thread_fn, a3 holds thread_fn arg for kernel threads.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 *       childregs.
 *
 * The fun part: if we're keeping the same VM (i.e. cloning a thread,
 * not an entire process), we're normally given a new usp, and we CANNOT share
 * any live address register windows. If we just copy those live frames over,
 * the two threads (parent and child) will overflow the same frames onto the
 * parent stack at different times, likely corrupting the parent stack (esp.
 * if the parent returns from functions that called clone() and calls new
 * ones, before the child overflows its now old copies of its parent windows).
 * One solution is to spill windows to the parent stack, but that's fairly
 * involved. Much simpler to just not copy those live frames across.
 */

int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
                unsigned long thread_fn_arg, struct task_struct *p)
{
        struct pt_regs *childregs = task_pt_regs(p);

#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
        struct thread_info *ti;
#endif

        /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
        *((int*)childregs - 3) = (unsigned long)childregs;
        *((int*)childregs - 4) = 0;

        p->thread.sp = (unsigned long)childregs;

        if (!(p->flags & PF_KTHREAD)) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long usp = usp_thread_fn ?
                        usp_thread_fn : regs->areg[1];

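                /* Return into ret_from_fork; MAKE_RA_FOR_CALL() stores the
                 * window increment (1, i.e. a call4) in the top two bits of
                 * the return address. */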
                p->thread.ra = MAKE_RA_FOR_CALL(
                                (unsigned long)ret_from_fork, 0x1);

                /* This does not copy all the regs.
                 * In a bout of brilliance or madness,
                 * ARs beyond a0-a15 exist past the end of the struct.
                 */
                *childregs = *regs;
                childregs->areg[1] = usp;
                childregs->areg[2] = 0;

                /* When sharing memory with the parent thread, the child
                   usually starts on a pristine stack, so we have to reset
                   windowbase, windowstart and wmask.
                   (Note that such a new thread is required to always create
                   an initial call4 frame)
                   The exception is vfork, where the new thread continues to
                   run on the parent's stack until it calls execve. This could
                   be a call8 or call12, which requires a legal stack frame
                   of the previous caller for the overflow handlers to work.
                   (Note that it's always legal to overflow live registers).
                   In this case, ensure to spill at least the stack pointer
                   of that frame. */

                if (clone_flags & CLONE_VM) {
                        /* check that caller window is live and same stack */
                        int len = childregs->wmask & ~0xf;
                        if (regs->areg[1] == usp && len != 0) {
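                                /* The top two bits of the caller's return
                                 * address (a0) encode the call size:
                                 * 1 = call4, 2 = call8, 3 = call12. */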
                                int callinc = (regs->areg[0] >> 30) & 3;
                                int caller_ars = XCHAL_NUM_AREGS - callinc * 4;
                                put_user(regs->areg[caller_ars+1],
                                         (unsigned __user*)(usp - 12));
                        }
                        childregs->wmask = 1;
                        childregs->windowstart = 1;
                        childregs->windowbase = 0;
                } else {
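                        /* fork(): also copy the live caller register windows,
                         * which are kept past the end of a0..a15 (see the
                         * comment above "*childregs = *regs"). */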
                        int len = childregs->wmask & ~0xf;
                        memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
                               &regs->areg[XCHAL_NUM_AREGS - len/4], len);
                }

                /* The thread pointer is passed in the '4th argument' (= a5) */
                if (clone_flags & CLONE_SETTLS)
                        childregs->threadptr = childregs->areg[5];
        } else {
                p->thread.ra = MAKE_RA_FOR_CALL(
                                (unsigned long)ret_from_kernel_thread, 1);

                /* pass parameters to ret_from_kernel_thread:
                 * a2 = thread_fn, a3 = thread_fn arg
                 */
                *((int *)childregs - 1) = thread_fn_arg;
                *((int *)childregs - 2) = usp_thread_fn;

                /* Childregs are only used when we're going to userspace
                 * in which case start_thread will set them up.
                 */
        }

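        /* The child starts with all coprocessors disabled; cpenable bits are
         * set again when the thread first touches a coprocessor. */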
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
        ti = task_thread_info(p);
        ti->cpenable = 0;
#endif

        return 0;
}


/*
 * These bracket the sleeping functions..
 */

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long sp, pc;
        unsigned long stack_page = (unsigned long) task_stack_page(p);
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.sp;
        pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

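        /* Follow the chain of saved (a0, a1) spill slots, giving up after 16
         * frames or when the walk leaves the task's stack. */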
        do {
                if (sp < stack_page + sizeof(struct task_struct) ||
                    sp >= (stack_page + THREAD_SIZE) ||
                    pc == 0)
                        return 0;
                if (!in_sched_functions(pc))
                        return pc;

                /* Stack layout: sp-4: ra, sp-3: sp' */

                pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
                sp = *((unsigned long *)sp - 3);
        } while (count++ < 16);
        return 0;
}

/*
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers. Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not. Exception handling (primarily) uses
 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
 *
 */

void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
        unsigned long wb, ws, wm;
        int live, last;

        wb = regs->windowbase;
        ws = regs->windowstart;
        wm = regs->wmask;
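        /* Rotate windowstart right by windowbase so that the bit for the
         * current window frame ends up in bit 0. */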
        ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);

        /* Don't leak any random bits. */

        memset(elfregs, 0, sizeof(*elfregs));

        /* Note: PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
         */

        elfregs->pc = regs->pc;
        elfregs->ps = (regs->ps & ~(1 << PS_EXCM_BIT));
        elfregs->lbeg = regs->lbeg;
        elfregs->lend = regs->lend;
        elfregs->lcount = regs->lcount;
        elfregs->sar = regs->sar;
        elfregs->windowstart = ws;

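        /* The low bits of wmask give the number of live registers (4, 8, 12
         * or 16) at the start of areg[]; wmask >> 4 is the number of
         * four-register caller frames kept at the top of areg[]. Copy both
         * groups into elfregs->a. */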
        live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
        last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
        memcpy(elfregs->a, regs->areg, live * 4);
        memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
}

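/*
 * dump_fpu() reports whether there is FPU state to add to a core file;
 * returning 0 means there is none.
 */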
int dump_fpu(void)
{
        return 0;
}