ARC: Process-creation/scheduling/idle-loop
arch/arc/kernel/process.c
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Amit Bhor, Kanika Nema: Codito Technologies 2004
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/elf.h>
#include <linux/tick.h>

SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
{
        task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
        return 0;
}

/*
 * We return the user space TLS data ptr as the sys-call return code.
 * Ideally it should be a copy-to-user, but we can cheat by the fact
 * that only absurdly high return values are treated as sys-call errors.
 * Since the TLS data ptr is never going to be in the 0xFFFF_xxxx range,
 * it won't be mistaken for a sys-call error, and this is loads better
 * than copy-to-user, which is a definite D-TLB miss.
 */
SYSCALL_DEFINE0(arc_gettls)
{
        return task_thread_info(current)->thr_ptr;
}
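
/*
 * Illustrative only, not part of this file: a minimal sketch of how a C
 * library might drive the two TLS sys-calls above from user space via
 * syscall(2).  The __NR_arc_settls/__NR_arc_gettls numbers are assumed to
 * come from the ARC uapi unistd header of this era.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static void set_tls(void *tp)
 *	{
 *		syscall(__NR_arc_settls, tp);	/- kernel stores tp in thr_ptr
 *	}
 *
 *	static void *get_tls(void)
 *	{
 *		/- raw pointer comes back as the return code, per the comment above
 *		return (void *)syscall(__NR_arc_gettls);
 *	}
 *
 * (Inner comments use "/-" so this whole sketch stays one block comment.)
 */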

static inline void arch_idle(void)
{
        /* sleep, but enable all interrupts before committing */
        __asm__("sleep 0x3");
}

void cpu_idle(void)
{
        /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                rcu_idle_enter();

doze:
                local_irq_disable();
                if (!need_resched()) {
                        arch_idle();
                        goto doze;
                } else {
                        local_irq_enable();
                }

                rcu_idle_exit();
                tick_nohz_idle_exit();

                schedule_preempt_disabled();
        }
}
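
/*
 * Illustrative note on the irq-disable/doze dance in cpu_idle() above, not
 * part of the original file: checking need_resched() with interrupts enabled
 * would leave a window where a wakeup IRQ lands after the check but before
 * the SLEEP, parking the CPU with work pending.  Disabling interrupts for
 * the check and letting the SLEEP operand re-enable them as it commits (per
 * the arch_idle() comment) closes that window:
 *
 *	local_irq_disable();
 *	if (!need_resched())
 *		arch_idle();	<-- "sleep 0x3" re-enables interrupts as it commits
 */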

asmlinkage void ret_from_fork(void);

/* Layout of the child's kernel mode stack, as set up at the end of this
 * function:
 *
 * |      ...       |
 * |      ...       |
 * |     unused     |
 * |                |
 * ------------------  <==== top of Stack (thread.ksp)
 * |  UNUSED 1 word |
 * ------------------
 * |      r25       |
 * ~                ~
 * |     --to--     |   (CALLEE Regs of user mode)
 * |      r13       |
 * ------------------
 * |      fp        |
 * |     blink      |   @ret_from_fork
 * ------------------
 * |                |
 * ~                ~
 * ~                ~
 * |                |
 * ------------------
 * |      r12       |
 * ~                ~
 * |     --to--     |   (scratch Regs of user mode)
 * |      r0        |
 * ------------------
 * |  UNUSED 1 word |
 * ------------------  <===== END of PAGE
 */
int copy_thread(unsigned long clone_flags,
                unsigned long usp, unsigned long arg,
                struct task_struct *p)
{
        struct pt_regs *c_regs;         /* child's pt_regs */
        unsigned long *childksp;        /* to unwind out of __switch_to() */
        struct callee_regs *c_callee;   /* child's callee regs */
        struct callee_regs *parent_callee;      /* parent's callee regs */
        struct pt_regs *regs = current_pt_regs();

        /* Mark the specific anchors to begin with (see pic above) */
        c_regs = task_pt_regs(p);
        childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
        c_callee = ((struct callee_regs *)childksp) - 1;

        /*
         * __switch_to() uses thread.ksp to start unwinding the stack.
         * For kernel threads we don't need to create callee regs, but the
         * stack layout nevertheless needs to remain the same.
         * Also, since __switch_to() unwinds callee regs anyway, we use
         * this to populate the kernel thread's entry point/args into the
         * callee regs, so that ret_from_kernel_thread() becomes simpler.
         */
        p->thread.ksp = (unsigned long)c_callee;        /* THREAD_KSP */

        /* __switch_to expects FP(0), BLINK(return addr) at top */
        childksp[0] = 0;                                /* fp */
        childksp[1] = (unsigned long)ret_from_fork;     /* blink */

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(c_regs, 0, sizeof(struct pt_regs));

                c_callee->r13 = arg;    /* argument to kernel thread */
                c_callee->r14 = usp;    /* function */

                return 0;
        }

        /*--------- User Task Only --------------*/

        /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
        childksp[0] = 0;                                /* for POP fp */
        childksp[1] = (unsigned long)ret_from_fork;     /* for POP blink */

        /* Copy parent's pt_regs onto the child's kernel mode stack */
        *c_regs = *regs;

        if (usp)
                c_regs->sp = usp;

        c_regs->r0 = 0;         /* fork returns 0 in child */

        parent_callee = ((struct callee_regs *)regs) - 1;
        *c_callee = *parent_callee;

        if (unlikely(clone_flags & CLONE_SETTLS)) {
                /*
                 * Set the task's userland TLS data ptr from the 4th arg:
                 * the C library clone() call differs from the clone sys-call
                 */
                task_thread_info(p)->thr_ptr = regs->r3;
        } else {
                /* Normal fork case: set parent's TLS ptr in child */
                task_thread_info(p)->thr_ptr =
                        task_thread_info(current)->thr_ptr;
        }

        return 0;
}
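
/*
 * Illustrative only, not part of this file: how the CLONE_SETTLS path in
 * copy_thread() above is typically reached from user space.  With the raw
 * clone sys-call the TLS pointer travels as the 4th argument (register r3,
 * which is what regs->r3 picks up); the C library's clone() wrapper uses a
 * different C-level argument order.  __NR_clone and the exact argument
 * order below are assumptions for the sketch.
 *
 *	#include <sched.h>
 *	#include <signal.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static long fork_with_tls(void *tls)
 *	{
 *		return syscall(__NR_clone, CLONE_SETTLS | SIGCHLD,
 *			       NULL,	(assumed) child stack: NULL keeps current sp
 *			       NULL,	parent_tidptr
 *			       tls,	4th arg -> lands in regs->r3
 *			       NULL);	child_tidptr
 *	}
 */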

/*
 * Some archs flush debug and FPU info here
 */
void flush_thread(void)
{
}

/*
 * Free any architecture-specific thread data structures, etc.
 */
void exit_thread(void)
{
}

int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
        return 0;
}

/*
 * API expected by the scheduler code: if a thread is sleeping, where is it
 * sleeping?  What is this good for?  It will always be either the scheduler
 * or ret_from_fork, so we hard code that anyway.
 */
unsigned long thread_saved_pc(struct task_struct *t)
{
        struct pt_regs *regs = task_pt_regs(t);
        unsigned long blink = 0;

        /*
         * If the thread being queried is not itself calling this, then it
         * implies it is not executing, which in turn implies it is sleeping,
         * which in turn implies it got switched OUT by the scheduler.
         * In that case, its kernel mode blink can be reliably retrieved as
         * per the picture above (right above pt_regs).
         */
        if (t != current && t->state != TASK_RUNNING)
                blink = *((unsigned int *)regs - 1);

        return blink;
}

int elf_check_arch(const struct elf32_hdr *x)
{
        unsigned int eflags;

        if (x->e_machine != EM_ARCOMPACT)
                return 0;

        eflags = x->e_flags;
        if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_V2) {
                pr_err("ABI mismatch - you need newer toolchain\n");
                force_sigsegv(SIGSEGV, current);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(elf_check_arch);
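
/*
 * Worked example for the OSABI check above (illustrative; the constant
 * values are assumed from the ARC ELF headers of this era, roughly
 * EF_ARC_OSABI_MSK == 0x0f00 and EF_ARC_OSABI_V2 == 0x0200):
 *
 *	e_flags = 0x00000302:  (0x0302 & 0x0f00) == 0x0300 >= 0x0200 -> accepted
 *	e_flags = 0x00000002:  (0x0002 & 0x0f00) == 0x0000 <  0x0200 -> old-ABI
 *	                        binary: it is refused and a SIGSEGV is forced
 */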