/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/sys_sh64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/SH5
 * platform.
 *
 * Mostly taken from the i386 version.
 */

#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/ipc.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

#define REG_3	3

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way Unix traditionally does this, though.
 */
#ifdef NEW_PIPE_IMPLEMENTATION
asmlinkage int sys_pipe(unsigned long * fildes,
			unsigned long   dummy_r3,
			unsigned long   dummy_r4,
			unsigned long   dummy_r5,
			unsigned long   dummy_r6,
			unsigned long   dummy_r7,
			struct pt_regs * regs)	   /* r8 = pt_regs forced by entry.S */
{
	int fd[2];
	int ret;

	ret = do_pipe(fd);
	if (ret == 0)
		/*
		 ***********************************************************************
		 * To avoid the copy_to_user we prefer to break the ABI's convention,  *
		 * packing the valid pair of file IDs into a single register (r3),     *
		 * while r2 is the return code as defined by the sh5 ABI.              *
		 * BE CAREFUL: the pipe stub in glibc must be aware of this solution.  *
		 ***********************************************************************

#ifdef __LITTLE_ENDIAN__
		regs->regs[REG_3] = (((unsigned long long) fd[1]) << 32) | ((unsigned long long) fd[0]);
#else
		regs->regs[REG_3] = (((unsigned long long) fd[0]) << 32) | ((unsigned long long) fd[1]);
#endif

		*/
		/* although not very clever, this is endianness independent */
		regs->regs[REG_3] = (unsigned long long) *((unsigned long long *) fd);

	return ret;
}

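/*
 * Illustrative sketch, not part of this file: a userspace pipe() stub
 * honouring the r2/r3 convention above could recover the descriptor
 * pair like this ("packed" stands for the value the trap left in r3;
 * the stub is assumed to share the kernel's endianness):
 *
 *	unsigned long long packed = <r3 after the trap>;
 *	int fildes[2];
 *	memcpy(fildes, &packed, sizeof(fildes));
 *
 * The memcpy() is the exact inverse of the kernel's native-order store
 * into regs->regs[REG_3] above.
 */
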
#else
asmlinkage int sys_pipe(unsigned long * fildes)
{
	int fd[2];
	int error;

	error = do_pipe(fd);
	if (!error) {
		if (copy_to_user(fildes, fd, 2*sizeof(int)))
			error = -EFAULT;
	}
	return error;
}

#endif

/*
 * To avoid cache aliasing, we map the shared page with the same colour.
 */
#define COLOUR_ALIGN(addr)	(((addr)+SHMLBA-1)&~(SHMLBA-1))

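/*
 * Worked example (illustrative; assuming SHMLBA were 0x4000 here):
 *
 *	COLOUR_ALIGN(0x12345) == (0x12345 + 0x3fff) & ~0x3fff == 0x14000
 *
 * i.e. addresses are rounded up to the next SHMLBA boundary, so all
 * such mappings land on the same cache colour.
 */
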
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vma;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) && (addr & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	/* Private mappings need only page alignment; anything that may
	 * be shared must keep the SHMLBA colour. */
	if (flags & MAP_PRIVATE)
		addr = PAGE_ALIGN(addr);
	else
		addr = COLOUR_ALIGN(addr);

	/* Linear scan of the VMA list for the first hole of at least
	 * len bytes at or above addr. */
	for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vma || addr + len <= vma->vm_start)
			return addr;
		addr = vma->vm_end;
		if (!(flags & MAP_PRIVATE))
			addr = COLOUR_ALIGN(addr);
	}
}

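/*
 * Illustrative only (again assuming SHMLBA == 0x4000): a MAP_FIXED |
 * MAP_SHARED request at addr == 0x10100 is refused above with -EINVAL,
 * since the low SHMLBA bits are set and the mapping would alias in the
 * cache, while addr == 0x10000 is returned unchanged.
 */
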
/* common code for old and new mmaps */
static inline long do_mmap2(
	unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	int error = -EBADF;
	struct file * file = NULL;

	/* These flags are managed by the kernel, not set by userspace. */
	flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	down_write(&current->mm->mmap_sem);
	error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
	up_write(&current->mm->mmap_sem);

	if (file)
		fput(file);
out:
	return error;
}

asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	unsigned long fd, unsigned long pgoff)
{
	return do_mmap2(addr, len, prot, flags, fd, pgoff);
}

asmlinkage int old_mmap(unsigned long addr, unsigned long len,
	unsigned long prot, unsigned long flags,
	int fd, unsigned long off)
{
	if (off & ~PAGE_MASK)
		return -EINVAL;
	return do_mmap2(addr, len, prot, flags, fd, off>>PAGE_SHIFT);
}

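/*
 * Illustrative only: mapping the same file region through both entry
 * points. old_mmap() takes a byte offset, which must be page-aligned,
 * while sys_mmap2() takes the offset in pages:
 *
 *	old_mmap (addr, len, prot, flags, fd, off);
 *	sys_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
 */
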
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls.
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc(uint call, int first, int second,
		       int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

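	/*
	 * Illustrative only: with the historical IPC numbering
	 * (SHMAT == 21), call == (1 << 16) | 21 == 0x10015 demultiplexes
	 * to version 1, call SHMAT -- the iBCS2 entry point handled
	 * below.
	 */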
	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second, NULL);
		case SEMTIMEDOP:
			return sys_semtimedop(first, (struct sembuf __user *)ptr,
					      second,
					      (const struct timespec __user *)fifth);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void * __user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
		}
		default:
			return -EINVAL;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, (struct msgbuf __user *) ptr,
					   second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;

				if (copy_from_user(&tmp,
						   (struct ipc_kludge __user *) ptr,
						   sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
			}
			default:
				return sys_msgrcv (first,
						   (struct msgbuf __user *) ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second,
					   (struct msqid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, (char __user *) ptr,
						second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			case 1: /* iBCS2 emulator entry point */
				if (!segment_eq(get_fs(), get_ds()))
					return -EINVAL;
				return do_shmat (first, (char __user *) ptr,
						 second, (ulong *) third);
			}
		case SHMDT:
			return sys_shmdt ((char __user *)ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second,
					   (struct shmid_ds __user *) ptr);
		default:
			return -EINVAL;
		}

	return -EINVAL;
}

asmlinkage int sys_uname(struct old_utsname * name)
{
	int err;
	if (!name)
		return -EFAULT;
	down_read(&uts_sem);
	err = copy_to_user(name, utsname(), sizeof (*name));
	up_read(&uts_sem);
	return err ? -EFAULT : 0;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register unsigned long __sc0 __asm__ ("r9") = ((0x13 << 16) | __NR_execve);
	register unsigned long __sc2 __asm__ ("r2") = (unsigned long) filename;
	register unsigned long __sc3 __asm__ ("r3") = (unsigned long) argv;
	register unsigned long __sc4 __asm__ ("r4") = (unsigned long) envp;
	__asm__ __volatile__ ("trapa %1 !\t\t\t execve(%2,%3,%4)"
			      : "=r" (__sc0)
			      : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) );
	/* The dummy asm consumes the registers and clobbers memory, so
	 * the compiler can neither reorder nor discard the trap above. */
	__asm__ __volatile__ ("!dummy %0 %1 %2 %3"
			      : : "r" (__sc0), "r" (__sc2), "r" (__sc3), "r" (__sc4) : "memory");
	return __sc0;
}
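
/*
 * Illustrative sketch of a typical caller (modelled on the
 * run_init_process() logic in init/main.c; the names below are
 * assumptions, not taken from this file):
 *
 *	static char *argv_init[] = { "/sbin/init", NULL };
 *	static char *envp_init[] = { "HOME=/", "TERM=linux", NULL };
 *
 *	kernel_execve("/sbin/init", argv_init, envp_init);
 */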