x86/vm86: Move vm86 fields out of 'thread_struct'
arch/x86/kernel/vm86_32.c
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  29 dec 2001 - Fixed oopses caused by unchecked access to the vm86
 *                stack - Manfred Spraul <manfred@colorfullife.com>
 *
 *  22 mar 2002 - Manfred detected the stackfaults, but didn't handle
 *                them correctly. Now the emulation will be in a
 *                consistent state after stackfaults - Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  22 mar 2002 - Added missing clear_IF in set_vflags_* Kasper Dupont
 *                <kasperd@daimi.au.dk>
 *
 *  ?? ??? 2002 - Fixed premature returns from handle_vm86_fault
 *                caused by Kasper Dupont's changes - Stas Sergeev
 *
 *   4 apr 2002 - Fixed CHECK_IF_IN_TRAP broken by Stas' changes.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed syntax of macros in handle_vm86_fault.
 *                Kasper Dupont <kasperd@daimi.au.dk>
 *
 *   9 apr 2002 - Changed stack access macros to jump to a label
 *                instead of returning to userspace. This simplifies
 *                do_int, and is needed by handle_vm86_fault. Kasper
 *                Dupont <kasperd@daimi.au.dk>
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/audit.h>
#include <linux/stddef.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/tlbflush.h>
#include <asm/irq.h>

/*
 * Known problems:
 *
 * Interrupt handling is not guaranteed:
 * - a real x86 will disable all interrupts for one instruction
 *   after a "mov ss,xx" to make stack handling atomic even without
 *   the 'lss' instruction. We can't guarantee this in v86 mode,
 *   as the next instruction might result in a page fault or similar.
 * - a real x86 will have interrupts disabled for one instruction
 *   past the 'sti' that enables them. We don't bother with all the
 *   details yet.
 *
 * Let's hope these problems do not actually matter for anything.
 */
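/*
 * For example, real-mode code commonly switches stacks with a bare
 *
 *	mov	ss, ax
 *	mov	sp, bx
 *
 * and relies on the one-instruction interrupt shadow after "mov ss" to
 * keep the pair atomic; under vm86 emulation an interrupt or a fault taken
 * on the second instruction can land between the two.  This is an
 * illustrative sketch of the problem described above, not code taken from
 * any particular DOS program.
 */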


#define KVM86	((struct kernel_vm86_struct *)regs)
#define VMPI	KVM86->vm86plus


/*
 * 8- and 16-bit register defines..
 */
#define AL(regs)	(((unsigned char *)&((regs)->pt.ax))[0])
#define AH(regs)	(((unsigned char *)&((regs)->pt.ax))[1])
#define IP(regs)	(*(unsigned short *)&((regs)->pt.ip))
#define SP(regs)	(*(unsigned short *)&((regs)->pt.sp))

/*
 * virtual flags (16 and 32-bit versions)
 */
#define VFLAGS	(*(unsigned short *)&(current->thread.vm86->v86flags))
#define VEFLAGS	(current->thread.vm86->v86flags)

#define set_flags(X, new, mask) \
((X) = ((X) & ~(mask)) | ((new) & (mask)))

#define SAFE_MASK	(0xDD5)
#define RETURN_MASK	(0xDFF)
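/*
 * SAFE_MASK (0xDD5) covers the arithmetic/control flags the vm86 task may
 * set directly: CF, PF, AF, ZF, SF, TF, DF and OF.  RETURN_MASK (0xDFF)
 * additionally passes through the reserved low bits but still excludes IF,
 * which get_vflags() synthesizes from the virtual VIF instead.
 */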

struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
	struct tss_struct *tss;
	struct pt_regs *ret;
	struct task_struct *tsk = current;
	struct vm86plus_struct __user *user;
	struct vm86 *vm86 = current->thread.vm86;
	long err = 0;

	/*
	 * This gets called from entry.S with interrupts disabled, but
	 * from process context. Enable interrupts here, before trying
	 * to access user space.
	 */
	local_irq_enable();

	if (!vm86 || !vm86->vm86_info) {
		pr_alert("no vm86_info: BAD\n");
		do_exit(SIGSEGV);
	}
	set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | vm86->v86mask);
	user = vm86->vm86_info;

	if (!access_ok(VERIFY_WRITE, user, VMPI.is_vm86pus ?
		       sizeof(struct vm86plus_struct) :
		       sizeof(struct vm86_struct))) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	put_user_try {
		put_user_ex(regs->pt.bx, &user->regs.ebx);
		put_user_ex(regs->pt.cx, &user->regs.ecx);
		put_user_ex(regs->pt.dx, &user->regs.edx);
		put_user_ex(regs->pt.si, &user->regs.esi);
		put_user_ex(regs->pt.di, &user->regs.edi);
		put_user_ex(regs->pt.bp, &user->regs.ebp);
		put_user_ex(regs->pt.ax, &user->regs.eax);
		put_user_ex(regs->pt.ip, &user->regs.eip);
		put_user_ex(regs->pt.cs, &user->regs.cs);
		put_user_ex(regs->pt.flags, &user->regs.eflags);
		put_user_ex(regs->pt.sp, &user->regs.esp);
		put_user_ex(regs->pt.ss, &user->regs.ss);
		put_user_ex(regs->es, &user->regs.es);
		put_user_ex(regs->ds, &user->regs.ds);
		put_user_ex(regs->fs, &user->regs.fs);
		put_user_ex(regs->gs, &user->regs.gs);

		put_user_ex(vm86->screen_bitmap, &user->screen_bitmap);
	} put_user_catch(err);
	if (err) {
		pr_alert("could not access userspace vm86_info\n");
		do_exit(SIGSEGV);
	}

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = vm86->saved_sp0;
	tsk->thread.sysenter_cs = __KERNEL_CS;
	load_sp0(tss, &tsk->thread);
	vm86->saved_sp0 = 0;
	put_cpu();

	ret = KVM86->regs32;

	lazy_load_gs(ret->gs);

	return ret;
}

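/*
 * Write-protect the 32 pages that map the legacy video window at
 * 0xA0000-0xBFFFF.  With VM86_SCREEN_BITMAP enabled, the first vm86-mode
 * write to each of those pages then faults, and the fault path records the
 * page in screen_bitmap so the monitor (dosemu and friends) can tell which
 * parts of the screen were touched.
 */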
static void mark_screen_rdonly(struct mm_struct *mm)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	int i;

	down_write(&mm->mmap_sem);
	pgd = pgd_offset(mm, 0xA0000);
	if (pgd_none_or_clear_bad(pgd))
		goto out;
	pud = pud_offset(pgd, 0xA0000);
	if (pud_none_or_clear_bad(pud))
		goto out;
	pmd = pmd_offset(pud, 0xA0000);
	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
	if (pmd_none_or_clear_bad(pmd))
		goto out;
	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
	for (i = 0; i < 32; i++) {
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
		pte++;
	}
	pte_unmap_unlock(pte, ptl);
out:
	up_write(&mm->mmap_sem);
	flush_tlb();
}



static int do_vm86_irq_handling(int subfunction, int irqnumber);
static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
			struct kernel_vm86_struct *info);

SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */

	return do_sys_vm86((struct vm86plus_struct __user *) v86, false, &info);
}


SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
{
	struct kernel_vm86_struct info; /* declare this _on top_,
					 * this avoids wasting stack space.
					 * This remains on the stack until we
					 * return to 32 bit user space.
					 */

	switch (cmd) {
	case VM86_REQUEST_IRQ:
	case VM86_FREE_IRQ:
	case VM86_GET_IRQ_BITS:
	case VM86_GET_AND_RESET_IRQ:
		return do_vm86_irq_handling(cmd, (int)arg);
	case VM86_PLUS_INSTALL_CHECK:
		/*
		 * NOTE: on old vm86 stuff this will return the error
		 * from access_ok(), because the subfunction is
		 * interpreted as (invalid) address to vm86_struct.
		 * So the installation check works.
		 */
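		/*
		 * Illustrative userspace probe (a sketch, not taken from any
		 * particular dosemu release): a monitor can simply do
		 *
		 *	if (vm86(VM86_PLUS_INSTALL_CHECK, 0) == 0)
		 *		... vm86plus API is available ...
		 *
		 * since the old API would have treated the argument as a bad
		 * vm86_struct pointer and failed the access_ok() check.
		 */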
		return 0;
	}

	/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
	return do_sys_vm86((struct vm86plus_struct __user *) arg, true, &info);
}


static long do_sys_vm86(struct vm86plus_struct __user *v86, bool plus,
			struct kernel_vm86_struct *info)
{
	struct tss_struct *tss;
	struct task_struct *tsk = current;
	struct vm86 *vm86 = tsk->thread.vm86;
	unsigned long err = 0;

	if (!vm86) {
		if (!(vm86 = kzalloc(sizeof(*vm86), GFP_KERNEL)))
			return -ENOMEM;
		tsk->thread.vm86 = vm86;
	}
	if (vm86->saved_sp0)
		return -EPERM;

	if (!access_ok(VERIFY_READ, v86, plus ?
		       sizeof(struct vm86_struct) :
		       sizeof(struct vm86plus_struct)))
		return -EFAULT;

	memset(info, 0, sizeof(*info));
	get_user_try {
		unsigned short seg;
		get_user_ex(info->regs.pt.bx, &v86->regs.ebx);
		get_user_ex(info->regs.pt.cx, &v86->regs.ecx);
		get_user_ex(info->regs.pt.dx, &v86->regs.edx);
		get_user_ex(info->regs.pt.si, &v86->regs.esi);
		get_user_ex(info->regs.pt.di, &v86->regs.edi);
		get_user_ex(info->regs.pt.bp, &v86->regs.ebp);
		get_user_ex(info->regs.pt.ax, &v86->regs.eax);
		get_user_ex(info->regs.pt.ip, &v86->regs.eip);
		get_user_ex(seg, &v86->regs.cs);
		info->regs.pt.cs = seg;
		get_user_ex(info->regs.pt.flags, &v86->regs.eflags);
		get_user_ex(info->regs.pt.sp, &v86->regs.esp);
		get_user_ex(seg, &v86->regs.ss);
		info->regs.pt.ss = seg;
		get_user_ex(info->regs.es, &v86->regs.es);
		get_user_ex(info->regs.ds, &v86->regs.ds);
		get_user_ex(info->regs.fs, &v86->regs.fs);
		get_user_ex(info->regs.gs, &v86->regs.gs);

		get_user_ex(info->flags, &v86->flags);
		get_user_ex(info->screen_bitmap, &v86->screen_bitmap);
		get_user_ex(info->cpu_type, &v86->cpu_type);
	} get_user_catch(err);
	if (err)
		return err;

	if (copy_from_user(&info->int_revectored, &v86->int_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (copy_from_user(&info->int21_revectored, &v86->int21_revectored,
			   sizeof(struct revectored_struct)))
		return -EFAULT;
	if (plus) {
		if (copy_from_user(&info->vm86plus, &v86->vm86plus,
				   sizeof(struct vm86plus_info_struct)))
			return -EFAULT;
		info->vm86plus.is_vm86pus = 1;
	}

	info->regs32 = current_pt_regs();
	vm86->vm86_info = v86;

/*
 * The flags register is also special: we cannot trust that the user
 * has set it up safely, so this makes sure interrupt etc flags are
 * inherited from protected mode.
 */
	VEFLAGS = info->regs.pt.flags;
	info->regs.pt.flags &= SAFE_MASK;
	info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
	info->regs.pt.flags |= X86_VM_MASK;

	info->regs.pt.orig_ax = info->regs32->orig_ax;

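	/*
	 * v86mask selects which extra EFLAGS bits the vm86 task may observe
	 * and modify, based on the CPU type it asked to emulate: CPU_386
	 * exposes NT and IOPL, CPU_486 adds AC, and anything newer adds ID.
	 */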
	switch (info->cpu_type) {
	case CPU_286:
		vm86->v86mask = 0;
		break;
	case CPU_386:
		vm86->v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	case CPU_486:
		vm86->v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	default:
		vm86->v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
		break;
	}

/*
 * Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
 */
	info->regs32->ax = VM86_SIGNAL;
	vm86->saved_sp0 = tsk->thread.sp0;
	lazy_save_gs(info->regs32->gs);

	tss = &per_cpu(cpu_tss, get_cpu());
	tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
	if (cpu_has_sep)
		tsk->thread.sysenter_cs = 0;
	load_sp0(tss, &tsk->thread);
	put_cpu();

	vm86->screen_bitmap = info->screen_bitmap;
	if (info->flags & VM86_SCREEN_BITMAP)
		mark_screen_rdonly(tsk->mm);

	/* call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
	if (unlikely(current->audit_context))
		__audit_syscall_exit(1, 0);
#endif

	__asm__ __volatile__(
		"movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
		"mov %2, %%gs\n\t"
#endif
		"jmp resume_userspace"
		: /* no outputs */
		:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
	unreachable();	/* we never return here */
}

static inline void return_to_32bit(struct kernel_vm86_regs *regs16, int retval)
{
	struct pt_regs *regs32;

	regs32 = save_v86_state(regs16);
	regs32->ax = retval;
	__asm__ __volatile__("movl %0,%%esp\n\t"
		"movl %1,%%ebp\n\t"
		"jmp resume_userspace"
		: : "r" (regs32), "r" (current_thread_info()));
}

static inline void set_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS |= X86_EFLAGS_VIF;
	if (VEFLAGS & X86_EFLAGS_VIP)
		return_to_32bit(regs, VM86_STI);
}

static inline void clear_IF(struct kernel_vm86_regs *regs)
{
	VEFLAGS &= ~X86_EFLAGS_VIF;
}

static inline void clear_TF(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_TF;
}

static inline void clear_AC(struct kernel_vm86_regs *regs)
{
	regs->pt.flags &= ~X86_EFLAGS_AC;
}

/*
 * It is correct to call set_IF(regs) from the set_vflags_*
 * functions. However someone forgot to call clear_IF(regs)
 * in the opposite case.
 * After the command sequence CLI PUSHF STI POPF you should
 * end up with interrupts disabled, but you ended up with
 * interrupts enabled.
 * ( I was testing my own changes, but the only bug I
 *   could find was in a function I had not changed. )
 * [KD]
 */

static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
	set_flags(VEFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
	set_flags(VFLAGS, flags, current->thread.vm86->v86mask);
	set_flags(regs->pt.flags, flags, SAFE_MASK);
	if (flags & X86_EFLAGS_IF)
		set_IF(regs);
	else
		clear_IF(regs);
}

static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
	unsigned long flags = regs->pt.flags & RETURN_MASK;

	if (VEFLAGS & X86_EFLAGS_VIF)
		flags |= X86_EFLAGS_IF;
	flags |= X86_EFLAGS_IOPL;
	return flags | (VEFLAGS & current->thread.vm86->v86mask);
}

static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
	__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
		:"=r" (nr)
		:"m" (*bitmap), "r" (nr));
	return nr;
}

#define val_byte(val, n) (((__u8 *)&val)[n])

#define pushb(base, ptr, val, err_label) \
	do { \
		__u8 __val = val; \
		ptr--; \
		if (put_user(__val, base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushw(base, ptr, val, err_label) \
	do { \
		__u16 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define pushl(base, ptr, val, err_label) \
	do { \
		__u32 __val = val; \
		ptr--; \
		if (put_user(val_byte(__val, 3), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 2), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 1), base + ptr) < 0) \
			goto err_label; \
		ptr--; \
		if (put_user(val_byte(__val, 0), base + ptr) < 0) \
			goto err_label; \
	} while (0)

#define popb(base, ptr, err_label) \
	({ \
		__u8 __res; \
		if (get_user(__res, base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popw(base, ptr, err_label) \
	({ \
		__u16 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

#define popl(base, ptr, err_label) \
	({ \
		__u32 __res; \
		if (get_user(val_byte(__res, 0), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 1), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 2), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		if (get_user(val_byte(__res, 3), base + ptr) < 0) \
			goto err_label; \
		ptr++; \
		__res; \
	})

/* There are so many possible reasons for this function to return
 * VM86_INTx, so adding another doesn't bother me. We can expect
 * userspace programs to be able to handle it. (Getting a problem
 * in userspace is always better than an Oops anyway.) [KD]
 */
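/*
 * Reflect interrupt/INT instruction 'i' into the vm86 task: push the
 * virtual FLAGS, CS and IP onto the real-mode stack, then load CS:IP from
 * the real-mode interrupt vector table entry at linear address i*4 --
 * unless the vector is revectored or points into the BIOS segment, in
 * which case the event is handed back to the 32-bit monitor as VM86_INTx.
 */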
static void do_int(struct kernel_vm86_regs *regs, int i,
		   unsigned char __user *ssp, unsigned short sp)
{
	unsigned long __user *intr_ptr;
	unsigned long segoffs;

	if (regs->pt.cs == BIOSSEG)
		goto cannot_handle;
	if (is_revectored(i, &KVM86->int_revectored))
		goto cannot_handle;
	if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
		goto cannot_handle;
	intr_ptr = (unsigned long __user *) (i << 2);
	if (get_user(segoffs, intr_ptr))
		goto cannot_handle;
	if ((segoffs >> 16) == BIOSSEG)
		goto cannot_handle;
	pushw(ssp, sp, get_vflags(regs), cannot_handle);
	pushw(ssp, sp, regs->pt.cs, cannot_handle);
	pushw(ssp, sp, IP(regs), cannot_handle);
	regs->pt.cs = segoffs >> 16;
	SP(regs) -= 6;
	IP(regs) = segoffs & 0xffff;
	clear_TF(regs);
	clear_IF(regs);
	clear_AC(regs);
	return;

cannot_handle:
	return_to_32bit(regs, VM86_INTx + (i << 8));
}

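/*
 * Called from the debug/breakpoint trap handlers while the CPU is in vm86
 * mode.  vm86plus callers get traps 1 and 3 bounced back to the 32-bit
 * monitor as VM86_TRAP and everything else reflected into the real-mode
 * handler; plain vm86 callers get SIGTRAP for trap 1 and leave the rest to
 * the calling routine.
 */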
int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
	if (VMPI.is_vm86pus) {
		if ((trapno == 3) || (trapno == 1)) {
			KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
			/* setting this flag forces the code in entry_32.S to
			   the path where we call save_v86_state() and change
			   the stack pointer to KVM86->regs32 */
			set_thread_flag(TIF_NOTIFY_RESUME);
			return 0;
		}
		do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
		return 0;
	}
	if (trapno != 1)
		return 1; /* we let this be handled by the calling routine */
	current->thread.trap_nr = trapno;
	current->thread.error_code = error_code;
	force_sig(SIGTRAP, current);
	return 0;
}

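/*
 * handle_vm86_fault() is reached from the general protection fault handler
 * when the vm86 task executes an IOPL-sensitive instruction (pushf/popf,
 * int, iret, cli, sti).  The instruction, and any operand-size or segment
 * prefixes in front of it, is decoded from CS:IP and emulated against the
 * virtual flags.
 */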
void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
	unsigned char opcode;
	unsigned char __user *csp;
	unsigned char __user *ssp;
	unsigned short ip, sp, orig_flags;
	int data32, pref_done;

#define CHECK_IF_IN_TRAP \
	if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
		newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
	if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
		return_to_32bit(regs, VM86_PICRETURN); \
	if (orig_flags & X86_EFLAGS_TF) \
		handle_vm86_trap(regs, 0, 1); \
	return; } while (0)

	orig_flags = *(unsigned short *)&regs->pt.flags;

	csp = (unsigned char __user *) (regs->pt.cs << 4);
	ssp = (unsigned char __user *) (regs->pt.ss << 4);
	sp = SP(regs);
	ip = IP(regs);

	data32 = 0;
	pref_done = 0;
	do {
		switch (opcode = popb(csp, ip, simulate_sigsegv)) {
		case 0x66:	/* 32-bit data */	data32 = 1; break;
		case 0x67:	/* 32-bit address */	break;
		case 0x2e:	/* CS */		break;
		case 0x3e:	/* DS */		break;
		case 0x26:	/* ES */		break;
		case 0x36:	/* SS */		break;
		case 0x65:	/* GS */		break;
		case 0x64:	/* FS */		break;
		case 0xf2:	/* repnz */		break;
		case 0xf3:	/* rep */		break;
		default: pref_done = 1;
		}
	} while (!pref_done);

	switch (opcode) {

	/* pushf */
	case 0x9c:
		if (data32) {
			pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 4;
		} else {
			pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
			SP(regs) -= 2;
		}
		IP(regs) = ip;
		VM86_FAULT_RETURN;

	/* popf */
	case 0x9d:
		{
		unsigned long newflags;
		if (data32) {
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 4;
		} else {
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 2;
		}
		IP(regs) = ip;
		CHECK_IF_IN_TRAP;
		if (data32)
			set_vflags_long(newflags, regs);
		else
			set_vflags_short(newflags, regs);

		VM86_FAULT_RETURN;
		}

	/* int xx */
	case 0xcd: {
		int intno = popb(csp, ip, simulate_sigsegv);
		IP(regs) = ip;
		if (VMPI.vm86dbg_active) {
			if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
				return_to_32bit(regs, VM86_INTx + (intno << 8));
		}
		do_int(regs, intno, ssp, sp);
		return;
	}

	/* iret */
	case 0xcf:
		{
		unsigned long newip;
		unsigned long newcs;
		unsigned long newflags;
		if (data32) {
			newip = popl(ssp, sp, simulate_sigsegv);
			newcs = popl(ssp, sp, simulate_sigsegv);
			newflags = popl(ssp, sp, simulate_sigsegv);
			SP(regs) += 12;
		} else {
			newip = popw(ssp, sp, simulate_sigsegv);
			newcs = popw(ssp, sp, simulate_sigsegv);
			newflags = popw(ssp, sp, simulate_sigsegv);
			SP(regs) += 6;
		}
		IP(regs) = newip;
		regs->pt.cs = newcs;
		CHECK_IF_IN_TRAP;
		if (data32) {
			set_vflags_long(newflags, regs);
		} else {
			set_vflags_short(newflags, regs);
		}
		VM86_FAULT_RETURN;
		}

	/* cli */
	case 0xfa:
		IP(regs) = ip;
		clear_IF(regs);
		VM86_FAULT_RETURN;

	/* sti */
	/*
	 * Damn. This is incorrect: the 'sti' instruction should actually
	 * enable interrupts after the /next/ instruction. Not good.
	 *
	 * Probably needs some horsing around with the TF flag. Aiee..
	 */
	case 0xfb:
		IP(regs) = ip;
		set_IF(regs);
		VM86_FAULT_RETURN;

	default:
		return_to_32bit(regs, VM86_UNKNOWN);
	}

	return;

simulate_sigsegv:
	/* FIXME: After a long discussion with Stas we finally
	 *        agreed that this is wrong. Here we should
	 *        really send a SIGSEGV to the user program.
	 *        But how do we create the correct context? We
	 *        are inside a general protection fault handler
	 *        and have just returned from a page fault handler.
	 *        The correct context for the signal handler
	 *        should be a mixture of the two, but how do we
	 *        get the information? [KD]
	 */
	return_to_32bit(regs, VM86_UNKNOWN);
}

/* ---------------- vm86 special IRQ passing stuff ----------------- */
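/*
 * VM86_REQUEST_IRQ takes (signal << 8) | irq: the kernel grabs the IRQ,
 * disables it when it fires, optionally sends the requested signal to the
 * owning task, and latches the event in irqbits until the task collects it
 * with VM86_GET_AND_RESET_IRQ (which also re-enables the line).
 */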

#define VM86_IRQNAME "vm86irq"

static struct vm86_irqs {
	struct task_struct *tsk;
	int sig;
} vm86_irqs[16];

static DEFINE_SPINLOCK(irqbits_lock);
static int irqbits;

#define ALLOWED_SIGS (1 /* 0 = don't send a signal */ \
	| (1 << SIGUSR1) | (1 << SIGUSR2) | (1 << SIGIO)  | (1 << SIGURG) \
	| (1 << SIGUNUSED))

static irqreturn_t irq_handler(int intno, void *dev_id)
{
	int irq_bit;
	unsigned long flags;

	spin_lock_irqsave(&irqbits_lock, flags);
	irq_bit = 1 << intno;
	if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
		goto out;
	irqbits |= irq_bit;
	if (vm86_irqs[intno].sig)
		send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
	/*
	 * IRQ will be re-enabled when user asks for the irq (whether
	 * polling or as a result of the signal)
	 */
	disable_irq_nosync(intno);
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_HANDLED;

out:
	spin_unlock_irqrestore(&irqbits_lock, flags);
	return IRQ_NONE;
}

static inline void free_vm86_irq(int irqnumber)
{
	unsigned long flags;

	free_irq(irqnumber, NULL);
	vm86_irqs[irqnumber].tsk = NULL;

	spin_lock_irqsave(&irqbits_lock, flags);
	irqbits &= ~(1 << irqnumber);
	spin_unlock_irqrestore(&irqbits_lock, flags);
}

void release_vm86_irqs(struct task_struct *task)
{
	int i;
	for (i = FIRST_VM86_IRQ ; i <= LAST_VM86_IRQ; i++)
		if (vm86_irqs[i].tsk == task)
			free_vm86_irq(i);
}

static inline int get_and_reset_irq(int irqnumber)
{
	int bit;
	unsigned long flags;
	int ret = 0;

	if (invalid_vm86_irq(irqnumber)) return 0;
	if (vm86_irqs[irqnumber].tsk != current) return 0;
	spin_lock_irqsave(&irqbits_lock, flags);
	bit = irqbits & (1 << irqnumber);
	irqbits &= ~bit;
	if (bit) {
		enable_irq(irqnumber);
		ret = 1;
	}

	spin_unlock_irqrestore(&irqbits_lock, flags);
	return ret;
}


static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
	int ret;
	switch (subfunction) {
		case VM86_GET_AND_RESET_IRQ: {
			return get_and_reset_irq(irqnumber);
		}
		case VM86_GET_IRQ_BITS: {
			return irqbits;
		}
		case VM86_REQUEST_IRQ: {
			int sig = irqnumber >> 8;
			int irq = irqnumber & 255;
			if (!capable(CAP_SYS_ADMIN)) return -EPERM;
			if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
			if (invalid_vm86_irq(irq)) return -EPERM;
			if (vm86_irqs[irq].tsk) return -EPERM;
			ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
			if (ret) return ret;
			vm86_irqs[irq].sig = sig;
			vm86_irqs[irq].tsk = current;
			return irq;
		}
		case VM86_FREE_IRQ: {
			if (invalid_vm86_irq(irqnumber)) return -EPERM;
			if (!vm86_irqs[irqnumber].tsk) return 0;
			if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
			free_vm86_irq(irqnumber);
			return 0;
		}
	}
	return -EINVAL;
}
