/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */

/******************************************************************************
 * replacement of intrinsic operations.
 */

struct pv_cpu_ops {
        void (*fc)(unsigned long addr);
        unsigned long (*thash)(unsigned long addr);
        unsigned long (*get_cpuid)(int index);
        unsigned long (*get_pmd)(int index);
        unsigned long (*getreg)(int reg);
        void (*setreg)(int reg, unsigned long val);
        void (*ptcga)(unsigned long addr, unsigned long size);
        unsigned long (*get_rr)(unsigned long index);
        void (*set_rr)(unsigned long index, unsigned long val);
        void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
                               unsigned long val2, unsigned long val3,
                               unsigned long val4);
        void (*ssm_i)(void);
        void (*rsm_i)(void);
        unsigned long (*get_psr_i)(void);
        void (*intrin_local_irq_restore)(unsigned long flags);
};

extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);

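/*
 * Illustrative sketch (not part of the original interface): a paravirt
 * backend installs its own handlers into the global pv_cpu_ops hook table
 * at boot, before any of the paravirt_* wrappers below are used.  The
 * example_* names here are hypothetical; only ia64_native_setreg_func()
 * and ia64_native_getreg_func() are real declarations from above.  Kept
 * under "#if 0" so it is never compiled.
 */
#if 0
static void example_ssm_i(void)
{
        /* hypervisor-specific "set psr.i" (enable interrupts) goes here */
}

static const struct pv_cpu_ops example_cpu_ops = {
        .getreg = ia64_native_getreg_func,
        .setreg = ia64_native_setreg_func,
        .ssm_i  = example_ssm_i,
        /* ... the remaining hooks would be filled in the same way ... */
};

static void example_paravirt_setup(void)
{
        pv_cpu_ops = example_cpu_ops;   /* struct copy of the hook table */
}
#endif
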
/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

/* The mask for ia64_native_ssm/rsm() must be a constant ("i" constraint);
 * a static inline function cannot satisfy that, hence the macros below. */
#define paravirt_ssm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        pv_cpu_ops.ssm_i();     \
                else                            \
                        ia64_native_ssm(mask);  \
        } while (0)

#define paravirt_rsm(mask)                      \
        do {                                    \
                if ((mask) == IA64_PSR_I)       \
                        pv_cpu_ops.rsm_i();     \
                else                            \
                        ia64_native_rsm(mask);  \
        } while (0)
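
/*
 * Usage sketch (illustrative, not part of the original header): masking and
 * unmasking external interrupts through the paravirtualized psr.i helpers.
 * These macros are normally reached through the ia64_ssm()/ia64_rsm()
 * wrappers in asm/intrinsics.h; the example_* names are hypothetical and
 * the block is kept under "#if 0" so it is never compiled.
 */
#if 0
static inline void example_irq_off(void)
{
        paravirt_rsm(IA64_PSR_I);       /* constant psr.i mask: pv_cpu_ops.rsm_i() */
}

static inline void example_irq_on(void)
{
        paravirt_ssm(IA64_PSR_I);       /* constant psr.i mask: pv_cpu_ops.ssm_i() */
}
#endif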

/* returned ip value should be the one in the caller,
 * not in __paravirt_getreg() */
#define paravirt_getreg(reg)                                    \
        ({                                                      \
                unsigned long res;                              \
                if ((reg) == _IA64_REG_IP)                      \
                        res = ia64_native_getreg(_IA64_REG_IP); \
                else                                            \
                        res = pv_cpu_ops.getreg(reg);           \
                res;                                            \
        })
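
/*
 * Usage sketch (illustrative, not part of the original header): reading
 * registers through the paravirtualized accessor.  _IA64_REG_IP is
 * special-cased above so that the reported instruction pointer is the
 * caller's, not an address inside the pv_cpu_ops.getreg() implementation.
 * The example_* names are hypothetical; kept under "#if 0".
 */
#if 0
static inline unsigned long example_read_itc(void)
{
        /* ar.itc (like most registers) goes through pv_cpu_ops.getreg() */
        return paravirt_getreg(_IA64_REG_AR_ITC);
}

static inline unsigned long example_read_ip(void)
{
        /* ip is read natively, so the value is the caller's ip */
        return paravirt_getreg(_IA64_REG_IP);
}
#endif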

/******************************************************************************
 * replacement of hand-written assembly code.
 */
struct pv_cpu_asm_switch {
        unsigned long switch_to;
        unsigned long leave_syscall;
        unsigned long work_processed_syscall;
        unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);
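
/*
 * Illustrative sketch (not part of the original interface): a backend
 * records the addresses of its hand-written assembly entry points once at
 * boot; the paravirt_* entry points selected below are then expected to
 * dispatch to whatever was registered through paravirt_cpu_asm_init().
 * The example_* symbols are hypothetical; kept under "#if 0".
 */
#if 0
extern char example_switch_to[], example_leave_syscall[],
            example_work_processed_syscall[], example_leave_kernel[];

static const struct pv_cpu_asm_switch example_cpu_asm_switch = {
        .switch_to              = (unsigned long)example_switch_to,
        .leave_syscall          = (unsigned long)example_leave_syscall,
        .work_processed_syscall = (unsigned long)example_work_processed_syscall,
        .leave_kernel           = (unsigned long)example_leave_kernel,
};

static void example_cpu_asm_setup(void)
{
        paravirt_cpu_asm_init(&example_cpu_asm_switch);
}
#endif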

#endif /* __ASSEMBLY__ */

#define IA64_PARAVIRT_ASM_FUNC(name)    paravirt_ ## name

#else

/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name)    ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

/* These routines use privilege-sensitive or performance-sensitive
 * privileged instructions, so the code must be replaced with
 * paravirtualized versions. */
#define ia64_switch_to                  IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall              IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall     \
        IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel               IA64_PARAVIRT_ASM_FUNC(leave_kernel)
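
/*
 * Illustrative note (not part of the original header): with CONFIG_PARAVIRT=y
 * a reference such as
 *
 *      br.call.sptk.many rp = ia64_leave_kernel
 *
 * resolves to paravirt_leave_kernel, whose target is expected to be selected
 * via paravirt_cpu_asm_init(); without CONFIG_PARAVIRT it resolves to
 * ia64_native_leave_kernel.
 */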

#endif /* _ASM_IA64_PARAVIRT_PRIVOP_H */