#ifndef X86_64_MSR_H
#define X86_64_MSR_H 1

#include <asm/msr-index.h>

#ifndef __ASSEMBLY__
/*
 * Access to model-specific registers (MSRs), available on 586 and later CPUs only.
 * Note: the rd* operations modify their parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr,val1,val2) \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (val1), "=d" (val2) \
			     : "c" (msr))


#define rdmsrl(msr,val) do { unsigned long a__,b__; \
	__asm__ __volatile__("rdmsr" \
			     : "=a" (a__), "=d" (b__) \
			     : "c" (msr)); \
	val = a__ | (b__<<32); \
} while(0)

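/*
 * Illustrative usage sketch: rdmsr() returns the low/high 32-bit halves
 * separately, while rdmsrl() assembles one 64-bit value.  MSR_EFER is
 * assumed to be provided by <asm/msr-index.h>:
 *
 *	unsigned int lo, hi;
 *	unsigned long efer;
 *
 *	rdmsr(MSR_EFER, lo, hi);
 *	rdmsrl(MSR_EFER, efer);
 */
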
#define wrmsr(msr,val1,val2) \
	__asm__ __volatile__("wrmsr" \
			     : /* no outputs */ \
			     : "c" (msr), "a" (val1), "d" (val2))

#define wrmsrl(msr,val) wrmsr(msr,(__u32)((__u64)(val)),((__u64)(val))>>32)

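/*
 * Illustrative usage sketch: a read-modify-write of EFER.  EFER_NX is
 * assumed to come from <asm/msr-index.h>; wrmsrl() splits the 64-bit value
 * into the eax/edx halves that wrmsr expects:
 *
 *	unsigned long efer;
 *
 *	rdmsrl(MSR_EFER, efer);
 *	wrmsrl(MSR_EFER, efer | EFER_NX);
 */
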
/* wrmsr with exception handling */
#define wrmsr_safe(msr,a,b) ({ int ret__;			\
	asm volatile("2: wrmsr ; xorl %0,%0\n"			\
		     "1:\n\t"					\
		     ".section .fixup,\"ax\"\n\t"		\
		     "3: movl %4,%0 ; jmp 1b\n\t"		\
		     ".previous\n\t"				\
		     ".section __ex_table,\"a\"\n"		\
		     " .align 8\n\t"				\
		     " .quad 2b,3b\n\t"				\
		     ".previous"				\
		     : "=a" (ret__)				\
		     : "c" (msr), "0" (a), "d" (b), "i" (-EFAULT)); \
	ret__; })

#define checking_wrmsrl(msr,val) wrmsr_safe(msr,(u32)(val),(u32)((val)>>32))

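/*
 * Illustrative usage sketch: wrmsr_safe()/checking_wrmsrl() catch the #GP
 * that a write to a non-existent MSR would otherwise raise and turn it into
 * a return value (0 on success, -EFAULT if the write faulted).  For some
 * msr/val being written:
 *
 *	if (checking_wrmsrl(msr, val))
 *		return -EIO;
 */
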
#define rdmsr_safe(msr,a,b) \
	({ int ret__;						\
	   asm volatile ("1: rdmsr\n"				\
			 "2:\n"					\
			 ".section .fixup,\"ax\"\n"		\
			 "3: movl %4,%0\n"			\
			 " jmp 2b\n"				\
			 ".previous\n"				\
			 ".section __ex_table,\"a\"\n"		\
			 " .align 8\n"				\
			 " .quad 1b,3b\n"			\
			 ".previous":"=&bDS" (ret__), "=a"(*(a)), "=d"(*(b))\
			 :"c"(msr), "i"(-EIO), "0"(0));		\
	   ret__; })

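/*
 * Illustrative usage sketch: unlike rdmsr(), rdmsr_safe() takes pointers to
 * the low/high words and returns 0 on success or -EIO if the read faulted:
 *
 *	unsigned int lo, hi;
 *
 *	if (rdmsr_safe(msr, &lo, &hi))
 *		return -EIO;
 */
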
#define rdtsc(low,high) \
	__asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))

#define rdtscl(low) \
	__asm__ __volatile__ ("rdtsc" : "=a" (low) : : "edx")

#define rdtscp(low,high,aux) \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (low), "=d" (high), "=c" (aux))

#define rdtscll(val) do { \
	unsigned int __a,__d; \
	asm volatile("rdtsc" : "=a" (__a), "=d" (__d)); \
	(val) = ((unsigned long)__a) | (((unsigned long)__d)<<32); \
} while(0)

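/*
 * Illustrative usage sketch: rdtscll() assembles the full 64-bit time-stamp
 * counter, rdtscl() keeps only the low 32 bits (and clobbers edx).  A simple
 * cycle measurement might look like:
 *
 *	unsigned long t1, t2, cycles;
 *
 *	rdtscll(t1);
 *	// ... code being timed ...
 *	rdtscll(t2);
 *	cycles = t2 - t1;
 */
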
#define rdtscpll(val, aux) do { \
	unsigned long __a, __d; \
	asm volatile (".byte 0x0f,0x01,0xf9" : "=a" (__a), "=d" (__d), "=c" (aux)); \
	(val) = (__d << 32) | __a; \
} while (0)

#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)

#define write_rdtscp_aux(val) wrmsr(0xc0000103, val, 0)

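/*
 * Illustrative usage sketch: the .byte 0x0f,0x01,0xf9 sequence above encodes
 * the rdtscp instruction; its "aux" result is whatever was programmed into
 * the IA32_TSC_AUX MSR (0xc0000103) via write_rdtscp_aux(), e.g. a CPU
 * number:
 *
 *	unsigned long tsc;
 *	unsigned int aux;
 *
 *	rdtscpll(tsc, aux);
 */
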
#define rdpmc(counter,low,high) \
	__asm__ __volatile__("rdpmc" \
			     : "=a" (low), "=d" (high) \
			     : "c" (counter))

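/*
 * Illustrative usage sketch: rdpmc reads the performance-monitoring counter
 * selected through ecx; counter 0 here is only an example index:
 *
 *	unsigned int lo, hi;
 *
 *	rdpmc(0, lo, hi);
 */
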
static inline void cpuid(int op, unsigned int *eax, unsigned int *ebx,
			 unsigned int *ecx, unsigned int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op));
}

/* Some CPUID calls want 'count' to be placed in ecx */
static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
			       int *edx)
{
	__asm__("cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (op), "c" (count));
}

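/*
 * Illustrative usage sketch: leaf 1 returns the CPU feature flags in
 * ecx/edx; leaves such as 4 (cache parameters) also take a sub-leaf index
 * in ecx, which is what cpuid_count() is for:
 *
 *	unsigned int eax, ebx, ecx, edx;
 *	int a, b, c, d;
 *
 *	cpuid(1, &eax, &ebx, &ecx, &edx);
 *	cpuid_count(4, 0, &a, &b, &c, &d);
 */
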
/*
 * CPUID functions returning a single datum
 */
static inline unsigned int cpuid_eax(unsigned int op)
{
	unsigned int eax;

	__asm__("cpuid"
		: "=a" (eax)
		: "0" (op)
		: "bx", "cx", "dx");
	return eax;
}
static inline unsigned int cpuid_ebx(unsigned int op)
{
	unsigned int eax, ebx;

	__asm__("cpuid"
		: "=a" (eax), "=b" (ebx)
		: "0" (op)
		: "cx", "dx");
	return ebx;
}
static inline unsigned int cpuid_ecx(unsigned int op)
{
	unsigned int eax, ecx;

	__asm__("cpuid"
		: "=a" (eax), "=c" (ecx)
		: "0" (op)
		: "bx", "dx");
	return ecx;
}
static inline unsigned int cpuid_edx(unsigned int op)
{
	unsigned int eax, edx;

	__asm__("cpuid"
		: "=a" (eax), "=d" (edx)
		: "0" (op)
		: "bx", "cx");
	return edx;
}

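/*
 * Illustrative usage sketch: probe the maximum supported extended CPUID
 * leaf before querying one of them (leaf 0x80000008 reports the physical
 * address width in its low byte):
 *
 *	if (cpuid_eax(0x80000000) >= 0x80000008)
 *		phys_bits = cpuid_eax(0x80000008) & 0xff;
 */
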
#ifdef CONFIG_SMP
void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
#else /* CONFIG_SMP */
static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
}
static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
}
#endif /* CONFIG_SMP */

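/*
 * Illustrative usage sketch: the *_on_cpu() helpers perform the access on
 * the given CPU (on SMP via a cross-CPU call, on UP they fall back to the
 * local rdmsr/wrmsr).  MSR_IA32_APICBASE is assumed from <asm/msr-index.h>:
 *
 *	u32 lo, hi;
 *
 *	rdmsr_on_cpu(1, MSR_IA32_APICBASE, &lo, &hi);
 */
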
#endif /* __ASSEMBLY__ */
#endif /* X86_64_MSR_H */