/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/start_kernel.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
/* nop stub: default for hooks that native hardware does not need */
static void native_nop(void)
{
}
41 static void __init
default_banner(void)
43 printk(KERN_INFO
"Booting paravirtualized kernel on %s\n",
47 char *memory_setup(void)
49 return paravirt_ops
.memory_setup();
52 /* Simple instruction patching code. */
53 #define DEF_NATIVE(name, code) \
54 extern const char start_##name[], end_##name[]; \
55 asm("start_" #name ": " code "; end_" #name ":")
56 DEF_NATIVE(cli
, "cli");
57 DEF_NATIVE(sti
, "sti");
58 DEF_NATIVE(popf
, "push %eax; popf");
59 DEF_NATIVE(pushf
, "pushf; pop %eax");
60 DEF_NATIVE(pushf_cli
, "pushf; pop %eax; cli");
61 DEF_NATIVE(iret
, "iret");
62 DEF_NATIVE(sti_sysexit
, "sti; sysexit");
64 static const struct native_insns
66 const char *start
, *end
;
68 [PARAVIRT_IRQ_DISABLE
] = { start_cli
, end_cli
},
69 [PARAVIRT_IRQ_ENABLE
] = { start_sti
, end_sti
},
70 [PARAVIRT_RESTORE_FLAGS
] = { start_popf
, end_popf
},
71 [PARAVIRT_SAVE_FLAGS
] = { start_pushf
, end_pushf
},
72 [PARAVIRT_SAVE_FLAGS_IRQ_DISABLE
] = { start_pushf_cli
, end_pushf_cli
},
73 [PARAVIRT_INTERRUPT_RETURN
] = { start_iret
, end_iret
},
74 [PARAVIRT_STI_SYSEXIT
] = { start_sti_sysexit
, end_sti_sysexit
},
77 static unsigned native_patch(u8 type
, u16 clobbers
, void *insns
, unsigned len
)
79 unsigned int insn_len
;
81 /* Don't touch it if we don't have a replacement */
82 if (type
>= ARRAY_SIZE(native_insns
) || !native_insns
[type
].start
)
85 insn_len
= native_insns
[type
].end
- native_insns
[type
].start
;
87 /* Similarly if we can't fit replacement. */
91 memcpy(insns
, native_insns
[type
].start
, insn_len
);
95 static fastcall
unsigned long native_get_debugreg(int regno
)
97 unsigned long val
= 0; /* Damn you, gcc! */
101 asm("movl %%db0, %0" :"=r" (val
)); break;
103 asm("movl %%db1, %0" :"=r" (val
)); break;
105 asm("movl %%db2, %0" :"=r" (val
)); break;
107 asm("movl %%db3, %0" :"=r" (val
)); break;
109 asm("movl %%db6, %0" :"=r" (val
)); break;
111 asm("movl %%db7, %0" :"=r" (val
)); break;
118 static fastcall
void native_set_debugreg(int regno
, unsigned long value
)
122 asm("movl %0,%%db0" : /* no output */ :"r" (value
));
125 asm("movl %0,%%db1" : /* no output */ :"r" (value
));
128 asm("movl %0,%%db2" : /* no output */ :"r" (value
));
131 asm("movl %0,%%db3" : /* no output */ :"r" (value
));
134 asm("movl %0,%%db6" : /* no output */ :"r" (value
));
137 asm("movl %0,%%db7" : /* no output */ :"r" (value
));
146 paravirt_ops
.init_IRQ();
149 static fastcall
void native_clts(void)
151 asm volatile ("clts");
154 static fastcall
unsigned long native_read_cr0(void)
157 asm volatile("movl %%cr0,%0\n\t" :"=r" (val
));
161 static fastcall
void native_write_cr0(unsigned long val
)
163 asm volatile("movl %0,%%cr0": :"r" (val
));
166 static fastcall
unsigned long native_read_cr2(void)
169 asm volatile("movl %%cr2,%0\n\t" :"=r" (val
));
173 static fastcall
void native_write_cr2(unsigned long val
)
175 asm volatile("movl %0,%%cr2": :"r" (val
));
178 static fastcall
unsigned long native_read_cr3(void)
181 asm volatile("movl %%cr3,%0\n\t" :"=r" (val
));
185 static fastcall
void native_write_cr3(unsigned long val
)
187 asm volatile("movl %0,%%cr3": :"r" (val
));
190 static fastcall
unsigned long native_read_cr4(void)
193 asm volatile("movl %%cr4,%0\n\t" :"=r" (val
));
197 static fastcall
unsigned long native_read_cr4_safe(void)
200 /* This could fault if %cr4 does not exist */
201 asm("1: movl %%cr4, %0 \n"
203 ".section __ex_table,\"a\" \n"
206 : "=r" (val
): "0" (0));
210 static fastcall
void native_write_cr4(unsigned long val
)
212 asm volatile("movl %0,%%cr4": :"r" (val
));
215 static fastcall
unsigned long native_save_fl(void)
218 asm volatile("pushfl ; popl %0":"=g" (f
): /* no input */);
222 static fastcall
void native_restore_fl(unsigned long f
)
224 asm volatile("pushl %0 ; popfl": /* no output */
229 static fastcall
void native_irq_disable(void)
231 asm volatile("cli": : :"memory");
234 static fastcall
void native_irq_enable(void)
236 asm volatile("sti": : :"memory");
239 static fastcall
void native_safe_halt(void)
241 asm volatile("sti; hlt": : :"memory");
244 static fastcall
void native_halt(void)
246 asm volatile("hlt": : :"memory");
249 static fastcall
void native_wbinvd(void)
251 asm volatile("wbinvd": : :"memory");
254 static fastcall
unsigned long long native_read_msr(unsigned int msr
, int *err
)
256 unsigned long long val
;
258 asm volatile("2: rdmsr ; xorl %0,%0\n"
260 ".section .fixup,\"ax\"\n\t"
261 "3: movl %3,%0 ; jmp 1b\n\t"
263 ".section __ex_table,\"a\"\n"
267 : "=r" (*err
), "=A" (val
)
268 : "c" (msr
), "i" (-EFAULT
));
273 static fastcall
int native_write_msr(unsigned int msr
, unsigned long long val
)
276 asm volatile("2: wrmsr ; xorl %0,%0\n"
278 ".section .fixup,\"ax\"\n\t"
279 "3: movl %4,%0 ; jmp 1b\n\t"
281 ".section __ex_table,\"a\"\n"
286 : "c" (msr
), "0" ((u32
)val
), "d" ((u32
)(val
>>32)),
291 static fastcall
unsigned long long native_read_tsc(void)
293 unsigned long long val
;
294 asm volatile("rdtsc" : "=A" (val
));
298 static fastcall
unsigned long long native_read_pmc(void)
300 unsigned long long val
;
301 asm volatile("rdpmc" : "=A" (val
));
305 static fastcall
void native_load_tr_desc(void)
307 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS
*8));
310 static fastcall
void native_load_gdt(const struct Xgt_desc_struct
*dtr
)
312 asm volatile("lgdt %0"::"m" (*dtr
));
315 static fastcall
void native_load_idt(const struct Xgt_desc_struct
*dtr
)
317 asm volatile("lidt %0"::"m" (*dtr
));
320 static fastcall
void native_store_gdt(struct Xgt_desc_struct
*dtr
)
322 asm ("sgdt %0":"=m" (*dtr
));
325 static fastcall
void native_store_idt(struct Xgt_desc_struct
*dtr
)
327 asm ("sidt %0":"=m" (*dtr
));
330 static fastcall
unsigned long native_store_tr(void)
333 asm ("str %0":"=r" (tr
));
337 static fastcall
void native_load_tls(struct thread_struct
*t
, unsigned int cpu
)
339 #define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
344 static inline void native_write_dt_entry(void *dt
, int entry
, u32 entry_low
, u32 entry_high
)
346 u32
*lp
= (u32
*)((char *)dt
+ entry
*8);
351 static fastcall
void native_write_ldt_entry(void *dt
, int entrynum
, u32 low
, u32 high
)
353 native_write_dt_entry(dt
, entrynum
, low
, high
);
356 static fastcall
void native_write_gdt_entry(void *dt
, int entrynum
, u32 low
, u32 high
)
358 native_write_dt_entry(dt
, entrynum
, low
, high
);
361 static fastcall
void native_write_idt_entry(void *dt
, int entrynum
, u32 low
, u32 high
)
363 native_write_dt_entry(dt
, entrynum
, low
, high
);
366 static fastcall
void native_load_esp0(struct tss_struct
*tss
,
367 struct thread_struct
*thread
)
369 tss
->esp0
= thread
->esp0
;
371 /* This can only happen when SEP is enabled, no need to test "SEP"arately */
372 if (unlikely(tss
->ss1
!= thread
->sysenter_cs
)) {
373 tss
->ss1
= thread
->sysenter_cs
;
374 wrmsr(MSR_IA32_SYSENTER_CS
, thread
->sysenter_cs
, 0);
378 static fastcall
void native_io_delay(void)
380 asm volatile("outb %al,$0x80");
383 static fastcall
void native_flush_tlb(void)
385 __native_flush_tlb();
389 * Global pages have to be flushed a bit differently. Not a real
390 * performance problem because this does not happen often.
392 static fastcall
void native_flush_tlb_global(void)
394 __native_flush_tlb_global();
397 static fastcall
void native_flush_tlb_single(u32 addr
)
399 __native_flush_tlb_single(addr
);
402 #ifndef CONFIG_X86_PAE
403 static fastcall
void native_set_pte(pte_t
*ptep
, pte_t pteval
)
408 static fastcall
void native_set_pte_at(struct mm_struct
*mm
, u32 addr
, pte_t
*ptep
, pte_t pteval
)
413 static fastcall
void native_set_pmd(pmd_t
*pmdp
, pmd_t pmdval
)
418 #else /* CONFIG_X86_PAE */
420 static fastcall
void native_set_pte(pte_t
*ptep
, pte_t pte
)
422 ptep
->pte_high
= pte
.pte_high
;
424 ptep
->pte_low
= pte
.pte_low
;
427 static fastcall
void native_set_pte_at(struct mm_struct
*mm
, u32 addr
, pte_t
*ptep
, pte_t pte
)
429 ptep
->pte_high
= pte
.pte_high
;
431 ptep
->pte_low
= pte
.pte_low
;
434 static fastcall
void native_set_pte_present(struct mm_struct
*mm
, unsigned long addr
, pte_t
*ptep
, pte_t pte
)
438 ptep
->pte_high
= pte
.pte_high
;
440 ptep
->pte_low
= pte
.pte_low
;
443 static fastcall
void native_set_pte_atomic(pte_t
*ptep
, pte_t pteval
)
445 set_64bit((unsigned long long *)ptep
,pte_val(pteval
));
448 static fastcall
void native_set_pmd(pmd_t
*pmdp
, pmd_t pmdval
)
450 set_64bit((unsigned long long *)pmdp
,pmd_val(pmdval
));
453 static fastcall
void native_set_pud(pud_t
*pudp
, pud_t pudval
)
458 static fastcall
void native_pte_clear(struct mm_struct
*mm
, unsigned long addr
, pte_t
*ptep
)
465 static fastcall
void native_pmd_clear(pmd_t
*pmd
)
467 u32
*tmp
= (u32
*)pmd
;
472 #endif /* CONFIG_X86_PAE */
474 /* These are in entry.S */
475 extern fastcall
void native_iret(void);
476 extern fastcall
void native_irq_enable_sysexit(void);
478 static int __init
print_banner(void)
480 paravirt_ops
.banner();
483 core_initcall(print_banner
);
/* We simply declare start_kernel to be the paravirt probe of last resort. */
paravirt_probe(start_kernel);
488 struct paravirt_ops paravirt_ops
= {
489 .name
= "bare hardware",
490 .paravirt_enabled
= 0,
493 .patch
= native_patch
,
494 .banner
= default_banner
,
495 .arch_setup
= native_nop
,
496 .memory_setup
= machine_specific_memory_setup
,
497 .get_wallclock
= native_get_wallclock
,
498 .set_wallclock
= native_set_wallclock
,
499 .time_init
= time_init_hook
,
500 .init_IRQ
= native_init_IRQ
,
502 .cpuid
= native_cpuid
,
503 .get_debugreg
= native_get_debugreg
,
504 .set_debugreg
= native_set_debugreg
,
506 .read_cr0
= native_read_cr0
,
507 .write_cr0
= native_write_cr0
,
508 .read_cr2
= native_read_cr2
,
509 .write_cr2
= native_write_cr2
,
510 .read_cr3
= native_read_cr3
,
511 .write_cr3
= native_write_cr3
,
512 .read_cr4
= native_read_cr4
,
513 .read_cr4_safe
= native_read_cr4_safe
,
514 .write_cr4
= native_write_cr4
,
515 .save_fl
= native_save_fl
,
516 .restore_fl
= native_restore_fl
,
517 .irq_disable
= native_irq_disable
,
518 .irq_enable
= native_irq_enable
,
519 .safe_halt
= native_safe_halt
,
521 .wbinvd
= native_wbinvd
,
522 .read_msr
= native_read_msr
,
523 .write_msr
= native_write_msr
,
524 .read_tsc
= native_read_tsc
,
525 .read_pmc
= native_read_pmc
,
526 .load_tr_desc
= native_load_tr_desc
,
527 .set_ldt
= native_set_ldt
,
528 .load_gdt
= native_load_gdt
,
529 .load_idt
= native_load_idt
,
530 .store_gdt
= native_store_gdt
,
531 .store_idt
= native_store_idt
,
532 .store_tr
= native_store_tr
,
533 .load_tls
= native_load_tls
,
534 .write_ldt_entry
= native_write_ldt_entry
,
535 .write_gdt_entry
= native_write_gdt_entry
,
536 .write_idt_entry
= native_write_idt_entry
,
537 .load_esp0
= native_load_esp0
,
539 .set_iopl_mask
= native_set_iopl_mask
,
540 .io_delay
= native_io_delay
,
541 .const_udelay
= __const_udelay
,
543 #ifdef CONFIG_X86_LOCAL_APIC
544 .apic_write
= native_apic_write
,
545 .apic_write_atomic
= native_apic_write_atomic
,
546 .apic_read
= native_apic_read
,
548 .set_lazy_mode
= (void *)native_nop
,
550 .flush_tlb_user
= native_flush_tlb
,
551 .flush_tlb_kernel
= native_flush_tlb_global
,
552 .flush_tlb_single
= native_flush_tlb_single
,
554 .alloc_pt
= (void *)native_nop
,
555 .alloc_pd
= (void *)native_nop
,
556 .alloc_pd_clone
= (void *)native_nop
,
557 .release_pt
= (void *)native_nop
,
558 .release_pd
= (void *)native_nop
,
560 .set_pte
= native_set_pte
,
561 .set_pte_at
= native_set_pte_at
,
562 .set_pmd
= native_set_pmd
,
563 .pte_update
= (void *)native_nop
,
564 .pte_update_defer
= (void *)native_nop
,
565 #ifdef CONFIG_X86_PAE
566 .set_pte_atomic
= native_set_pte_atomic
,
567 .set_pte_present
= native_set_pte_present
,
568 .set_pud
= native_set_pud
,
569 .pte_clear
= native_pte_clear
,
570 .pmd_clear
= native_pmd_clear
,
573 .irq_enable_sysexit
= native_irq_enable_sysexit
,
576 .startup_ipi_hook
= (void *)native_nop
,
/*
 * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
 * semantics are subject to change. Hence we only do this
 * internal-only export of this, until it gets sorted out and
 * all lowlevel CPU ops used by modules are separately exported.
 */
EXPORT_SYMBOL_GPL(paravirt_ops);