x86/paravirt: Add _safe to the read_msr() and write_msr() PV callbacks
arch/x86/include/asm/paravirt.h
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline int paravirt_has_feature(unsigned int feature)
{
        WARN_ON_ONCE(!pv_info.paravirt_enabled);
        return (pv_info.features & feature);
}

static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
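
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * use the macros above to read and then clear DR6.  The function name is
 * hypothetical.
 */
#if 0
static void example_clear_dr6(void)
{
        unsigned long dr6;

        get_debugreg(dr6, 6);   /* dr6 = paravirt_get_debugreg(6) */
        set_debugreg(0, 6);     /* PVOP_VCALL2(pv_cpu_ops.set_debugreg, 6, 0) */
}
#endif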

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long __read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void __write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl() (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
                                          unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                          \
do {                                                    \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        val1 = (u32)_l;                                 \
        val2 = _l >> 32;                                \
} while (0)

#define wrmsr(msr, val1, val2)                          \
do {                                                    \
        paravirt_write_msr_safe(msr, val1, val2);       \
} while (0)

#define rdmsrl(msr, val)                                \
do {                                                    \
        int _err;                                       \
        val = paravirt_read_msr_safe(msr, &_err);       \
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
        wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b) paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                           \
({                                                      \
        int _err;                                       \
        u64 _l = paravirt_read_msr_safe(msr, &_err);    \
        (*a) = (u32)_l;                                 \
        (*b) = _l >> 32;                                \
        _err;                                           \
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_safe(msr, &err);
        return err;
}
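
/*
 * Illustrative sketch (not part of the original header): probing an MSR that
 * may not be present.  rdmsrl_safe() returns non-zero if the access faulted;
 * the function name and messages are hypothetical.
 */
#if 0
static void example_probe_msr(unsigned msr)
{
        u64 val;

        if (rdmsrl_safe(msr, &val))
                pr_warn("MSR 0x%x is not readable on this CPU\n", msr);
        else
                pr_info("MSR 0x%x = 0x%llx\n", msr, val);
}
#endif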

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
        return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr) ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long start,
                                    unsigned long end)
{
        PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                   pgd.pgd);

        return ret;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
{
        if (sizeof(pmdval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
                            native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                   pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud, (u64)pud.pud >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                   pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif /* CONFIG_PGTABLE_LEVELS == 4 */

#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif /* CONFIG_X86_PAE */

#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
        PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
        PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
                                phys_addr_t phys, pgprot_t flags)
{
        pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
                                                         u32 val)
{
        PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
        PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
        PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
        PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
                                                   __ticket_t ticket)
{
        PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
                                                 __ticket_t ticket)
{
        PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save some registers, but not all of them; that would be too much. We
 * clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value. This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available. It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
        asm(".pushsection .text;"                                       \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            FRAME_BEGIN                                                 \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            FRAME_END                                                   \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

static inline notrace unsigned long arch_local_save_flags(void)
{
        return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
        PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
        PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long f;

        f = arch_local_save_flags();
        arch_local_irq_disable();
        return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                                   \
        ops;                                            \
772:;                                                   \
        .pushsection .parainstructions,"a";             \
        .align algn;                                    \
        word 771b;                                      \
        .byte ptype;                                    \
        .byte 772b-771b;                                \
        .short clobbers;                                \
        .popsection


#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)         ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx
#else /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop. The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special. Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RAX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
                                          struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */