/* arch/powerpc/include/asm/book3s/64/tlbflush.h */
1#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
2#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
3
1a472c9d
AK
4#define MMU_NO_CONTEXT ~0UL
5
6
676012a6 7#include <asm/book3s/64/tlbflush-hash.h>
1a472c9d 8#include <asm/book3s/64/tlbflush-radix.h>
676012a6
AK
9
/*
 * Flush TLB entries covering the user address range [start, end) of @vma's
 * address space, dispatching to the radix or hash MMU implementation.
 */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_range(vma, start, end);
	else
		hash__flush_tlb_range(vma, start, end);
}
17
/*
 * Flush TLB entries for a kernel virtual address range [start, end),
 * dispatching to the radix or hash MMU implementation.
 */
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		radix__flush_tlb_kernel_range(start, end);
	else
		hash__flush_tlb_kernel_range(start, end);
}
25
/*
 * Flush all TLB entries for @mm on the local CPU only (no IPIs to other
 * CPUs), dispatching to the radix or hash MMU implementation.
 */
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__local_flush_tlb_mm(mm);
	else
		hash__local_flush_tlb_mm(mm);
}
32
/*
 * Flush the TLB entry for a single user page @vmaddr in @vma on the local
 * CPU only, dispatching to the radix or hash MMU implementation.
 */
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		radix__local_flush_tlb_page(vma, vmaddr);
	else
		hash__local_flush_tlb_page(vma, vmaddr);
}
40
/*
 * Flush the TLB entry for page @vmaddr in @vma without flushing the hash
 * page table. Radix has no hash table, so it uses the plain page flush;
 * hash uses its dedicated nohash variant.
 */
static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
					 unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
	else
		hash__flush_tlb_page_nohash(vma, vmaddr);
}
48
/*
 * mmu_gather TLB flush hook called at the end of a batched unmap,
 * dispatching to the radix or hash MMU implementation.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		radix__tlb_flush(tlb);
	else
		hash__tlb_flush(tlb);
}
55
56#ifdef CONFIG_SMP
/*
 * SMP: flush all TLB entries for @mm on every CPU it has run on,
 * dispatching to the radix or hash MMU implementation.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		radix__flush_tlb_mm(mm);
	else
		hash__flush_tlb_mm(mm);
}
63
/*
 * SMP: flush the TLB entry for a single user page @vmaddr in @vma across
 * CPUs, dispatching to the radix or hash MMU implementation.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		radix__flush_tlb_page(vma, vmaddr);
	else
		hash__flush_tlb_page(vma, vmaddr);
}
71#else
72#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
73#define flush_tlb_page(vma, addr) local_flush_tlb_page(vma, addr)
74#endif /* CONFIG_SMP */
a145abf1
AK
/*
 * Flush the page walk cache (PWC) for @address when a page table page is
 * freed. Only radix maintains a PWC; on hash this is a no-op.
 */
static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	/*
	 * By the time a page table page is freed, the upper/higher level
	 * entry pointing at it has already been cleared (set to none), so
	 * flushing the PWC here is safe.
	 */
	if (radix_enabled())
		radix__flush_tlb_pwc(tlb, address);
}
676012a6 90#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
/* end of file */