/*
 * arch/arm/include/asm/tlb.h
 *
 * Copyright (C) 2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Experimentation shows that on a StrongARM, it appears to be faster
 * to use the "invalidate whole tlb" rather than "invalidate single
 * tlb" for this.
 *
 * This appears true for both the process fork+exit case, as well as
 * the munmap-large-area case.
 */
#ifndef __ASMARM_TLB_H
#define __ASMARM_TLB_H

#include <asm/cacheflush.h>

#ifndef CONFIG_MMU

#include <linux/pagemap.h>

#define tlb_flush(tlb)	((void) tlb)

#include <asm-generic/tlb.h>

#else /* !CONFIG_MMU */

#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * We need to delay page freeing for SMP as other CPUs can access pages
 * which have been removed but not yet had their TLB entries invalidated.
 * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
 * we need to apply this same delaying tactic to ensure correct operation.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
#define tlb_fast_mode(tlb)	0
#define FREE_PTE_NR		500
#else
#define tlb_fast_mode(tlb)	1
#define FREE_PTE_NR		0
#endif
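
/*
 * In "fast mode" (UP, pre-v7 cores) it is safe to free a page as soon
 * as it is unmapped: no other CPU can hold a stale TLB entry for it,
 * and the core will not speculatively refill one.  Otherwise pages are
 * batched in mmu_gather.pages and only handed back to the allocator by
 * tlb_flush_mmu(), after the TLB has actually been invalidated.
 */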

/*
 * TLB handling.  This allows us to remove pages from the page
 * tables, and efficiently handle the TLB issues.
 */
struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		fullmm;
	struct vm_area_struct	*vma;
	unsigned long		range_start;
	unsigned long		range_end;
	unsigned int		nr;
	struct page		*pages[FREE_PTE_NR];
};

DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * This is unnecessarily complex.  There are three ways the TLB shootdown
 * code is used:
 *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
 *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.
 *  2. Unmapping all vmas.  See exit_mmap().
 *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
 *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
 *  3. Unmapping argument pages.  See shift_arg_pages().
 *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
 *     tlb->vma will be NULL.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || !tlb->vma)
		flush_tlb_mm(tlb->mm);
	else if (tlb->range_end > 0) {
		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}
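
/*
 * Typical shootdown sequence for case 1 above (an illustrative sketch
 * of what the generic mm code does, not a verbatim caller):
 *
 *	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);
 *	tlb_start_vma(tlb, vma);
 *	for each pte in [start, end):
 *		tlb_remove_tlb_entry(tlb, ptep, addr);
 *		tlb_remove_page(tlb, page);
 *	tlb_end_vma(tlb, vma);			// flushes gathered range
 *	tlb_finish_mmu(tlb, start, end);	// frees batched pages
 */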

static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + PAGE_SIZE;
	}
}
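
/*
 * Worked example (illustrative): with 4K pages, removing mappings at
 * 0x8000 and then 0xa000 accumulates
 *
 *	tlb_add_flush(tlb, 0x8000);	range = [0x8000, 0x9000)
 *	tlb_add_flush(tlb, 0xa000);	range = [0x8000, 0xb000)
 *
 * so a single flush_tlb_range() later covers both pages (plus the
 * untouched page in between, which is the price of keeping one range).
 */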

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush(tlb);
	if (!tlb_fast_mode(tlb)) {
		free_pages_and_swap_cache(tlb->pages, tlb->nr);
		tlb->nr = 0;
	}
}

static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);

	tlb->mm = mm;
	tlb->fullmm = full_mm_flush;
	tlb->vma = NULL;
	tlb->nr = 0;

	return tlb;
}
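
/*
 * Note that get_cpu_var() above disables preemption, which is what
 * makes the per-CPU mmu_gather safe to use; tlb_finish_mmu() below
 * re-enables it via put_cpu_var().
 */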

static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb_flush_mmu(tlb);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	put_cpu_var(mmu_gathers);
}

/*
 * Memorize the range for the TLB flush.
 */
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
	tlb_add_flush(tlb, addr);
}

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void
tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm) {
		flush_cache_range(vma, vma->vm_start, vma->vm_end);
		tlb->vma = vma;
		tlb->range_start = TASK_SIZE;
		tlb->range_end = 0;
	}
}

static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->fullmm)
		tlb_flush(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (tlb_fast_mode(tlb)) {
		free_page_and_swap_cache(page);
	} else {
		tlb->pages[tlb->nr++] = page;
		if (tlb->nr >= FREE_PTE_NR)
			tlb_flush_mmu(tlb);
	}
}
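
/*
 * When the batch fills up (FREE_PTE_NR pages), tlb_flush_mmu() drains
 * it early: the TLB is flushed before the pages are released, so stale
 * entries are gone by the time the allocator can recycle the pages.
 */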

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
	unsigned long addr)
{
	pgtable_page_dtor(pte);
	tlb_add_flush(tlb, addr);
	tlb_remove_page(tlb, pte);
}
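
/*
 * Page table pages themselves take the same delayed-free path: the
 * address the table covered is added to the flush range and the page
 * is batched rather than freed outright, so a speculative table walk
 * cannot find it recycled before the TLB is flushed.  pmd_free() can
 * be immediate because the classic two-level ARM page tables fold the
 * pmd into the pgd.
 */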

#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)

#define tlb_migrate_finish(mm)		do { } while (0)

#endif /* CONFIG_MMU */
#endif