/*
 * include/asm-s390/pgalloc.h
 *
 * S390 version
 * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *            Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * Derived from "include/asm-i386/pgalloc.h"
 * Copyright (C) 1994 Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

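/*
 * diag10() issues the z/VM DIAGNOSE 0x10 ("release pages") call,
 * telling the hypervisor that the page frame at addr may be reclaimed.
 */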
extern void diag10(unsigned long addr);

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */

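/*
 * An allocation order of N yields 2^N contiguous 4KB pages from
 * __get_free_pages(): the 31-bit pgd spans two pages (8KB), while on
 * 64-bit s390x the pmd and pgd each span four pages (16KB).
 */
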
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
	int i;

	if (!pgd)
		return NULL;
	if (s390_noexec) {
		pgd_t *shadow_pgd = (pgd_t *)
			__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
		struct page *page = virt_to_page(pgd);

		if (!shadow_pgd) {
			free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pgd;
	}
	for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
		pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
		pgd_clear(pgd + i);
#endif
	return pgd;
}
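
/*
 * Shadow page tables (s390_noexec): in no-exec mode every page table
 * gets a second, "shadow" copy. pgd_alloc() above parks the shadow's
 * address in the struct page of the primary table (page->lru.next);
 * the get_shadow_*() helpers are expected to read it back from there.
 * A minimal sketch of that lookup, assuming lru.next is the only
 * bookkeeping involved (the real helpers live elsewhere, e.g. in
 * asm-s390/pgtable.h):
 *
 *	static inline pgd_t *get_shadow_pgd(pgd_t *pgd)
 *	{
 *		return (pgd_t *) virt_to_page(pgd)->lru.next;
 *	}
 */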

static inline void pgd_free(pgd_t *pgd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);

	if (shadow_pgd)
		free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
	free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
	int i;

	if (!pmd)
		return NULL;
	if (s390_noexec) {
		pmd_t *shadow_pmd = (pmd_t *)
			__get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
		struct page *page = virt_to_page(pmd);

		if (!shadow_pmd) {
			free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
			return NULL;
		}
		page->lru.next = (void *) shadow_pmd;
	}
	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_clear(pmd + i);
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pmd)
		free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
	free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

#define __pmd_free_tlb(tlb, pmd)		\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)

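/*
 * Note the ordering in __pmd_free_tlb(): the mmu_gather is flushed
 * first, so no CPU can still hold a TLB entry referring to the pmd
 * page by the time pmd_free() releases it.
 */
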
static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_t *shadow_pgd = get_shadow_pgd(pgd);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);

	if (shadow_pgd && shadow_pmd)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
	pgd_populate_kernel(mm, pgd, pmd);
}
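
/*
 * A sketch of how the generic mm layer is expected to drive these
 * helpers when it finds an empty pgd entry (cf. __pmd_alloc() in
 * mm/memory.c) - illustrative only, locking elided:
 *
 *	pmd_t *new = pmd_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	pgd_populate(mm, pgd, new);
 *
 * pgd_populate() fills the shadow pgd as well, keeping the no-exec
 * copy of the page-table tree in step with the primary one.
 */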

#endif /* __s390x__ */

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
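
/*
 * Why several entries per pte page: a hardware segment-table (pmd)
 * entry covers 1MB, i.e. 256 ptes. On 31-bit s390 a 4KB page holds
 * 1024 4-byte ptes, so one pte page backs four consecutive segment
 * entries (offsets 0/256/512/768 above); on s390x it holds 512 8-byte
 * ptes, backing two entries via pmd_val()/pmd_val1() (offsets 0/256).
 */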

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_pmd(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	int i;

	if (!pte)
		return NULL;
	if (s390_noexec) {
		pte_t *shadow_pte = (pte_t *)
			__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		struct page *page = virt_to_page(pte);

		if (!shadow_pte) {
			free_page((unsigned long) pte);
			return NULL;
		}
		page->lru.next = (void *) shadow_pte;
	}
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_clear(mm, vmaddr, pte + i);
		vmaddr += PAGE_SIZE;
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

	if (pte)
		return virt_to_page(pte);
	return NULL;
}
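
/*
 * pte_alloc_one_kernel() returns a kernel virtual address for kernel
 * mappings; pte_alloc_one() wraps it and hands back the struct page,
 * which is the form the generic user fault path expects.
 */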

static inline void pte_free_kernel(pte_t *pte)
{
	pte_t *shadow_pte = get_shadow_pte(pte);

	if (shadow_pte)
		free_page((unsigned long) shadow_pte);
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	struct page *shadow_page = get_shadow_page(pte);

	if (shadow_page)
		__free_page(shadow_page);
	__free_page(pte);
}

#define __pte_free_tlb(tlb, pte)					\
({									\
	struct mmu_gather *__tlb = (tlb);				\
	struct page *__pte = (pte);					\
	struct page *shadow_page = get_shadow_page(__pte);		\
	if (shadow_page)						\
		tlb_remove_page(__tlb, shadow_page);			\
	tlb_remove_page(__tlb, __pte);					\
})
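
/*
 * As with __pmd_free_tlb(), the pages are handed to the mmu_gather
 * rather than freed directly: tlb_remove_page() defers the actual
 * free until after the TLB flush.
 */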

#endif /* _S390_PGALLOC_H */