[deliverable/linux.git] include/asm-x86_64/pgalloc.h
#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
#include <linux/quicklist.h>

#define QUICK_PGD 0	/* We preserve special mappings over free */
#define QUICK_PT 1	/* Other page table pages that are zero on free */

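/*
 * Illustrative sketch (not part of the original header, assuming the
 * generic <linux/quicklist.h> API): quicklist_alloc() runs the
 * constructor only for pages that come fresh from the page allocator,
 * and the destructor is deferred until quicklist_trim() actually
 * releases a cached page.  That deferral is what lets QUICK_PGD pages
 * keep their special mappings across free/alloc cycles.
 */
static inline void quicklist_lifecycle_sketch(void)	/* hypothetical */
{
	void *page = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);

	if (page)
		/* Page is cached per-CPU; no destructor runs here. */
		quicklist_free(QUICK_PT, NULL, page);
}
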
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))

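/*
 * Sketch of what the populate macros compute (illustration only): an
 * upper-level entry is simply the physical address of the next-level
 * table OR'ed with the _PAGE_TABLE permission bits.  __pa() is valid
 * here because the tables come from the kernel's linear mapping.
 */
static inline pmd_t pmd_entry_sketch(pte_t *pte)	/* hypothetical */
{
	return __pmd(_PAGE_TABLE | __pa(pte));
}
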
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}

static inline void pmd_free(pmd_t *pmd)
{
	/* Page table pages are page-sized and page-aligned. */
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	quicklist_free(QUICK_PT, NULL, pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	quicklist_free(QUICK_PT, NULL, pud);
}

/*
 * Every pgd in the system is kept on pgd_list so that updates to the
 * kernel portion of the address space can be propagated to all of
 * them.
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_add(&page->lru, &pgd_list);
	spin_unlock(&pgd_lock);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_del(&page->lru);
	spin_unlock(&pgd_lock);
}

static inline void pgd_ctor(void *x)
{
	unsigned boundary;
	pgd_t *pgd = x;
	struct page *page = virt_to_page(pgd);

	/*
	 * Copy kernel pointers in from init.  Everything above
	 * pgd_index(__PAGE_OFFSET) maps kernel space and is shared by
	 * all processes via init_level4_pgt.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));

	spin_lock(&pgd_lock);
	list_add(&page->lru, &pgd_list);
	spin_unlock(&pgd_lock);
}

static inline void pgd_dtor(void *x)
{
	pgd_t *pgd = x;
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	list_del(&page->lru);
	spin_unlock(&pgd_lock);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)quicklist_alloc(QUICK_PGD,
		GFP_KERNEL|__GFP_REPEAT, pgd_ctor);
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	quicklist_free(QUICK_PGD, pgd_dtor, pgd);
}

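/*
 * Lifecycle sketch (illustration only): pgd_ctor() runs when a pgd page
 * first comes from the page allocator, so a pgd recycled through the
 * QUICK_PGD quicklist already has its kernel half populated and is
 * already on pgd_list; pgd_dtor() only runs once quicklist_trim()
 * finally returns the page.
 */
static inline void pgd_cycle_sketch(struct mm_struct *mm)	/* hypothetical */
{
	pgd_t *pgd = pgd_alloc(mm);

	if (pgd)
		pgd_free(pgd);	/* page stays cached on QUICK_PGD */
}
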
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	void *p = (void *)quicklist_alloc(QUICK_PT, GFP_KERNEL|__GFP_REPEAT, NULL);

	if (!p)
		return NULL;
	return virt_to_page(p);
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	quicklist_free(QUICK_PT, NULL, pte);
}

static inline void pte_free(struct page *pte)
{
	quicklist_free_page(QUICK_PT, NULL, pte);
}

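/*
 * Sketch of the usual caller pattern, modelled loosely on __pte_alloc()
 * in mm/memory.c of this era (illustration only; the caller is assumed
 * to hold the appropriate page table lock): allocate the pte page, then
 * install it only if nobody raced in first.
 */
static inline int pte_install_sketch(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long address)	/* hypothetical */
{
	struct page *new = pte_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;
	if (pmd_none(*pmd))
		pmd_populate(mm, pmd, new);
	else
		pte_free(new);	/* lost the race; give the page back */
	return 0;
}
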
#define __pte_free_tlb(tlb,pte) quicklist_free_page(QUICK_PT, NULL, (pte))

#define __pmd_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x))
#define __pud_free_tlb(tlb,x) quicklist_free(QUICK_PT, NULL, (x))

static inline void check_pgt_cache(void)
{
	quicklist_trim(QUICK_PGD, pgd_dtor, 25, 16);
	quicklist_trim(QUICK_PT, NULL, 25, 16);
}
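/*
 * Note (illustrative, based on the generic quicklist implementation):
 * the 25/16 arguments above are quicklist_trim()'s min_pages/max_free
 * parameters.  Each call shrinks the per-CPU cache back toward 25
 * pages, releasing at most 16 pages at a time; check_pgt_cache() is
 * typically run from the idle loop, so capping the batch keeps that
 * path cheap.
 */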
#endif /* _X86_64_PGALLOC_H */