/* arch/x86/mm/highmem_32.c */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
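
/*
 * Illustrative sketch, not part of the original file: a typical
 * kmap()/kunmap() caller. The helper below is hypothetical and only
 * demonstrates the pairing; kmap() may sleep, so this pattern is valid
 * in process context only.
 */
static void __maybe_unused example_clear_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep waiting for a free slot */

	memset(vaddr, 0, PAGE_SIZE);	/* mapping stays valid until kunmap() */
	kunmap(page);
}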

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
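	/*
	 * Each CPU owns a private window of KM_TYPE_NR fixmap slots;
	 * type picks one slot inside this CPU's window, so two CPUs can
	 * never hand out the same atomic pte.
	 */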
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it. Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
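
/*
 * Illustrative sketch, not part of the original file: the usual
 * kmap_atomic()/kunmap_atomic() pairing. The helper and its use of
 * KM_USER0 are assumptions for demonstration; the same km_type must be
 * passed to both calls, and nothing in between may sleep.
 */
static void __maybe_unused example_copy_from_page(void *dst,
						  struct page *page,
						  size_t len)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	memcpy(dst, vaddr, len);	/* atomic section: no sleeping here */
	kunmap_atomic(vaddr, KM_USER0);
}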

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
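
/*
 * Illustrative sketch, not part of the original file: with the helpers
 * above, an atomic mapping round-trips back to its struct page whether
 * or not the page lives in highmem (lowmem falls through to
 * virt_to_page(), highmem resolves through its fixmap slot).
 */
static void __maybe_unused example_round_trip(struct page *page)
{
	void *vaddr = kmap_atomic(page, KM_USER0);

	BUG_ON(kmap_atomic_to_page(vaddr) != page);	/* must round-trip */
	kunmap_atomic(vaddr, KM_USER0);
}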

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}