Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/s390/mm/init.c | |
3 | * | |
4 | * S390 version | |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | |
6 | * Author(s): Hartmut Penner (hp@de.ibm.com) | |
7 | * | |
8 | * Derived from "arch/i386/mm/init.c" | |
9 | * Copyright (C) 1995 Linus Torvalds | |
10 | */ | |
11 | ||
1da177e4 LT |
12 | #include <linux/signal.h> |
13 | #include <linux/sched.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/errno.h> | |
16 | #include <linux/string.h> | |
17 | #include <linux/types.h> | |
18 | #include <linux/ptrace.h> | |
19 | #include <linux/mman.h> | |
20 | #include <linux/mm.h> | |
21 | #include <linux/swap.h> | |
22 | #include <linux/smp.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/pagemap.h> | |
25 | #include <linux/bootmem.h> | |
d882b172 | 26 | #include <linux/pfn.h> |
028d9b3c | 27 | #include <linux/poison.h> |
2b67fc46 | 28 | #include <linux/initrd.h> |
1da177e4 LT |
29 | #include <asm/processor.h> |
30 | #include <asm/system.h> | |
31 | #include <asm/uaccess.h> | |
32 | #include <asm/pgtable.h> | |
33 | #include <asm/pgalloc.h> | |
34 | #include <asm/dma.h> | |
35 | #include <asm/lowcore.h> | |
36 | #include <asm/tlb.h> | |
37 | #include <asm/tlbflush.h> | |
d882b172 | 38 | #include <asm/sections.h> |
1da177e4 LT |
39 | |
/* Per-cpu state for the generic mmu_gather (TLB shootdown batching) code. */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Kernel top-level page table; page-aligned as required for the ASCE origin. */
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((__aligned__(PAGE_SIZE)));
/* Shared zero page (cleared in mem_init()); page-aligned. */
char empty_zero_page[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
44 | ||
1da177e4 LT |
45 | void show_mem(void) |
46 | { | |
c1bb7f31 HC |
47 | unsigned long i, total = 0, reserved = 0; |
48 | unsigned long shared = 0, cached = 0; | |
49 | unsigned long flags; | |
0b2b6e1d | 50 | struct page *page; |
c1bb7f31 | 51 | pg_data_t *pgdat; |
1da177e4 | 52 | |
be2864b5 HC |
53 | printk("Mem-info:\n"); |
54 | show_free_areas(); | |
c1bb7f31 HC |
55 | for_each_online_pgdat(pgdat) { |
56 | pgdat_resize_lock(pgdat, &flags); | |
57 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | |
58 | if (!pfn_valid(pgdat->node_start_pfn + i)) | |
59 | continue; | |
60 | page = pfn_to_page(pgdat->node_start_pfn + i); | |
61 | total++; | |
62 | if (PageReserved(page)) | |
63 | reserved++; | |
64 | else if (PageSwapCache(page)) | |
65 | cached++; | |
66 | else if (page_count(page)) | |
67 | shared += page_count(page) - 1; | |
68 | } | |
69 | pgdat_resize_unlock(pgdat, &flags); | |
be2864b5 | 70 | } |
c1bb7f31 HC |
71 | printk("%ld pages of RAM\n", total); |
72 | printk("%ld reserved pages\n", reserved); | |
73 | printk("%ld pages shared\n", shared); | |
74 | printk("%ld pages swap cached\n", cached); | |
1da177e4 LT |
75 | } |
76 | ||
1da177e4 LT |
/*
 * paging_init() sets up the kernel page tables and enables address
 * translation.
 *
 * NOTE(review): the statement order is significant — the page table is
 * built and the ASCE composed before the control registers are loaded
 * and translation is switched on; do not reorder.
 */
void __init paging_init(void)
{
	/* System-mask value for the ssm below — presumably the DAT-enable
	 * bit; confirm against the z/Architecture Principles of Operation. */
	static const int ssm_mask = 0x04000000L;
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type;

	/* swapper_pg_dir becomes the root of the kernel address space. */
	init_mm.pgd = swapper_pg_dir;
	S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK;
#ifdef CONFIG_64BIT
	/* A three level page table (4TB) is enough for the kernel space. */
	S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	pgd_type = _REGION3_ENTRY_EMPTY;
#else
	S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH;
	pgd_type = _SEGMENT_ENTRY_EMPTY;
#endif
	/* Fill all 2048 top-level entries with the empty entry value. */
	clear_table((unsigned long *) init_mm.pgd, pgd_type,
		    sizeof(unsigned long)*2048);
	vmem_map_init();

	/* enable virtual mapping in kernel mode */
	/* Load the kernel ASCE into control registers 1, 7 and 13, then
	 * set the system mask to turn translation on. */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	__raw_local_irq_ssm(ssm_mask);

	/* Register memory with sparsemem and size the allocator zones. */
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
1da177e4 LT |
115 | |
116 | void __init mem_init(void) | |
117 | { | |
118 | unsigned long codesize, reservedpages, datasize, initsize; | |
119 | ||
120 | max_mapnr = num_physpages = max_low_pfn; | |
121 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | |
122 | ||
123 | /* clear the zero-page */ | |
124 | memset(empty_zero_page, 0, PAGE_SIZE); | |
125 | ||
45e576b1 MS |
126 | /* Setup guest page hinting */ |
127 | cmma_init(); | |
128 | ||
1da177e4 LT |
129 | /* this will put all low memory onto the freelists */ |
130 | totalram_pages += free_all_bootmem(); | |
131 | ||
132 | reservedpages = 0; | |
133 | ||
134 | codesize = (unsigned long) &_etext - (unsigned long) &_text; | |
135 | datasize = (unsigned long) &_edata - (unsigned long) &_etext; | |
136 | initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; | |
137 | printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n", | |
138 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | |
139 | max_mapnr << (PAGE_SHIFT-10), | |
140 | codesize >> 10, | |
141 | reservedpages << (PAGE_SHIFT-10), | |
142 | datasize >>10, | |
143 | initsize >> 10); | |
d882b172 | 144 | printk("Write protected kernel read-only data: %#lx - %#lx\n", |
162e006e HC |
145 | (unsigned long)&_stext, |
146 | PFN_ALIGN((unsigned long)&_eshared) - 1); | |
1da177e4 LT |
147 | } |
148 | ||
2485579b HC |
149 | #ifdef CONFIG_DEBUG_PAGEALLOC |
/*
 * DEBUG_PAGEALLOC support: map or unmap a range of pages in the kernel
 * identity mapping so that accesses to freed pages fault immediately.
 *
 * @page:     first page of the range
 * @numpages: number of pages to (un)map
 * @enable:   non-zero to map the pages read-write, zero to unmap them
 */
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long address;
	int i;

	for (i = 0; i < numpages; i++) {
		/* Walk the kernel page table down to this page's pte. */
		address = page_to_phys(page + i);
		pgd = pgd_offset_k(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		pte = pte_offset_kernel(pmd, address);
		if (!enable) {
			/* Unmap: invalidate the pte and flush the TLB entry. */
			ptep_invalidate(&init_mm, address, pte);
			continue;
		}
		/* Map: install a read-write pte for the page. */
		*pte = mk_pte_phys(address, __pgprot(_PAGE_TYPE_RW));
		/* Flush cpu write queue. */
		mb();
	}
}
174 | #endif | |
175 | ||
1da177e4 LT |
176 | void free_initmem(void) |
177 | { | |
178 | unsigned long addr; | |
179 | ||
180 | addr = (unsigned long)(&__init_begin); | |
181 | for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) { | |
182 | ClearPageReserved(virt_to_page(addr)); | |
7835e98b | 183 | init_page_count(virt_to_page(addr)); |
028d9b3c | 184 | memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE); |
1da177e4 LT |
185 | free_page(addr); |
186 | totalram_pages++; | |
187 | } | |
188 | printk ("Freeing unused kernel memory: %ldk freed\n", | |
189 | ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10); | |
190 | } | |
191 | ||
192 | #ifdef CONFIG_BLK_DEV_INITRD | |
193 | void free_initrd_mem(unsigned long start, unsigned long end) | |
194 | { | |
195 | if (start < end) | |
196 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | |
197 | for (; start < end; start += PAGE_SIZE) { | |
198 | ClearPageReserved(virt_to_page(start)); | |
7835e98b | 199 | init_page_count(virt_to_page(start)); |
1da177e4 LT |
200 | free_page(start); |
201 | totalram_pages++; | |
202 | } | |
203 | } | |
204 | #endif |