Merge ../linus
[deliverable/linux.git] / arch / m68knommu / mm / init.c
1 /*
2 * linux/arch/m68knommu/mm/init.c
3 *
4 * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
5 * Kenneth Albanowski <kjahds@kjahds.com>,
6 * Copyright (C) 2000 Lineo, Inc. (www.lineo.com)
7 *
8 * Based on:
9 *
10 * linux/arch/m68k/mm/init.c
11 *
12 * Copyright (C) 1995 Hamish Macdonald
13 *
14 * JAN/1999 -- hacked to support ColdFire (gerg@snapgear.com)
15 * DEC/2000 -- linux 2.4 support <davidm@snapgear.com>
16 */
17
18 #include <linux/config.h>
19 #include <linux/signal.h>
20 #include <linux/sched.h>
21 #include <linux/kernel.h>
22 #include <linux/errno.h>
23 #include <linux/string.h>
24 #include <linux/types.h>
25 #include <linux/ptrace.h>
26 #include <linux/mman.h>
27 #include <linux/mm.h>
28 #include <linux/swap.h>
29 #include <linux/init.h>
30 #include <linux/highmem.h>
31 #include <linux/pagemap.h>
32 #include <linux/bootmem.h>
33 #include <linux/slab.h>
34
35 #include <asm/setup.h>
36 #include <asm/segment.h>
37 #include <asm/page.h>
38 #include <asm/pgtable.h>
39 #include <asm/system.h>
40 #include <asm/machdep.h>
41
42 #undef DEBUG
43
44 extern void die_if_kernel(char *,struct pt_regs *,long);
45 extern void free_initmem(void);
46
47 /*
48 * BAD_PAGE is the page that is used for page faults when linux
49 * is out-of-memory. Older versions of linux just did a
50 * do_exit(), but using this instead means there is less risk
51 * for a process dying in kernel mode, possibly leaving a inode
52 * unused etc..
53 *
54 * BAD_PAGETABLE is the accompanying page-table: it is initialized
55 * to point to BAD_PAGE entries.
56 *
57 * ZERO_PAGE is a special page that is used for zero-initialized
58 * data and COW.
59 */
/* Page table whose entries all point at BAD_PAGE (see comment above). */
static unsigned long empty_bad_page_table;

/* The BAD_PAGE itself: mapped on faults when the system is out of memory. */
static unsigned long empty_bad_page;

/* Zero-filled page backing ZERO_PAGE(); zeroed in paging_init(). */
unsigned long empty_zero_page;
66 void show_mem(void)
67 {
68 unsigned long i;
69 int free = 0, total = 0, reserved = 0, shared = 0;
70 int cached = 0;
71
72 printk(KERN_INFO "\nMem-info:\n");
73 show_free_areas();
74 i = max_mapnr;
75 while (i-- > 0) {
76 total++;
77 if (PageReserved(mem_map+i))
78 reserved++;
79 else if (PageSwapCache(mem_map+i))
80 cached++;
81 else if (!page_count(mem_map+i))
82 free++;
83 else
84 shared += page_count(mem_map+i) - 1;
85 }
86 printk(KERN_INFO "%d pages of RAM\n",total);
87 printk(KERN_INFO "%d free pages\n",free);
88 printk(KERN_INFO "%d reserved pages\n",reserved);
89 printk(KERN_INFO "%d pages shared\n",shared);
90 printk(KERN_INFO "%d pages swap cached\n",cached);
91 }
92
93 extern unsigned long memory_start;
94 extern unsigned long memory_end;
95
96 /*
97 * paging_init() continues the virtual memory environment setup which
98 * was begun by the code in arch/head.S.
99 * The parameters are pointers to where to stick the starting and ending
100 * addresses of available kernel virtual memory.
101 */
void paging_init(void)
{
	/*
	 * Make sure start_mem is page aligned, otherwise bootmem and
	 * page_alloc get different views of the world.
	 */
#ifdef DEBUG
	unsigned long start_mem = PAGE_ALIGN(memory_start);
#endif
	/* End of usable RAM, rounded down to a whole page. */
	unsigned long end_mem = memory_end & PAGE_MASK;

#ifdef DEBUG
	printk (KERN_DEBUG "start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	/*
	 * Initialize the bad page table and bad page to point
	 * to a couple of allocated pages.
	 */
	empty_bad_page_table = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_bad_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
	/* ZERO_PAGE must really contain zeroes; bootmem gives no guarantee here. */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers (user data space).
	 */
	set_fs (USER_DS);

#ifdef DEBUG
	printk (KERN_DEBUG "before free_area_init\n");

	printk (KERN_DEBUG "free_area_init -> start_mem is %#lx\nvirtual_end is %#lx\n",
		start_mem, end_mem);
#endif

	{
		unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};

		/* No-MMU m68k has no DMA-limited zone: 0 >> PAGE_SHIFT is just 0. */
		zones_size[ZONE_DMA] = 0 >> PAGE_SHIFT;
		/* All RAM above PAGE_OFFSET goes into ZONE_NORMAL, in pages. */
		zones_size[ZONE_NORMAL] = (end_mem - PAGE_OFFSET) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		zones_size[ZONE_HIGHMEM] = 0;
#endif
		free_area_init(zones_size);
	}
}
150
151 void mem_init(void)
152 {
153 int codek = 0, datak = 0, initk = 0;
154 unsigned long tmp;
155 extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
156 extern unsigned int _ramend, _rambase;
157 unsigned long len = _ramend - _rambase;
158 unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
159 unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
160
161 #ifdef DEBUG
162 printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
163 #endif
164
165 end_mem &= PAGE_MASK;
166 high_memory = (void *) end_mem;
167
168 start_mem = PAGE_ALIGN(start_mem);
169 max_mapnr = num_physpages = (((unsigned long) high_memory) - PAGE_OFFSET) >> PAGE_SHIFT;
170
171 /* this will put all memory onto the freelists */
172 totalram_pages = free_all_bootmem();
173
174 codek = (&_etext - &_stext) >> 10;
175 datak = (&_ebss - &_sdata) >> 10;
176 initk = (&__init_begin - &__init_end) >> 10;
177
178 tmp = nr_free_pages() << PAGE_SHIFT;
179 printk(KERN_INFO "Memory available: %luk/%luk RAM, (%dk kernel code, %dk data)\n",
180 tmp >> 10,
181 len >> 10,
182 codek,
183 datak
184 );
185 }
186
187
#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Return the pages holding the initial ramdisk image to the page
 * allocator once the initrd is no longer needed.
 * @start, @end: kernel virtual address range of the initrd
 * (presumably page aligned by the boot code -- TODO confirm).
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	int pages = 0;
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
		pages++;
	}
	/*
	 * BUGFIX: the message claims kilobytes but the original printed
	 * the raw page count; convert pages to KiB.
	 */
	printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n",
		(int)((pages * PAGE_SIZE) >> 10));
}
#endif
202
/*
 * Free the memory occupied by the kernel's __init sections once boot
 * is complete. Only meaningful when the kernel runs from RAM
 * (CONFIG_RAMKERNEL); a ROM/flash-resident kernel has nothing to free.
 */
void
free_initmem(void)	/* (void) to match the prototype declared above */
{
#ifdef CONFIG_RAMKERNEL
	unsigned long addr;
	extern char __init_begin, __init_end;
	/*
	 * The following code should be cool even if these sections
	 * are not page aligned.
	 */
	addr = PAGE_ALIGN((unsigned long)(&__init_begin));
	/*
	 * Free only pages lying entirely inside the init section.
	 * BUGFIX: use <= so the last full page is freed when __init_end
	 * is exactly page aligned (the original '<' leaked one page in
	 * that case); a page is wholly inside iff addr + PAGE_SIZE <= end.
	 */
	for (; addr + PAGE_SIZE <= (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_NOTICE "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
			(addr - PAGE_ALIGN((long) &__init_begin)) >> 10,
			(int)(PAGE_ALIGN((unsigned long)(&__init_begin))),
			(int)(addr - PAGE_SIZE));
#endif
}
227
This page took 0.035826 seconds and 6 git commands to generate.