[MIPS] get_wchan(): remove uses of mfinfo[64]
arch/mips/kernel/setup.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000 2001, 2002 Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/utsname.h>
#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/major.h>
#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/mmzone.h>
#include <linux/pfn.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
unsigned long mips_machgroup __read_mostly = MACH_GROUP_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);
EXPORT_SYMBOL(mips_machgroup);

struct boot_mem_map boot_mem_map;

static char command_line[CL_SIZE];
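/*
 * arcs_cmdline is normally filled in by board or firmware interface code
 * before setup_arch() runs; CONFIG_CMDLINE provides the built-in default.
 * arch_mem_init() copies it into command_line below.
 */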
char arcs_cmdline[CL_SIZE] = CONFIG_CMDLINE;

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base __read_mostly = -1;
EXPORT_SYMBOL(mips_io_port_base);

/*
 * isa_slot_offset is the address where E(ISA) bus address 0 is mapped
 * for the processor.
 */
unsigned long isa_slot_offset;
EXPORT_SYMBOL(isa_slot_offset);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

void __init add_memory_region(phys_t start, phys_t size, long type)
{
        int x = boot_mem_map.nr_map;
        struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;

        /*
         * Try to merge with previous entry if any.  This is far less than
         * perfect but is sufficient for most real world cases.
         */
        if (x && prev->addr + prev->size == start && prev->type == type) {
                prev->size += size;
                return;
        }

        if (x == BOOT_MEM_MAP_MAX) {
                printk("Ooops! Too many entries in the memory map!\n");
                return;
        }

        boot_mem_map.map[x].addr = start;
        boot_mem_map.map[x].size = size;
        boot_mem_map.map[x].type = type;
        boot_mem_map.nr_map++;
}

static void __init print_memory_map(void)
{
        int i;
        const int field = 2 * sizeof(unsigned long);

        for (i = 0; i < boot_mem_map.nr_map; i++) {
                printk(" memory: %0*Lx @ %0*Lx ",
                       field, (unsigned long long) boot_mem_map.map[i].size,
                       field, (unsigned long long) boot_mem_map.map[i].addr);

                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                        printk("(usable)\n");
                        break;
                case BOOT_MEM_ROM_DATA:
                        printk("(ROM data)\n");
                        break;
                case BOOT_MEM_RESERVED:
                        printk("(reserved)\n");
                        break;
                default:
                        printk("type %lu\n", boot_mem_map.map[i].type);
                        break;
                }
        }
}

static inline void parse_cmdline_early(void)
{
        char c = ' ', *to = command_line, *from = saved_command_line;
        unsigned long start_at, mem_size;
        int len = 0;
        int usermem = 0;

        printk("Determined physical RAM map:\n");
        print_memory_map();

        for (;;) {
                /*
                 * "mem=XXX[kKmM]" defines a memory region from
                 * 0 to <XXX>, overriding the determined size.
                 * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
                 * <YYY> to <YYY>+<XXX>, overriding the determined size.
                 */
                if (c == ' ' && !memcmp(from, "mem=", 4)) {
                        if (to != command_line)
                                to--;
                        /*
                         * If a user specifies memory size, we
                         * blow away any automatically generated
                         * size.
                         */
                        if (usermem == 0) {
                                boot_mem_map.nr_map = 0;
                                usermem = 1;
                        }
                        mem_size = memparse(from + 4, &from);
                        if (*from == '@')
                                start_at = memparse(from + 1, &from);
                        else
                                start_at = 0;
                        add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
                }
                c = *(from++);
                if (!c)
                        break;
                if (CL_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        *to = '\0';

        if (usermem) {
                printk("User-defined physical RAM map:\n");
                print_memory_map();
        }
}

static inline int parse_rd_cmdline(unsigned long *rd_start, unsigned long *rd_end)
{
        /*
         * "rd_start=0xNNNNNNNN" defines the memory address of an initrd
         * "rd_size=0xNN" defines its size
         */
        unsigned long start = 0;
        unsigned long size = 0;
        unsigned long end;
        char cmd_line[CL_SIZE];
        char *start_str;
        char *size_str;
        char *tmp;

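        /*
         * Strip every "rd_start=" and "rd_size=" option out of the command
         * line while remembering the last value of each, then write the
         * remaining text back into command_line so generic option parsing
         * never sees them.
         */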
        strcpy(cmd_line, command_line);
        *command_line = 0;
        tmp = cmd_line;
        /* Ignore "rd_start=" strings in other parameters. */
        start_str = strstr(cmd_line, "rd_start=");
        if (start_str && start_str != cmd_line && *(start_str - 1) != ' ')
                start_str = strstr(start_str, " rd_start=");
        while (start_str) {
                if (start_str != cmd_line)
                        strncat(command_line, tmp, start_str - tmp);
                start = memparse(start_str + 9, &start_str);
                tmp = start_str + 1;
                start_str = strstr(start_str, " rd_start=");
        }
        if (*tmp)
                strcat(command_line, tmp);

        strcpy(cmd_line, command_line);
        *command_line = 0;
        tmp = cmd_line;
        /* Ignore "rd_size" strings in other parameters. */
        size_str = strstr(cmd_line, "rd_size=");
        if (size_str && size_str != cmd_line && *(size_str - 1) != ' ')
                size_str = strstr(size_str, " rd_size=");
        while (size_str) {
                if (size_str != cmd_line)
                        strncat(command_line, tmp, size_str - tmp);
                size = memparse(size_str + 8, &size_str);
                tmp = size_str + 1;
                size_str = strstr(size_str, " rd_size=");
        }
        if (*tmp)
                strcat(command_line, tmp);

#ifdef CONFIG_64BIT
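        /*
         * Note: a 32-bit bootloader may pass a CKSEG0 address in the
         * 0x80000000-0xffffffff range without sign-extending it to 64 bits;
         * the check below restores the upper bits so the address is valid
         * in a 64-bit kernel.
         */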
        /* HACK: Guess if the sign extension was forgotten */
        if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
                start |= 0xffffffff00000000UL;
#endif

        end = start + size;
        if (start && end) {
                *rd_start = start;
                *rd_end = end;
                return 1;
        }
        return 0;
}

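/*
 * Memory above HIGHMEM_START cannot be mapped directly by the kernel;
 * bootmem_init() below treats it as highmem and only hands pages below
 * this limit to the bootmem allocator.
 */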
#define MAXMEM          HIGHMEM_START
#define MAXMEM_PFN      PFN_DOWN(MAXMEM)

static inline void bootmem_init(void)
{
        unsigned long start_pfn;
        unsigned long reserved_end = (unsigned long)&_end;
#ifndef CONFIG_SGI_IP27
        unsigned long first_usable_pfn;
        unsigned long bootmap_size;
        int i;
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        int initrd_reserve_bootmem = 0;

        /* Board specific code should have set up initrd_start and initrd_end */
        ROOT_DEV = Root_RAM0;
        if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
                reserved_end = max(reserved_end, initrd_end);
                initrd_reserve_bootmem = 1;
        } else {
                unsigned long tmp;
                u32 *initrd_header;

                tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
                if (tmp < reserved_end)
                        tmp += PAGE_SIZE;
                initrd_header = (u32 *)tmp;
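                /*
                 * 0x494E5244 is ASCII "INRD": word 0 of the header is the
                 * magic, word 1 is the initrd size in bytes, and the image
                 * itself follows immediately after the header.
                 */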
                if (initrd_header[0] == 0x494E5244) {
                        initrd_start = (unsigned long)&initrd_header[2];
                        initrd_end = initrd_start + initrd_header[1];
                        reserved_end = max(reserved_end, initrd_end);
                        initrd_reserve_bootmem = 1;
                }
        }
#endif /* CONFIG_BLK_DEV_INITRD */

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards.
         */
        start_pfn = PFN_UP(CPHYSADDR(reserved_end));

#ifndef CONFIG_SGI_IP27
        /* Find the highest page frame number we have available. */
        max_pfn = 0;
        first_usable_pfn = -1UL;
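        /*
         * The loop below also tracks first_usable_pfn: the lowest usable
         * RAM pfn at or above start_pfn, i.e. above the kernel image and
         * any initrd reserved earlier.
         */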
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long start, end;

                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                start = PFN_UP(boot_mem_map.map[i].addr);
                end = PFN_DOWN(boot_mem_map.map[i].addr
                               + boot_mem_map.map[i].size);

                if (start >= end)
                        continue;
                if (end > max_pfn)
                        max_pfn = end;
                if (start < first_usable_pfn) {
                        if (start > start_pfn) {
                                first_usable_pfn = start;
                        } else if (end > start_pfn) {
                                first_usable_pfn = start_pfn;
                        }
                }
        }

        /*
         * Determine low and high memory ranges
         */
        max_low_pfn = max_pfn;
        if (max_low_pfn > MAXMEM_PFN) {
                max_low_pfn = MAXMEM_PFN;
#ifndef CONFIG_HIGHMEM
                /* Maximum memory usable is what is directly addressable */
                printk(KERN_WARNING "Warning only %ldMB will be used.\n",
                       MAXMEM >> 20);
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
#endif
        }

#ifdef CONFIG_HIGHMEM
        /*
         * Crude, we really should make a better attempt at detecting
         * highstart_pfn
         */
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > MAXMEM_PFN) {
                highstart_pfn = MAXMEM_PFN;
                printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                       (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
        }
#endif

        /* Initialize the boot-time allocator with low memory only. */
        bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);

        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                unsigned long curr_pfn, last_pfn, size;

                /*
                 * Reserve usable memory.
                 */
                if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
                        continue;

                /*
                 * We are rounding up the start address of usable memory:
                 */
                curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
                if (curr_pfn >= max_low_pfn)
                        continue;
                if (curr_pfn < start_pfn)
                        curr_pfn = start_pfn;

                /*
                 * ... and at the end of the usable range downwards:
                 */
                last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
                                    + boot_mem_map.map[i].size);

                if (last_pfn > max_low_pfn)
                        last_pfn = max_low_pfn;

                /*
                 * Only register lowmem part of lowmem segment with bootmem.
                 */
                size = last_pfn - curr_pfn;
                if (curr_pfn > PFN_DOWN(HIGHMEM_START))
                        continue;
                if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
                        size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
                if (!size)
                        continue;

                /*
                 * ... finally, did all the rounding and playing
                 * around just make the area go away?
                 */
                if (last_pfn <= curr_pfn)
                        continue;

                /* Register lowmem ranges */
                free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
                memory_present(0, curr_pfn, curr_pfn + size - 1);
        }

        /* Reserve the bootmap memory. */
        reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
#endif /* CONFIG_SGI_IP27 */

#ifdef CONFIG_BLK_DEV_INITRD
        initrd_below_start_ok = 1;
        if (initrd_start) {
                unsigned long initrd_size = ((unsigned char *)initrd_end) -
                        ((unsigned char *)initrd_start);
                const int width = sizeof(long) * 2;

424 printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
425 (void *)initrd_start, initrd_size);
426
427 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
428 printk("initrd extends beyond end of memory "
429 "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
                               width,
                               (unsigned long long) CPHYSADDR(initrd_end),
                               width,
                               (unsigned long long) PFN_PHYS(max_low_pfn));
                        initrd_start = initrd_end = 0;
                        initrd_reserve_bootmem = 0;
                }

                if (initrd_reserve_bootmem)
                        reserve_bootmem(CPHYSADDR(initrd_start), initrd_size);
        }
#endif /* CONFIG_BLK_DEV_INITRD */
}

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *  o parse_cmdline_early() parses the command line for mem= options which,
 *    iff detected, will override the results of the automatic detection.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *       This was rather impractical because it meant plat_mem_setup had to
 *       get away without any kind of memory allocator.  To keep old code from
 *       breaking plat_setup was just renamed to plat_mem_setup and a second
 *       platform initialization hook for anything else was introduced.
 */

extern void plat_mem_setup(void);

static void __init arch_mem_init(char **cmdline_p)
{
        /* call board setup routine */
        plat_mem_setup();

        strlcpy(command_line, arcs_cmdline, sizeof(command_line));
        strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);

        *cmdline_p = command_line;

        parse_cmdline_early();
        bootmem_init();
        sparse_init();
        paging_init();
}

static inline void resource_init(void)
{
        int i;

        if (UNCAC_BASE != IO_BASE)
                return;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext) - 1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata) - 1;

        /*
         * Request address space for all standard RAM.
         */
        for (i = 0; i < boot_mem_map.nr_map; i++) {
                struct resource *res;
                unsigned long start, end;

                start = boot_mem_map.map[i].addr;
                end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
                if (start >= MAXMEM)
                        continue;
                if (end >= MAXMEM)
                        end = MAXMEM - 1;

                res = alloc_bootmem(sizeof(struct resource));
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }

                res->start = start;
                res->end = end;

                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);

                /*
                 * We don't know which RAM region contains kernel data,
                 * so we try it repeatedly and let the resource manager
                 * test it.
                 */
                request_resource(res, &code_resource);
                request_resource(res, &data_resource);
        }
}

#undef MAXMEM
#undef MAXMEM_PFN

void __init setup_arch(char **cmdline_p)
{
        cpu_probe();
        prom_init();
        cpu_report();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        arch_mem_init(cmdline_p);

        resource_init();
#ifdef CONFIG_SMP
        plat_smp_setup();
#endif
}

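/*
 * "nofpu" on the kernel command line: clear the FPU option bit for every
 * CPU so the kernel behaves as if no hardware FPU were present (floating
 * point is then typically handled by the kernel's FPU emulator).
 */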
int __init fpu_disable(char *s)
{
        int i;

        for (i = 0; i < NR_CPUS; i++)
                cpu_data[i].options &= ~MIPS_CPU_FPU;

        return 1;
}

__setup("nofpu", fpu_disable);

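/*
 * "nodsp" on the kernel command line: clear the DSP ASE flag reported for
 * the boot CPU so the kernel treats the DSP extensions as absent.
 */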
int __init dsp_disable(char *s)
{
        cpu_data[0].ases &= ~MIPS_ASE_DSP;

        return 1;
}

__setup("nodsp", dsp_disable);