[LIB]: Make PowerPC LMB code generic so sparc64 can use it too.
arch/powerpc/kernel/crash_dump.c
/*
 * Routines for doing kexec-based kdump.
 *
 * Copyright (C) 2005, IBM Corp.
 *
 * Created by: Michael Ellerman
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */

#undef DEBUG

#include <linux/crash_dump.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>
#include <asm/kdump.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/uaccess.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

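/*
 * Descriptive note (added annotation): reserve the low memory region
 * (0 .. KDUMP_RESERVE_LIMIT) early so that nothing else is allocated over
 * the area the kdump trampolines, written by setup_kdump_trampoline()
 * below, will occupy.
 */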
void __init reserve_kdump_trampoline(void)
{
	lmb_reserve(0, KDUMP_RESERVE_LIMIT);
}

static void __init create_trampoline(unsigned long addr)
{
	/* The maximum range of a single instruction branch is the current
	 * instruction's address + (32 MB - 4) bytes. For the trampoline we
	 * need to branch to current address + 32 MB. So we insert a nop at
	 * the trampoline address, then the next instruction (+ 4 bytes)
	 * does a branch to (32 MB - 4). The net effect is that when we
	 * branch to "addr" we jump to ("addr" + 32 MB). Although it requires
	 * two instructions it doesn't require any registers.
	 */
	create_instruction(addr, 0x60000000); /* nop */
	create_branch(addr + 4, addr + PHYSICAL_START, 0);
}

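/*
 * Descriptive note (added annotation): fill the exception vector region
 * between KDUMP_TRAMPOLINE_START and KDUMP_TRAMPOLINE_END with trampolines,
 * one every 8 bytes, so that exceptions taken through the old vectors at
 * physical address 0 branch up into this kernel's copy running at
 * PHYSICAL_START.
 */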
void __init setup_kdump_trampoline(void)
{
	unsigned long i;

	DBG(" -> setup_kdump_trampoline()\n");

	for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) {
		create_trampoline(i);
	}

#ifdef CONFIG_PPC_PSERIES
	create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START);
	create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START);
#endif /* CONFIG_PPC_PSERIES */

	DBG(" <- setup_kdump_trampoline()\n");
}

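/*
 * Descriptive note (added annotation): "elfcorehdr=" is added to the kdump
 * kernel's command line (typically by kexec-tools) and carries the physical
 * address of the ELF core header describing the crashed kernel's memory;
 * the /proc/vmcore code picks it up via elfcorehdr_addr.
 */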
#ifdef CONFIG_PROC_VMCORE
static int __init parse_elfcorehdr(char *p)
{
	if (p)
		elfcorehdr_addr = memparse(p, &p);

	return 1;
}
__setup("elfcorehdr=", parse_elfcorehdr);
#endif

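/*
 * Descriptive note (added annotation): "savemaxmem=" tells the kdump kernel
 * how much memory the crashed kernel was using; saved_max_pfn records its
 * highest page frame number so dump readers know where the old kernel's
 * memory ends.
 */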
static int __init parse_savemaxmem(char *p)
{
	if (p)
		saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1;

	return 1;
}
__setup("savemaxmem=", parse_savemaxmem);

/**
 * copy_oldmem_page - copy one page from "oldmem"
 * @pfn: page frame number to be copied
 * @buf: target memory address for the copy; this can be in kernel address
 *	space or user address space (see @userbuf)
 * @csize: number of bytes to copy
 * @offset: offset in bytes into the page (based on pfn) to begin the copy
 * @userbuf: if set, @buf is in user address space, use copy_to_user(),
 *	otherwise @buf is in kernel address space, use memcpy().
 *
 * Copy a page from "oldmem". For this page, there is no pte mapped
 * in the current kernel. We stitch up a pte, similar to kmap_atomic.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);

	if (userbuf) {
		if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, (vaddr + offset), csize);

	iounmap(vaddr);
	return csize;
}
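
/*
 * Usage sketch (illustration only, not part of this file): the generic
 * /proc/vmcore read path is the expected caller, pulling the dump out one
 * page at a time, roughly like the hypothetical helper below.
 *
 *	static ssize_t read_oldmem_example(char *buf, size_t count,
 *					   u64 *ppos, int userbuf)
 *	{
 *		unsigned long pfn = (unsigned long)(*ppos >> PAGE_SHIFT);
 *		unsigned long offset = (unsigned long)(*ppos & (PAGE_SIZE - 1));
 *		size_t nr = count;
 *
 *		if (nr > PAGE_SIZE - offset)
 *			nr = PAGE_SIZE - offset;
 *
 *		return copy_oldmem_page(pfn, buf, nr, offset, userbuf);
 *	}
 */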