/*
 *	fs/proc/vmcore.c - Interface for accessing the crash
 *	dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

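/*
 * register_oldmem_pfn_is_ram - install the pfn_is_ram() backend.
 * Only one callback can be registered at a time; a second attempt fails
 * with -EBUSY.  A ballooning hypervisor backend, for example, can use
 * this to report which pfns hold no data.
 */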
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

/* Read @count bytes from the oldmem device, starting at *@ppos.  The read
 * may span several pages; @userbuf says whether @buf is a user-space
 * pointer.
 */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/* Maps vmcore file offset to respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
				struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}

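/*
 * The exported file, as assembled by the init-time helpers below, is
 * laid out as:
 *
 *	+--------------------+  offset 0
 *	| ELF header         |
 *	+--------------------+
 *	| program headers    |  merged PT_NOTE first, then the PT_LOADs
 *	+--------------------+
 *	| note segments      |
 *	+--------------------+
 *	| memory chunks      |  one per PT_LOAD, in vmcore_list order
 *	+--------------------+
 */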
/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;

	while (buflen) {
		tsz = min_t(size_t, buflen, PAGE_SIZE - (start & ~PAGE_MASK));

		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;

		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
	}
	return acc;
}

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

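/* The vmcore file size is the ELF header, plus all program headers, plus
 * the p_memsz of every segment (same layout in the ELF32 variant below).
 */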
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

/* Merges all the PT_NOTE headers (typically one per crashed CPU, plus any
 * extra notes) into a single PT_NOTE, so the exported core has one
 * contiguous notes segment.
 */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
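			/* An ELF note is its Nhdr plus name and desc,
			 * each padded to 4-byte alignment.
			 */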
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
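			/* Same 4-byte note padding as in the ELF64
			 * loop above.
			 */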
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also
 * update the offset fields of the exported program headers.
 */
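/* Note: in the headers prepared for the crash kernel (by the kexec tools),
 * each PT_LOAD's p_offset initially holds the segment's physical address;
 * it is rewritten here to the segment's offset within the exported file.
 */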
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip ELF header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip ELF header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

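/*
 * Read the crashed kernel's ELF64 headers and build the vmcore view:
 * sanity-check the ELF header, copy all headers into elfcorebuf, merge
 * the PT_NOTE headers, add the PT_LOAD chunks to vmcore_list, and assign
 * file offsets.
 */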
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}
	return 0;
}

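/*
 * Once initialized, user space captures the dump simply by reading
 * /proc/vmcore, e.g.:
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * or with a dump-filtering tool such as makedumpfile.
 */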
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		remove_proc_entry(proc_vmcore->name, proc_vmcore->parent);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	kfree(elfcorebuf);
	elfcorebuf = NULL;
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);