drivers/misc/lkdtm.c
/*
 * Kprobe module for testing crash dumps
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2006
 *
 * Author: Ankita Garg <ankita@in.ibm.com>
 *
 * This module induces system failures at predefined crashpoints to
 * evaluate the reliability of crash dumps obtained using different dumping
 * solutions.
 *
 * It is adapted from the Linux Kernel Dump Test Tool by
 * Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
 *
 * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
 *
 * See Documentation/fault-injection/provoke-crashes.txt for instructions
 */
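/*
 * Illustrative usage sketch (not part of this file's documentation; see
 * provoke-crashes.txt for the authoritative instructions). Assuming debugfs
 * is mounted at /sys/kernel/debug, a crash type can be triggered directly:
 *
 *   echo BUG > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * The directory name and the DIRECT entry come from lkdtm_module_init() and
 * crash_entries[] below; the accepted type strings are those in cp_type[].
 */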
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/buffer_head.h>
#include <linux/kprobes.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif

/*
 * Make sure our attempts to overrun the kernel stack don't trigger
 * a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
 * recurse past the end of THREAD_SIZE by default.
 */
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
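/*
 * Worked example (values are illustrative; both depend on the architecture
 * and config): with CONFIG_FRAME_WARN=1024 and THREAD_SIZE=16K this gives
 * REC_STACK_SIZE=512 and REC_NUM_DEFAULT=64, so the default recursion burns
 * roughly twice THREAD_SIZE worth of stack frames.
 */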

#define DEFAULT_COUNT 10
#define EXEC_SIZE 64

enum cname {
	CN_INVALID,
	CN_INT_HARDWARE_ENTRY,
	CN_INT_HW_IRQ_EN,
	CN_INT_TASKLET_ENTRY,
	CN_FS_DEVRW,
	CN_MEM_SWAPOUT,
	CN_TIMERADD,
	CN_SCSI_DISPATCH_CMD,
	CN_IDE_CORE_CP,
	CN_DIRECT,
};

enum ctype {
	CT_NONE,
	CT_PANIC,
	CT_BUG,
	CT_WARNING,
	CT_EXCEPTION,
	CT_LOOP,
	CT_OVERFLOW,
	CT_CORRUPT_STACK,
	CT_UNALIGNED_LOAD_STORE_WRITE,
	CT_OVERWRITE_ALLOCATION,
	CT_WRITE_AFTER_FREE,
	CT_READ_AFTER_FREE,
	CT_WRITE_BUDDY_AFTER_FREE,
	CT_READ_BUDDY_AFTER_FREE,
	CT_SOFTLOCKUP,
	CT_HARDLOCKUP,
	CT_SPINLOCKUP,
	CT_HUNG_TASK,
	CT_EXEC_DATA,
	CT_EXEC_STACK,
	CT_EXEC_KMALLOC,
	CT_EXEC_VMALLOC,
	CT_EXEC_USERSPACE,
	CT_ACCESS_USERSPACE,
	CT_WRITE_RO,
	CT_WRITE_RO_AFTER_INIT,
	CT_WRITE_KERN,
	CT_WRAP_ATOMIC
};

static char* cp_name[] = {
	"INT_HARDWARE_ENTRY",
	"INT_HW_IRQ_EN",
	"INT_TASKLET_ENTRY",
	"FS_DEVRW",
	"MEM_SWAPOUT",
	"TIMERADD",
	"SCSI_DISPATCH_CMD",
	"IDE_CORE_CP",
	"DIRECT",
};

static char* cp_type[] = {
	"PANIC",
	"BUG",
	"WARNING",
	"EXCEPTION",
	"LOOP",
	"OVERFLOW",
	"CORRUPT_STACK",
	"UNALIGNED_LOAD_STORE_WRITE",
	"OVERWRITE_ALLOCATION",
	"WRITE_AFTER_FREE",
	"READ_AFTER_FREE",
	"WRITE_BUDDY_AFTER_FREE",
	"READ_BUDDY_AFTER_FREE",
	"SOFTLOCKUP",
	"HARDLOCKUP",
	"SPINLOCKUP",
	"HUNG_TASK",
	"EXEC_DATA",
	"EXEC_STACK",
	"EXEC_KMALLOC",
	"EXEC_VMALLOC",
	"EXEC_USERSPACE",
	"ACCESS_USERSPACE",
	"WRITE_RO",
	"WRITE_RO_AFTER_INIT",
	"WRITE_KERN",
	"WRAP_ATOMIC"
};

static struct jprobe lkdtm;

static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);

static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;

static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);

static u8 data_area[EXEC_SIZE];

static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;

module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
		"hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
		"crash point is to be hit to trigger action");
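/*
 * Example only (module and parameter names as defined above): loading with
 *   insmod lkdtm.ko cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG cpoint_count=10
 * arms the INT_HARDWARE_ENTRY crash point so that its 10th hit calls BUG();
 * the names must match entries in cp_name[] and cp_type[] respectively.
 */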

static unsigned int jp_do_irq(unsigned int irq)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static irqreturn_t jp_handle_irq_event(unsigned int irq,
				       struct irqaction *action)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static void jp_tasklet_action(struct softirq_action *a)
{
	lkdtm_handler();
	jprobe_return();
}

static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
{
	lkdtm_handler();
	jprobe_return();
}

struct scan_control;

static unsigned long jp_shrink_inactive_list(unsigned long max_scan,
					     struct zone *zone,
					     struct scan_control *sc)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim,
			    const enum hrtimer_mode mode)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}

#ifdef CONFIG_IDE
static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
				struct block_device *bdev, unsigned int cmd,
				unsigned long arg)
{
	lkdtm_handler();
	jprobe_return();
	return 0;
}
#endif

/* Return the crash type (one-based index into cp_type[]) or CT_NONE if the name is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
		if (!strcmp(what, cp_type[i]))
			return i + 1;
	}

	return CT_NONE;
}

static const char *cp_type_to_str(enum ctype type)
{
	if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
		return "None";

	return cp_type[type - 1];
}

static const char *cp_name_to_str(enum cname name)
{
	if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
		return "INVALID";

	return cp_name[name - 1];
}


static int lkdtm_parse_commandline(void)
{
	int i;
	unsigned long flags;

	if (cpoint_count < 1 || recur_count < 1)
		return -EINVAL;

	spin_lock_irqsave(&count_lock, flags);
	count = cpoint_count;
	spin_unlock_irqrestore(&count_lock, flags);

	/* No special parameters */
	if (!cpoint_type && !cpoint_name)
		return 0;

	/* cpoint_type and cpoint_name must be set together; only one is invalid */
	if (!cpoint_type || !cpoint_name)
		return -EINVAL;

	cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
	if (cptype == CT_NONE)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
		if (!strcmp(cpoint_name, cp_name[i])) {
			cpoint = i + 1;
			return 0;
		}
	}

	/* Could not find a valid crash point */
	return -EINVAL;
}

static int recursive_loop(int remaining)
{
	char buf[REC_STACK_SIZE];

	/* Make sure compiler does not optimize this away. */
	memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
	if (!remaining)
		return 0;
	else
		return recursive_loop(remaining - 1);
}

static void do_nothing(void)
{
	return;
}

/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
	pr_info("do_overwritten wasn't overwritten!\n");
	return;
}

static noinline void corrupt_stack(void)
{
	/* Use default char array length that triggers stack protection. */
	char data[8];

	memset((void *)data, 0, 64);
}

static void noinline execute_location(void *dst)
{
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	memcpy(dst, do_nothing, EXEC_SIZE);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}

static void execute_user_location(void *dst)
{
	/* Intentionally crossing kernel/user memory boundary. */
	void (*func)(void) = dst;

	pr_info("attempting ok execution at %p\n", do_nothing);
	do_nothing();

	if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
		return;
	flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
	pr_info("attempting bad execution at %p\n", func);
	func();
}

static void lkdtm_do_action(enum ctype which)
{
	switch (which) {
	case CT_PANIC:
		panic("dumptest");
		break;
	case CT_BUG:
		BUG();
		break;
	case CT_WARNING:
		WARN_ON(1);
		break;
	case CT_EXCEPTION:
		*((int *) 0) = 0;
		break;
	case CT_LOOP:
		for (;;)
			;
		break;
	case CT_OVERFLOW:
		(void) recursive_loop(recur_count);
		break;
	case CT_CORRUPT_STACK:
		corrupt_stack();
		break;
	case CT_UNALIGNED_LOAD_STORE_WRITE: {
		static u8 data[5] __attribute__((aligned(4))) = {1, 2,
				3, 4, 5};
		u32 *p;
		u32 val = 0x12345678;

		p = (u32 *)(data + 1);
		if (*p == 0)
			val = 0x87654321;
		*p = val;
		break;
	}
	case CT_OVERWRITE_ALLOCATION: {
		size_t len = 1020;
		u32 *data = kmalloc(len, GFP_KERNEL);

		data[1024 / sizeof(u32)] = 0x12345678;
		kfree(data);
		break;
	}
	case CT_WRITE_AFTER_FREE: {
		int *base, *again;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
		pr_info("Attempting bad write to freed memory at %p\n",
			&base[offset]);
		kfree(base);
		base[offset] = 0x0abcdef0;
		/* Attempt to notice the overwrite. */
		again = kmalloc(len, GFP_KERNEL);
		kfree(again);
		if (again != base)
			pr_info("Hmm, didn't get the same memory range.\n");

		break;
	}
	case CT_READ_AFTER_FREE: {
		int *base, *val, saw;
		size_t len = 1024;
		/*
		 * The slub allocator uses the first word to store the free
		 * pointer in some configurations. Use the middle of the
		 * allocation to avoid running into the freelist
		 */
		size_t offset = (len / sizeof(*base)) / 2;

		base = kmalloc(len, GFP_KERNEL);
		if (!base)
			break;

		val = kmalloc(len, GFP_KERNEL);
		if (!val) {
			kfree(base);
			break;
		}

		*val = 0x12345678;
		base[offset] = *val;
		pr_info("Value in memory before free: %x\n", base[offset]);

		kfree(base);

		pr_info("Attempting bad read from freed memory\n");
		saw = base[offset];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Memory was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_WRITE_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);
		if (!p)
			break;
		pr_info("Writing to the buddy page before free\n");
		memset((void *)p, 0x3, PAGE_SIZE);
		free_page(p);
		schedule();
		pr_info("Attempting bad write to the buddy page after free\n");
		memset((void *)p, 0x78, PAGE_SIZE);
		/* Attempt to notice the overwrite. */
		p = __get_free_page(GFP_KERNEL);
		free_page(p);
		schedule();

		break;
	}
	case CT_READ_BUDDY_AFTER_FREE: {
		unsigned long p = __get_free_page(GFP_KERNEL);
		int saw, *val;
		int *base;

		if (!p)
			break;

		val = kmalloc(1024, GFP_KERNEL);
		if (!val) {
			free_page(p);
			break;
		}

		base = (int *)p;

		*val = 0x12345678;
		base[0] = *val;
		pr_info("Value in memory before free: %x\n", base[0]);
		free_page(p);
		pr_info("Attempting to read from freed memory\n");
		saw = base[0];
		if (saw != *val) {
			/* Good! Poisoning happened, so declare a win. */
			pr_info("Memory correctly poisoned (%x)\n", saw);
			BUG();
		}
		pr_info("Buddy page was not poisoned\n");

		kfree(val);
		break;
	}
	case CT_SOFTLOCKUP:
		preempt_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_HARDLOCKUP:
		local_irq_disable();
		for (;;)
			cpu_relax();
		break;
	case CT_SPINLOCKUP:
		/* Must be called twice to trigger. */
		spin_lock(&lock_me_up);
		/* Let sparse know we intended to exit holding the lock. */
		__release(&lock_me_up);
		break;
	case CT_HUNG_TASK:
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
		break;
	case CT_EXEC_DATA:
		execute_location(data_area);
		break;
	case CT_EXEC_STACK: {
		u8 stack_area[EXEC_SIZE];
		execute_location(stack_area);
		break;
	}
	case CT_EXEC_KMALLOC: {
		u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
		execute_location(kmalloc_area);
		kfree(kmalloc_area);
		break;
	}
	case CT_EXEC_VMALLOC: {
		u32 *vmalloc_area = vmalloc(EXEC_SIZE);
		execute_location(vmalloc_area);
		vfree(vmalloc_area);
		break;
	}
	case CT_EXEC_USERSPACE: {
		unsigned long user_addr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}
		execute_user_location((void *)user_addr);
		vm_munmap(user_addr, PAGE_SIZE);
		break;
	}
	case CT_ACCESS_USERSPACE: {
		unsigned long user_addr, tmp = 0;
		unsigned long *ptr;

		user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
				    PROT_READ | PROT_WRITE | PROT_EXEC,
				    MAP_ANONYMOUS | MAP_PRIVATE, 0);
		if (user_addr >= TASK_SIZE) {
			pr_warn("Failed to allocate user memory\n");
			return;
		}

		if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
			pr_warn("copy_to_user failed\n");
			vm_munmap(user_addr, PAGE_SIZE);
			return;
		}

		ptr = (unsigned long *)user_addr;

		pr_info("attempting bad read at %p\n", ptr);
		tmp = *ptr;
		tmp += 0xc0dec0de;

		pr_info("attempting bad write at %p\n", ptr);
		*ptr = tmp;

		vm_munmap(user_addr, PAGE_SIZE);

		break;
	}
	case CT_WRITE_RO: {
		/* Explicitly cast away "const" for the test. */
		unsigned long *ptr = (unsigned long *)&rodata;

		pr_info("attempting bad rodata write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_RO_AFTER_INIT: {
		unsigned long *ptr = &ro_after_init;

		/*
		 * Verify we were written to during init. Since an Oops
		 * is considered a "success", a failure is to just skip the
		 * real test.
		 */
		if ((*ptr & 0xAA) != 0xAA) {
			pr_info("%p was NOT written during init!?\n", ptr);
			break;
		}

		pr_info("attempting bad ro_after_init write at %p\n", ptr);
		*ptr ^= 0xabcd1234;

		break;
	}
	case CT_WRITE_KERN: {
		size_t size;
		unsigned char *ptr;

		size = (unsigned long)do_overwritten -
		       (unsigned long)do_nothing;
		ptr = (unsigned char *)do_overwritten;

		pr_info("attempting bad %zu byte write at %p\n", size, ptr);
		memcpy(ptr, (unsigned char *)do_nothing, size);
		flush_icache_range((unsigned long)ptr,
				   (unsigned long)(ptr + size));

		do_overwritten();
		break;
	}
	case CT_WRAP_ATOMIC: {
		atomic_t under = ATOMIC_INIT(INT_MIN);
		atomic_t over = ATOMIC_INIT(INT_MAX);

		pr_info("attempting atomic underflow\n");
		atomic_dec(&under);
		pr_info("attempting atomic overflow\n");
		atomic_inc(&over);

		return;
	}
	case CT_NONE:
	default:
		break;
	}

}

static void lkdtm_handler(void)
{
	unsigned long flags;
	bool do_it = false;

	spin_lock_irqsave(&count_lock, flags);
	count--;
	pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
		cp_name_to_str(cpoint), cp_type_to_str(cptype), count);

	if (count == 0) {
		do_it = true;
		count = cpoint_count;
	}
	spin_unlock_irqrestore(&count_lock, flags);

	if (do_it)
		lkdtm_do_action(cptype);
}

static int lkdtm_register_cpoint(enum cname which)
{
	int ret;

	cpoint = CN_INVALID;
	if (lkdtm.entry != NULL)
		unregister_jprobe(&lkdtm);

	switch (which) {
	case CN_DIRECT:
		lkdtm_do_action(cptype);
		return 0;
	case CN_INT_HARDWARE_ENTRY:
		lkdtm.kp.symbol_name = "do_IRQ";
		lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
		break;
	case CN_INT_HW_IRQ_EN:
		lkdtm.kp.symbol_name = "handle_IRQ_event";
		lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
		break;
	case CN_INT_TASKLET_ENTRY:
		lkdtm.kp.symbol_name = "tasklet_action";
		lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
		break;
	case CN_FS_DEVRW:
		lkdtm.kp.symbol_name = "ll_rw_block";
		lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
		break;
	case CN_MEM_SWAPOUT:
		lkdtm.kp.symbol_name = "shrink_inactive_list";
		lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
		break;
	case CN_TIMERADD:
		lkdtm.kp.symbol_name = "hrtimer_start";
		lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
		break;
	case CN_SCSI_DISPATCH_CMD:
		lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
		lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
		break;
	case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
		lkdtm.kp.symbol_name = "generic_ide_ioctl";
		lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
		pr_info("Crash point not available\n");
		return -EINVAL;
#endif
		break;
	default:
		pr_info("Invalid Crash Point\n");
		return -EINVAL;
	}

	cpoint = which;
	if ((ret = register_jprobe(&lkdtm)) < 0) {
		pr_info("Couldn't register jprobe\n");
		cpoint = CN_INVALID;
	}

	return ret;
}

static ssize_t do_register_entry(enum cname which, struct file *f,
		const char __user *user_buf, size_t count, loff_t *off)
{
	char *buf;
	int err;

	if (count >= PAGE_SIZE)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	cptype = parse_cp_type(buf, count);
	free_page((unsigned long) buf);

	if (cptype == CT_NONE)
		return -EINVAL;

	err = lkdtm_register_cpoint(which);
	if (err < 0)
		return err;

	*off += count;

	return count;
}

/* Generic read callback that just prints out the available crash types */
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
		size_t count, loff_t *off)
{
	char *buf;
	int i, n, out;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
	for (i = 0; i < ARRAY_SIZE(cp_type); i++)
		n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
	buf[n] = '\0';

	out = simple_read_from_buffer(user_buf, count, off,
				      buf, n);
	free_page((unsigned long) buf);

	return out;
}

static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
{
	return 0;
}


static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}

static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}

static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}

static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}

static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}

static ssize_t timeradd_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_TIMERADD, f, buf, count, off);
}

static ssize_t scsi_dispatch_cmd_entry(struct file *f,
		const char __user *buf, size_t count, loff_t *off)
{
	return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}

static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
		size_t count, loff_t *off)
{
	return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}

/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
		size_t count, loff_t *off)
{
	enum ctype type;
	char *buf;

	if (count >= PAGE_SIZE)
		return -EINVAL;
	if (count < 1)
		return -EINVAL;

	buf = (char *)__get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, user_buf, count)) {
		free_page((unsigned long) buf);
		return -EFAULT;
	}
	/* NUL-terminate and strip the trailing newline */
	buf[count] = '\0';
	strim(buf);

	type = parse_cp_type(buf, count);
	free_page((unsigned long) buf);
	if (type == CT_NONE)
		return -EINVAL;

	pr_info("Performing direct entry %s\n", cp_type_to_str(type));
	lkdtm_do_action(type);
	*off += count;

	return count;
}

struct crash_entry {
	const char *name;
	const struct file_operations fops;
};

static const struct crash_entry crash_entries[] = {
	{"DIRECT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = direct_entry} },
	{"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hardware_entry} },
	{"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_hw_irq_en} },
	{"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = int_tasklet_entry} },
	{"FS_DEVRW", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = fs_devrw_entry} },
	{"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = mem_swapout_entry} },
	{"TIMERADD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = timeradd_entry} },
	{"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = scsi_dispatch_cmd_entry} },
	{"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
			.llseek = generic_file_llseek,
			.open = lkdtm_debugfs_open,
			.write = ide_core_cp_entry} },
};

static struct dentry *lkdtm_debugfs_root;

static int __init lkdtm_module_init(void)
{
	int ret = -EINVAL;
	int n_debugfs_entries = 1; /* Assume only the direct entry */
	int i;

	/* Make sure we can write to __ro_after_init values during __init */
	ro_after_init |= 0xAA;

	/* Register debugfs interface */
	lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
	if (!lkdtm_debugfs_root) {
		pr_err("creating root dir failed\n");
		return -ENODEV;
	}

#ifdef CONFIG_KPROBES
	n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif

	for (i = 0; i < n_debugfs_entries; i++) {
		const struct crash_entry *cur = &crash_entries[i];
		struct dentry *de;

		de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
					 NULL, &cur->fops);
		if (de == NULL) {
			pr_err("could not create %s\n", cur->name);
			goto out_err;
		}
	}

	if (lkdtm_parse_commandline() == -EINVAL) {
		pr_info("Invalid command\n");
		goto out_err;
	}

	if (cpoint != CN_INVALID && cptype != CT_NONE) {
		ret = lkdtm_register_cpoint(cpoint);
		if (ret < 0) {
			pr_info("Invalid crash point %d\n", cpoint);
			goto out_err;
		}
		pr_info("Crash point %s of type %s registered\n",
			cpoint_name, cpoint_type);
	} else {
		pr_info("No crash points registered, enable through debugfs\n");
	}

	return 0;

out_err:
	debugfs_remove_recursive(lkdtm_debugfs_root);
	return ret;
}

static void __exit lkdtm_module_exit(void)
{
	debugfs_remove_recursive(lkdtm_debugfs_root);

	unregister_jprobe(&lkdtm);
	pr_info("Crash point unregistered\n");
}

module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");