kernel/livepatch/core.c
1 /*
2 * core.c - Kernel Live Patching Core
3 *
4 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
5 * Copyright (C) 2014 SUSE
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version 2
10 * of the License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23 #include <linux/module.h>
24 #include <linux/kernel.h>
25 #include <linux/mutex.h>
26 #include <linux/slab.h>
27 #include <linux/ftrace.h>
28 #include <linux/list.h>
29 #include <linux/kallsyms.h>
30 #include <linux/livepatch.h>
31
32 /**
33 * struct klp_ops - structure for tracking registered ftrace ops structs
34 *
35 * A single ftrace_ops is shared between all enabled replacement functions
36 * (klp_func structs) which have the same old_addr. This allows the switch
37 * between function versions to happen instantaneously by updating the klp_ops
38 * struct's func_stack list. The winner is the klp_func at the top of the
39 * func_stack (front of the list).
40 *
41 * @node: node for the global klp_ops list
42 * @func_stack: list head for the stack of klp_func's (active func is on top)
43 * @fops: registered ftrace ops struct
44 */
45 struct klp_ops {
46 struct list_head node;
47 struct list_head func_stack;
48 struct ftrace_ops fops;
49 };
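
/*
 * Illustrative sketch (not part of this file): if two patches both replace
 * the same function, say meminfo_proc_show(), they share one klp_ops and
 * stack their klp_func entries on func_stack:
 *
 *   klp_ops for meminfo_proc_show()
 *     .func_stack:  [patch2's klp_func] -> [patch1's klp_func]
 *
 * klp_ftrace_handler() always redirects to the entry at the head of the
 * list, so patch2's replacement "wins" for as long as both are enabled.
 */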
50
51 /*
52 * The klp_mutex protects the global lists and state transitions of any
53 * structure reachable from them. References to any structure must be obtained
54 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
55 * ensure it gets consistent data).
56 */
57 static DEFINE_MUTEX(klp_mutex);
58
59 static LIST_HEAD(klp_patches);
60 static LIST_HEAD(klp_ops);
61
62 static struct kobject *klp_root_kobj;
63
64 static struct klp_ops *klp_find_ops(unsigned long old_addr)
65 {
66 struct klp_ops *ops;
67 struct klp_func *func;
68
69 list_for_each_entry(ops, &klp_ops, node) {
70 func = list_first_entry(&ops->func_stack, struct klp_func,
71 stack_node);
72 if (func->old_addr == old_addr)
73 return ops;
74 }
75
76 return NULL;
77 }
78
79 static bool klp_is_module(struct klp_object *obj)
80 {
81 return obj->name;
82 }
83
84 static bool klp_is_object_loaded(struct klp_object *obj)
85 {
86 return !obj->name || obj->mod;
87 }
88
89 /* sets obj->mod if object is not vmlinux and module is found */
90 static void klp_find_object_module(struct klp_object *obj)
91 {
92 struct module *mod;
93
94 if (!klp_is_module(obj))
95 return;
96
97 mutex_lock(&module_mutex);
98 /*
99 * We do not want to block removal of patched modules and therefore
100 * we do not take a reference here. The patches are removed by
101 * the module going notifier instead.
102 */
103 mod = find_module(obj->name);
104 /*
105 * Do not interfere with the work of the module coming and going
106 * notifiers. Note that the patch might still be needed before the
107 * going handler is called. Module functions can be called even in the
108 * GOING state until mod->exit() finishes. This is especially important
109 * for patches that change the semantics of the patched functions.
110 */
111 if (mod && mod->klp_alive)
112 obj->mod = mod;
113
114 mutex_unlock(&module_mutex);
115 }
116
117 /* klp_mutex must be held by caller */
118 static bool klp_is_patch_registered(struct klp_patch *patch)
119 {
120 struct klp_patch *mypatch;
121
122 list_for_each_entry(mypatch, &klp_patches, list)
123 if (mypatch == patch)
124 return true;
125
126 return false;
127 }
128
129 static bool klp_initialized(void)
130 {
131 return !!klp_root_kobj;
132 }
133
134 struct klp_find_arg {
135 const char *objname;
136 const char *name;
137 unsigned long addr;
138 /*
139 * If count == 0, the symbol was not found. If count == 1, a unique
140 * match was found and addr is set. If count > 1, there is
141 * unresolvable ambiguity among "count" number of symbols with the same
142 * name in the same object.
143 */
144 unsigned long count;
145 };
146
147 static int klp_find_callback(void *data, const char *name,
148 struct module *mod, unsigned long addr)
149 {
150 struct klp_find_arg *args = data;
151
152 if ((mod && !args->objname) || (!mod && args->objname))
153 return 0;
154
155 if (strcmp(args->name, name))
156 return 0;
157
158 if (args->objname && strcmp(args->objname, mod->name))
159 return 0;
160
161 /*
162 * args->addr might be overwritten if another match is found
163 * but klp_find_object_symbol() handles this and only returns the
164 * addr if count == 1.
165 */
166 args->addr = addr;
167 args->count++;
168
169 return 0;
170 }
171
172 static int klp_find_object_symbol(const char *objname, const char *name,
173 unsigned long *addr)
174 {
175 struct klp_find_arg args = {
176 .objname = objname,
177 .name = name,
178 .addr = 0,
179 .count = 0
180 };
181
182 mutex_lock(&module_mutex);
183 kallsyms_on_each_symbol(klp_find_callback, &args);
184 mutex_unlock(&module_mutex);
185
186 if (args.count == 0)
187 pr_err("symbol '%s' not found in symbol table\n", name);
188 else if (args.count > 1)
189 pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
190 args.count, name, objname);
191 else {
192 *addr = args.addr;
193 return 0;
194 }
195
196 *addr = 0;
197 return -EINVAL;
198 }
199
200 struct klp_verify_args {
201 const char *name;
202 const unsigned long addr;
203 };
204
205 static int klp_verify_callback(void *data, const char *name,
206 struct module *mod, unsigned long addr)
207 {
208 struct klp_verify_args *args = data;
209
210 if (!mod &&
211 !strcmp(args->name, name) &&
212 args->addr == addr)
213 return 1;
214
215 return 0;
216 }
217
218 static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
219 {
220 struct klp_verify_args args = {
221 .name = name,
222 .addr = addr,
223 };
224 int ret;
225
226 mutex_lock(&module_mutex);
227 ret = kallsyms_on_each_symbol(klp_verify_callback, &args);
228 mutex_unlock(&module_mutex);
229
230 if (!ret) {
231 pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
232 name, addr);
233 return -EINVAL;
234 }
235
236 return 0;
237 }
238
239 static int klp_find_verify_func_addr(struct klp_object *obj,
240 struct klp_func *func)
241 {
242 int ret;
243
244 #if defined(CONFIG_RANDOMIZE_BASE)
245 /* If KASLR has been enabled, adjust old_addr accordingly */
246 if (kaslr_enabled() && func->old_addr)
247 func->old_addr += kaslr_offset();
248 #endif
249
250 if (!func->old_addr || klp_is_module(obj))
251 ret = klp_find_object_symbol(obj->name, func->old_name,
252 &func->old_addr);
253 else
254 ret = klp_verify_vmlinux_symbol(func->old_name,
255 func->old_addr);
256
257 return ret;
258 }
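
/*
 * Worked example for the KASLR adjustment above (the numbers are invented
 * for illustration): if a patch module supplies
 * func->old_addr = 0xffffffff811e0b40 taken from the target kernel's
 * System.map, and the running kernel was randomized with
 * kaslr_offset() = 0x10000000, the function actually lives at
 * 0xffffffff911e0b40, which is what old_addr is rebased to before the
 * klp_verify_vmlinux_symbol() check runs.
 */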
259
260 /*
261 * external symbols are located outside the parent object (where the parent
262 * object is either vmlinux or the kmod being patched).
263 */
264 static int klp_find_external_symbol(struct module *pmod, const char *name,
265 unsigned long *addr)
266 {
267 const struct kernel_symbol *sym;
268
269 /* first, check if it's an exported symbol */
270 preempt_disable();
271 sym = find_symbol(name, NULL, NULL, true, true);
272 if (sym) {
273 *addr = sym->value;
274 preempt_enable();
275 return 0;
276 }
277 preempt_enable();
278
279 /* otherwise check if it's in another .o within the patch module */
280 return klp_find_object_symbol(pmod->name, name, addr);
281 }
282
283 static int klp_write_object_relocations(struct module *pmod,
284 struct klp_object *obj)
285 {
286 int ret;
287 struct klp_reloc *reloc;
288
289 if (WARN_ON(!klp_is_object_loaded(obj)))
290 return -EINVAL;
291
292 if (WARN_ON(!obj->relocs))
293 return -EINVAL;
294
295 for (reloc = obj->relocs; reloc->name; reloc++) {
296 if (!klp_is_module(obj)) {
297 ret = klp_verify_vmlinux_symbol(reloc->name,
298 reloc->val);
299 if (ret)
300 return ret;
301 } else {
302 /* module, reloc->val needs to be discovered */
303 if (reloc->external)
304 ret = klp_find_external_symbol(pmod,
305 reloc->name,
306 &reloc->val);
307 else
308 ret = klp_find_object_symbol(obj->mod->name,
309 reloc->name,
310 &reloc->val);
311 if (ret)
312 return ret;
313 }
314 ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
315 reloc->val + reloc->addend);
316 if (ret) {
317 pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
318 reloc->name, reloc->val, ret);
319 return ret;
320 }
321 }
322
323 return 0;
324 }
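
/*
 * Minimal sketch of what a patch module might hand to
 * klp_write_object_relocations(): one klp_reloc entry per reference to a
 * non-exported symbol. The symbol name, address and the reference below
 * are invented for illustration; such tables are typically generated by
 * patch-creation tooling rather than written by hand.
 */
#if 0
static struct klp_reloc sample_relocs[] = {
	{
		.loc  = (unsigned long)&patched_code_reference, /* where to apply it */
		.name = "meminfo_proc_show",  /* unexported vmlinux symbol */
		.val  = 0xffffffff811e0b40,   /* verified against kallsyms above */
		.type = R_X86_64_64,          /* arch relocation type */
		.addend = 0,
		.external = 0,                /* resolved inside the parent object */
	},
	{ } /* the loop above stops at reloc->name == NULL */
};
#endif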
325
326 static void notrace klp_ftrace_handler(unsigned long ip,
327 unsigned long parent_ip,
328 struct ftrace_ops *fops,
329 struct pt_regs *regs)
330 {
331 struct klp_ops *ops;
332 struct klp_func *func;
333
334 ops = container_of(fops, struct klp_ops, fops);
335
336 rcu_read_lock();
337 func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
338 stack_node);
339 if (WARN_ON_ONCE(!func))
340 goto unlock;
341
342 klp_arch_set_pc(regs, (unsigned long)func->new_func);
343 unlock:
344 rcu_read_unlock();
345 }
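
/*
 * Note on the handler above: it never calls the replacement function
 * itself. klp_arch_set_pc() only rewrites the saved program counter in
 * *regs (regs->ip on x86), so when ftrace restores the registers,
 * execution resumes in func->new_func instead of the original function.
 * This is why the ftrace_ops is registered with FTRACE_OPS_FL_SAVE_REGS
 * and FTRACE_OPS_FL_IPMODIFY in klp_enable_func() below.
 */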
346
347 static void klp_disable_func(struct klp_func *func)
348 {
349 struct klp_ops *ops;
350
351 WARN_ON(func->state != KLP_ENABLED);
352 WARN_ON(!func->old_addr);
353
354 ops = klp_find_ops(func->old_addr);
355 if (WARN_ON(!ops))
356 return;
357
358 if (list_is_singular(&ops->func_stack)) {
359 WARN_ON(unregister_ftrace_function(&ops->fops));
360 WARN_ON(ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0));
361
362 list_del_rcu(&func->stack_node);
363 list_del(&ops->node);
364 kfree(ops);
365 } else {
366 list_del_rcu(&func->stack_node);
367 }
368
369 func->state = KLP_DISABLED;
370 }
371
372 static int klp_enable_func(struct klp_func *func)
373 {
374 struct klp_ops *ops;
375 int ret;
376
377 if (WARN_ON(!func->old_addr))
378 return -EINVAL;
379
380 if (WARN_ON(func->state != KLP_DISABLED))
381 return -EINVAL;
382
383 ops = klp_find_ops(func->old_addr);
384 if (!ops) {
385 ops = kzalloc(sizeof(*ops), GFP_KERNEL);
386 if (!ops)
387 return -ENOMEM;
388
389 ops->fops.func = klp_ftrace_handler;
390 ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
391 FTRACE_OPS_FL_DYNAMIC |
392 FTRACE_OPS_FL_IPMODIFY;
393
394 list_add(&ops->node, &klp_ops);
395
396 INIT_LIST_HEAD(&ops->func_stack);
397 list_add_rcu(&func->stack_node, &ops->func_stack);
398
399 ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
400 if (ret) {
401 pr_err("failed to set ftrace filter for function '%s' (%d)\n",
402 func->old_name, ret);
403 goto err;
404 }
405
406 ret = register_ftrace_function(&ops->fops);
407 if (ret) {
408 pr_err("failed to register ftrace handler for function '%s' (%d)\n",
409 func->old_name, ret);
410 ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
411 goto err;
412 }
413
415 } else {
416 list_add_rcu(&func->stack_node, &ops->func_stack);
417 }
418
419 func->state = KLP_ENABLED;
420
421 return 0;
422
423 err:
424 list_del_rcu(&func->stack_node);
425 list_del(&ops->node);
426 kfree(ops);
427 return ret;
428 }
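
/*
 * Note on the ftrace usage above: ftrace_set_filter_ip(ops, ip, remove,
 * reset) with remove == 0 adds old_addr to the ops' filter, so the handler
 * fires only for that one function; klp_disable_func() passes remove == 1
 * to take it back out. FTRACE_OPS_FL_SAVE_REGS makes ftrace hand the
 * handler a pt_regs it may modify, FTRACE_OPS_FL_IPMODIFY declares that
 * the handler may change the return ip (only one such ops is allowed per
 * function), and FTRACE_OPS_FL_DYNAMIC marks the ops as dynamically
 * allocated.
 */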
429
430 static void klp_disable_object(struct klp_object *obj)
431 {
432 struct klp_func *func;
433
434 klp_for_each_func(obj, func)
435 if (func->state == KLP_ENABLED)
436 klp_disable_func(func);
437
438 obj->state = KLP_DISABLED;
439 }
440
441 static int klp_enable_object(struct klp_object *obj)
442 {
443 struct klp_func *func;
444 int ret;
445
446 if (WARN_ON(obj->state != KLP_DISABLED))
447 return -EINVAL;
448
449 if (WARN_ON(!klp_is_object_loaded(obj)))
450 return -EINVAL;
451
452 klp_for_each_func(obj, func) {
453 ret = klp_enable_func(func);
454 if (ret) {
455 klp_disable_object(obj);
456 return ret;
457 }
458 }
459 obj->state = KLP_ENABLED;
460
461 return 0;
462 }
463
464 static int __klp_disable_patch(struct klp_patch *patch)
465 {
466 struct klp_object *obj;
467
468 /* enforce stacking: only the last enabled patch can be disabled */
469 if (!list_is_last(&patch->list, &klp_patches) &&
470 list_next_entry(patch, list)->state == KLP_ENABLED)
471 return -EBUSY;
472
473 pr_notice("disabling patch '%s'\n", patch->mod->name);
474
475 klp_for_each_object(patch, obj) {
476 if (obj->state == KLP_ENABLED)
477 klp_disable_object(obj);
478 }
479
480 patch->state = KLP_DISABLED;
481
482 return 0;
483 }
484
485 /**
486 * klp_disable_patch() - disables a registered patch
487 * @patch: The registered, enabled patch to be disabled
488 *
489 * Unregisters the patched functions from ftrace.
490 *
491 * Return: 0 on success, otherwise error
492 */
493 int klp_disable_patch(struct klp_patch *patch)
494 {
495 int ret;
496
497 mutex_lock(&klp_mutex);
498
499 if (!klp_is_patch_registered(patch)) {
500 ret = -EINVAL;
501 goto err;
502 }
503
504 if (patch->state == KLP_DISABLED) {
505 ret = -EINVAL;
506 goto err;
507 }
508
509 ret = __klp_disable_patch(patch);
510
511 err:
512 mutex_unlock(&klp_mutex);
513 return ret;
514 }
515 EXPORT_SYMBOL_GPL(klp_disable_patch);
516
517 static int __klp_enable_patch(struct klp_patch *patch)
518 {
519 struct klp_object *obj;
520 int ret;
521
522 if (WARN_ON(patch->state != KLP_DISABLED))
523 return -EINVAL;
524
525 /* enforce stacking: only the first disabled patch can be enabled */
526 if (patch->list.prev != &klp_patches &&
527 list_prev_entry(patch, list)->state == KLP_DISABLED)
528 return -EBUSY;
529
530 pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
531 add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
532
533 pr_notice("enabling patch '%s'\n", patch->mod->name);
534
535 klp_for_each_object(patch, obj) {
536 if (!klp_is_object_loaded(obj))
537 continue;
538
539 ret = klp_enable_object(obj);
540 if (ret)
541 goto unregister;
542 }
543
544 patch->state = KLP_ENABLED;
545
546 return 0;
547
548 unregister:
549 WARN_ON(__klp_disable_patch(patch));
550 return ret;
551 }
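
/*
 * Example of the stacking rules enforced in __klp_enable_patch() and
 * __klp_disable_patch() above: with patches P1, P2, P3 registered in that
 * order and all enabled, only P3 (the topmost) may be disabled; once P3
 * and P2 are disabled, P2 is the first disabled patch and therefore the
 * only one that may be re-enabled. Patches are enabled and disabled
 * strictly in LIFO order.
 */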
552
553 /**
554 * klp_enable_patch() - enables a registered patch
555 * @patch: The registered, disabled patch to be enabled
556 *
557 * Performs the needed symbol lookups and code relocations,
558 * then registers the patched functions with ftrace.
559 *
560 * Return: 0 on success, otherwise error
561 */
562 int klp_enable_patch(struct klp_patch *patch)
563 {
564 int ret;
565
566 mutex_lock(&klp_mutex);
567
568 if (!klp_is_patch_registered(patch)) {
569 ret = -EINVAL;
570 goto err;
571 }
572
573 ret = __klp_enable_patch(patch);
574
575 err:
576 mutex_unlock(&klp_mutex);
577 return ret;
578 }
579 EXPORT_SYMBOL_GPL(klp_enable_patch);
580
581 /*
582 * Sysfs Interface
583 *
584 * /sys/kernel/livepatch
585 * /sys/kernel/livepatch/<patch>
586 * /sys/kernel/livepatch/<patch>/enabled
587 * /sys/kernel/livepatch/<patch>/<object>
588 * /sys/kernel/livepatch/<patch>/<object>/<func>
589 */
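
/*
 * Usage sketch for the interface above, assuming a loaded patch module
 * named livepatch_fix1 (the name is illustrative):
 *
 *   cat /sys/kernel/livepatch/livepatch_fix1/enabled       -> "1" or "0"
 *   echo 0 > /sys/kernel/livepatch/livepatch_fix1/enabled  (disable)
 *   echo 1 > /sys/kernel/livepatch/livepatch_fix1/enabled  (enable)
 *
 * enabled_store() below accepts only the values 0 and 1 and returns
 * -EINVAL if the patch is already in the requested state.
 */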
590
591 static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
592 const char *buf, size_t count)
593 {
594 struct klp_patch *patch;
595 int ret;
596 unsigned long val;
597
598 ret = kstrtoul(buf, 10, &val);
599 if (ret)
600 return -EINVAL;
601
602 if (val != KLP_DISABLED && val != KLP_ENABLED)
603 return -EINVAL;
604
605 patch = container_of(kobj, struct klp_patch, kobj);
606
607 mutex_lock(&klp_mutex);
608
609 if (val == patch->state) {
610 /* already in requested state */
611 ret = -EINVAL;
612 goto err;
613 }
614
615 if (val == KLP_ENABLED) {
616 ret = __klp_enable_patch(patch);
617 if (ret)
618 goto err;
619 } else {
620 ret = __klp_disable_patch(patch);
621 if (ret)
622 goto err;
623 }
624
625 mutex_unlock(&klp_mutex);
626
627 return count;
628
629 err:
630 mutex_unlock(&klp_mutex);
631 return ret;
632 }
633
634 static ssize_t enabled_show(struct kobject *kobj,
635 struct kobj_attribute *attr, char *buf)
636 {
637 struct klp_patch *patch;
638
639 patch = container_of(kobj, struct klp_patch, kobj);
640 return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
641 }
642
643 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
644 static struct attribute *klp_patch_attrs[] = {
645 &enabled_kobj_attr.attr,
646 NULL
647 };
648
649 static void klp_kobj_release_patch(struct kobject *kobj)
650 {
651 /*
652 * Once we have a consistency model we'll need to module_put() the
653 * patch module here. See klp_register_patch() for more details.
654 */
655 }
656
657 static struct kobj_type klp_ktype_patch = {
658 .release = klp_kobj_release_patch,
659 .sysfs_ops = &kobj_sysfs_ops,
660 .default_attrs = klp_patch_attrs,
661 };
662
663 static void klp_kobj_release_object(struct kobject *kobj)
664 {
665 }
666
667 static struct kobj_type klp_ktype_object = {
668 .release = klp_kobj_release_object,
669 .sysfs_ops = &kobj_sysfs_ops,
670 };
671
672 static void klp_kobj_release_func(struct kobject *kobj)
673 {
674 }
675
676 static struct kobj_type klp_ktype_func = {
677 .release = klp_kobj_release_func,
678 .sysfs_ops = &kobj_sysfs_ops,
679 };
680
681 /*
682 * Free all functions' kobjects in the array up to some limit. When limit is
683 * NULL, all kobjects are freed.
684 */
685 static void klp_free_funcs_limited(struct klp_object *obj,
686 struct klp_func *limit)
687 {
688 struct klp_func *func;
689
690 for (func = obj->funcs; func->old_name && func != limit; func++)
691 kobject_put(&func->kobj);
692 }
693
694 /* Clean up when a patched object is unloaded */
695 static void klp_free_object_loaded(struct klp_object *obj)
696 {
697 struct klp_func *func;
698
699 obj->mod = NULL;
700
701 klp_for_each_func(obj, func)
702 func->old_addr = 0;
703 }
704
705 /*
706 * Free all objects' kobjects in the array up to some limit. When limit is
707 * NULL, all kobjects are freed.
708 */
709 static void klp_free_objects_limited(struct klp_patch *patch,
710 struct klp_object *limit)
711 {
712 struct klp_object *obj;
713
714 for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
715 klp_free_funcs_limited(obj, NULL);
716 kobject_put(&obj->kobj);
717 }
718 }
719
720 static void klp_free_patch(struct klp_patch *patch)
721 {
722 klp_free_objects_limited(patch, NULL);
723 if (!list_empty(&patch->list))
724 list_del(&patch->list);
725 kobject_put(&patch->kobj);
726 }
727
728 static int klp_init_func(struct klp_object *obj, struct klp_func *func)
729 {
730 INIT_LIST_HEAD(&func->stack_node);
731 func->state = KLP_DISABLED;
732
733 return kobject_init_and_add(&func->kobj, &klp_ktype_func,
734 &obj->kobj, "%s", func->old_name);
735 }
736
737 /* parts of the initialization that are done only when the object is loaded */
738 static int klp_init_object_loaded(struct klp_patch *patch,
739 struct klp_object *obj)
740 {
741 struct klp_func *func;
742 int ret;
743
744 if (obj->relocs) {
745 ret = klp_write_object_relocations(patch->mod, obj);
746 if (ret)
747 return ret;
748 }
749
750 klp_for_each_func(obj, func) {
751 ret = klp_find_verify_func_addr(obj, func);
752 if (ret)
753 return ret;
754 }
755
756 return 0;
757 }
758
759 static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
760 {
761 struct klp_func *func;
762 int ret;
763 const char *name;
764
765 if (!obj->funcs)
766 return -EINVAL;
767
768 obj->state = KLP_DISABLED;
769 obj->mod = NULL;
770
771 klp_find_object_module(obj);
772
773 name = klp_is_module(obj) ? obj->name : "vmlinux";
774 ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
775 &patch->kobj, "%s", name);
776 if (ret)
777 return ret;
778
779 klp_for_each_func(obj, func) {
780 ret = klp_init_func(obj, func);
781 if (ret)
782 goto free;
783 }
784
785 if (klp_is_object_loaded(obj)) {
786 ret = klp_init_object_loaded(patch, obj);
787 if (ret)
788 goto free;
789 }
790
791 return 0;
792
793 free:
794 klp_free_funcs_limited(obj, func);
795 kobject_put(&obj->kobj);
796 return ret;
797 }
798
799 static int klp_init_patch(struct klp_patch *patch)
800 {
801 struct klp_object *obj;
802 int ret;
803
804 if (!patch->objs)
805 return -EINVAL;
806
807 mutex_lock(&klp_mutex);
808
809 patch->state = KLP_DISABLED;
810
811 ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
812 klp_root_kobj, "%s", patch->mod->name);
813 if (ret)
814 goto unlock;
815
816 klp_for_each_object(patch, obj) {
817 ret = klp_init_object(patch, obj);
818 if (ret)
819 goto free;
820 }
821
822 list_add_tail(&patch->list, &klp_patches);
823
824 mutex_unlock(&klp_mutex);
825
826 return 0;
827
828 free:
829 klp_free_objects_limited(patch, obj);
830 kobject_put(&patch->kobj);
831 unlock:
832 mutex_unlock(&klp_mutex);
833 return ret;
834 }
835
836 /**
837 * klp_unregister_patch() - unregisters a patch
838 * @patch: Disabled patch to be unregistered
839 *
840 * Frees the data structures and removes the sysfs interface.
841 *
842 * Return: 0 on success, otherwise error
843 */
844 int klp_unregister_patch(struct klp_patch *patch)
845 {
846 int ret = 0;
847
848 mutex_lock(&klp_mutex);
849
850 if (!klp_is_patch_registered(patch)) {
851 ret = -EINVAL;
852 goto out;
853 }
854
855 if (patch->state == KLP_ENABLED) {
856 ret = -EBUSY;
857 goto out;
858 }
859
860 klp_free_patch(patch);
861
862 out:
863 mutex_unlock(&klp_mutex);
864 return ret;
865 }
866 EXPORT_SYMBOL_GPL(klp_unregister_patch);
867
868 /**
869 * klp_register_patch() - registers a patch
870 * @patch: Patch to be registered
871 *
872 * Initializes the data structure associated with the patch and
873 * creates the sysfs interface.
874 *
875 * Return: 0 on success, otherwise error
876 */
877 int klp_register_patch(struct klp_patch *patch)
878 {
879 int ret;
880
881 if (!klp_initialized())
882 return -ENODEV;
883
884 if (!patch || !patch->mod)
885 return -EINVAL;
886
887 /*
888 * A reference is taken on the patch module to prevent it from being
889 * unloaded. Right now, we don't allow patch modules to unload since
890 * there is currently no method to determine if a thread is still
891 * running in the patched code contained in the patch module once
892 * the ftrace registration is successful.
893 */
894 if (!try_module_get(patch->mod))
895 return -ENODEV;
896
897 ret = klp_init_patch(patch);
898 if (ret)
899 module_put(patch->mod);
900
901 return ret;
902 }
903 EXPORT_SYMBOL_GPL(klp_register_patch);
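
/*
 * Minimal sketch of a caller of the API exported above, modeled on
 * samples/livepatch/livepatch-sample.c. Everything below (the module and
 * function names, the choice of cmdline_proc_show() as a target) is
 * illustrative and not part of this file.
 */
#if 0
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <linux/livepatch.h>

static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%s\n", "this has been live patched");
	return 0;
}

static struct klp_func funcs[] = {
	{
		.old_name = "cmdline_proc_show",
		.new_func = livepatch_cmdline_proc_show,
	}, { }
};

static struct klp_object objs[] = {
	{
		/* NULL name means the symbols live in vmlinux */
		.funcs = funcs,
	}, { }
};

static struct klp_patch patch = {
	.mod = THIS_MODULE,
	.objs = objs,
};

static int livepatch_init(void)
{
	int ret;

	ret = klp_register_patch(&patch);
	if (ret)
		return ret;

	ret = klp_enable_patch(&patch);
	if (ret) {
		WARN_ON(klp_unregister_patch(&patch));
		return ret;
	}

	return 0;
}

static void livepatch_exit(void)
{
	WARN_ON(klp_disable_patch(&patch));
	WARN_ON(klp_unregister_patch(&patch));
}

module_init(livepatch_init);
module_exit(livepatch_exit);
MODULE_LICENSE("GPL");
#endif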
904
905 static int klp_module_notify_coming(struct klp_patch *patch,
906 struct klp_object *obj)
907 {
908 struct module *pmod = patch->mod;
909 struct module *mod = obj->mod;
910 int ret;
911
912 ret = klp_init_object_loaded(patch, obj);
913 if (ret) {
914 pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
915 pmod->name, mod->name, ret);
916 return ret;
917 }
918
919 if (patch->state == KLP_DISABLED)
920 return 0;
921
922 pr_notice("applying patch '%s' to loading module '%s'\n",
923 pmod->name, mod->name);
924
925 ret = klp_enable_object(obj);
926 if (ret)
927 pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
928 pmod->name, mod->name, ret);
929 return ret;
930 }
931
932 static void klp_module_notify_going(struct klp_patch *patch,
933 struct klp_object *obj)
934 {
935 struct module *pmod = patch->mod;
936 struct module *mod = obj->mod;
937
938 if (patch->state == KLP_DISABLED)
939 goto disabled;
940
941 pr_notice("reverting patch '%s' on unloading module '%s'\n",
942 pmod->name, mod->name);
943
944 klp_disable_object(obj);
945
946 disabled:
947 klp_free_object_loaded(obj);
948 }
949
950 static int klp_module_notify(struct notifier_block *nb, unsigned long action,
951 void *data)
952 {
953 int ret;
954 struct module *mod = data;
955 struct klp_patch *patch;
956 struct klp_object *obj;
957
958 if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
959 return 0;
960
961 mutex_lock(&klp_mutex);
962
963 /*
964 * Track for every module whether its notifiers have run (klp_alive),
965 * because we never know which module a future patch will target.
966 */
967 if (action == MODULE_STATE_COMING)
968 mod->klp_alive = true;
969 else /* MODULE_STATE_GOING */
970 mod->klp_alive = false;
971
972 list_for_each_entry(patch, &klp_patches, list) {
973 klp_for_each_object(patch, obj) {
974 if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
975 continue;
976
977 if (action == MODULE_STATE_COMING) {
978 obj->mod = mod;
979 ret = klp_module_notify_coming(patch, obj);
980 if (ret) {
981 obj->mod = NULL;
982 pr_warn("patch '%s' is in an inconsistent state!\n",
983 patch->mod->name);
984 }
985 } else /* MODULE_STATE_GOING */
986 klp_module_notify_going(patch, obj);
987
988 break;
989 }
990 }
991
992 mutex_unlock(&klp_mutex);
993
994 return 0;
995 }
996
997 static struct notifier_block klp_module_nb = {
998 .notifier_call = klp_module_notify,
999 .priority = INT_MIN+1, /* called late but before ftrace notifier */
1000 };
1001
1002 static int __init klp_init(void)
1003 {
1004 int ret;
1005
1006 ret = klp_check_compiler_support();
1007 if (ret) {
1008 pr_info("Your compiler is too old; turning off.\n");
1009 return -EINVAL;
1010 }
1011
1012 ret = register_module_notifier(&klp_module_nb);
1013 if (ret)
1014 return ret;
1015
1016 klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
1017 if (!klp_root_kobj) {
1018 ret = -ENOMEM;
1019 goto unregister;
1020 }
1021
1022 return 0;
1023
1024 unregister:
1025 unregister_module_notifier(&klp_module_nb);
1026 return ret;
1027 }
1028
1029 module_init(klp_init);