/*
 * Dynamic function tracer architecture backend.
 *
 * Copyright IBM Corp. 2009,2014
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/moduleloader.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/kprobes.h>
#include <trace/syscall.h>
#include <asm/asm-offsets.h>
#include <asm/cacheflush.h>
#include "entry.h"

/*
 * The mcount code looks like this:
 *	stg	%r14,8(%r15)		# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * Total length is 24 bytes. Only the first instruction will be patched
 * by ftrace_make_call / ftrace_make_nop.
 * The enabled ftrace code block looks like this:
 * >	brasl	%r0,ftrace_caller	# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The ftrace function gets called with a non-standard C function call
 * ABI where r0 contains the return address. It is also expected that
 * the called function only clobbers r0 and r1, but restores r2-r15.
 * For module code we can't directly jump to the ftrace caller, but need
 * a trampoline (ftrace_plt), which also clobbers r1.
 * The return point of the ftrace function has offset 24, so execution
 * continues behind the mcount block.
 * The disabled ftrace code block looks like this:
 * >	jg	.+24			# offset 0
 *	larl	%r1,<&counter>		# offset 6
 *	brasl	%r14,_mcount		# offset 12
 *	lg	%r14,8(%r15)		# offset 18
 * The jg instruction branches to offset 24 to skip as many instructions
 * as possible.
 */

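/*
 * Illustrative sketch of the instruction pair produced by
 * ftrace_generate_nop_insn() / ftrace_generate_call_insn() (the real
 * definitions live in asm/ftrace.h): the patched code is a six byte
 * opcode plus signed displacement,
 *
 *	struct ftrace_insn {
 *		u16 opc;
 *		s32 disp;
 *	} __packed;
 *
 * and, with branch displacements counted in halfwords, the two variants
 * are:
 *
 *	jg	.+24			opc 0xc0f4, disp 24 / 2
 *	brasl	%r0,ftrace_caller	opc 0xc005, disp (FTRACE_ADDR - ip) / 2
 */
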
unsigned long ftrace_plt;

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
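	/*
	 * Nothing to do: all patched call sites branch to the same
	 * ftrace_caller, so switching between tracer variants does not
	 * require rewriting the instruction.
	 */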
	return 0;
}

int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	struct ftrace_insn insn;
	unsigned short op;
	void *from, *to;
	size_t size;

	ftrace_generate_nop_insn(&insn);
	size = sizeof(insn);
	from = &insn;
	to = (void *) rec->ip;
	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
		return -EFAULT;
	/*
	 * If we find a breakpoint instruction, a kprobe has been placed
	 * at the beginning of the function. We write the constant
	 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
	 * instruction so that the kprobes handler can execute a nop if it
	 * reaches this breakpoint.
	 */
	if (op == BREAKPOINT_INSTRUCTION) {
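		/*
		 * Keep the two byte breakpoint in place and rewrite only
		 * the four displacement bytes behind it.
		 */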
		size -= 2;
		from += 2;
		to += 2;
		insn.disp = KPROBE_ON_FTRACE_NOP;
	}
	if (probe_kernel_write(to, from, size))
		return -EPERM;
	return 0;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	struct ftrace_insn insn;
	unsigned short op;
	void *from, *to;
	size_t size;

	ftrace_generate_call_insn(&insn, rec->ip);
	size = sizeof(insn);
	from = &insn;
	to = (void *) rec->ip;
	if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
		return -EFAULT;
	/*
	 * If we find a breakpoint instruction, a kprobe has been placed
	 * at the beginning of the function. We write the constant
	 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
	 * instruction so that the kprobes handler can execute a brasl if it
	 * reaches this breakpoint.
	 */
	if (op == BREAKPOINT_INSTRUCTION) {
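		/* As above: keep the breakpoint, rewrite only the displacement. */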
		size -= 2;
		from += 2;
		to += 2;
		insn.disp = KPROBE_ON_FTRACE_CALL;
	}
	if (probe_kernel_write(to, from, size))
		return -EPERM;
	return 0;
}

int ftrace_update_ftrace_func(ftrace_func_t func)
{
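	/*
	 * Nothing to patch here: ftrace_caller loads the current tracer
	 * via the ftrace_trace_function pointer at run time (see
	 * mcount.S), so updating the tracer needs no code change.
	 */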
	return 0;
}

int __init ftrace_dyn_arch_init(void)
{
	return 0;
}

static int __init ftrace_plt_init(void)
{
	unsigned int *ip;

	ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
	if (!ftrace_plt)
		panic("cannot allocate ftrace plt\n");
	ip = (unsigned int *) ftrace_plt;
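	/*
	 * Trampoline layout: three instructions, two bytes of padding,
	 * then the eight byte FTRACE_ADDR literal (ip[3]/ip[4], byte
	 * offset 12):
	 *	basr	%r1,0		# r1 = address of the following lg
	 *	lg	%r1,10(%r1)	# load FTRACE_ADDR from byte offset 12
	 *	br	%r1		# branch to ftrace_caller
	 */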
	ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
	ip[1] = 0x100a0004;
	ip[2] = 0x07f10000;
	ip[3] = FTRACE_ADDR >> 32;
	ip[4] = FTRACE_ADDR & 0xffffffff;
	set_memory_ro(ftrace_plt, 1);
	return 0;
}
device_initcall(ftrace_plt_init);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it on the stack of return addresses
 * in the current thread info.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
	struct ftrace_graph_ent trace;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		goto out;
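	/*
	 * ip is the return point behind the mcount block (offset 24, see
	 * the comment at the top); step back to the start of the block,
	 * which is reported as the traced function.
	 */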
	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
	trace.func = ip;
	trace.depth = current->curr_ret_stack + 1;
	/* Only trace if the calling function expects to. */
	if (!ftrace_graph_entry(&trace))
		goto out;
	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
		goto out;
	parent = (unsigned long) return_to_handler;
out:
	return parent;
}
NOKPROBE_SYMBOL(prepare_ftrace_return);

/*
 * Patch the kernel code at the ftrace_graph_caller location. The
 * instruction there is a branch relative on condition (brcl). To enable
 * the ftrace graph code block, we simply patch the mask field of the
 * instruction to zero and turn the instruction into a nop.
 * To disable the ftrace graph code block, the mask field is patched to
 * all ones, which turns the instruction into an unconditional branch.
 */
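/*
 * With the condition mask sitting in the second byte of the six byte
 * brcl instruction, this amounts to toggling between:
 *	brcl	0,disp		# 0xc004 ... never taken, i.e. a nop
 *	brcl	15,disp		# 0xc0f4 ... always taken, same as jg
 */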
int ftrace_enable_ftrace_graph_caller(void)
{
	u8 op = 0x04; /* set mask field to zero */

	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

int ftrace_disable_ftrace_graph_caller(void)
{
	u8 op = 0xf4; /* set mask field to all ones */

	return probe_kernel_write(__va(ftrace_graph_caller)+1, &op, sizeof(op));
}

#endif /* CONFIG_FUNCTION_GRAPH_TRACER */