/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */

#include <linux/uprobes.h>

static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

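/*
 * Note on the hashed locks above: both hash macros key on a kernel pointer
 * (the probed file's inode in the callers further down), so operations on
 * the same file serialize on the same mutex while different files usually
 * proceed in parallel.  A minimal, illustrative-only sketch of the register
 * side (it mirrors uprobe_register() below):
 *
 *	mutex_lock(uprobes_hash(inode));
 *	... insert or remove breakpoints for this inode:offset ...
 *	mutex_unlock(uprobes_hash(inode));
 */
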
/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  A fine-grained per-inode count would
 * probably be better.
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

/*
 * Maintain a temporary per-vma info that can be used to search if a vma
 * has already been handled. This structure is introduced since extending
 * vm_area_struct wasn't recommended.
 */
struct vma_info {
	struct list_head	probe_list;
	struct mm_struct	*mm;
	loff_t			vaddr;
};

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
		return true;

	return false;
}

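/*
 * Translate a file offset into a user virtual address within @vma.
 * Illustrative arithmetic (made-up numbers): for a text mapping with
 * vm_start == 0x400000 and vm_pgoff == 0, a probe at file offset 0x1234
 * maps to the virtual address 0x400000 + 0x1234 = 0x401234.
 */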
static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
{
	loff_t vaddr;

	vaddr = vma->vm_start + offset;
	vaddr -= vma->vm_pgoff << PAGE_SHIFT;

	return vaddr;
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:   vma that holds the pte pointing to page
 * @page:  the cowed page we are replacing by kpage
 * @kpage: the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);
	pte_unmap_unlock(ptep, ptl);
	err = 0;

out:
	return err;
}

/**
 * is_bkpt_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_bkpt_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_bkpt_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_BKPT_INSN;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify read_opcode /
 * write_opcode accordingly. This would never be a problem for archs that
 * have fixed length instructions.
 */

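/*
 * For example (illustrative, per the x86 arch header of this era): on x86
 * the breakpoint is the one-byte int3 opcode 0xcc, so UPROBE_BKPT_INSN_SIZE
 * is 1 and the "smallest instruction" assumption above holds trivially.
 */
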
/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct address_space *mapping;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	struct uprobe *uprobe;
	loff_t addr;
	int ret;

	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -EINVAL;

	/*
	 * We are interested in text pages only. Our pages of interest
	 * should be mapped for read and execute only. We desist from
	 * adding probes in write mapped pages since the breakpoints
	 * might end up in the file copy.
	 */
	if (!valid_vma(vma, is_bkpt_insn(&opcode)))
		goto put_out;

	uprobe = container_of(auprobe, struct uprobe, arch);
	mapping = uprobe->inode->i_mapping;
	if (mapping != vma->vm_file->f_mapping)
		goto put_out;

	addr = vma_address(vma, uprobe->offset);
	if (vaddr != (unsigned long)addr)
		goto put_out;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_out;

	__SetPageUptodate(new_page);

	/*
	 * lock page will serialize against do_wp_page()'s
	 * PageAnon() handling
	 */
	lock_page(old_page);
	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

	/* poke the new insn in, ASSUMES we don't cross page boundary */
	vaddr &= ~PAGE_MASK;
	BUG_ON(vaddr + UPROBE_BKPT_INSN_SIZE > PAGE_SIZE);
	memcpy(vaddr_new + vaddr, &opcode, UPROBE_BKPT_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto unlock_out;

	lock_page(new_page);
	ret = __replace_page(vma, old_page, new_page);
	unlock_page(new_page);

unlock_out:
	unlock_page(old_page);
	page_cache_release(new_page);

put_out:
	put_page(old_page);

	return ret;
}

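/*
 * Recap of the flow in write_opcode() above: get_user_pages() pins the
 * existing text page, a fresh anonymous page is allocated and filled with a
 * byte-for-byte copy, the opcode is poked into that copy, and
 * __replace_page() swaps the page table entry so that only this mm sees the
 * breakpoint while the page cache copy of the file stays untouched.
 */
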
/**
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
	if (ret <= 0)
		return ret;

	lock_page(page);
	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_BKPT_INSN_SIZE);
	kunmap_atomic(vaddr_new);
	unlock_page(page);

	put_page(page);

	return 0;
}

static int is_bkpt_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;

	if (is_bkpt_insn(&opcode))
		return 1;

	return 0;
}

/**
 * set_bkpt - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_bkpt(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_bkpt_at_addr(mm, vaddr);
	if (result == 1)
		return -EEXIST;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_BKPT_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 * @verify: if true, verify existence of breakpoint instruction.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
{
	if (verify) {
		int result;

		result = is_bkpt_at_addr(mm, vaddr);
		if (!result)
			return -EINVAL;

		if (result != 1)
			return result;
	}
	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

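/*
 * The rbtree is therefore ordered by the (inode, offset) pair: inodes
 * compare as raw pointers and, within one inode, probes sort by file
 * offset.  For example (hypothetical values), probes at inodeA:0x100 and
 * inodeA:0x200 are neighbours, while every probe on inodeB sits entirely
 * before or after the inodeA range depending on the pointer values.
 */
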
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;
	struct uprobe *u;

	spin_lock_irqsave(&uprobes_treelock, flags);
	u = __insert_uprobe(uprobe);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);
	INIT_LIST_HEAD(&uprobe->pending_list);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully,
 * false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int
__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
			unsigned long nbytes, unsigned long offset)
{
	struct file *filp = vma->vm_file;
	struct page *page;
	void *vaddr;
	unsigned long off1;
	unsigned long idx;

	if (!filp)
		return -EINVAL;

	idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
	off1 = offset &= ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off1, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int
copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	addr &= ~PAGE_MASK;
	nbytes = PAGE_SIZE - addr;
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes))
			return -ENOMEM;

		bytes = nbytes;
	}
	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
}

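/*
 * Worked example for the page-boundary case above (made-up numbers,
 * assuming PAGE_SIZE == 4096 and MAX_UINSN_BYTES == 16): a probe at a file
 * offset whose in-page remainder is 0xffe leaves nbytes == 2, so the
 * trailing 14 bytes are copied from the following page first and only the
 * 2 bytes that live in the first page are copied by the final call.
 */
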
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, loff_t vaddr)
{
	unsigned long addr;
	int ret;

	/*
	 * If the probe is being deleted, the unregistering thread could
	 * already be done with its vma-rmap walk-through. Adding a probe
	 * now can be fatal since nobody will be able to clean it up.
	 * Also we could be called from the fork or mremap path, where the
	 * probe might already have been inserted. Hence behave as if the
	 * probe already existed.
	 */
	if (!uprobe->consumers)
		return -EEXIST;

	addr = (unsigned long)vaddr;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma, addr);
		if (ret)
			return ret;

		if (is_bkpt_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -EEXIST;

		ret = arch_uprobes_analyze_insn(&uprobe->arch, mm);
		if (ret)
			return ret;

		uprobe->flags |= UPROBE_COPY_INSN;
	}
	ret = set_bkpt(&uprobe->arch, mm, addr);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
{
	set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true);
}

static void delete_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

static struct vma_info *
__find_next_vma_info(struct address_space *mapping, struct list_head *head,
			struct vma_info *vi, loff_t offset, bool is_register)
{
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct vma_info *tmpvi;
	unsigned long pgoff;
	int existing_vma;
	loff_t vaddr;

	pgoff = offset >> PAGE_SHIFT;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		existing_vma = 0;
		vaddr = vma_address(vma, offset);

		list_for_each_entry(tmpvi, head, probe_list) {
			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
				existing_vma = 1;
				break;
			}
		}

		/*
		 * Another vma needs a probe to be installed. However skip
		 * installing the probe if the vma is about to be unlinked.
		 */
		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
			vi->mm = vma->vm_mm;
			vi->vaddr = vaddr;
			list_add(&vi->probe_list, head);

			return vi;
		}
	}

	return NULL;
}

/*
 * Iterate in the rmap prio tree and find a vma where a probe has not
 * yet been inserted.
 */
static struct vma_info *
find_next_vma_info(struct address_space *mapping, struct list_head *head,
			loff_t offset, bool is_register)
{
	struct vma_info *vi, *retvi;

	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
	if (!vi)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mapping->i_mmap_mutex);
	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!retvi)
		kfree(vi);

	return retvi;
}

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct list_head try_list;
	struct vm_area_struct *vma;
	struct address_space *mapping;
	struct vma_info *vi, *tmpvi;
	struct mm_struct *mm;
	loff_t vaddr;
	int ret;

	mapping = uprobe->inode->i_mapping;
	INIT_LIST_HEAD(&try_list);

	ret = 0;

	for (;;) {
		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
		if (!vi)
			break;

		if (IS_ERR(vi)) {
			ret = PTR_ERR(vi);
			break;
		}

		mm = vi->mm;
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, (unsigned long)vi->vaddr);
		if (!vma || !valid_vma(vma, is_register)) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}
		vaddr = vma_address(vma, uprobe->offset);
		if (vma->vm_file->f_mapping->host != uprobe->inode ||
						vaddr != vi->vaddr) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}

		if (is_register)
			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
		else
			remove_breakpoint(uprobe, mm, vi->vaddr);

		up_read(&mm->mmap_sem);
		mmput(mm);
		if (is_register) {
			if (ret && ret == -EEXIST)
				ret = 0;
			if (ret)
				break;
		}
	}

	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
		list_del(&vi->probe_list);
		kfree(vi);
	}

	return ret;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO: can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for an @inode:@offset
 * tuple). The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}

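/*
 * Minimal usage sketch for the two calls above (illustrative only; the
 * consumer struct and handler signature are assumed to match the
 * linux/uprobes.h of this tree, and my_handler/my_consumer are
 * hypothetical names):
 *
 *	static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
 *	{
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = { .handler = my_handler };
 *
 *	ret = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */
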
/*
 * Of all the nodes that correspond to the given inode, return the node
 * with the least offset.
 */
static struct rb_node *find_least_offset_node(struct inode *inode)
{
	struct uprobe u = { .inode = inode, .offset = 0};
	struct rb_node *n = uprobes_tree.rb_node;
	struct rb_node *close_node = NULL;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);

		if (uprobe->inode == inode)
			close_node = n;

		if (!match)
			return close_node;

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}

	return close_node;
}

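/*
 * Because the tree is ordered by (inode, offset), starting from the
 * least-offset node of an inode and walking rb_next() visits every probe
 * registered on that inode in ascending offset order; build_probe_list()
 * below relies on exactly that property.
 */
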
/*
 * For a given inode, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode, struct list_head *head)
{
	struct uprobe *uprobe;
	unsigned long flags;
	struct rb_node *n;

	spin_lock_irqsave(&uprobes_treelock, flags);

	n = find_least_offset_node(inode);

	for (; n; n = rb_next(n)) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		if (uprobe->inode != inode)
			break;

		list_add(&uprobe->pending_list, head);
		atomic_inc(&uprobe->ref);
	}

	spin_unlock_irqrestore(&uprobes_treelock, flags);
}

/*
 * Called from mmap_region.
 * called with mm->mmap_sem acquired.
 *
 * Return a negative errno if we fail to insert probes and we cannot
 * bail out.
 * Return 0 otherwise, i.e.:
 *
 *	- successful insertion of probes
 *	- (or) no possible probes to be inserted.
 *	- (or) insertion of probes failed but we can bail out.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;
	int ret;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	ret = 0;

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		if (!ret) {
			vaddr = vma_address(vma, uprobe->offset);
			if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
				ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
				/* Ignore double add: */
				if (ret == -EEXIST)
					ret = 0;
			}
		}
		put_uprobe(uprobe);
	}

	mutex_unlock(uprobes_mmap_hash(inode));

	return ret;
}

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}
	return 0;
}

static void __exit exit_uprobes(void)
{
}

module_init(init_uprobes);
module_exit(exit_uprobes);