uprobes/core: Handle breakpoint and singlestep exceptions
kernel/events/uprobes.c
/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */

#include <linux/uprobes.h>

static struct srcu_struct uprobes_srcu;
static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/*
 * uprobe_events allows us to skip uprobe_mmap if there are no uprobe
 * events active at this time. Probably a fine-grained per-inode count
 * would be better?
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);

/*
 * Maintain temporary per-vma information that can be used to check whether
 * a vma has already been handled. This structure is needed because
 * extending vm_area_struct wasn't recommended.
 */
struct vma_info {
	struct list_head	probe_list;
	struct mm_struct	*mm;
	loff_t			vaddr;
};

struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after the breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)) == (VM_READ|VM_EXEC))
		return true;

	return false;
}

static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
{
	loff_t vaddr;

	vaddr = vma->vm_start + offset;
	vaddr -= vma->vm_pgoff << PAGE_SHIFT;

	return vaddr;
}

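/*
 * Worked example for vma_address() above (illustrative numbers only):
 * with vm_start = 0x400000, vm_pgoff = 1 and PAGE_SIZE = 4096, a probe
 * at file offset 0x1710 maps to 0x400000 + 0x1710 - 0x1000 = 0x400710.
 * @offset is relative to the start of the file, not the mapping, so the
 * vma's own file offset (vm_pgoff) must be subtracted back out.
 */
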
/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma: vma that holds the pte pointing to page
 * @page: the COWed page we are replacing by kpage
 * @kpage: the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);
	pte_unmap_unlock(ptep, ptl);
	err = 0;

out:
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}

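/*
 * Illustrative note: on x86, for example, UPROBE_SWBP_INSN is the
 * one-byte int3 opcode (0xcc), so the byte compare above suffices; an
 * architecture needing a different check can override this __weak
 * definition.
 */
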
/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest-size instruction for
 * the architecture. If an arch has variable-length instructions and the
 * breakpoint instruction is not the smallest-length instruction supported
 * by that architecture, then we need to modify read_opcode/write_opcode
 * accordingly. This is never a problem for archs that have fixed-length
 * instructions.
 */

/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct address_space *mapping;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	struct uprobe *uprobe;
	loff_t addr;
	int ret;

	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -EINVAL;

	/*
	 * We are interested in text pages only. Our pages of interest
	 * should be mapped for read and execute only. We desist from
	 * adding probes in write mapped pages since the breakpoints
	 * might end up in the file copy.
	 */
	if (!valid_vma(vma, is_swbp_insn(&opcode)))
		goto put_out;

	uprobe = container_of(auprobe, struct uprobe, arch);
	mapping = uprobe->inode->i_mapping;
	if (mapping != vma->vm_file->f_mapping)
		goto put_out;

	addr = vma_address(vma, uprobe->offset);
	if (vaddr != (unsigned long)addr)
		goto put_out;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_out;

	__SetPageUptodate(new_page);

	/*
	 * lock page will serialize against do_wp_page()'s
	 * PageAnon() handling
	 */
	lock_page(old_page);
	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

	/* poke the new insn in, ASSUMES we don't cross page boundary */
	vaddr &= ~PAGE_MASK;
	BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
	memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto unlock_out;

	lock_page(new_page);
	ret = __replace_page(vma, old_page, new_page);
	unlock_page(new_page);

unlock_out:
	unlock_page(old_page);
	page_cache_release(new_page);

put_out:
	put_page(old_page);

	return ret;
}

/**
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
	if (ret <= 0)
		return ret;

	lock_page(page);
	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);
	unlock_page(page);

	put_page(page);

	return 0;
}

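/*
 * Check whether the instruction at @vaddr in @mm is a breakpoint:
 * returns 1 if it is, 0 if it is not, and a negative errno if the
 * opcode could not be read.
 */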
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;

	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return -EEXIST;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 * @verify: if true, verify existence of breakpoint instruction.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
{
	if (verify) {
		int result;

		result = is_swbp_at_addr(mm, vaddr);
		if (!result)
			return -EINVAL;

		if (result != 1)
			return result;
	}
	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}

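/*
 * Compare two uprobes by inode first and offset second; this defines
 * the sort order of uprobes_tree. Returns -1, 0 or 1.
 */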
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

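/*
 * Find a uprobe matching inode:offset in uprobes_tree; on a hit, take
 * an access reference and return the uprobe. Caller must hold
 * uprobes_treelock.
 */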
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset.
 * Acquires uprobes_treelock.
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;
	struct uprobe *u;

	spin_lock_irqsave(&uprobes_treelock, flags);
	u = __insert_uprobe(uprobe);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}

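/*
 * Allocate a uprobe for inode:offset and insert it into the rbtree.
 * If a uprobe already exists for that inode:offset, free the new
 * allocation and return the existing uprobe (whose refcount was
 * raised by insert_uprobe) instead.
 */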
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);
	INIT_LIST_HEAD(&uprobe->pending_list);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}

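/*
 * Run the handler of each consumer whose filter (if any) accepts the
 * current task. Runs with a read lock on consumer_rwsem, so consumers
 * cannot be added or removed concurrently.
 */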
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}

/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if @uc was deleted successfully, false otherwise.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static int
__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
			unsigned long nbytes, unsigned long offset)
{
	struct file *filp = vma->vm_file;
	struct page *page;
	void *vaddr;
	unsigned long off1;
	unsigned long idx;

	if (!filp)
		return -EINVAL;

	idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
	off1 = offset &= ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off1, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int
copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	addr &= ~PAGE_MASK;
	nbytes = PAGE_SIZE - addr;
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes))
			return -ENOMEM;

		bytes = nbytes;
	}
	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, loff_t vaddr)
{
	unsigned long addr;
	int ret;

	/*
	 * If the probe is being deleted, the unregistering thread could
	 * already be done with its vma-rmap walk. Adding a probe now could
	 * be fatal, since nobody would be able to clean it up. Also, we
	 * could be called from the fork or mremap path, where the probe
	 * might already have been inserted. Hence behave as if the probe
	 * already existed.
	 */
	if (!uprobe->consumers)
		return -EEXIST;

	addr = (unsigned long)vaddr;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma, addr);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -EEXIST;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
		if (ret)
			return ret;

		uprobe->flags |= UPROBE_COPY_INSN;
	}
	ret = set_swbp(&uprobe->arch, mm, addr);

	return ret;
}

static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
{
	set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true);
}

/*
 * There could be threads that have hit the breakpoint and are entering the
 * notifier code and trying to acquire the uprobes_treelock. The thread
 * calling delete_uprobe() that is removing the uprobe from the rb_tree can
 * race with these threads and might acquire the uprobes_treelock before
 * some of the breakpoint-hit threads do. In that case those threads would
 * not find the uprobe. Hence the unregistering thread waits, via
 * synchronize_srcu(), until all threads that have hit the breakpoint have
 * acquired the uprobes_treelock, before the uprobe is removed from the
 * rbtree.
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;

	synchronize_srcu(&uprobes_srcu);
	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}

static struct vma_info *
__find_next_vma_info(struct address_space *mapping, struct list_head *head,
			struct vma_info *vi, loff_t offset, bool is_register)
{
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct vma_info *tmpvi;
	unsigned long pgoff;
	int existing_vma;
	loff_t vaddr;

	pgoff = offset >> PAGE_SHIFT;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		existing_vma = 0;
		vaddr = vma_address(vma, offset);

		list_for_each_entry(tmpvi, head, probe_list) {
			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
				existing_vma = 1;
				break;
			}
		}

		/*
		 * Another vma needs a probe to be installed. However skip
		 * installing the probe if the vma is about to be unlinked.
		 */
		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
			vi->mm = vma->vm_mm;
			vi->vaddr = vaddr;
			list_add(&vi->probe_list, head);

			return vi;
		}
	}

	return NULL;
}

/*
 * Iterate in the rmap prio tree and find a vma where a probe has not
 * yet been inserted.
 */
static struct vma_info *
find_next_vma_info(struct address_space *mapping, struct list_head *head,
			loff_t offset, bool is_register)
{
	struct vma_info *vi, *retvi;

	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
	if (!vi)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mapping->i_mmap_mutex);
	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!retvi)
		kfree(vi);

	return retvi;
}

static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct list_head try_list;
	struct vm_area_struct *vma;
	struct address_space *mapping;
	struct vma_info *vi, *tmpvi;
	struct mm_struct *mm;
	loff_t vaddr;
	int ret;

	mapping = uprobe->inode->i_mapping;
	INIT_LIST_HEAD(&try_list);

	ret = 0;

	for (;;) {
		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
		if (!vi)
			break;

		if (IS_ERR(vi)) {
			ret = PTR_ERR(vi);
			break;
		}

		mm = vi->mm;
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, (unsigned long)vi->vaddr);
		if (!vma || !valid_vma(vma, is_register)) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}
		vaddr = vma_address(vma, uprobe->offset);
		if (vma->vm_file->f_mapping->host != uprobe->inode ||
						vaddr != vi->vaddr) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}

		if (is_register)
			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
		else
			remove_breakpoint(uprobe, mm, vi->vaddr);

		up_read(&mm->mmap_sem);
		mmput(mm);
		if (is_register) {
			if (ret && ret == -EEXIST)
				ret = 0;
			if (ret)
				break;
		}
	}

	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
		list_del(&vi->probe_list);
		kfree(vi);
	}

	return ret;
}

static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO: can't unregister? schedule a worker thread */
}

/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e., the first consumer for a @inode:@offset
 * tuple). The creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. The creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes,
 * else return 0 (success).
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}

/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);
}

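/*
 * Usage example (an illustrative sketch only, not part of this file;
 * the handler body and the way @inode and @offset are obtained are left
 * to the caller):
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler = sample_handler,
 *	};
 *
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 *
 * A consumer can also supply a ->filter callback to restrict which
 * tasks run its handler (see handler_chain() above).
 */
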
/*
 * Of all the nodes that correspond to the given inode, return the node
 * with the least offset.
 */
static struct rb_node *find_least_offset_node(struct inode *inode)
{
	struct uprobe u = { .inode = inode, .offset = 0};
	struct rb_node *n = uprobes_tree.rb_node;
	struct rb_node *close_node = NULL;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);

		if (uprobe->inode == inode)
			close_node = n;

		if (!match)
			return close_node;

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}

	return close_node;
}

/*
 * For a given inode, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode, struct list_head *head)
{
	struct uprobe *uprobe;
	unsigned long flags;
	struct rb_node *n;

	spin_lock_irqsave(&uprobes_treelock, flags);

	n = find_least_offset_node(inode);

	for (; n; n = rb_next(n)) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		if (uprobe->inode != inode)
			break;

		list_add(&uprobe->pending_list, head);
		atomic_inc(&uprobe->ref);
	}

	spin_unlock_irqrestore(&uprobes_treelock, flags);
}

/*
 * Called from mmap_region, with mm->mmap_sem acquired.
 *
 * Return a negative errno if we fail to insert probes and we cannot
 * bail out.
 * Return 0 otherwise, i.e. on:
 *
 *	- successful insertion of probes
 *	- (or) no possible probes to be inserted.
 *	- (or) insertion of probes failed but we can bail out.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;
	int ret;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	ret = 0;

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		if (!ret) {
			vaddr = vma_address(vma, uprobe->offset);
			if (vaddr >= vma->vm_start && vaddr < vma->vm_end) {
				ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
				/* Ignore double add: */
				if (ret == -EEXIST)
					ret = 0;
			}
		}
		put_uprobe(uprobe);
	}

	mutex_unlock(uprobes_mmap_hash(inode));

	return ret;
}

/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

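/*
 * Illustrative note: on x86, for example, the int3 trap reports the
 * instruction pointer already advanced past the one-byte breakpoint,
 * so with UPROBE_SWBP_INSN_SIZE == 1 this computes ip - 1, the address
 * of the breakpoint byte itself. Architectures that report the trap
 * address differently can override this __weak helper.
 */
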
/*
 * Called with no locks held.
 * Called in the context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (t->uprobe_srcu_id != -1)
		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	kfree(utask);
	t->utask = NULL;
}

/*
 * Called in the context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
	t->uprobe_srcu_id = -1;
}

/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof *utask, GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	utask->active_uprobe = NULL;
	current->utask = utask;
	return utask;
}

/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	return -EFAULT;
}

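/*
 * Note: pre_ssout() above always fails at this point; out-of-line
 * single-step (XOL) slot management is presumably supplied by a later
 * change. Until then, handle_swbp() below takes its cleanup path and
 * re-executes the original instruction when it cannot single-step.
 */
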
/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}

/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		return true;

	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	return false;
}

/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	struct mm_struct *mm;
	unsigned long bp_vaddr;

	uprobe = NULL;
	bp_vaddr = uprobe_get_swbp_addr(regs);
	mm = current->mm;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);

	if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
		struct inode *inode;
		loff_t offset;

		inode = vma->vm_file->f_mapping->host;
		offset = bp_vaddr - vma->vm_start;
		offset += (vma->vm_pgoff << PAGE_SHIFT);
		uprobe = find_uprobe(inode, offset);
	}

	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
	current->uprobe_srcu_id = -1;
	up_read(&mm->mmap_sem);

	if (!uprobe) {
		/* No matching uprobe; signal SIGTRAP. */
		send_sig(SIGTRAP, current, 0);
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto cleanup_ret;
	}
	utask->active_uprobe = uprobe;
	handler_chain(uprobe, regs);
	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
		goto cleanup_ret;

	utask->state = UTASK_SSTEP;
	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		user_enable_single_step(current);
		return;
	}

cleanup_ret:
	if (utask) {
		utask->active_uprobe = NULL;
		utask->state = UTASK_RUNNING;
	}
	if (uprobe) {
		if (!(uprobe->flags & UPROBE_SKIP_SSTEP))

			/*
			 * cannot singlestep; cannot skip instruction;
			 * re-execute the instruction.
			 */
			instruction_pointer_set(regs, bp_vaddr);

		put_uprobe(uprobe);
	}
}

/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	user_disable_single_step(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
 * (and on subsequent probe hits sets the thread's state to UTASK_BP_HIT)
 * and allows the thread to return from the interrupt.
 *
 * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
 * flag, also sets the state to UTASK_SSTEP_ACK, and allows the thread to
 * return from the interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag
 * and calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	utask = current->utask;
	if (!utask || utask->state == UTASK_BP_HIT)
		handle_swbp(regs);
	else
		handle_singlestep(utask, regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	if (!current->mm)
		return 0;

	utask = current->utask;
	if (utask)
		utask->state = UTASK_BP_HIT;

	set_thread_flag(TIF_UPROBE);
	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);

	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call	= arch_uprobe_exception_notify,
	.priority	= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}
	init_srcu_struct(&uprobes_srcu);

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);

static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);