/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */

#include <linux/uprobes.h>
#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE
static struct srcu_struct uprobes_srcu;
static struct rb_root uprobes_tree = RB_ROOT;

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */
#define UPROBES_HASH_SZ	13

/* serialize (un)register */
static struct mutex uprobes_mutex[UPROBES_HASH_SZ];

#define uprobes_hash(v)		(&uprobes_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];

#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
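
/*
 * Illustrative only (not part of the original source): both hash macros key
 * on the inode pointer, so the register/unregister and mmap paths that
 * operate on the same inode always contend on the same bucket mutex.
 * A minimal sketch:
 *
 *	struct inode *inode = ...;		// some probed inode
 *	mutex_lock(uprobes_hash(inode));	// serialize (un)register on it
 *	...
 *	mutex_unlock(uprobes_hash(inode));
 */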
/*
 * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
 * events active at this time.  Probably a fine grained per inode count is
 * better.
 */
static atomic_t uprobe_events = ATOMIC_INIT(0);
/*
 * Maintain a temporary per vma info that can be used to search if a vma
 * has already been handled. This structure is introduced since extending
 * vm_area_struct wasn't recommended.
 */
struct vma_info {
	struct list_head	probe_list;
	struct mm_struct	*mm;
	loff_t			vaddr;
};
struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	int			flags;
	struct arch_uprobe	arch;
};
/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return 1 if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	if (!vma->vm_file)
		return false;

	if (!is_register)
		return true;

	if ((vma->vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED))
						== (VM_READ|VM_EXEC))
		return true;

	return false;
}
static loff_t vma_address(struct vm_area_struct *vma, loff_t offset)
{
	loff_t vaddr;

	vaddr = vma->vm_start + offset;
	vaddr -= vma->vm_pgoff << PAGE_SHIFT;

	return vaddr;
}
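
/*
 * Illustrative only: if a file is mapped at vm_start = 0x400000 with
 * vm_pgoff = 0 and the probed instruction sits at file offset 0x1234,
 * vma_address() yields 0x400000 + 0x1234 = 0x401234.  With vm_pgoff = 1
 * (the mapping starts one page into the file), the same offset maps to
 * 0x400000 + 0x1234 - 0x1000 = 0x400234.  The numbers are hypothetical.
 */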
/*
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:   vma that holds the pte pointing to page
 * @page:  the cowed page we are replacing by kpage
 * @kpage: the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud))
		goto out;

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		goto out;

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!ptep)
		goto out;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);
	pte_unmap_unlock(ptep, ptl);
	err = 0;

out:
	return err;
}
/*
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}
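
/*
 * Illustrative only: on x86, UPROBE_SWBP_INSN is the one-byte int3 opcode
 * (0xcc), so the default check above reduces to a single byte compare.
 * An architecture with more than one trap encoding would override this
 * __weak symbol along these (hypothetical) lines:
 *
 *	bool is_swbp_insn(uprobe_opcode_t *insn)
 *	{
 *		// ARCH_ALT_SWBP_INSN is a made-up name for a second encoding
 *		return *insn == UPROBE_SWBP_INSN || *insn == ARCH_ALT_SWBP_INSN;
 *	}
 */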
/*
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of the smallest length instruction
 * supported by that architecture then we need to modify read_opcode /
 * write_opcode accordingly. This would never be a problem for archs that
 * have fixed length instructions.
 */
/*
 * write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch breakpointing information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
			unsigned long vaddr, uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	struct address_space *mapping;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	struct uprobe *uprobe;
	loff_t addr;
	int ret;

	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = -EINVAL;

	/*
	 * We are interested in text pages only. Our pages of interest
	 * should be mapped for read and execute only. We desist from
	 * adding probes in write mapped pages since the breakpoints
	 * might end up in the file copy.
	 */
	if (!valid_vma(vma, is_swbp_insn(&opcode)))
		goto put_out;

	uprobe = container_of(auprobe, struct uprobe, arch);
	mapping = uprobe->inode->i_mapping;
	if (mapping != vma->vm_file->f_mapping)
		goto put_out;

	addr = vma_address(vma, uprobe->offset);
	if (vaddr != (unsigned long)addr)
		goto put_out;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_out;

	__SetPageUptodate(new_page);

	/*
	 * lock page will serialize against do_wp_page()'s
	 * PageAnon() handling
	 */
	lock_page(old_page);
	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);

	/* poke the new insn in, ASSUMES we don't cross page boundary */
	vaddr &= ~PAGE_MASK;
	BUG_ON(vaddr + UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);
	memcpy(vaddr_new + vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto unlock_out;

	lock_page(new_page);
	ret = __replace_page(vma, old_page, new_page);
	unlock_page(new_page);

unlock_out:
	unlock_page(old_page);
	page_cache_release(new_page);

put_out:
	put_page(old_page);

	return ret;
}
/*
 * read_opcode - read the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to read the opcode.
 * @opcode: location to store the read opcode.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, read the opcode at @vaddr and store it in @opcode.
 * Return 0 (success) or a negative errno.
 */
static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	struct page *page;
	void *vaddr_new;
	int ret;

	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 0, &page, NULL);
	if (ret <= 0)
		return ret;

	lock_page(page);
	vaddr_new = kmap_atomic(page);
	vaddr &= ~PAGE_MASK;
	memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(vaddr_new);
	unlock_page(page);

	put_page(page);

	return 0;
}
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	uprobe_opcode_t opcode;
	int result;

	result = read_opcode(mm, vaddr, &opcode);
	if (result)
		return result;

	if (is_swbp_insn(&opcode))
		return 1;

	return 0;
}
/*
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	int result;

	result = is_swbp_at_addr(mm, vaddr);
	if (result == 1)
		return -EEXIST;

	if (result)
		return result;

	return write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
}
/*
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 * @verify: if true, verify existence of breakpoint instruction.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify)
{
	if (verify) {
		int result;

		result = is_swbp_at_addr(mm, vaddr);
		if (!result)
			return -EINVAL;

		if (result != 1)
			return result;
	}
	return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}
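
/*
 * Illustrative only: match_uprobe() imposes a total order on
 * (inode, offset) pairs, inode first and offset second, so the rbtree
 * keeps all probes of one inode contiguous and sorted by offset.  With
 * hypothetical pointer values, (inodeA, 0x10) < (inodeA, 0x20) < (inodeB, 0x0)
 * whenever inodeA < inodeB numerically.
 */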
static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}
/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;
	unsigned long flags;

	spin_lock_irqsave(&uprobes_treelock, flags);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	return uprobe;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return NULL;
}
/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;
	struct uprobe *u;

	spin_lock_irqsave(&uprobes_treelock, flags);
	u = __insert_uprobe(uprobe);
	spin_unlock_irqrestore(&uprobes_treelock, flags);

	/* For now assume that the instruction need not be single-stepped */
	uprobe->flags |= UPROBE_SKIP_SSTEP;

	return u;
}
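
/*
 * Illustrative only: under the refcount protocol above, a freshly
 * inserted uprobe starts at 2 (a "creation" ref dropped once the last
 * consumer goes away, plus an "access" ref for the caller), while a
 * lookup that hits an existing node only takes one access ref:
 *
 *	u = insert_uprobe(uprobe);	// NULL: inserted, uprobe->ref == 2
 *	if (u)				// non-NULL: pre-existing, u->ref++
 *		put_uprobe(u);		// drop the access ref when done
 */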
static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->consumer_rwsem);
	INIT_LIST_HEAD(&uprobe->pending_list);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	} else {
		atomic_inc(&uprobe_events);
	}

	return uprobe;
}
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;

	if (!(uprobe->flags & UPROBE_RUN_HANDLER))
		return;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		if (!uc->filter || uc->filter(uc, current))
			uc->handler(uc, regs);
	}
	up_read(&uprobe->consumer_rwsem);
}
/* Returns the previous consumer */
static struct uprobe_consumer *
consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);

	return uc->next;
}
/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
static int
__copy_insn(struct address_space *mapping, struct vm_area_struct *vma, char *insn,
			unsigned long nbytes, unsigned long offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off1;
	unsigned long idx;
	struct file *filp = vma->vm_file;

	if (!filp)
		return -EINVAL;

	idx = (unsigned long)(offset >> PAGE_CACHE_SHIFT);
	off1 = offset &= ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off1, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}
static int
copy_insn(struct uprobe *uprobe, struct vm_area_struct *vma, unsigned long addr)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	addr &= ~PAGE_MASK;
	nbytes = PAGE_SIZE - addr;
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		if (__copy_insn(mapping, vma, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes))
			return -ENOMEM;

		bytes = nbytes;
	}
	return __copy_insn(mapping, vma, uprobe->arch.insn, bytes, uprobe->offset);
}
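
/*
 * Illustrative only: with PAGE_SIZE = 4096, a probe at page offset
 * addr = 4090 leaves nbytes = 6 bytes in the first page.  If
 * MAX_UINSN_BYTES = 16, the trailing 10 bytes are copied from the second
 * page first, then the leading 6 from the first page, so arch.insn ends
 * up holding the full 16-byte instruction window.  The numbers are
 * examples only; the constants are architecture dependent.
 */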
/*
 * How mm->uprobes_state.count gets updated
 * uprobe_mmap() increments the count if
 * 	- it successfully adds a breakpoint.
 * 	- it cannot add a breakpoint, but sees that there is an underlying
 * 	  breakpoint (via is_swbp_at_addr()).
 *
 * uprobe_munmap() decrements the count if
 * 	- it sees an underlying breakpoint (via is_swbp_at_addr).
 * 	  (Subsequent uprobe_unregister wouldn't find the breakpoint
 * 	  unless a uprobe_mmap kicks in, since the old vma would be
 * 	  dropped just after uprobe_munmap.)
 *
 * uprobe_register increments the count if:
 * 	- it successfully adds a breakpoint.
 *
 * uprobe_unregister decrements the count if:
 * 	- it sees an underlying breakpoint and removes it successfully
 * 	  (via is_swbp_at_addr).
 * 	  (Subsequent uprobe_munmap wouldn't find the breakpoint
 * 	  since there is no underlying breakpoint after the
 * 	  breakpoint removal.)
 */
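
/*
 * Illustrative only: for a single probe in one mm the counter typically
 * moves as
 *
 *	uprobe_register()   -> install_breakpoint()  count: 0 -> 1
 *	uprobe_unregister() -> remove_breakpoint()   count: 1 -> 0
 *
 * which is what lets uprobe_pre_sstep_notifier() cheaply ignore traps in
 * an mm whose count is zero.
 */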
static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, loff_t vaddr)
{
	unsigned long addr;
	int ret;

	/*
	 * If probe is being deleted, unregister thread could be done with
	 * the vma-rmap-walk through. Adding a probe now can be fatal since
	 * nobody will be able to cleanup. Also we could be from fork or
	 * mremap path, where the probe might have already been inserted.
	 * Hence behave as if probe already existed.
	 */
	if (!uprobe->consumers)
		return -EEXIST;

	addr = (unsigned long)vaddr;

	if (!(uprobe->flags & UPROBE_COPY_INSN)) {
		ret = copy_insn(uprobe, vma, addr);
		if (ret)
			return ret;

		if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
			return -EEXIST;

		ret = arch_uprobe_analyze_insn(&uprobe->arch, mm);
		if (ret)
			return ret;

		uprobe->flags |= UPROBE_COPY_INSN;
	}

	/*
	 * Ideally, should be updating the probe count after the breakpoint
	 * has been successfully inserted. However a thread could hit the
	 * breakpoint we just inserted even before the probe count is
	 * incremented. If this is the first breakpoint placed, breakpoint
	 * notifier might ignore uprobes and pass the trap to the thread.
	 * Hence increment before and decrement on failure.
	 */
	atomic_inc(&mm->uprobes_state.count);
	ret = set_swbp(&uprobe->arch, mm, addr);
	if (ret)
		atomic_dec(&mm->uprobes_state.count);

	return ret;
}
static void
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, loff_t vaddr)
{
	if (!set_orig_insn(&uprobe->arch, mm, (unsigned long)vaddr, true))
		atomic_dec(&mm->uprobes_state.count);
}
/*
 * There could be threads that have already hit the breakpoint and are
 * entering the notifier code and trying to acquire the uprobes_treelock.
 * The thread calling delete_uprobe() that is removing the uprobe from the
 * rb_tree can race with these threads and might acquire the
 * uprobes_treelock before some of the breakpoint-hit threads do.  In such
 * a case, those threads would not find the uprobe.  Hence the
 * unregistering thread waits (via synchronize_srcu) until all threads
 * that have hit a breakpoint are done, before removing the uprobe from
 * the rbtree under uprobes_treelock.
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	unsigned long flags;

	synchronize_srcu(&uprobes_srcu);
	spin_lock_irqsave(&uprobes_treelock, flags);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock_irqrestore(&uprobes_treelock, flags);
	iput(uprobe->inode);
	put_uprobe(uprobe);
	atomic_dec(&uprobe_events);
}
static struct vma_info *
__find_next_vma_info(struct address_space *mapping, struct list_head *head,
			struct vma_info *vi, loff_t offset, bool is_register)
{
	struct prio_tree_iter iter;
	struct vm_area_struct *vma;
	struct vma_info *tmpvi;
	unsigned long pgoff;
	int existing_vma;
	loff_t vaddr;

	pgoff = offset >> PAGE_SHIFT;

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		existing_vma = 0;
		vaddr = vma_address(vma, offset);

		list_for_each_entry(tmpvi, head, probe_list) {
			if (tmpvi->mm == vma->vm_mm && tmpvi->vaddr == vaddr) {
				existing_vma = 1;
				break;
			}
		}

		/*
		 * Another vma needs a probe to be installed. However skip
		 * installing the probe if the vma is about to be unlinked.
		 */
		if (!existing_vma && atomic_inc_not_zero(&vma->vm_mm->mm_users)) {
			vi->mm = vma->vm_mm;
			vi->vaddr = vaddr;
			list_add(&vi->probe_list, head);

			return vi;
		}
	}

	return NULL;
}
/*
 * Iterate in the rmap prio tree and find a vma where a probe has not
 * yet been inserted.
 */
static struct vma_info *
find_next_vma_info(struct address_space *mapping, struct list_head *head,
			loff_t offset, bool is_register)
{
	struct vma_info *vi, *retvi;

	vi = kzalloc(sizeof(struct vma_info), GFP_KERNEL);
	if (!vi)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mapping->i_mmap_mutex);
	retvi = __find_next_vma_info(mapping, head, vi, offset, is_register);
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!retvi)
		kfree(vi);

	return retvi;
}
static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct list_head try_list;
	struct vm_area_struct *vma;
	struct address_space *mapping;
	struct vma_info *vi, *tmpvi;
	struct mm_struct *mm;
	loff_t vaddr;
	int ret;

	mapping = uprobe->inode->i_mapping;
	INIT_LIST_HEAD(&try_list);

	ret = 0;

	for (;;) {
		vi = find_next_vma_info(mapping, &try_list, uprobe->offset, is_register);
		if (!vi)
			break;

		if (IS_ERR(vi)) {
			ret = PTR_ERR(vi);
			break;
		}

		mm = vi->mm;
		down_read(&mm->mmap_sem);
		vma = find_vma(mm, (unsigned long)vi->vaddr);
		if (!vma || !valid_vma(vma, is_register)) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}
		vaddr = vma_address(vma, uprobe->offset);
		if (vma->vm_file->f_mapping->host != uprobe->inode ||
						vaddr != vi->vaddr) {
			list_del(&vi->probe_list);
			kfree(vi);
			up_read(&mm->mmap_sem);
			mmput(mm);
			continue;
		}

		if (is_register)
			ret = install_breakpoint(uprobe, mm, vma, vi->vaddr);
		else
			remove_breakpoint(uprobe, mm, vi->vaddr);

		up_read(&mm->mmap_sem);
		mmput(mm);
		if (is_register) {
			if (ret && ret == -EEXIST)
				ret = 0;
			if (ret)
				break;
		}
	}

	list_for_each_entry_safe(vi, tmpvi, &try_list, probe_list) {
		list_del(&vi->probe_list);
		kfree(vi);
		mmput(vi->mm);
	}

	return ret;
}
static int __uprobe_register(struct uprobe *uprobe)
{
	return register_for_each_vma(uprobe, true);
}
static void __uprobe_unregister(struct uprobe *uprobe)
{
	if (!register_for_each_vma(uprobe, false))
		delete_uprobe(uprobe);

	/* TODO : can't unregister? schedule a worker thread */
}
/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple).  Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	if (!inode || !uc || uc->next)
		return -EINVAL;

	if (offset > i_size_read(inode))
		return -EINVAL;

	ret = 0;
	mutex_lock(uprobes_hash(inode));
	uprobe = alloc_uprobe(inode, offset);

	if (uprobe && !consumer_add(uprobe, uc)) {
		ret = __uprobe_register(uprobe);
		if (ret) {
			uprobe->consumers = NULL;
			__uprobe_unregister(uprobe);
		} else {
			uprobe->flags |= UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	if (uprobe)
		put_uprobe(uprobe);

	return ret;
}
/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	if (!inode || !uc)
		return;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	mutex_lock(uprobes_hash(inode));

	if (consumer_del(uprobe, uc)) {
		if (!uprobe->consumers) {
			__uprobe_unregister(uprobe);
			uprobe->flags &= ~UPROBE_RUN_HANDLER;
		}
	}

	mutex_unlock(uprobes_hash(inode));
	put_uprobe(uprobe);
}
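
/*
 * Illustrative usage sketch (not part of this file): a client supplies a
 * uprobe_consumer whose handler runs each time any task hits the probed
 * inode:offset.  The names my_handler/my_consumer are hypothetical.
 *
 *	static int my_handler(struct uprobe_consumer *uc, struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;
 *	}
 *
 *	static struct uprobe_consumer my_consumer = {
 *		.handler = my_handler,
 *		.filter	 = NULL,	// NULL filter: fire for every task
 *	};
 *
 *	err = uprobe_register(inode, offset, &my_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &my_consumer);
 */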
/*
 * Of all the nodes that correspond to the given inode, return the node
 * with the least offset.
 */
static struct rb_node *find_least_offset_node(struct inode *inode)
{
	struct uprobe u = { .inode = inode, .offset = 0};
	struct rb_node *n = uprobes_tree.rb_node;
	struct rb_node *close_node = NULL;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);

		if (uprobe->inode == inode)
			close_node = n;

		if (!match)
			return close_node;

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}

	return close_node;
}
/*
 * For a given inode, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode, struct list_head *head)
{
	struct uprobe *uprobe;
	unsigned long flags;
	struct rb_node *n;

	spin_lock_irqsave(&uprobes_treelock, flags);

	n = find_least_offset_node(inode);

	for (; n; n = rb_next(n)) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		if (uprobe->inode != inode)
			break;

		list_add(&uprobe->pending_list, head);
		atomic_inc(&uprobe->ref);
	}

	spin_unlock_irqrestore(&uprobes_treelock, flags);
}
/*
 * Called from mmap_region.
 * called with mm->mmap_sem acquired.
 *
 * Return a negative errno if we fail to insert probes and we cannot
 * bail out.
 * Return 0 otherwise, i.e.:
 *
 *	- successful insertion of probes
 *	- (or) no possible probes to be inserted.
 *	- (or) insertion of probes failed but we can bail out.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;
	int ret, count;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	ret = 0;
	count = 0;

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		if (!ret) {
			vaddr = vma_address(vma, uprobe->offset);

			if (vaddr < vma->vm_start || vaddr >= vma->vm_end) {
				put_uprobe(uprobe);
				continue;
			}

			ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);

			/* Ignore double add: */
			if (ret == -EEXIST) {
				ret = 0;

				if (is_swbp_at_addr(vma->vm_mm, vaddr))
					/*
					 * Unable to insert a breakpoint, but
					 * breakpoint lies underneath. Increment the
					 * probe count.
					 */
					atomic_inc(&vma->vm_mm->uprobes_state.count);
				else
					count--;
			}

			if (!ret)
				count++;
		}
		put_uprobe(uprobe);
	}

	mutex_unlock(uprobes_mmap_hash(inode));

	if (ret)
		atomic_sub(count, &vma->vm_mm->uprobes_state.count);

	return ret;
}
/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->uprobes_state.count))
		return;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return;

	INIT_LIST_HEAD(&tmp_list);
	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, &tmp_list);

	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		loff_t vaddr;

		list_del(&uprobe->pending_list);
		vaddr = vma_address(vma, uprobe->offset);

		if (vaddr >= start && vaddr < end) {
			/*
			 * An unregister could have removed the probe before
			 * unmap. So check before we decrement the count.
			 */
			if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1)
				atomic_dec(&vma->vm_mm->uprobes_state.count);
		}
		put_uprobe(uprobe);
	}

	mutex_unlock(uprobes_mmap_hash(inode));
}
/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm;
	int ret;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		return -ENOMEM;

	ret = -EALREADY;
	mm = current->mm;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;

	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;

fail:
	up_write(&mm->mmap_sem);
	if (ret)
		__free_page(area->page);

	return ret;
}
static struct xol_area *get_xol_area(struct mm_struct *mm)
{
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */

	return area;
}
/*
 * xol_alloc_area - Allocate process's xol_area.
 * This area will be used for storing instructions for execution out of
 * line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *xol_alloc_area(void)
{
	struct xol_area *area;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto fail;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

fail:
	kfree(area->bitmap);
	kfree(area);

	return get_xol_area(current->mm);
}
/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}
/*
 * uprobe_reset_state - Reset the uprobes state for a fresh mm;
 * the XOL area and probe count are not inherited.
 */
void uprobe_reset_state(struct mm_struct *mm)
{
	mm->uprobes_state.xol_area = NULL;
	atomic_set(&mm->uprobes_state.count, 0);
}
/*
 *  - search for a free slot.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}
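
/*
 * Illustrative only: if PAGE_SIZE were 4096 and UPROBE_XOL_SLOT_BYTES
 * were 128, there would be 32 slots per XOL page, and slot_nr = 3 would
 * map to area->vaddr + 3 * 128 = area->vaddr + 0x180.  Both constants are
 * architecture dependent; these numbers are examples only.
 */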
/*
 * xol_get_insn_slot - If the thread was not allocated a slot yet,
 * allocate one.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe, unsigned long slot_addr)
{
	struct xol_area *area;
	unsigned long offset;
	void *vaddr;

	area = get_xol_area(current->mm);
	if (!area) {
		area = xol_alloc_area();
		if (!area)
			return 0;
	}
	current->utask->xol_vaddr = xol_take_insn_slot(area);

	/*
	 * Initialize the slot if xol_vaddr points to valid
	 * instruction slot.
	 */
	if (unlikely(!current->utask->xol_vaddr))
		return 0;

	current->utask->vaddr = slot_addr;
	offset = current->utask->xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);

	return current->utask->xol_vaddr;
}
/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;

	if (unlikely(!slot_addr || IS_ERR_VALUE(slot_addr)))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}
/*
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
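
/*
 * Illustrative only: on x86 the int3 trap leaves the instruction pointer
 * just past the one-byte breakpoint, so with UPROBE_SWBP_INSN_SIZE == 1 a
 * trap taken at bp_vaddr reports regs->ip == bp_vaddr + 1 and the helper
 * above recovers bp_vaddr.
 */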
/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (t->uprobe_srcu_id != -1)
		srcu_read_unlock_raw(&uprobes_srcu, t->uprobe_srcu_id);

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}
/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
	t->uprobe_srcu_id = -1;
}
/*
 * Allocate a uprobe_task object for the task.
 * Called when the thread hits a breakpoint for the first time.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *add_utask(void)
{
	struct uprobe_task *utask;

	utask = kzalloc(sizeof *utask, GFP_KERNEL);
	if (unlikely(!utask))
		return NULL;

	utask->active_uprobe = NULL;
	current->utask = utask;
	return utask;
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long vaddr)
{
	if (xol_get_insn_slot(uprobe, vaddr) && !arch_uprobe_pre_xol(&uprobe->arch, regs))
		return 0;

	return -EFAULT;
}
/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc; the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}
/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
		return true;

	uprobe->flags &= ~UPROBE_SKIP_SSTEP;
	return false;
}
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	struct uprobe_task *utask;
	struct uprobe *uprobe;
	struct mm_struct *mm;
	unsigned long bp_vaddr;

	uprobe = NULL;
	bp_vaddr = uprobe_get_swbp_addr(regs);
	mm = current->mm;
	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);

	if (vma && vma->vm_start <= bp_vaddr && valid_vma(vma, false)) {
		struct inode *inode;
		loff_t offset;

		inode = vma->vm_file->f_mapping->host;
		offset = bp_vaddr - vma->vm_start;
		offset += (vma->vm_pgoff << PAGE_SHIFT);
		uprobe = find_uprobe(inode, offset);
	}

	srcu_read_unlock_raw(&uprobes_srcu, current->uprobe_srcu_id);
	current->uprobe_srcu_id = -1;
	up_read(&mm->mmap_sem);

	if (!uprobe) {
		/* No matching uprobe; signal SIGTRAP. */
		send_sig(SIGTRAP, current, 0);
		return;
	}

	utask = current->utask;
	if (!utask) {
		utask = add_utask();
		/* Cannot allocate; re-execute the instruction. */
		if (!utask)
			goto cleanup_ret;
	}
	utask->active_uprobe = uprobe;
	handler_chain(uprobe, regs);
	if (uprobe->flags & UPROBE_SKIP_SSTEP && can_skip_sstep(uprobe, regs))
		goto cleanup_ret;

	utask->state = UTASK_SSTEP;
	if (!pre_ssout(uprobe, regs, bp_vaddr)) {
		user_enable_single_step(current);
		return;
	}

cleanup_ret:
	if (utask) {
		utask->active_uprobe = NULL;
		utask->state = UTASK_RUNNING;
	}
	if (!(uprobe->flags & UPROBE_SKIP_SSTEP))
		/*
		 * cannot singlestep; cannot skip instruction;
		 * re-execute the instruction.
		 */
		instruction_pointer_set(regs, bp_vaddr);

	put_uprobe(uprobe);
}
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	user_disable_single_step(current);
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}
/*
 * On a breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag
 * (and on subsequent probe hits on the thread sets the state to
 * UTASK_BP_HIT) and allows the thread to return from the interrupt.
 *
 * On a singlestep exception, the singlestep notifier sets the TIF_UPROBE
 * flag and also sets the state to UTASK_SSTEP_ACK and allows the thread to
 * return from the interrupt.
 *
 * While returning to userspace, the thread notices the TIF_UPROBE flag and
 * calls uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	utask = current->utask;
	if (!utask || utask->state == UTASK_BP_HIT)
		handle_swbp(regs);
	else
		handle_singlestep(utask, regs);
}
/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	if (!current->mm || !atomic_read(&current->mm->uprobes_state.count))
		/* task is currently not uprobed */
		return 0;

	utask = current->utask;
	if (utask)
		utask->state = UTASK_BP_HIT;

	set_thread_flag(TIF_UPROBE);
	current->uprobe_srcu_id = srcu_read_lock_raw(&uprobes_srcu);

	return 1;
}
/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}
static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};
static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++) {
		mutex_init(&uprobes_mutex[i]);
		mutex_init(&uprobes_mmap_mutex[i]);
	}
	init_srcu_struct(&uprobes_srcu);

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);
static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);