2 * Kernel Probes (KProbes)
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 * Copyright (C) IBM Corporation, 2002, 2004
21 * 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
22 * Probes initial implementation (includes suggestions from
24 * 2004-Aug Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
25 * hlists and exceptions notifier as suggested by Andi Kleen.
26 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
27 * interface to access function arguments.
28 * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
29 * exceptions notifier to be first on the priority list.
30 * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
31 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
32 * <prasanna@in.ibm.com> added function-return probes.
34 #include <linux/kprobes.h>
35 #include <linux/hash.h>
36 #include <linux/init.h>
37 #include <linux/slab.h>
38 #include <linux/stddef.h>
39 #include <linux/export.h>
40 #include <linux/moduleloader.h>
41 #include <linux/kallsyms.h>
42 #include <linux/freezer.h>
43 #include <linux/seq_file.h>
44 #include <linux/debugfs.h>
45 #include <linux/sysctl.h>
46 #include <linux/kdebug.h>
47 #include <linux/memory.h>
48 #include <linux/ftrace.h>
49 #include <linux/cpu.h>
50 #include <linux/jump_label.h>
52 #include <asm-generic/sections.h>
53 #include <asm/cacheflush.h>
54 #include <asm/errno.h>
55 #include <asm/uaccess.h>
56 #ifdef CONFIG_RETPOLINE
57 #include <asm/nospec-branch.h>
60 #define KPROBE_HASH_BITS 6
61 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
65 * Some oddball architectures, such as 64-bit powerpc, have function descriptors,
66 * so this lookup must be overridable.
68 #ifndef kprobe_lookup_name
69 #define kprobe_lookup_name(name, addr) \
70 addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
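/*
 * A minimal usage sketch (illustrative only; the symbol name is purely an
 * example): resolving a probe address from a symbol name looks like
 *
 *	kprobe_opcode_t *addr;
 *
 *	kprobe_lookup_name("do_fork", addr);
 *	if (addr == NULL)
 *		return -ENOENT;
 */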
73 static int kprobes_initialized;
74 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
75 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
77 /* NOTE: change this value only with kprobe_mutex held */
78 static bool kprobes_all_disarmed;
80 /* This protects kprobe_table and optimizing_list */
81 static DEFINE_MUTEX(kprobe_mutex);
82 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
84 raw_spinlock_t lock ____cacheline_aligned_in_smp;
85 } kretprobe_table_locks[KPROBE_TABLE_SIZE];
87 static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
89 return &(kretprobe_table_locks[hash].lock);
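/*
 * The hash passed in here is derived from the probed task, e.g.
 * hash_ptr(tsk, KPROBE_HASH_BITS), so all kretprobe instances of a given
 * task fall into the same bucket and are serialized by that bucket's lock.
 */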
93 * Normally, functions that we'd want to prohibit kprobes in are marked
94 * __kprobes. But there are cases where such functions already belong to
95 * a different section (e.g. __sched for preempt_schedule).
97 * For such cases, we now have a blacklist.
99 static struct kprobe_blackpoint kprobe_blacklist[] = {
100 {"preempt_schedule",},
101 {"native_get_debugreg",},
102 {"irq_entries_start",},
103 {"common_interrupt",},
104 {"mcount",}, /* mcount can be called from everywhere */
105 #ifdef CONFIG_RETPOLINE
106 {"__indirect_thunk_start",
107 /* Linker scripts can't set symbol sizes */
108 .range = (size_t)__indirect_thunk_size},
110 {NULL} /* Terminator */
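/*
 * Each blackpoint's start_addr and range are resolved at init time (see
 * init_kprobes()) and later consulted by in_kprobes_functions() to reject
 * any probe registration that falls inside a blacklisted function.
 */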
113 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
115 * kprobe->ainsn.insn points to the copy of the instruction to be
116 * single-stepped. x86_64, POWER4 and above have no-exec support and
117 * stepping on the instruction on a vmalloced/kmalloced/data page
118 * is a recipe for disaster
120 struct kprobe_insn_page {
121 struct list_head list;
122 kprobe_opcode_t *insns; /* Page of instruction slots */
128 #define KPROBE_INSN_PAGE_SIZE(slots) \
129 (offsetof(struct kprobe_insn_page, slot_used) + \
130 (sizeof(char) * (slots)))
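/*
 * slot_used[] is a trailing flexible array with one state byte per slot, so
 * the allocation size is the fixed header up to slot_used plus one byte for
 * each of the 'slots' requested; this matches the kmalloc() call in
 * __get_insn_slot() below.
 */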
132 struct kprobe_insn_cache {
133 struct list_head pages; /* list of kprobe_insn_page */
134 size_t insn_size; /* size of instruction slot */
138 static int slots_per_page(struct kprobe_insn_cache *c)
140 return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
143 enum kprobe_slot_state {
149 static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
150 static struct kprobe_insn_cache kprobe_insn_slots = {
151 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
152 .insn_size = MAX_INSN_SIZE,
155 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
158 * __get_insn_slot() - Find a slot on an executable page for an instruction.
159 * We allocate an executable page if there's no room on existing ones.
161 static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
163 struct kprobe_insn_page *kip;
166 list_for_each_entry(kip, &c->pages, list) {
167 if (kip->nused < slots_per_page(c)) {
169 for (i = 0; i < slots_per_page(c); i++) {
170 if (kip->slot_used[i] == SLOT_CLEAN) {
171 kip->slot_used[i] = SLOT_USED;
173 return kip->insns + (i * c->insn_size);
176 /* kip->nused is broken. Fix it. */
177 kip->nused = slots_per_page(c);
182 /* If there are any garbage slots, collect them and try again. */
183 if (c->nr_garbage && collect_garbage_slots(c) == 0)
186 /* All out of space. Need to allocate a new page. */
187 kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
192 * Use module_alloc so this page is within +/- 2GB of where the
193 * kernel image and loaded module images reside. This is required
194 * so x86_64 can correctly handle the %rip-relative fixups.
196 kip->insns = module_alloc(PAGE_SIZE);
201 INIT_LIST_HEAD(&kip->list);
202 memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
203 kip->slot_used[0] = SLOT_USED;
206 list_add(&kip->list, &c->pages);
211 kprobe_opcode_t __kprobes *get_insn_slot(void)
213 kprobe_opcode_t *ret = NULL;
215 mutex_lock(&kprobe_insn_mutex);
216 ret = __get_insn_slot(&kprobe_insn_slots);
217 mutex_unlock(&kprobe_insn_mutex);
222 /* Return 1 if all garbage slots are collected, otherwise 0. */
223 static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
225 kip->slot_used[idx] = SLOT_CLEAN;
227 if (kip->nused == 0) {
229 * Page is no longer in use. Free it unless
230 * it's the last one. We keep the last one
231 * so as not to have to set it up again the
232 * next time somebody inserts a probe.
234 if (!list_is_singular(&kip->list)) {
235 list_del(&kip->list);
236 module_free(NULL, kip->insns);
244 static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
246 struct kprobe_insn_page *kip, *next;
248 /* Ensure no one is still running on the garbage slots */
251 list_for_each_entry_safe(kip, next, &c->pages, list) {
253 if (kip->ngarbage == 0)
255 kip->ngarbage = 0; /* we will collect all garbage slots */
256 for (i = 0; i < slots_per_page(c); i++) {
257 if (kip->slot_used[i] == SLOT_DIRTY &&
258 collect_one_slot(kip, i))
266 static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
267 kprobe_opcode_t *slot, int dirty)
269 struct kprobe_insn_page *kip;
271 list_for_each_entry(kip, &c->pages, list) {
272 long idx = ((long)slot - (long)kip->insns) /
273 (c->insn_size * sizeof(kprobe_opcode_t));
274 if (idx >= 0 && idx < slots_per_page(c)) {
275 WARN_ON(kip->slot_used[idx] != SLOT_USED);
277 kip->slot_used[idx] = SLOT_DIRTY;
279 if (++c->nr_garbage > slots_per_page(c))
280 collect_garbage_slots(c);
282 collect_one_slot(kip, idx);
286 /* Could not free this slot. */
290 void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
292 mutex_lock(&kprobe_insn_mutex);
293 __free_insn_slot(&kprobe_insn_slots, slot, dirty);
294 mutex_unlock(&kprobe_insn_mutex);
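/*
 * A minimal usage sketch (illustrative only): arch code typically grabs a
 * slot in arch_prepare_kprobe() to hold the copied instruction, and releases
 * it again when the probe is removed:
 *
 *	p->ainsn.insn = get_insn_slot();
 *	if (!p->ainsn.insn)
 *		return -ENOMEM;
 *	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *	...
 *	free_insn_slot(p->ainsn.insn, 0);
 */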
296 #ifdef CONFIG_OPTPROBES
297 /* For optimized_kprobe buffer */
298 static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
299 static struct kprobe_insn_cache kprobe_optinsn_slots = {
300 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
301 /* .insn_size is initialized later */
304 /* Get a slot for optimized_kprobe buffer */
305 kprobe_opcode_t __kprobes *get_optinsn_slot(void)
307 kprobe_opcode_t *ret = NULL;
309 mutex_lock(&kprobe_optinsn_mutex);
310 ret = __get_insn_slot(&kprobe_optinsn_slots);
311 mutex_unlock(&kprobe_optinsn_mutex);
316 void __kprobes free_optinsn_slot(kprobe_opcode_t *slot, int dirty)
318 mutex_lock(&kprobe_optinsn_mutex);
319 __free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
320 mutex_unlock(&kprobe_optinsn_mutex);
325 /* We have preemption disabled, so it is safe to use the __ versions */
326 static inline void set_kprobe_instance(struct kprobe *kp)
328 __this_cpu_write(kprobe_instance, kp);
331 static inline void reset_kprobe_instance(void)
333 __this_cpu_write(kprobe_instance, NULL);
337 * This routine is called either:
338 * - under the kprobe_mutex - during kprobe_[un]register()
340 * - with preemption disabled - from arch/xxx/kernel/kprobes.c
342 struct kprobe __kprobes *get_kprobe(void *addr)
344 struct hlist_head *head;
345 struct hlist_node *node;
348 head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
349 hlist_for_each_entry_rcu(p, node, head, hlist) {
357 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
359 /* Return true if the kprobe is an aggregator */
360 static inline int kprobe_aggrprobe(struct kprobe *p)
362 return p->pre_handler == aggr_pre_handler;
365 /* Return true(!0) if the kprobe is unused */
366 static inline int kprobe_unused(struct kprobe *p)
368 return kprobe_aggrprobe(p) && kprobe_disabled(p) &&
369 list_empty(&p->list);
373 * Keep all fields in the kprobe consistent
375 static inline void copy_kprobe(struct kprobe *ap, struct kprobe *p)
377 memcpy(&p->opcode, &ap->opcode, sizeof(kprobe_opcode_t));
378 memcpy(&p->ainsn, &ap->ainsn, sizeof(struct arch_specific_insn));
381 #ifdef CONFIG_OPTPROBES
382 /* NOTE: change this value only with kprobe_mutex held */
383 static bool kprobes_allow_optimization;
386 * Call all pre_handlers on the list, but ignore their return values.
387 * This must be called from the arch-dependent optimized caller.
389 void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
393 list_for_each_entry_rcu(kp, &p->list, list) {
394 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
395 set_kprobe_instance(kp);
396 kp->pre_handler(kp, regs);
398 reset_kprobe_instance();
402 /* Free optimized instructions and optimized_kprobe */
403 static __kprobes void free_aggr_kprobe(struct kprobe *p)
405 struct optimized_kprobe *op;
407 op = container_of(p, struct optimized_kprobe, kp);
408 arch_remove_optimized_kprobe(op);
409 arch_remove_kprobe(p);
413 /* Return true(!0) if the kprobe is ready for optimization. */
414 static inline int kprobe_optready(struct kprobe *p)
416 struct optimized_kprobe *op;
418 if (kprobe_aggrprobe(p)) {
419 op = container_of(p, struct optimized_kprobe, kp);
420 return arch_prepared_optinsn(&op->optinsn);
426 /* Return true(!0) if the kprobe is disarmed. Note: p must be on hash list */
427 static inline int kprobe_disarmed(struct kprobe *p)
429 struct optimized_kprobe *op;
431 /* If the kprobe is not an aggr/opt probe, just return whether it is disabled */
432 if (!kprobe_aggrprobe(p))
433 return kprobe_disabled(p);
435 op = container_of(p, struct optimized_kprobe, kp);
437 return kprobe_disabled(p) && list_empty(&op->list);
440 /* Return true(!0) if the probe is queued on (un)optimizing lists */
441 static int __kprobes kprobe_queued(struct kprobe *p)
443 struct optimized_kprobe *op;
445 if (kprobe_aggrprobe(p)) {
446 op = container_of(p, struct optimized_kprobe, kp);
447 if (!list_empty(&op->list))
454 * Return an optimized kprobe whose optimizing code replaces
455 * instructions including addr (excluding the breakpoint itself).
457 static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
460 struct kprobe *p = NULL;
461 struct optimized_kprobe *op;
463 /* Don't check i == 0, since that is a breakpoint case. */
464 for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
465 p = get_kprobe((void *)(addr - i));
467 if (p && kprobe_optready(p)) {
468 op = container_of(p, struct optimized_kprobe, kp);
469 if (arch_within_optimized_kprobe(op, addr))
476 /* Optimization staging list, protected by kprobe_mutex */
477 static LIST_HEAD(optimizing_list);
478 static LIST_HEAD(unoptimizing_list);
480 static void kprobe_optimizer(struct work_struct *work);
481 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
482 static DECLARE_COMPLETION(optimizer_comp);
483 #define OPTIMIZE_DELAY 5
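/* OPTIMIZE_DELAY is the delay, in jiffies, before the delayed work above runs */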
486 * Optimize (replace a breakpoint with a jump) kprobes listed on the optimizing_list.
489 static __kprobes void do_optimize_kprobes(void)
491 /* Optimization is never done while kprobes are all disarmed */
492 if (kprobes_all_disarmed || !kprobes_allow_optimization ||
493 list_empty(&optimizing_list))
497 * The optimization/unoptimization refers to online_cpus via
498 * stop_machine(), and cpu-hotplug modifies online_cpus.
499 * At the same time, text_mutex is held both in cpu-hotplug and here.
500 * This combination can cause a deadlock (cpu-hotplug tries to lock
501 * text_mutex but stop_machine() cannot proceed because online_cpus
503 * To avoid this deadlock, we need to call get_online_cpus()
504 * to keep cpu-hotplug out while text_mutex is held.
507 mutex_lock(&text_mutex);
508 arch_optimize_kprobes(&optimizing_list);
509 mutex_unlock(&text_mutex);
514 * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
515 * if need) kprobes listed on unoptimizing_list.
517 static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
519 struct optimized_kprobe *op, *tmp;
521 /* Unlike optimization, unoptimization must be done unconditionally */
522 if (list_empty(&unoptimizing_list))
525 /* Ditto to do_optimize_kprobes */
527 mutex_lock(&text_mutex);
528 arch_unoptimize_kprobes(&unoptimizing_list, free_list);
529 /* Loop free_list for disarming */
530 list_for_each_entry_safe(op, tmp, free_list, list) {
531 /* Disarm probes if marked disabled */
532 if (kprobe_disabled(&op->kp))
533 arch_disarm_kprobe(&op->kp);
534 if (kprobe_unused(&op->kp)) {
536 * Remove unused probes from hash list. After waiting
537 * for synchronization, these probes are reclaimed.
538 * (reclaiming is done by do_free_cleaned_kprobes.)
540 hlist_del_rcu(&op->kp.hlist);
542 list_del_init(&op->list);
544 mutex_unlock(&text_mutex);
548 /* Reclaim all kprobes on the free_list */
549 static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
551 struct optimized_kprobe *op, *tmp;
553 list_for_each_entry_safe(op, tmp, free_list, list) {
554 BUG_ON(!kprobe_unused(&op->kp));
555 list_del_init(&op->list);
556 free_aggr_kprobe(&op->kp);
560 /* Start the optimizer once OPTIMIZE_DELAY has passed */
561 static __kprobes void kick_kprobe_optimizer(void)
563 if (!delayed_work_pending(&optimizing_work))
564 schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
567 /* Kprobe jump optimizer */
568 static __kprobes void kprobe_optimizer(struct work_struct *work)
570 LIST_HEAD(free_list);
572 /* Lock modules while optimizing kprobes */
573 mutex_lock(&module_mutex);
574 mutex_lock(&kprobe_mutex);
577 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
578 * kprobes before waiting for the quiescence period.
580 do_unoptimize_kprobes(&free_list);
583 * Step 2: Wait for a quiescence period to ensure all running interrupts
584 * are done. Because an optprobe may modify multiple instructions,
585 * there is a chance that the Nth instruction is interrupted. In that
586 * case, a running interrupt can return into the 2nd-Nth byte of the jump
587 * instruction. This wait is for avoiding that.
591 /* Step 3: Optimize kprobes after the quiescence period */
592 do_optimize_kprobes();
594 /* Step 4: Free cleaned kprobes after the quiescence period */
595 do_free_cleaned_kprobes(&free_list);
597 mutex_unlock(&kprobe_mutex);
598 mutex_unlock(&module_mutex);
600 /* Step 5: Kick optimizer again if needed */
601 if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
602 kick_kprobe_optimizer();
604 /* Wake up all waiters */
605 complete_all(&optimizer_comp);
608 /* Wait for optimization and unoptimization to complete */
609 __kprobes void wait_for_kprobe_optimizer(void)
611 if (delayed_work_pending(&optimizing_work))
612 wait_for_completion(&optimizer_comp);
615 /* Optimize kprobe if p is ready to be optimized */
616 static __kprobes void optimize_kprobe(struct kprobe *p)
618 struct optimized_kprobe *op;
620 /* Check if the kprobe is disabled or not ready for optimization. */
621 if (!kprobe_optready(p) || !kprobes_allow_optimization ||
622 (kprobe_disabled(p) || kprobes_all_disarmed))
625 /* Neither break_handler nor post_handler is supported. */
626 if (p->break_handler || p->post_handler)
629 op = container_of(p, struct optimized_kprobe, kp);
631 /* Check that there are no other kprobes within the optimized instruction range */
632 if (arch_check_optimized_kprobe(op) < 0)
635 /* Check if it is already optimized. */
636 if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
638 op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
640 if (!list_empty(&op->list))
641 /* This probe is being unoptimized. Just dequeue it */
642 list_del_init(&op->list);
644 list_add(&op->list, &optimizing_list);
645 kick_kprobe_optimizer();
650 /* Shortcut for forced, direct unoptimizing */
650 static __kprobes void force_unoptimize_kprobe(struct optimized_kprobe *op)
653 arch_unoptimize_kprobe(op);
655 if (kprobe_disabled(&op->kp))
656 arch_disarm_kprobe(&op->kp);
659 /* Unoptimize a kprobe if p is optimized */
660 static __kprobes void unoptimize_kprobe(struct kprobe *p, bool force)
662 struct optimized_kprobe *op;
664 if (!kprobe_aggrprobe(p) || kprobe_disarmed(p))
665 return; /* This is not an optprobe nor optimized */
667 op = container_of(p, struct optimized_kprobe, kp);
668 if (!kprobe_optimized(p)) {
669 /* Unoptimized or unoptimizing case */
670 if (force && !list_empty(&op->list)) {
672 * Only if this is an unoptimizing kprobe and the unoptimization was
673 * forced, forcibly unoptimize it. (There is no need to unoptimize
674 * an already-unoptimized kprobe again.)
676 list_del_init(&op->list);
677 force_unoptimize_kprobe(op);
682 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
683 if (!list_empty(&op->list)) {
684 /* Dequeue from the optimization queue */
685 list_del_init(&op->list);
688 /* Optimized kprobe case */
690 /* Forcibly update the code: this is a special case */
691 force_unoptimize_kprobe(op);
693 list_add(&op->list, &unoptimizing_list);
694 kick_kprobe_optimizer();
699 /* Cancel unoptimizing so the kprobe can be reused */
699 static void reuse_unused_kprobe(struct kprobe *ap)
701 struct optimized_kprobe *op;
703 BUG_ON(!kprobe_unused(ap));
705 * An unused kprobe MUST be in the middle of delayed unoptimizing (which
706 * means there is still a relative jump in place) and disabled.
708 op = container_of(ap, struct optimized_kprobe, kp);
709 if (unlikely(list_empty(&op->list)))
710 printk(KERN_WARNING "Warning: found a stray unused "
711 "aggrprobe@%p\n", ap->addr);
712 /* Enable the probe again */
713 ap->flags &= ~KPROBE_FLAG_DISABLED;
714 /* Optimize it again (remove from op->list) */
715 BUG_ON(!kprobe_optready(ap));
719 /* Remove optimized instructions */
720 static void __kprobes kill_optimized_kprobe(struct kprobe *p)
722 struct optimized_kprobe *op;
724 op = container_of(p, struct optimized_kprobe, kp);
725 if (!list_empty(&op->list))
726 /* Dequeue from the (un)optimization queue */
727 list_del_init(&op->list);
729 op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
730 /* Don't touch the code, because it is already freed. */
731 arch_remove_optimized_kprobe(op);
734 /* Try to prepare optimized instructions */
735 static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
737 struct optimized_kprobe *op;
739 op = container_of(p, struct optimized_kprobe, kp);
740 arch_prepare_optimized_kprobe(op);
743 /* Allocate new optimized_kprobe and try to prepare optimized instructions */
744 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
746 struct optimized_kprobe *op;
748 op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
752 INIT_LIST_HEAD(&op->list);
753 op->kp.addr = p->addr;
754 arch_prepare_optimized_kprobe(op);
759 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
762 * Prepare an optimized_kprobe and optimize it
763 * NOTE: p must be a normal registered kprobe
765 static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
768 struct optimized_kprobe *op;
770 ap = alloc_aggr_kprobe(p);
774 op = container_of(ap, struct optimized_kprobe, kp);
775 if (!arch_prepared_optinsn(&op->optinsn)) {
776 /* If setting up the optimization failed, fall back to a regular kprobe */
777 arch_remove_optimized_kprobe(op);
782 init_aggr_kprobe(ap, p);
787 /* This should be called with kprobe_mutex locked */
788 static void __kprobes optimize_all_kprobes(void)
790 struct hlist_head *head;
791 struct hlist_node *node;
795 /* If optimization is already allowed, just return */
796 if (kprobes_allow_optimization)
799 kprobes_allow_optimization = true;
800 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
801 head = &kprobe_table[i];
802 hlist_for_each_entry_rcu(p, node, head, hlist)
803 if (!kprobe_disabled(p))
806 printk(KERN_INFO "Kprobes globally optimized\n");
809 /* This should be called with kprobe_mutex locked */
810 static void __kprobes unoptimize_all_kprobes(void)
812 struct hlist_head *head;
813 struct hlist_node *node;
817 /* If optimization is already prohibited, just return */
818 if (!kprobes_allow_optimization)
821 kprobes_allow_optimization = false;
822 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
823 head = &kprobe_table[i];
824 hlist_for_each_entry_rcu(p, node, head, hlist) {
825 if (!kprobe_disabled(p))
826 unoptimize_kprobe(p, false);
829 /* Wait for unoptimizing completion */
830 wait_for_kprobe_optimizer();
831 printk(KERN_INFO "Kprobes globally unoptimized\n");
834 int sysctl_kprobes_optimization;
835 int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
836 void __user *buffer, size_t *length,
841 mutex_lock(&kprobe_mutex);
842 sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
843 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
845 if (sysctl_kprobes_optimization)
846 optimize_all_kprobes();
848 unoptimize_all_kprobes();
849 mutex_unlock(&kprobe_mutex);
853 #endif /* CONFIG_SYSCTL */
855 /* Put a breakpoint for a probe. Must be called with text_mutex locked */
856 static void __kprobes __arm_kprobe(struct kprobe *p)
860 /* Check collision with other optimized kprobes */
861 _p = get_optimized_kprobe((unsigned long)p->addr);
863 /* Fallback to unoptimized kprobe */
864 unoptimize_kprobe(_p, true);
867 optimize_kprobe(p); /* Try to optimize (add kprobe to a list) */
870 /* Remove the breakpoint of a probe. Must be called with text_mutex locked */
871 static void __kprobes __disarm_kprobe(struct kprobe *p, bool reopt)
875 unoptimize_kprobe(p, false); /* Try to unoptimize */
877 if (!kprobe_queued(p)) {
878 arch_disarm_kprobe(p);
879 /* If another kprobe was blocked, optimize it. */
880 _p = get_optimized_kprobe((unsigned long)p->addr);
881 if (unlikely(_p) && reopt)
884 /* TODO: reoptimize others after unoptimizing this probe */
887 #else /* !CONFIG_OPTPROBES */
889 #define optimize_kprobe(p) do {} while (0)
890 #define unoptimize_kprobe(p, f) do {} while (0)
891 #define kill_optimized_kprobe(p) do {} while (0)
892 #define prepare_optimized_kprobe(p) do {} while (0)
893 #define try_to_optimize_kprobe(p) do {} while (0)
894 #define __arm_kprobe(p) arch_arm_kprobe(p)
895 #define __disarm_kprobe(p, o) arch_disarm_kprobe(p)
896 #define kprobe_disarmed(p) kprobe_disabled(p)
897 #define wait_for_kprobe_optimizer() do {} while (0)
899 /* Without optimization, there should be no unused kprobes to be reused */
900 static void reuse_unused_kprobe(struct kprobe *ap)
902 printk(KERN_ERR "Error: There should be no unused kprobe here.\n");
903 BUG_ON(kprobe_unused(ap));
906 static __kprobes void free_aggr_kprobe(struct kprobe *p)
908 arch_remove_kprobe(p);
912 static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
914 return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
916 #endif /* CONFIG_OPTPROBES */
918 /* Arm a kprobe with text_mutex */
919 static void __kprobes arm_kprobe(struct kprobe *kp)
922 * Here, since __arm_kprobe() doesn't use stop_machine(),
923 * this doesn't cause deadlock on text_mutex. So, we don't
924 * need get_online_cpus().
926 mutex_lock(&text_mutex);
928 mutex_unlock(&text_mutex);
931 /* Disarm a kprobe with text_mutex */
932 static void __kprobes disarm_kprobe(struct kprobe *kp)
935 mutex_lock(&text_mutex);
936 __disarm_kprobe(kp, true);
937 mutex_unlock(&text_mutex);
941 * Aggregate handlers for multiple kprobes support - these handlers
942 * take care of invoking the individual kprobe handlers on p->list
944 static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
948 list_for_each_entry_rcu(kp, &p->list, list) {
949 if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
950 set_kprobe_instance(kp);
951 if (kp->pre_handler(kp, regs))
954 reset_kprobe_instance();
959 static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
964 list_for_each_entry_rcu(kp, &p->list, list) {
965 if (kp->post_handler && likely(!kprobe_disabled(kp))) {
966 set_kprobe_instance(kp);
967 kp->post_handler(kp, regs, flags);
968 reset_kprobe_instance();
973 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
976 struct kprobe *cur = __this_cpu_read(kprobe_instance);
979 * If we faulted "during" the execution of a user-specified
980 * probe handler, invoke just that probe's fault handler.
982 if (cur && cur->fault_handler) {
983 if (cur->fault_handler(cur, regs, trapnr))
989 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
991 struct kprobe *cur = __this_cpu_read(kprobe_instance);
994 if (cur && cur->break_handler) {
995 if (cur->break_handler(cur, regs))
998 reset_kprobe_instance();
1002 /* Walk the list and increment the nmissed count for the multiprobe case */
1003 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
1006 if (!kprobe_aggrprobe(p)) {
1009 list_for_each_entry_rcu(kp, &p->list, list)
1015 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
1016 struct hlist_head *head)
1018 struct kretprobe *rp = ri->rp;
1020 /* Remove the rp instance from the kretprobe_inst_table */
1021 hlist_del(&ri->hlist);
1022 INIT_HLIST_NODE(&ri->hlist);
1024 raw_spin_lock(&rp->lock);
1025 hlist_add_head(&ri->hlist, &rp->free_instances);
1026 raw_spin_unlock(&rp->lock);
1029 hlist_add_head(&ri->hlist, head);
1032 void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
1033 struct hlist_head **head, unsigned long *flags)
1034 __acquires(hlist_lock)
1036 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1037 raw_spinlock_t *hlist_lock;
1039 *head = &kretprobe_inst_table[hash];
1040 hlist_lock = kretprobe_table_lock_ptr(hash);
1041 raw_spin_lock_irqsave(hlist_lock, *flags);
1044 static void __kprobes kretprobe_table_lock(unsigned long hash,
1045 unsigned long *flags)
1046 __acquires(hlist_lock)
1048 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1049 raw_spin_lock_irqsave(hlist_lock, *flags);
1052 void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
1053 unsigned long *flags)
1054 __releases(hlist_lock)
1056 unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
1057 raw_spinlock_t *hlist_lock;
1059 hlist_lock = kretprobe_table_lock_ptr(hash);
1060 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1063 static void __kprobes kretprobe_table_unlock(unsigned long hash,
1064 unsigned long *flags)
1065 __releases(hlist_lock)
1067 raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
1068 raw_spin_unlock_irqrestore(hlist_lock, *flags);
1072 * This function is called from finish_task_switch when task tk becomes dead,
1073 * so that we can recycle any function-return probe instances associated
1074 * with this task. These left over instances represent probed functions
1075 * that have been called but will never return.
1077 void __kprobes kprobe_flush_task(struct task_struct *tk)
1079 struct kretprobe_instance *ri;
1080 struct hlist_head *head, empty_rp;
1081 struct hlist_node *node, *tmp;
1082 unsigned long hash, flags = 0;
1084 if (unlikely(!kprobes_initialized))
1085 /* Early boot. kretprobe_table_locks not yet initialized. */
1088 INIT_HLIST_HEAD(&empty_rp);
1089 hash = hash_ptr(tk, KPROBE_HASH_BITS);
1090 head = &kretprobe_inst_table[hash];
1091 kretprobe_table_lock(hash, &flags);
1092 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
1094 recycle_rp_inst(ri, &empty_rp);
1096 kretprobe_table_unlock(hash, &flags);
1097 hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
1098 hlist_del(&ri->hlist);
1103 static inline void free_rp_inst(struct kretprobe *rp)
1105 struct kretprobe_instance *ri;
1106 struct hlist_node *pos, *next;
1108 hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
1109 hlist_del(&ri->hlist);
1114 static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
1116 unsigned long flags, hash;
1117 struct kretprobe_instance *ri;
1118 struct hlist_node *pos, *next;
1119 struct hlist_head *head;
1122 for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
1123 kretprobe_table_lock(hash, &flags);
1124 head = &kretprobe_inst_table[hash];
1125 hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
1129 kretprobe_table_unlock(hash, &flags);
1135 * Add the new probe to ap->list. Fail if this is the
1136 * second jprobe at the address - two jprobes can't coexist
1138 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
1140 BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
1142 if (p->break_handler || p->post_handler)
1143 unoptimize_kprobe(ap, true); /* Fall back to normal kprobe */
1145 if (p->break_handler) {
1146 if (ap->break_handler)
1148 list_add_tail_rcu(&p->list, &ap->list);
1149 ap->break_handler = aggr_break_handler;
1151 list_add_rcu(&p->list, &ap->list);
1152 if (p->post_handler && !ap->post_handler)
1153 ap->post_handler = aggr_post_handler;
1155 if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
1156 ap->flags &= ~KPROBE_FLAG_DISABLED;
1157 if (!kprobes_all_disarmed)
1158 /* Arm the breakpoint again. */
1165 * Fill in the required fields of the "manager kprobe". Replace the
1166 * earlier kprobe in the hlist with the manager kprobe
1168 static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
1170 /* Copy p's insn slot to ap */
1172 flush_insn_slot(ap);
1174 ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
1175 ap->pre_handler = aggr_pre_handler;
1176 ap->fault_handler = aggr_fault_handler;
1177 /* We don't care about a kprobe which has gone. */
1178 if (p->post_handler && !kprobe_gone(p))
1179 ap->post_handler = aggr_post_handler;
1180 if (p->break_handler && !kprobe_gone(p))
1181 ap->break_handler = aggr_break_handler;
1183 INIT_LIST_HEAD(&ap->list);
1184 INIT_HLIST_NODE(&ap->hlist);
1186 list_add_rcu(&p->list, &ap->list);
1187 hlist_replace_rcu(&p->hlist, &ap->hlist);
1191 * This is the second or subsequent kprobe at the address - handle
1194 static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
1198 struct kprobe *ap = orig_p;
1200 if (!kprobe_aggrprobe(orig_p)) {
1201 /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
1202 ap = alloc_aggr_kprobe(orig_p);
1205 init_aggr_kprobe(ap, orig_p);
1206 } else if (kprobe_unused(ap))
1207 /* This probe is going to die. Rescue it */
1208 reuse_unused_kprobe(ap);
1210 if (kprobe_gone(ap)) {
1212 * We are attempting to insert a new probe at the same location that
1213 * had a probe in the module vaddr area which has already been
1214 * freed. So the instruction slot has already been
1215 * released; we need a new slot for the new probe.
1217 ret = arch_prepare_kprobe(ap);
1220 * Even if we fail to allocate a new slot, we don't need to
1221 * free the aggr_kprobe. It will be used next time, or
1222 * freed by unregister_kprobe().
1226 /* Prepare optimized instructions if possible. */
1227 prepare_optimized_kprobe(ap);
1230 * Clear the gone flag to prevent allocating a new slot again, and
1231 * set the disabled flag because it is not armed yet.
1233 ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
1234 | KPROBE_FLAG_DISABLED;
1237 /* Copy ap's insn slot to p */
1239 return add_new_kprobe(ap, p);
1242 static int __kprobes in_kprobes_functions(unsigned long addr)
1244 struct kprobe_blackpoint *kb;
1246 if (addr >= (unsigned long)__kprobes_text_start &&
1247 addr < (unsigned long)__kprobes_text_end)
1250 * If a kprobe_blacklist exists, verify the address and
1251 * fail any probe registration within a prohibited area.
1253 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1254 if (kb->start_addr) {
1255 if (addr >= kb->start_addr &&
1256 addr < (kb->start_addr + kb->range))
1264 * If we have a symbol_name argument, look it up and add the offset field
1265 * to it. This way, we can specify a relative address to a symbol.
1266 * This returns encoded errors if it fails to look up the symbol or if an
1267 * invalid combination of parameters is given.
1269 static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
1271 kprobe_opcode_t *addr = p->addr;
1273 if ((p->symbol_name && p->addr) ||
1274 (!p->symbol_name && !p->addr))
1277 if (p->symbol_name) {
1278 kprobe_lookup_name(p->symbol_name, addr);
1280 return ERR_PTR(-ENOENT);
1283 addr = (kprobe_opcode_t *)(((char *)addr) + p->offset);
1288 return ERR_PTR(-EINVAL);
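/*
 * A minimal sketch of the two accepted forms (the symbol and offset below
 * are purely illustrative):
 *
 *	struct kprobe kp_by_addr = { .addr = addr };
 *	struct kprobe kp_by_name = { .symbol_name = "do_fork", .offset = 0x10 };
 *
 * Specifying both .addr and .symbol_name, or neither, is rejected with -EINVAL.
 */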
1291 /* Check passed kprobe is valid and return kprobe in kprobe_table. */
1292 static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
1294 struct kprobe *ap, *list_p;
1296 ap = get_kprobe(p->addr);
1301 list_for_each_entry_rcu(list_p, &ap->list, list)
1303 /* kprobe p is a valid probe */
1311 /* Return error if the kprobe is being re-registered */
1312 static inline int check_kprobe_rereg(struct kprobe *p)
1316 mutex_lock(&kprobe_mutex);
1317 if (__get_valid_kprobe(p))
1319 mutex_unlock(&kprobe_mutex);
1324 int __kprobes register_kprobe(struct kprobe *p)
1327 struct kprobe *old_p;
1328 struct module *probed_mod;
1329 kprobe_opcode_t *addr;
1331 addr = kprobe_addr(p);
1333 return PTR_ERR(addr);
1336 ret = check_kprobe_rereg(p);
1342 if (!kernel_text_address((unsigned long) p->addr) ||
1343 in_kprobes_functions((unsigned long) p->addr) ||
1344 ftrace_text_reserved(p->addr, p->addr) ||
1345 jump_label_text_reserved(p->addr, p->addr)) {
1350 /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
1351 p->flags &= KPROBE_FLAG_DISABLED;
1354 * Check if we are probing a module.
1356 probed_mod = __module_text_address((unsigned long) p->addr);
1358 /* Return -ENOENT if fail. */
1361 * We must hold a refcount of the probed module while updating
1362 * its code to prohibit unexpected unloading.
1364 if (unlikely(!try_module_get(probed_mod)))
1368 * If the module has freed .init.text, we can't insert a probe in it.
1371 if (within_module_init((unsigned long)p->addr, probed_mod) &&
1372 probed_mod->state != MODULE_STATE_COMING) {
1373 module_put(probed_mod);
1376 /* ret will be updated by following code */
1379 jump_label_unlock();
1382 INIT_LIST_HEAD(&p->list);
1383 mutex_lock(&kprobe_mutex);
1385 jump_label_lock(); /* needed to call jump_label_text_reserved() */
1387 get_online_cpus(); /* For avoiding text_mutex deadlock. */
1388 mutex_lock(&text_mutex);
1390 old_p = get_kprobe(p->addr);
1392 /* Since this may unoptimize old_p, text_mutex must be locked. */
1393 ret = register_aggr_kprobe(old_p, p);
1397 ret = arch_prepare_kprobe(p);
1401 INIT_HLIST_NODE(&p->hlist);
1402 hlist_add_head_rcu(&p->hlist,
1403 &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
1405 if (!kprobes_all_disarmed && !kprobe_disabled(p))
1408 /* Try to optimize kprobe */
1409 try_to_optimize_kprobe(p);
1412 mutex_unlock(&text_mutex);
1414 jump_label_unlock();
1415 mutex_unlock(&kprobe_mutex);
1418 module_put(probed_mod);
1424 jump_label_unlock();
1427 EXPORT_SYMBOL_GPL(register_kprobe);
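/*
 * A minimal usage sketch from a module (handler name and probed symbol are
 * illustrative, not part of this file):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */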
1429 /* Check if all probes on the aggrprobe are disabled */
1430 static int __kprobes aggr_kprobe_disabled(struct kprobe *ap)
1434 list_for_each_entry_rcu(kp, &ap->list, list)
1435 if (!kprobe_disabled(kp))
1437 * There is an active probe on the list.
1438 * We can't disable this ap.
1445 /* Disable one kprobe: Must be called with kprobe_mutex held */
1446 static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
1448 struct kprobe *orig_p;
1450 /* Get an original kprobe for return */
1451 orig_p = __get_valid_kprobe(p);
1452 if (unlikely(orig_p == NULL))
1455 if (!kprobe_disabled(p)) {
1456 /* Disable probe if it is a child probe */
1458 p->flags |= KPROBE_FLAG_DISABLED;
1460 /* Try to disarm and disable this/parent probe */
1461 if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
1462 disarm_kprobe(orig_p);
1463 orig_p->flags |= KPROBE_FLAG_DISABLED;
1471 * Unregister a kprobe without a scheduler synchronization.
1473 static int __kprobes __unregister_kprobe_top(struct kprobe *p)
1475 struct kprobe *ap, *list_p;
1477 /* Disable kprobe. This will disarm it if needed. */
1478 ap = __disable_kprobe(p);
1484 * This probe is an independent (and non-optimized) kprobe
1485 * (not an aggrprobe). Remove it from the hash list.
1489 /* Following process expects this probe is an aggrprobe */
1490 WARN_ON(!kprobe_aggrprobe(ap));
1492 if (list_is_singular(&ap->list) && kprobe_disarmed(ap))
1494 * !disarmed can happen if the probe is still under delayed unoptimizing.
1499 /* If disabling probe has special handlers, update aggrprobe */
1500 if (p->break_handler && !kprobe_gone(p))
1501 ap->break_handler = NULL;
1502 if (p->post_handler && !kprobe_gone(p)) {
1503 list_for_each_entry_rcu(list_p, &ap->list, list) {
1504 if ((list_p != p) && (list_p->post_handler))
1507 ap->post_handler = NULL;
1511 * Remove from the aggrprobe: this path will do nothing in
1512 * __unregister_kprobe_bottom().
1514 list_del_rcu(&p->list);
1515 if (!kprobe_disabled(ap) && !kprobes_all_disarmed)
1517 * Try to optimize this probe again, because post
1518 * handler may have been changed.
1520 optimize_kprobe(ap);
1525 BUG_ON(!kprobe_disarmed(ap));
1526 hlist_del_rcu(&ap->hlist);
1530 static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
1534 if (list_empty(&p->list))
1535 /* This is an independent kprobe */
1536 arch_remove_kprobe(p);
1537 else if (list_is_singular(&p->list)) {
1538 /* This is the last child of an aggrprobe */
1539 ap = list_entry(p->list.next, struct kprobe, list);
1541 free_aggr_kprobe(ap);
1543 /* Otherwise, do nothing. */
1546 int __kprobes register_kprobes(struct kprobe **kps, int num)
1552 for (i = 0; i < num; i++) {
1553 ret = register_kprobe(kps[i]);
1556 unregister_kprobes(kps, i);
1562 EXPORT_SYMBOL_GPL(register_kprobes);
1564 void __kprobes unregister_kprobe(struct kprobe *p)
1566 unregister_kprobes(&p, 1);
1568 EXPORT_SYMBOL_GPL(unregister_kprobe);
1570 void __kprobes unregister_kprobes(struct kprobe **kps, int num)
1576 mutex_lock(&kprobe_mutex);
1577 for (i = 0; i < num; i++)
1578 if (__unregister_kprobe_top(kps[i]) < 0)
1579 kps[i]->addr = NULL;
1580 mutex_unlock(&kprobe_mutex);
1582 synchronize_sched();
1583 for (i = 0; i < num; i++)
1585 __unregister_kprobe_bottom(kps[i]);
1587 EXPORT_SYMBOL_GPL(unregister_kprobes);
1589 static struct notifier_block kprobe_exceptions_nb = {
1590 .notifier_call = kprobe_exceptions_notify,
1591 .priority = 0x7fffffff /* we need to be notified first */
1594 unsigned long __weak arch_deref_entry_point(void *entry)
1596 return (unsigned long)entry;
1599 int __kprobes register_jprobes(struct jprobe **jps, int num)
1606 for (i = 0; i < num; i++) {
1607 unsigned long addr, offset;
1609 addr = arch_deref_entry_point(jp->entry);
1611 /* Verify probepoint is a function entry point */
1612 if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
1614 jp->kp.pre_handler = setjmp_pre_handler;
1615 jp->kp.break_handler = longjmp_break_handler;
1616 ret = register_kprobe(&jp->kp);
1622 unregister_jprobes(jps, i);
1628 EXPORT_SYMBOL_GPL(register_jprobes);
1630 int __kprobes register_jprobe(struct jprobe *jp)
1632 return register_jprobes(&jp, 1);
1634 EXPORT_SYMBOL_GPL(register_jprobe);
1636 void __kprobes unregister_jprobe(struct jprobe *jp)
1638 unregister_jprobes(&jp, 1);
1640 EXPORT_SYMBOL_GPL(unregister_jprobe);
1642 void __kprobes unregister_jprobes(struct jprobe **jps, int num)
1648 mutex_lock(&kprobe_mutex);
1649 for (i = 0; i < num; i++)
1650 if (__unregister_kprobe_top(&jps[i]->kp) < 0)
1651 jps[i]->kp.addr = NULL;
1652 mutex_unlock(&kprobe_mutex);
1654 synchronize_sched();
1655 for (i = 0; i < num; i++) {
1656 if (jps[i]->kp.addr)
1657 __unregister_kprobe_bottom(&jps[i]->kp);
1660 EXPORT_SYMBOL_GPL(unregister_jprobes);
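/*
 * A minimal jprobe sketch (illustrative only): the entry handler mirrors the
 * probed function's signature so it can inspect the arguments, and it must
 * always end with jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
 *			     struct pt_regs *regs, unsigned long stack_size,
 *			     int __user *parent_tidptr, int __user *child_tidptr)
 *	{
 *		pr_info("do_fork: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry	= jdo_fork,
 *		.kp	= { .symbol_name = "do_fork", },
 *	};
 */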
1662 #ifdef CONFIG_KRETPROBES
1664 * This kprobe pre_handler is registered with every kretprobe. When the
1665 * probe hits, it sets up the return probe.
1667 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1668 struct pt_regs *regs)
1670 struct kretprobe *rp = container_of(p, struct kretprobe, kp);
1671 unsigned long hash, flags = 0;
1672 struct kretprobe_instance *ri;
1674 /* TODO: consider swapping the return address only after the last pre_handler has fired */
1675 hash = hash_ptr(current, KPROBE_HASH_BITS);
1676 raw_spin_lock_irqsave(&rp->lock, flags);
1677 if (!hlist_empty(&rp->free_instances)) {
1678 ri = hlist_entry(rp->free_instances.first,
1679 struct kretprobe_instance, hlist);
1680 hlist_del(&ri->hlist);
1681 raw_spin_unlock_irqrestore(&rp->lock, flags);
1686 if (rp->entry_handler && rp->entry_handler(ri, regs)) {
1687 raw_spin_lock_irqsave(&rp->lock, flags);
1688 hlist_add_head(&ri->hlist, &rp->free_instances);
1689 raw_spin_unlock_irqrestore(&rp->lock, flags);
1693 arch_prepare_kretprobe(ri, regs);
1695 /* XXX(hch): why is there no hlist_move_head? */
1696 INIT_HLIST_NODE(&ri->hlist);
1697 kretprobe_table_lock(hash, &flags);
1698 hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
1699 kretprobe_table_unlock(hash, &flags);
1702 raw_spin_unlock_irqrestore(&rp->lock, flags);
1707 int __kprobes register_kretprobe(struct kretprobe *rp)
1710 struct kretprobe_instance *inst;
1714 if (kretprobe_blacklist_size) {
1715 addr = kprobe_addr(&rp->kp);
1717 return PTR_ERR(addr);
1719 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
1720 if (kretprobe_blacklist[i].addr == addr)
1725 rp->kp.pre_handler = pre_handler_kretprobe;
1726 rp->kp.post_handler = NULL;
1727 rp->kp.fault_handler = NULL;
1728 rp->kp.break_handler = NULL;
1730 /* Pre-allocate memory for max kretprobe instances */
1731 if (rp->maxactive <= 0) {
1732 #ifdef CONFIG_PREEMPT
1733 rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
1735 rp->maxactive = num_possible_cpus();
1738 raw_spin_lock_init(&rp->lock);
1739 INIT_HLIST_HEAD(&rp->free_instances);
1740 for (i = 0; i < rp->maxactive; i++) {
1741 inst = kmalloc(sizeof(struct kretprobe_instance) +
1742 rp->data_size, GFP_KERNEL);
1747 INIT_HLIST_NODE(&inst->hlist);
1748 hlist_add_head(&inst->hlist, &rp->free_instances);
1752 /* Establish function entry probe point */
1753 ret = register_kprobe(&rp->kp);
1758 EXPORT_SYMBOL_GPL(register_kretprobe);
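/*
 * A minimal kretprobe sketch (handler name and probed symbol are
 * illustrative):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
 *	{
 *		pr_info("%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *		.kp		= { .symbol_name = "do_fork", },
 *	};
 *
 *	ret = register_kretprobe(&my_kretprobe);
 */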
1760 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1766 for (i = 0; i < num; i++) {
1767 ret = register_kretprobe(rps[i]);
1770 unregister_kretprobes(rps, i);
1776 EXPORT_SYMBOL_GPL(register_kretprobes);
1778 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1780 unregister_kretprobes(&rp, 1);
1782 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1784 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1790 mutex_lock(&kprobe_mutex);
1791 for (i = 0; i < num; i++)
1792 if (__unregister_kprobe_top(&rps[i]->kp) < 0)
1793 rps[i]->kp.addr = NULL;
1794 mutex_unlock(&kprobe_mutex);
1796 synchronize_sched();
1797 for (i = 0; i < num; i++) {
1798 if (rps[i]->kp.addr) {
1799 __unregister_kprobe_bottom(&rps[i]->kp);
1800 cleanup_rp_inst(rps[i]);
1804 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1806 #else /* CONFIG_KRETPROBES */
1807 int __kprobes register_kretprobe(struct kretprobe *rp)
1811 EXPORT_SYMBOL_GPL(register_kretprobe);
1813 int __kprobes register_kretprobes(struct kretprobe **rps, int num)
1817 EXPORT_SYMBOL_GPL(register_kretprobes);
1819 void __kprobes unregister_kretprobe(struct kretprobe *rp)
1822 EXPORT_SYMBOL_GPL(unregister_kretprobe);
1824 void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
1827 EXPORT_SYMBOL_GPL(unregister_kretprobes);
1829 static int __kprobes pre_handler_kretprobe(struct kprobe *p,
1830 struct pt_regs *regs)
1835 #endif /* CONFIG_KRETPROBES */
1837 /* Set the kprobe gone and remove its instruction buffer. */
1838 static void __kprobes kill_kprobe(struct kprobe *p)
1842 p->flags |= KPROBE_FLAG_GONE;
1843 if (kprobe_aggrprobe(p)) {
1845 * If this is an aggr_kprobe, we have to walk all the
1846 * chained probes and mark them GONE.
1848 list_for_each_entry_rcu(kp, &p->list, list)
1849 kp->flags |= KPROBE_FLAG_GONE;
1850 p->post_handler = NULL;
1851 p->break_handler = NULL;
1852 kill_optimized_kprobe(p);
1855 * Here, we can remove the insn_slot safely, because no thread calls
1856 * the original probed function (which will be freed soon) anymore.
1858 arch_remove_kprobe(p);
1861 /* Disable one kprobe */
1862 int __kprobes disable_kprobe(struct kprobe *kp)
1866 mutex_lock(&kprobe_mutex);
1868 /* Disable this kprobe */
1869 if (__disable_kprobe(kp) == NULL)
1872 mutex_unlock(&kprobe_mutex);
1875 EXPORT_SYMBOL_GPL(disable_kprobe);
1877 /* Enable one kprobe */
1878 int __kprobes enable_kprobe(struct kprobe *kp)
1883 mutex_lock(&kprobe_mutex);
1885 /* Check whether specified probe is valid. */
1886 p = __get_valid_kprobe(kp);
1887 if (unlikely(p == NULL)) {
1892 if (kprobe_gone(kp)) {
1893 /* This kprobe has gone; we can't enable it. */
1899 kp->flags &= ~KPROBE_FLAG_DISABLED;
1901 if (!kprobes_all_disarmed && kprobe_disabled(p)) {
1902 p->flags &= ~KPROBE_FLAG_DISABLED;
1906 mutex_unlock(&kprobe_mutex);
1909 EXPORT_SYMBOL_GPL(enable_kprobe);
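/*
 * Typical pattern (illustrative): a probe registered with
 * KPROBE_FLAG_DISABLED set stays dormant until enable_kprobe() is called,
 * and can later be paused again with disable_kprobe() without
 * unregistering it.
 */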
1911 void __kprobes dump_kprobe(struct kprobe *kp)
1913 printk(KERN_WARNING "Dumping kprobe:\n");
1914 printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
1915 kp->symbol_name, kp->addr, kp->offset);
1918 /* Module notifier call back, checking kprobes on the module */
1919 static int __kprobes kprobes_module_callback(struct notifier_block *nb,
1920 unsigned long val, void *data)
1922 struct module *mod = data;
1923 struct hlist_head *head;
1924 struct hlist_node *node;
1927 int checkcore = (val == MODULE_STATE_GOING);
1929 if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
1933 * When MODULE_STATE_GOING is notified, both the module's .text and
1934 * .init.text sections will be freed. When MODULE_STATE_LIVE is
1935 * notified, only the .init.text section will be freed. We need to
1936 * disable kprobes which have been inserted in those sections.
1938 mutex_lock(&kprobe_mutex);
1939 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1940 head = &kprobe_table[i];
1941 hlist_for_each_entry_rcu(p, node, head, hlist)
1942 if (within_module_init((unsigned long)p->addr, mod) ||
1944 within_module_core((unsigned long)p->addr, mod))) {
1946 * The vaddr at which this probe is installed will soon
1947 * be vfreed but not synced to disk. Hence,
1948 * disarming the breakpoint isn't needed.
1953 mutex_unlock(&kprobe_mutex);
1957 static struct notifier_block kprobe_module_nb = {
1958 .notifier_call = kprobes_module_callback,
1962 static int __init init_kprobes(void)
1965 unsigned long offset = 0, size = 0;
1966 char *modname, namebuf[128];
1967 const char *symbol_name;
1969 struct kprobe_blackpoint *kb;
1971 /* FIXME allocate the probe table, currently defined statically */
1972 /* initialize all list heads */
1973 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
1974 INIT_HLIST_HEAD(&kprobe_table[i]);
1975 INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
1976 raw_spin_lock_init(&(kretprobe_table_locks[i].lock));
1980 * Look up and populate the kprobe_blacklist.
1982 * Unlike the kretprobe blacklist, we'll need to determine
1983 * the range of addresses that belong to the said functions,
1984 * since a kprobe need not necessarily be at the beginning of a function.
1987 for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
1988 kprobe_lookup_name(kb->name, addr);
1992 kb->start_addr = (unsigned long)addr;
1993 symbol_name = kallsyms_lookup(kb->start_addr,
1994 &size, &offset, &modname, namebuf);
2001 if (kretprobe_blacklist_size) {
2002 /* lookup the function address from its name */
2003 for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
2004 kprobe_lookup_name(kretprobe_blacklist[i].name,
2005 kretprobe_blacklist[i].addr);
2006 if (!kretprobe_blacklist[i].addr)
2007 printk("kretprobe: lookup failed: %s\n",
2008 kretprobe_blacklist[i].name);
2012 #if defined(CONFIG_OPTPROBES)
2013 #if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
2014 /* Init kprobe_optinsn_slots */
2015 kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
2017 /* By default, kprobes can be optimized */
2018 kprobes_allow_optimization = true;
2021 /* By default, kprobes are armed */
2022 kprobes_all_disarmed = false;
2024 err = arch_init_kprobes();
2026 err = register_die_notifier(&kprobe_exceptions_nb);
2028 err = register_module_notifier(&kprobe_module_nb);
2030 kprobes_initialized = (err == 0);
2037 #ifdef CONFIG_DEBUG_FS
2038 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
2039 const char *sym, int offset, char *modname, struct kprobe *pp)
2043 if (p->pre_handler == pre_handler_kretprobe)
2045 else if (p->pre_handler == setjmp_pre_handler)
2051 seq_printf(pi, "%p %s %s+0x%x %s ",
2052 p->addr, kprobe_type, sym, offset,
2053 (modname ? modname : " "));
2055 seq_printf(pi, "%p %s %p ",
2056 p->addr, kprobe_type, p->addr);
2060 seq_printf(pi, "%s%s%s\n",
2061 (kprobe_gone(p) ? "[GONE]" : ""),
2062 ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
2063 (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
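/*
 * The resulting line in the debugfs "list" file looks roughly like this
 * (address and symbol are illustrative):
 *
 *	c015d71a  k  vfs_read+0x0  [DISABLED]
 */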
2066 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
2068 return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
2071 static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
2074 if (*pos >= KPROBE_TABLE_SIZE)
2079 static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
2084 static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
2086 struct hlist_head *head;
2087 struct hlist_node *node;
2088 struct kprobe *p, *kp;
2089 const char *sym = NULL;
2090 unsigned int i = *(loff_t *) v;
2091 unsigned long offset = 0;
2092 char *modname, namebuf[128];
2094 head = &kprobe_table[i];
2096 hlist_for_each_entry_rcu(p, node, head, hlist) {
2097 sym = kallsyms_lookup((unsigned long)p->addr, NULL,
2098 &offset, &modname, namebuf);
2099 if (kprobe_aggrprobe(p)) {
2100 list_for_each_entry_rcu(kp, &p->list, list)
2101 report_probe(pi, kp, sym, offset, modname, p);
2103 report_probe(pi, p, sym, offset, modname, NULL);
2109 static const struct seq_operations kprobes_seq_ops = {
2110 .start = kprobe_seq_start,
2111 .next = kprobe_seq_next,
2112 .stop = kprobe_seq_stop,
2113 .show = show_kprobe_addr
2116 static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
2118 return seq_open(filp, &kprobes_seq_ops);
2121 static const struct file_operations debugfs_kprobes_operations = {
2122 .open = kprobes_open,
2124 .llseek = seq_lseek,
2125 .release = seq_release,
2128 static void __kprobes arm_all_kprobes(void)
2130 struct hlist_head *head;
2131 struct hlist_node *node;
2135 mutex_lock(&kprobe_mutex);
2137 /* If kprobes are armed, just return */
2138 if (!kprobes_all_disarmed)
2139 goto already_enabled;
2141 /* Arming kprobes doesn't optimize the kprobes themselves */
2142 mutex_lock(&text_mutex);
2143 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2144 head = &kprobe_table[i];
2145 hlist_for_each_entry_rcu(p, node, head, hlist)
2146 if (!kprobe_disabled(p))
2149 mutex_unlock(&text_mutex);
2151 kprobes_all_disarmed = false;
2152 printk(KERN_INFO "Kprobes globally enabled\n");
2155 mutex_unlock(&kprobe_mutex);
2159 static void __kprobes disarm_all_kprobes(void)
2161 struct hlist_head *head;
2162 struct hlist_node *node;
2166 mutex_lock(&kprobe_mutex);
2168 /* If kprobes are already disarmed, just return */
2169 if (kprobes_all_disarmed) {
2170 mutex_unlock(&kprobe_mutex);
2174 kprobes_all_disarmed = true;
2175 printk(KERN_INFO "Kprobes globally disabled\n");
2177 mutex_lock(&text_mutex);
2178 for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
2179 head = &kprobe_table[i];
2180 hlist_for_each_entry_rcu(p, node, head, hlist) {
2181 if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
2182 __disarm_kprobe(p, false);
2185 mutex_unlock(&text_mutex);
2186 mutex_unlock(&kprobe_mutex);
2188 /* Wait for the optimizer to finish disarming all kprobes */
2189 wait_for_kprobe_optimizer();
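/*
 * arm_all_kprobes() and disarm_all_kprobes() back the debugfs "enabled"
 * file created in debugfs_kprobe_init() below: writing 0 to it disarms
 * every kprobe in the system and writing 1 arms them again.
 */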
2193 * XXX: The debugfs bool file interface doesn't allow for callbacks
2194 * when the bool state is switched. We can reuse that facility when it becomes available.
2197 static ssize_t read_enabled_file_bool(struct file *file,
2198 char __user *user_buf, size_t count, loff_t *ppos)
2202 if (!kprobes_all_disarmed)
2208 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
2211 static ssize_t write_enabled_file_bool(struct file *file,
2212 const char __user *user_buf, size_t count, loff_t *ppos)
2217 buf_size = min(count, (sizeof(buf)-1));
2218 if (copy_from_user(buf, user_buf, buf_size))
2230 disarm_all_kprobes();
2237 static const struct file_operations fops_kp = {
2238 .read = read_enabled_file_bool,
2239 .write = write_enabled_file_bool,
2240 .llseek = default_llseek,
2243 static int __kprobes debugfs_kprobe_init(void)
2245 struct dentry *dir, *file;
2246 unsigned int value = 1;
2248 dir = debugfs_create_dir("kprobes", NULL);
2252 file = debugfs_create_file("list", 0444, dir, NULL,
2253 &debugfs_kprobes_operations);
2255 debugfs_remove(dir);
2259 file = debugfs_create_file("enabled", 0600, dir,
2262 debugfs_remove(dir);
2269 late_initcall(debugfs_kprobe_init);
2270 #endif /* CONFIG_DEBUG_FS */
2272 module_init(init_kprobes);
2274 /* defined in arch/.../kernel/kprobes.c */
2275 EXPORT_SYMBOL_GPL(jprobe_return);