/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Oct	Jim Keniston <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> adapted for x86_64 from i386.
 * 2005-Mar	Roland McGrath <roland@redhat.com>
 *		Fixed to handle %rip-relative addressing mode correctly.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 * 2005-May	Rusty Lynch <rusty.lynch@intel.com>
 *		Added function return probes functionality
 * 2006-Feb	Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
 *		kprobe-booster and kretprobe-booster for i386.
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
 *		and kretprobe-booster for x86-64
 * 2007-Dec	Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
 *		<arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
 *		unified x86 kprobes code.
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/hardirq.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/kallsyms.h>
#include <linux/ftrace.h>

#include <asm/cacheflush.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/debugreg.h>
#include <asm/nospec-branch.h>
void jprobe_return_end(void);

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

#define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
	  (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) |   \
	  (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) |   \
	  (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf))    \
	 << (row % 32))
	/*
	 * Undefined/reserved opcodes, conditional jumps, Opcode Extension
	 * Groups, and some special opcodes cannot be boosted.
	 * This is non-const and volatile to keep gcc from statically
	 * optimizing it out, as variable_test_bit makes gcc think only
	 * *(unsigned long*) is used.
	 */
static volatile u32 twobyte_is_boostable[256 / 32] = {
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
	/*      ----------------------------------------------          */
	W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
	W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
	W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
	W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
	W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
	W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
	W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
	W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
	W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
	W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
	W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
	W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
	W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
	W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
	W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
	W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)   /* f0 */
	/*      -----------------------------------------------         */
	/*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
};
#undef W
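/*
 * Illustrative sketch (not part of the original file): how the W() rows
 * above are consumed. For a two-byte instruction 0x0f 0x90 (seto), bit
 * 0x90 of the bitmap is set by the W(0x90, ...) row, so can_boost()
 * reports it boostable. The helper below is hypothetical and only
 * demonstrates the lookup.
 */
#if 0
static int example_twobyte_is_boostable(u8 second_byte)
{
	/* The same lookup can_boost() performs for 0x0f-prefixed opcodes. */
	return test_bit(second_byte,
			(unsigned long *)twobyte_is_boostable);
}
#endif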
struct kretprobe_blackpoint kretprobe_blacklist[] = {
	{"__switch_to", }, /* This function switches only the current task,
			      but doesn't switch the kernel stack. */
	{NULL, NULL}	/* Terminator */
};
const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
{
	struct __arch_relative_insn {
		u8 op;
		s32 raddr;
	} __attribute__((packed)) *insn;

	insn = (struct __arch_relative_insn *)from;
	insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
	insn->op = op;
}

/* Insert a jump instruction at address 'from', which jumps to address 'to'. */
static void __kprobes synthesize_reljump(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
}
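/*
 * Worked example (illustrative, not in the original file): a reljump
 * synthesized at from = 0x1000 targeting to = 0x2000 encodes as
 * "e9 fb 0f 00 00", since raddr = to - (from + 5) = 0x2000 - 0x1005
 * = 0x0ffb, stored little-endian after the 0xe9 opcode byte.
 */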
/*
 * Skip the prefixes of the instruction.
 */
static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
{
	insn_attr_t attr;

	attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	while (inat_is_legacy_prefix(attr)) {
		insn++;
		attr = inat_get_opcode_attribute((insn_byte_t)*insn);
	}
#ifdef CONFIG_X86_64
	if (inat_is_rex_prefix(attr))
		insn++;
#endif

	return insn;
}
/*
 * Returns non-zero if the opcode is boostable.
 * RIP-relative instructions are adjusted at copying time in 64-bit mode.
 */
static int __kprobes can_boost(kprobe_opcode_t *opcodes)
{
	kprobe_opcode_t opcode;
	kprobe_opcode_t *orig_opcodes = opcodes;

	if (search_exception_tables((unsigned long)opcodes))
		return 0;	/* Page fault may occur on this address. */

retry:
	if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
		return 0;
	opcode = *(opcodes++);

	/* 2nd-byte opcode */
	if (opcode == 0x0f) {
		if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
			return 0;
		return test_bit(*opcodes,
				(unsigned long *)twobyte_is_boostable);
	}

	switch (opcode & 0xf0) {
#ifdef CONFIG_X86_64
	case 0x40:
		goto retry; /* REX prefix is boostable */
#endif
	case 0x60:
		if (0x63 < opcode && opcode < 0x67)
			goto retry; /* prefixes */
		/* can't boost Address-size override and bound */
		return (opcode != 0x62 && opcode != 0x67);
	case 0x70:
		return 0; /* can't boost conditional jump */
	case 0xc0:
		/* can't boost software interrupts */
		return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
	case 0xd0:
		/* can boost AA* and XLAT */
		return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
	case 0xe0:
		/* can boost in/out and absolute jmps */
		return ((opcode & 0x04) || opcode == 0xea);
	case 0xf0:
		if ((opcode & 0x0c) == 0 && opcode != 0xf1)
			goto retry; /* lock/rep(ne) prefix */
		/* clear and set flags are boostable */
		return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
	default:
		/* segment override prefixes are boostable */
		if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
			goto retry; /* prefixes */
		/* CS override prefix and call are not boostable */
		return (opcode != 0x2e && opcode != 0x9a);
	}
}
/* Recover the probed instruction at addr for further analysis. */
static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
{
	struct kprobe *kp;

	kp = get_kprobe((void *)addr);
	if (!kp)
		return -EINVAL;

	/*
	 * Basically, kp->ainsn.insn has the original instruction.
	 * However, a RIP-relative instruction cannot be single-stepped at a
	 * different place, so __copy_instruction() tweaks the displacement of
	 * that instruction. In that case, we can't recover the instruction
	 * from kp->ainsn.insn.
	 *
	 * On the other hand, kp->opcode has a copy of the first byte of
	 * the probed instruction, which was overwritten by int3. And since
	 * the instruction at kp->addr is not modified by kprobes except
	 * for the first byte, we can recover the original instruction
	 * from it and kp->opcode.
	 */
	memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	buf[0] = kp->opcode;
	return 0;
}
/* Check if paddr is at an instruction boundary */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint with the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint. In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}
/*
 * Returns non-zero if the opcode modifies the interrupt flag.
 */
static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
{
	/* Skip prefixes */
	insn = skip_prefixes(insn);

	switch (*insn) {
	case 0xfa:		/* cli */
	case 0xfb:		/* sti */
	case 0xcf:		/* iret/iretd */
	case 0x9d:		/* popf/popfd */
		return 1;
	}

	return 0;
}

/*
 * Copy an instruction and adjust the displacement if the instruction
 * uses the %rip-relative addressing mode (only applicable to 64-bit x86).
 * Returns the length of the copied instruction, or 0 on failure.
 */
static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
{
	struct insn insn;
	int ret;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	kernel_insn_init(&insn, src);
	if (recover) {
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf,
							 (unsigned long)src);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
	}
	insn_get_length(&insn);
	memcpy(dest, insn.kaddr, insn.length);

#ifdef CONFIG_X86_64
	if (insn_rip_relative(&insn)) {
		s64 newdisp;
		u8 *disp;
		kernel_insn_init(&insn, dest);
		insn_get_displacement(&insn);
		/*
		 * The copied instruction uses the %rip-relative addressing
		 * mode.  Adjust the displacement for the difference between
		 * the original location of this instruction and the location
		 * of the copy that will actually be run.  The tricky bit here
		 * is making sure that the sign extension happens correctly in
		 * this calculation, since we need a signed 32-bit result to
		 * be sign-extended to 64 bits when it's added to the %rip
		 * value and yield the same 64-bit result that the sign-
		 * extension of the original signed 32-bit displacement would
		 * have given.
		 */
		newdisp = (u8 *) src + (s64) insn.displacement.value -
			  (u8 *) dest;
		BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
		disp = (u8 *) dest + insn_offset_displacement(&insn);
		*(s32 *) disp = (s32) newdisp;
	}
#endif
	return insn.length;
}
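/*
 * Worked example (illustrative, not in the original file): for an
 * instruction copied from src to dest, the original target address is
 * src + insn.length + disp. With newdisp = src + disp - dest, the copy
 * resolves to dest + insn.length + newdisp = src + insn.length + disp,
 * i.e. the same absolute address. The BUG_ON() above catches copies
 * placed more than 2GB away, where newdisp would no longer fit in a
 * signed 32-bit displacement.
 */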
static void __kprobes arch_copy_kprobe(struct kprobe *p)
{
	/*
	 * Copy an instruction without recovering int3, because it will be
	 * put there by another subsystem.
	 */
	__copy_instruction(p->ainsn.insn, p->addr, 0);

	if (can_boost(p->addr))
		p->ainsn.boostable = 0;
	else
		p->ainsn.boostable = -1;

	p->opcode = *p->addr;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	if (alternatives_text_reserved(p->addr, p->addr))
		return -EINVAL;

	if (!can_probe((unsigned long)p->addr))
		return -EILSEQ;
	/* insn: must be on special executable page on x86. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;
}
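#if 0
/*
 * Minimal usage sketch (illustrative, not part of this file): a probe
 * that exercises the arch hooks above. The symbol name and message are
 * examples only.
 */
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "pre-handler: addr=%p ip=%lx\n", p->addr, regs->ip);
	return 0;	/* 0: continue with single-stepping as usual */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= example_pre_handler,
};
/* register_kprobe(&example_kp) arms it; unregister_kprobe() removes it. */
#endif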
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	text_poke(p->addr, &p->opcode, 1);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
		p->ainsn.insn = NULL;
	}
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
	kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
	kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
					 struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, p);
	kcb->kprobe_saved_flags = kcb->kprobe_old_flags
		= (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
	if (is_IF_modifier(p->ainsn.insn))
		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
}
static void __kprobes clear_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}

static void __kprobes restore_btf(void)
{
	if (test_thread_flag(TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl |= DEBUGCTLMSR_BTF;
		update_debugctlmsr(debugctl);
	}
}
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	unsigned long *sara = stack_addr(regs);

	ri->ret_addr = (kprobe_opcode_t *) *sara;

	/* Replace the return addr with trampoline addr */
	*sara = (unsigned long) &kretprobe_trampoline;
}
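#if 0
/*
 * Minimal usage sketch (illustrative, not part of this file): a return
 * probe serviced by the trampoline mechanism below. The symbol name is
 * an example only.
 */
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	printk(KERN_INFO "return value: %lx\n", regs_return_value(regs));
	return 0;
}

static struct kretprobe example_krp = {
	.handler	= example_ret_handler,
	.kp.symbol_name	= "do_fork",
	.maxactive	= 16,
};
/* register_kretprobe(&example_krp); ... unregister_kretprobe(&example_krp); */
#endif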
#ifdef CONFIG_OPTPROBES
static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter);
#else
#define setup_detour_execution(p, regs, reenter) (0)
#endif
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	if (setup_detour_execution(p, regs, reenter))
		return;

#if !defined(CONFIG_PREEMPT)
	if (p->ainsn.boostable == 1 && !p->post_handler) {
		/* Boost up -- we can execute copied instructions directly */
		if (!reenter)
			reset_current_kprobe();
		/*
		 * Reentering a boosted probe doesn't reset current_kprobe,
		 * nor set current_kprobe, because it doesn't use
		 * single-stepping.
		 */
		regs->ip = (unsigned long)p->ainsn.insn;
		preempt_enable_no_resched();
		return;
	}
#endif
	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p, regs, kcb);
		kcb->kprobe_status = KPROBE_REENTER;
	} else
		kcb->kprobe_status = KPROBE_HIT_SS;
	/* Prepare real single stepping */
	clear_btf();
	regs->flags |= X86_EFLAGS_TF;
	regs->flags &= ~X86_EFLAGS_IF;
	/* single step inline if the instruction is an int3 */
	if (p->opcode == BREAKPOINT_INSTRUCTION)
		regs->ip = (unsigned long)p->addr;
	else
		regs->ip = (unsigned long)p->ainsn.insn;
}
/*
 * We have reentered the kprobe_handler(), since another probe was hit while
 * within the handler. We save the original kprobes variables and just single
 * step on the instruction of the new probe without calling any user handlers.
 */
static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
		/* A probe has been hit in the codepath leading up to, or just
		 * after, single-stepping of a probed instruction. This entire
		 * codepath should strictly reside in the .kprobes.text section.
		 * Raise a BUG or we'll continue in an endless reentering loop
		 * and eventually a stack overflow.
		 */
		printk(KERN_WARNING "Unrecoverable kprobe detected at %p.\n",
		       p->addr);
		dump_kprobe(p);
		BUG();
	default:
		/* impossible cases */
		WARN_ON(1);
		return 0;
	}

	return 1;
}
/*
 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	kprobe_opcode_t *addr;
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing. We conditionally
	 * re-enable preemption at the end of this function,
	 * and also in reenter_kprobe() and setup_singlestep().
	 */
	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe(addr);

	if (p) {
		if (kprobe_running()) {
			if (reenter_kprobe(p, regs, kcb))
				return 1;
		} else {
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry
			 * for jprobe processing, so get out doing nothing
			 * more here.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs))
				setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} else if (*addr != BREAKPOINT_INSTRUCTION) {
		/*
		 * The breakpoint instruction was removed right
		 * after we hit it. Another cpu has removed
		 * either a probepoint or a debugger breakpoint
		 * at this address. In either case, no further
		 * handling of this interrupt is appropriate.
		 * Back up over the (now missing) int3 and run
		 * the original instruction.
		 */
		regs->ip = (unsigned long)addr;
		preempt_enable_no_resched();
		return 1;
	} else if (kprobe_running()) {
		p = __this_cpu_read(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs, kcb, 0);
			return 1;
		}
	} /* else: not a kprobe fault; let the kernel handle it */

	preempt_enable_no_resched();
	return 0;
}
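/*
 * Illustrative flow summary (not in the original file): hitting an armed
 * probe raises int3 -> kprobe_exceptions_notify(DIE_INT3) ->
 * kprobe_handler() runs the pre-handler and single-steps the copied
 * instruction (or boosts past it) -> the ensuing debug trap (DIE_DEBUG)
 * reaches post_kprobe_handler() -> resume_execution() applies the fixups.
 */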
#ifdef CONFIG_X86_64
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax. */		\
	"	subq $24, %rsp\n"		\
	"	pushq %rdi\n	pushq %rsi\n	pushq %rdx\n	pushq %rcx\n" \
	"	pushq %rax\n	pushq %r8\n	pushq %r9\n	pushq %r10\n" \
	"	pushq %r11\n	pushq %rbx\n	pushq %rbp\n	pushq %r12\n" \
	"	pushq %r13\n	pushq %r14\n	pushq %r15\n"
#define RESTORE_REGS_STRING			\
	"	popq %r15\n	popq %r14\n	popq %r13\n	popq %r12\n" \
	"	popq %rbp\n	popq %rbx\n	popq %r11\n	popq %r10\n" \
	"	popq %r9\n	popq %r8\n	popq %rax\n	popq %rcx\n" \
	"	popq %rdx\n	popq %rsi\n	popq %rdi\n" \
	/* Skip orig_ax, ip, cs */		\
	"	addq $24, %rsp\n"
#else
#define SAVE_REGS_STRING			\
	/* Skip cs, ip, orig_ax and gs. */	\
	"	subl $16, %esp\n"		\
	"	pushl %fs\n	pushl %es\n	pushl %ds\n	pushl %eax\n" \
	"	pushl %ebp\n	pushl %edi\n	pushl %esi\n	pushl %edx\n" \
	"	pushl %ecx\n	pushl %ebx\n"
#define RESTORE_REGS_STRING			\
	"	popl %ebx\n	popl %ecx\n	popl %edx\n	popl %esi\n" \
	"	popl %edi\n	popl %ebp\n	popl %eax\n" \
	/* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here */ \
	"	addl $24, %esp\n"
#endif
/*
 * When a retprobed function returns, this code saves registers and
 * calls trampoline_handler(), which in turn calls the kretprobe's handler.
 */
static void __used __kprobes kretprobe_trampoline_holder(void)
{
	asm volatile (
			".global kretprobe_trampoline\n"
			"kretprobe_trampoline: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rdi\n"
			"	call trampoline_handler\n"
			/* Replace saved sp with true return address. */
			"	movq %rax, 152(%rsp)\n"
			RESTORE_REGS_STRING
			"	popfq\n"
#else
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %eax\n"
			"	call trampoline_handler\n"
			/* Move flags to cs */
			"	movl 56(%esp), %edx\n"
			"	movl %edx, 52(%esp)\n"
			/* Replace saved flags with true return address. */
			"	movl %eax, 56(%esp)\n"
			RESTORE_REGS_STRING
			"	popf\n"
#endif
			"	ret\n");
}
/*
 * Called from kretprobe_trampoline
 */
static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);
	/* fixup registers */
#ifdef CONFIG_X86_64
	regs->cs = __KERNEL_CS;
#else
	regs->cs = __KERNEL_CS | get_kernel_rpl();
	regs->gs = 0;
#endif
	regs->ip = trampoline_address;
	regs->orig_ax = ~0UL;

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
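	/*
	 * Example (illustrative): if two return probes are registered on
	 * the same function, the second arch_prepare_kretprobe() finds the
	 * stack slot already pointing at kretprobe_trampoline, so the newer
	 * instance records the trampoline as its ret_addr while the oldest
	 * instance holds the real return address. The loop below therefore
	 * stops at the first instance whose ret_addr is not the trampoline.
	 */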
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	return (void *)orig_ret_address;
}
/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction whose first byte has been replaced by the "int 3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * interrupt.  We have to fix up the stack as follows:
 *
 * 0) Except in the case of absolute or indirect jump or call instructions,
 * the new ip is relative to the copied instruction.  We need to make
 * it relative to the original instruction.
 *
 * 1) If the single-stepped instruction was pushfl, then the TF and IF
 * flags are set in the just-pushed flags, and may need to be cleared.
 *
 * 2) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * If this is the first time we've single-stepped the instruction at
 * this probepoint, and the instruction is boostable, boost it: add a
 * jump instruction after the copied instruction, that jumps to the next
 * instruction after the probepoint.
 */
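/*
 * Worked example (illustrative, not in the original file): after
 * single-stepping a copied 5-byte "call rel32" located at copy_ip, the
 * pushed return address is copy_ip + 5; the fixup
 * *tos = orig_ip + (*tos - copy_ip) rewrites it to orig_ip + 5, the
 * byte following the original call site.
 */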
static void __kprobes resume_execution(struct kprobe *p,
		struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
	unsigned long *tos = stack_addr(regs);
	unsigned long copy_ip = (unsigned long)p->ainsn.insn;
	unsigned long orig_ip = (unsigned long)p->addr;
	kprobe_opcode_t *insn = p->ainsn.insn;

	/* Skip prefixes */
	insn = skip_prefixes(insn);

	regs->flags &= ~X86_EFLAGS_TF;
	switch (*insn) {
	case 0x9c:	/* pushfl */
		*tos &= ~(X86_EFLAGS_TF | X86_EFLAGS_IF);
		*tos |= kcb->kprobe_old_flags;
		break;
	case 0xc2:	/* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea:	/* jmp absolute -- ip is correct */
		/* ip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8:	/* call relative - Fix return addr */
		*tos = orig_ip + (*tos - copy_ip);
		break;
#ifdef CONFIG_X86_32
	case 0x9a:	/* call absolute -- same as call absolute, indirect */
		*tos = orig_ip + (*tos - copy_ip);
		goto no_change;
#endif
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; ip is correct.
			 * But this is not boostable
			 */
			*tos = orig_ip + (*tos - copy_ip);
			goto no_change;
		} else if (((insn[1] & 0x31) == 0x20) ||
			   ((insn[1] & 0x31) == 0x21)) {
			/*
			 * jmp near and far, absolute indirect
			 * ip is correct. And this is boostable
			 */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (p->ainsn.boostable == 0) {
		if ((regs->ip > copy_ip) &&
		    (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if it
			 * jumps back to the correct address.
			 */
			synthesize_reljump((void *)regs->ip,
				(void *)orig_ip + (regs->ip - copy_ip));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->ip += orig_ip - copy_ip;

no_change:
	restore_btf();
}
/*
 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
 * remain disabled throughout this function.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, regs, kcb);
	regs->flags |= kcb->kprobe_saved_flags;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, flags
	 * will have TF set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->flags & X86_EFLAGS_TF)
		return 0;

	return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single-
		 * stepped caused a page fault. We reset the current
		 * kprobe and the ip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->ip = (unsigned long)cur->addr;
		/*
		 * Trap flag (TF) has been set here because this fault
		 * happened where the single stepping will be done.
		 * So clear it by resetting the current kprobe:
		 */
		regs->flags &= ~X86_EFLAGS_TF;

		/*
		 * If the TF flag was set before the kprobe hit,
		 * don't touch it:
		 */
		regs->flags |= kcb->kprobe_old_flags;

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * The fixup routine could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}

	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode_vm(args->regs))
		return ret;

	switch (val) {
	case DIE_INT3:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_DEBUG:
		if (post_kprobe_handler(args->regs)) {
			/*
			 * Reset the BS bit in dr6 (pointed by args->err) to
			 * denote completion of processing
			 */
			(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;
			ret = NOTIFY_STOP;
		}
		break;
	case DIE_GPF:
		/*
		 * To be potentially processing a kprobe fault and to
		 * trust the result from kprobe_running(), we have to
		 * be non-preemptible.
		 */
		if (!preemptible() && kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	kcb->jprobe_saved_regs = *regs;
	kcb->jprobe_saved_sp = stack_addr(regs);
	addr = (unsigned long)(kcb->jprobe_saved_sp);

	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
	       MIN_STACK_SIZE(addr));
	regs->flags &= ~X86_EFLAGS_IF;
	trace_hardirqs_off();
	regs->ip = (unsigned long)(jp->entry);

	/*
	 * jprobes use jprobe_return(), which skips the normal return
	 * path of the function, and this messes up the accounting of
	 * the function graph tracer.
	 *
	 * Pause function graph tracing while performing the jprobe function.
	 */
	pause_graph_tracing();
	return 1;
}
void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	asm volatile (
#ifdef CONFIG_X86_64
			"       xchg   %%rbx,%%rsp	\n"
#else
			"       xchgl   %%ebx,%%esp	\n"
#endif
			"       int3			\n"
			"       .globl jprobe_return_end\n"
			"       jprobe_return_end:	\n"
			"       nop			\n"::"b"
			(kcb->jprobe_saved_sp):"memory");
}
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	u8 *addr = (u8 *) (regs->ip - 1);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	void *saved_sp = kcb->jprobe_saved_sp;

	if ((addr > (u8 *) jprobe_return) &&
	    (addr < (u8 *) jprobe_return_end)) {
		if (stack_addr(regs) != saved_sp) {
			struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
			printk(KERN_ERR
			       "current sp %p does not match saved sp %p\n",
			       stack_addr(regs), saved_sp);
			printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
			show_registers(saved_regs);
			printk(KERN_ERR "Current registers\n");
			show_registers(regs);
			BUG();
		}
		/* It's OK to start function graph tracing again */
		unpause_graph_tracing();
		*regs = kcb->jprobe_saved_regs;
		memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
#ifdef CONFIG_OPTPROBES

/* Insert a call instruction at address 'from', which calls address 'to'. */
static void __kprobes synthesize_relcall(void *from, void *to)
{
	__synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
}

/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
					  unsigned long val)
{
#ifdef CONFIG_X86_64
	*addr++ = 0x48;		/* REX.W prefix */
	*addr++ = 0xbf;		/* movabs $val, %rdi */
#else
	*addr++ = 0xb8;		/* movl $val, %eax */
#endif
	*(unsigned long *)addr = val;
}
static void __used __kprobes kprobes_optinsn_template_holder(void)
{
	asm volatile (
			".global optprobe_template_entry\n"
			"optprobe_template_entry: \n"
#ifdef CONFIG_X86_64
			/* We don't bother saving the ss register */
			"	pushq %rsp\n"
			"	pushfq\n"
			SAVE_REGS_STRING
			"	movq %rsp, %rsi\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			/* Move flags to rsp */
			"	movq 144(%rsp), %rdx\n"
			"	movq %rdx, 152(%rsp)\n"
			RESTORE_REGS_STRING
			/* Skip flags entry */
			"	addq $8, %rsp\n"
			"	popfq\n"
#else /* CONFIG_X86_32 */
			"	pushf\n"
			SAVE_REGS_STRING
			"	movl %esp, %edx\n"
			".global optprobe_template_val\n"
			"optprobe_template_val: \n"
			ASM_NOP5
			".global optprobe_template_call\n"
			"optprobe_template_call: \n"
			ASM_NOP5
			RESTORE_REGS_STRING
			"	addl $4, %esp\n"	/* skip cs */
			"	popf\n"
#endif
			".global optprobe_template_end\n"
			"optprobe_template_end: \n");
}
#define TMPL_MOVE_IDX \
	((long)&optprobe_template_val - (long)&optprobe_template_entry)
#define TMPL_CALL_IDX \
	((long)&optprobe_template_call - (long)&optprobe_template_entry)
#define TMPL_END_IDX \
	((long)&optprobe_template_end - (long)&optprobe_template_entry)

#define INT3_SIZE sizeof(kprobe_opcode_t)
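/*
 * Illustrative note (not in the original file): TMPL_MOVE_IDX and
 * TMPL_CALL_IDX locate the ASM_NOP5 pads inside a copy of the template,
 * which arch_prepare_optimized_kprobe() later overwrites via
 * synthesize_set_arg1() and synthesize_relcall(); TMPL_END_IDX is where
 * the relocated original instructions begin.
 */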
/* Optimized kprobe call back function: called from optinsn */
static void __kprobes optimized_callback(struct optimized_kprobe *op,
					 struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long flags;

	/* This is possible if op is under delayed unoptimizing */
	if (kprobe_disabled(&op->kp))
		return;

	local_irq_save(flags);
	if (kprobe_running()) {
		kprobes_inc_nmissed_count(&op->kp);
	} else {
		/* Save skipped registers */
#ifdef CONFIG_X86_64
		regs->cs = __KERNEL_CS;
#else
		regs->cs = __KERNEL_CS | get_kernel_rpl();
		regs->gs = 0;
#endif
		regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
		regs->orig_ax = ~0UL;

		__this_cpu_write(current_kprobe, &op->kp);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		opt_pre_handler(&op->kp, regs);
		__this_cpu_write(current_kprobe, NULL);
	}
	local_irq_restore(flags);
}
static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
{
	int len = 0, ret;

	while (len < RELATIVEJUMP_SIZE) {
		ret = __copy_instruction(dest + len, src + len, 1);
		if (!ret || !can_boost(dest + len))
			return -EINVAL;
		len += ret;
	}
	/* Check whether the address range is reserved */
	if (ftrace_text_reserved(src, src + len - 1) ||
	    alternatives_text_reserved(src, src + len - 1) ||
	    jump_label_text_reserved(src, src + len - 1))
		return -EBUSY;

	return len;
}
/* Check whether insn is indirect jump */
static int __kprobes __insn_is_indirect_jump(struct insn *insn)
{
	return ((insn->opcode.bytes[0] == 0xff &&
		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
		insn->opcode.bytes[0] == 0xea);	/* Segment based jump */
}
/* Check whether insn jumps into specified address range */
static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
{
	unsigned long target = 0;

	switch (insn->opcode.bytes[0]) {
	case 0xe0:	/* loopne */
	case 0xe1:	/* loope */
	case 0xe2:	/* loop */
	case 0xe3:	/* jcxz */
	case 0xe9:	/* near relative jump */
	case 0xeb:	/* short relative jump */
		break;
	case 0x0f:
		if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
			break;
		return 0;
	default:
		if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
			break;
		return 0;
	}
	target = (unsigned long)insn->next_byte + insn->immediate.value;

	return (start <= target && target <= start + len);
}
static int __kprobes insn_is_indirect_jump(struct insn *insn)
{
	int ret = __insn_is_indirect_jump(insn);

#ifdef CONFIG_RETPOLINE
	/*
	 * A jump to x86_indirect_thunk_* is treated as an indirect jump.
	 * Note that even with CONFIG_RETPOLINE=y, a kernel compiled with
	 * an older gcc may still use indirect jumps. So we add this check
	 * instead of replacing the indirect-jump check.
	 */
	if (!ret)
		ret = insn_jump_into_range(insn,
				(unsigned long)__indirect_thunk_start,
				(unsigned long)__indirect_thunk_end -
				(unsigned long)__indirect_thunk_start);
#endif
	return ret;
}
/* Decode the whole function to ensure no instructions jump into the target */
static int __kprobes can_optimize(unsigned long paddr)
{
	int ret;
	unsigned long addr, size = 0, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Lookup symbol including addr */
	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
		return 0;

	/*
	 * Do not optimize in the entry code due to the unstable
	 * stack handling.
	 */
	if ((paddr >= (unsigned long)__entry_text_start) &&
	    (paddr <  (unsigned long)__entry_text_end))
		return 0;

	/* Check there is enough space for a relative jump. */
	if (size - offset < RELATIVEJUMP_SIZE)
		return 0;

	/* Decode instructions */
	addr = paddr - offset;
	while (addr < paddr - offset + size) { /* Decode until function end */
		if (search_exception_tables(addr))
			/*
			 * Since some fixup code will jump into this function,
			 * we can't optimize kprobes in this function.
			 */
			return 0;
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		/* Recover address */
		insn.kaddr = (void *)addr;
		insn.next_byte = (void *)(addr + insn.length);
		/* Check any instructions don't jump into target */
		if (insn_is_indirect_jump(&insn) ||
		    insn_jump_into_range(&insn, paddr + INT3_SIZE,
					 RELATIVE_ADDR_SIZE))
			return 0;
		addr += insn.length;
	}

	return 1;
}
/* Check optimized_kprobe can actually be optimized. */
int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
{
	int i;
	struct kprobe *p;

	for (i = 1; i < op->optinsn.size; i++) {
		p = get_kprobe(op->kp.addr + i);
		if (p && !kprobe_disabled(p))
			return -EEXIST;
	}

	return 0;
}

/* Check the addr is within the optimized instructions. */
int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
					   unsigned long addr)
{
	return ((unsigned long)op->kp.addr <= addr &&
		(unsigned long)op->kp.addr + op->optinsn.size > addr);
}
/* Free optimized instruction slot */
static
void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
{
	if (op->optinsn.insn) {
		free_optinsn_slot(op->optinsn.insn, dirty);
		op->optinsn.insn = NULL;
		op->optinsn.size = 0;
	}
}

void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
{
	__arch_remove_optimized_kprobe(op, 1);
}
/*
 * Copy replacing target instructions.
 * Target instructions MUST be relocatable (checked inside).
 */
int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
{
	u8 *buf;
	int ret;
	long rel;

	if (!can_optimize((unsigned long)op->kp.addr))
		return -EILSEQ;

	op->optinsn.insn = get_optinsn_slot();
	if (!op->optinsn.insn)
		return -ENOMEM;

	/*
	 * Verify if the address gap is in 2GB range, because this uses
	 * a relative jump.
	 */
	rel = (long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE);
	if (abs(rel) > 0x7fffffff)
		return -ERANGE;

	buf = (u8 *)op->optinsn.insn;

	/* Copy instructions into the out-of-line buffer */
	ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
	if (ret < 0) {
		__arch_remove_optimized_kprobe(op, 0);
		return ret;
	}
	op->optinsn.size = ret;

	/* Copy arch-dep-instance from template */
	memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);

	/* Set probe information */
	synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);

	/* Set probe function call */
	synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);

	/* Set returning jmp instruction at the tail of out-of-line buffer */
	synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
			   (u8 *)op->kp.addr + op->optinsn.size);

	flush_icache_range((unsigned long)buf,
			   (unsigned long)buf + TMPL_END_IDX +
			   op->optinsn.size + RELATIVEJUMP_SIZE);
	return 0;
}
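/*
 * Resulting out-of-line buffer layout (illustrative, not in the
 * original file):
 *
 *   buf + 0              template: save regs, set arg1 (= op), call
 *   ...                  optimized_callback(), restore regs
 *   buf + TMPL_END_IDX   relocated copy of the probed instructions
 *   ... + optinsn.size   reljump back to kp.addr + optinsn.size
 */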
#define MAX_OPTIMIZE_PROBES 256
static struct text_poke_param *jump_poke_params;
static struct jump_poke_buffer {
	u8 buf[RELATIVEJUMP_SIZE];
} *jump_poke_bufs;
static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
					    u8 *insn_buf,
					    struct optimized_kprobe *op)
{
	s32 rel = (s32)((long)op->optinsn.insn -
			((long)op->kp.addr + RELATIVEJUMP_SIZE));

	/* Backup instructions which will be replaced by jump address */
	memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
	       RELATIVE_ADDR_SIZE);

	insn_buf[0] = RELATIVEJUMP_OPCODE;
	*(s32 *)(&insn_buf[1]) = rel;

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}
/*
 * Replace breakpoints (int3) with relative jumps.
 * Caller must call with locking kprobe_mutex and text_mutex.
 */
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		/* Setup param */
		setup_optimize_kprobe(&jump_poke_params[c],
				      jump_poke_bufs[c].buf, op);
		list_del_init(&op->list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/*
	 * text_poke_smp doesn't support NMI/MCE code modifying.
	 * However, since kprobes itself also doesn't support NMI/MCE
	 * code probing, it's not a problem.
	 */
	text_poke_smp_batch(jump_poke_params, c);
}
static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
					      u8 *insn_buf,
					      struct optimized_kprobe *op)
{
	/* Set int3 to first byte for kprobes */
	insn_buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);

	tprm->addr = op->kp.addr;
	tprm->opcode = insn_buf;
	tprm->len = RELATIVEJUMP_SIZE;
}
/*
 * Recover original instructions and breakpoints from relative jumps.
 * Caller must call with locking kprobe_mutex.
 */
extern void arch_unoptimize_kprobes(struct list_head *oplist,
				    struct list_head *done_list)
{
	struct optimized_kprobe *op, *tmp;
	int c = 0;

	list_for_each_entry_safe(op, tmp, oplist, list) {
		/* Setup param */
		setup_unoptimize_kprobe(&jump_poke_params[c],
					jump_poke_bufs[c].buf, op);
		list_move(&op->list, done_list);
		if (++c >= MAX_OPTIMIZE_PROBES)
			break;
	}

	/*
	 * text_poke_smp doesn't support NMI/MCE code modifying.
	 * However, since kprobes itself also doesn't support NMI/MCE
	 * code probing, it's not a problem.
	 */
	text_poke_smp_batch(jump_poke_params, c);
}
/* Replace a relative jump with a breakpoint (int3). */
void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
	u8 buf[RELATIVEJUMP_SIZE];

	/* Set int3 to first byte for kprobes */
	buf[0] = BREAKPOINT_INSTRUCTION;
	memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
	text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
}
static int __kprobes setup_detour_execution(struct kprobe *p,
					    struct pt_regs *regs,
					    int reenter)
{
	struct optimized_kprobe *op;

	if (p->flags & KPROBE_FLAG_OPTIMIZED) {
		/* This kprobe is really able to run the optimized path. */
		op = container_of(p, struct optimized_kprobe, kp);
		/* Detour through copied instructions */
		regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
		if (!reenter)
			reset_current_kprobe();
		preempt_enable_no_resched();
		return 1;
	}
	return 0;
}
static int __kprobes init_poke_params(void)
{
	/* Allocate code buffer and parameter array */
	jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
				 MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_bufs)
		return -ENOMEM;

	jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
				   MAX_OPTIMIZE_PROBES, GFP_KERNEL);
	if (!jump_poke_params) {
		kfree(jump_poke_bufs);
		jump_poke_bufs = NULL;
		return -ENOMEM;
	}

	return 0;
}
#else	/* !CONFIG_OPTPROBES */
static int __kprobes init_poke_params(void)
{
	return 0;
}
#endif

int __init arch_init_kprobes(void)
{
	return init_poke_params();
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}