/*
 *  Kernel Probes (KProbes)
 *  arch/mips/kernel/kprobes.c
 *
 *  Copyright 2006 Sony Corp.
 *  Copyright 2010 Cavium Networks
 *
 *  Some portions copied from the powerpc version.
 *
 *   Copyright (C) IBM Corporation, 2002, 2004
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kprobes.h>
#include <linux/preempt.h>
#include <linux/kdebug.h>
#include <linux/slab.h>

#include <asm/ptrace.h>
#include <asm/break.h>
#include <asm/inst.h>

static const union mips_instruction breakpoint_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_BP,
                .func = break_op
        }
};

static const union mips_instruction breakpoint2_insn = {
        .b_format = {
                .opcode = spec_op,
                .code = BRK_KPROBE_SSTEPBP,
                .func = break_op
        }
};
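/*
 * breakpoint_insn is what arch_arm_kprobe() writes over the probed
 * instruction; breakpoint2_insn is placed after the out-of-line copy of
 * the original instruction (see arch_prepare_kprobe()) so that the
 * single step traps back into the kprobes code.
 */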

DEFINE_PER_CPU(struct kprobe *, current_kprobe);
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static int __kprobes insn_has_delayslot(union mips_instruction insn)
{
        switch (insn.i_format.opcode) {

                /*
                 * This group contains:
                 * jr_op and jalr_op, which are in r_format.
                 */
        case spec_op:
                switch (insn.r_format.func) {
                case jr_op:
                case jalr_op:
                        break;
                default:
                        goto insn_ok;
                }

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:

                /*
                 * These are unconditional and in j_format.
                 */
        case jal_op:
        case j_op:

                /*
                 * These are conditional and in i_format.
                 */
        case beq_op:
        case beql_op:
        case bne_op:
        case bnel_op:
        case blez_op:
        case blezl_op:
        case bgtz_op:
        case bgtzl_op:

                /*
                 * These are the FPA/cp1 branch instructions.
                 */
        case cop1_op:

#ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
        case ldc2_op: /* This is bbit032 on Octeon */
        case swc2_op: /* This is bbit1 on Octeon */
        case sdc2_op: /* This is bbit132 on Octeon */
#endif
                return 1;
        default:
                break;
        }
insn_ok:
        return 0;
}

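/*
 * Illustrative example of why such instructions are rejected: for a
 * branch and its delay slot, e.g.
 *
 *      beq     a0, a1, target          # PC-relative branch
 *      addiu   sp, sp, -32             # delay slot
 *
 * single-stepping an out-of-line copy of the branch would resolve the
 * target relative to the copy, and probing the delay-slot instruction
 * would separate it from its branch.  arch_prepare_kprobe() therefore
 * refuses both, which is why it also checks the preceding instruction.
 */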
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
        union mips_instruction insn;
        union mips_instruction prev_insn;
        int ret = 0;

        prev_insn = p->addr[-1];
        insn = p->addr[0];

        if (insn_has_delayslot(insn) || insn_has_delayslot(prev_insn)) {
                pr_notice("Kprobes for branch and jump instructions are not supported\n");
                ret = -EINVAL;
                goto out;
        }

        /* insn: must be on special executable page on mips. */
        p->ainsn.insn = get_insn_slot();
        if (!p->ainsn.insn) {
                ret = -ENOMEM;
                goto out;
        }

        /*
         * In the kprobe->ainsn.insn[] array we store the original
         * instruction at index zero and a break trap instruction at
         * index one.
         */

        memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
        p->ainsn.insn[1] = breakpoint2_insn;
        p->opcode = *p->addr;

out:
        return ret;
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
        *p->addr = breakpoint_insn;
        flush_insn_slot(p);
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
        *p->addr = p->opcode;
        flush_insn_slot(p);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
        free_insn_slot(p->ainsn.insn, 0);
}

static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        kcb->prev_kprobe.kp = kprobe_running();
        kcb->prev_kprobe.status = kcb->kprobe_status;
        kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
        kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
        kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
}

static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
        kcb->kprobe_status = kcb->prev_kprobe.status;
        kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
        kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
        kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
}

static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
                               struct kprobe_ctlblk *kcb)
{
        __get_cpu_var(current_kprobe) = p;
        kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
        kcb->kprobe_saved_epc = regs->cp0_epc;
}

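/*
 * Interrupts are disabled across the single step: set_current_kprobe()
 * above records the ST0_IE state, prepare_singlestep() clears it, and
 * post_kprobe_handler() restores it.
 */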
static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
        regs->cp0_status &= ~ST0_IE;

        /* single step inline if the instruction is a break */
        if (p->opcode.word == breakpoint_insn.word ||
            p->opcode.word == breakpoint2_insn.word)
                regs->cp0_epc = (unsigned long)p->addr;
        else
                regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
}

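/*
 * Overall flow: the probed instruction was replaced by breakpoint_insn,
 * whose trap arrives here via DIE_BREAK.  prepare_singlestep() points
 * EPC at the out-of-line copy; executing the copy runs into
 * breakpoint2_insn, whose trap arrives as DIE_SSTEPBP and is finished
 * off in post_kprobe_handler().
 */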
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        int ret = 0;
        kprobe_opcode_t *addr;
        struct kprobe_ctlblk *kcb;

        addr = (kprobe_opcode_t *) regs->cp0_epc;

        /*
         * We don't want to be preempted for the entire
         * duration of kprobe processing.
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();

        /* Check we're not actually recursing */
        if (kprobe_running()) {
                p = get_kprobe(addr);
                if (p) {
                        if (kcb->kprobe_status == KPROBE_HIT_SS &&
                            p->ainsn.insn->word == breakpoint_insn.word) {
                                regs->cp0_status &= ~ST0_IE;
                                regs->cp0_status |= kcb->kprobe_saved_SR;
                                goto no_kprobe;
                        }
                        /*
                         * We have reentered the kprobe_handler(), since
                         * another probe was hit while within the handler.
                         * We here save the original kprobes variables and
                         * just single step on the instruction of the new probe
                         * without calling any user handlers.
                         */
                        save_previous_kprobe(kcb);
                        set_current_kprobe(p, regs, kcb);
                        kprobes_inc_nmissed_count(p);
                        prepare_singlestep(p, regs);
                        kcb->kprobe_status = KPROBE_REENTER;
                        return 1;
                } else {
                        if (addr->word != breakpoint_insn.word) {
                                /*
                                 * The breakpoint instruction was removed by
                                 * another cpu right after we hit it, so no
                                 * further handling of this interrupt is
                                 * appropriate.
                                 */
                                ret = 1;
                                goto no_kprobe;
                        }
                        p = __get_cpu_var(current_kprobe);
                        if (p->break_handler && p->break_handler(p, regs))
                                goto ss_probe;
                }
                goto no_kprobe;
        }

        p = get_kprobe(addr);
        if (!p) {
                if (addr->word != breakpoint_insn.word) {
                        /*
                         * The breakpoint instruction was removed right
                         * after we hit it.  Another cpu has removed
                         * either a probepoint or a debugger breakpoint
                         * at this address.  In either case, no further
                         * handling of this interrupt is appropriate.
                         */
                        ret = 1;
                }
                /* Not one of ours: let the kernel handle it. */
                goto no_kprobe;
        }

        set_current_kprobe(p, regs, kcb);
        kcb->kprobe_status = KPROBE_HIT_ACTIVE;

        if (p->pre_handler && p->pre_handler(p, regs)) {
                /* handler has already set things up, so skip ss setup */
                return 1;
        }

ss_probe:
        prepare_singlestep(p, regs);
        kcb->kprobe_status = KPROBE_HIT_SS;
        return 1;

no_kprobe:
        preempt_enable_no_resched();
        return ret;
}

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that was replaced by the break instruction.  To avoid
 * the SMP problems that can occur when we temporarily put back the
 * original opcode to single-step, we single-stepped a copy of the
 * instruction.  The address of this copy is p->ainsn.insn.
 *
 * This function prepares to return from the post-single-step
 * breakpoint trap.
 */
static void __kprobes resume_execution(struct kprobe *p,
                                       struct pt_regs *regs,
                                       struct kprobe_ctlblk *kcb)
{
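        /*
         * All MIPS instructions are 4 bytes wide, and instructions with
         * a branch delay slot were rejected in arch_prepare_kprobe(),
         * so simply advancing EPC past the probed instruction is
         * sufficient here.
         */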
        unsigned long orig_epc = kcb->kprobe_saved_epc;
        regs->cp0_epc = orig_epc + 4;
}

static inline int post_kprobe_handler(struct pt_regs *regs)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (!cur)
                return 0;

        if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
                kcb->kprobe_status = KPROBE_HIT_SSDONE;
                cur->post_handler(cur, regs, 0);
        }

        resume_execution(cur, regs, kcb);

        regs->cp0_status |= kcb->kprobe_saved_SR;

        /* Restore the original saved kprobes variables and continue. */
        if (kcb->kprobe_status == KPROBE_REENTER) {
                restore_previous_kprobe(kcb);
                goto out;
        }
        reset_current_kprobe();
out:
        preempt_enable_no_resched();

        return 1;
}

static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
        struct kprobe *cur = kprobe_running();
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
                return 1;

        if (kcb->kprobe_status & KPROBE_HIT_SS) {
                resume_execution(cur, regs, kcb);
                regs->cp0_status |= kcb->kprobe_old_SR;

                reset_current_kprobe();
                preempt_enable_no_resched();
        }
        return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
                                       unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        switch (val) {
        case DIE_BREAK:
                if (kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;
        case DIE_SSTEPBP:
                if (post_kprobe_handler(args->regs))
                        ret = NOTIFY_STOP;
                break;

        case DIE_PAGE_FAULT:
                /* kprobe_running() needs smp_processor_id() */
                preempt_disable();

                if (kprobe_running()
                    && kprobe_fault_handler(args->regs, args->trapnr))
                        ret = NOTIFY_STOP;
                preempt_enable();
                break;
        default:
                break;
        }
        return ret;
}

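/*
 * jprobe support: setjmp_pre_handler() diverts execution to the
 * jprobe's entry point after saving the registers and the live part of
 * the stack; the handler finishes with jprobe_return(), whose break
 * instruction lands in longjmp_break_handler(), which restores the
 * saved state and lets the probed function run for real.
 */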
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        kcb->jprobe_saved_regs = *regs;
        kcb->jprobe_saved_sp = regs->regs[29];

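        /*
         * Save the part of the stack that the jprobe handler may
         * clobber (regs[29] is $sp); it is copied back in
         * longjmp_break_handler().
         */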
        memcpy(kcb->jprobes_stack, (void *)kcb->jprobe_saved_sp,
               MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));

        regs->cp0_epc = (unsigned long)(jp->entry);

        return 1;
}

/* Defined in the inline asm below. */
void jprobe_return_end(void);

void __kprobes jprobe_return(void)
{
        /* Assembler quirk necessitates this '0,code' business.  */
        asm volatile(
                "break 0,%0\n\t"
                ".globl jprobe_return_end\n"
                "jprobe_return_end:\n"
                : : "n" (BRK_KPROBE_BP) : "memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

        if (regs->cp0_epc >= (unsigned long)jprobe_return &&
            regs->cp0_epc <= (unsigned long)jprobe_return_end) {
                *regs = kcb->jprobe_saved_regs;
                memcpy((void *)kcb->jprobe_saved_sp, kcb->jprobes_stack,
                       MIN_JPROBES_STACK_SIZE(kcb->jprobe_saved_sp));
                preempt_enable_no_resched();

                return 1;
        }
        return 0;
}

/*
 * Function return probe trampoline:
 *      - init_kprobes() establishes a probepoint here
 *      - When the probed function returns, this probe causes the
 *        handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
        asm volatile(
                ".set push\n\t"
                /* Keep the assembler from reordering and placing JR here. */
                ".set noreorder\n\t"
                "nop\n\t"
                ".global kretprobe_trampoline\n"
                "kretprobe_trampoline:\n\t"
                "nop\n\t"
                ".set pop"
                : : : "memory");
}

void kretprobe_trampoline(void);

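/*
 * On function entry, arch_prepare_kretprobe() replaces the return
 * address in $ra (regs[31]) with the trampoline above, so when the
 * probed function returns it "returns" into kretprobe_trampoline and
 * trampoline_probe_handler() can run the user handler and restore the
 * real return address.
 */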
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
                                      struct pt_regs *regs)
{
        ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];

        /* Replace the return addr with trampoline addr */
        regs->regs[31] = (unsigned long)kretprobe_trampoline;
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
                                                struct pt_regs *regs)
{
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long flags, orig_ret_address = 0;
        unsigned long trampoline_address = (unsigned long)kretprobe_trampoline;

        INIT_HLIST_HEAD(&empty_rp);
        kretprobe_hash_lock(current, &head, &flags);

        /*
         * It is possible to have multiple instances associated with a given
         * task, either because multiple functions in the call path have
         * return probes installed on them, and/or because more than one
         * return probe was registered for a target function.
         *
         * We can handle this because:
         *     - instances are always inserted at the head of the list
         *     - when multiple return probes are registered for the same
         *       function, the first instance's ret_addr will point to the
         *       real return address, and all the rest will point to
         *       kretprobe_trampoline
         */
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        /* another task is sharing our hash bucket */
                        continue;

                if (ri->rp && ri->rp->handler)
                        ri->rp->handler(ri, regs);

                orig_ret_address = (unsigned long)ri->ret_addr;
                recycle_rp_inst(ri, &empty_rp);

                if (orig_ret_address != trampoline_address)
                        /*
                         * This is the real return address.  Any other
                         * instances associated with this task are for
                         * other calls deeper on the call stack.
                         */
                        break;
        }

        kretprobe_assert(ri, orig_ret_address, trampoline_address);
        instruction_pointer(regs) = orig_ret_address;

        reset_current_kprobe();
        kretprobe_hash_unlock(current, &flags);
        preempt_enable_no_resched();

        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
        /*
         * By returning a non-zero value, we are telling kprobe_handler()
         * that we don't want the post_handler to run (and have re-enabled
         * preemption).
         */
        return 1;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
        if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline)
                return 1;

        return 0;
}

static struct kprobe trampoline_p = {
        .addr = (kprobe_opcode_t *)kretprobe_trampoline,
        .pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
        return register_kprobe(&trampoline_p);
}