/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 - 2010  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpu.
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
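
/*
 * For reference, a sketch of the sh_ubc operations this file relies on
 * (the authoritative definition lives in <asm/hw_breakpoint.h>):
 * enable()/disable() arm or disarm a single channel,
 * enable_all()/disable_all() gate every channel at once,
 * active_mask()/triggered_mask() report per-channel state bits,
 * clear_triggered_mask() acknowledges a hit, and clk is the UBC
 * interface clock, which is only kept running while a breakpoint is
 * installed.
 */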
/*
 * Install a perf counter breakpoint.
 *
 * We seek a free UBC channel and use it for this breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		/* Claim the first free channel. */
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return -EBUSY;

	clk_enable(sh_ubc->clk);
	sh_ubc->enable(info, i);

	return 0;
}
/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int i;

	for (i = 0; i < sh_ubc->num_events; i++) {
		struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);

		/* Release the channel this counter occupies. */
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
		return;

	sh_ubc->disable(info, i);
	clk_disable(sh_ubc->clk);
}
static int get_hbp_len(u16 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case SH_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case SH_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case SH_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case SH_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}
/*
 * Check for virtual address in user space.
 */
int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va <= TASK_SIZE - len);
}
/*
 * Check for virtual address in kernel space.
 */
static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
{
	unsigned int len;

	len = get_hbp_len(hbp_len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
int arch_bp_generic_fields(int sh_len, int sh_type,
			   int *gen_len, int *gen_type)
{
	/* Len */
	switch (sh_len) {
	case SH_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case SH_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case SH_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case SH_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (sh_type) {
	case SH_BREAKPOINT_READ:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case SH_BREAKPOINT_WRITE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case SH_BREAKPOINT_RW:
		*gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
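
/*
 * A minimal usage sketch (variable names hypothetical): the ptrace layer
 * keeps SH-encoded len/type and converts them to the generic encoding
 * before registering a breakpoint with the perf core:
 *
 *	int gen_len, gen_type, err;
 *
 *	err = arch_bp_generic_fields(info->len, info->type,
 *				     &gen_len, &gen_type);
 *	if (err)
 *		return err;
 *	attr.bp_len = gen_len;
 *	attr.bp_type = gen_type;
 */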
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	info->address = bp->attr.bp_addr;

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->len = SH_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->len = SH_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->len = SH_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->len = SH_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_READ;
		break;
	case HW_BREAKPOINT_W:
		info->type = SH_BREAKPOINT_WRITE;
		break;
	case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
		info->type = SH_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
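
/*
 * This validation runs for every breakpoint the perf core registers. A
 * hedged sketch of a kernel-side caller that would exercise the !tsk
 * path (modeled on samples/hw_breakpoint/data_breakpoint.c; "pid_max"
 * and wp_handler are placeholders):
 *
 *	struct perf_event_attr attr;
 *
 *	hw_breakpoint_init(&attr);
 *	attr.bp_addr = kallsyms_lookup_name("pid_max");
 *	attr.bp_len = HW_BREAKPOINT_LEN_4;
 *	attr.bp_type = HW_BREAKPOINT_W;
 *
 *	wp = register_wide_hw_breakpoint(&attr, wp_handler);
 */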
/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < sh_ubc->num_events; i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
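
/*
 * Worked example on a hypothetical two-channel UBC: with a ptrace
 * breakpoint in channel 0 and a perf breakpoint in channel 1, a hit on
 * channel 0 alone gives cmf = 0x1 and resume_mask = active_mask() = 0x3.
 * The ptrace test above clears bit 0, so enable_all(0x2) re-arms only
 * the perf channel; the one-shot ptrace channel stays disabled.
 */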
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					      unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val != DIE_BREAKPOINT)
		return NOTIFY_DONE;

	/*
	 * If the breakpoint hasn't been triggered by the UBC, it's
	 * probably from a debugger, so don't do anything more here.
	 *
	 * This also permits the UBC interface clock to remain off for
	 * non-UBC breakpoints, as we don't need to check the triggered
	 * or active channel masks.
	 */
	if (args->trapnr != sh_ubc->trap_nr)
		return NOTIFY_DONE;

	return hw_breakpoint_handler(data);
}
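
/*
 * Note: the generic hw_breakpoint core is expected to hook this callback
 * into the die notifier chain (via register_die_notifier()), so it runs
 * for every DIE_BREAKPOINT raised by the trap handler above.
 */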
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
{
	/* TODO */
}
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}
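
/*
 * Registration sketch (hypothetical names): a CPU family's UBC driver
 * fills out a struct sh_ubc and hands it over from an early initcall,
 * roughly as follows:
 *
 *	static struct sh_ubc my_ubc = {
 *		.name		= "my-ubc",
 *		.num_events	= 2,
 *		.trap_nr	= 0x1e0,
 *		.enable		= my_ubc_enable,
 *		.disable	= my_ubc_disable,
 *		...
 *	};
 *
 *	static int __init my_ubc_init(void)
 *	{
 *		return register_sh_ubc(&my_ubc);
 *	}
 *	arch_initcall(my_ubc_init);
 */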