Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/roste...
author     Ingo Molnar <mingo@elte.hu>    Thu, 19 Aug 2010 10:48:09 +0000 (12:48 +0200)
committer  Ingo Molnar <mingo@elte.hu>    Thu, 19 Aug 2010 10:48:09 +0000 (12:48 +0200)
arch/arm/kernel/perf_event.c
arch/powerpc/kernel/perf_callchain.c
arch/sh/kernel/perf_callchain.c
arch/sparc/kernel/perf_event.c
arch/x86/kernel/cpu/perf_event.c
include/linux/ftrace_event.h
include/linux/perf_event.h
kernel/perf_event.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_functions_graph.c

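The common pattern in the per-arch hunks below: the open-coded callchain_store() helpers, the perf_do_callchain() wrappers and the per-arch perf_callchain() entry points go away; each architecture now implements only perf_callchain_kernel() and perf_callchain_user(), storing addresses through the perf_callchain_store() helper added to <linux/perf_event.h>, while buffer allocation, context markers and recursion protection move into kernel/perf_event.c. A minimal sketch of what an arch hook looks like after this change (illustrative only; the stack walking is arch specific, and instruction_pointer() here stands in for the per-arch register access shown in the real hunks below):

    #include <linux/perf_event.h>
    #include <asm/ptrace.h>

    /*
     * Illustrative sketch, not a real architecture: the core now zeroes
     * entry->nr and stores the PERF_CONTEXT_KERNEL/PERF_CONTEXT_USER
     * markers before calling in, so the arch hook only records
     * instruction pointers.
     */
    void perf_callchain_kernel(struct perf_callchain_entry *entry,
                               struct pt_regs *regs)
    {
            perf_callchain_store(entry, instruction_pointer(regs));

            /*
             * Arch-specific frame walk goes here, calling
             * perf_callchain_store() for each return address; the helper
             * itself stops silently at PERF_MAX_STACK_DEPTH.
             */
    }
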
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 417c392..64ca8c3 100644
@@ -3001,13 +3001,6 @@ arch_initcall(init_hw_perf_events);
 /*
  * Callchain handling code.
  */
-static inline void
-callchain_store(struct perf_callchain_entry *entry,
-               u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 /*
  * The registers we're interested in are at the end of the variable
@@ -3039,7 +3032,7 @@ user_backtrace(struct frame_tail *tail,
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;
 
-       callchain_store(entry, buftail.lr);
+       perf_callchain_store(entry, buftail.lr);
 
        /*
         * Frame pointers should strictly progress back up the stack
@@ -3051,16 +3044,11 @@ user_backtrace(struct frame_tail *tail,
        return buftail.fp - 1;
 }
 
-static void
-perf_callchain_user(struct pt_regs *regs,
-                   struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct frame_tail *tail;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
 
        tail = (struct frame_tail *)regs->ARM_fp - 1;
 
@@ -3078,56 +3066,18 @@ callchain_trace(struct stackframe *fr,
                void *data)
 {
        struct perf_callchain_entry *entry = data;
-       callchain_store(entry, fr->pc);
+       perf_callchain_store(entry, fr->pc);
        return 0;
 }
 
-static void
-perf_callchain_kernel(struct pt_regs *regs,
-                     struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stackframe fr;
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs,
-                 struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (!current || !current->pid)
-               return;
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-
-struct perf_callchain_entry *
-perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-       perf_do_callchain(regs, entry);
-       return entry;
-}
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index 95ad9da..d05ae42 100644
 #include "ppc32.h"
 #endif
 
-/*
- * Store another value in a callchain_entry.
- */
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       unsigned int nr = entry->nr;
-
-       if (nr < PERF_MAX_STACK_DEPTH) {
-               entry->ip[nr] = ip;
-               entry->nr = nr + 1;
-       }
-}
 
 /*
  * Is sp valid as the address of the next kernel stack frame after prev_sp?
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
        return 0;
 }
 
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
 
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->nip);
+       perf_callchain_store(entry, regs->nip);
 
        if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
                return;
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        next_ip = regs->nip;
                        lr = regs->link;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_KERNEL);
+                       perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
 
                } else {
                        if (level == 0)
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        ++level;
                }
 
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                if (!valid_next_sp(next_sp, sp))
                        return;
                sp = next_sp;
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp)
                puc == (unsigned long) &sf->uc;
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long sp, next_sp;
        unsigned long next_ip;
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        for (;;) {
                fp = (unsigned long __user *) sp;
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs,
                            read_user_stack_64(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
        return __get_user_inatomic(*ret, ptr);
 }
 
-static inline void perf_callchain_user_64(struct pt_regs *regs,
-                                         struct perf_callchain_entry *entry)
+static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                         struct pt_regs *regs)
 {
 }
 
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp,
        return mctx->mc_gregs;
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned int sp, next_sp;
        unsigned int next_ip;
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
        next_ip = regs->nip;
        lr = regs->link;
        sp = regs->gpr[1];
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, next_ip);
+       perf_callchain_store(entry, next_ip);
 
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
                fp = (unsigned int __user *) (unsigned long) sp;
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs,
                            read_user_stack_32(&uregs[PT_R1], &sp))
                                return;
                        level = 0;
-                       callchain_store(entry, PERF_CONTEXT_USER);
-                       callchain_store(entry, next_ip);
+                       perf_callchain_store(entry, PERF_CONTEXT_USER);
+                       perf_callchain_store(entry, next_ip);
                        continue;
                }
 
                if (level == 0)
                        next_ip = lr;
-               callchain_store(entry, next_ip);
+               perf_callchain_store(entry, next_ip);
                ++level;
                sp = next_sp;
        }
 }
 
-/*
- * Since we can't get PMU interrupts inside a PMU interrupt handler,
- * we don't need separate irq and nmi entries here.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain);
-
-       entry->nr = 0;
-
-       if (!user_mode(regs)) {
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-
-       if (regs) {
-               if (current_is_64bit())
-                       perf_callchain_user_64(regs, entry);
-               else
-                       perf_callchain_user_32(regs, entry);
-       }
-
-       return entry;
+       if (current_is_64bit())
+               perf_callchain_user_64(entry, regs);
+       else
+               perf_callchain_user_32(entry, regs);
 }
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index a9dd3ab..d5ca1ef 100644
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
        struct perf_callchain_entry *entry = data;
 
        if (reliable)
-               callchain_store(entry, addr);
+               perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
        .address        = callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->pc);
+       perf_callchain_store(entry, regs->pc);
 
        unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       /*
-        * Only the kernel side is implemented for now.
-        */
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 357ced3..4bc4029 100644
@@ -1283,22 +1283,17 @@ void __init init_hw_perf_events(void)
        register_die_notifier(&perf_event_nmi_notifier);
 }
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
-
-static void perf_callchain_kernel(struct pt_regs *regs,
-                                 struct perf_callchain_entry *entry)
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                          struct pt_regs *regs)
 {
        unsigned long ksp, fp;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int graph = 0;
 #endif
 
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->tpc);
+       stack_trace_flush();
+
+       perf_callchain_store(entry, regs->tpc);
 
        ksp = regs->u_regs[UREG_I6];
        fp = ksp + STACK_BIAS;
@@ -1322,13 +1317,13 @@ static void perf_callchain_kernel(struct pt_regs *regs,
                        pc = sf->callers_pc;
                        fp = (unsigned long)sf->fp + STACK_BIAS;
                }
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
                if ((pc + 8UL) == (unsigned long) &return_to_handler) {
                        int index = current->curr_ret_stack;
                        if (current->ret_stack && index >= graph) {
                                pc = current->ret_stack[index - graph].ret;
-                               callchain_store(entry, pc);
+                               perf_callchain_store(entry, pc);
                                graph++;
                        }
                }
@@ -1336,13 +1331,12 @@ static void perf_callchain_kernel(struct pt_regs *regs,
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_64(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_64(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
@@ -1355,17 +1349,16 @@ static void perf_callchain_user_64(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp + STACK_BIAS;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-static void perf_callchain_user_32(struct pt_regs *regs,
-                                  struct perf_callchain_entry *entry)
+static void perf_callchain_user_32(struct perf_callchain_entry *entry,
+                                  struct pt_regs *regs)
 {
        unsigned long ufp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->tpc);
+       perf_callchain_store(entry, regs->tpc);
 
        ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
        do {
@@ -1378,34 +1371,16 @@ static void perf_callchain_user_32(struct pt_regs *regs,
 
                pc = sf.callers_pc;
                ufp = (unsigned long)sf.fp;
-               callchain_store(entry, pc);
+               perf_callchain_store(entry, pc);
        } while (entry->nr < PERF_MAX_STACK_DEPTH);
 }
 
-/* Like powerpc we can't get PMU interrupts within the PMU handler,
- * so no need for separate NMI and IRQ chains as on x86.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-       entry->nr = 0;
-       if (!user_mode(regs)) {
-               stack_trace_flush();
-               perf_callchain_kernel(regs, entry);
-               if (current->mm)
-                       regs = task_pt_regs(current);
-               else
-                       regs = NULL;
-       }
-       if (regs) {
-               flushw_user();
-               if (test_thread_flag(TIF_32BIT))
-                       perf_callchain_user_32(regs, entry);
-               else
-                       perf_callchain_user_64(regs, entry);
-       }
-       return entry;
+       flushw_user();
+       if (test_thread_flag(TIF_32BIT))
+               perf_callchain_user_32(entry, regs);
+       else
+               perf_callchain_user_64(entry, regs);
 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index f2da20f..8e91cf3 100644
@@ -1571,15 +1571,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
  * callchain support
  */
 
-static inline
-void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-       if (entry->nr < PERF_MAX_STACK_DEPTH)
-               entry->ip[entry->nr++] = ip;
-}
-
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
-static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry_nmi);
 
 
 static void
@@ -1602,7 +1594,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry *entry = data;
 
-       callchain_store(entry, addr);
+       perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops backtrace_ops = {
@@ -1613,11 +1605,15 @@ static const struct stacktrace_ops backtrace_ops = {
        .walk_stack             = print_context_stack_bp,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-       callchain_store(entry, PERF_CONTEXT_KERNEL);
-       callchain_store(entry, regs->ip);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest OS callchains yet */
+               return;
+       }
+
+       perf_callchain_store(entry, regs->ip);
 
        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
@@ -1646,7 +1642,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if (fp < compat_ptr(regs->sp))
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = compat_ptr(frame.next_frame);
        }
        return 1;
@@ -1659,19 +1655,20 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 }
 #endif
 
-static void
-perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
        struct stack_frame frame;
        const void __user *fp;
 
-       if (!user_mode(regs))
-               regs = task_pt_regs(current);
+       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+               /* TODO: We don't support guest OS callchains yet */
+               return;
+       }
 
        fp = (void __user *)regs->bp;
 
-       callchain_store(entry, PERF_CONTEXT_USER);
-       callchain_store(entry, regs->ip);
+       perf_callchain_store(entry, regs->ip);
 
        if (perf_callchain_user32(regs, entry))
                return;
@@ -1688,52 +1685,11 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
                if ((unsigned long)fp < regs->sp)
                        break;
 
-               callchain_store(entry, frame.return_address);
+               perf_callchain_store(entry, frame.return_address);
                fp = frame.next_frame;
        }
 }
 
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-       int is_user;
-
-       if (!regs)
-               return;
-
-       is_user = user_mode(regs);
-
-       if (is_user && current->state != TASK_RUNNING)
-               return;
-
-       if (!is_user)
-               perf_callchain_kernel(regs, entry);
-
-       if (current->mm)
-               perf_callchain_user(regs, entry);
-}
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       struct perf_callchain_entry *entry;
-
-       if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
-               /* TODO: We don't support guest os callchain now */
-               return NULL;
-       }
-
-       if (in_nmi())
-               entry = &__get_cpu_var(pmc_nmi_entry);
-       else
-               entry = &__get_cpu_var(pmc_irq_entry);
-
-       entry->nr = 0;
-
-       perf_do_callchain(regs, entry);
-
-       return entry;
-}
-
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
 {
        unsigned long ip;
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 02b8b24..5f8ad7b 100644
@@ -191,8 +191,8 @@ struct ftrace_event_call {
        unsigned int            flags;
 
 #ifdef CONFIG_PERF_EVENTS
-       int                     perf_refcount;
-       struct hlist_head       *perf_events;
+       int                             perf_refcount;
+       struct hlist_head __percpu      *perf_events;
 #endif
 };
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 716f99b..000610c 100644
@@ -808,6 +808,12 @@ struct perf_event_context {
        struct rcu_head                 rcu_head;
 };
 
+/*
+ * Number of contexts where an event can trigger:
+ *     task, softirq, hardirq, nmi.
+ */
+#define PERF_NR_CONTEXTS       4
+
 /**
  * struct perf_event_cpu_context - per cpu event context structure
  */
@@ -821,12 +827,8 @@ struct perf_cpu_context {
        struct mutex                    hlist_mutex;
        int                             hlist_refcount;
 
-       /*
-        * Recursion avoidance:
-        *
-        * task, softirq, irq, nmi context
-        */
-       int                             recursion[4];
+       /* Recursion avoidance in each context */
+       int                             recursion[PERF_NR_CONTEXTS];
 };
 
 struct perf_output_handle {
@@ -976,7 +978,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
 extern void perf_event_comm(struct task_struct *tsk);
 extern void perf_event_fork(struct task_struct *tsk);
 
-extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+/* Callchains */
+DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);
+
+extern void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs);
+extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs);
+
+
+static inline void
+perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
 
 extern int sysctl_perf_event_paranoid;
 extern int sysctl_perf_event_mlock;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d180..0d38f27 100644
@@ -1763,6 +1763,216 @@ static u64 perf_event_read(struct perf_event *event)
        return perf_event_count(event);
 }
 
+/*
+ * Callchain support
+ */
+
+struct callchain_cpus_entries {
+       struct rcu_head                 rcu_head;
+       struct perf_callchain_entry     *cpu_entries[0];
+};
+
+static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
+static atomic_t nr_callchain_events;
+static DEFINE_MUTEX(callchain_mutex);
+struct callchain_cpus_entries *callchain_cpus_entries;
+
+
+__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
+                                 struct pt_regs *regs)
+{
+}
+
+__weak void perf_callchain_user(struct perf_callchain_entry *entry,
+                               struct pt_regs *regs)
+{
+}
+
+static void release_callchain_buffers_rcu(struct rcu_head *head)
+{
+       struct callchain_cpus_entries *entries;
+       int cpu;
+
+       entries = container_of(head, struct callchain_cpus_entries, rcu_head);
+
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+
+       kfree(entries);
+}
+
+static void release_callchain_buffers(void)
+{
+       struct callchain_cpus_entries *entries;
+
+       entries = callchain_cpus_entries;
+       rcu_assign_pointer(callchain_cpus_entries, NULL);
+       call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
+}
+
+static int alloc_callchain_buffers(void)
+{
+       int cpu;
+       int size;
+       struct callchain_cpus_entries *entries;
+
+       /*
+        * We can't use the percpu allocation API for data that can be
+        * accessed from NMI. Use a temporary manual per cpu allocation
+        * until that gets sorted out.
+        */
+       size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
+               num_possible_cpus();
+
+       entries = kzalloc(size, GFP_KERNEL);
+       if (!entries)
+               return -ENOMEM;
+
+       size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
+
+       for_each_possible_cpu(cpu) {
+               entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
+                                                        cpu_to_node(cpu));
+               if (!entries->cpu_entries[cpu])
+                       goto fail;
+       }
+
+       rcu_assign_pointer(callchain_cpus_entries, entries);
+
+       return 0;
+
+fail:
+       for_each_possible_cpu(cpu)
+               kfree(entries->cpu_entries[cpu]);
+       kfree(entries);
+
+       return -ENOMEM;
+}
+
+static int get_callchain_buffers(void)
+{
+       int err = 0;
+       int count;
+
+       mutex_lock(&callchain_mutex);
+
+       count = atomic_inc_return(&nr_callchain_events);
+       if (WARN_ON_ONCE(count < 1)) {
+               err = -EINVAL;
+               goto exit;
+       }
+
+       if (count > 1) {
+               /* If the allocation failed, give up */
+               if (!callchain_cpus_entries)
+                       err = -ENOMEM;
+               goto exit;
+       }
+
+       err = alloc_callchain_buffers();
+       if (err)
+               release_callchain_buffers();
+exit:
+       mutex_unlock(&callchain_mutex);
+
+       return err;
+}
+
+static void put_callchain_buffers(void)
+{
+       if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
+               release_callchain_buffers();
+               mutex_unlock(&callchain_mutex);
+       }
+}
+
+static int get_recursion_context(int *recursion)
+{
+       int rctx;
+
+       if (in_nmi())
+               rctx = 3;
+       else if (in_irq())
+               rctx = 2;
+       else if (in_softirq())
+               rctx = 1;
+       else
+               rctx = 0;
+
+       if (recursion[rctx])
+               return -1;
+
+       recursion[rctx]++;
+       barrier();
+
+       return rctx;
+}
+
+static inline void put_recursion_context(int *recursion, int rctx)
+{
+       barrier();
+       recursion[rctx]--;
+}
+
+static struct perf_callchain_entry *get_callchain_entry(int *rctx)
+{
+       int cpu;
+       struct callchain_cpus_entries *entries;
+
+       *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+       if (*rctx == -1)
+               return NULL;
+
+       entries = rcu_dereference(callchain_cpus_entries);
+       if (!entries)
+               return NULL;
+
+       cpu = smp_processor_id();
+
+       return &entries->cpu_entries[cpu][*rctx];
+}
+
+static void
+put_callchain_entry(int rctx)
+{
+       put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+}
+
+static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       int rctx;
+       struct perf_callchain_entry *entry;
+
+
+       entry = get_callchain_entry(&rctx);
+       if (rctx == -1)
+               return NULL;
+
+       if (!entry)
+               goto exit_put;
+
+       entry->nr = 0;
+
+       if (!user_mode(regs)) {
+               perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
+               perf_callchain_kernel(entry, regs);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+
+       if (regs) {
+               perf_callchain_store(entry, PERF_CONTEXT_USER);
+               perf_callchain_user(entry, regs);
+       }
+
+exit_put:
+       put_callchain_entry(rctx);
+
+       return entry;
+}
+
 /*
  * Initialize the perf_event context in a task_struct:
  */
@@ -1895,6 +2105,8 @@ static void free_event(struct perf_event *event)
                        atomic_dec(&nr_comm_events);
                if (event->attr.task)
                        atomic_dec(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
        }
 
        if (event->buffer) {
@@ -2937,16 +3149,6 @@ void perf_event_do_pending(void)
        __perf_pending_run();
 }
 
-/*
- * Callchain support -- arch specific
- */
-
-__weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-       return NULL;
-}
-
-
 /*
  * We assume there is only KVM supporting the callbacks.
  * Later on, we might change it to a list if there is
@@ -3441,14 +3643,20 @@ static void perf_event_output(struct perf_event *event, int nmi,
        struct perf_output_handle handle;
        struct perf_event_header header;
 
+       /* protect the callchain buffers */
+       rcu_read_lock();
+
        perf_prepare_sample(&header, data, event, regs);
 
        if (perf_output_begin(&handle, event, header.size, nmi, 1))
-               return;
+               goto exit;
 
        perf_output_sample(&handle, &header, data, event);
 
        perf_output_end(&handle);
+
+exit:
+       rcu_read_unlock();
 }
 
 /*
@@ -4204,32 +4412,16 @@ end:
 int perf_swevent_get_recursion_context(void)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       int rctx;
 
-       if (in_nmi())
-               rctx = 3;
-       else if (in_irq())
-               rctx = 2;
-       else if (in_softirq())
-               rctx = 1;
-       else
-               rctx = 0;
-
-       if (cpuctx->recursion[rctx])
-               return -1;
-
-       cpuctx->recursion[rctx]++;
-       barrier();
-
-       return rctx;
+       return get_recursion_context(cpuctx->recursion);
 }
 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
 
 void inline perf_swevent_put_recursion_context(int rctx)
 {
        struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-       barrier();
-       cpuctx->recursion[rctx]--;
+
+       put_recursion_context(cpuctx->recursion, rctx);
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4929,6 +5121,13 @@ done:
                        atomic_inc(&nr_comm_events);
                if (event->attr.task)
                        atomic_inc(&nr_task_events);
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
+                       err = get_callchain_buffers();
+                       if (err) {
+                               free_event(event);
+                               return ERR_PTR(err);
+                       }
+               }
        }
 
        return event;
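
For orientation, the new buffer accounting added above is driven from the event setup path: only events that actually request callchains pay for the per-context entries. A hedged userspace sketch of how such an event is opened (the attr fields are the existing perf ABI; the open_cycles_with_callchains() wrapper and its parameters are made up for illustration):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    /*
     * Illustrative only: an event created with PERF_SAMPLE_CALLCHAIN in
     * attr.sample_type makes perf_event_alloc() call get_callchain_buffers(),
     * and freeing the last such event releases the buffers again via
     * put_callchain_buffers().
     */
    static int open_cycles_with_callchains(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size          = sizeof(attr);
            attr.type          = PERF_TYPE_HARDWARE;
            attr.config        = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;
            attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;

            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }
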
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 000e6e8..92f5477 100644
@@ -9,7 +9,7 @@
 #include <linux/kprobes.h>
 #include "trace.h"
 
-static char *perf_trace_buf[4];
+static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];
 
 /*
  * Force it to be aligned to unsigned long to avoid misaligned accesses
@@ -24,7 +24,7 @@ static int    total_ref_count;
 static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
-       struct hlist_head *list;
+       struct hlist_head __percpu *list;
        int ret = -ENOMEM;
        int cpu;
 
@@ -42,11 +42,11 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
        tp_event->perf_events = list;
 
        if (!total_ref_count) {
-               char *buf;
+               char __percpu *buf;
                int i;
 
-               for (i = 0; i < 4; i++) {
-                       buf = (char *)alloc_percpu(perf_trace_t);
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+                       buf = (char __percpu *)alloc_percpu(perf_trace_t);
                        if (!buf)
                                goto fail;
 
@@ -65,7 +65,7 @@ fail:
        if (!total_ref_count) {
                int i;
 
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
@@ -102,13 +102,14 @@ int perf_trace_init(struct perf_event *p_event)
 int perf_trace_enable(struct perf_event *p_event)
 {
        struct ftrace_event_call *tp_event = p_event->tp_event;
+       struct hlist_head __percpu *pcpu_list;
        struct hlist_head *list;
 
-       list = tp_event->perf_events;
-       if (WARN_ON_ONCE(!list))
+       pcpu_list = tp_event->perf_events;
+       if (WARN_ON_ONCE(!pcpu_list))
                return -EINVAL;
 
-       list = this_cpu_ptr(list);
+       list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);
 
        return 0;
@@ -140,7 +141,7 @@ void perf_trace_destroy(struct perf_event *p_event)
        tp_event->perf_events = NULL;
 
        if (!--total_ref_count) {
-               for (i = 0; i < 4; i++) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
                        free_percpu(perf_trace_buf[i]);
                        perf_trace_buf[i] = NULL;
                }
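
A note on the __percpu annotations introduced in the two tracing hunks above: __percpu marks a pointer that holds a per-cpu offset rather than a normal address, so sparse warns unless it is converted with this_cpu_ptr() or per_cpu_ptr() before being dereferenced, which is why perf_trace_enable() now keeps the __percpu pointer and the plain per-CPU list head in separate variables. A small self-contained sketch (example_alloc_heads() is a made-up helper, not kernel code):

    #include <linux/percpu.h>
    #include <linux/list.h>
    #include <linux/cpumask.h>

    /*
     * Illustrative only: allocate a per-cpu set of hlist heads and
     * initialize each CPU's copy through per_cpu_ptr(), mirroring how
     * tp_event->perf_events is handled in trace_event_perf.c.
     */
    static struct hlist_head __percpu *example_alloc_heads(void)
    {
            struct hlist_head __percpu *heads;
            int cpu;

            heads = alloc_percpu(struct hlist_head);
            if (!heads)
                    return NULL;

            for_each_possible_cpu(cpu)
                    INIT_HLIST_HEAD(per_cpu_ptr(heads, cpu));

            return heads;
    }
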
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6f23369..c93bcb2 100644
@@ -23,7 +23,7 @@ struct fgraph_cpu_data {
 };
 
 struct fgraph_data {
-       struct fgraph_cpu_data          *cpu_data;
+       struct fgraph_cpu_data __percpu *cpu_data;
 
        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;