perf: Validate cpu early in perf_event_alloc()
[pandora-kernel.git] kernel/perf_event.c
index 77ad22c..67d9bd7 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
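
For context: among other things, the hunks below move the cpu range check out of
find_get_context() and into perf_event_alloc(), so an out-of-range cpu is rejected
with -EINVAL before any context or PMU state is set up. A minimal, hypothetical
userspace probe of that behaviour (the event type and the bogus cpu value are
arbitrary placeholders):

/*
 * With this change, a cpu outside [0, nr_cpu_ids) is rejected early with
 * EINVAL even for a per-task event.  The cpu below is just absurdly large.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	/* pid 0 == current task; the cpu is far beyond any real nr_cpu_ids */
	if (sys_perf_event_open(&attr, 0, 1 << 20, -1, 0) < 0)
		printf("perf_event_open: %s (EINVAL expected)\n", strerror(errno));

	return 0;
}
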
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/idr.h>
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/slab.h>
@@ -21,7 +22,9 @@
 #include <linux/dcache.h>
 #include <linux/percpu.h>
 #include <linux/ptrace.h>
+#include <linux/reboot.h>
 #include <linux/vmstat.h>
+#include <linux/device.h>
 #include <linux/vmalloc.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 
 #include <asm/irq_regs.h>
 
+enum event_type_t {
+       EVENT_FLEXIBLE = 0x1,
+       EVENT_PINNED = 0x2,
+       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
+};
+
 atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -62,6 +71,12 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
+static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                             enum event_type_t event_type);
+
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+                            enum event_type_t event_type);
+
 void __weak perf_event_print_debug(void)       { }
 
 extern __weak const char *perf_pmu_name(void)
@@ -69,6 +84,11 @@ extern __weak const char *perf_pmu_name(void)
        return "pmu";
 }
 
+static inline u64 perf_clock(void)
+{
+       return local_clock();
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
        int *count = this_cpu_ptr(pmu->pmu_disable_count);
@@ -237,11 +257,6 @@ static void perf_unpin_context(struct perf_event_context *ctx)
        put_ctx(ctx);
 }
 
-static inline u64 perf_clock(void)
-{
-       return local_clock();
-}
-
 /*
  * Update the record of the current time in a context.
  */
@@ -253,6 +268,12 @@ static void update_context_time(struct perf_event_context *ctx)
        ctx->timestamp = now;
 }
 
+static u64 perf_event_time(struct perf_event *event)
+{
+       struct perf_event_context *ctx = event->ctx;
+       return ctx ? ctx->time : 0;
+}
+
 /*
  * Update the total_time_enabled and total_time_running fields for a event.
  */
@@ -266,7 +287,7 @@ static void update_event_times(struct perf_event *event)
                return;
 
        if (ctx->is_active)
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
        else
                run_end = event->tstamp_stopped;
 
@@ -275,7 +296,7 @@ static void update_event_times(struct perf_event *event)
        if (event->state == PERF_EVENT_STATE_INACTIVE)
                run_end = event->tstamp_stopped;
        else
-               run_end = ctx->time;
+               run_end = perf_event_time(event);
 
        event->total_time_running = run_end - event->tstamp_running;
 }
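
Aside: the timestamps these hunks now take from perf_event_time() are what feed the
time_enabled/time_running totals userspace can request through read_format. A minimal,
hypothetical reader (the software task-clock event is only a convenient placeholder):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	/* layout follows from the two read_format flags set below */
	struct { uint64_t value, time_enabled, time_running; } buf;
	long fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
			   PERF_FORMAT_TOTAL_TIME_RUNNING;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	usleep(10000);	/* let the event accumulate some enabled/running time */

	if (read(fd, &buf, sizeof(buf)) == (ssize_t)sizeof(buf))
		printf("value=%" PRIu64 " enabled=%" PRIu64 " running=%" PRIu64 "\n",
		       buf.value, buf.time_enabled, buf.time_running);

	close(fd);
	return 0;
}
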
@@ -531,6 +552,7 @@ event_sched_out(struct perf_event *event,
                  struct perf_cpu_context *cpuctx,
                  struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
        u64 delta;
        /*
         * An event which could not be activated because of
@@ -542,7 +564,7 @@ event_sched_out(struct perf_event *event,
            && !event_filter_match(event)) {
                delta = ctx->time - event->tstamp_stopped;
                event->tstamp_running += delta;
-               event->tstamp_stopped = ctx->time;
+               event->tstamp_stopped = tstamp;
        }
 
        if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -553,7 +575,7 @@ event_sched_out(struct perf_event *event,
                event->pending_disable = 0;
                event->state = PERF_EVENT_STATE_OFF;
        }
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_stopped = tstamp;
        event->pmu->del(event, 0);
        event->oncpu = -1;
 
@@ -765,6 +787,8 @@ event_sched_in(struct perf_event *event,
                 struct perf_cpu_context *cpuctx,
                 struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        if (event->state <= PERF_EVENT_STATE_OFF)
                return 0;
 
@@ -781,9 +805,9 @@ event_sched_in(struct perf_event *event,
                return -EAGAIN;
        }
 
-       event->tstamp_running += ctx->time - event->tstamp_stopped;
+       event->tstamp_running += tstamp - event->tstamp_stopped;
 
-       event->shadow_ctx_time = ctx->time - ctx->timestamp;
+       event->shadow_ctx_time = tstamp - ctx->timestamp;
 
        if (!is_software_event(event))
                cpuctx->active_oncpu++;
@@ -895,11 +919,13 @@ static int group_can_go_on(struct perf_event *event,
 static void add_event_to_ctx(struct perf_event *event,
                               struct perf_event_context *ctx)
 {
+       u64 tstamp = perf_event_time(event);
+
        list_add_event(event, ctx);
        perf_group_attach(event);
-       event->tstamp_enabled = ctx->time;
-       event->tstamp_running = ctx->time;
-       event->tstamp_stopped = ctx->time;
+       event->tstamp_enabled = tstamp;
+       event->tstamp_running = tstamp;
+       event->tstamp_stopped = tstamp;
 }
 
 /*
@@ -934,7 +960,7 @@ static void __perf_install_in_context(void *info)
 
        add_event_to_ctx(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1039,14 +1065,13 @@ static void __perf_event_mark_enabled(struct perf_event *event,
                                        struct perf_event_context *ctx)
 {
        struct perf_event *sub;
+       u64 tstamp = perf_event_time(event);
 
        event->state = PERF_EVENT_STATE_INACTIVE;
-       event->tstamp_enabled = ctx->time - event->total_time_enabled;
+       event->tstamp_enabled = tstamp - event->total_time_enabled;
        list_for_each_entry(sub, &event->sibling_list, group_entry) {
-               if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
-                       sub->tstamp_enabled =
-                               ctx->time - sub->total_time_enabled;
-               }
+               if (sub->state >= PERF_EVENT_STATE_INACTIVE)
+                       sub->tstamp_enabled = tstamp - sub->total_time_enabled;
        }
 }
 
@@ -1079,7 +1104,7 @@ static void __perf_event_enable(void *info)
                goto unlock;
        __perf_event_mark_enabled(event, ctx);
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                goto unlock;
 
        /*
@@ -1190,12 +1215,6 @@ static int perf_event_refresh(struct perf_event *event, int refresh)
        return 0;
 }
 
-enum event_type_t {
-       EVENT_FLEXIBLE = 0x1,
-       EVENT_PINNED = 0x2,
-       EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
-};
-
 static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
@@ -1432,7 +1451,7 @@ ctx_pinned_sched_in(struct perf_event_context *ctx,
        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
                if (event->state <= PERF_EVENT_STATE_OFF)
                        continue;
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, 1))
@@ -1464,7 +1483,7 @@ ctx_flexible_sched_in(struct perf_event_context *ctx,
                 * Listen to the 'cpu' scheduling filter constraint
                 * of events:
                 */
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                if (group_can_go_on(event, cpuctx, can_add_hw)) {
@@ -1691,7 +1710,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
 
-               if (event->cpu != -1 && event->cpu != smp_processor_id())
+               if (!event_filter_match(event))
                        continue;
 
                hwc = &event->hw;
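
All of the open-coded "event->cpu != -1 && event->cpu != smp_processor_id()" tests
above and below are folded into event_filter_match(). Judging only from the check it
replaces, the helper presumably amounts to the following sketch (not copied from the
tree):

/* assumed shape: an event matches if it is not CPU-bound, or is bound to this CPU */
static inline int event_filter_match(struct perf_event *event)
{
	return event->cpu == -1 || event->cpu == smp_processor_id();
}
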
@@ -2209,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
        unsigned long flags;
        int ctxn, err;
 
-       if (!task && cpu != -1) {
+       if (!task) {
                /* Must be root to operate on a CPU event: */
                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                        return ERR_PTR(-EACCES);
 
-               if (cpu < 0 || cpu >= nr_cpumask_bits)
-                       return ERR_PTR(-EINVAL);
-
                /*
                 * We could be clever and allow to attach a event to an
                 * offline CPU and activate it when the CPU comes up, but
@@ -3890,7 +3906,7 @@ static int perf_event_task_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm || event->attr.mmap ||
@@ -3921,6 +3937,8 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_task_ctx(&cpuctx->ctx, task_event);
 
                ctx = task_event->task_ctx;
@@ -4025,7 +4043,7 @@ static int perf_event_comm_match(struct perf_event *event)
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if (event->attr.comm)
@@ -4065,6 +4083,8 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
                ctxn = pmu->task_ctx_nr;
@@ -4171,7 +4191,7 @@ static int perf_event_mmap_match(struct perf_event *event,
        if (event->state < PERF_EVENT_STATE_INACTIVE)
                return 0;
 
-       if (event->cpu != -1 && event->cpu != smp_processor_id())
+       if (!event_filter_match(event))
                return 0;
 
        if ((!executable && event->attr.mmap_data) ||
@@ -4259,6 +4279,8 @@ got_name:
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
 
@@ -4840,7 +4862,7 @@ static int perf_swevent_init(struct perf_event *event)
                break;
        }
 
-       if (event_id > PERF_COUNT_SW_MAX)
+       if (event_id >= PERF_COUNT_SW_MAX)
                return -ENOENT;
 
        if (!event->parent) {
@@ -4954,7 +4976,7 @@ static struct pmu perf_tracepoint = {
 
 static inline void perf_tp_register(void)
 {
-       perf_pmu_register(&perf_tracepoint);
+       perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
 }
 
 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
@@ -5265,25 +5287,94 @@ static void *find_pmu_context(int ctxn)
        return NULL;
 }
 
-static void free_pmu_context(void * __percpu cpu_context)
+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 {
-       struct pmu *pmu;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+               if (cpuctx->active_pmu == old_pmu)
+                       cpuctx->active_pmu = pmu;
+       }
+}
+
+static void free_pmu_context(struct pmu *pmu)
+{
+       struct pmu *i;
 
        mutex_lock(&pmus_lock);
        /*
         * Like a real lame refcount.
         */
-       list_for_each_entry(pmu, &pmus, entry) {
-               if (pmu->pmu_cpu_context == cpu_context)
+       list_for_each_entry(i, &pmus, entry) {
+               if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
+                       update_pmu_context(i, pmu);
                        goto out;
+               }
        }
 
-       free_percpu(cpu_context);
+       free_percpu(pmu->pmu_cpu_context);
 out:
        mutex_unlock(&pmus_lock);
 }
+static struct idr pmu_idr;
+
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *page)
+{
+       struct pmu *pmu = dev_get_drvdata(dev);
+
+       return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
+}
+
+static struct device_attribute pmu_dev_attrs[] = {
+       __ATTR_RO(type),
+       __ATTR_NULL,
+};
+
+static int pmu_bus_running;
+static struct bus_type pmu_bus = {
+       .name           = "event_source",
+       .dev_attrs      = pmu_dev_attrs,
+};
+
+static void pmu_dev_release(struct device *dev)
+{
+       kfree(dev);
+}
+
+static int pmu_dev_alloc(struct pmu *pmu)
+{
+       int ret = -ENOMEM;
+
+       pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+       if (!pmu->dev)
+               goto out;
+
+       device_initialize(pmu->dev);
+       ret = dev_set_name(pmu->dev, "%s", pmu->name);
+       if (ret)
+               goto free_dev;
+
+       dev_set_drvdata(pmu->dev, pmu);
+       pmu->dev->bus = &pmu_bus;
+       pmu->dev->release = pmu_dev_release;
+       ret = device_add(pmu->dev);
+       if (ret)
+               goto free_dev;
+
+out:
+       return ret;
+
+free_dev:
+       put_device(pmu->dev);
+       goto out;
+}
 
-int perf_pmu_register(struct pmu *pmu)
+int perf_pmu_register(struct pmu *pmu, char *name, int type)
 {
        int cpu, ret;
 
@@ -5293,13 +5384,38 @@ int perf_pmu_register(struct pmu *pmu)
        if (!pmu->pmu_disable_count)
                goto unlock;
 
+       pmu->type = -1;
+       if (!name)
+               goto skip_type;
+       pmu->name = name;
+
+       if (type < 0) {
+               int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
+               if (!err)
+                       goto free_pdc;
+
+               err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
+               if (err) {
+                       ret = err;
+                       goto free_pdc;
+               }
+       }
+       pmu->type = type;
+
+       if (pmu_bus_running) {
+               ret = pmu_dev_alloc(pmu);
+               if (ret)
+                       goto free_idr;
+       }
+
+skip_type:
        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
        if (pmu->pmu_cpu_context)
                goto got_cpu_context;
 
        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
        if (!pmu->pmu_cpu_context)
-               goto free_pdc;
+               goto free_dev;
 
        for_each_possible_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
@@ -5310,6 +5426,7 @@ int perf_pmu_register(struct pmu *pmu)
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
                INIT_LIST_HEAD(&cpuctx->rotation_list);
+               cpuctx->active_pmu = pmu;
        }
 
 got_cpu_context:
@@ -5342,6 +5459,14 @@ unlock:
 
        return ret;
 
+free_dev:
+       device_del(pmu->dev);
+       put_device(pmu->dev);
+
+free_idr:
+       if (pmu->type >= PERF_TYPE_MAX)
+               idr_remove(&pmu_idr, pmu->type);
+
 free_pdc:
        free_percpu(pmu->pmu_disable_count);
        goto unlock;
@@ -5361,7 +5486,11 @@ void perf_pmu_unregister(struct pmu *pmu)
        synchronize_rcu();
 
        free_percpu(pmu->pmu_disable_count);
-       free_pmu_context(pmu->pmu_cpu_context);
+       if (pmu->type >= PERF_TYPE_MAX)
+               idr_remove(&pmu_idr, pmu->type);
+       device_del(pmu->dev);
+       put_device(pmu->dev);
+       free_pmu_context(pmu);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)
@@ -5370,6 +5499,13 @@ struct pmu *perf_init_event(struct perf_event *event)
        int idx;
 
        idx = srcu_read_lock(&pmus_srcu);
+
+       rcu_read_lock();
+       pmu = idr_find(&pmu_idr, event->attr.type);
+       rcu_read_unlock();
+       if (pmu)
+               goto unlock;
+
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                int ret = pmu->event_init(event);
                if (!ret)
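
Since perf_init_event() now resolves attr.type through pmu_idr, a dynamically
allocated type number behaves just like the fixed PERF_TYPE_* values; userspace is
expected to discover it via the sysfs "type" attribute created by pmu_dev_alloc()
above. A minimal, hypothetical lookup (the PMU name and config value are
caller-supplied placeholders):

#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

int main(int argc, char **argv)
{
	char path[256];
	FILE *f;
	int type;
	struct perf_event_attr attr;
	long fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <pmu-name>\n", argv[0]);
		return 1;
	}

	/* the file exported by type_show() above */
	snprintf(path, sizeof(path),
		 "/sys/bus/event_source/devices/%s/type", argv[1]);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%d", &type) != 1) {
		perror(path);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;	/* looked up via pmu_idr inside the kernel */
	attr.size = sizeof(attr);
	attr.config = 0;	/* placeholder event encoding */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		perror("perf_event_open");
	else
		close(fd);

	return 0;
}
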
@@ -5402,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
        struct hw_perf_event *hwc;
        long err;
 
+       if ((unsigned)cpu >= nr_cpu_ids) {
+               if (!task || cpu != -1)
+                       return ERR_PTR(-EINVAL);
+       }
+
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (!event)
                return ERR_PTR(-ENOMEM);
@@ -5450,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
        if (!overflow_handler && parent_event)
                overflow_handler = parent_event->overflow_handler;
-       
+
        event->overflow_handler = overflow_handler;
 
        if (attr->disabled)
@@ -6355,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
 
        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
        parent_ctx->rotate_disable = 0;
-       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
 
        child_ctx = child->perf_event_ctxp[ctxn];
 
@@ -6363,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                /*
                 * Mark the child context as a clone of the parent
                 * context, or of whatever the parent is a clone of.
-                * Note that if the parent is a clone, it could get
-                * uncloned at any point, but that doesn't matter
-                * because the list of events and the generation
-                * count can't have changed since we took the mutex.
+                *
+                * Note that if the parent is a clone, the holding of
+                * parent_ctx->lock avoids it from being uncloned.
                 */
-               cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
+               cloned_ctx = parent_ctx->parent_ctx;
                if (cloned_ctx) {
                        child_ctx->parent_ctx = cloned_ctx;
                        child_ctx->parent_gen = parent_ctx->parent_gen;
@@ -6379,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                get_ctx(child_ctx->parent_ctx);
        }
 
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
        mutex_unlock(&parent_ctx->mutex);
 
        perf_unpin_context(parent_ctx);
@@ -6429,7 +6569,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
        mutex_unlock(&swhash->hlist_mutex);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
 static void perf_pmu_rotate_stop(struct pmu *pmu)
 {
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
@@ -6483,6 +6623,26 @@ static void perf_event_exit_cpu(int cpu)
 static inline void perf_event_exit_cpu(int cpu) { }
 #endif
 
+static int
+perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
+{
+       int cpu;
+
+       for_each_online_cpu(cpu)
+               perf_event_exit_cpu(cpu);
+
+       return NOTIFY_OK;
+}
+
+/*
+ * Run the perf reboot notifier at the very last possible moment so that
+ * the generic watchdog code runs as long as possible.
+ */
+static struct notifier_block perf_reboot_notifier = {
+       .notifier_call = perf_reboot,
+       .priority = INT_MIN,
+};
+
 static int __cpuinit
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
@@ -6511,14 +6671,45 @@ void __init perf_event_init(void)
 {
        int ret;
 
+       idr_init(&pmu_idr);
+
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
-       perf_pmu_register(&perf_swevent);
-       perf_pmu_register(&perf_cpu_clock);
-       perf_pmu_register(&perf_task_clock);
+       perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
+       perf_pmu_register(&perf_cpu_clock, NULL, -1);
+       perf_pmu_register(&perf_task_clock, NULL, -1);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
+       register_reboot_notifier(&perf_reboot_notifier);
 
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
+
+static int __init perf_event_sysfs_init(void)
+{
+       struct pmu *pmu;
+       int ret;
+
+       mutex_lock(&pmus_lock);
+
+       ret = bus_register(&pmu_bus);
+       if (ret)
+               goto unlock;
+
+       list_for_each_entry(pmu, &pmus, entry) {
+               if (!pmu->name || pmu->type < 0)
+                       continue;
+
+               ret = pmu_dev_alloc(pmu);
+               WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
+       }
+       pmu_bus_running = 1;
+       ret = 0;
+
+unlock:
+       mutex_unlock(&pmus_lock);
+
+       return ret;
+}
+device_initcall(perf_event_sysfs_init);
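
Once perf_event_sysfs_init() has run, every PMU registered with a name shows up under
the event_source bus added above (e.g. "software" and "tracepoint" from this file). A
small sketch that lists them from userspace, assuming the usual sysfs mount point:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/sys/bus/event_source/devices");
	struct dirent *de;

	if (!d) {
		perror("/sys/bus/event_source/devices");
		return 1;
	}

	while ((de = readdir(d)) != NULL) {
		if (de->d_name[0] == '.')
			continue;	/* skip "." and ".." */
		printf("%s\n", de->d_name);
	}

	closedir(d);
	return 0;
}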