watchdog: Improve initialisation error message and documentation
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index cb6c0d2..2870fee 100644
@@ -31,6 +31,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/perf_event.h>
 #include <linux/ftrace_event.h>
+#include <linux/hw_breakpoint.h>
 
 #include <asm/irq_regs.h>
 
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 {
        int ctxn;
 
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
        for_each_task_context_nr(ctxn)
                perf_event_context_sched_out(task, ctxn, next);
 }
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
 {
        raw_spin_lock(&ctx->lock);
 
-       /* Rotate the first entry last of non-pinned groups */
-       list_rotate_left(&ctx->flexible_groups);
+       /*
+        * Rotate the first entry last of non-pinned groups. Rotation might be
+        * disabled by the inheritance code.
+        */
+       if (!ctx->rotate_disable)
+               list_rotate_left(&ctx->flexible_groups);
 
        raw_spin_unlock(&ctx->lock);
 }
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
        raw_spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
 
-       mutex_lock(&event->owner->perf_event_mutex);
-       list_del_init(&event->owner_entry);
-       mutex_unlock(&event->owner->perf_event_mutex);
-       put_task_struct(event->owner);
-
        free_event(event);
 
        return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 static int perf_release(struct inode *inode, struct file *file)
 {
        struct perf_event *event = file->private_data;
+       struct task_struct *owner;
 
        file->private_data = NULL;
 
+       rcu_read_lock();
+       owner = ACCESS_ONCE(event->owner);
+       /*
+        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
+        * !owner, the list deletion is complete and we can indeed free
+        * this event; otherwise we must serialize on
+        * owner->perf_event_mutex.
+        */
+       smp_read_barrier_depends();
+       if (owner) {
+               /*
+                * Since delayed_put_task_struct() also drops the last
+                * task reference we can safely take a new reference
+                * while holding the rcu_read_lock().
+                */
+               get_task_struct(owner);
+       }
+       rcu_read_unlock();
+
+       if (owner) {
+               mutex_lock(&owner->perf_event_mutex);
+               /*
+                * We have to re-check the event->owner field; if it is
+                * cleared we raced with perf_event_exit_task(). Acquiring
+                * the mutex ensures it has finished, and we can proceed
+                * with freeing the event.
+                */
+               if (event->owner)
+                       list_del_init(&event->owner_entry);
+               mutex_unlock(&owner->perf_event_mutex);
+               put_task_struct(owner);
+       }
+
        return perf_event_release_kernel(event);
 }
 
@@ -3792,6 +3824,8 @@ static void perf_event_task_event(struct perf_task_event *task_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_task_ctx(&cpuctx->ctx, task_event);
 
                ctx = task_event->task_ctx;
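
The active_pmu test added in this hunk (and repeated in the two following hunks for comm and mmap events) prevents double delivery: several PMUs may share one per-cpu context (see the free_pmu_context()/update_pmu_context() hunk further down), so without the check every sharer would walk the same cpuctx once per PMU. A user-space sketch of the idea, with hypothetical names, not kernel API:

/*
 * Illustrative sketch only: when several PMUs share one context,
 * iterating all PMUs would visit the shared context repeatedly;
 * letting only the context's designated "active" PMU process it
 * makes each context visited exactly once.
 */
struct demo_cpuctx {
	struct demo_pmu *active_pmu;	/* designated owner of this context */
	int nr_events;
};

struct demo_pmu {
	struct demo_cpuctx *cpuctx;
	struct demo_pmu *next;		/* stand-in for the pmus list */
};

static void demo_deliver(struct demo_cpuctx *ctx) { ctx->nr_events++; }

static void demo_deliver_to_all(struct demo_pmu *pmus)
{
	struct demo_pmu *pmu;

	for (pmu = pmus; pmu; pmu = pmu->next) {
		if (pmu->cpuctx->active_pmu != pmu)
			continue;	/* another sharer will handle it */
		demo_deliver(pmu->cpuctx);
	}
}
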
@@ -3927,6 +3961,8 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
                ctxn = pmu->task_ctx_nr;
@@ -4112,6 +4148,8 @@ got_name:
        rcu_read_lock();
        list_for_each_entry_rcu(pmu, &pmus, entry) {
                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+               if (cpuctx->active_pmu != pmu)
+                       goto next;
                perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
                                        vma->vm_flags & VM_EXEC);
 
@@ -4681,7 +4719,7 @@ static int perf_swevent_init(struct perf_event *event)
                break;
        }
 
-       if (event_id > PERF_COUNT_SW_MAX)
+       if (event_id >= PERF_COUNT_SW_MAX)
                return -ENOENT;
 
        if (!event->parent) {
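
The ">" to ">=" change above fixes an off-by-one: event_id is used to index per-event-id state sized PERF_COUNT_SW_MAX, so valid ids run from 0 to PERF_COUNT_SW_MAX - 1 and PERF_COUNT_SW_MAX itself is already out of bounds. A hypothetical user-space analogue of the corrected bounds check:

#include <errno.h>

#define DEMO_COUNT_MAX 9		/* valid ids: 0..8 */

static int demo_table[DEMO_COUNT_MAX];

static int demo_lookup(unsigned int id)
{
	if (id >= DEMO_COUNT_MAX)	/* ">" would let id == 9 through */
		return -ENOENT;
	return demo_table[id];
}
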
@@ -5113,20 +5151,36 @@ static void *find_pmu_context(int ctxn)
        return NULL;
 }
 
-static void free_pmu_context(void * __percpu cpu_context)
+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 {
-       struct pmu *pmu;
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct perf_cpu_context *cpuctx;
+
+               cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+               if (cpuctx->active_pmu == old_pmu)
+                       cpuctx->active_pmu = pmu;
+       }
+}
+
+static void free_pmu_context(struct pmu *pmu)
+{
+       struct pmu *i;
 
        mutex_lock(&pmus_lock);
        /*
         * Like a real lame refcount.
         */
-       list_for_each_entry(pmu, &pmus, entry) {
-               if (pmu->pmu_cpu_context == cpu_context)
+       list_for_each_entry(i, &pmus, entry) {
+               if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
+                       update_pmu_context(i, pmu);
                        goto out;
+               }
        }
 
-       free_percpu(cpu_context);
+       free_percpu(pmu->pmu_cpu_context);
 out:
        mutex_unlock(&pmus_lock);
 }
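
For context: find_pmu_context() lets a newly registered PMU reuse an existing per-cpu context of the same task-context type, so free_pmu_context() is the other half of that informal refcount. The new update_pmu_context() re-homes active_pmu onto a surviving sharer instead of leaving it pointing at the PMU being torn down. A minimal sketch of the hand-off, collapsing the per-cpu loop into a single context and using hypothetical types:

#include <stdlib.h>

struct demo_cpuctx { struct demo_pmu *active_pmu; };

struct demo_pmu {
	struct demo_cpuctx *cpuctx;
	struct demo_pmu *next;
};

static struct demo_pmu *demo_pmus;	/* pmu already unlinked from here */

static void demo_free_pmu_context(struct demo_pmu *pmu)
{
	struct demo_pmu *i;

	for (i = demo_pmus; i; i = i->next) {
		if (i->cpuctx == pmu->cpuctx) {
			/* a sharer survives: re-home ownership, keep storage */
			if (i->cpuctx->active_pmu == pmu)
				i->cpuctx->active_pmu = i;
			return;
		}
	}
	free(pmu->cpuctx);		/* last user: really free it */
}
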
@@ -5158,6 +5212,7 @@ int perf_pmu_register(struct pmu *pmu)
                cpuctx->ctx.pmu = pmu;
                cpuctx->jiffies_interval = 1;
                INIT_LIST_HEAD(&cpuctx->rotation_list);
+               cpuctx->active_pmu = pmu;
        }
 
 got_cpu_context:
@@ -5209,7 +5264,7 @@ void perf_pmu_unregister(struct pmu *pmu)
        synchronize_rcu();
 
        free_percpu(pmu->pmu_disable_count);
-       free_pmu_context(pmu->pmu_cpu_context);
+       free_pmu_context(pmu);
 }
 
 struct pmu *perf_init_event(struct perf_event *event)
@@ -5677,7 +5732,7 @@ SYSCALL_DEFINE5(perf_event_open,
        mutex_unlock(&ctx->mutex);
 
        event->owner = current;
-       get_task_struct(current);
+
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5800,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        ++ctx->generation;
        mutex_unlock(&ctx->mutex);
 
-       event->owner = current;
-       get_task_struct(current);
-       mutex_lock(&current->perf_event_mutex);
-       list_add_tail(&event->owner_entry, &current->perf_event_list);
-       mutex_unlock(&current->perf_event_mutex);
-
        return event;
 
 err_free:
@@ -5901,8 +5950,24 @@ again:
  */
 void perf_event_exit_task(struct task_struct *child)
 {
+       struct perf_event *event, *tmp;
        int ctxn;
 
+       mutex_lock(&child->perf_event_mutex);
+       list_for_each_entry_safe(event, tmp, &child->perf_event_list,
+                                owner_entry) {
+               list_del_init(&event->owner_entry);
+
+               /*
+                * Ensure the list deletion is visible before we clear
+                * the owner; this closes a race against perf_release(),
+                * which needs to serialize on owner->perf_event_mutex.
+                */
+               smp_wmb();
+               event->owner = NULL;
+       }
+       mutex_unlock(&child->perf_event_mutex);
+
        for_each_task_context_nr(ctxn)
                perf_event_exit_task_context(child, ctxn);
 }
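
The smp_wmb() here pairs with the ACCESS_ONCE()/smp_read_barrier_depends() sequence added to perf_release() above: the exit path makes the list deletion visible before publishing owner == NULL, so a reader that still sees a non-NULL owner knows it must serialize on that owner's mutex. A rough C11 rendering of the hand-off, illustrative only with hypothetical names (the kernel primitives are not one-to-one with these atomics):

#include <stdatomic.h>
#include <stddef.h>

struct demo_task;			/* stand-in for task_struct */

struct demo_event {
	_Atomic(struct demo_task *) owner;
	int linked;			/* stand-in for owner_entry */
};

static void demo_exit_task(struct demo_event *event)
{
	event->linked = 0;		/* list_del_init(&event->owner_entry) */
	atomic_store_explicit(&event->owner, NULL,
			      memory_order_release);	/* smp_wmb(); owner = NULL */
}

static int demo_release_must_serialize(struct demo_event *event)
{
	struct demo_task *owner =
		atomic_load_explicit(&event->owner, memory_order_acquire);

	return owner != NULL;	/* non-NULL: take owner->perf_event_mutex */
}
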
@@ -6122,6 +6187,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
        struct perf_event *event;
        struct task_struct *parent = current;
        int inherited_all = 1;
+       unsigned long flags;
        int ret = 0;
 
        child->perf_event_ctxp[ctxn] = NULL;
@@ -6162,6 +6228,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       /*
+        * We can't hold ctx->lock when iterating the ->flexible_groups list
+        * due to allocations, but we need to prevent rotation because
+        * rotate_ctx() will change the list from interrupt context.
+        */
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 1;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
                ret = inherit_task_group(event, parent, parent_ctx,
                                         child, ctxn, &inherited_all);
@@ -6169,6 +6244,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
                        break;
        }
 
+       raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+       parent_ctx->rotate_disable = 0;
+       raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
        child_ctx = child->perf_event_ctxp[ctxn];
 
        if (child_ctx && inherited_all) {
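
This rotate_disable bracket pairs with the check added to rotate_ctx() in the hunk near the top of this diff: the flag is flipped under ctx->lock and rotate_ctx() tests it under the same lock, so the flexible_groups walk above cannot race with a concurrent rotation even though the walk itself runs unlocked. A condensed sketch of the pattern, with pthreads standing in for raw_spin_lock_irqsave() and hypothetical types:

#include <pthread.h>

struct demo_ctx {
	pthread_mutex_t lock;
	int rotate_disable;
	/* ... flexible_groups list ... */
};

static void demo_rotate(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (!ctx->rotate_disable) {
		/* list_rotate_left(&ctx->flexible_groups) equivalent */
	}
	pthread_mutex_unlock(&ctx->lock);
}

static void demo_walk(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	ctx->rotate_disable = 1;
	pthread_mutex_unlock(&ctx->lock);

	/* iterate the list; may sleep/allocate, so the lock is not held */

	pthread_mutex_lock(&ctx->lock);
	ctx->rotate_disable = 0;
	pthread_mutex_unlock(&ctx->lock);
}
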
@@ -6321,6 +6400,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 
 void __init perf_event_init(void)
 {
+       int ret;
+
        perf_event_init_all_cpus();
        init_srcu_struct(&pmus_srcu);
        perf_pmu_register(&perf_swevent);
@@ -6328,4 +6409,7 @@ void __init perf_event_init(void)
        perf_pmu_register(&perf_task_clock);
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
+
+       ret = init_hw_breakpoint();
+       WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
 }
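
init_hw_breakpoint() can fail (e.g. on allocation failure), but a missing breakpoint PMU is not fatal to boot, so the failure is only made loud: WARN() logs a message and a backtrace when its first argument is non-zero, and initialization carries on. A hypothetical user-space analogue of the same warn-and-continue pattern:

#include <stdio.h>

static int demo_init_breakpoints(void)
{
	return 0;			/* 0 on success, -errno on failure */
}

static void demo_init(void)
{
	int ret = demo_init_breakpoints();

	if (ret)	/* analogue of WARN(ret, "...: %d", ret) */
		fprintf(stderr,
			"demo: hw_breakpoint init failed with: %d\n", ret);
	/* startup continues either way */
}
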