perf_events: Optimize perf_event_task_tick()
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 28 Jan 2010 12:57:44 +0000 (13:57 +0100)
committer Ingo Molnar <mingo@elte.hu>
Thu, 4 Feb 2010 08:59:49 +0000 (09:59 +0100)
Pretty much all of the calls do perf_disable/perf_enable cycles; pull
that out to a single disable/enable pair to cut back on hardware programming.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
kernel/perf_event.c

index 40f8b07..087025f 100644 (file)
@@ -1573,12 +1573,8 @@ static void rotate_ctx(struct perf_event_context *ctx)
        raw_spin_lock(&ctx->lock);
 
        /* Rotate the first entry last of non-pinned groups */
-       perf_disable();
-
        list_rotate_left(&ctx->flexible_groups);
 
-       perf_enable();
-
        raw_spin_unlock(&ctx->lock);
 }
 
@@ -1593,6 +1589,8 @@ void perf_event_task_tick(struct task_struct *curr)
        cpuctx = &__get_cpu_var(perf_cpu_context);
        ctx = curr->perf_event_ctxp;
 
+       perf_disable();
+
        perf_ctx_adjust_freq(&cpuctx->ctx);
        if (ctx)
                perf_ctx_adjust_freq(ctx);
@@ -1608,6 +1606,8 @@ void perf_event_task_tick(struct task_struct *curr)
        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
        if (ctx)
                task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+
+       perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,