perf: Collect the schedule-in rules in one function
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Sat, 9 Apr 2011 19:17:46 +0000 (21:17 +0200)
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Sat, 28 May 2011 16:01:19 +0000 (18:01 +0200)
The schedule-in rules were scattered across several call sites - collect them into a single helper function.
No change in functionality.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.979862055@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
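
For reference, the new helper collects the ordering rule that was previously repeated at each call site: pinned events are scheduled in before flexible ones, and for each class the CPU context is handled before the (optional) task context. Below is a copy of the function added by this patch with editorial annotations; the comments are not part of the patch itself:

static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	/* Pinned events go first so they get first claim on PMU counters. */
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	/* Flexible events fill whatever counters remain. */
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}

Keeping the NULL checks on ctx inside the helper lets callers such as __perf_install_in_context() and perf_rotate_context() pass a possibly-absent task context without open-coding the test.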
kernel/events/core.c

index 71c2d44..802f3b2 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1476,6 +1476,18 @@ ctx_sched_in(struct perf_event_context *ctx,
             enum event_type_t event_type,
             struct task_struct *task);
 
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+                               struct perf_event_context *ctx,
+                               struct task_struct *task)
+{
+       cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+       if (ctx)
+               ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+       if (ctx)
+               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -1523,12 +1535,7 @@ static int  __perf_install_in_context(void *info)
        /*
         * Schedule everything back in
         */
-       cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
-       if (task_ctx)
-               ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-       if (task_ctx)
-               ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
+       perf_event_sched_in(cpuctx, task_ctx, task);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, task_ctx);
@@ -2107,9 +2114,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-       ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-       ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+       perf_event_sched_in(cpuctx, ctx, task);
 
        cpuctx->task_ctx = ctx;
 
@@ -2347,9 +2352,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
        if (ctx)
                rotate_ctx(ctx);
 
-       cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
-       if (ctx)
-               ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
+       perf_event_sched_in(cpuctx, ctx, current);
 
 done:
        if (remove)