2 * Performance counter core code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
9 * For licensing details see kernel-base/COPYING
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
31 #include <asm/irq_regs.h>
34 * Each CPU has a list of per CPU counters:
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
46 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
48 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
50 static atomic64_t perf_counter_id;
53 * Lock for (sysadmin-configurable) counter reservations:
55 static DEFINE_SPINLOCK(perf_resource_lock);
58 * Architecture provided APIs - weak aliases:
60 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
65 void __weak hw_perf_disable(void) { barrier(); }
66 void __weak hw_perf_enable(void) { barrier(); }
68 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
71 hw_perf_group_sched_in(struct perf_counter *group_leader,
72 struct perf_cpu_context *cpuctx,
73 struct perf_counter_context *ctx, int cpu)
78 void __weak perf_counter_print_debug(void) { }
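/*
 * perf_disable()/perf_enable() may be nested: a per-CPU depth count is
 * kept below and __perf_enable() reports true only when that count drops
 * back to zero, so the PMU is only re-enabled by the outermost
 * perf_enable().
 */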
80 static DEFINE_PER_CPU(int, disable_count);
82 void __perf_disable(void)
84 __get_cpu_var(disable_count)++;
87 bool __perf_enable(void)
89 return !--__get_cpu_var(disable_count);
92 void perf_disable(void)
98 void perf_enable(void)
104 static void get_ctx(struct perf_counter_context *ctx)
106 atomic_inc(&ctx->refcount);
109 static void free_ctx(struct rcu_head *head)
111 struct perf_counter_context *ctx;
113 ctx = container_of(head, struct perf_counter_context, rcu_head);
117 static void put_ctx(struct perf_counter_context *ctx)
119 if (atomic_dec_and_test(&ctx->refcount)) {
121 put_ctx(ctx->parent_ctx);
123 put_task_struct(ctx->task);
124 call_rcu(&ctx->rcu_head, free_ctx);
129 * Get the perf_counter_context for a task and lock it.
130 * This has to cope with the fact that until it is locked,
131 * the context could get moved to another task.
133 static struct perf_counter_context *
134 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
136 struct perf_counter_context *ctx;
140 ctx = rcu_dereference(task->perf_counter_ctxp);
143 * If this context is a clone of another, it might
144 * get swapped for another underneath us by
145 * perf_counter_task_sched_out, though the
146 * rcu_read_lock() protects us from any context
147 * getting freed. Lock the context and check if it
148 * got swapped before we could get the lock, and retry
149 * if so. If we locked the right context, then it
150 * can't get swapped on us any more.
152 spin_lock_irqsave(&ctx->lock, *flags);
153 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
154 spin_unlock_irqrestore(&ctx->lock, *flags);
163 * Get the context for a task and increment its pin_count so it
164 * can't get swapped to another task. This also increments its
165 * reference count so that the context can't get freed.
167 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
169 struct perf_counter_context *ctx;
172 ctx = perf_lock_task_context(task, &flags);
176 spin_unlock_irqrestore(&ctx->lock, flags);
181 static void perf_unpin_context(struct perf_counter_context *ctx)
185 spin_lock_irqsave(&ctx->lock, flags);
187 spin_unlock_irqrestore(&ctx->lock, flags);
192 * Add a counter to the lists for its context.
193 * Must be called with ctx->mutex and ctx->lock held.
196 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
198 struct perf_counter *group_leader = counter->group_leader;
201 * Depending on whether it is a standalone or sibling counter,
202 * add it straight to the context's counter list, or to the group
203 * leader's sibling list:
205 if (group_leader == counter)
206 list_add_tail(&counter->list_entry, &ctx->counter_list);
208 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
209 group_leader->nr_siblings++;
212 list_add_rcu(&counter->event_entry, &ctx->event_list);
217 * Remove a counter from the lists for its context.
218 * Must be called with ctx->mutex and ctx->lock held.
221 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
223 struct perf_counter *sibling, *tmp;
225 if (list_empty(&counter->list_entry))
229 list_del_init(&counter->list_entry);
230 list_del_rcu(&counter->event_entry);
232 if (counter->group_leader != counter)
233 counter->group_leader->nr_siblings--;
236 * If this was a group counter with sibling counters then
237 * upgrade the siblings to singleton counters by adding them
238 * to the context list directly:
240 list_for_each_entry_safe(sibling, tmp,
241 &counter->sibling_list, list_entry) {
243 list_move_tail(&sibling->list_entry, &ctx->counter_list);
244 sibling->group_leader = sibling;
249 counter_sched_out(struct perf_counter *counter,
250 struct perf_cpu_context *cpuctx,
251 struct perf_counter_context *ctx)
253 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
256 counter->state = PERF_COUNTER_STATE_INACTIVE;
257 counter->tstamp_stopped = ctx->time;
258 counter->pmu->disable(counter);
261 if (!is_software_counter(counter))
262 cpuctx->active_oncpu--;
264 if (counter->attr.exclusive || !cpuctx->active_oncpu)
265 cpuctx->exclusive = 0;
269 group_sched_out(struct perf_counter *group_counter,
270 struct perf_cpu_context *cpuctx,
271 struct perf_counter_context *ctx)
273 struct perf_counter *counter;
275 if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
278 counter_sched_out(group_counter, cpuctx, ctx);
281 * Schedule out siblings (if any):
283 list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
284 counter_sched_out(counter, cpuctx, ctx);
286 if (group_counter->attr.exclusive)
287 cpuctx->exclusive = 0;
291 * Cross CPU call to remove a performance counter
293 * We disable the counter on the hardware level first. After that we
294 * remove it from the context list.
296 static void __perf_counter_remove_from_context(void *info)
298 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
299 struct perf_counter *counter = info;
300 struct perf_counter_context *ctx = counter->ctx;
303 * If this is a task context, we need to check whether it is
304 * the current task context of this cpu. If not it has been
305 * scheduled out before the smp call arrived.
307 if (ctx->task && cpuctx->task_ctx != ctx)
310 spin_lock(&ctx->lock);
312 * Protect the list operation against NMI by disabling the
313 * counters on a global level.
317 counter_sched_out(counter, cpuctx, ctx);
319 list_del_counter(counter, ctx);
323 * Allow more per task counters with respect to the
326 cpuctx->max_pertask =
327 min(perf_max_counters - ctx->nr_counters,
328 perf_max_counters - perf_reserved_percpu);
332 spin_unlock(&ctx->lock);
337 * Remove the counter from a task's (or a CPU's) list of counters.
339 * Must be called with ctx->mutex held.
341 * CPU counters are removed with a smp call. For task counters we only
342 * call when the task is on a CPU.
344 * If counter->ctx is a cloned context, callers must make sure that
345 * every task struct that counter->ctx->task could possibly point to
346 * remains valid. This is OK when called from perf_release since
347 * that only calls us on the top-level context, which can't be a clone.
348 * When called from perf_counter_exit_task, it's OK because the
349 * context has been detached from its task.
351 static void perf_counter_remove_from_context(struct perf_counter *counter)
353 struct perf_counter_context *ctx = counter->ctx;
354 struct task_struct *task = ctx->task;
358 * Per cpu counters are removed via an smp call and
359 * the removal is always successful.
361 smp_call_function_single(counter->cpu,
362 __perf_counter_remove_from_context,
368 task_oncpu_function_call(task, __perf_counter_remove_from_context,
371 spin_lock_irq(&ctx->lock);
373 * If the context is active we need to retry the smp call.
375 if (ctx->nr_active && !list_empty(&counter->list_entry)) {
376 spin_unlock_irq(&ctx->lock);
381 * The lock prevents that this context is scheduled in so we
382 * can remove the counter safely, if the call above did not
385 if (!list_empty(&counter->list_entry)) {
386 list_del_counter(counter, ctx);
388 spin_unlock_irq(&ctx->lock);
391 static inline u64 perf_clock(void)
393 return cpu_clock(smp_processor_id());
397 * Update the record of the current time in a context.
399 static void update_context_time(struct perf_counter_context *ctx)
401 u64 now = perf_clock();
403 ctx->time += now - ctx->timestamp;
404 ctx->timestamp = now;
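/*
 * Each counter carries three context-relative timestamps: tstamp_enabled
 * (ctx->time when it was last enabled), tstamp_stopped (ctx->time when it
 * last stopped running) and tstamp_running, which is advanced by every
 * stopped interval so that ctx->time - tstamp_running is the time spent
 * in ACTIVE state.  The helpers below fold these into the
 * total_time_enabled/total_time_running values reported via read().
 */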
408 * Update the total_time_enabled and total_time_running fields for a counter.
410 static void update_counter_times(struct perf_counter *counter)
412 struct perf_counter_context *ctx = counter->ctx;
415 if (counter->state < PERF_COUNTER_STATE_INACTIVE)
418 counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
420 if (counter->state == PERF_COUNTER_STATE_INACTIVE)
421 run_end = counter->tstamp_stopped;
425 counter->total_time_running = run_end - counter->tstamp_running;
429 * Update total_time_enabled and total_time_running for all counters in a group.
431 static void update_group_times(struct perf_counter *leader)
433 struct perf_counter *counter;
435 update_counter_times(leader);
436 list_for_each_entry(counter, &leader->sibling_list, list_entry)
437 update_counter_times(counter);
441 * Cross CPU call to disable a performance counter
443 static void __perf_counter_disable(void *info)
445 struct perf_counter *counter = info;
446 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
447 struct perf_counter_context *ctx = counter->ctx;
450 * If this is a per-task counter, need to check whether this
451 * counter's task is the current task on this cpu.
453 if (ctx->task && cpuctx->task_ctx != ctx)
456 spin_lock(&ctx->lock);
459 * If the counter is on, turn it off.
460 * If it is in error state, leave it in error state.
462 if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
463 update_context_time(ctx);
464 update_counter_times(counter);
465 if (counter == counter->group_leader)
466 group_sched_out(counter, cpuctx, ctx);
468 counter_sched_out(counter, cpuctx, ctx);
469 counter->state = PERF_COUNTER_STATE_OFF;
472 spin_unlock(&ctx->lock);
478 * If counter->ctx is a cloned context, callers must make sure that
479 * every task struct that counter->ctx->task could possibly point to
480 * remains valid. This condition is satisfied when called through
481 * perf_counter_for_each_child or perf_counter_for_each because they
482 * hold the top-level counter's child_mutex, so any descendant that
483 * goes to exit will block in sync_child_counter.
484 * When called from perf_pending_counter it's OK because counter->ctx
485 * is the current context on this CPU and preemption is disabled,
486 * hence we can't get into perf_counter_task_sched_out for this context.
488 static void perf_counter_disable(struct perf_counter *counter)
490 struct perf_counter_context *ctx = counter->ctx;
491 struct task_struct *task = ctx->task;
495 * Disable the counter on the cpu that it's on
497 smp_call_function_single(counter->cpu, __perf_counter_disable,
503 task_oncpu_function_call(task, __perf_counter_disable, counter);
505 spin_lock_irq(&ctx->lock);
507 * If the counter is still active, we need to retry the cross-call.
509 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
510 spin_unlock_irq(&ctx->lock);
515 * Since we have the lock this context can't be scheduled
516 * in, so we can change the state safely.
518 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
519 update_counter_times(counter);
520 counter->state = PERF_COUNTER_STATE_OFF;
523 spin_unlock_irq(&ctx->lock);
527 counter_sched_in(struct perf_counter *counter,
528 struct perf_cpu_context *cpuctx,
529 struct perf_counter_context *ctx,
532 if (counter->state <= PERF_COUNTER_STATE_OFF)
535 counter->state = PERF_COUNTER_STATE_ACTIVE;
536 counter->oncpu = cpu; /* TODO: put 'cpu' into cpuctx->cpu */
538 * The new state must be visible before we turn it on in the hardware:
542 if (counter->pmu->enable(counter)) {
543 counter->state = PERF_COUNTER_STATE_INACTIVE;
548 counter->tstamp_running += ctx->time - counter->tstamp_stopped;
550 if (!is_software_counter(counter))
551 cpuctx->active_oncpu++;
554 if (counter->attr.exclusive)
555 cpuctx->exclusive = 1;
561 group_sched_in(struct perf_counter *group_counter,
562 struct perf_cpu_context *cpuctx,
563 struct perf_counter_context *ctx,
566 struct perf_counter *counter, *partial_group;
569 if (group_counter->state == PERF_COUNTER_STATE_OFF)
572 ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
574 return ret < 0 ? ret : 0;
576 if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
580 * Schedule in siblings as one group (if any):
582 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
583 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
584 partial_group = counter;
593 * Groups can be scheduled in as one unit only, so undo any
594 * partial group before returning:
596 list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
597 if (counter == partial_group)
599 counter_sched_out(counter, cpuctx, ctx);
601 counter_sched_out(group_counter, cpuctx, ctx);
607 * Return 1 for a group consisting entirely of software counters,
608 * 0 if the group contains any hardware counters.
610 static int is_software_only_group(struct perf_counter *leader)
612 struct perf_counter *counter;
614 if (!is_software_counter(leader))
617 list_for_each_entry(counter, &leader->sibling_list, list_entry)
618 if (!is_software_counter(counter))
625 * Work out whether we can put this counter group on the CPU now.
627 static int group_can_go_on(struct perf_counter *counter,
628 struct perf_cpu_context *cpuctx,
632 * Groups consisting entirely of software counters can always go on.
634 if (is_software_only_group(counter))
637 * If an exclusive group is already on, no other hardware
638 * counters can go on.
640 if (cpuctx->exclusive)
643 * If this group is exclusive and there are already
644 * counters on the CPU, it can't go on.
646 if (counter->attr.exclusive && cpuctx->active_oncpu)
649 * Otherwise, try to add it if all previous groups were able
655 static void add_counter_to_ctx(struct perf_counter *counter,
656 struct perf_counter_context *ctx)
658 list_add_counter(counter, ctx);
659 counter->tstamp_enabled = ctx->time;
660 counter->tstamp_running = ctx->time;
661 counter->tstamp_stopped = ctx->time;
665 * Cross CPU call to install and enable a performance counter
667 * Must be called with ctx->mutex held
669 static void __perf_install_in_context(void *info)
671 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
672 struct perf_counter *counter = info;
673 struct perf_counter_context *ctx = counter->ctx;
674 struct perf_counter *leader = counter->group_leader;
675 int cpu = smp_processor_id();
679 * If this is a task context, we need to check whether it is
680 * the current task context of this cpu. If not it has been
681 * scheduled out before the smp call arrived.
682 * Or possibly this is the right context but it isn't
683 * on this cpu because it had no counters.
685 if (ctx->task && cpuctx->task_ctx != ctx) {
686 if (cpuctx->task_ctx || ctx->task != current)
688 cpuctx->task_ctx = ctx;
691 spin_lock(&ctx->lock);
693 update_context_time(ctx);
696 * Protect the list operation against NMI by disabling the
697 * counters on a global level. NOP for non NMI based counters.
701 add_counter_to_ctx(counter, ctx);
704 * Don't put the counter on if it is disabled or if
705 * it is in a group and the group isn't on.
707 if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
708 (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
712 * An exclusive counter can't go on if there are already active
713 * hardware counters, and no hardware counter can go on if there
714 * is already an exclusive counter on.
716 if (!group_can_go_on(counter, cpuctx, 1))
719 err = counter_sched_in(counter, cpuctx, ctx, cpu);
723 * This counter couldn't go on. If it is in a group
724 * then we have to pull the whole group off.
725 * If the counter group is pinned then put it in error state.
727 if (leader != counter)
728 group_sched_out(leader, cpuctx, ctx);
729 if (leader->attr.pinned) {
730 update_group_times(leader);
731 leader->state = PERF_COUNTER_STATE_ERROR;
735 if (!err && !ctx->task && cpuctx->max_pertask)
736 cpuctx->max_pertask--;
741 spin_unlock(&ctx->lock);
745 * Attach a performance counter to a context
747 * First we add the counter to the list with the hardware enable bit
748 * in counter->hw_config cleared.
750 * If the counter is attached to a task which is on a CPU we use a smp
751 * call to enable it in the task context. The task might have been
752 * scheduled away, but we check this in the smp call again.
754 * Must be called with ctx->mutex held.
757 perf_install_in_context(struct perf_counter_context *ctx,
758 struct perf_counter *counter,
761 struct task_struct *task = ctx->task;
765 * Per cpu counters are installed via an smp call and
766 * the install is always successful.
768 smp_call_function_single(cpu, __perf_install_in_context,
774 task_oncpu_function_call(task, __perf_install_in_context,
777 spin_lock_irq(&ctx->lock);
779 * we need to retry the smp call.
781 if (ctx->is_active && list_empty(&counter->list_entry)) {
782 spin_unlock_irq(&ctx->lock);
787 * The lock prevents that this context is scheduled in so we
788 * can add the counter safely, if the call above did not
791 if (list_empty(&counter->list_entry))
792 add_counter_to_ctx(counter, ctx);
793 spin_unlock_irq(&ctx->lock);
797 * Cross CPU call to enable a performance counter
799 static void __perf_counter_enable(void *info)
801 struct perf_counter *counter = info;
802 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
803 struct perf_counter_context *ctx = counter->ctx;
804 struct perf_counter *leader = counter->group_leader;
808 * If this is a per-task counter, need to check whether this
809 * counter's task is the current task on this cpu.
811 if (ctx->task && cpuctx->task_ctx != ctx) {
812 if (cpuctx->task_ctx || ctx->task != current)
814 cpuctx->task_ctx = ctx;
817 spin_lock(&ctx->lock);
819 update_context_time(ctx);
821 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
823 counter->state = PERF_COUNTER_STATE_INACTIVE;
824 counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
827 * If the counter is in a group and isn't the group leader,
828 * then don't put it on unless the group is on.
830 if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
833 if (!group_can_go_on(counter, cpuctx, 1)) {
837 if (counter == leader)
838 err = group_sched_in(counter, cpuctx, ctx,
841 err = counter_sched_in(counter, cpuctx, ctx,
848 * If this counter can't go on and it's part of a
849 * group, then the whole group has to come off.
851 if (leader != counter)
852 group_sched_out(leader, cpuctx, ctx);
853 if (leader->attr.pinned) {
854 update_group_times(leader);
855 leader->state = PERF_COUNTER_STATE_ERROR;
860 spin_unlock(&ctx->lock);
866 * If counter->ctx is a cloned context, callers must make sure that
867 * every task struct that counter->ctx->task could possibly point to
868 * remains valid. This condition is satisfied when called through
869 * perf_counter_for_each_child or perf_counter_for_each as described
870 * for perf_counter_disable.
872 static void perf_counter_enable(struct perf_counter *counter)
874 struct perf_counter_context *ctx = counter->ctx;
875 struct task_struct *task = ctx->task;
879 * Enable the counter on the cpu that it's on
881 smp_call_function_single(counter->cpu, __perf_counter_enable,
886 spin_lock_irq(&ctx->lock);
887 if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
891 * If the counter is in error state, clear that first.
892 * That way, if we see the counter in error state below, we
893 * know that it has gone back into error state, as distinct
894 * from the task having been scheduled away before the
895 * cross-call arrived.
897 if (counter->state == PERF_COUNTER_STATE_ERROR)
898 counter->state = PERF_COUNTER_STATE_OFF;
901 spin_unlock_irq(&ctx->lock);
902 task_oncpu_function_call(task, __perf_counter_enable, counter);
904 spin_lock_irq(&ctx->lock);
907 * If the context is active and the counter is still off,
908 * we need to retry the cross-call.
910 if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
914 * Since we have the lock this context can't be scheduled
915 * in, so we can change the state safely.
917 if (counter->state == PERF_COUNTER_STATE_OFF) {
918 counter->state = PERF_COUNTER_STATE_INACTIVE;
919 counter->tstamp_enabled =
920 ctx->time - counter->total_time_enabled;
923 spin_unlock_irq(&ctx->lock);
926 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
929 * not supported on inherited counters
931 if (counter->attr.inherit)
934 atomic_add(refresh, &counter->event_limit);
935 perf_counter_enable(counter);
940 void __perf_counter_sched_out(struct perf_counter_context *ctx,
941 struct perf_cpu_context *cpuctx)
943 struct perf_counter *counter;
945 spin_lock(&ctx->lock);
947 if (likely(!ctx->nr_counters))
949 update_context_time(ctx);
952 if (ctx->nr_active) {
953 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
954 if (counter != counter->group_leader)
955 counter_sched_out(counter, cpuctx, ctx);
957 group_sched_out(counter, cpuctx, ctx);
962 spin_unlock(&ctx->lock);
966 * Test whether two contexts are equivalent, i.e. whether they
967 * have both been cloned from the same version of the same context
968 * and they both have the same number of enabled counters.
969 * If the number of enabled counters is the same, then the set
970 * of enabled counters should be the same, because these are both
971 * inherited contexts, therefore we can't access individual counters
972 * in them directly with an fd; we can only enable/disable all
973 * counters via prctl, or enable/disable all counters in a family
974 * via ioctl, which will have the same effect on both contexts.
976 static int context_equiv(struct perf_counter_context *ctx1,
977 struct perf_counter_context *ctx2)
979 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
980 && ctx1->parent_gen == ctx2->parent_gen
981 && !ctx1->pin_count && !ctx2->pin_count;
985 * Called from scheduler to remove the counters of the current task,
986 * with interrupts disabled.
988 * We stop each counter and update the counter value in counter->count.
990 * This does not protect us against NMI, but disable()
991 * sets the disabled bit in the control field of counter _before_
992 * accessing the counter control register. If a NMI hits, then it will
993 * not restart the counter.
995 void perf_counter_task_sched_out(struct task_struct *task,
996 struct task_struct *next, int cpu)
998 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
999 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1000 struct perf_counter_context *next_ctx;
1001 struct perf_counter_context *parent;
1002 struct pt_regs *regs;
1005 regs = task_pt_regs(task);
1006 perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
1008 if (likely(!ctx || !cpuctx->task_ctx))
1011 update_context_time(ctx);
1014 parent = rcu_dereference(ctx->parent_ctx);
1015 next_ctx = next->perf_counter_ctxp;
1016 if (parent && next_ctx &&
1017 rcu_dereference(next_ctx->parent_ctx) == parent) {
1019 * Looks like the two contexts are clones, so we might be
1020 * able to optimize the context switch. We lock both
1021 * contexts and check that they are clones under the
1022 * lock (including re-checking that neither has been
1023 * uncloned in the meantime). It doesn't matter which
1024 * order we take the locks because no other cpu could
1025 * be trying to lock both of these tasks.
1027 spin_lock(&ctx->lock);
1028 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1029 if (context_equiv(ctx, next_ctx)) {
1031 * XXX do we need a memory barrier of sorts
1032 * wrt rcu_dereference() of perf_counter_ctxp
1034 task->perf_counter_ctxp = next_ctx;
1035 next->perf_counter_ctxp = ctx;
1037 next_ctx->task = task;
1040 spin_unlock(&next_ctx->lock);
1041 spin_unlock(&ctx->lock);
1046 __perf_counter_sched_out(ctx, cpuctx);
1047 cpuctx->task_ctx = NULL;
1052 * Called with IRQs disabled
1054 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1056 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1058 if (!cpuctx->task_ctx)
1061 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1064 __perf_counter_sched_out(ctx, cpuctx);
1065 cpuctx->task_ctx = NULL;
1069 * Called with IRQs disabled
1071 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1073 __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1077 __perf_counter_sched_in(struct perf_counter_context *ctx,
1078 struct perf_cpu_context *cpuctx, int cpu)
1080 struct perf_counter *counter;
1083 spin_lock(&ctx->lock);
1085 if (likely(!ctx->nr_counters))
1088 ctx->timestamp = perf_clock();
1093 * First go through the list and put on any pinned groups
1094 * in order to give them the best chance of going on.
1096 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1097 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1098 !counter->attr.pinned)
1100 if (counter->cpu != -1 && counter->cpu != cpu)
1103 if (counter != counter->group_leader)
1104 counter_sched_in(counter, cpuctx, ctx, cpu);
1106 if (group_can_go_on(counter, cpuctx, 1))
1107 group_sched_in(counter, cpuctx, ctx, cpu);
1111 * If this pinned group hasn't been scheduled,
1112 * put it in error state.
1114 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1115 update_group_times(counter);
1116 counter->state = PERF_COUNTER_STATE_ERROR;
1120 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1122 * Ignore counters in OFF or ERROR state, and
1123 * ignore pinned counters since we did them already.
1125 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1126 counter->attr.pinned)
1130 * Listen to the 'cpu' scheduling filter constraint
1133 if (counter->cpu != -1 && counter->cpu != cpu)
1136 if (counter != counter->group_leader) {
1137 if (counter_sched_in(counter, cpuctx, ctx, cpu))
1140 if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1141 if (group_sched_in(counter, cpuctx, ctx, cpu))
1148 spin_unlock(&ctx->lock);
1152 * Called from scheduler to add the counters of the current task
1153 * with interrupts disabled.
1155 * We restore the counter value and then enable it.
1157 * This does not protect us against NMI, but enable()
1158 * sets the enabled bit in the control field of counter _before_
1159 * accessing the counter control register. If a NMI hits, then it will
1160 * keep the counter running.
1162 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1164 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1165 struct perf_counter_context *ctx = task->perf_counter_ctxp;
1169 if (cpuctx->task_ctx == ctx)
1171 __perf_counter_sched_in(ctx, cpuctx, cpu);
1172 cpuctx->task_ctx = ctx;
1175 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1177 struct perf_counter_context *ctx = &cpuctx->ctx;
1179 __perf_counter_sched_in(ctx, cpuctx, cpu);
1182 #define MAX_INTERRUPTS (~0ULL)
1184 static void perf_log_throttle(struct perf_counter *counter, int enable);
1185 static void perf_log_period(struct perf_counter *counter, u64 period);
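/*
 * For counters with attr.freq set, the sample period is re-derived once
 * per tick from the interrupt rate actually observed: HZ * interrupts *
 * sample_period approximates the events counted per second, and dividing
 * that by the requested sample_freq gives the period that would have
 * produced the desired sampling rate.
 */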
1187 static void perf_adjust_freq(struct perf_counter_context *ctx)
1189 struct perf_counter *counter;
1190 u64 interrupts, sample_period;
1194 spin_lock(&ctx->lock);
1195 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1196 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1199 interrupts = counter->hw.interrupts;
1200 counter->hw.interrupts = 0;
1202 if (interrupts == MAX_INTERRUPTS) {
1203 perf_log_throttle(counter, 1);
1204 counter->pmu->unthrottle(counter);
1205 interrupts = 2*sysctl_perf_counter_limit/HZ;
1208 if (!counter->attr.freq || !counter->attr.sample_freq)
1211 events = HZ * interrupts * counter->hw.sample_period;
1212 period = div64_u64(events, counter->attr.sample_freq);
1214 delta = (s64)(1 + period - counter->hw.sample_period);
1217 sample_period = counter->hw.sample_period + delta;
1222 perf_log_period(counter, sample_period);
1224 counter->hw.sample_period = sample_period;
1226 spin_unlock(&ctx->lock);
1230 * Round-robin a context's counters:
1232 static void rotate_ctx(struct perf_counter_context *ctx)
1234 struct perf_counter *counter;
1236 if (!ctx->nr_counters)
1239 spin_lock(&ctx->lock);
1241 * Rotate the first entry last (works just fine for group counters too):
1244 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1245 list_move_tail(&counter->list_entry, &ctx->counter_list);
1250 spin_unlock(&ctx->lock);
1253 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1255 struct perf_cpu_context *cpuctx;
1256 struct perf_counter_context *ctx;
1258 if (!atomic_read(&nr_counters))
1261 cpuctx = &per_cpu(perf_cpu_context, cpu);
1262 ctx = curr->perf_counter_ctxp;
1264 perf_adjust_freq(&cpuctx->ctx);
1266 perf_adjust_freq(ctx);
1268 perf_counter_cpu_sched_out(cpuctx);
1270 __perf_counter_task_sched_out(ctx);
1272 rotate_ctx(&cpuctx->ctx);
1276 perf_counter_cpu_sched_in(cpuctx, cpu);
1278 perf_counter_task_sched_in(curr, cpu);
1282 * Cross CPU call to read the hardware counter
1284 static void __read(void *info)
1286 struct perf_counter *counter = info;
1287 struct perf_counter_context *ctx = counter->ctx;
1288 unsigned long flags;
1290 local_irq_save(flags);
1292 update_context_time(ctx);
1293 counter->pmu->read(counter);
1294 update_counter_times(counter);
1295 local_irq_restore(flags);
1298 static u64 perf_counter_read(struct perf_counter *counter)
1301 * If counter is enabled and currently active on a CPU, update the
1302 * value in the counter structure:
1304 if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1305 smp_call_function_single(counter->oncpu,
1306 __read, counter, 1);
1307 } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1308 update_counter_times(counter);
1311 return atomic64_read(&counter->count);
1315 * Initialize the perf_counter context in a task_struct:
1318 __perf_counter_init_context(struct perf_counter_context *ctx,
1319 struct task_struct *task)
1321 memset(ctx, 0, sizeof(*ctx));
1322 spin_lock_init(&ctx->lock);
1323 mutex_init(&ctx->mutex);
1324 INIT_LIST_HEAD(&ctx->counter_list);
1325 INIT_LIST_HEAD(&ctx->event_list);
1326 atomic_set(&ctx->refcount, 1);
1330 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1332 struct perf_counter_context *parent_ctx;
1333 struct perf_counter_context *ctx;
1334 struct perf_cpu_context *cpuctx;
1335 struct task_struct *task;
1336 unsigned long flags;
1340 * If cpu is not a wildcard then this is a percpu counter:
1343 /* Must be root to operate on a CPU counter: */
1344 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1345 return ERR_PTR(-EACCES);
1347 if (cpu < 0 || cpu > num_possible_cpus())
1348 return ERR_PTR(-EINVAL);
1351 * We could be clever and allow to attach a counter to an
1352 * offline CPU and activate it when the CPU comes up, but
1355 if (!cpu_isset(cpu, cpu_online_map))
1356 return ERR_PTR(-ENODEV);
1358 cpuctx = &per_cpu(perf_cpu_context, cpu);
1369 task = find_task_by_vpid(pid);
1371 get_task_struct(task);
1375 return ERR_PTR(-ESRCH);
1378 * Can't attach counters to a dying task.
1381 if (task->flags & PF_EXITING)
1384 /* Reuse ptrace permission checks for now. */
1386 if (!ptrace_may_access(task, PTRACE_MODE_READ))
1390 ctx = perf_lock_task_context(task, &flags);
1392 parent_ctx = ctx->parent_ctx;
1394 put_ctx(parent_ctx);
1395 ctx->parent_ctx = NULL; /* no longer a clone */
1398 * Get an extra reference before dropping the lock so that
1399 * this context won't get freed if the task exits.
1402 spin_unlock_irqrestore(&ctx->lock, flags);
1406 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1410 __perf_counter_init_context(ctx, task);
1412 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1414 * We raced with some other task; use
1415 * the context they set.
1420 get_task_struct(task);
1423 put_task_struct(task);
1427 put_task_struct(task);
1428 return ERR_PTR(err);
1431 static void free_counter_rcu(struct rcu_head *head)
1433 struct perf_counter *counter;
1435 counter = container_of(head, struct perf_counter, rcu_head);
1437 put_pid_ns(counter->ns);
1441 static void perf_pending_sync(struct perf_counter *counter);
1443 static void free_counter(struct perf_counter *counter)
1445 perf_pending_sync(counter);
1447 atomic_dec(&nr_counters);
1448 if (counter->attr.mmap)
1449 atomic_dec(&nr_mmap_counters);
1450 if (counter->attr.comm)
1451 atomic_dec(&nr_comm_counters);
1453 if (counter->destroy)
1454 counter->destroy(counter);
1456 put_ctx(counter->ctx);
1457 call_rcu(&counter->rcu_head, free_counter_rcu);
1461 * Called when the last reference to the file is gone.
1463 static int perf_release(struct inode *inode, struct file *file)
1465 struct perf_counter *counter = file->private_data;
1466 struct perf_counter_context *ctx = counter->ctx;
1468 file->private_data = NULL;
1470 WARN_ON_ONCE(ctx->parent_ctx);
1471 mutex_lock(&ctx->mutex);
1472 perf_counter_remove_from_context(counter);
1473 mutex_unlock(&ctx->mutex);
1475 mutex_lock(&counter->owner->perf_counter_mutex);
1476 list_del_init(&counter->owner_entry);
1477 mutex_unlock(&counter->owner->perf_counter_mutex);
1478 put_task_struct(counter->owner);
1480 free_counter(counter);
1486 * Read the performance counter - simple non blocking version for now
1489 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1495 * Return end-of-file for a read on a counter that is in
1496 * error state (i.e. because it was pinned but it couldn't be
1497 * scheduled on to the CPU at some point).
1499 if (counter->state == PERF_COUNTER_STATE_ERROR)
1502 WARN_ON_ONCE(counter->ctx->parent_ctx);
1503 mutex_lock(&counter->child_mutex);
1504 values[0] = perf_counter_read(counter);
1506 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1507 values[n++] = counter->total_time_enabled +
1508 atomic64_read(&counter->child_total_time_enabled);
1509 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1510 values[n++] = counter->total_time_running +
1511 atomic64_read(&counter->child_total_time_running);
1512 if (counter->attr.read_format & PERF_FORMAT_ID)
1513 values[n++] = counter->id;
1514 mutex_unlock(&counter->child_mutex);
1516 if (count < n * sizeof(u64))
1518 count = n * sizeof(u64);
1520 if (copy_to_user(buf, values, count))
1527 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1529 struct perf_counter *counter = file->private_data;
1531 return perf_read_hw(counter, buf, count);
1534 static unsigned int perf_poll(struct file *file, poll_table *wait)
1536 struct perf_counter *counter = file->private_data;
1537 struct perf_mmap_data *data;
1538 unsigned int events = POLL_HUP;
1541 data = rcu_dereference(counter->data);
1543 events = atomic_xchg(&data->poll, 0);
1546 poll_wait(file, &counter->waitq, wait);
1551 static void perf_counter_reset(struct perf_counter *counter)
1553 (void)perf_counter_read(counter);
1554 atomic64_set(&counter->count, 0);
1555 perf_counter_update_userpage(counter);
1558 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1559 void (*func)(struct perf_counter *))
1561 struct perf_counter_context *ctx = counter->ctx;
1562 struct perf_counter *sibling;
1564 WARN_ON_ONCE(ctx->parent_ctx);
1565 mutex_lock(&ctx->mutex);
1566 counter = counter->group_leader;
1569 list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1571 mutex_unlock(&ctx->mutex);
1575 * Holding the top-level counter's child_mutex means that any
1576 * descendant process that has inherited this counter will block
1577 * in sync_child_counter if it goes to exit, thus satisfying the
1578 * task existence requirements of perf_counter_enable/disable.
1580 static void perf_counter_for_each_child(struct perf_counter *counter,
1581 void (*func)(struct perf_counter *))
1583 struct perf_counter *child;
1585 WARN_ON_ONCE(counter->ctx->parent_ctx);
1586 mutex_lock(&counter->child_mutex);
1588 list_for_each_entry(child, &counter->child_list, child_list)
1590 mutex_unlock(&counter->child_mutex);
1593 static void perf_counter_for_each(struct perf_counter *counter,
1594 void (*func)(struct perf_counter *))
1596 struct perf_counter *child;
1598 WARN_ON_ONCE(counter->ctx->parent_ctx);
1599 mutex_lock(&counter->child_mutex);
1600 perf_counter_for_each_sibling(counter, func);
1601 list_for_each_entry(child, &counter->child_list, child_list)
1602 perf_counter_for_each_sibling(child, func);
1603 mutex_unlock(&counter->child_mutex);
1606 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1608 struct perf_counter_context *ctx = counter->ctx;
1613 if (!counter->attr.sample_period)
1616 size = copy_from_user(&value, arg, sizeof(value));
1617 if (size != sizeof(value))
1623 spin_lock_irq(&ctx->lock);
1624 if (counter->attr.freq) {
1625 if (value > sysctl_perf_counter_limit) {
1630 counter->attr.sample_freq = value;
1632 counter->attr.sample_period = value;
1633 counter->hw.sample_period = value;
1635 perf_log_period(counter, value);
1638 spin_unlock_irq(&ctx->lock);
1643 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1645 struct perf_counter *counter = file->private_data;
1646 void (*func)(struct perf_counter *);
1650 case PERF_COUNTER_IOC_ENABLE:
1651 func = perf_counter_enable;
1653 case PERF_COUNTER_IOC_DISABLE:
1654 func = perf_counter_disable;
1656 case PERF_COUNTER_IOC_RESET:
1657 func = perf_counter_reset;
1660 case PERF_COUNTER_IOC_REFRESH:
1661 return perf_counter_refresh(counter, arg);
1663 case PERF_COUNTER_IOC_PERIOD:
1664 return perf_counter_period(counter, (u64 __user *)arg);
1670 if (flags & PERF_IOC_FLAG_GROUP)
1671 perf_counter_for_each(counter, func);
1673 perf_counter_for_each_child(counter, func);
1678 int perf_counter_task_enable(void)
1680 struct perf_counter *counter;
1682 mutex_lock(&current->perf_counter_mutex);
1683 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1684 perf_counter_for_each_child(counter, perf_counter_enable);
1685 mutex_unlock(&current->perf_counter_mutex);
1690 int perf_counter_task_disable(void)
1692 struct perf_counter *counter;
1694 mutex_lock(&current->perf_counter_mutex);
1695 list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1696 perf_counter_for_each_child(counter, perf_counter_disable);
1697 mutex_unlock(&current->perf_counter_mutex);
1703 * Callers need to ensure there can be no nesting of this function, otherwise
1704 * the seqlock logic goes bad. We can not serialize this because the arch
1705 * code calls this from NMI context.
1707 void perf_counter_update_userpage(struct perf_counter *counter)
1709 struct perf_counter_mmap_page *userpg;
1710 struct perf_mmap_data *data;
1713 data = rcu_dereference(counter->data);
1717 userpg = data->user_page;
1720 * Disable preemption so as to not let the corresponding user-space
1721 * spin too long if we get preempted.
1726 userpg->index = counter->hw.idx;
1727 userpg->offset = atomic64_read(&counter->count);
1728 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1729 userpg->offset -= atomic64_read(&counter->hw.prev_count);
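/*
 * Sketch of the intended user-space read side: because the update below
 * is not atomic, the values have to be read with a seqlock-style retry
 * loop against ->lock, e.g.:
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		index  = pc->index;	(hardware counter to read, if active)
 *		offset = pc->offset;	(added to the raw hardware value)
 *		barrier();
 *	} while (pc->lock != seq);
 *
 * The update path bumps ->lock around its writes, which is why, as noted
 * above, calls to this function must not nest.
 */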
1738 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1740 struct perf_counter *counter = vma->vm_file->private_data;
1741 struct perf_mmap_data *data;
1742 int ret = VM_FAULT_SIGBUS;
1745 data = rcu_dereference(counter->data);
1749 if (vmf->pgoff == 0) {
1750 vmf->page = virt_to_page(data->user_page);
1752 int nr = vmf->pgoff - 1;
1754 if ((unsigned)nr > data->nr_pages)
1757 vmf->page = virt_to_page(data->data_pages[nr]);
1759 get_page(vmf->page);
1767 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1769 struct perf_mmap_data *data;
1773 WARN_ON(atomic_read(&counter->mmap_count));
1775 size = sizeof(struct perf_mmap_data);
1776 size += nr_pages * sizeof(void *);
1778 data = kzalloc(size, GFP_KERNEL);
1782 data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1783 if (!data->user_page)
1784 goto fail_user_page;
1786 for (i = 0; i < nr_pages; i++) {
1787 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1788 if (!data->data_pages[i])
1789 goto fail_data_pages;
1792 data->nr_pages = nr_pages;
1793 atomic_set(&data->lock, -1);
1795 rcu_assign_pointer(counter->data, data);
1800 for (i--; i >= 0; i--)
1801 free_page((unsigned long)data->data_pages[i]);
1803 free_page((unsigned long)data->user_page);
1812 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1814 struct perf_mmap_data *data;
1817 data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1819 free_page((unsigned long)data->user_page);
1820 for (i = 0; i < data->nr_pages; i++)
1821 free_page((unsigned long)data->data_pages[i]);
1825 static void perf_mmap_data_free(struct perf_counter *counter)
1827 struct perf_mmap_data *data = counter->data;
1829 WARN_ON(atomic_read(&counter->mmap_count));
1831 rcu_assign_pointer(counter->data, NULL);
1832 call_rcu(&data->rcu_head, __perf_mmap_data_free);
1835 static void perf_mmap_open(struct vm_area_struct *vma)
1837 struct perf_counter *counter = vma->vm_file->private_data;
1839 atomic_inc(&counter->mmap_count);
1842 static void perf_mmap_close(struct vm_area_struct *vma)
1844 struct perf_counter *counter = vma->vm_file->private_data;
1846 WARN_ON_ONCE(counter->ctx->parent_ctx);
1847 if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1848 struct user_struct *user = current_user();
1850 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1851 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1852 perf_mmap_data_free(counter);
1853 mutex_unlock(&counter->mmap_mutex);
1857 static struct vm_operations_struct perf_mmap_vmops = {
1858 .open = perf_mmap_open,
1859 .close = perf_mmap_close,
1860 .fault = perf_mmap_fault,
1863 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1865 struct perf_counter *counter = file->private_data;
1866 unsigned long user_locked, user_lock_limit;
1867 struct user_struct *user = current_user();
1868 unsigned long locked, lock_limit;
1869 unsigned long vma_size;
1870 unsigned long nr_pages;
1871 long user_extra, extra;
1874 if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1877 vma_size = vma->vm_end - vma->vm_start;
1878 nr_pages = (vma_size / PAGE_SIZE) - 1;
1881 * If we have data pages ensure they're a power-of-two number, so we
1882 * can do bitmasks instead of modulo.
1884 if (nr_pages != 0 && !is_power_of_2(nr_pages))
1887 if (vma_size != PAGE_SIZE * (1 + nr_pages))
1890 if (vma->vm_pgoff != 0)
1893 WARN_ON_ONCE(counter->ctx->parent_ctx);
1894 mutex_lock(&counter->mmap_mutex);
1895 if (atomic_inc_not_zero(&counter->mmap_count)) {
1896 if (nr_pages != counter->data->nr_pages)
1901 user_extra = nr_pages + 1;
1902 user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1905 * Increase the limit linearly with more CPUs:
1907 user_lock_limit *= num_online_cpus();
1909 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1912 if (user_locked > user_lock_limit)
1913 extra = user_locked - user_lock_limit;
1915 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1916 lock_limit >>= PAGE_SHIFT;
1917 locked = vma->vm_mm->locked_vm + extra;
1919 if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1924 WARN_ON(counter->data);
1925 ret = perf_mmap_data_alloc(counter, nr_pages);
1929 atomic_set(&counter->mmap_count, 1);
1930 atomic_long_add(user_extra, &user->locked_vm);
1931 vma->vm_mm->locked_vm += extra;
1932 counter->data->nr_locked = extra;
1934 mutex_unlock(&counter->mmap_mutex);
1936 vma->vm_flags &= ~VM_MAYWRITE;
1937 vma->vm_flags |= VM_RESERVED;
1938 vma->vm_ops = &perf_mmap_vmops;
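/*
 * A minimal user-space sketch of the mapping this function accepts: one
 * read-only control page followed by zero or a power-of-two number of
 * data pages, mapped shared at file offset 0 (fd being the descriptor
 * returned by sys_perf_counter_open()):
 *
 *	len  = (1 + nr_data_pages) * page_size;
 *	base = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
 *
 * Any other size, a writable mapping or a non-zero offset is rejected
 * above.
 */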
1943 static int perf_fasync(int fd, struct file *filp, int on)
1945 struct inode *inode = filp->f_path.dentry->d_inode;
1946 struct perf_counter *counter = filp->private_data;
1949 mutex_lock(&inode->i_mutex);
1950 retval = fasync_helper(fd, filp, on, &counter->fasync);
1951 mutex_unlock(&inode->i_mutex);
1959 static const struct file_operations perf_fops = {
1960 .release = perf_release,
1963 .unlocked_ioctl = perf_ioctl,
1964 .compat_ioctl = perf_ioctl,
1966 .fasync = perf_fasync,
1970 * Perf counter wakeup
1972 * If there's data, ensure we set the poll() state and publish everything
1973 * to user-space before waking everybody up.
1976 void perf_counter_wakeup(struct perf_counter *counter)
1978 wake_up_all(&counter->waitq);
1980 if (counter->pending_kill) {
1981 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1982 counter->pending_kill = 0;
1989 * Handle the case where we need to wake up from NMI (or rq->lock) context.
1991 * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1992 * single linked list and use cmpxchg() to add entries lockless.
1995 static void perf_pending_counter(struct perf_pending_entry *entry)
1997 struct perf_counter *counter = container_of(entry,
1998 struct perf_counter, pending);
2000 if (counter->pending_disable) {
2001 counter->pending_disable = 0;
2002 perf_counter_disable(counter);
2005 if (counter->pending_wakeup) {
2006 counter->pending_wakeup = 0;
2007 perf_counter_wakeup(counter);
2011 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2013 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2017 static void perf_pending_queue(struct perf_pending_entry *entry,
2018 void (*func)(struct perf_pending_entry *))
2020 struct perf_pending_entry **head;
2022 if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2027 head = &get_cpu_var(perf_pending_head);
2030 entry->next = *head;
2031 } while (cmpxchg(head, entry->next, entry) != entry->next);
2033 set_perf_counter_pending();
2035 put_cpu_var(perf_pending_head);
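/*
 * Drain the per-CPU pending list: atomically swap the list head for the
 * PENDING_TAIL sentinel, then run the callback of every entry that was on
 * it.  Entries queued concurrently simply start a new list and get picked
 * up by a later run.
 */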
2038 static int __perf_pending_run(void)
2040 struct perf_pending_entry *list;
2043 list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2044 while (list != PENDING_TAIL) {
2045 void (*func)(struct perf_pending_entry *);
2046 struct perf_pending_entry *entry = list;
2053 * Ensure we observe the unqueue before we issue the wakeup,
2054 * so that we won't be waiting forever.
2055 * -- see perf_not_pending().
2066 static inline int perf_not_pending(struct perf_counter *counter)
2069 * If we flush on whatever cpu we run, there is a chance we don't
2073 __perf_pending_run();
2077 * Ensure we see the proper queue state before going to sleep
2078 * so that we do not miss the wakeup. -- see __perf_pending_run()
2081 return counter->pending.next == NULL;
2084 static void perf_pending_sync(struct perf_counter *counter)
2086 wait_event(counter->waitq, perf_not_pending(counter));
2089 void perf_counter_do_pending(void)
2091 __perf_pending_run();
2095 * Callchain support -- arch specific
2098 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2107 struct perf_output_handle {
2108 struct perf_counter *counter;
2109 struct perf_mmap_data *data;
2111 unsigned long offset;
2115 unsigned long flags;
2118 static void perf_output_wakeup(struct perf_output_handle *handle)
2120 atomic_set(&handle->data->poll, POLL_IN);
2123 handle->counter->pending_wakeup = 1;
2124 perf_pending_queue(&handle->counter->pending,
2125 perf_pending_counter);
2127 perf_counter_wakeup(handle->counter);
2131 * Curious locking construct.
2133 * We need to ensure a later event doesn't publish a head when a former
2134 * event isn't done writing. However since we need to deal with NMIs we
2135 * cannot fully serialize things.
2137 * What we do is serialize between CPUs so we only have to deal with NMI
2138 * nesting on a single CPU.
2140 * We only publish the head (and generate a wakeup) when the outer-most
2143 static void perf_output_lock(struct perf_output_handle *handle)
2145 struct perf_mmap_data *data = handle->data;
2150 local_irq_save(handle->flags);
2151 cpu = smp_processor_id();
2153 if (in_nmi() && atomic_read(&data->lock) == cpu)
2156 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2162 static void perf_output_unlock(struct perf_output_handle *handle)
2164 struct perf_mmap_data *data = handle->data;
2168 data->done_head = data->head;
2170 if (!handle->locked)
2175 * The xchg implies a full barrier that ensures all writes are done
2176 * before we publish the new head, matched by a rmb() in userspace when
2177 * reading this position.
2179 while ((head = atomic_long_xchg(&data->done_head, 0)))
2180 data->user_page->data_head = head;
2183 * NMI can happen here, which means we can miss a done_head update.
2186 cpu = atomic_xchg(&data->lock, -1);
2187 WARN_ON_ONCE(cpu != smp_processor_id());
2190 * Therefore we have to validate we did not indeed do so.
2192 if (unlikely(atomic_long_read(&data->done_head))) {
2194 * Since we had it locked, we can lock it again.
2196 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2202 if (atomic_xchg(&data->wakeup, 0))
2203 perf_output_wakeup(handle);
2205 local_irq_restore(handle->flags);
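/*
 * Sketch of the matching user-space consumer: read data_head from the
 * control page, order that read before touching the buffer, then walk the
 * records (each starting with a struct perf_event_header that carries its
 * size), wrapping with the power-of-two buffer mask:
 *
 *	head = pc->data_head;
 *	rmb();
 *	while (tail != head) {
 *		hdr = base + (tail & mask);
 *		... consume the record ...
 *		tail += hdr->size;
 *	}
 *
 * tail, base and mask are the reader's own bookkeeping; only data_head is
 * published by the kernel side above.
 */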
2208 static int perf_output_begin(struct perf_output_handle *handle,
2209 struct perf_counter *counter, unsigned int size,
2210 int nmi, int overflow)
2212 struct perf_mmap_data *data;
2213 unsigned int offset, head;
2216 * For inherited counters we send all the output towards the parent.
2218 if (counter->parent)
2219 counter = counter->parent;
2222 data = rcu_dereference(counter->data);
2226 handle->data = data;
2227 handle->counter = counter;
2229 handle->overflow = overflow;
2231 if (!data->nr_pages)
2234 perf_output_lock(handle);
2237 offset = head = atomic_long_read(&data->head);
2239 } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2241 handle->offset = offset;
2242 handle->head = head;
2244 if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2245 atomic_set(&data->wakeup, 1);
2250 perf_output_wakeup(handle);
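/*
 * Copy a chunk into the ring buffer at handle->offset, crossing page
 * boundaries as needed; since nr_pages is a power of two, the page index
 * is derived with a mask rather than a modulo.
 */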
2257 static void perf_output_copy(struct perf_output_handle *handle,
2258 const void *buf, unsigned int len)
2260 unsigned int pages_mask;
2261 unsigned int offset;
2265 offset = handle->offset;
2266 pages_mask = handle->data->nr_pages - 1;
2267 pages = handle->data->data_pages;
2270 unsigned int page_offset;
2273 nr = (offset >> PAGE_SHIFT) & pages_mask;
2274 page_offset = offset & (PAGE_SIZE - 1);
2275 size = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2277 memcpy(pages[nr] + page_offset, buf, size);
2284 handle->offset = offset;
2287 * Check we didn't copy past our reservation window, taking the
2288 * possible unsigned int wrap into account.
2290 WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2293 #define perf_output_put(handle, x) \
2294 perf_output_copy((handle), &(x), sizeof(x))
2296 static void perf_output_end(struct perf_output_handle *handle)
2298 struct perf_counter *counter = handle->counter;
2299 struct perf_mmap_data *data = handle->data;
2301 int wakeup_events = counter->attr.wakeup_events;
2303 if (handle->overflow && wakeup_events) {
2304 int events = atomic_inc_return(&data->events);
2305 if (events >= wakeup_events) {
2306 atomic_sub(wakeup_events, &data->events);
2307 atomic_set(&data->wakeup, 1);
2311 perf_output_unlock(handle);
2315 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2318 * only top level counters have the pid namespace they were created in
2320 if (counter->parent)
2321 counter = counter->parent;
2323 return task_tgid_nr_ns(p, counter->ns);
2326 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2329 * only top level counters have the pid namespace they were created in
2331 if (counter->parent)
2332 counter = counter->parent;
2334 return task_pid_nr_ns(p, counter->ns);
2337 static void perf_counter_output(struct perf_counter *counter,
2338 int nmi, struct pt_regs *regs, u64 addr)
2341 u64 sample_type = counter->attr.sample_type;
2342 struct perf_output_handle handle;
2343 struct perf_event_header header;
2352 struct perf_callchain_entry *callchain = NULL;
2353 int callchain_size = 0;
2360 header.size = sizeof(header);
2362 header.misc = PERF_EVENT_MISC_OVERFLOW;
2363 header.misc |= perf_misc_flags(regs);
2365 if (sample_type & PERF_SAMPLE_IP) {
2366 ip = perf_instruction_pointer(regs);
2367 header.type |= PERF_SAMPLE_IP;
2368 header.size += sizeof(ip);
2371 if (sample_type & PERF_SAMPLE_TID) {
2372 /* namespace issues */
2373 tid_entry.pid = perf_counter_pid(counter, current);
2374 tid_entry.tid = perf_counter_tid(counter, current);
2376 header.type |= PERF_SAMPLE_TID;
2377 header.size += sizeof(tid_entry);
2380 if (sample_type & PERF_SAMPLE_TIME) {
2382 * Maybe do better on x86 and provide cpu_clock_nmi()
2384 time = sched_clock();
2386 header.type |= PERF_SAMPLE_TIME;
2387 header.size += sizeof(u64);
2390 if (sample_type & PERF_SAMPLE_ADDR) {
2391 header.type |= PERF_SAMPLE_ADDR;
2392 header.size += sizeof(u64);
2395 if (sample_type & PERF_SAMPLE_ID) {
2396 header.type |= PERF_SAMPLE_ID;
2397 header.size += sizeof(u64);
2400 if (sample_type & PERF_SAMPLE_CPU) {
2401 header.type |= PERF_SAMPLE_CPU;
2402 header.size += sizeof(cpu_entry);
2404 cpu_entry.cpu = raw_smp_processor_id();
2407 if (sample_type & PERF_SAMPLE_PERIOD) {
2408 header.type |= PERF_SAMPLE_PERIOD;
2409 header.size += sizeof(u64);
2412 if (sample_type & PERF_SAMPLE_GROUP) {
2413 header.type |= PERF_SAMPLE_GROUP;
2414 header.size += sizeof(u64) +
2415 counter->nr_siblings * sizeof(group_entry);
2418 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2419 callchain = perf_callchain(regs);
2422 callchain_size = (1 + callchain->nr) * sizeof(u64);
2424 header.type |= PERF_SAMPLE_CALLCHAIN;
2425 header.size += callchain_size;
2429 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2433 perf_output_put(&handle, header);
2435 if (sample_type & PERF_SAMPLE_IP)
2436 perf_output_put(&handle, ip);
2438 if (sample_type & PERF_SAMPLE_TID)
2439 perf_output_put(&handle, tid_entry);
2441 if (sample_type & PERF_SAMPLE_TIME)
2442 perf_output_put(&handle, time);
2444 if (sample_type & PERF_SAMPLE_ADDR)
2445 perf_output_put(&handle, addr);
2447 if (sample_type & PERF_SAMPLE_ID)
2448 perf_output_put(&handle, counter->id);
2450 if (sample_type & PERF_SAMPLE_CPU)
2451 perf_output_put(&handle, cpu_entry);
2453 if (sample_type & PERF_SAMPLE_PERIOD)
2454 perf_output_put(&handle, counter->hw.sample_period);
2457 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2459 if (sample_type & PERF_SAMPLE_GROUP) {
2460 struct perf_counter *leader, *sub;
2461 u64 nr = counter->nr_siblings;
2463 perf_output_put(&handle, nr);
2465 leader = counter->group_leader;
2466 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2468 sub->pmu->read(sub);
2470 group_entry.id = sub->id;
2471 group_entry.counter = atomic64_read(&sub->count);
2473 perf_output_put(&handle, group_entry);
2478 perf_output_copy(&handle, callchain, callchain_size);
2480 perf_output_end(&handle);
2487 struct perf_fork_event {
2488 struct task_struct *task;
2491 struct perf_event_header header;
2498 static void perf_counter_fork_output(struct perf_counter *counter,
2499 struct perf_fork_event *fork_event)
2501 struct perf_output_handle handle;
2502 int size = fork_event->event.header.size;
2503 struct task_struct *task = fork_event->task;
2504 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2509 fork_event->event.pid = perf_counter_pid(counter, task);
2510 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2512 perf_output_put(&handle, fork_event->event);
2513 perf_output_end(&handle);
2516 static int perf_counter_fork_match(struct perf_counter *counter)
2518 if (counter->attr.comm || counter->attr.mmap)
2524 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2525 struct perf_fork_event *fork_event)
2527 struct perf_counter *counter;
2529 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2533 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2534 if (perf_counter_fork_match(counter))
2535 perf_counter_fork_output(counter, fork_event);
2540 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2542 struct perf_cpu_context *cpuctx;
2543 struct perf_counter_context *ctx;
2545 cpuctx = &get_cpu_var(perf_cpu_context);
2546 perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2547 put_cpu_var(perf_cpu_context);
2551 * doesn't really matter which of the child contexts the
2552 * event ends up in.
2554 ctx = rcu_dereference(current->perf_counter_ctxp);
2556 perf_counter_fork_ctx(ctx, fork_event);
2560 void perf_counter_fork(struct task_struct *task)
2562 struct perf_fork_event fork_event;
2564 if (!atomic_read(&nr_comm_counters) &&
2565 !atomic_read(&nr_mmap_counters))
2568 fork_event = (struct perf_fork_event){
2572 .type = PERF_EVENT_FORK,
2573 .size = sizeof(fork_event.event),
2578 perf_counter_fork_event(&fork_event);
2585 struct perf_comm_event {
2586 struct task_struct *task;
2591 struct perf_event_header header;
2598 static void perf_counter_comm_output(struct perf_counter *counter,
2599 struct perf_comm_event *comm_event)
2601 struct perf_output_handle handle;
2602 int size = comm_event->event.header.size;
2603 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2608 comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2609 comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2611 perf_output_put(&handle, comm_event->event);
2612 perf_output_copy(&handle, comm_event->comm,
2613 comm_event->comm_size);
2614 perf_output_end(&handle);
2617 static int perf_counter_comm_match(struct perf_counter *counter)
2619 if (counter->attr.comm)
2625 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2626 struct perf_comm_event *comm_event)
2628 struct perf_counter *counter;
2630 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2634 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2635 if (perf_counter_comm_match(counter))
2636 perf_counter_comm_output(counter, comm_event);
2641 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2643 struct perf_cpu_context *cpuctx;
2644 struct perf_counter_context *ctx;
2646 char *comm = comm_event->task->comm;
2648 size = ALIGN(strlen(comm)+1, sizeof(u64));
2650 comm_event->comm = comm;
2651 comm_event->comm_size = size;
2653 comm_event->event.header.size = sizeof(comm_event->event) + size;
2655 cpuctx = &get_cpu_var(perf_cpu_context);
2656 perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2657 put_cpu_var(perf_cpu_context);
2661 * doesn't really matter which of the child contexts the
2662 * event ends up in.
2664 ctx = rcu_dereference(current->perf_counter_ctxp);
2666 perf_counter_comm_ctx(ctx, comm_event);
2670 void perf_counter_comm(struct task_struct *task)
2672 struct perf_comm_event comm_event;
2674 if (!atomic_read(&nr_comm_counters))
2677 comm_event = (struct perf_comm_event){
2680 .header = { .type = PERF_EVENT_COMM, },
2684 perf_counter_comm_event(&comm_event);
2691 struct perf_mmap_event {
2692 struct vm_area_struct *vma;
2694 const char *file_name;
2698 struct perf_event_header header;
2708 static void perf_counter_mmap_output(struct perf_counter *counter,
2709 struct perf_mmap_event *mmap_event)
2711 struct perf_output_handle handle;
2712 int size = mmap_event->event.header.size;
2713 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2718 mmap_event->event.pid = perf_counter_pid(counter, current);
2719 mmap_event->event.tid = perf_counter_tid(counter, current);
2721 perf_output_put(&handle, mmap_event->event);
2722 perf_output_copy(&handle, mmap_event->file_name,
2723 mmap_event->file_size);
2724 perf_output_end(&handle);
2727 static int perf_counter_mmap_match(struct perf_counter *counter,
2728 struct perf_mmap_event *mmap_event)
2730 if (counter->attr.mmap)
2736 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2737 struct perf_mmap_event *mmap_event)
2739 struct perf_counter *counter;
2741 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2745 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2746 if (perf_counter_mmap_match(counter, mmap_event))
2747 perf_counter_mmap_output(counter, mmap_event);
2752 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2754 struct perf_cpu_context *cpuctx;
2755 struct perf_counter_context *ctx;
2756 struct vm_area_struct *vma = mmap_event->vma;
2757 struct file *file = vma->vm_file;
2764 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2766 name = strncpy(tmp, "//enomem", sizeof(tmp));
2769 name = d_path(&file->f_path, buf, PATH_MAX);
2771 name = strncpy(tmp, "//toolong", sizeof(tmp));
2775 name = arch_vma_name(mmap_event->vma);
2780 name = strncpy(tmp, "[vdso]", sizeof(tmp));
2784 name = strncpy(tmp, "//anon", sizeof(tmp));
2789 size = ALIGN(strlen(name)+1, sizeof(u64));
2791 mmap_event->file_name = name;
2792 mmap_event->file_size = size;
2794 mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2796 cpuctx = &get_cpu_var(perf_cpu_context);
2797 perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2798 put_cpu_var(perf_cpu_context);
2802 * doesn't really matter which of the child contexts the
2803 * event ends up in.
2805 ctx = rcu_dereference(current->perf_counter_ctxp);
2807 perf_counter_mmap_ctx(ctx, mmap_event);
2813 void __perf_counter_mmap(struct vm_area_struct *vma)
2815 struct perf_mmap_event mmap_event;
2817 if (!atomic_read(&nr_mmap_counters))
2820 mmap_event = (struct perf_mmap_event){
2823 .header = { .type = PERF_EVENT_MMAP, },
2824 .start = vma->vm_start,
2825 .len = vma->vm_end - vma->vm_start,
2826 .pgoff = vma->vm_pgoff,
2830 perf_counter_mmap_event(&mmap_event);
2834 * Log sample_period changes so that analyzing tools can re-normalize the
2835 * event flow.
2838 static void perf_log_period(struct perf_counter *counter, u64 period)
2840 struct perf_output_handle handle;
2844 struct perf_event_header header;
2850 .type = PERF_EVENT_PERIOD,
2852 .size = sizeof(freq_event),
2854 .time = sched_clock(),
2859 if (counter->hw.sample_period == period)
2862 ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2866 perf_output_put(&handle, freq_event);
2867 perf_output_end(&handle);
2871 * IRQ throttle logging
2874 static void perf_log_throttle(struct perf_counter *counter, int enable)
2876 struct perf_output_handle handle;
2880 struct perf_event_header header;
2882 } throttle_event = {
2884 .type = PERF_EVENT_THROTTLE + 1,
2886 .size = sizeof(throttle_event),
2888 .time = sched_clock(),
2891 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2895 perf_output_put(&handle, throttle_event);
2896 perf_output_end(&handle);
2900 * Generic counter overflow handling.
2903 int perf_counter_overflow(struct perf_counter *counter,
2904 int nmi, struct pt_regs *regs, u64 addr)
2906 int events = atomic_read(&counter->event_limit);
2907 int throttle = counter->pmu->unthrottle != NULL;
2910 if (!throttle) {
2911 counter->hw.interrupts++;
2912 } else {
2913 if (counter->hw.interrupts != MAX_INTERRUPTS) {
2914 counter->hw.interrupts++;
2915 if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2916 counter->hw.interrupts = MAX_INTERRUPTS;
2917 perf_log_throttle(counter, 0);
2922 * Keep re-disabling the counter even though on the previous
2923 * pass we disabled it - just in case we raced with a
2924 * sched-in and the counter got enabled again:
2931 * XXX event_limit might not quite work as expected on inherited
2932 * counters
2935 counter->pending_kill = POLL_IN;
2936 if (events && atomic_dec_and_test(&counter->event_limit)) {
2938 counter->pending_kill = POLL_HUP;
2940 counter->pending_disable = 1;
2941 perf_pending_queue(&counter->pending,
2942 perf_pending_counter);
2944 perf_counter_disable(counter);
2947 perf_counter_output(counter, nmi, regs, addr);
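/*
 * A note on the throttling arithmetic above: hw.interrupts counts overflow
 * interrupts since it was last cleared (the per-tick frequency/unthrottle
 * code elsewhere in this file resets it), so HZ * hw.interrupts
 * extrapolates an interrupts-per-second rate; once that rate exceeds
 * sysctl_perf_counter_limit the counter is parked at MAX_INTERRUPTS and a
 * throttle event is logged.
 */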
2952 * Generic software counter infrastructure
2955 static void perf_swcounter_update(struct perf_counter *counter)
2957 struct hw_perf_counter *hwc = &counter->hw;
2958 u64 prev, now;
2959 s64 delta;
2961 again:
2962 prev = atomic64_read(&hwc->prev_count);
2963 now = atomic64_read(&hwc->count);
2964 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2965 goto again;
2967 delta = now - prev;
2969 atomic64_add(delta, &counter->count);
2970 atomic64_sub(delta, &hwc->period_left);
2973 static void perf_swcounter_set_period(struct perf_counter *counter)
2975 struct hw_perf_counter *hwc = &counter->hw;
2976 s64 left = atomic64_read(&hwc->period_left);
2977 s64 period = hwc->sample_period;
2979 if (unlikely(left <= -period)) {
2980 left = period;
2981 atomic64_set(&hwc->period_left, left);
2984 if (unlikely(left <= 0)) {
2985 left += period;
2986 atomic64_add(period, &hwc->period_left);
2989 atomic64_set(&hwc->prev_count, -left);
2990 atomic64_set(&hwc->count, -left);
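/*
 * Note the sign trick: prev_count and count are primed with -left, so the
 * counter value turns non-negative again exactly after 'left' more events;
 * perf_swcounter_add() below detects that with atomic64_add_negative() and
 * triggers perf_swcounter_overflow().
 */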
2993 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2995 enum hrtimer_restart ret = HRTIMER_RESTART;
2996 struct perf_counter *counter;
2997 struct pt_regs *regs;
3000 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3001 counter->pmu->read(counter);
3003 regs = get_irq_regs();
3005 * In case we exclude kernel IPs or are somehow not in interrupt
3006 * context, provide the next best thing, the user IP.
3008 if ((counter->attr.exclude_kernel || !regs) &&
3009 !counter->attr.exclude_user)
3010 regs = task_pt_regs(current);
3013 if (perf_counter_overflow(counter, 0, regs, 0))
3014 ret = HRTIMER_NORESTART;
3017 period = max_t(u64, 10000, counter->hw.sample_period);
3018 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
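/*
 * The period is clamped to at least 10000ns above so that a tiny (or not
 * yet adjusted) sample_period cannot re-arm the hrtimer at an unbounded
 * rate; the cpu-clock and task-clock enable paths below apply the same
 * clamp when the timer is first started.
 */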
3023 static void perf_swcounter_overflow(struct perf_counter *counter,
3024 int nmi, struct pt_regs *regs, u64 addr)
3026 perf_swcounter_update(counter);
3027 perf_swcounter_set_period(counter);
3028 if (perf_counter_overflow(counter, nmi, regs, addr))
3029 /* soft-disable the counter */
3030 counter->state = PERF_COUNTER_STATE_OFF;
3034 static int perf_swcounter_is_counting(struct perf_counter *counter)
3036 struct perf_counter_context *ctx;
3037 unsigned long flags;
3040 if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3043 if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3047 * If the counter is inactive, it could be just because
3048 * its task is scheduled out, or because it's in a group
3049 * which could not go on the PMU. We want to count in
3050 * the first case but not the second. If the context is
3051 * currently active then an inactive software counter must
3052 * be the second case. If it's not currently active then
3053 * we need to know whether the counter was active when the
3054 * context was last active, which we can determine by
3055 * comparing counter->tstamp_stopped with ctx->time.
3057 * We are within an RCU read-side critical section,
3058 * which protects the existence of *ctx.
3061 spin_lock_irqsave(&ctx->lock, flags);
3063 /* Re-check state now we have the lock */
3064 if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3065 counter->ctx->is_active ||
3066 counter->tstamp_stopped < ctx->time)
3068 spin_unlock_irqrestore(&ctx->lock, flags);
3072 static int perf_swcounter_match(struct perf_counter *counter,
3073 enum perf_event_types type,
3074 u32 event, struct pt_regs *regs)
3078 event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
3080 if (!perf_swcounter_is_counting(counter))
3083 if (counter->attr.config != event_config)
3087 if (counter->attr.exclude_user && user_mode(regs))
3090 if (counter->attr.exclude_kernel && !user_mode(regs))
3097 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3098 int nmi, struct pt_regs *regs, u64 addr)
3100 int neg = atomic64_add_negative(nr, &counter->hw.count);
3102 if (counter->hw.sample_period && !neg && regs)
3103 perf_swcounter_overflow(counter, nmi, regs, addr);
3106 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3107 enum perf_event_types type, u32 event,
3108 u64 nr, int nmi, struct pt_regs *regs,
3111 struct perf_counter *counter;
3113 if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3117 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3118 if (perf_swcounter_match(counter, type, event, regs))
3119 perf_swcounter_add(counter, nr, nmi, regs, addr);
3124 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3126 if (in_nmi())
3127 return &cpuctx->recursion[3];
3129 if (in_irq())
3130 return &cpuctx->recursion[2];
3132 if (in_softirq())
3133 return &cpuctx->recursion[1];
3135 return &cpuctx->recursion[0];
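/*
 * One recursion counter per execution context (task, softirq, hardirq,
 * NMI): a software event raised while __perf_swcounter_event() is already
 * running in the same context is meant to be dropped rather than handled
 * recursively.
 */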
3138 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
3139 u64 nr, int nmi, struct pt_regs *regs,
3142 struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3143 int *recursion = perf_swcounter_recursion_context(cpuctx);
3144 struct perf_counter_context *ctx;
3152 perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3153 nr, nmi, regs, addr);
3156 * doesn't really matter which of the child contexts the
3157 * event ends up in.
3159 ctx = rcu_dereference(current->perf_counter_ctxp);
3161 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3168 put_cpu_var(perf_cpu_context);
3172 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3174 __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3177 static void perf_swcounter_read(struct perf_counter *counter)
3179 perf_swcounter_update(counter);
3182 static int perf_swcounter_enable(struct perf_counter *counter)
3184 perf_swcounter_set_period(counter);
3188 static void perf_swcounter_disable(struct perf_counter *counter)
3190 perf_swcounter_update(counter);
3193 static const struct pmu perf_ops_generic = {
3194 .enable = perf_swcounter_enable,
3195 .disable = perf_swcounter_disable,
3196 .read = perf_swcounter_read,
3200 * Software counter: cpu wall time clock
3203 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3205 int cpu = raw_smp_processor_id();
3209 now = cpu_clock(cpu);
3210 prev = atomic64_read(&counter->hw.prev_count);
3211 atomic64_set(&counter->hw.prev_count, now);
3212 atomic64_add(now - prev, &counter->count);
3215 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3217 struct hw_perf_counter *hwc = &counter->hw;
3218 int cpu = raw_smp_processor_id();
3220 atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3221 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3222 hwc->hrtimer.function = perf_swcounter_hrtimer;
3223 if (hwc->sample_period) {
3224 u64 period = max_t(u64, 10000, hwc->sample_period);
3225 __hrtimer_start_range_ns(&hwc->hrtimer,
3226 ns_to_ktime(period), 0,
3227 HRTIMER_MODE_REL, 0);
3233 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3235 if (counter->hw.sample_period)
3236 hrtimer_cancel(&counter->hw.hrtimer);
3237 cpu_clock_perf_counter_update(counter);
3240 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3242 cpu_clock_perf_counter_update(counter);
3245 static const struct pmu perf_ops_cpu_clock = {
3246 .enable = cpu_clock_perf_counter_enable,
3247 .disable = cpu_clock_perf_counter_disable,
3248 .read = cpu_clock_perf_counter_read,
3252 * Software counter: task time clock
3255 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3260 prev = atomic64_xchg(&counter->hw.prev_count, now);
3262 atomic64_add(delta, &counter->count);
3265 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3267 struct hw_perf_counter *hwc = &counter->hw;
3270 now = counter->ctx->time;
3272 atomic64_set(&hwc->prev_count, now);
3273 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3274 hwc->hrtimer.function = perf_swcounter_hrtimer;
3275 if (hwc->sample_period) {
3276 u64 period = max_t(u64, 10000, hwc->sample_period);
3277 __hrtimer_start_range_ns(&hwc->hrtimer,
3278 ns_to_ktime(period), 0,
3279 HRTIMER_MODE_REL, 0);
3285 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3287 if (counter->hw.sample_period)
3288 hrtimer_cancel(&counter->hw.hrtimer);
3289 task_clock_perf_counter_update(counter, counter->ctx->time);
3293 static void task_clock_perf_counter_read(struct perf_counter *counter)
3295 u64 time;
3297 if (!in_nmi()) {
3298 update_context_time(counter->ctx);
3299 time = counter->ctx->time;
3300 } else {
3301 u64 now = perf_clock();
3302 u64 delta = now - counter->ctx->timestamp;
3303 time = counter->ctx->time + delta;
3304 }
3306 task_clock_perf_counter_update(counter, time);
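/*
 * From NMI context the read path above avoids update_context_time() and
 * instead extrapolates from the last recorded ctx->timestamp, so a value
 * read from NMI may be very slightly stale.
 */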
3309 static const struct pmu perf_ops_task_clock = {
3310 .enable = task_clock_perf_counter_enable,
3311 .disable = task_clock_perf_counter_disable,
3312 .read = task_clock_perf_counter_read,
3316 * Software counter: cpu migrations
3318 void perf_counter_task_migration(struct task_struct *task, int cpu)
3320 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3321 struct perf_counter_context *ctx;
3323 perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
3324 PERF_COUNT_CPU_MIGRATIONS,
3327 ctx = perf_pin_task_context(task);
3328 if (ctx) {
3329 perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
3330 PERF_COUNT_CPU_MIGRATIONS,
3332 perf_unpin_context(ctx);
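/*
 * A migration is counted both in the given CPU's context and in the
 * migrating task's own context (if it has one), so cpu-wide and per-task
 * PERF_COUNT_CPU_MIGRATIONS counters both observe it.
 */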
3336 #ifdef CONFIG_EVENT_PROFILE
3337 void perf_tpcounter_event(int event_id)
3339 struct pt_regs *regs = get_irq_regs();
3341 if (!regs)
3342 regs = task_pt_regs(current);
3344 __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3346 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3348 extern int ftrace_profile_enable(int);
3349 extern void ftrace_profile_disable(int);
3351 static void tp_perf_counter_destroy(struct perf_counter *counter)
3353 ftrace_profile_disable(perf_event_id(&counter->attr));
3356 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3358 int event_id = perf_event_id(&counter->attr);
3361 ret = ftrace_profile_enable(event_id);
3365 counter->destroy = tp_perf_counter_destroy;
3366 counter->hw.sample_period = counter->attr.sample_period;
3368 return &perf_ops_generic;
3371 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3377 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3379 const struct pmu *pmu = NULL;
3382 * Software counters (currently) can't in general distinguish
3383 * between user, kernel and hypervisor events.
3384 * However, context switches and cpu migrations are considered
3385 * to be kernel events, and page faults are never hypervisor
3386 * events.
3388 switch (perf_event_id(&counter->attr)) {
3389 case PERF_COUNT_CPU_CLOCK:
3390 pmu = &perf_ops_cpu_clock;
3393 case PERF_COUNT_TASK_CLOCK:
3395 * If the user instantiates this as a per-cpu counter,
3396 * use the cpu_clock counter instead.
3398 if (counter->ctx->task)
3399 pmu = &perf_ops_task_clock;
3400 else
3401 pmu = &perf_ops_cpu_clock;
3404 case PERF_COUNT_PAGE_FAULTS:
3405 case PERF_COUNT_PAGE_FAULTS_MIN:
3406 case PERF_COUNT_PAGE_FAULTS_MAJ:
3407 case PERF_COUNT_CONTEXT_SWITCHES:
3408 case PERF_COUNT_CPU_MIGRATIONS:
3409 pmu = &perf_ops_generic;
3417 * Allocate and initialize a counter structure
3419 static struct perf_counter *
3420 perf_counter_alloc(struct perf_counter_attr *attr,
3422 struct perf_counter_context *ctx,
3423 struct perf_counter *group_leader,
3426 const struct pmu *pmu;
3427 struct perf_counter *counter;
3428 struct hw_perf_counter *hwc;
3431 counter = kzalloc(sizeof(*counter), gfpflags);
3433 return ERR_PTR(-ENOMEM);
3436 * Single counters are their own group leaders, with an
3437 * empty sibling list:
3440 group_leader = counter;
3442 mutex_init(&counter->child_mutex);
3443 INIT_LIST_HEAD(&counter->child_list);
3445 INIT_LIST_HEAD(&counter->list_entry);
3446 INIT_LIST_HEAD(&counter->event_entry);
3447 INIT_LIST_HEAD(&counter->sibling_list);
3448 init_waitqueue_head(&counter->waitq);
3450 mutex_init(&counter->mmap_mutex);
3453 counter->attr = *attr;
3454 counter->group_leader = group_leader;
3455 counter->pmu = NULL;
3457 counter->oncpu = -1;
3459 counter->ns = get_pid_ns(current->nsproxy->pid_ns);
3460 counter->id = atomic64_inc_return(&perf_counter_id);
3462 counter->state = PERF_COUNTER_STATE_INACTIVE;
3465 counter->state = PERF_COUNTER_STATE_OFF;
3470 if (attr->freq && attr->sample_freq)
3471 hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
3472 else
3473 hwc->sample_period = attr->sample_period;
3476 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3478 if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3481 if (perf_event_raw(attr)) {
3482 pmu = hw_perf_counter_init(counter);
3486 switch (perf_event_type(attr)) {
3487 case PERF_TYPE_HARDWARE:
3488 pmu = hw_perf_counter_init(counter);
3491 case PERF_TYPE_SOFTWARE:
3492 pmu = sw_perf_counter_init(counter);
3495 case PERF_TYPE_TRACEPOINT:
3496 pmu = tp_perf_counter_init(counter);
3503 else if (IS_ERR(pmu))
3508 put_pid_ns(counter->ns);
3510 return ERR_PTR(err);
3515 atomic_inc(&nr_counters);
3516 if (counter->attr.mmap)
3517 atomic_inc(&nr_mmap_counters);
3518 if (counter->attr.comm)
3519 atomic_inc(&nr_comm_counters);
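/*
 * The atomic nr_*_counters totals bumped above are what let the
 * perf_counter_fork()/perf_counter_comm()/__perf_counter_mmap() hooks
 * earlier in this file return immediately when no counter anywhere has
 * asked for those side-band events.
 */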
3525 * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3527 * @attr_uptr: event type attributes for monitoring/sampling
3528 * @pid: target pid
3529 * @cpu: target cpu
3530 * @group_fd: group leader counter fd
3532 SYSCALL_DEFINE5(perf_counter_open,
3533 const struct perf_counter_attr __user *, attr_uptr,
3534 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3536 struct perf_counter *counter, *group_leader;
3537 struct perf_counter_attr attr;
3538 struct perf_counter_context *ctx;
3539 struct file *counter_file = NULL;
3540 struct file *group_file = NULL;
3541 int fput_needed = 0;
3542 int fput_needed2 = 0;
3545 /* for future expandability... */
3549 if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
3553 * Get the target context (task or percpu):
3555 ctx = find_get_context(pid, cpu);
3557 return PTR_ERR(ctx);
3560 * Look up the group leader (we will attach this counter to it):
3562 group_leader = NULL;
3563 if (group_fd != -1) {
3565 group_file = fget_light(group_fd, &fput_needed);
3567 goto err_put_context;
3568 if (group_file->f_op != &perf_fops)
3569 goto err_put_context;
3571 group_leader = group_file->private_data;
3573 * Do not allow a recursive hierarchy (this new sibling
3574 * becoming part of another group-sibling):
3576 if (group_leader->group_leader != group_leader)
3577 goto err_put_context;
3579 * Do not allow to attach to a group in a different
3580 * task or CPU context:
3582 if (group_leader->ctx != ctx)
3583 goto err_put_context;
3585 * Only a group leader can be exclusive or pinned
3587 if (attr.exclusive || attr.pinned)
3588 goto err_put_context;
3591 counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3592 GFP_KERNEL);
3593 ret = PTR_ERR(counter);
3594 if (IS_ERR(counter))
3595 goto err_put_context;
3597 ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3599 goto err_free_put_context;
3601 counter_file = fget_light(ret, &fput_needed2);
3603 goto err_free_put_context;
3605 counter->filp = counter_file;
3606 WARN_ON_ONCE(ctx->parent_ctx);
3607 mutex_lock(&ctx->mutex);
3608 perf_install_in_context(ctx, counter, cpu);
3610 mutex_unlock(&ctx->mutex);
3612 counter->owner = current;
3613 get_task_struct(current);
3614 mutex_lock(&current->perf_counter_mutex);
3615 list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3616 mutex_unlock(&current->perf_counter_mutex);
3618 fput_light(counter_file, fput_needed2);
3621 fput_light(group_file, fput_needed);
3625 err_free_put_context:
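/*
 * Illustrative usage from userspace (a sketch only; the attr layout, the
 * event name and the syscall number come from the perf_counter ABI headers
 * and are assumptions here, not defined by this file):
 *
 *	struct perf_counter_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_CPU_CYCLES,
 *		.disabled	= 1,
 *	};
 *
 *	pid 0 means the current task, cpu -1 means all cpus, group_fd -1
 *	means no group leader, and flags must currently be 0:
 *
 *	int fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 */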
3635 * inherit a counter from parent task to child task:
3637 static struct perf_counter *
3638 inherit_counter(struct perf_counter *parent_counter,
3639 struct task_struct *parent,
3640 struct perf_counter_context *parent_ctx,
3641 struct task_struct *child,
3642 struct perf_counter *group_leader,
3643 struct perf_counter_context *child_ctx)
3645 struct perf_counter *child_counter;
3648 * Instead of creating recursive hierarchies of counters,
3649 * we link inherited counters back to the original parent,
3650 * which has a filp for sure, which we use as the reference
3651 * count:
3653 if (parent_counter->parent)
3654 parent_counter = parent_counter->parent;
3656 child_counter = perf_counter_alloc(&parent_counter->attr,
3657 parent_counter->cpu, child_ctx,
3658 group_leader, GFP_KERNEL);
3659 if (IS_ERR(child_counter))
3660 return child_counter;
3664 * Make the child state follow the state of the parent counter,
3665 * not its attr.disabled bit. We hold the parent's mutex,
3666 * so we won't race with perf_counter_{en, dis}able_family.
3668 if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3669 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3671 child_counter->state = PERF_COUNTER_STATE_OFF;
3674 * Link it up in the child's context:
3676 add_counter_to_ctx(child_counter, child_ctx);
3678 child_counter->parent = parent_counter;
3680 * inherit into child's child as well:
3682 child_counter->attr.inherit = 1;
3685 * Get a reference to the parent filp - we will fput it
3686 * when the child counter exits. This is safe to do because
3687 * we are in the parent and we know that the filp still
3688 * exists and has a nonzero count:
3690 atomic_long_inc(&parent_counter->filp->f_count);
3693 * Link this into the parent counter's child list
3695 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3696 mutex_lock(&parent_counter->child_mutex);
3697 list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3698 mutex_unlock(&parent_counter->child_mutex);
3700 return child_counter;
3703 static int inherit_group(struct perf_counter *parent_counter,
3704 struct task_struct *parent,
3705 struct perf_counter_context *parent_ctx,
3706 struct task_struct *child,
3707 struct perf_counter_context *child_ctx)
3709 struct perf_counter *leader;
3710 struct perf_counter *sub;
3711 struct perf_counter *child_ctr;
3713 leader = inherit_counter(parent_counter, parent, parent_ctx,
3714 child, NULL, child_ctx);
3716 return PTR_ERR(leader);
3717 list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3718 child_ctr = inherit_counter(sub, parent, parent_ctx,
3719 child, leader, child_ctx);
3720 if (IS_ERR(child_ctr))
3721 return PTR_ERR(child_ctr);
3726 static void sync_child_counter(struct perf_counter *child_counter,
3727 struct perf_counter *parent_counter)
3731 child_val = atomic64_read(&child_counter->count);
3734 * Add back the child's count to the parent's count:
3736 atomic64_add(child_val, &parent_counter->count);
3737 atomic64_add(child_counter->total_time_enabled,
3738 &parent_counter->child_total_time_enabled);
3739 atomic64_add(child_counter->total_time_running,
3740 &parent_counter->child_total_time_running);
3743 * Remove this counter from the parent's list
3745 WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3746 mutex_lock(&parent_counter->child_mutex);
3747 list_del_init(&child_counter->child_list);
3748 mutex_unlock(&parent_counter->child_mutex);
3751 * Release the parent counter, if this was the last
3752 * reference to it.
3754 fput(parent_counter->filp);
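/*
 * The fput() above pairs with the atomic_long_inc() on the parent's filp
 * taken in inherit_counter(), so the parent counter's file stays pinned
 * for as long as any inherited child counter still exists.
 */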
3758 __perf_counter_exit_task(struct perf_counter *child_counter,
3759 struct perf_counter_context *child_ctx)
3761 struct perf_counter *parent_counter;
3763 update_counter_times(child_counter);
3764 perf_counter_remove_from_context(child_counter);
3766 parent_counter = child_counter->parent;
3768 * It can happen that parent exits first, and has counters
3769 * that are still around due to the child reference. These
3770 * counters need to be zapped - but otherwise linger.
3772 if (parent_counter) {
3773 sync_child_counter(child_counter, parent_counter);
3774 free_counter(child_counter);
3779 * When a child task exits, feed back counter values to parent counters.
3781 void perf_counter_exit_task(struct task_struct *child)
3783 struct perf_counter *child_counter, *tmp;
3784 struct perf_counter_context *child_ctx;
3785 unsigned long flags;
3787 if (likely(!child->perf_counter_ctxp))
3790 local_irq_save(flags);
3792 * We can't reschedule here because interrupts are disabled,
3793 * and either child is current or it is a task that can't be
3794 * scheduled, so we are now safe from rescheduling changing
3795 * our context.
3797 child_ctx = child->perf_counter_ctxp;
3798 __perf_counter_task_sched_out(child_ctx);
3801 * Take the context lock here so that if find_get_context is
3802 * reading child->perf_counter_ctxp, we wait until it has
3803 * incremented the context's refcount before we do put_ctx below.
3805 spin_lock(&child_ctx->lock);
3806 child->perf_counter_ctxp = NULL;
3807 if (child_ctx->parent_ctx) {
3809 * This context is a clone; unclone it so it can't get
3810 * swapped to another process while we're removing all
3811 * the counters from it.
3813 put_ctx(child_ctx->parent_ctx);
3814 child_ctx->parent_ctx = NULL;
3816 spin_unlock(&child_ctx->lock);
3817 local_irq_restore(flags);
3819 mutex_lock(&child_ctx->mutex);
3822 list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3824 __perf_counter_exit_task(child_counter, child_ctx);
3827 * If the last counter was a group counter, it will have appended all
3828 * its siblings to the list, but we obtained 'tmp' before that which
3829 * will still point to the list head terminating the iteration.
3831 if (!list_empty(&child_ctx->counter_list))
3834 mutex_unlock(&child_ctx->mutex);
3840 * free an unexposed, unused context as created by inheritance by
3841 * perf_counter_init_task() below, used by fork() in case of failure.
3843 void perf_counter_free_task(struct task_struct *task)
3845 struct perf_counter_context *ctx = task->perf_counter_ctxp;
3846 struct perf_counter *counter, *tmp;
3851 mutex_lock(&ctx->mutex);
3853 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
3854 struct perf_counter *parent = counter->parent;
3856 if (WARN_ON_ONCE(!parent))
3859 mutex_lock(&parent->child_mutex);
3860 list_del_init(&counter->child_list);
3861 mutex_unlock(&parent->child_mutex);
3865 list_del_counter(counter, ctx);
3866 free_counter(counter);
3869 if (!list_empty(&ctx->counter_list))
3872 mutex_unlock(&ctx->mutex);
3878 * Initialize the perf_counter context in task_struct
3880 int perf_counter_init_task(struct task_struct *child)
3882 struct perf_counter_context *child_ctx, *parent_ctx;
3883 struct perf_counter_context *cloned_ctx;
3884 struct perf_counter *counter;
3885 struct task_struct *parent = current;
3886 int inherited_all = 1;
3889 child->perf_counter_ctxp = NULL;
3891 mutex_init(&child->perf_counter_mutex);
3892 INIT_LIST_HEAD(&child->perf_counter_list);
3894 if (likely(!parent->perf_counter_ctxp))
3898 * This is executed from the parent task context, so inherit
3899 * counters that have been marked for cloning.
3900 * First allocate and initialize a context for the child.
3903 child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3907 __perf_counter_init_context(child_ctx, child);
3908 child->perf_counter_ctxp = child_ctx;
3909 get_task_struct(child);
3912 * If the parent's context is a clone, pin it so it won't get
3913 * swapped under us.
3915 parent_ctx = perf_pin_task_context(parent);
3918 * No need to check if parent_ctx != NULL here; since we saw
3919 * it non-NULL earlier, the only reason for it to become NULL
3920 * is if we exit, and since we're currently in the middle of
3921 * a fork we can't be exiting at the same time.
3925 * Lock the parent list. No need to lock the child - not PID
3926 * hashed yet and not running, so nobody can access it.
3928 mutex_lock(&parent_ctx->mutex);
3931 * We don't have to disable NMIs - we are only looking at
3932 * the list, not manipulating it:
3934 list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3935 if (counter != counter->group_leader)
3938 if (!counter->attr.inherit) {
3943 ret = inherit_group(counter, parent, parent_ctx,
3951 if (inherited_all) {
3953 * Mark the child context as a clone of the parent
3954 * context, or of whatever the parent is a clone of.
3955 * Note that if the parent is a clone, it could get
3956 * uncloned at any point, but that doesn't matter
3957 * because the list of counters and the generation
3958 * count can't have changed since we took the mutex.
3960 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3961 if (cloned_ctx) {
3962 child_ctx->parent_ctx = cloned_ctx;
3963 child_ctx->parent_gen = parent_ctx->parent_gen;
3964 } else {
3965 child_ctx->parent_ctx = parent_ctx;
3966 child_ctx->parent_gen = parent_ctx->generation;
3968 get_ctx(child_ctx->parent_ctx);
3971 mutex_unlock(&parent_ctx->mutex);
3973 perf_unpin_context(parent_ctx);
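/*
 * Summary of the inheritance pass above: only group leaders with
 * attr.inherit set are copied into the child, and the child context is
 * marked as a clone of the parent (or of whatever the parent itself
 * cloned) only when every counter in the parent context was inheritable
 * and was copied successfully; that clone marking is what allows whole
 * contexts to be swapped between tasks at context-switch time instead of
 * rescheduling individual counters.
 */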
3978 static void __cpuinit perf_counter_init_cpu(int cpu)
3980 struct perf_cpu_context *cpuctx;
3982 cpuctx = &per_cpu(perf_cpu_context, cpu);
3983 __perf_counter_init_context(&cpuctx->ctx, NULL);
3985 spin_lock(&perf_resource_lock);
3986 cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3987 spin_unlock(&perf_resource_lock);
3989 hw_perf_counter_setup(cpu);
3992 #ifdef CONFIG_HOTPLUG_CPU
3993 static void __perf_counter_exit_cpu(void *info)
3995 struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3996 struct perf_counter_context *ctx = &cpuctx->ctx;
3997 struct perf_counter *counter, *tmp;
3999 list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4000 __perf_counter_remove_from_context(counter);
4002 static void perf_counter_exit_cpu(int cpu)
4004 struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4005 struct perf_counter_context *ctx = &cpuctx->ctx;
4007 mutex_lock(&ctx->mutex);
4008 smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4009 mutex_unlock(&ctx->mutex);
4012 static inline void perf_counter_exit_cpu(int cpu) { }
4015 static int __cpuinit
4016 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4018 unsigned int cpu = (long)hcpu;
4022 case CPU_UP_PREPARE:
4023 case CPU_UP_PREPARE_FROZEN:
4024 perf_counter_init_cpu(cpu);
4027 case CPU_DOWN_PREPARE:
4028 case CPU_DOWN_PREPARE_FROZEN:
4029 perf_counter_exit_cpu(cpu);
4040 * This has to have a higher priority than migration_notifier in sched.c.
4042 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4043 .notifier_call = perf_cpu_notify,
4047 void __init perf_counter_init(void)
4049 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4050 (void *)(long)smp_processor_id());
4051 register_cpu_notifier(&perf_cpu_nb);
4054 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4056 return sprintf(buf, "%d\n", perf_reserved_percpu);
4060 perf_set_reserve_percpu(struct sysdev_class *class,
4064 struct perf_cpu_context *cpuctx;
4068 err = strict_strtoul(buf, 10, &val);
4071 if (val > perf_max_counters)
4074 spin_lock(&perf_resource_lock);
4075 perf_reserved_percpu = val;
4076 for_each_online_cpu(cpu) {
4077 cpuctx = &per_cpu(perf_cpu_context, cpu);
4078 spin_lock_irq(&cpuctx->ctx.lock);
4079 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4080 perf_max_counters - perf_reserved_percpu);
4081 cpuctx->max_pertask = mpt;
4082 spin_unlock_irq(&cpuctx->ctx.lock);
4084 spin_unlock(&perf_resource_lock);
4089 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4091 return sprintf(buf, "%d\n", perf_overcommit);
4095 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4100 err = strict_strtoul(buf, 10, &val);
4106 spin_lock(&perf_resource_lock);
4107 perf_overcommit = val;
4108 spin_unlock(&perf_resource_lock);
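/*
 * Both sysfs knobs are plain integers protected by perf_resource_lock:
 * writing reserve_percpu immediately recomputes max_pertask for every
 * online CPU, while writing overcommit simply records the new value here
 * for the counter-scheduling code to consult.
 */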
4113 static SYSDEV_CLASS_ATTR(
4116 perf_show_reserve_percpu,
4117 perf_set_reserve_percpu
4120 static SYSDEV_CLASS_ATTR(
4123 perf_show_overcommit,
4127 static struct attribute *perfclass_attrs[] = {
4128 &attr_reserve_percpu.attr,
4129 &attr_overcommit.attr,
4133 static struct attribute_group perfclass_attr_group = {
4134 .attrs = perfclass_attrs,
4135 .name = "perf_counters",
4138 static int __init perf_counter_sysfs_init(void)
4140 return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4141 &perfclass_attr_group);
4143 device_initcall(perf_counter_sysfs_init);