perf_counter: Add PERF_SAMPLE_PERIOD
[pandora-kernel.git] / kernel / perf_counter.c
1 /*
2  * Performance counter core code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  *  For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/sysfs.h>
19 #include <linux/dcache.h>
20 #include <linux/percpu.h>
21 #include <linux/ptrace.h>
22 #include <linux/vmstat.h>
23 #include <linux/hardirq.h>
24 #include <linux/rculist.h>
25 #include <linux/uaccess.h>
26 #include <linux/syscalls.h>
27 #include <linux/anon_inodes.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/perf_counter.h>
30
31 #include <asm/irq_regs.h>
32
33 /*
34  * Each CPU has a list of per CPU counters:
35  */
36 DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
37
38 int perf_max_counters __read_mostly = 1;
39 static int perf_reserved_percpu __read_mostly;
40 static int perf_overcommit __read_mostly = 1;
41
42 static atomic_t nr_counters __read_mostly;
43 static atomic_t nr_mmap_counters __read_mostly;
44 static atomic_t nr_comm_counters __read_mostly;
45
46 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
47 int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
48 int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
49
50 static atomic64_t perf_counter_id;
51
52 /*
53  * Lock for (sysadmin-configurable) counter reservations:
54  */
55 static DEFINE_SPINLOCK(perf_resource_lock);
56
57 /*
58  * Architecture provided APIs - weak aliases:
59  */
60 extern __weak const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
61 {
62         return NULL;
63 }
64
65 void __weak hw_perf_disable(void)               { barrier(); }
66 void __weak hw_perf_enable(void)                { barrier(); }
67
68 void __weak hw_perf_counter_setup(int cpu)      { barrier(); }
69
70 int __weak
71 hw_perf_group_sched_in(struct perf_counter *group_leader,
72                struct perf_cpu_context *cpuctx,
73                struct perf_counter_context *ctx, int cpu)
74 {
75         return 0;
76 }
77
78 void __weak perf_counter_print_debug(void)      { }
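/*
 * A hypothetical sketch (illustration only) of how an architecture
 * supplies the real implementations: it defines strong versions of the
 * hooks above with the same signatures, e.g.
 *
 *	const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 *	{
 *		... map counter->attr onto the hardware PMU and return
 *		    its struct pmu, or an error pointer on failure ...
 *	}
 *
 * The linker prefers a strong definition over a __weak stub, so the
 * core code never needs per-architecture #ifdefs here.
 */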
79
80 static DEFINE_PER_CPU(int, disable_count);
81
82 void __perf_disable(void)
83 {
84         __get_cpu_var(disable_count)++;
85 }
86
87 bool __perf_enable(void)
88 {
89         return !--__get_cpu_var(disable_count);
90 }
91
92 void perf_disable(void)
93 {
94         __perf_disable();
95         hw_perf_disable();
96 }
97
98 void perf_enable(void)
99 {
100         if (__perf_enable())
101                 hw_perf_enable();
102 }
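/*
 * A minimal sketch of how the refcounted scheme above is meant to nest
 * (hypothetical call sequence, for illustration only):
 *
 *	perf_disable();		disable_count 0 -> 1, hw_perf_disable()
 *	  perf_disable();	disable_count 1 -> 2, hardware stays off
 *	  perf_enable();	disable_count 2 -> 1, hardware still off
 *	perf_enable();		disable_count 1 -> 0, hw_perf_enable()
 *
 * Only the outermost pair touches the hardware, so nested critical
 * sections can disable/enable freely without tracking each other.
 */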
103
104 static void get_ctx(struct perf_counter_context *ctx)
105 {
106         atomic_inc(&ctx->refcount);
107 }
108
109 static void free_ctx(struct rcu_head *head)
110 {
111         struct perf_counter_context *ctx;
112
113         ctx = container_of(head, struct perf_counter_context, rcu_head);
114         kfree(ctx);
115 }
116
117 static void put_ctx(struct perf_counter_context *ctx)
118 {
119         if (atomic_dec_and_test(&ctx->refcount)) {
120                 if (ctx->parent_ctx)
121                         put_ctx(ctx->parent_ctx);
122                 if (ctx->task)
123                         put_task_struct(ctx->task);
124                 call_rcu(&ctx->rcu_head, free_ctx);
125         }
126 }
127
128 /*
129  * Get the perf_counter_context for a task and lock it.
130  * This has to cope with the fact that until it is locked,
131  * the context could get moved to another task.
132  */
133 static struct perf_counter_context *
134 perf_lock_task_context(struct task_struct *task, unsigned long *flags)
135 {
136         struct perf_counter_context *ctx;
137
138         rcu_read_lock();
139  retry:
140         ctx = rcu_dereference(task->perf_counter_ctxp);
141         if (ctx) {
142                 /*
143                  * If this context is a clone of another, it might
144                  * get swapped for another underneath us by
145                  * perf_counter_task_sched_out, though the
146                  * rcu_read_lock() protects us from any context
147                  * getting freed.  Lock the context and check if it
148                  * got swapped before we could get the lock, and retry
149                  * if so.  If we locked the right context, then it
150                  * can't get swapped on us any more.
151                  */
152                 spin_lock_irqsave(&ctx->lock, *flags);
153                 if (ctx != rcu_dereference(task->perf_counter_ctxp)) {
154                         spin_unlock_irqrestore(&ctx->lock, *flags);
155                         goto retry;
156                 }
157         }
158         rcu_read_unlock();
159         return ctx;
160 }
161
162 /*
163  * Get the context for a task and increment its pin_count so it
164  * can't get swapped to another task.  This also increments its
165  * reference count so that the context can't get freed.
166  */
167 static struct perf_counter_context *perf_pin_task_context(struct task_struct *task)
168 {
169         struct perf_counter_context *ctx;
170         unsigned long flags;
171
172         ctx = perf_lock_task_context(task, &flags);
173         if (ctx) {
174                 ++ctx->pin_count;
175                 get_ctx(ctx);
176                 spin_unlock_irqrestore(&ctx->lock, flags);
177         }
178         return ctx;
179 }
180
181 static void perf_unpin_context(struct perf_counter_context *ctx)
182 {
183         unsigned long flags;
184
185         spin_lock_irqsave(&ctx->lock, flags);
186         --ctx->pin_count;
187         spin_unlock_irqrestore(&ctx->lock, flags);
188         put_ctx(ctx);
189 }
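/*
 * Sketch of the intended pin/unpin pattern (hypothetical caller, for
 * illustration only):
 *
 *	ctx = perf_pin_task_context(task);
 *	if (ctx) {
 *		...		ctx can neither be swapped to another
 *				task nor freed while it is pinned
 *		perf_unpin_context(ctx);
 *	}
 *
 * perf_unpin_context() drops both the pin_count and the reference
 * taken by perf_pin_task_context().
 */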
190
191 /*
192  * Add a counter to the lists for its context.
193  * Must be called with ctx->mutex and ctx->lock held.
194  */
195 static void
196 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
197 {
198         struct perf_counter *group_leader = counter->group_leader;
199
200         /*
201          * Depending on whether it is a standalone or sibling counter,
202          * add it straight to the context's counter list, or to the group
203          * leader's sibling list:
204          */
205         if (group_leader == counter)
206                 list_add_tail(&counter->list_entry, &ctx->counter_list);
207         else {
208                 list_add_tail(&counter->list_entry, &group_leader->sibling_list);
209                 group_leader->nr_siblings++;
210         }
211
212         list_add_rcu(&counter->event_entry, &ctx->event_list);
213         ctx->nr_counters++;
214 }
215
216 /*
217  * Remove a counter from the lists for its context.
218  * Must be called with ctx->mutex and ctx->lock held.
219  */
220 static void
221 list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
222 {
223         struct perf_counter *sibling, *tmp;
224
225         if (list_empty(&counter->list_entry))
226                 return;
227         ctx->nr_counters--;
228
229         list_del_init(&counter->list_entry);
230         list_del_rcu(&counter->event_entry);
231
232         if (counter->group_leader != counter)
233                 counter->group_leader->nr_siblings--;
234
235         /*
236          * If this was a group counter with sibling counters then
237          * upgrade the siblings to singleton counters by adding them
238          * to the context list directly:
239          */
240         list_for_each_entry_safe(sibling, tmp,
241                                  &counter->sibling_list, list_entry) {
242
243                 list_move_tail(&sibling->list_entry, &ctx->counter_list);
244                 sibling->group_leader = sibling;
245         }
246 }
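/*
 * Illustration (hypothetical counters) of the topology the two helpers
 * above maintain for a group with leader L and siblings S1, S2:
 *
 *	ctx->counter_list:	L		(only group leaders)
 *	L->sibling_list:	S1, S2		(L->nr_siblings == 2)
 *	ctx->event_list:	L, S1, S2	(every counter, RCU list)
 *
 * list_del_counter(L, ctx) then promotes S1 and S2 to singleton
 * counters on ctx->counter_list, each becoming its own group_leader.
 */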
247
248 static void
249 counter_sched_out(struct perf_counter *counter,
250                   struct perf_cpu_context *cpuctx,
251                   struct perf_counter_context *ctx)
252 {
253         if (counter->state != PERF_COUNTER_STATE_ACTIVE)
254                 return;
255
256         counter->state = PERF_COUNTER_STATE_INACTIVE;
257         counter->tstamp_stopped = ctx->time;
258         counter->pmu->disable(counter);
259         counter->oncpu = -1;
260
261         if (!is_software_counter(counter))
262                 cpuctx->active_oncpu--;
263         ctx->nr_active--;
264         if (counter->attr.exclusive || !cpuctx->active_oncpu)
265                 cpuctx->exclusive = 0;
266 }
267
268 static void
269 group_sched_out(struct perf_counter *group_counter,
270                 struct perf_cpu_context *cpuctx,
271                 struct perf_counter_context *ctx)
272 {
273         struct perf_counter *counter;
274
275         if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
276                 return;
277
278         counter_sched_out(group_counter, cpuctx, ctx);
279
280         /*
281          * Schedule out siblings (if any):
282          */
283         list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
284                 counter_sched_out(counter, cpuctx, ctx);
285
286         if (group_counter->attr.exclusive)
287                 cpuctx->exclusive = 0;
288 }
289
290 /*
291  * Cross CPU call to remove a performance counter
292  *
293  * We disable the counter on the hardware level first. After that we
294  * remove it from the context list.
295  */
296 static void __perf_counter_remove_from_context(void *info)
297 {
298         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
299         struct perf_counter *counter = info;
300         struct perf_counter_context *ctx = counter->ctx;
301
302         /*
303          * If this is a task context, we need to check whether it is
304          * the current task context of this cpu. If not it has been
305          * scheduled out before the smp call arrived.
306          */
307         if (ctx->task && cpuctx->task_ctx != ctx)
308                 return;
309
310         spin_lock(&ctx->lock);
311         /*
312          * Protect the list operation against NMI by disabling the
313          * counters on a global level.
314          */
315         perf_disable();
316
317         counter_sched_out(counter, cpuctx, ctx);
318
319         list_del_counter(counter, ctx);
320
321         if (!ctx->task) {
322                 /*
323                  * Allow more per task counters with respect to the
324                  * reservation:
325                  */
326                 cpuctx->max_pertask =
327                         min(perf_max_counters - ctx->nr_counters,
328                             perf_max_counters - perf_reserved_percpu);
329         }
330
331         perf_enable();
332         spin_unlock(&ctx->lock);
333 }
334
335
336 /*
337  * Remove the counter from a task's (or a CPU's) list of counters.
338  *
339  * Must be called with ctx->mutex held.
340  *
341  * CPU counters are removed with an smp call. For task counters we only
342  * call when the task is on a CPU.
343  *
344  * If counter->ctx is a cloned context, callers must make sure that
345  * every task struct that counter->ctx->task could possibly point to
346  * remains valid.  This is OK when called from perf_release since
347  * that only calls us on the top-level context, which can't be a clone.
348  * When called from perf_counter_exit_task, it's OK because the
349  * context has been detached from its task.
350  */
351 static void perf_counter_remove_from_context(struct perf_counter *counter)
352 {
353         struct perf_counter_context *ctx = counter->ctx;
354         struct task_struct *task = ctx->task;
355
356         if (!task) {
357                 /*
358                  * Per cpu counters are removed via an smp call and
359          * the removal is always successful.
360                  */
361                 smp_call_function_single(counter->cpu,
362                                          __perf_counter_remove_from_context,
363                                          counter, 1);
364                 return;
365         }
366
367 retry:
368         task_oncpu_function_call(task, __perf_counter_remove_from_context,
369                                  counter);
370
371         spin_lock_irq(&ctx->lock);
372         /*
373          * If the context is active we need to retry the smp call.
374          */
375         if (ctx->nr_active && !list_empty(&counter->list_entry)) {
376                 spin_unlock_irq(&ctx->lock);
377                 goto retry;
378         }
379
380         /*
381          * The lock prevents this context from being scheduled in, so
382          * we can remove the counter safely if the call above did not
383          * succeed.
384          */
385         if (!list_empty(&counter->list_entry)) {
386                 list_del_counter(counter, ctx);
387         }
388         spin_unlock_irq(&ctx->lock);
389 }
390
391 static inline u64 perf_clock(void)
392 {
393         return cpu_clock(smp_processor_id());
394 }
395
396 /*
397  * Update the record of the current time in a context.
398  */
399 static void update_context_time(struct perf_counter_context *ctx)
400 {
401         u64 now = perf_clock();
402
403         ctx->time += now - ctx->timestamp;
404         ctx->timestamp = now;
405 }
406
407 /*
408  * Update the total_time_enabled and total_time_running fields for a counter.
409  */
410 static void update_counter_times(struct perf_counter *counter)
411 {
412         struct perf_counter_context *ctx = counter->ctx;
413         u64 run_end;
414
415         if (counter->state < PERF_COUNTER_STATE_INACTIVE)
416                 return;
417
418         counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
419
420         if (counter->state == PERF_COUNTER_STATE_INACTIVE)
421                 run_end = counter->tstamp_stopped;
422         else
423                 run_end = ctx->time;
424
425         counter->total_time_running = run_end - counter->tstamp_running;
426 }
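/*
 * Worked example of the bookkeeping above (made-up numbers): a counter
 * added and scheduled in at ctx->time == 100 has tstamp_enabled ==
 * tstamp_running == 100.  If it is scheduled out at ctx->time == 130
 * (tstamp_stopped = 130) and read at ctx->time == 150:
 *
 *	total_time_enabled = 150 - 100 = 50
 *	total_time_running = 130 - 100 = 30
 *
 * When it is scheduled back in at 150, counter_sched_in() advances
 * tstamp_running by 150 - 130 = 20, so total_time_running only ever
 * accumulates the intervals the counter actually spent on the PMU.
 */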
427
428 /*
429  * Update total_time_enabled and total_time_running for all counters in a group.
430  */
431 static void update_group_times(struct perf_counter *leader)
432 {
433         struct perf_counter *counter;
434
435         update_counter_times(leader);
436         list_for_each_entry(counter, &leader->sibling_list, list_entry)
437                 update_counter_times(counter);
438 }
439
440 /*
441  * Cross CPU call to disable a performance counter
442  */
443 static void __perf_counter_disable(void *info)
444 {
445         struct perf_counter *counter = info;
446         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
447         struct perf_counter_context *ctx = counter->ctx;
448
449         /*
450          * If this is a per-task counter, we need to check whether this
451          * counter's task is the current task on this cpu.
452          */
453         if (ctx->task && cpuctx->task_ctx != ctx)
454                 return;
455
456         spin_lock(&ctx->lock);
457
458         /*
459          * If the counter is on, turn it off.
460          * If it is in error state, leave it in error state.
461          */
462         if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
463                 update_context_time(ctx);
464                 update_counter_times(counter);
465                 if (counter == counter->group_leader)
466                         group_sched_out(counter, cpuctx, ctx);
467                 else
468                         counter_sched_out(counter, cpuctx, ctx);
469                 counter->state = PERF_COUNTER_STATE_OFF;
470         }
471
472         spin_unlock(&ctx->lock);
473 }
474
475 /*
476  * Disable a counter.
477  *
478  * If counter->ctx is a cloned context, callers must make sure that
479  * every task struct that counter->ctx->task could possibly point to
480  * remains valid.  This condition is satisfied when called through
481  * perf_counter_for_each_child or perf_counter_for_each because they
482  * hold the top-level counter's child_mutex, so any descendant that
483  * goes to exit will block in sync_child_counter.
484  * When called from perf_pending_counter it's OK because counter->ctx
485  * is the current context on this CPU and preemption is disabled,
486  * hence we can't get into perf_counter_task_sched_out for this context.
487  */
488 static void perf_counter_disable(struct perf_counter *counter)
489 {
490         struct perf_counter_context *ctx = counter->ctx;
491         struct task_struct *task = ctx->task;
492
493         if (!task) {
494                 /*
495                  * Disable the counter on the cpu that it's on
496                  */
497                 smp_call_function_single(counter->cpu, __perf_counter_disable,
498                                          counter, 1);
499                 return;
500         }
501
502  retry:
503         task_oncpu_function_call(task, __perf_counter_disable, counter);
504
505         spin_lock_irq(&ctx->lock);
506         /*
507          * If the counter is still active, we need to retry the cross-call.
508          */
509         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
510                 spin_unlock_irq(&ctx->lock);
511                 goto retry;
512         }
513
514         /*
515          * Since we have the lock this context can't be scheduled
516          * in, so we can change the state safely.
517          */
518         if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
519                 update_counter_times(counter);
520                 counter->state = PERF_COUNTER_STATE_OFF;
521         }
522
523         spin_unlock_irq(&ctx->lock);
524 }
525
526 static int
527 counter_sched_in(struct perf_counter *counter,
528                  struct perf_cpu_context *cpuctx,
529                  struct perf_counter_context *ctx,
530                  int cpu)
531 {
532         if (counter->state <= PERF_COUNTER_STATE_OFF)
533                 return 0;
534
535         counter->state = PERF_COUNTER_STATE_ACTIVE;
536         counter->oncpu = cpu;   /* TODO: put 'cpu' into cpuctx->cpu */
537         /*
538          * The new state must be visible before we turn it on in the hardware:
539          */
540         smp_wmb();
541
542         if (counter->pmu->enable(counter)) {
543                 counter->state = PERF_COUNTER_STATE_INACTIVE;
544                 counter->oncpu = -1;
545                 return -EAGAIN;
546         }
547
548         counter->tstamp_running += ctx->time - counter->tstamp_stopped;
549
550         if (!is_software_counter(counter))
551                 cpuctx->active_oncpu++;
552         ctx->nr_active++;
553
554         if (counter->attr.exclusive)
555                 cpuctx->exclusive = 1;
556
557         return 0;
558 }
559
560 static int
561 group_sched_in(struct perf_counter *group_counter,
562                struct perf_cpu_context *cpuctx,
563                struct perf_counter_context *ctx,
564                int cpu)
565 {
566         struct perf_counter *counter, *partial_group;
567         int ret;
568
569         if (group_counter->state == PERF_COUNTER_STATE_OFF)
570                 return 0;
571
572         ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
573         if (ret)
574                 return ret < 0 ? ret : 0;
575
576         if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
577                 return -EAGAIN;
578
579         /*
580          * Schedule in siblings as one group (if any):
581          */
582         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
583                 if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
584                         partial_group = counter;
585                         goto group_error;
586                 }
587         }
588
589         return 0;
590
591 group_error:
592         /*
593          * Groups can be scheduled in as one unit only, so undo any
594          * partial group before returning:
595          */
596         list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
597                 if (counter == partial_group)
598                         break;
599                 counter_sched_out(counter, cpuctx, ctx);
600         }
601         counter_sched_out(group_counter, cpuctx, ctx);
602
603         return -EAGAIN;
604 }
605
606 /*
607  * Return 1 for a group consisting entirely of software counters,
608  * 0 if the group contains any hardware counters.
609  */
610 static int is_software_only_group(struct perf_counter *leader)
611 {
612         struct perf_counter *counter;
613
614         if (!is_software_counter(leader))
615                 return 0;
616
617         list_for_each_entry(counter, &leader->sibling_list, list_entry)
618                 if (!is_software_counter(counter))
619                         return 0;
620
621         return 1;
622 }
623
624 /*
625  * Work out whether we can put this counter group on the CPU now.
626  */
627 static int group_can_go_on(struct perf_counter *counter,
628                            struct perf_cpu_context *cpuctx,
629                            int can_add_hw)
630 {
631         /*
632          * Groups consisting entirely of software counters can always go on.
633          */
634         if (is_software_only_group(counter))
635                 return 1;
636         /*
637          * If an exclusive group is already on, no other hardware
638          * counters can go on.
639          */
640         if (cpuctx->exclusive)
641                 return 0;
642         /*
643          * If this group is exclusive and there are already
644          * counters on the CPU, it can't go on.
645          */
646         if (counter->attr.exclusive && cpuctx->active_oncpu)
647                 return 0;
648         /*
649          * Otherwise, try to add it if all previous groups were able
650          * to go on.
651          */
652         return can_add_hw;
653 }
654
655 static void add_counter_to_ctx(struct perf_counter *counter,
656                                struct perf_counter_context *ctx)
657 {
658         list_add_counter(counter, ctx);
659         counter->tstamp_enabled = ctx->time;
660         counter->tstamp_running = ctx->time;
661         counter->tstamp_stopped = ctx->time;
662 }
663
664 /*
665  * Cross CPU call to install and enable a performance counter
666  *
667  * Must be called with ctx->mutex held
668  */
669 static void __perf_install_in_context(void *info)
670 {
671         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
672         struct perf_counter *counter = info;
673         struct perf_counter_context *ctx = counter->ctx;
674         struct perf_counter *leader = counter->group_leader;
675         int cpu = smp_processor_id();
676         int err;
677
678         /*
679          * If this is a task context, we need to check whether it is
680          * the current task context of this cpu. If not it has been
681          * scheduled out before the smp call arrived.
682          * Or possibly this is the right context but it isn't
683          * on this cpu because it had no counters.
684          */
685         if (ctx->task && cpuctx->task_ctx != ctx) {
686                 if (cpuctx->task_ctx || ctx->task != current)
687                         return;
688                 cpuctx->task_ctx = ctx;
689         }
690
691         spin_lock(&ctx->lock);
692         ctx->is_active = 1;
693         update_context_time(ctx);
694
695         /*
696          * Protect the list operation against NMI by disabling the
697          * counters on a global level. NOP for non NMI based counters.
698          */
699         perf_disable();
700
701         add_counter_to_ctx(counter, ctx);
702
703         /*
704          * Don't put the counter on if it is disabled or if
705          * it is in a group and the group isn't on.
706          */
707         if (counter->state != PERF_COUNTER_STATE_INACTIVE ||
708             (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE))
709                 goto unlock;
710
711         /*
712          * An exclusive counter can't go on if there are already active
713          * hardware counters, and no hardware counter can go on if there
714          * is already an exclusive counter on.
715          */
716         if (!group_can_go_on(counter, cpuctx, 1))
717                 err = -EEXIST;
718         else
719                 err = counter_sched_in(counter, cpuctx, ctx, cpu);
720
721         if (err) {
722                 /*
723                  * This counter couldn't go on.  If it is in a group
724                  * then we have to pull the whole group off.
725                  * If the counter group is pinned then put it in error state.
726                  */
727                 if (leader != counter)
728                         group_sched_out(leader, cpuctx, ctx);
729                 if (leader->attr.pinned) {
730                         update_group_times(leader);
731                         leader->state = PERF_COUNTER_STATE_ERROR;
732                 }
733         }
734
735         if (!err && !ctx->task && cpuctx->max_pertask)
736                 cpuctx->max_pertask--;
737
738  unlock:
739         perf_enable();
740
741         spin_unlock(&ctx->lock);
742 }
743
744 /*
745  * Attach a performance counter to a context
746  *
747  * First we add the counter to the list with the hardware enable bit
748  * in counter->hw_config cleared.
749  *
750  * If the counter is attached to a task which is on a CPU we use a smp
751  * call to enable it in the task context. The task might have been
752  * scheduled away, but we check this in the smp call again.
753  *
754  * Must be called with ctx->mutex held.
755  */
756 static void
757 perf_install_in_context(struct perf_counter_context *ctx,
758                         struct perf_counter *counter,
759                         int cpu)
760 {
761         struct task_struct *task = ctx->task;
762
763         if (!task) {
764                 /*
765                  * Per cpu counters are installed via an smp call and
766          * the install is always successful.
767                  */
768                 smp_call_function_single(cpu, __perf_install_in_context,
769                                          counter, 1);
770                 return;
771         }
772
773 retry:
774         task_oncpu_function_call(task, __perf_install_in_context,
775                                  counter);
776
777         spin_lock_irq(&ctx->lock);
778         /*
779          * If the context is active we need to retry the smp call.
780          */
781         if (ctx->is_active && list_empty(&counter->list_entry)) {
782                 spin_unlock_irq(&ctx->lock);
783                 goto retry;
784         }
785
786         /*
787          * The lock prevents this context from being scheduled in, so
788          * we can add the counter safely if the call above did not
789          * succeed.
790          */
791         if (list_empty(&counter->list_entry))
792                 add_counter_to_ctx(counter, ctx);
793         spin_unlock_irq(&ctx->lock);
794 }
795
796 /*
797  * Cross CPU call to enable a performance counter
798  */
799 static void __perf_counter_enable(void *info)
800 {
801         struct perf_counter *counter = info;
802         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
803         struct perf_counter_context *ctx = counter->ctx;
804         struct perf_counter *leader = counter->group_leader;
805         int err;
806
807         /*
808          * If this is a per-task counter, we need to check whether this
809          * counter's task is the current task on this cpu.
810          */
811         if (ctx->task && cpuctx->task_ctx != ctx) {
812                 if (cpuctx->task_ctx || ctx->task != current)
813                         return;
814                 cpuctx->task_ctx = ctx;
815         }
816
817         spin_lock(&ctx->lock);
818         ctx->is_active = 1;
819         update_context_time(ctx);
820
821         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
822                 goto unlock;
823         counter->state = PERF_COUNTER_STATE_INACTIVE;
824         counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
825
826         /*
827          * If the counter is in a group and isn't the group leader,
828          * then don't put it on unless the group is on.
829          */
830         if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
831                 goto unlock;
832
833         if (!group_can_go_on(counter, cpuctx, 1)) {
834                 err = -EEXIST;
835         } else {
836                 perf_disable();
837                 if (counter == leader)
838                         err = group_sched_in(counter, cpuctx, ctx,
839                                              smp_processor_id());
840                 else
841                         err = counter_sched_in(counter, cpuctx, ctx,
842                                                smp_processor_id());
843                 perf_enable();
844         }
845
846         if (err) {
847                 /*
848                  * If this counter can't go on and it's part of a
849                  * group, then the whole group has to come off.
850                  */
851                 if (leader != counter)
852                         group_sched_out(leader, cpuctx, ctx);
853                 if (leader->attr.pinned) {
854                         update_group_times(leader);
855                         leader->state = PERF_COUNTER_STATE_ERROR;
856                 }
857         }
858
859  unlock:
860         spin_unlock(&ctx->lock);
861 }
862
863 /*
864  * Enable a counter.
865  *
866  * If counter->ctx is a cloned context, callers must make sure that
867  * every task struct that counter->ctx->task could possibly point to
868  * remains valid.  This condition is satisfied when called through
869  * perf_counter_for_each_child or perf_counter_for_each as described
870  * for perf_counter_disable.
871  */
872 static void perf_counter_enable(struct perf_counter *counter)
873 {
874         struct perf_counter_context *ctx = counter->ctx;
875         struct task_struct *task = ctx->task;
876
877         if (!task) {
878                 /*
879                  * Enable the counter on the cpu that it's on
880                  */
881                 smp_call_function_single(counter->cpu, __perf_counter_enable,
882                                          counter, 1);
883                 return;
884         }
885
886         spin_lock_irq(&ctx->lock);
887         if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
888                 goto out;
889
890         /*
891          * If the counter is in error state, clear that first.
892          * That way, if we see the counter in error state below, we
893          * know that it has gone back into error state, as distinct
894          * from the task having been scheduled away before the
895          * cross-call arrived.
896          */
897         if (counter->state == PERF_COUNTER_STATE_ERROR)
898                 counter->state = PERF_COUNTER_STATE_OFF;
899
900  retry:
901         spin_unlock_irq(&ctx->lock);
902         task_oncpu_function_call(task, __perf_counter_enable, counter);
903
904         spin_lock_irq(&ctx->lock);
905
906         /*
907          * If the context is active and the counter is still off,
908          * we need to retry the cross-call.
909          */
910         if (ctx->is_active && counter->state == PERF_COUNTER_STATE_OFF)
911                 goto retry;
912
913         /*
914          * Since we have the lock this context can't be scheduled
915          * in, so we can change the state safely.
916          */
917         if (counter->state == PERF_COUNTER_STATE_OFF) {
918                 counter->state = PERF_COUNTER_STATE_INACTIVE;
919                 counter->tstamp_enabled =
920                         ctx->time - counter->total_time_enabled;
921         }
922  out:
923         spin_unlock_irq(&ctx->lock);
924 }
925
926 static int perf_counter_refresh(struct perf_counter *counter, int refresh)
927 {
928         /*
929          * not supported on inherited counters
930          */
931         if (counter->attr.inherit)
932                 return -EINVAL;
933
934         atomic_add(refresh, &counter->event_limit);
935         perf_counter_enable(counter);
936
937         return 0;
938 }
939
940 void __perf_counter_sched_out(struct perf_counter_context *ctx,
941                               struct perf_cpu_context *cpuctx)
942 {
943         struct perf_counter *counter;
944
945         spin_lock(&ctx->lock);
946         ctx->is_active = 0;
947         if (likely(!ctx->nr_counters))
948                 goto out;
949         update_context_time(ctx);
950
951         perf_disable();
952         if (ctx->nr_active) {
953                 list_for_each_entry(counter, &ctx->counter_list, list_entry) {
954                         if (counter != counter->group_leader)
955                                 counter_sched_out(counter, cpuctx, ctx);
956                         else
957                                 group_sched_out(counter, cpuctx, ctx);
958                 }
959         }
960         perf_enable();
961  out:
962         spin_unlock(&ctx->lock);
963 }
964
965 /*
966  * Test whether two contexts are equivalent, i.e. whether they
967  * have both been cloned from the same version of the same context
968  * and they both have the same number of enabled counters.
969  * If the number of enabled counters is the same, then the set
970  * of enabled counters should be the same, because these are both
971  * inherited contexts, therefore we can't access individual counters
972  * in them directly with an fd; we can only enable/disable all
973  * counters via prctl, or enable/disable all counters in a family
974  * via ioctl, which will have the same effect on both contexts.
975  */
976 static int context_equiv(struct perf_counter_context *ctx1,
977                          struct perf_counter_context *ctx2)
978 {
979         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
980                 && ctx1->parent_gen == ctx2->parent_gen
981                 && !ctx1->pin_count && !ctx2->pin_count;
982 }
983
984 /*
985  * Called from scheduler to remove the counters of the current task,
986  * with interrupts disabled.
987  *
988  * We stop each counter and update the counter value in counter->count.
989  *
990  * This does not protect us against NMI, but disable()
991  * sets the disabled bit in the control field of counter _before_
992  * accessing the counter control register. If an NMI hits, then it will
993  * not restart the counter.
994  */
995 void perf_counter_task_sched_out(struct task_struct *task,
996                                  struct task_struct *next, int cpu)
997 {
998         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
999         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1000         struct perf_counter_context *next_ctx;
1001         struct perf_counter_context *parent;
1002         struct pt_regs *regs;
1003         int do_switch = 1;
1004
1005         regs = task_pt_regs(task);
1006         perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 1, regs, 0);
1007
1008         if (likely(!ctx || !cpuctx->task_ctx))
1009                 return;
1010
1011         update_context_time(ctx);
1012
1013         rcu_read_lock();
1014         parent = rcu_dereference(ctx->parent_ctx);
1015         next_ctx = next->perf_counter_ctxp;
1016         if (parent && next_ctx &&
1017             rcu_dereference(next_ctx->parent_ctx) == parent) {
1018                 /*
1019                  * Looks like the two contexts are clones, so we might be
1020                  * able to optimize the context switch.  We lock both
1021                  * contexts and check that they are clones under the
1022                  * lock (including re-checking that neither has been
1023                  * uncloned in the meantime).  It doesn't matter which
1024                  * order we take the locks because no other cpu could
1025                  * be trying to lock both of these tasks.
1026                  */
1027                 spin_lock(&ctx->lock);
1028                 spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1029                 if (context_equiv(ctx, next_ctx)) {
1030                         /*
1031                          * XXX do we need a memory barrier of sorts
1032                          * wrt to rcu_dereference() of perf_counter_ctxp
1033                          */
1034                         task->perf_counter_ctxp = next_ctx;
1035                         next->perf_counter_ctxp = ctx;
1036                         ctx->task = next;
1037                         next_ctx->task = task;
1038                         do_switch = 0;
1039                 }
1040                 spin_unlock(&next_ctx->lock);
1041                 spin_unlock(&ctx->lock);
1042         }
1043         rcu_read_unlock();
1044
1045         if (do_switch) {
1046                 __perf_counter_sched_out(ctx, cpuctx);
1047                 cpuctx->task_ctx = NULL;
1048         }
1049 }
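/*
 * Example of when the context-switch optimization above applies
 * (hypothetical tasks): two children C1 and C2 of a monitored parent
 * both carry contexts cloned from the same parent context, so on a
 * C1 -> C2 switch context_equiv() holds and we merely swap the two
 * perf_counter_ctxp pointers instead of scheduling all counters out
 * of C1 and back in for C2.
 */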
1050
1051 /*
1052  * Called with IRQs disabled
1053  */
1054 static void __perf_counter_task_sched_out(struct perf_counter_context *ctx)
1055 {
1056         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
1057
1058         if (!cpuctx->task_ctx)
1059                 return;
1060
1061         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1062                 return;
1063
1064         __perf_counter_sched_out(ctx, cpuctx);
1065         cpuctx->task_ctx = NULL;
1066 }
1067
1068 /*
1069  * Called with IRQs disabled
1070  */
1071 static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
1072 {
1073         __perf_counter_sched_out(&cpuctx->ctx, cpuctx);
1074 }
1075
1076 static void
1077 __perf_counter_sched_in(struct perf_counter_context *ctx,
1078                         struct perf_cpu_context *cpuctx, int cpu)
1079 {
1080         struct perf_counter *counter;
1081         int can_add_hw = 1;
1082
1083         spin_lock(&ctx->lock);
1084         ctx->is_active = 1;
1085         if (likely(!ctx->nr_counters))
1086                 goto out;
1087
1088         ctx->timestamp = perf_clock();
1089
1090         perf_disable();
1091
1092         /*
1093          * First go through the list and put on any pinned groups
1094          * in order to give them the best chance of going on.
1095          */
1096         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1097                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1098                     !counter->attr.pinned)
1099                         continue;
1100                 if (counter->cpu != -1 && counter->cpu != cpu)
1101                         continue;
1102
1103                 if (counter != counter->group_leader)
1104                         counter_sched_in(counter, cpuctx, ctx, cpu);
1105                 else {
1106                         if (group_can_go_on(counter, cpuctx, 1))
1107                                 group_sched_in(counter, cpuctx, ctx, cpu);
1108                 }
1109
1110                 /*
1111                  * If this pinned group hasn't been scheduled,
1112                  * put it in error state.
1113                  */
1114                 if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1115                         update_group_times(counter);
1116                         counter->state = PERF_COUNTER_STATE_ERROR;
1117                 }
1118         }
1119
1120         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1121                 /*
1122                  * Ignore counters in OFF or ERROR state, and
1123                  * ignore pinned counters since we did them already.
1124                  */
1125                 if (counter->state <= PERF_COUNTER_STATE_OFF ||
1126                     counter->attr.pinned)
1127                         continue;
1128
1129                 /*
1130                  * Listen to the 'cpu' scheduling filter constraint
1131                  * of counters:
1132                  */
1133                 if (counter->cpu != -1 && counter->cpu != cpu)
1134                         continue;
1135
1136                 if (counter != counter->group_leader) {
1137                         if (counter_sched_in(counter, cpuctx, ctx, cpu))
1138                                 can_add_hw = 0;
1139                 } else {
1140                         if (group_can_go_on(counter, cpuctx, can_add_hw)) {
1141                                 if (group_sched_in(counter, cpuctx, ctx, cpu))
1142                                         can_add_hw = 0;
1143                         }
1144                 }
1145         }
1146         perf_enable();
1147  out:
1148         spin_unlock(&ctx->lock);
1149 }
1150
1151 /*
1152  * Called from scheduler to add the counters of the current task
1153  * with interrupts disabled.
1154  *
1155  * We restore the counter value and then enable it.
1156  *
1157  * This does not protect us against NMI, but enable()
1158  * sets the enabled bit in the control field of counter _before_
1159  * accessing the counter control register. If an NMI hits, then it will
1160  * keep the counter running.
1161  */
1162 void perf_counter_task_sched_in(struct task_struct *task, int cpu)
1163 {
1164         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
1165         struct perf_counter_context *ctx = task->perf_counter_ctxp;
1166
1167         if (likely(!ctx))
1168                 return;
1169         if (cpuctx->task_ctx == ctx)
1170                 return;
1171         __perf_counter_sched_in(ctx, cpuctx, cpu);
1172         cpuctx->task_ctx = ctx;
1173 }
1174
1175 static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1176 {
1177         struct perf_counter_context *ctx = &cpuctx->ctx;
1178
1179         __perf_counter_sched_in(ctx, cpuctx, cpu);
1180 }
1181
1182 #define MAX_INTERRUPTS (~0ULL)
1183
1184 static void perf_log_throttle(struct perf_counter *counter, int enable);
1185 static void perf_log_period(struct perf_counter *counter, u64 period);
1186
1187 static void perf_adjust_freq(struct perf_counter_context *ctx)
1188 {
1189         struct perf_counter *counter;
1190         u64 interrupts, sample_period;
1191         u64 events, period;
1192         s64 delta;
1193
1194         spin_lock(&ctx->lock);
1195         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1196                 if (counter->state != PERF_COUNTER_STATE_ACTIVE)
1197                         continue;
1198
1199                 interrupts = counter->hw.interrupts;
1200                 counter->hw.interrupts = 0;
1201
1202                 if (interrupts == MAX_INTERRUPTS) {
1203                         perf_log_throttle(counter, 1);
1204                         counter->pmu->unthrottle(counter);
1205                         interrupts = 2*sysctl_perf_counter_limit/HZ;
1206                 }
1207
1208                 if (!counter->attr.freq || !counter->attr.sample_freq)
1209                         continue;
1210
1211                 events = HZ * interrupts * counter->hw.sample_period;
1212                 period = div64_u64(events, counter->attr.sample_freq);
1213
1214                 delta = (s64)(1 + period - counter->hw.sample_period);
1215                 delta >>= 1;
1216
1217                 sample_period = counter->hw.sample_period + delta;
1218
1219                 if (!sample_period)
1220                         sample_period = 1;
1221
1222                 perf_log_period(counter, sample_period);
1223
1224                 counter->hw.sample_period = sample_period;
1225         }
1226         spin_unlock(&ctx->lock);
1227 }
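/*
 * Worked example of the adjustment above (made-up numbers): with
 * HZ == 1000, attr.sample_freq == 1000 and hw.sample_period == 10000,
 * two interrupts during the last tick give
 *
 *	events = 1000 * 2 * 10000	= 20,000,000 events/sec
 *	period = 20,000,000 / 1000	= 20,000
 *	delta  = (1 + 20000 - 10000) >> 1 = 5000
 *
 * so sample_period is nudged to 15000, i.e. halfway towards the period
 * that would have produced the requested 1000 samples per second.
 */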
1228
1229 /*
1230  * Round-robin a context's counters:
1231  */
1232 static void rotate_ctx(struct perf_counter_context *ctx)
1233 {
1234         struct perf_counter *counter;
1235
1236         if (!ctx->nr_counters)
1237                 return;
1238
1239         spin_lock(&ctx->lock);
1240         /*
1241          * Rotate the first entry last (works just fine for group counters too):
1242          */
1243         perf_disable();
1244         list_for_each_entry(counter, &ctx->counter_list, list_entry) {
1245                 list_move_tail(&counter->list_entry, &ctx->counter_list);
1246                 break;
1247         }
1248         perf_enable();
1249
1250         spin_unlock(&ctx->lock);
1251 }
1252
1253 void perf_counter_task_tick(struct task_struct *curr, int cpu)
1254 {
1255         struct perf_cpu_context *cpuctx;
1256         struct perf_counter_context *ctx;
1257
1258         if (!atomic_read(&nr_counters))
1259                 return;
1260
1261         cpuctx = &per_cpu(perf_cpu_context, cpu);
1262         ctx = curr->perf_counter_ctxp;
1263
1264         perf_adjust_freq(&cpuctx->ctx);
1265         if (ctx)
1266                 perf_adjust_freq(ctx);
1267
1268         perf_counter_cpu_sched_out(cpuctx);
1269         if (ctx)
1270                 __perf_counter_task_sched_out(ctx);
1271
1272         rotate_ctx(&cpuctx->ctx);
1273         if (ctx)
1274                 rotate_ctx(ctx);
1275
1276         perf_counter_cpu_sched_in(cpuctx, cpu);
1277         if (ctx)
1278                 perf_counter_task_sched_in(curr, cpu);
1279 }
1280
1281 /*
1282  * Cross CPU call to read the hardware counter
1283  */
1284 static void __read(void *info)
1285 {
1286         struct perf_counter *counter = info;
1287         struct perf_counter_context *ctx = counter->ctx;
1288         unsigned long flags;
1289
1290         local_irq_save(flags);
1291         if (ctx->is_active)
1292                 update_context_time(ctx);
1293         counter->pmu->read(counter);
1294         update_counter_times(counter);
1295         local_irq_restore(flags);
1296 }
1297
1298 static u64 perf_counter_read(struct perf_counter *counter)
1299 {
1300         /*
1301          * If counter is enabled and currently active on a CPU, update the
1302          * value in the counter structure:
1303          */
1304         if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
1305                 smp_call_function_single(counter->oncpu,
1306                                          __read, counter, 1);
1307         } else if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
1308                 update_counter_times(counter);
1309         }
1310
1311         return atomic64_read(&counter->count);
1312 }
1313
1314 /*
1315  * Initialize the perf_counter context in a task_struct:
1316  */
1317 static void
1318 __perf_counter_init_context(struct perf_counter_context *ctx,
1319                             struct task_struct *task)
1320 {
1321         memset(ctx, 0, sizeof(*ctx));
1322         spin_lock_init(&ctx->lock);
1323         mutex_init(&ctx->mutex);
1324         INIT_LIST_HEAD(&ctx->counter_list);
1325         INIT_LIST_HEAD(&ctx->event_list);
1326         atomic_set(&ctx->refcount, 1);
1327         ctx->task = task;
1328 }
1329
1330 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1331 {
1332         struct perf_counter_context *parent_ctx;
1333         struct perf_counter_context *ctx;
1334         struct perf_cpu_context *cpuctx;
1335         struct task_struct *task;
1336         unsigned long flags;
1337         int err;
1338
1339         /*
1340          * If cpu is not a wildcard then this is a percpu counter:
1341          */
1342         if (cpu != -1) {
1343                 /* Must be root to operate on a CPU counter: */
1344                 if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
1345                         return ERR_PTR(-EACCES);
1346
1347                 if (cpu < 0 || cpu > num_possible_cpus())
1348                         return ERR_PTR(-EINVAL);
1349
1350                 /*
1351                  * We could be clever and allow to attach a counter to an
1352                  * offline CPU and activate it when the CPU comes up, but
1353                  * that's for later.
1354                  */
1355                 if (!cpu_isset(cpu, cpu_online_map))
1356                         return ERR_PTR(-ENODEV);
1357
1358                 cpuctx = &per_cpu(perf_cpu_context, cpu);
1359                 ctx = &cpuctx->ctx;
1360                 get_ctx(ctx);
1361
1362                 return ctx;
1363         }
1364
1365         rcu_read_lock();
1366         if (!pid)
1367                 task = current;
1368         else
1369                 task = find_task_by_vpid(pid);
1370         if (task)
1371                 get_task_struct(task);
1372         rcu_read_unlock();
1373
1374         if (!task)
1375                 return ERR_PTR(-ESRCH);
1376
1377         /*
1378          * Can't attach counters to a dying task.
1379          */
1380         err = -ESRCH;
1381         if (task->flags & PF_EXITING)
1382                 goto errout;
1383
1384         /* Reuse ptrace permission checks for now. */
1385         err = -EACCES;
1386         if (!ptrace_may_access(task, PTRACE_MODE_READ))
1387                 goto errout;
1388
1389  retry:
1390         ctx = perf_lock_task_context(task, &flags);
1391         if (ctx) {
1392                 parent_ctx = ctx->parent_ctx;
1393                 if (parent_ctx) {
1394                         put_ctx(parent_ctx);
1395                         ctx->parent_ctx = NULL;         /* no longer a clone */
1396                 }
1397                 /*
1398                  * Get an extra reference before dropping the lock so that
1399                  * this context won't get freed if the task exits.
1400                  */
1401                 get_ctx(ctx);
1402                 spin_unlock_irqrestore(&ctx->lock, flags);
1403         }
1404
1405         if (!ctx) {
1406                 ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
1407                 err = -ENOMEM;
1408                 if (!ctx)
1409                         goto errout;
1410                 __perf_counter_init_context(ctx, task);
1411                 get_ctx(ctx);
1412                 if (cmpxchg(&task->perf_counter_ctxp, NULL, ctx)) {
1413                         /*
1414                          * We raced with some other task; use
1415                          * the context they set.
1416                          */
1417                         kfree(ctx);
1418                         goto retry;
1419                 }
1420                 get_task_struct(task);
1421         }
1422
1423         put_task_struct(task);
1424         return ctx;
1425
1426  errout:
1427         put_task_struct(task);
1428         return ERR_PTR(err);
1429 }
1430
1431 static void free_counter_rcu(struct rcu_head *head)
1432 {
1433         struct perf_counter *counter;
1434
1435         counter = container_of(head, struct perf_counter, rcu_head);
1436         if (counter->ns)
1437                 put_pid_ns(counter->ns);
1438         kfree(counter);
1439 }
1440
1441 static void perf_pending_sync(struct perf_counter *counter);
1442
1443 static void free_counter(struct perf_counter *counter)
1444 {
1445         perf_pending_sync(counter);
1446
1447         atomic_dec(&nr_counters);
1448         if (counter->attr.mmap)
1449                 atomic_dec(&nr_mmap_counters);
1450         if (counter->attr.comm)
1451                 atomic_dec(&nr_comm_counters);
1452
1453         if (counter->destroy)
1454                 counter->destroy(counter);
1455
1456         put_ctx(counter->ctx);
1457         call_rcu(&counter->rcu_head, free_counter_rcu);
1458 }
1459
1460 /*
1461  * Called when the last reference to the file is gone.
1462  */
1463 static int perf_release(struct inode *inode, struct file *file)
1464 {
1465         struct perf_counter *counter = file->private_data;
1466         struct perf_counter_context *ctx = counter->ctx;
1467
1468         file->private_data = NULL;
1469
1470         WARN_ON_ONCE(ctx->parent_ctx);
1471         mutex_lock(&ctx->mutex);
1472         perf_counter_remove_from_context(counter);
1473         mutex_unlock(&ctx->mutex);
1474
1475         mutex_lock(&counter->owner->perf_counter_mutex);
1476         list_del_init(&counter->owner_entry);
1477         mutex_unlock(&counter->owner->perf_counter_mutex);
1478         put_task_struct(counter->owner);
1479
1480         free_counter(counter);
1481
1482         return 0;
1483 }
1484
1485 /*
1486  * Read the performance counter - simple non blocking version for now
1487  */
1488 static ssize_t
1489 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1490 {
1491         u64 values[3];
1492         int n;
1493
1494         /*
1495          * Return end-of-file for a read on a counter that is in
1496          * error state (i.e. because it was pinned but it couldn't be
1497          * scheduled on to the CPU at some point).
1498          */
1499         if (counter->state == PERF_COUNTER_STATE_ERROR)
1500                 return 0;
1501
1502         WARN_ON_ONCE(counter->ctx->parent_ctx);
1503         mutex_lock(&counter->child_mutex);
1504         values[0] = perf_counter_read(counter);
1505         n = 1;
1506         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1507                 values[n++] = counter->total_time_enabled +
1508                         atomic64_read(&counter->child_total_time_enabled);
1509         if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1510                 values[n++] = counter->total_time_running +
1511                         atomic64_read(&counter->child_total_time_running);
1512         if (counter->attr.read_format & PERF_FORMAT_ID)
1513                 values[n++] = counter->id;
1514         mutex_unlock(&counter->child_mutex);
1515
1516         if (count < n * sizeof(u64))
1517                 return -EINVAL;
1518         count = n * sizeof(u64);
1519
1520         if (copy_to_user(buf, values, count))
1521                 return -EFAULT;
1522
1523         return count;
1524 }
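/*
 * Example of the buffer layout produced above (hypothetical counter,
 * for illustration): with read_format set to
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_ID, a read() returns
 * three u64 values, in this order:
 *
 *	values[0]	counter value
 *	values[1]	total_time_enabled (including children)
 *	values[2]	counter id
 *
 * Optional fields are appended in the fixed order TIME_ENABLED,
 * TIME_RUNNING, ID; only the requested ones are present.
 */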
1525
1526 static ssize_t
1527 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
1528 {
1529         struct perf_counter *counter = file->private_data;
1530
1531         return perf_read_hw(counter, buf, count);
1532 }
1533
1534 static unsigned int perf_poll(struct file *file, poll_table *wait)
1535 {
1536         struct perf_counter *counter = file->private_data;
1537         struct perf_mmap_data *data;
1538         unsigned int events = POLLHUP;
1539
1540         rcu_read_lock();
1541         data = rcu_dereference(counter->data);
1542         if (data)
1543                 events = atomic_xchg(&data->poll, 0);
1544         rcu_read_unlock();
1545
1546         poll_wait(file, &counter->waitq, wait);
1547
1548         return events;
1549 }
1550
1551 static void perf_counter_reset(struct perf_counter *counter)
1552 {
1553         (void)perf_counter_read(counter);
1554         atomic64_set(&counter->count, 0);
1555         perf_counter_update_userpage(counter);
1556 }
1557
1558 static void perf_counter_for_each_sibling(struct perf_counter *counter,
1559                                           void (*func)(struct perf_counter *))
1560 {
1561         struct perf_counter_context *ctx = counter->ctx;
1562         struct perf_counter *sibling;
1563
1564         WARN_ON_ONCE(ctx->parent_ctx);
1565         mutex_lock(&ctx->mutex);
1566         counter = counter->group_leader;
1567
1568         func(counter);
1569         list_for_each_entry(sibling, &counter->sibling_list, list_entry)
1570                 func(sibling);
1571         mutex_unlock(&ctx->mutex);
1572 }
1573
1574 /*
1575  * Holding the top-level counter's child_mutex means that any
1576  * descendant process that has inherited this counter will block
1577  * in sync_child_counter if it goes to exit, thus satisfying the
1578  * task existence requirements of perf_counter_enable/disable.
1579  */
1580 static void perf_counter_for_each_child(struct perf_counter *counter,
1581                                         void (*func)(struct perf_counter *))
1582 {
1583         struct perf_counter *child;
1584
1585         WARN_ON_ONCE(counter->ctx->parent_ctx);
1586         mutex_lock(&counter->child_mutex);
1587         func(counter);
1588         list_for_each_entry(child, &counter->child_list, child_list)
1589                 func(child);
1590         mutex_unlock(&counter->child_mutex);
1591 }
1592
1593 static void perf_counter_for_each(struct perf_counter *counter,
1594                                   void (*func)(struct perf_counter *))
1595 {
1596         struct perf_counter *child;
1597
1598         WARN_ON_ONCE(counter->ctx->parent_ctx);
1599         mutex_lock(&counter->child_mutex);
1600         perf_counter_for_each_sibling(counter, func);
1601         list_for_each_entry(child, &counter->child_list, child_list)
1602                 perf_counter_for_each_sibling(child, func);
1603         mutex_unlock(&counter->child_mutex);
1604 }
1605
1606 static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1607 {
1608         struct perf_counter_context *ctx = counter->ctx;
1609         unsigned long size;
1610         int ret = 0;
1611         u64 value;
1612
1613         if (!counter->attr.sample_period)
1614                 return -EINVAL;
1615
1616         size = copy_from_user(&value, arg, sizeof(value));
1617         if (size)
1618                 return -EFAULT;
1619
1620         if (!value)
1621                 return -EINVAL;
1622
1623         spin_lock_irq(&ctx->lock);
1624         if (counter->attr.freq) {
1625                 if (value > sysctl_perf_counter_limit) {
1626                         ret = -EINVAL;
1627                         goto unlock;
1628                 }
1629
1630                 counter->attr.sample_freq = value;
1631         } else {
1632                 counter->attr.sample_period = value;
1633                 counter->hw.sample_period = value;
1634
1635                 perf_log_period(counter, value);
1636         }
1637 unlock:
1638         spin_unlock_irq(&ctx->lock);
1639
1640         return ret;
1641 }
1642
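     /*
      * ioctl() interface of counter fds.
      *
      * ENABLE/DISABLE/RESET act on the counter and its inherited children;
      * with PERF_IOC_FLAG_GROUP in the argument they act on the whole
      * sibling group instead.  A rough user-space sketch (error handling
      * omitted; 'fd' assumed to come from sys_perf_counter_open()):
      *
      *	u64 period = 100000;
      *
      *	ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);
      *	ioctl(fd, PERF_COUNTER_IOC_RESET, PERF_IOC_FLAG_GROUP);
      *	ioctl(fd, PERF_COUNTER_IOC_ENABLE, 0);
      */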
1643 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
1644 {
1645         struct perf_counter *counter = file->private_data;
1646         void (*func)(struct perf_counter *);
1647         u32 flags = arg;
1648
1649         switch (cmd) {
1650         case PERF_COUNTER_IOC_ENABLE:
1651                 func = perf_counter_enable;
1652                 break;
1653         case PERF_COUNTER_IOC_DISABLE:
1654                 func = perf_counter_disable;
1655                 break;
1656         case PERF_COUNTER_IOC_RESET:
1657                 func = perf_counter_reset;
1658                 break;
1659
1660         case PERF_COUNTER_IOC_REFRESH:
1661                 return perf_counter_refresh(counter, arg);
1662
1663         case PERF_COUNTER_IOC_PERIOD:
1664                 return perf_counter_period(counter, (u64 __user *)arg);
1665
1666         default:
1667                 return -ENOTTY;
1668         }
1669
1670         if (flags & PERF_IOC_FLAG_GROUP)
1671                 perf_counter_for_each(counter, func);
1672         else
1673                 perf_counter_for_each_child(counter, func);
1674
1675         return 0;
1676 }
1677
1678 int perf_counter_task_enable(void)
1679 {
1680         struct perf_counter *counter;
1681
1682         mutex_lock(&current->perf_counter_mutex);
1683         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1684                 perf_counter_for_each_child(counter, perf_counter_enable);
1685         mutex_unlock(&current->perf_counter_mutex);
1686
1687         return 0;
1688 }
1689
1690 int perf_counter_task_disable(void)
1691 {
1692         struct perf_counter *counter;
1693
1694         mutex_lock(&current->perf_counter_mutex);
1695         list_for_each_entry(counter, &current->perf_counter_list, owner_entry)
1696                 perf_counter_for_each_child(counter, perf_counter_disable);
1697         mutex_unlock(&current->perf_counter_mutex);
1698
1699         return 0;
1700 }
1701
1702 /*
1703  * Callers need to ensure there can be no nesting of this function, otherwise
1704  * the seqlock logic goes bad. We can not serialize this because the arch
1705  * code calls this from NMI context.
1706  */
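     /*
      * The userpg->lock field is used as a sequence count: it is incremented
      * once before and once after each update, so a user-space reader can
      * detect a concurrent update and retry.  A rough reader sketch ('pc'
      * points at the mmap()ed control page; adding the raw hardware counter
      * value for 'index' is left out, it is architecture specific):
      *
      *	again:
      *		seq = pc->lock;
      *		barrier();
      *		if (seq & 1)		/* update in progress */
      *			goto again;
      *
      *		index = pc->index;
      *		count = pc->offset;
      *		barrier();
      *		if (pc->lock != seq)
      *			goto again;
      */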
1707 void perf_counter_update_userpage(struct perf_counter *counter)
1708 {
1709         struct perf_counter_mmap_page *userpg;
1710         struct perf_mmap_data *data;
1711
1712         rcu_read_lock();
1713         data = rcu_dereference(counter->data);
1714         if (!data)
1715                 goto unlock;
1716
1717         userpg = data->user_page;
1718
1719         /*
1720          * Disable preemption so as to not let the corresponding user-space
1721          * spin too long if we get preempted.
1722          */
1723         preempt_disable();
1724         ++userpg->lock;
1725         barrier();
1726         userpg->index = counter->hw.idx;
1727         userpg->offset = atomic64_read(&counter->count);
1728         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
1729                 userpg->offset -= atomic64_read(&counter->hw.prev_count);
1730
1731         barrier();
1732         ++userpg->lock;
1733         preempt_enable();
1734 unlock:
1735         rcu_read_unlock();
1736 }
1737
1738 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1739 {
1740         struct perf_counter *counter = vma->vm_file->private_data;
1741         struct perf_mmap_data *data;
1742         int ret = VM_FAULT_SIGBUS;
1743
1744         rcu_read_lock();
1745         data = rcu_dereference(counter->data);
1746         if (!data)
1747                 goto unlock;
1748
1749         if (vmf->pgoff == 0) {
1750                 vmf->page = virt_to_page(data->user_page);
1751         } else {
1752                 int nr = vmf->pgoff - 1;
1753
1754                 if ((unsigned)nr >= data->nr_pages)
1755                         goto unlock;
1756
1757                 vmf->page = virt_to_page(data->data_pages[nr]);
1758         }
1759         get_page(vmf->page);
1760         ret = 0;
1761 unlock:
1762         rcu_read_unlock();
1763
1764         return ret;
1765 }
1766
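     /*
      * Allocate the mmap buffer: one zeroed user/control page followed by
      * nr_pages zeroed data pages, published through counter->data under RCU
      * so that the poll, fault and output paths can dereference it locklessly.
      */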
1767 static int perf_mmap_data_alloc(struct perf_counter *counter, int nr_pages)
1768 {
1769         struct perf_mmap_data *data;
1770         unsigned long size;
1771         int i;
1772
1773         WARN_ON(atomic_read(&counter->mmap_count));
1774
1775         size = sizeof(struct perf_mmap_data);
1776         size += nr_pages * sizeof(void *);
1777
1778         data = kzalloc(size, GFP_KERNEL);
1779         if (!data)
1780                 goto fail;
1781
1782         data->user_page = (void *)get_zeroed_page(GFP_KERNEL);
1783         if (!data->user_page)
1784                 goto fail_user_page;
1785
1786         for (i = 0; i < nr_pages; i++) {
1787                 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
1788                 if (!data->data_pages[i])
1789                         goto fail_data_pages;
1790         }
1791
1792         data->nr_pages = nr_pages;
1793         atomic_set(&data->lock, -1);
1794
1795         rcu_assign_pointer(counter->data, data);
1796
1797         return 0;
1798
1799 fail_data_pages:
1800         for (i--; i >= 0; i--)
1801                 free_page((unsigned long)data->data_pages[i]);
1802
1803         free_page((unsigned long)data->user_page);
1804
1805 fail_user_page:
1806         kfree(data);
1807
1808 fail:
1809         return -ENOMEM;
1810 }
1811
1812 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
1813 {
1814         struct perf_mmap_data *data;
1815         int i;
1816
1817         data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
1818
1819         free_page((unsigned long)data->user_page);
1820         for (i = 0; i < data->nr_pages; i++)
1821                 free_page((unsigned long)data->data_pages[i]);
1822         kfree(data);
1823 }
1824
1825 static void perf_mmap_data_free(struct perf_counter *counter)
1826 {
1827         struct perf_mmap_data *data = counter->data;
1828
1829         WARN_ON(atomic_read(&counter->mmap_count));
1830
1831         rcu_assign_pointer(counter->data, NULL);
1832         call_rcu(&data->rcu_head, __perf_mmap_data_free);
1833 }
1834
1835 static void perf_mmap_open(struct vm_area_struct *vma)
1836 {
1837         struct perf_counter *counter = vma->vm_file->private_data;
1838
1839         atomic_inc(&counter->mmap_count);
1840 }
1841
1842 static void perf_mmap_close(struct vm_area_struct *vma)
1843 {
1844         struct perf_counter *counter = vma->vm_file->private_data;
1845
1846         WARN_ON_ONCE(counter->ctx->parent_ctx);
1847         if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
1848                 struct user_struct *user = current_user();
1849
1850                 atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
1851                 vma->vm_mm->locked_vm -= counter->data->nr_locked;
1852                 perf_mmap_data_free(counter);
1853                 mutex_unlock(&counter->mmap_mutex);
1854         }
1855 }
1856
1857 static struct vm_operations_struct perf_mmap_vmops = {
1858         .open  = perf_mmap_open,
1859         .close = perf_mmap_close,
1860         .fault = perf_mmap_fault,
1861 };
1862
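     /*
      * mmap() support.  User space maps the buffer read-only and shared:
      * page 0 is the control page, the following power-of-two number of
      * pages form the data ring buffer.  A rough sketch of the expected
      * call for 'n' data pages:
      *
      *	base = mmap(NULL, (1 + n) * sysconf(_SC_PAGESIZE), PROT_READ,
      *		    MAP_SHARED, fd, 0);
      *
      * The pages are charged against the per-user budget derived from
      * sysctl_perf_counter_mlock and against RLIMIT_MEMLOCK.
      */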
1863 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
1864 {
1865         struct perf_counter *counter = file->private_data;
1866         unsigned long user_locked, user_lock_limit;
1867         struct user_struct *user = current_user();
1868         unsigned long locked, lock_limit;
1869         unsigned long vma_size;
1870         unsigned long nr_pages;
1871         long user_extra, extra;
1872         int ret = 0;
1873
1874         if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
1875                 return -EINVAL;
1876
1877         vma_size = vma->vm_end - vma->vm_start;
1878         nr_pages = (vma_size / PAGE_SIZE) - 1;
1879
1880         /*
1881          * If we have data pages ensure they're a power-of-two number, so we
1882          * can do bitmasks instead of modulo.
1883          */
1884         if (nr_pages != 0 && !is_power_of_2(nr_pages))
1885                 return -EINVAL;
1886
1887         if (vma_size != PAGE_SIZE * (1 + nr_pages))
1888                 return -EINVAL;
1889
1890         if (vma->vm_pgoff != 0)
1891                 return -EINVAL;
1892
1893         WARN_ON_ONCE(counter->ctx->parent_ctx);
1894         mutex_lock(&counter->mmap_mutex);
1895         if (atomic_inc_not_zero(&counter->mmap_count)) {
1896                 if (nr_pages != counter->data->nr_pages)
1897                         ret = -EINVAL;
1898                 goto unlock;
1899         }
1900
1901         user_extra = nr_pages + 1;
1902         user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
1903
1904         /*
1905          * Increase the limit linearly with more CPUs:
1906          */
1907         user_lock_limit *= num_online_cpus();
1908
1909         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
1910
1911         extra = 0;
1912         if (user_locked > user_lock_limit)
1913                 extra = user_locked - user_lock_limit;
1914
1915         lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1916         lock_limit >>= PAGE_SHIFT;
1917         locked = vma->vm_mm->locked_vm + extra;
1918
1919         if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
1920                 ret = -EPERM;
1921                 goto unlock;
1922         }
1923
1924         WARN_ON(counter->data);
1925         ret = perf_mmap_data_alloc(counter, nr_pages);
1926         if (ret)
1927                 goto unlock;
1928
1929         atomic_set(&counter->mmap_count, 1);
1930         atomic_long_add(user_extra, &user->locked_vm);
1931         vma->vm_mm->locked_vm += extra;
1932         counter->data->nr_locked = extra;
1933 unlock:
1934         mutex_unlock(&counter->mmap_mutex);
1935
1936         vma->vm_flags &= ~VM_MAYWRITE;
1937         vma->vm_flags |= VM_RESERVED;
1938         vma->vm_ops = &perf_mmap_vmops;
1939
1940         return ret;
1941 }
1942
1943 static int perf_fasync(int fd, struct file *filp, int on)
1944 {
1945         struct inode *inode = filp->f_path.dentry->d_inode;
1946         struct perf_counter *counter = filp->private_data;
1947         int retval;
1948
1949         mutex_lock(&inode->i_mutex);
1950         retval = fasync_helper(fd, filp, on, &counter->fasync);
1951         mutex_unlock(&inode->i_mutex);
1952
1953         if (retval < 0)
1954                 return retval;
1955
1956         return 0;
1957 }
1958
1959 static const struct file_operations perf_fops = {
1960         .release                = perf_release,
1961         .read                   = perf_read,
1962         .poll                   = perf_poll,
1963         .unlocked_ioctl         = perf_ioctl,
1964         .compat_ioctl           = perf_ioctl,
1965         .mmap                   = perf_mmap,
1966         .fasync                 = perf_fasync,
1967 };
1968
1969 /*
1970  * Perf counter wakeup
1971  *
1972  * If there's data, ensure we set the poll() state and publish everything
1973  * to user-space before waking everybody up.
1974  */
1975
1976 void perf_counter_wakeup(struct perf_counter *counter)
1977 {
1978         wake_up_all(&counter->waitq);
1979
1980         if (counter->pending_kill) {
1981                 kill_fasync(&counter->fasync, SIGIO, counter->pending_kill);
1982                 counter->pending_kill = 0;
1983         }
1984 }
1985
1986 /*
1987  * Pending wakeups
1988  *
1989  * Handle the case where we need to wake up from NMI (or rq->lock) context.
1990  *
1991  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
1992  * singly-linked list and use cmpxchg() to add entries locklessly.
1993  */
1994
1995 static void perf_pending_counter(struct perf_pending_entry *entry)
1996 {
1997         struct perf_counter *counter = container_of(entry,
1998                         struct perf_counter, pending);
1999
2000         if (counter->pending_disable) {
2001                 counter->pending_disable = 0;
2002                 perf_counter_disable(counter);
2003         }
2004
2005         if (counter->pending_wakeup) {
2006                 counter->pending_wakeup = 0;
2007                 perf_counter_wakeup(counter);
2008         }
2009 }
2010
2011 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
2012
2013 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
2014         PENDING_TAIL,
2015 };
2016
2017 static void perf_pending_queue(struct perf_pending_entry *entry,
2018                                void (*func)(struct perf_pending_entry *))
2019 {
2020         struct perf_pending_entry **head;
2021
2022         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
2023                 return;
2024
2025         entry->func = func;
2026
2027         head = &get_cpu_var(perf_pending_head);
2028
2029         do {
2030                 entry->next = *head;
2031         } while (cmpxchg(head, entry->next, entry) != entry->next);
2032
2033         set_perf_counter_pending();
2034
2035         put_cpu_var(perf_pending_head);
2036 }
2037
2038 static int __perf_pending_run(void)
2039 {
2040         struct perf_pending_entry *list;
2041         int nr = 0;
2042
2043         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
2044         while (list != PENDING_TAIL) {
2045                 void (*func)(struct perf_pending_entry *);
2046                 struct perf_pending_entry *entry = list;
2047
2048                 list = list->next;
2049
2050                 func = entry->func;
2051                 entry->next = NULL;
2052                 /*
2053                  * Ensure we observe the unqueue before we issue the wakeup,
2054                  * so that we won't be waiting forever.
2055                  * -- see perf_not_pending().
2056                  */
2057                 smp_wmb();
2058
2059                 func(entry);
2060                 nr++;
2061         }
2062
2063         return nr;
2064 }
2065
2066 static inline int perf_not_pending(struct perf_counter *counter)
2067 {
2068         /*
2069          * If we flush the pending list on whatever CPU we happen to run on,
2070          * there is a chance we don't need to wait at all.
2071          */
2072         get_cpu();
2073         __perf_pending_run();
2074         put_cpu();
2075
2076         /*
2077          * Ensure we see the proper queue state before going to sleep
2078          * so that we do not miss the wakeup. -- see perf_pending_handle()
2079          */
2080          * so that we do not miss the wakeup. -- see __perf_pending_run()
2081         return counter->pending.next == NULL;
2082 }
2083
2084 static void perf_pending_sync(struct perf_counter *counter)
2085 {
2086         wait_event(counter->waitq, perf_not_pending(counter));
2087 }
2088
2089 void perf_counter_do_pending(void)
2090 {
2091         __perf_pending_run();
2092 }
2093
2094 /*
2095  * Callchain support -- arch specific
2096  */
2097
2098 __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2099 {
2100         return NULL;
2101 }
2102
2103 /*
2104  * Output
2105  */
2106
2107 struct perf_output_handle {
2108         struct perf_counter     *counter;
2109         struct perf_mmap_data   *data;
2110         unsigned long           head;
2111         unsigned long           offset;
2112         int                     nmi;
2113         int                     overflow;
2114         int                     locked;
2115         unsigned long           flags;
2116 };
2117
2118 static void perf_output_wakeup(struct perf_output_handle *handle)
2119 {
2120         atomic_set(&handle->data->poll, POLLIN);
2121
2122         if (handle->nmi) {
2123                 handle->counter->pending_wakeup = 1;
2124                 perf_pending_queue(&handle->counter->pending,
2125                                    perf_pending_counter);
2126         } else
2127                 perf_counter_wakeup(handle->counter);
2128 }
2129
2130 /*
2131  * Curious locking construct.
2132  *
2133  * We need to ensure a later event doesn't publish a head when a former
2134  * event isn't done writing. However since we need to deal with NMIs we
2135  * cannot fully serialize things.
2136  *
2137  * What we do is serialize between CPUs so we only have to deal with NMI
2138  * nesting on a single CPU.
2139  *
2140  * We only publish the head (and generate a wakeup) when the outer-most
2141  * event completes.
2142  */
2143 static void perf_output_lock(struct perf_output_handle *handle)
2144 {
2145         struct perf_mmap_data *data = handle->data;
2146         int cpu;
2147
2148         handle->locked = 0;
2149
2150         local_irq_save(handle->flags);
2151         cpu = smp_processor_id();
2152
2153         if (in_nmi() && atomic_read(&data->lock) == cpu)
2154                 return;
2155
2156         while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2157                 cpu_relax();
2158
2159         handle->locked = 1;
2160 }
2161
2162 static void perf_output_unlock(struct perf_output_handle *handle)
2163 {
2164         struct perf_mmap_data *data = handle->data;
2165         unsigned long head;
2166         int cpu;
2167
2168         data->done_head = data->head;
2169
2170         if (!handle->locked)
2171                 goto out;
2172
2173 again:
2174         /*
2175          * The xchg implies a full barrier that ensures all writes are done
2176          * before we publish the new head, matched by a rmb() in userspace when
2177          * reading this position.
2178          */
2179         while ((head = atomic_long_xchg(&data->done_head, 0)))
2180                 data->user_page->data_head = head;
2181
2182         /*
2183          * NMI can happen here, which means we can miss a done_head update.
2184          */
2185
2186         cpu = atomic_xchg(&data->lock, -1);
2187         WARN_ON_ONCE(cpu != smp_processor_id());
2188
2189         /*
2190          * Therefore we have to validate we did not indeed do so.
2191          */
2192          * Therefore we have to check whether we did in fact miss one.
2193                 /*
2194                  * Since we had it locked, we can lock it again.
2195                  */
2196                 while (atomic_cmpxchg(&data->lock, -1, cpu) != -1)
2197                         cpu_relax();
2198
2199                 goto again;
2200         }
2201
2202         if (atomic_xchg(&data->wakeup, 0))
2203                 perf_output_wakeup(handle);
2204 out:
2205         local_irq_restore(handle->flags);
2206 }
2207
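     /*
      * Reserve 'size' bytes in the counter's mmap buffer.  On success the
      * handle is set up, rcu_read_lock() is held, and the caller must fill
      * the reservation with perf_output_put()/perf_output_copy() and finish
      * with perf_output_end().
      */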
2208 static int perf_output_begin(struct perf_output_handle *handle,
2209                              struct perf_counter *counter, unsigned int size,
2210                              int nmi, int overflow)
2211 {
2212         struct perf_mmap_data *data;
2213         unsigned int offset, head;
2214
2215         /*
2216          * For inherited counters we send all the output towards the parent.
2217          */
2218         if (counter->parent)
2219                 counter = counter->parent;
2220
2221         rcu_read_lock();
2222         data = rcu_dereference(counter->data);
2223         if (!data)
2224                 goto out;
2225
2226         handle->data     = data;
2227         handle->counter  = counter;
2228         handle->nmi      = nmi;
2229         handle->overflow = overflow;
2230
2231         if (!data->nr_pages)
2232                 goto fail;
2233
2234         perf_output_lock(handle);
2235
2236         do {
2237                 offset = head = atomic_long_read(&data->head);
2238                 head += size;
2239         } while (atomic_long_cmpxchg(&data->head, offset, head) != offset);
2240
2241         handle->offset  = offset;
2242         handle->head    = head;
2243
2244         if ((offset >> PAGE_SHIFT) != (head >> PAGE_SHIFT))
2245                 atomic_set(&data->wakeup, 1);
2246
2247         return 0;
2248
2249 fail:
2250         perf_output_wakeup(handle);
2251 out:
2252         rcu_read_unlock();
2253
2254         return -ENOSPC;
2255 }
2256
2257 static void perf_output_copy(struct perf_output_handle *handle,
2258                              const void *buf, unsigned int len)
2259 {
2260         unsigned int pages_mask;
2261         unsigned int offset;
2262         unsigned int size;
2263         void **pages;
2264
2265         offset          = handle->offset;
2266         pages_mask      = handle->data->nr_pages - 1;
2267         pages           = handle->data->data_pages;
2268
2269         do {
2270                 unsigned int page_offset;
2271                 int nr;
2272
2273                 nr          = (offset >> PAGE_SHIFT) & pages_mask;
2274                 page_offset = offset & (PAGE_SIZE - 1);
2275                 size        = min_t(unsigned int, PAGE_SIZE - page_offset, len);
2276
2277                 memcpy(pages[nr] + page_offset, buf, size);
2278
2279                 len         -= size;
2280                 buf         += size;
2281                 offset      += size;
2282         } while (len);
2283
2284         handle->offset = offset;
2285
2286         /*
2287          * Check we didn't copy past our reservation window, taking the
2288          * possible unsigned int wrap into account.
2289          */
2290         WARN_ON_ONCE(((long)(handle->head - handle->offset)) < 0);
2291 }
2292
2293 #define perf_output_put(handle, x) \
2294         perf_output_copy((handle), &(x), sizeof(x))
2295
2296 static void perf_output_end(struct perf_output_handle *handle)
2297 {
2298         struct perf_counter *counter = handle->counter;
2299         struct perf_mmap_data *data = handle->data;
2300
2301         int wakeup_events = counter->attr.wakeup_events;
2302
2303         if (handle->overflow && wakeup_events) {
2304                 int events = atomic_inc_return(&data->events);
2305                 if (events >= wakeup_events) {
2306                         atomic_sub(wakeup_events, &data->events);
2307                         atomic_set(&data->wakeup, 1);
2308                 }
2309         }
2310
2311         perf_output_unlock(handle);
2312         rcu_read_unlock();
2313 }
2314
2315 static u32 perf_counter_pid(struct perf_counter *counter, struct task_struct *p)
2316 {
2317         /*
2318          * only top level counters have the pid namespace they were created in
2319          */
2320         if (counter->parent)
2321                 counter = counter->parent;
2322
2323         return task_tgid_nr_ns(p, counter->ns);
2324 }
2325
2326 static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
2327 {
2328         /*
2329          * only top level counters have the pid namespace they were created in
2330          */
2331         if (counter->parent)
2332                 counter = counter->parent;
2333
2334         return task_pid_nr_ns(p, counter->ns);
2335 }
2336
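     /*
      * Write out one overflow sample.  The record layout is selected by
      * attr.sample_type; the header is followed by the requested fields in
      * the order IP, TID, TIME, ADDR, ID, CPU, PERIOD, GROUP reads,
      * CALLCHAIN.
      */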
2337 static void perf_counter_output(struct perf_counter *counter,
2338                                 int nmi, struct pt_regs *regs, u64 addr)
2339 {
2340         int ret;
2341         u64 sample_type = counter->attr.sample_type;
2342         struct perf_output_handle handle;
2343         struct perf_event_header header;
2344         u64 ip;
2345         struct {
2346                 u32 pid, tid;
2347         } tid_entry;
2348         struct {
2349                 u64 id;
2350                 u64 counter;
2351         } group_entry;
2352         struct perf_callchain_entry *callchain = NULL;
2353         int callchain_size = 0;
2354         u64 time;
2355         struct {
2356                 u32 cpu, reserved;
2357         } cpu_entry;
2358
2359         header.type = 0;
2360         header.size = sizeof(header);
2361
2362         header.misc = PERF_EVENT_MISC_OVERFLOW;
2363         header.misc |= perf_misc_flags(regs);
2364
2365         if (sample_type & PERF_SAMPLE_IP) {
2366                 ip = perf_instruction_pointer(regs);
2367                 header.type |= PERF_SAMPLE_IP;
2368                 header.size += sizeof(ip);
2369         }
2370
2371         if (sample_type & PERF_SAMPLE_TID) {
2372                 /* namespace issues */
2373                 tid_entry.pid = perf_counter_pid(counter, current);
2374                 tid_entry.tid = perf_counter_tid(counter, current);
2375
2376                 header.type |= PERF_SAMPLE_TID;
2377                 header.size += sizeof(tid_entry);
2378         }
2379
2380         if (sample_type & PERF_SAMPLE_TIME) {
2381                 /*
2382                  * Maybe do better on x86 and provide cpu_clock_nmi()
2383                  */
2384                 time = sched_clock();
2385
2386                 header.type |= PERF_SAMPLE_TIME;
2387                 header.size += sizeof(u64);
2388         }
2389
2390         if (sample_type & PERF_SAMPLE_ADDR) {
2391                 header.type |= PERF_SAMPLE_ADDR;
2392                 header.size += sizeof(u64);
2393         }
2394
2395         if (sample_type & PERF_SAMPLE_ID) {
2396                 header.type |= PERF_SAMPLE_ID;
2397                 header.size += sizeof(u64);
2398         }
2399
2400         if (sample_type & PERF_SAMPLE_CPU) {
2401                 header.type |= PERF_SAMPLE_CPU;
2402                 header.size += sizeof(cpu_entry);
2403
2404                 cpu_entry.cpu = raw_smp_processor_id();
2405         }
2406
2407         if (sample_type & PERF_SAMPLE_PERIOD) {
2408                 header.type |= PERF_SAMPLE_PERIOD;
2409                 header.size += sizeof(u64);
2410         }
2411
2412         if (sample_type & PERF_SAMPLE_GROUP) {
2413                 header.type |= PERF_SAMPLE_GROUP;
2414                 header.size += sizeof(u64) +
2415                         counter->nr_siblings * sizeof(group_entry);
2416         }
2417
2418         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
2419                 callchain = perf_callchain(regs);
2420
2421                 if (callchain) {
2422                         callchain_size = (1 + callchain->nr) * sizeof(u64);
2423
2424                         header.type |= PERF_SAMPLE_CALLCHAIN;
2425                         header.size += callchain_size;
2426                 }
2427         }
2428
2429         ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2430         if (ret)
2431                 return;
2432
2433         perf_output_put(&handle, header);
2434
2435         if (sample_type & PERF_SAMPLE_IP)
2436                 perf_output_put(&handle, ip);
2437
2438         if (sample_type & PERF_SAMPLE_TID)
2439                 perf_output_put(&handle, tid_entry);
2440
2441         if (sample_type & PERF_SAMPLE_TIME)
2442                 perf_output_put(&handle, time);
2443
2444         if (sample_type & PERF_SAMPLE_ADDR)
2445                 perf_output_put(&handle, addr);
2446
2447         if (sample_type & PERF_SAMPLE_ID)
2448                 perf_output_put(&handle, counter->id);
2449
2450         if (sample_type & PERF_SAMPLE_CPU)
2451                 perf_output_put(&handle, cpu_entry);
2452
2453         if (sample_type & PERF_SAMPLE_PERIOD)
2454                 perf_output_put(&handle, counter->hw.sample_period);
2455
2456         /*
2457          * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
2458          */
2459         if (sample_type & PERF_SAMPLE_GROUP) {
2460                 struct perf_counter *leader, *sub;
2461                 u64 nr = counter->nr_siblings;
2462
2463                 perf_output_put(&handle, nr);
2464
2465                 leader = counter->group_leader;
2466                 list_for_each_entry(sub, &leader->sibling_list, list_entry) {
2467                         if (sub != counter)
2468                                 sub->pmu->read(sub);
2469
2470                         group_entry.id = sub->id;
2471                         group_entry.counter = atomic64_read(&sub->count);
2472
2473                         perf_output_put(&handle, group_entry);
2474                 }
2475         }
2476
2477         if (callchain)
2478                 perf_output_copy(&handle, callchain, callchain_size);
2479
2480         perf_output_end(&handle);
2481 }
2482
2483 /*
2484  * fork tracking
2485  */
2486
2487 struct perf_fork_event {
2488         struct task_struct      *task;
2489
2490         struct {
2491                 struct perf_event_header        header;
2492
2493                 u32                             pid;
2494                 u32                             ppid;
2495         } event;
2496 };
2497
2498 static void perf_counter_fork_output(struct perf_counter *counter,
2499                                      struct perf_fork_event *fork_event)
2500 {
2501         struct perf_output_handle handle;
2502         int size = fork_event->event.header.size;
2503         struct task_struct *task = fork_event->task;
2504         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2505
2506         if (ret)
2507                 return;
2508
2509         fork_event->event.pid = perf_counter_pid(counter, task);
2510         fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2511
2512         perf_output_put(&handle, fork_event->event);
2513         perf_output_end(&handle);
2514 }
2515
2516 static int perf_counter_fork_match(struct perf_counter *counter)
2517 {
2518         if (counter->attr.comm || counter->attr.mmap)
2519                 return 1;
2520
2521         return 0;
2522 }
2523
2524 static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2525                                   struct perf_fork_event *fork_event)
2526 {
2527         struct perf_counter *counter;
2528
2529         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2530                 return;
2531
2532         rcu_read_lock();
2533         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2534                 if (perf_counter_fork_match(counter))
2535                         perf_counter_fork_output(counter, fork_event);
2536         }
2537         rcu_read_unlock();
2538 }
2539
2540 static void perf_counter_fork_event(struct perf_fork_event *fork_event)
2541 {
2542         struct perf_cpu_context *cpuctx;
2543         struct perf_counter_context *ctx;
2544
2545         cpuctx = &get_cpu_var(perf_cpu_context);
2546         perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
2547         put_cpu_var(perf_cpu_context);
2548
2549         rcu_read_lock();
2550         /*
2551          * doesn't really matter which of the child contexts the
2552          * event ends up in.
2553          */
2554         ctx = rcu_dereference(current->perf_counter_ctxp);
2555         if (ctx)
2556                 perf_counter_fork_ctx(ctx, fork_event);
2557         rcu_read_unlock();
2558 }
2559
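     /*
      * Called from the fork path: emits a PERF_EVENT_FORK record (child and
      * parent pid) to interested counters on the current CPU and in the
      * current task's context.
      */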
2560 void perf_counter_fork(struct task_struct *task)
2561 {
2562         struct perf_fork_event fork_event;
2563
2564         if (!atomic_read(&nr_comm_counters) &&
2565             !atomic_read(&nr_mmap_counters))
2566                 return;
2567
2568         fork_event = (struct perf_fork_event){
2569                 .task   = task,
2570                 .event  = {
2571                         .header = {
2572                                 .type = PERF_EVENT_FORK,
2573                                 .size = sizeof(fork_event.event),
2574                         },
2575                 },
2576         };
2577
2578         perf_counter_fork_event(&fork_event);
2579 }
2580
2581 /*
2582  * comm tracking
2583  */
2584
2585 struct perf_comm_event {
2586         struct task_struct      *task;
2587         char                    *comm;
2588         int                     comm_size;
2589
2590         struct {
2591                 struct perf_event_header        header;
2592
2593                 u32                             pid;
2594                 u32                             tid;
2595         } event;
2596 };
2597
2598 static void perf_counter_comm_output(struct perf_counter *counter,
2599                                      struct perf_comm_event *comm_event)
2600 {
2601         struct perf_output_handle handle;
2602         int size = comm_event->event.header.size;
2603         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2604
2605         if (ret)
2606                 return;
2607
2608         comm_event->event.pid = perf_counter_pid(counter, comm_event->task);
2609         comm_event->event.tid = perf_counter_tid(counter, comm_event->task);
2610
2611         perf_output_put(&handle, comm_event->event);
2612         perf_output_copy(&handle, comm_event->comm,
2613                                    comm_event->comm_size);
2614         perf_output_end(&handle);
2615 }
2616
2617 static int perf_counter_comm_match(struct perf_counter *counter)
2618 {
2619         if (counter->attr.comm)
2620                 return 1;
2621
2622         return 0;
2623 }
2624
2625 static void perf_counter_comm_ctx(struct perf_counter_context *ctx,
2626                                   struct perf_comm_event *comm_event)
2627 {
2628         struct perf_counter *counter;
2629
2630         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2631                 return;
2632
2633         rcu_read_lock();
2634         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2635                 if (perf_counter_comm_match(counter))
2636                         perf_counter_comm_output(counter, comm_event);
2637         }
2638         rcu_read_unlock();
2639 }
2640
2641 static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2642 {
2643         struct perf_cpu_context *cpuctx;
2644         struct perf_counter_context *ctx;
2645         unsigned int size;
2646         char *comm = comm_event->task->comm;
2647
2648         size = ALIGN(strlen(comm)+1, sizeof(u64));
2649
2650         comm_event->comm = comm;
2651         comm_event->comm_size = size;
2652
2653         comm_event->event.header.size = sizeof(comm_event->event) + size;
2654
2655         cpuctx = &get_cpu_var(perf_cpu_context);
2656         perf_counter_comm_ctx(&cpuctx->ctx, comm_event);
2657         put_cpu_var(perf_cpu_context);
2658
2659         rcu_read_lock();
2660         /*
2661          * doesn't really matter which of the child contexts the
2662          * event ends up in.
2663          */
2664         ctx = rcu_dereference(current->perf_counter_ctxp);
2665         if (ctx)
2666                 perf_counter_comm_ctx(ctx, comm_event);
2667         rcu_read_unlock();
2668 }
2669
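     /*
      * Called when a task's comm changes (e.g. across exec): emits a
      * PERF_EVENT_COMM record carrying the new, u64-padded comm string.
      */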
2670 void perf_counter_comm(struct task_struct *task)
2671 {
2672         struct perf_comm_event comm_event;
2673
2674         if (!atomic_read(&nr_comm_counters))
2675                 return;
2676
2677         comm_event = (struct perf_comm_event){
2678                 .task   = task,
2679                 .event  = {
2680                         .header = { .type = PERF_EVENT_COMM, },
2681                 },
2682         };
2683
2684         perf_counter_comm_event(&comm_event);
2685 }
2686
2687 /*
2688  * mmap tracking
2689  */
2690
2691 struct perf_mmap_event {
2692         struct vm_area_struct   *vma;
2693
2694         const char              *file_name;
2695         int                     file_size;
2696
2697         struct {
2698                 struct perf_event_header        header;
2699
2700                 u32                             pid;
2701                 u32                             tid;
2702                 u64                             start;
2703                 u64                             len;
2704                 u64                             pgoff;
2705         } event;
2706 };
2707
2708 static void perf_counter_mmap_output(struct perf_counter *counter,
2709                                      struct perf_mmap_event *mmap_event)
2710 {
2711         struct perf_output_handle handle;
2712         int size = mmap_event->event.header.size;
2713         int ret = perf_output_begin(&handle, counter, size, 0, 0);
2714
2715         if (ret)
2716                 return;
2717
2718         mmap_event->event.pid = perf_counter_pid(counter, current);
2719         mmap_event->event.tid = perf_counter_tid(counter, current);
2720
2721         perf_output_put(&handle, mmap_event->event);
2722         perf_output_copy(&handle, mmap_event->file_name,
2723                                    mmap_event->file_size);
2724         perf_output_end(&handle);
2725 }
2726
2727 static int perf_counter_mmap_match(struct perf_counter *counter,
2728                                    struct perf_mmap_event *mmap_event)
2729 {
2730         if (counter->attr.mmap)
2731                 return 1;
2732
2733         return 0;
2734 }
2735
2736 static void perf_counter_mmap_ctx(struct perf_counter_context *ctx,
2737                                   struct perf_mmap_event *mmap_event)
2738 {
2739         struct perf_counter *counter;
2740
2741         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
2742                 return;
2743
2744         rcu_read_lock();
2745         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2746                 if (perf_counter_mmap_match(counter, mmap_event))
2747                         perf_counter_mmap_output(counter, mmap_event);
2748         }
2749         rcu_read_unlock();
2750 }
2751
2752 static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
2753 {
2754         struct perf_cpu_context *cpuctx;
2755         struct perf_counter_context *ctx;
2756         struct vm_area_struct *vma = mmap_event->vma;
2757         struct file *file = vma->vm_file;
2758         unsigned int size;
2759         char tmp[16];
2760         char *buf = NULL;
2761         const char *name;
2762
2763         if (file) {
2764                 buf = kzalloc(PATH_MAX, GFP_KERNEL);
2765                 if (!buf) {
2766                         name = strncpy(tmp, "//enomem", sizeof(tmp));
2767                         goto got_name;
2768                 }
2769                 name = d_path(&file->f_path, buf, PATH_MAX);
2770                 if (IS_ERR(name)) {
2771                         name = strncpy(tmp, "//toolong", sizeof(tmp));
2772                         goto got_name;
2773                 }
2774         } else {
2775                 name = arch_vma_name(mmap_event->vma);
2776                 if (name)
2777                         goto got_name;
2778
2779                 if (!vma->vm_mm) {
2780                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
2781                         goto got_name;
2782                 }
2783
2784                 name = strncpy(tmp, "//anon", sizeof(tmp));
2785                 goto got_name;
2786         }
2787
2788 got_name:
2789         size = ALIGN(strlen(name)+1, sizeof(u64));
2790
2791         mmap_event->file_name = name;
2792         mmap_event->file_size = size;
2793
2794         mmap_event->event.header.size = sizeof(mmap_event->event) + size;
2795
2796         cpuctx = &get_cpu_var(perf_cpu_context);
2797         perf_counter_mmap_ctx(&cpuctx->ctx, mmap_event);
2798         put_cpu_var(perf_cpu_context);
2799
2800         rcu_read_lock();
2801         /*
2802          * doesn't really matter which of the child contexts the
2803          * event ends up in.
2804          */
2805         ctx = rcu_dereference(current->perf_counter_ctxp);
2806         if (ctx)
2807                 perf_counter_mmap_ctx(ctx, mmap_event);
2808         rcu_read_unlock();
2809
2810         kfree(buf);
2811 }
2812
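     /*
      * Called from the mmap path: emits a PERF_EVENT_MMAP record with the
      * address range, the pgoff and the backing file name (or a synthetic
      * name such as "[vdso]" or "//anon").
      */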
2813 void __perf_counter_mmap(struct vm_area_struct *vma)
2814 {
2815         struct perf_mmap_event mmap_event;
2816
2817         if (!atomic_read(&nr_mmap_counters))
2818                 return;
2819
2820         mmap_event = (struct perf_mmap_event){
2821                 .vma    = vma,
2822                 .event  = {
2823                         .header = { .type = PERF_EVENT_MMAP, },
2824                         .start  = vma->vm_start,
2825                         .len    = vma->vm_end - vma->vm_start,
2826                         .pgoff  = vma->vm_pgoff,
2827                 },
2828         };
2829
2830         perf_counter_mmap_event(&mmap_event);
2831 }
2832
2833 /*
2834  * Log sample_period changes so that analysis tools can re-normalize the
2835  * event flow.
2836  */
2837
2838 static void perf_log_period(struct perf_counter *counter, u64 period)
2839 {
2840         struct perf_output_handle handle;
2841         int ret;
2842
2843         struct {
2844                 struct perf_event_header        header;
2845                 u64                             time;
2846                 u64                             id;
2847                 u64                             period;
2848         } freq_event = {
2849                 .header = {
2850                         .type = PERF_EVENT_PERIOD,
2851                         .misc = 0,
2852                         .size = sizeof(freq_event),
2853                 },
2854                 .time = sched_clock(),
2855                 .id = counter->id,
2856                 .period = period,
2857         };
2858
2859         if (counter->hw.sample_period == period)
2860                 return;
2861
2862         ret = perf_output_begin(&handle, counter, sizeof(freq_event), 0, 0);
2863         if (ret)
2864                 return;
2865
2866         perf_output_put(&handle, freq_event);
2867         perf_output_end(&handle);
2868 }
2869
2870 /*
2871  * IRQ throttle logging
2872  */
2873
2874 static void perf_log_throttle(struct perf_counter *counter, int enable)
2875 {
2876         struct perf_output_handle handle;
2877         int ret;
2878
2879         struct {
2880                 struct perf_event_header        header;
2881                 u64                             time;
2882         } throttle_event = {
2883                 .header = {
2884                         .type = PERF_EVENT_THROTTLE + enable,
2885                         .misc = 0,
2886                         .size = sizeof(throttle_event),
2887                 },
2888                 .time = sched_clock(),
2889         };
2890
2891         ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
2892         if (ret)
2893                 return;
2894
2895         perf_output_put(&handle, throttle_event);
2896         perf_output_end(&handle);
2897 }
2898
2899 /*
2900  * Generic counter overflow handling.
2901  */
2902
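     /*
      * Returns non-zero when the caller should stop the counter, either
      * because the event_limit was reached or because the interrupt rate
      * exceeded sysctl_perf_counter_limit and got throttled.  The sample
      * itself is always written out via perf_counter_output().
      */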
2903 int perf_counter_overflow(struct perf_counter *counter,
2904                           int nmi, struct pt_regs *regs, u64 addr)
2905 {
2906         int events = atomic_read(&counter->event_limit);
2907         int throttle = counter->pmu->unthrottle != NULL;
2908         int ret = 0;
2909
2910         if (!throttle) {
2911                 counter->hw.interrupts++;
2912         } else {
2913                 if (counter->hw.interrupts != MAX_INTERRUPTS) {
2914                         counter->hw.interrupts++;
2915                         if (HZ*counter->hw.interrupts > (u64)sysctl_perf_counter_limit) {
2916                                 counter->hw.interrupts = MAX_INTERRUPTS;
2917                                 perf_log_throttle(counter, 0);
2918                                 ret = 1;
2919                         }
2920                 } else {
2921                         /*
2922                          * Keep re-disabling the counter even though we disabled
2923                          * it on the previous pass - just in case we raced with a
2924                          * sched-in and the counter got enabled again:
2925                          */
2926                         ret = 1;
2927                 }
2928         }
2929
2930         /*
2931          * XXX event_limit might not quite work as expected on inherited
2932          * counters
2933          */
2934
2935         counter->pending_kill = POLL_IN;
2936         if (events && atomic_dec_and_test(&counter->event_limit)) {
2937                 ret = 1;
2938                 counter->pending_kill = POLL_HUP;
2939                 if (nmi) {
2940                         counter->pending_disable = 1;
2941                         perf_pending_queue(&counter->pending,
2942                                            perf_pending_counter);
2943                 } else
2944                         perf_counter_disable(counter);
2945         }
2946
2947         perf_counter_output(counter, nmi, regs, addr);
2948         return ret;
2949 }
2950
2951 /*
2952  * Generic software counter infrastructure
2953  */
2954
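     /*
      * Fold the delta accumulated in hw.count (possibly from NMI context)
      * into counter->count and charge it against the remaining sample
      * period; the cmpxchg() loop keeps this safe against concurrent
      * updates.
      */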
2955 static void perf_swcounter_update(struct perf_counter *counter)
2956 {
2957         struct hw_perf_counter *hwc = &counter->hw;
2958         u64 prev, now;
2959         s64 delta;
2960
2961 again:
2962         prev = atomic64_read(&hwc->prev_count);
2963         now = atomic64_read(&hwc->count);
2964         if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
2965                 goto again;
2966
2967         delta = now - prev;
2968
2969         atomic64_add(delta, &counter->count);
2970         atomic64_sub(delta, &hwc->period_left);
2971 }
2972
2973 static void perf_swcounter_set_period(struct perf_counter *counter)
2974 {
2975         struct hw_perf_counter *hwc = &counter->hw;
2976         s64 left = atomic64_read(&hwc->period_left);
2977         s64 period = hwc->sample_period;
2978
2979         if (unlikely(left <= -period)) {
2980                 left = period;
2981                 atomic64_set(&hwc->period_left, left);
2982         }
2983
2984         if (unlikely(left <= 0)) {
2985                 left += period;
2986                 atomic64_add(period, &hwc->period_left);
2987         }
2988
2989         atomic64_set(&hwc->prev_count, -left);
2990         atomic64_set(&hwc->count, -left);
2991 }
2992
2993 static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
2994 {
2995         enum hrtimer_restart ret = HRTIMER_RESTART;
2996         struct perf_counter *counter;
2997         struct pt_regs *regs;
2998         u64 period;
2999
3000         counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3001         counter->pmu->read(counter);
3002
3003         regs = get_irq_regs();
3004         /*
3005          * In case we exclude kernel IPs or are somehow not in interrupt
3006          * context, provide the next best thing, the user IP.
3007          */
3008         if ((counter->attr.exclude_kernel || !regs) &&
3009                         !counter->attr.exclude_user)
3010                 regs = task_pt_regs(current);
3011
3012         if (regs) {
3013                 if (perf_counter_overflow(counter, 0, regs, 0))
3014                         ret = HRTIMER_NORESTART;
3015         }
3016
3017         period = max_t(u64, 10000, counter->hw.sample_period);
3018         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3019
3020         return ret;
3021 }
3022
3023 static void perf_swcounter_overflow(struct perf_counter *counter,
3024                                     int nmi, struct pt_regs *regs, u64 addr)
3025 {
3026         perf_swcounter_update(counter);
3027         perf_swcounter_set_period(counter);
3028         if (perf_counter_overflow(counter, nmi, regs, addr))
3029                 /* soft-disable the counter */
3030                 ;
3031
3032 }
3033
3034 static int perf_swcounter_is_counting(struct perf_counter *counter)
3035 {
3036         struct perf_counter_context *ctx;
3037         unsigned long flags;
3038         int count;
3039
3040         if (counter->state == PERF_COUNTER_STATE_ACTIVE)
3041                 return 1;
3042
3043         if (counter->state != PERF_COUNTER_STATE_INACTIVE)
3044                 return 0;
3045
3046         /*
3047          * If the counter is inactive, it could be just because
3048          * its task is scheduled out, or because it's in a group
3049          * which could not go on the PMU.  We want to count in
3050          * the first case but not the second.  If the context is
3051          * currently active then an inactive software counter must
3052          * be the second case.  If it's not currently active then
3053          * we need to know whether the counter was active when the
3054          * context was last active, which we can determine by
3055          * comparing counter->tstamp_stopped with ctx->time.
3056          *
3057          * We are within an RCU read-side critical section,
3058          * which protects the existence of *ctx.
3059          */
3060         ctx = counter->ctx;
3061         spin_lock_irqsave(&ctx->lock, flags);
3062         count = 1;
3063         /* Re-check state now we have the lock */
3064         if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
3065             counter->ctx->is_active ||
3066             counter->tstamp_stopped < ctx->time)
3067                 count = 0;
3068         spin_unlock_irqrestore(&ctx->lock, flags);
3069         return count;
3070 }
3071
3072 static int perf_swcounter_match(struct perf_counter *counter,
3073                                 enum perf_event_types type,
3074                                 u32 event, struct pt_regs *regs)
3075 {
3076         u64 event_config;
3077
3078         event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
3079
3080         if (!perf_swcounter_is_counting(counter))
3081                 return 0;
3082
3083         if (counter->attr.config != event_config)
3084                 return 0;
3085
3086         if (regs) {
3087                 if (counter->attr.exclude_user && user_mode(regs))
3088                         return 0;
3089
3090                 if (counter->attr.exclude_kernel && !user_mode(regs))
3091                         return 0;
3092         }
3093
3094         return 1;
3095 }
3096
3097 static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3098                                int nmi, struct pt_regs *regs, u64 addr)
3099 {
3100         int neg = atomic64_add_negative(nr, &counter->hw.count);
3101
3102         if (counter->hw.sample_period && !neg && regs)
3103                 perf_swcounter_overflow(counter, nmi, regs, addr);
3104 }
3105
3106 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3107                                      enum perf_event_types type, u32 event,
3108                                      u64 nr, int nmi, struct pt_regs *regs,
3109                                      u64 addr)
3110 {
3111         struct perf_counter *counter;
3112
3113         if (system_state != SYSTEM_RUNNING || list_empty(&ctx->event_list))
3114                 return;
3115
3116         rcu_read_lock();
3117         list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
3118                 if (perf_swcounter_match(counter, type, event, regs))
3119                         perf_swcounter_add(counter, nr, nmi, regs, addr);
3120         }
3121         rcu_read_unlock();
3122 }
3123
3124 static int *perf_swcounter_recursion_context(struct perf_cpu_context *cpuctx)
3125 {
3126         if (in_nmi())
3127                 return &cpuctx->recursion[3];
3128
3129         if (in_irq())
3130                 return &cpuctx->recursion[2];
3131
3132         if (in_softirq())
3133                 return &cpuctx->recursion[1];
3134
3135         return &cpuctx->recursion[0];
3136 }
3137
3138 static void __perf_swcounter_event(enum perf_event_types type, u32 event,
3139                                    u64 nr, int nmi, struct pt_regs *regs,
3140                                    u64 addr)
3141 {
3142         struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
3143         int *recursion = perf_swcounter_recursion_context(cpuctx);
3144         struct perf_counter_context *ctx;
3145
3146         if (*recursion)
3147                 goto out;
3148
3149         (*recursion)++;
3150         barrier();
3151
3152         perf_swcounter_ctx_event(&cpuctx->ctx, type, event,
3153                                  nr, nmi, regs, addr);
3154         rcu_read_lock();
3155         /*
3156          * doesn't really matter which of the child contexts the
3157          * event ends up in.
3158          */
3159         ctx = rcu_dereference(current->perf_counter_ctxp);
3160         if (ctx)
3161                 perf_swcounter_ctx_event(ctx, type, event, nr, nmi, regs, addr);
3162         rcu_read_unlock();
3163
3164         barrier();
3165         (*recursion)--;
3166
3167 out:
3168         put_cpu_var(perf_cpu_context);
3169 }
3170
3171 void
3172 perf_swcounter_event(u32 event, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
3173 {
3174         __perf_swcounter_event(PERF_TYPE_SOFTWARE, event, nr, nmi, regs, addr);
3175 }
3176
3177 static void perf_swcounter_read(struct perf_counter *counter)
3178 {
3179         perf_swcounter_update(counter);
3180 }
3181
3182 static int perf_swcounter_enable(struct perf_counter *counter)
3183 {
3184         perf_swcounter_set_period(counter);
3185         return 0;
3186 }
3187
3188 static void perf_swcounter_disable(struct perf_counter *counter)
3189 {
3190         perf_swcounter_update(counter);
3191 }
3192
3193 static const struct pmu perf_ops_generic = {
3194         .enable         = perf_swcounter_enable,
3195         .disable        = perf_swcounter_disable,
3196         .read           = perf_swcounter_read,
3197 };
3198
3199 /*
3200  * Software counter: cpu wall time clock
3201  */
3202
3203 static void cpu_clock_perf_counter_update(struct perf_counter *counter)
3204 {
3205         int cpu = raw_smp_processor_id();
3206         s64 prev;
3207         u64 now;
3208
3209         now = cpu_clock(cpu);
3210         prev = atomic64_read(&counter->hw.prev_count);
3211         atomic64_set(&counter->hw.prev_count, now);
3212         atomic64_add(now - prev, &counter->count);
3213 }
3214
3215 static int cpu_clock_perf_counter_enable(struct perf_counter *counter)
3216 {
3217         struct hw_perf_counter *hwc = &counter->hw;
3218         int cpu = raw_smp_processor_id();
3219
3220         atomic64_set(&hwc->prev_count, cpu_clock(cpu));
3221         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3222         hwc->hrtimer.function = perf_swcounter_hrtimer;
3223         if (hwc->sample_period) {
3224                 u64 period = max_t(u64, 10000, hwc->sample_period);
3225                 __hrtimer_start_range_ns(&hwc->hrtimer,
3226                                 ns_to_ktime(period), 0,
3227                                 HRTIMER_MODE_REL, 0);
3228         }
3229
3230         return 0;
3231 }
3232
3233 static void cpu_clock_perf_counter_disable(struct perf_counter *counter)
3234 {
3235         if (counter->hw.sample_period)
3236                 hrtimer_cancel(&counter->hw.hrtimer);
3237         cpu_clock_perf_counter_update(counter);
3238 }
3239
3240 static void cpu_clock_perf_counter_read(struct perf_counter *counter)
3241 {
3242         cpu_clock_perf_counter_update(counter);
3243 }
3244
3245 static const struct pmu perf_ops_cpu_clock = {
3246         .enable         = cpu_clock_perf_counter_enable,
3247         .disable        = cpu_clock_perf_counter_disable,
3248         .read           = cpu_clock_perf_counter_read,
3249 };
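
/*
 * Note that both clock based software counters (this one and the task
 * clock below) drive sampling from an hrtimer, and the requested
 * sample_period is clamped to at least 10000ns: e.g. asking for a
 * 4000ns period still arms the timer with a 10us period.
 */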
3250
3251 /*
3252  * Software counter: task time clock
3253  */
3254
3255 static void task_clock_perf_counter_update(struct perf_counter *counter, u64 now)
3256 {
3257         u64 prev;
3258         s64 delta;
3259
3260         prev = atomic64_xchg(&counter->hw.prev_count, now);
3261         delta = now - prev;
3262         atomic64_add(delta, &counter->count);
3263 }
3264
3265 static int task_clock_perf_counter_enable(struct perf_counter *counter)
3266 {
3267         struct hw_perf_counter *hwc = &counter->hw;
3268         u64 now;
3269
3270         now = counter->ctx->time;
3271
3272         atomic64_set(&hwc->prev_count, now);
3273         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3274         hwc->hrtimer.function = perf_swcounter_hrtimer;
3275         if (hwc->sample_period) {
3276                 u64 period = max_t(u64, 10000, hwc->sample_period);
3277                 __hrtimer_start_range_ns(&hwc->hrtimer,
3278                                 ns_to_ktime(period), 0,
3279                                 HRTIMER_MODE_REL, 0);
3280         }
3281
3282         return 0;
3283 }
3284
3285 static void task_clock_perf_counter_disable(struct perf_counter *counter)
3286 {
3287         if (counter->hw.sample_period)
3288                 hrtimer_cancel(&counter->hw.hrtimer);
3289         task_clock_perf_counter_update(counter, counter->ctx->time);
3291 }
3292
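/*
 * When read from NMI context the code below avoids update_context_time()
 * (which writes ctx->time/ctx->timestamp and might itself have been
 * interrupted); instead it extrapolates locally from the last recorded
 * ctx->time plus the perf_clock() delta since ctx->timestamp.
 */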
3293 static void task_clock_perf_counter_read(struct perf_counter *counter)
3294 {
3295         u64 time;
3296
3297         if (!in_nmi()) {
3298                 update_context_time(counter->ctx);
3299                 time = counter->ctx->time;
3300         } else {
3301                 u64 now = perf_clock();
3302                 u64 delta = now - counter->ctx->timestamp;
3303                 time = counter->ctx->time + delta;
3304         }
3305
3306         task_clock_perf_counter_update(counter, time);
3307 }
3308
3309 static const struct pmu perf_ops_task_clock = {
3310         .enable         = task_clock_perf_counter_enable,
3311         .disable        = task_clock_perf_counter_disable,
3312         .read           = task_clock_perf_counter_read,
3313 };
3314
3315 /*
3316  * Software counter: cpu migrations
3317  */
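/*
 * Expected to be called by the scheduler when a task migrates to another
 * CPU: the migration event is accounted in the target CPU's context and,
 * if the task has a counter context of its own, in that (temporarily
 * pinned) context as well.
 */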
3318 void perf_counter_task_migration(struct task_struct *task, int cpu)
3319 {
3320         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
3321         struct perf_counter_context *ctx;
3322
3323         perf_swcounter_ctx_event(&cpuctx->ctx, PERF_TYPE_SOFTWARE,
3324                                  PERF_COUNT_CPU_MIGRATIONS,
3325                                  1, 1, NULL, 0);
3326
3327         ctx = perf_pin_task_context(task);
3328         if (ctx) {
3329                 perf_swcounter_ctx_event(ctx, PERF_TYPE_SOFTWARE,
3330                                          PERF_COUNT_CPU_MIGRATIONS,
3331                                          1, 1, NULL, 0);
3332                 perf_unpin_context(ctx);
3333         }
3334 }
3335
3336 #ifdef CONFIG_EVENT_PROFILE
3337 void perf_tpcounter_event(int event_id)
3338 {
3339         struct pt_regs *regs = get_irq_regs();
3340
3341         if (!regs)
3342                 regs = task_pt_regs(current);
3343
3344         __perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, regs, 0);
3345 }
3346 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3347
3348 extern int ftrace_profile_enable(int);
3349 extern void ftrace_profile_disable(int);
3350
3351 static void tp_perf_counter_destroy(struct perf_counter *counter)
3352 {
3353         ftrace_profile_disable(perf_event_id(&counter->attr));
3354 }
3355
3356 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3357 {
3358         int event_id = perf_event_id(&counter->attr);
3359         int ret;
3360
3361         ret = ftrace_profile_enable(event_id);
3362         if (ret)
3363                 return NULL;
3364
3365         counter->destroy = tp_perf_counter_destroy;
3366         counter->hw.sample_period = counter->attr.sample_period;
3367
3368         return &perf_ops_generic;
3369 }
3370 #else
3371 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3372 {
3373         return NULL;
3374 }
3375 #endif
3376
3377 static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
3378 {
3379         const struct pmu *pmu = NULL;
3380
3381         /*
3382          * Software counters (currently) can't in general distinguish
3383          * between user, kernel and hypervisor events.
3384          * However, context switches and cpu migrations are considered
3385          * to be kernel events, and page faults are never hypervisor
3386          * events.
3387          */
3388         switch (perf_event_id(&counter->attr)) {
3389         case PERF_COUNT_CPU_CLOCK:
3390                 pmu = &perf_ops_cpu_clock;
3391
3392                 break;
3393         case PERF_COUNT_TASK_CLOCK:
3394                 /*
3395                  * If the user instantiates this as a per-cpu counter,
3396                  * use the cpu_clock counter instead.
3397                  */
3398                 if (counter->ctx->task)
3399                         pmu = &perf_ops_task_clock;
3400                 else
3401                         pmu = &perf_ops_cpu_clock;
3402
3403                 break;
3404         case PERF_COUNT_PAGE_FAULTS:
3405         case PERF_COUNT_PAGE_FAULTS_MIN:
3406         case PERF_COUNT_PAGE_FAULTS_MAJ:
3407         case PERF_COUNT_CONTEXT_SWITCHES:
3408         case PERF_COUNT_CPU_MIGRATIONS:
3409                 pmu = &perf_ops_generic;
3410                 break;
3411         }
3412
3413         return pmu;
3414 }
3415
3416 /*
3417  * Allocate and initialize a counter structure
3418  */
3419 static struct perf_counter *
3420 perf_counter_alloc(struct perf_counter_attr *attr,
3421                    int cpu,
3422                    struct perf_counter_context *ctx,
3423                    struct perf_counter *group_leader,
3424                    gfp_t gfpflags)
3425 {
3426         const struct pmu *pmu;
3427         struct perf_counter *counter;
3428         struct hw_perf_counter *hwc;
3429         long err;
3430
3431         counter = kzalloc(sizeof(*counter), gfpflags);
3432         if (!counter)
3433                 return ERR_PTR(-ENOMEM);
3434
3435         /*
3436          * Single counters are their own group leaders, with an
3437          * empty sibling list:
3438          */
3439         if (!group_leader)
3440                 group_leader = counter;
3441
3442         mutex_init(&counter->child_mutex);
3443         INIT_LIST_HEAD(&counter->child_list);
3444
3445         INIT_LIST_HEAD(&counter->list_entry);
3446         INIT_LIST_HEAD(&counter->event_entry);
3447         INIT_LIST_HEAD(&counter->sibling_list);
3448         init_waitqueue_head(&counter->waitq);
3449
3450         mutex_init(&counter->mmap_mutex);
3451
3452         counter->cpu            = cpu;
3453         counter->attr           = *attr;
3454         counter->group_leader   = group_leader;
3455         counter->pmu            = NULL;
3456         counter->ctx            = ctx;
3457         counter->oncpu          = -1;
3458
3459         counter->ns             = get_pid_ns(current->nsproxy->pid_ns);
3460         counter->id             = atomic64_inc_return(&perf_counter_id);
3461
3462         counter->state          = PERF_COUNTER_STATE_INACTIVE;
3463
3464         if (attr->disabled)
3465                 counter->state = PERF_COUNTER_STATE_OFF;
3466
3467         pmu = NULL;
3468
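        /*
         * Frequency mode: the initial sample_period below is only a first
         * guess derived from the tick length; e.g. with HZ=1000 (TICK_NSEC
         * == 1000000) a requested sample_freq of 1000 starts the counter
         * off with a period of 1000, which the frequency adjustment logic
         * is then expected to refine at runtime.  In plain period mode
         * attr->sample_period is used as-is.
         */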
3469         hwc = &counter->hw;
3470         if (attr->freq && attr->sample_freq)
3471                 hwc->sample_period = div64_u64(TICK_NSEC, attr->sample_freq);
3472         else
3473                 hwc->sample_period = attr->sample_period;
3474
3475         /*
3476          * we currently do not support PERF_SAMPLE_GROUP on inherited counters
3477          */
3478         if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
3479                 goto done;
3480
3481         if (perf_event_raw(attr)) {
3482                 pmu = hw_perf_counter_init(counter);
3483                 goto done;
3484         }
3485
3486         switch (perf_event_type(attr)) {
3487         case PERF_TYPE_HARDWARE:
3488                 pmu = hw_perf_counter_init(counter);
3489                 break;
3490
3491         case PERF_TYPE_SOFTWARE:
3492                 pmu = sw_perf_counter_init(counter);
3493                 break;
3494
3495         case PERF_TYPE_TRACEPOINT:
3496                 pmu = tp_perf_counter_init(counter);
3497                 break;
3498         }
3499 done:
3500         err = 0;
3501         if (!pmu)
3502                 err = -EINVAL;
3503         else if (IS_ERR(pmu))
3504                 err = PTR_ERR(pmu);
3505
3506         if (err) {
3507                 if (counter->ns)
3508                         put_pid_ns(counter->ns);
3509                 kfree(counter);
3510                 return ERR_PTR(err);
3511         }
3512
3513         counter->pmu = pmu;
3514
3515         atomic_inc(&nr_counters);
3516         if (counter->attr.mmap)
3517                 atomic_inc(&nr_mmap_counters);
3518         if (counter->attr.comm)
3519                 atomic_inc(&nr_comm_counters);
3520
3521         return counter;
3522 }
3523
3524 /**
3525  * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
3526  *
3527  * @attr_uptr:  event type attributes for monitoring/sampling
3528  * @pid:                target pid
3529  * @cpu:                target cpu
3530  * @group_fd:           group leader counter fd
3531  */
3532 SYSCALL_DEFINE5(perf_counter_open,
3533                 const struct perf_counter_attr __user *, attr_uptr,
3534                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
3535 {
3536         struct perf_counter *counter, *group_leader;
3537         struct perf_counter_attr attr;
3538         struct perf_counter_context *ctx;
3539         struct file *counter_file = NULL;
3540         struct file *group_file = NULL;
3541         int fput_needed = 0;
3542         int fput_needed2 = 0;
3543         int ret;
3544
3545         /* for future expandability... */
3546         if (flags)
3547                 return -EINVAL;
3548
3549         if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
3550                 return -EFAULT;
3551
3552         /*
3553          * Get the target context (task or percpu):
3554          */
3555         ctx = find_get_context(pid, cpu);
3556         if (IS_ERR(ctx))
3557                 return PTR_ERR(ctx);
3558
3559         /*
3560          * Look up the group leader (we will attach this counter to it):
3561          */
3562         group_leader = NULL;
3563         if (group_fd != -1) {
3564                 ret = -EINVAL;
3565                 group_file = fget_light(group_fd, &fput_needed);
3566                 if (!group_file)
3567                         goto err_put_context;
3568                 if (group_file->f_op != &perf_fops)
3569                         goto err_put_context;
3570
3571                 group_leader = group_file->private_data;
3572                 /*
3573                  * Do not allow a recursive hierarchy: the group leader we
3574                  * attach to must itself be a leader, not a sibling of another group:
3575                  */
3576                 if (group_leader->group_leader != group_leader)
3577                         goto err_put_context;
3578                 /*
3579                  * Do not allow to attach to a group in a different
3580                  * task or CPU context:
3581                  */
3582                 if (group_leader->ctx != ctx)
3583                         goto err_put_context;
3584                 /*
3585                  * Only a group leader can be exclusive or pinned
3586                  */
3587                 if (attr.exclusive || attr.pinned)
3588                         goto err_put_context;
3589         }
3590
3591         counter = perf_counter_alloc(&attr, cpu, ctx, group_leader,
3592                                      GFP_KERNEL);
3593         ret = PTR_ERR(counter);
3594         if (IS_ERR(counter))
3595                 goto err_put_context;
3596
3597         ret = anon_inode_getfd("[perf_counter]", &perf_fops, counter, 0);
3598         if (ret < 0)
3599                 goto err_free_put_context;
3600
3601         counter_file = fget_light(ret, &fput_needed2);
3602         if (!counter_file)
3603                 goto err_free_put_context;
3604
3605         counter->filp = counter_file;
3606         WARN_ON_ONCE(ctx->parent_ctx);
3607         mutex_lock(&ctx->mutex);
3608         perf_install_in_context(ctx, counter, cpu);
3609         ++ctx->generation;
3610         mutex_unlock(&ctx->mutex);
3611
3612         counter->owner = current;
3613         get_task_struct(current);
3614         mutex_lock(&current->perf_counter_mutex);
3615         list_add_tail(&counter->owner_entry, &current->perf_counter_list);
3616         mutex_unlock(&current->perf_counter_mutex);
3617
3618         fput_light(counter_file, fput_needed2);
3619
3620 out_fput:
3621         fput_light(group_file, fput_needed);
3622
3623         return ret;
3624
3625 err_free_put_context:
3626         kfree(counter);
3627
3628 err_put_context:
3629         put_ctx(ctx);
3630
3631         goto out_fput;
3632 }
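
/*
 * A minimal userspace sketch (not kernel code, and only illustrative --
 * the syscall number and the exact perf_counter_attr encoding come from
 * this tree's headers):
 *
 *	struct perf_counter_attr attr;
 *	unsigned long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	... describe the event in attr (type/config, sample_period, ...) ...
 *	fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
 *	if (fd >= 0) {
 *		... run the workload to be measured ...
 *		read(fd, &count, sizeof(count));
 *		close(fd);
 *	}
 *
 * pid == 0 counts the calling task, cpu == -1 means "any CPU" for a
 * per-task counter, group_fd == -1 makes the counter its own group
 * leader, and flags must currently be 0.
 */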
3633
3634 /*
3635  * inherit a counter from parent task to child task:
3636  */
3637 static struct perf_counter *
3638 inherit_counter(struct perf_counter *parent_counter,
3639               struct task_struct *parent,
3640               struct perf_counter_context *parent_ctx,
3641               struct task_struct *child,
3642               struct perf_counter *group_leader,
3643               struct perf_counter_context *child_ctx)
3644 {
3645         struct perf_counter *child_counter;
3646
3647         /*
3648          * Instead of creating recursive hierarchies of counters,
3649          * we link inherited counters back to the original parent,
3650          * which is guaranteed to have a filp that we can use as the
3651          * reference count:
3652          */
3653         if (parent_counter->parent)
3654                 parent_counter = parent_counter->parent;
3655
3656         child_counter = perf_counter_alloc(&parent_counter->attr,
3657                                            parent_counter->cpu, child_ctx,
3658                                            group_leader, GFP_KERNEL);
3659         if (IS_ERR(child_counter))
3660                 return child_counter;
3661         get_ctx(child_ctx);
3662
3663         /*
3664          * Make the child state follow the state of the parent counter,
3665          * not its attr.disabled bit.  We hold the parent's mutex,
3666          * so we won't race with perf_counter_{en, dis}able_family.
3667          */
3668         if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
3669                 child_counter->state = PERF_COUNTER_STATE_INACTIVE;
3670         else
3671                 child_counter->state = PERF_COUNTER_STATE_OFF;
3672
3673         /*
3674          * Link it up in the child's context:
3675          */
3676         add_counter_to_ctx(child_counter, child_ctx);
3677
3678         child_counter->parent = parent_counter;
3679         /*
3680          * inherit into child's child as well:
3681          */
3682         child_counter->attr.inherit = 1;
3683
3684         /*
3685          * Get a reference to the parent filp - we will fput it
3686          * when the child counter exits. This is safe to do because
3687          * we are in the parent and we know that the filp still
3688          * exists and has a nonzero count:
3689          */
3690         atomic_long_inc(&parent_counter->filp->f_count);
3691
3692         /*
3693          * Link this into the parent counter's child list
3694          */
3695         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3696         mutex_lock(&parent_counter->child_mutex);
3697         list_add_tail(&child_counter->child_list, &parent_counter->child_list);
3698         mutex_unlock(&parent_counter->child_mutex);
3699
3700         return child_counter;
3701 }
3702
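/*
 * Inherit a whole counter group: the leader is inherited first (as its
 * own leader), then each sibling is inherited with the new child leader
 * as its group leader.
 */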
3703 static int inherit_group(struct perf_counter *parent_counter,
3704               struct task_struct *parent,
3705               struct perf_counter_context *parent_ctx,
3706               struct task_struct *child,
3707               struct perf_counter_context *child_ctx)
3708 {
3709         struct perf_counter *leader;
3710         struct perf_counter *sub;
3711         struct perf_counter *child_ctr;
3712
3713         leader = inherit_counter(parent_counter, parent, parent_ctx,
3714                                  child, NULL, child_ctx);
3715         if (IS_ERR(leader))
3716                 return PTR_ERR(leader);
3717         list_for_each_entry(sub, &parent_counter->sibling_list, list_entry) {
3718                 child_ctr = inherit_counter(sub, parent, parent_ctx,
3719                                             child, leader, child_ctx);
3720                 if (IS_ERR(child_ctr))
3721                         return PTR_ERR(child_ctr);
3722         }
3723         return 0;
3724 }
3725
3726 static void sync_child_counter(struct perf_counter *child_counter,
3727                                struct perf_counter *parent_counter)
3728 {
3729         u64 child_val;
3730
3731         child_val = atomic64_read(&child_counter->count);
3732
3733         /*
3734          * Add back the child's count to the parent's count:
3735          */
3736         atomic64_add(child_val, &parent_counter->count);
3737         atomic64_add(child_counter->total_time_enabled,
3738                      &parent_counter->child_total_time_enabled);
3739         atomic64_add(child_counter->total_time_running,
3740                      &parent_counter->child_total_time_running);
3741
3742         /*
3743          * Remove this counter from the parent's list
3744          */
3745         WARN_ON_ONCE(parent_counter->ctx->parent_ctx);
3746         mutex_lock(&parent_counter->child_mutex);
3747         list_del_init(&child_counter->child_list);
3748         mutex_unlock(&parent_counter->child_mutex);
3749
3750         /*
3751          * Release the parent counter, if this was the last
3752          * reference to it.
3753          */
3754         fput(parent_counter->filp);
3755 }
3756
3757 static void
3758 __perf_counter_exit_task(struct perf_counter *child_counter,
3759                          struct perf_counter_context *child_ctx)
3760 {
3761         struct perf_counter *parent_counter;
3762
3763         update_counter_times(child_counter);
3764         perf_counter_remove_from_context(child_counter);
3765
3766         parent_counter = child_counter->parent;
3767         /*
3768          * It can happen that parent exits first, and has counters
3769          * that are still around due to the child reference. These
3770          * counters need to be zapped - but otherwise linger.
3771          */
3772         if (parent_counter) {
3773                 sync_child_counter(child_counter, parent_counter);
3774                 free_counter(child_counter);
3775         }
3776 }
3777
3778 /*
3779  * When a child task exits, feed back counter values to parent counters.
3780  */
3781 void perf_counter_exit_task(struct task_struct *child)
3782 {
3783         struct perf_counter *child_counter, *tmp;
3784         struct perf_counter_context *child_ctx;
3785         unsigned long flags;
3786
3787         if (likely(!child->perf_counter_ctxp))
3788                 return;
3789
3790         local_irq_save(flags);
3791         /*
3792          * We can't reschedule here because interrupts are disabled,
3793          * and either the child is current or it is a task that can't be
3794          * scheduled, so we are now safe from a reschedule changing
3795          * our context.
3796          */
3797         child_ctx = child->perf_counter_ctxp;
3798         __perf_counter_task_sched_out(child_ctx);
3799
3800         /*
3801          * Take the context lock here so that if find_get_context is
3802          * reading child->perf_counter_ctxp, we wait until it has
3803          * incremented the context's refcount before we do put_ctx below.
3804          */
3805         spin_lock(&child_ctx->lock);
3806         child->perf_counter_ctxp = NULL;
3807         if (child_ctx->parent_ctx) {
3808                 /*
3809                  * This context is a clone; unclone it so it can't get
3810                  * swapped to another process while we're removing all
3811                  * the counters from it.
3812                  */
3813                 put_ctx(child_ctx->parent_ctx);
3814                 child_ctx->parent_ctx = NULL;
3815         }
3816         spin_unlock(&child_ctx->lock);
3817         local_irq_restore(flags);
3818
3819         mutex_lock(&child_ctx->mutex);
3820
3821 again:
3822         list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
3823                                  list_entry)
3824                 __perf_counter_exit_task(child_counter, child_ctx);
3825
3826         /*
3827          * If the last counter was a group counter, it will have appended all
3828          * its siblings to the list, but we obtained 'tmp' before that, so it
3829          * still points to the list head and terminates the iteration early.
3830          */
3831         if (!list_empty(&child_ctx->counter_list))
3832                 goto again;
3833
3834         mutex_unlock(&child_ctx->mutex);
3835
3836         put_ctx(child_ctx);
3837 }
3838
3839 /*
3840  * Free an unexposed, unused context, as created by inheritance in
3841  * perf_counter_init_task() below; used by fork() in case of failure.
3842  */
3843 void perf_counter_free_task(struct task_struct *task)
3844 {
3845         struct perf_counter_context *ctx = task->perf_counter_ctxp;
3846         struct perf_counter *counter, *tmp;
3847
3848         if (!ctx)
3849                 return;
3850
3851         mutex_lock(&ctx->mutex);
3852 again:
3853         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry) {
3854                 struct perf_counter *parent = counter->parent;
3855
3856                 if (WARN_ON_ONCE(!parent))
3857                         continue;
3858
3859                 mutex_lock(&parent->child_mutex);
3860                 list_del_init(&counter->child_list);
3861                 mutex_unlock(&parent->child_mutex);
3862
3863                 fput(parent->filp);
3864
3865                 list_del_counter(counter, ctx);
3866                 free_counter(counter);
3867         }
3868
3869         if (!list_empty(&ctx->counter_list))
3870                 goto again;
3871
3872         mutex_unlock(&ctx->mutex);
3873
3874         put_ctx(ctx);
3875 }
3876
3877 /*
3878  * Initialize the perf_counter context in task_struct
3879  */
3880 int perf_counter_init_task(struct task_struct *child)
3881 {
3882         struct perf_counter_context *child_ctx, *parent_ctx;
3883         struct perf_counter_context *cloned_ctx;
3884         struct perf_counter *counter;
3885         struct task_struct *parent = current;
3886         int inherited_all = 1;
3887         int ret = 0;
3888
3889         child->perf_counter_ctxp = NULL;
3890
3891         mutex_init(&child->perf_counter_mutex);
3892         INIT_LIST_HEAD(&child->perf_counter_list);
3893
3894         if (likely(!parent->perf_counter_ctxp))
3895                 return 0;
3896
3897         /*
3898          * This is executed from the parent task context, so inherit
3899          * counters that have been marked for cloning.
3900          * First allocate and initialize a context for the child.
3901          */
3902
3903         child_ctx = kmalloc(sizeof(struct perf_counter_context), GFP_KERNEL);
3904         if (!child_ctx)
3905                 return -ENOMEM;
3906
3907         __perf_counter_init_context(child_ctx, child);
3908         child->perf_counter_ctxp = child_ctx;
3909         get_task_struct(child);
3910
3911         /*
3912          * If the parent's context is a clone, pin it so it won't get
3913          * swapped under us.
3914          */
3915         parent_ctx = perf_pin_task_context(parent);
3916
3917         /*
3918          * No need to check if parent_ctx != NULL here; since we saw
3919          * it non-NULL earlier, the only reason for it to become NULL
3920          * is if we exit, and since we're currently in the middle of
3921          * a fork we can't be exiting at the same time.
3922          */
3923
3924         /*
3925          * Lock the parent list. No need to lock the child - not PID
3926          * hashed yet and not running, so nobody can access it.
3927          */
3928         mutex_lock(&parent_ctx->mutex);
3929
3930         /*
3931          * We don't have to disable NMIs - we are only looking at
3932          * the list, not manipulating it:
3933          */
3934         list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
3935                 if (counter != counter->group_leader)
3936                         continue;
3937
3938                 if (!counter->attr.inherit) {
3939                         inherited_all = 0;
3940                         continue;
3941                 }
3942
3943                 ret = inherit_group(counter, parent, parent_ctx,
3944                                              child, child_ctx);
3945                 if (ret) {
3946                         inherited_all = 0;
3947                         break;
3948                 }
3949         }
3950
3951         if (inherited_all) {
3952                 /*
3953                  * Mark the child context as a clone of the parent
3954                  * context, or of whatever the parent is a clone of.
3955                  * Note that if the parent is a clone, it could get
3956                  * uncloned at any point, but that doesn't matter
3957                  * because the list of counters and the generation
3958                  * count can't have changed since we took the mutex.
3959                  */
3960                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
3961                 if (cloned_ctx) {
3962                         child_ctx->parent_ctx = cloned_ctx;
3963                         child_ctx->parent_gen = parent_ctx->parent_gen;
3964                 } else {
3965                         child_ctx->parent_ctx = parent_ctx;
3966                         child_ctx->parent_gen = parent_ctx->generation;
3967                 }
3968                 get_ctx(child_ctx->parent_ctx);
3969         }
3970
3971         mutex_unlock(&parent_ctx->mutex);
3972
3973         perf_unpin_context(parent_ctx);
3974
3975         return ret;
3976 }
3977
3978 static void __cpuinit perf_counter_init_cpu(int cpu)
3979 {
3980         struct perf_cpu_context *cpuctx;
3981
3982         cpuctx = &per_cpu(perf_cpu_context, cpu);
3983         __perf_counter_init_context(&cpuctx->ctx, NULL);
3984
3985         spin_lock(&perf_resource_lock);
3986         cpuctx->max_pertask = perf_max_counters - perf_reserved_percpu;
3987         spin_unlock(&perf_resource_lock);
3988
3989         hw_perf_counter_setup(cpu);
3990 }
3991
3992 #ifdef CONFIG_HOTPLUG_CPU
3993 static void __perf_counter_exit_cpu(void *info)
3994 {
3995         struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
3996         struct perf_counter_context *ctx = &cpuctx->ctx;
3997         struct perf_counter *counter, *tmp;
3998
3999         list_for_each_entry_safe(counter, tmp, &ctx->counter_list, list_entry)
4000                 __perf_counter_remove_from_context(counter);
4001 }
4002 static void perf_counter_exit_cpu(int cpu)
4003 {
4004         struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
4005         struct perf_counter_context *ctx = &cpuctx->ctx;
4006
4007         mutex_lock(&ctx->mutex);
4008         smp_call_function_single(cpu, __perf_counter_exit_cpu, NULL, 1);
4009         mutex_unlock(&ctx->mutex);
4010 }
4011 #else
4012 static inline void perf_counter_exit_cpu(int cpu) { }
4013 #endif
4014
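/*
 * CPU hotplug callback: set up the per-CPU context when a CPU is being
 * brought up, and remove its counters just before it is taken down.
 */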
4015 static int __cpuinit
4016 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
4017 {
4018         unsigned int cpu = (long)hcpu;
4019
4020         switch (action) {
4021
4022         case CPU_UP_PREPARE:
4023         case CPU_UP_PREPARE_FROZEN:
4024                 perf_counter_init_cpu(cpu);
4025                 break;
4026
4027         case CPU_DOWN_PREPARE:
4028         case CPU_DOWN_PREPARE_FROZEN:
4029                 perf_counter_exit_cpu(cpu);
4030                 break;
4031
4032         default:
4033                 break;
4034         }
4035
4036         return NOTIFY_OK;
4037 }
4038
4039 /*
4040  * This has to have a higher priority than migration_notifier in sched.c.
4041  */
4042 static struct notifier_block __cpuinitdata perf_cpu_nb = {
4043         .notifier_call          = perf_cpu_notify,
4044         .priority               = 20,
4045 };
4046
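/*
 * Boot-time setup: initialize the boot CPU's context by invoking the
 * notifier by hand (it is not registered yet), then register it so all
 * later CPUs go through the hotplug path above.
 */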
4047 void __init perf_counter_init(void)
4048 {
4049         perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
4050                         (void *)(long)smp_processor_id());
4051         register_cpu_notifier(&perf_cpu_nb);
4052 }
4053
4054 static ssize_t perf_show_reserve_percpu(struct sysdev_class *class, char *buf)
4055 {
4056         return sprintf(buf, "%d\n", perf_reserved_percpu);
4057 }
4058
4059 static ssize_t
4060 perf_set_reserve_percpu(struct sysdev_class *class,
4061                         const char *buf,
4062                         size_t count)
4063 {
4064         struct perf_cpu_context *cpuctx;
4065         unsigned long val;
4066         int err, cpu, mpt;
4067
4068         err = strict_strtoul(buf, 10, &val);
4069         if (err)
4070                 return err;
4071         if (val > perf_max_counters)
4072                 return -EINVAL;
4073
4074         spin_lock(&perf_resource_lock);
4075         perf_reserved_percpu = val;
4076         for_each_online_cpu(cpu) {
4077                 cpuctx = &per_cpu(perf_cpu_context, cpu);
4078                 spin_lock_irq(&cpuctx->ctx.lock);
4079                 mpt = min(perf_max_counters - cpuctx->ctx.nr_counters,
4080                           perf_max_counters - perf_reserved_percpu);
4081                 cpuctx->max_pertask = mpt;
4082                 spin_unlock_irq(&cpuctx->ctx.lock);
4083         }
4084         spin_unlock(&perf_resource_lock);
4085
4086         return count;
4087 }
4088
4089 static ssize_t perf_show_overcommit(struct sysdev_class *class, char *buf)
4090 {
4091         return sprintf(buf, "%d\n", perf_overcommit);
4092 }
4093
4094 static ssize_t
4095 perf_set_overcommit(struct sysdev_class *class, const char *buf, size_t count)
4096 {
4097         unsigned long val;
4098         int err;
4099
4100         err = strict_strtoul(buf, 10, &val);
4101         if (err)
4102                 return err;
4103         if (val > 1)
4104                 return -EINVAL;
4105
4106         spin_lock(&perf_resource_lock);
4107         perf_overcommit = val;
4108         spin_unlock(&perf_resource_lock);
4109
4110         return count;
4111 }
4112
4113 static SYSDEV_CLASS_ATTR(
4114                                 reserve_percpu,
4115                                 0644,
4116                                 perf_show_reserve_percpu,
4117                                 perf_set_reserve_percpu
4118                         );
4119
4120 static SYSDEV_CLASS_ATTR(
4121                                 overcommit,
4122                                 0644,
4123                                 perf_show_overcommit,
4124                                 perf_set_overcommit
4125                         );
4126
4127 static struct attribute *perfclass_attrs[] = {
4128         &attr_reserve_percpu.attr,
4129         &attr_overcommit.attr,
4130         NULL
4131 };
4132
4133 static struct attribute_group perfclass_attr_group = {
4134         .attrs                  = perfclass_attrs,
4135         .name                   = "perf_counters",
4136 };
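
/*
 * With the group registered against the cpu sysdev class below, the two
 * knobs should show up as (assuming the usual sysfs layout):
 *
 *	/sys/devices/system/cpu/perf_counters/reserve_percpu
 *	/sys/devices/system/cpu/perf_counters/overcommit
 *
 * e.g. "echo 2 > .../reserve_percpu" reserves two counters on each CPU
 * (shrinking that CPU's max_pertask accordingly), while overcommit only
 * accepts 0 or 1.
 */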
4137
4138 static int __init perf_counter_sysfs_init(void)
4139 {
4140         return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
4141                                   &perfclass_attr_group);
4142 }
4143 device_initcall(perf_counter_sysfs_init);