1 /*
2  * Performance events core code:
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
7  *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
8  *
9  * For licensing details see kernel-base/COPYING
10  */
11
12 #include <linux/fs.h>
13 #include <linux/mm.h>
14 #include <linux/cpu.h>
15 #include <linux/smp.h>
16 #include <linux/file.h>
17 #include <linux/poll.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
20 #include <linux/sysfs.h>
21 #include <linux/dcache.h>
22 #include <linux/percpu.h>
23 #include <linux/ptrace.h>
24 #include <linux/vmstat.h>
25 #include <linux/vmalloc.h>
26 #include <linux/hardirq.h>
27 #include <linux/rculist.h>
28 #include <linux/uaccess.h>
29 #include <linux/syscalls.h>
30 #include <linux/anon_inodes.h>
31 #include <linux/kernel_stat.h>
32 #include <linux/perf_event.h>
33 #include <linux/ftrace_event.h>
34
35 #include <asm/irq_regs.h>
36
37 static atomic_t nr_events __read_mostly;
38 static atomic_t nr_mmap_events __read_mostly;
39 static atomic_t nr_comm_events __read_mostly;
40 static atomic_t nr_task_events __read_mostly;
41
42 static LIST_HEAD(pmus);
43 static DEFINE_MUTEX(pmus_lock);
44 static struct srcu_struct pmus_srcu;
45
46 /*
47  * perf event paranoia level:
48  *  -1 - not paranoid at all
49  *   0 - disallow raw tracepoint access for unpriv
50  *   1 - disallow cpu events for unpriv
51  *   2 - disallow kernel profiling for unpriv
52  */
53 int sysctl_perf_event_paranoid __read_mostly = 1;
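/*
 * For illustration (assuming the usual sysctl wiring in kernel/sysctl.c):
 * this knob is exposed to admins as /proc/sys/kernel/perf_event_paranoid,
 * so e.g.
 *
 *   echo 2 > /proc/sys/kernel/perf_event_paranoid
 *
 * restricts unprivileged users to profiling their own user-space code.
 */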
54
55 int sysctl_perf_event_mlock __read_mostly = 512; /* 'free' kb per user */
56
57 /*
58  * max perf event sample rate
59  */
60 int sysctl_perf_event_sample_rate __read_mostly = 100000;
61
62 static atomic64_t perf_event_id;
63
64 void __weak perf_event_print_debug(void)        { }
65
66 extern __weak const char *perf_pmu_name(void)
67 {
68         return "pmu";
69 }
70
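/*
 * perf_pmu_disable()/perf_pmu_enable() nest: a per-cpu count is bumped on
 * disable and dropped on enable, and the pmu callbacks only run on the
 * 0 -> 1 and 1 -> 0 transitions, so paired calls may be nested freely.
 */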
71 void perf_pmu_disable(struct pmu *pmu)
72 {
73         int *count = this_cpu_ptr(pmu->pmu_disable_count);
74         if (!(*count)++)
75                 pmu->pmu_disable(pmu);
76 }
77
78 void perf_pmu_enable(struct pmu *pmu)
79 {
80         int *count = this_cpu_ptr(pmu->pmu_disable_count);
81         if (!--(*count))
82                 pmu->pmu_enable(pmu);
83 }
84
85 static DEFINE_PER_CPU(struct list_head, rotation_list);
86
87 /*
88  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
89  * because they're strictly cpu affine and rotate_start is called with IRQs
90  * disabled, while rotate_context is called from IRQ context.
91  */
92 static void perf_pmu_rotate_start(struct pmu *pmu)
93 {
94         struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
95         struct list_head *head = &__get_cpu_var(rotation_list);
96
97         WARN_ON(!irqs_disabled());
98
99         if (list_empty(&cpuctx->rotation_list))
100                 list_add(&cpuctx->rotation_list, head);
101 }
102
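/*
 * Context lifetime is reference counted: get_ctx()/put_ctx() pair up, and
 * the final put_ctx() drops the references on the parent context and the
 * task, then frees the context via RCU so lock-free readers remain safe.
 */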
103 static void get_ctx(struct perf_event_context *ctx)
104 {
105         WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
106 }
107
108 static void free_ctx(struct rcu_head *head)
109 {
110         struct perf_event_context *ctx;
111
112         ctx = container_of(head, struct perf_event_context, rcu_head);
113         kfree(ctx);
114 }
115
116 static void put_ctx(struct perf_event_context *ctx)
117 {
118         if (atomic_dec_and_test(&ctx->refcount)) {
119                 if (ctx->parent_ctx)
120                         put_ctx(ctx->parent_ctx);
121                 if (ctx->task)
122                         put_task_struct(ctx->task);
123                 call_rcu(&ctx->rcu_head, free_ctx);
124         }
125 }
126
127 static void unclone_ctx(struct perf_event_context *ctx)
128 {
129         if (ctx->parent_ctx) {
130                 put_ctx(ctx->parent_ctx);
131                 ctx->parent_ctx = NULL;
132         }
133 }
134
135 /*
136  * If we inherit events we want to return the parent event id
137  * to userspace.
138  */
139 static u64 primary_event_id(struct perf_event *event)
140 {
141         u64 id = event->id;
142
143         if (event->parent)
144                 id = event->parent->id;
145
146         return id;
147 }
148
149 /*
150  * Get the perf_event_context for a task and lock it.
151  * This has to cope with the fact that until it is locked,
152  * the context could get moved to another task.
153  */
154 static struct perf_event_context *
155 perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
156 {
157         struct perf_event_context *ctx;
158
159         rcu_read_lock();
160 retry:
161         ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
162         if (ctx) {
163                 /*
164                  * If this context is a clone of another, it might
165                  * get swapped for another underneath us by
166                  * perf_event_task_sched_out, though the
167                  * rcu_read_lock() protects us from any context
168                  * getting freed.  Lock the context and check if it
169                  * got swapped before we could get the lock, and retry
170                  * if so.  If we locked the right context, then it
171                  * can't get swapped on us any more.
172                  */
173                 raw_spin_lock_irqsave(&ctx->lock, *flags);
174                 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
175                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
176                         goto retry;
177                 }
178
179                 if (!atomic_inc_not_zero(&ctx->refcount)) {
180                         raw_spin_unlock_irqrestore(&ctx->lock, *flags);
181                         ctx = NULL;
182                 }
183         }
184         rcu_read_unlock();
185         return ctx;
186 }
187
188 /*
189  * Get the context for a task and increment its pin_count so it
190  * can't get swapped to another task.  This also increments its
191  * reference count so that the context can't get freed.
192  */
193 static struct perf_event_context *
194 perf_pin_task_context(struct task_struct *task, int ctxn)
195 {
196         struct perf_event_context *ctx;
197         unsigned long flags;
198
199         ctx = perf_lock_task_context(task, ctxn, &flags);
200         if (ctx) {
201                 ++ctx->pin_count;
202                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
203         }
204         return ctx;
205 }
206
207 static void perf_unpin_context(struct perf_event_context *ctx)
208 {
209         unsigned long flags;
210
211         raw_spin_lock_irqsave(&ctx->lock, flags);
212         --ctx->pin_count;
213         raw_spin_unlock_irqrestore(&ctx->lock, flags);
214         put_ctx(ctx);
215 }
216
217 static inline u64 perf_clock(void)
218 {
219         return local_clock();
220 }
221
222 /*
223  * Update the record of the current time in a context.
224  */
225 static void update_context_time(struct perf_event_context *ctx)
226 {
227         u64 now = perf_clock();
228
229         ctx->time += now - ctx->timestamp;
230         ctx->timestamp = now;
231 }
232
233 /*
234  * Update the total_time_enabled and total_time_running fields for an event.
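 * time_enabled counts wall time since the event was enabled, while
 * time_running only counts the time the event was actually on the PMU;
 * user space can use the ratio to scale counts of multiplexed events.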
235  */
236 static void update_event_times(struct perf_event *event)
237 {
238         struct perf_event_context *ctx = event->ctx;
239         u64 run_end;
240
241         if (event->state < PERF_EVENT_STATE_INACTIVE ||
242             event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
243                 return;
244
245         if (ctx->is_active)
246                 run_end = ctx->time;
247         else
248                 run_end = event->tstamp_stopped;
249
250         event->total_time_enabled = run_end - event->tstamp_enabled;
251
252         if (event->state == PERF_EVENT_STATE_INACTIVE)
253                 run_end = event->tstamp_stopped;
254         else
255                 run_end = ctx->time;
256
257         event->total_time_running = run_end - event->tstamp_running;
258 }
259
260 /*
261  * Update total_time_enabled and total_time_running for all events in a group.
262  */
263 static void update_group_times(struct perf_event *leader)
264 {
265         struct perf_event *event;
266
267         update_event_times(leader);
268         list_for_each_entry(event, &leader->sibling_list, group_entry)
269                 update_event_times(event);
270 }
271
272 static struct list_head *
273 ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
274 {
275         if (event->attr.pinned)
276                 return &ctx->pinned_groups;
277         else
278                 return &ctx->flexible_groups;
279 }
280
281 /*
282  * Add an event to the lists for its context.
283  * Must be called with ctx->mutex and ctx->lock held.
284  */
285 static void
286 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
287 {
288         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
289         event->attach_state |= PERF_ATTACH_CONTEXT;
290
291         /*
292          * If we're a stand alone event or group leader, we go to the context
293          * list, group events are kept attached to the group so that
294          * perf_group_detach can, at all times, locate all siblings.
295          */
296         if (event->group_leader == event) {
297                 struct list_head *list;
298
299                 if (is_software_event(event))
300                         event->group_flags |= PERF_GROUP_SOFTWARE;
301
302                 list = ctx_group_list(event, ctx);
303                 list_add_tail(&event->group_entry, list);
304         }
305
306         list_add_rcu(&event->event_entry, &ctx->event_list);
307         if (!ctx->nr_events)
308                 perf_pmu_rotate_start(ctx->pmu);
309         ctx->nr_events++;
310         if (event->attr.inherit_stat)
311                 ctx->nr_stat++;
312 }
313
314 static void perf_group_attach(struct perf_event *event)
315 {
316         struct perf_event *group_leader = event->group_leader;
317
318         WARN_ON_ONCE(event->attach_state & PERF_ATTACH_GROUP);
319         event->attach_state |= PERF_ATTACH_GROUP;
320
321         if (group_leader == event)
322                 return;
323
324         if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
325                         !is_software_event(event))
326                 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
327
328         list_add_tail(&event->group_entry, &group_leader->sibling_list);
329         group_leader->nr_siblings++;
330 }
331
332 /*
333  * Remove an event from the lists for its context.
334  * Must be called with ctx->mutex and ctx->lock held.
335  */
336 static void
337 list_del_event(struct perf_event *event, struct perf_event_context *ctx)
338 {
339         /*
340          * We can have double detach due to exit/hot-unplug + close.
341          */
342         if (!(event->attach_state & PERF_ATTACH_CONTEXT))
343                 return;
344
345         event->attach_state &= ~PERF_ATTACH_CONTEXT;
346
347         ctx->nr_events--;
348         if (event->attr.inherit_stat)
349                 ctx->nr_stat--;
350
351         list_del_rcu(&event->event_entry);
352
353         if (event->group_leader == event)
354                 list_del_init(&event->group_entry);
355
356         update_group_times(event);
357
358         /*
359          * If event was in error state, then keep it
360          * that way, otherwise bogus counts will be
361          * returned on read(). The only way to get out
362          * of error state is by explicit re-enabling
363          * of the event
364          */
365         if (event->state > PERF_EVENT_STATE_OFF)
366                 event->state = PERF_EVENT_STATE_OFF;
367 }
368
369 static void perf_group_detach(struct perf_event *event)
370 {
371         struct perf_event *sibling, *tmp;
372         struct list_head *list = NULL;
373
374         /*
375          * We can have double detach due to exit/hot-unplug + close.
376          */
377         if (!(event->attach_state & PERF_ATTACH_GROUP))
378                 return;
379
380         event->attach_state &= ~PERF_ATTACH_GROUP;
381
382         /*
383          * If this is a sibling, remove it from its group.
384          */
385         if (event->group_leader != event) {
386                 list_del_init(&event->group_entry);
387                 event->group_leader->nr_siblings--;
388                 return;
389         }
390
391         if (!list_empty(&event->group_entry))
392                 list = &event->group_entry;
393
394         /*
395          * If this was a group event with sibling events then
396          * upgrade the siblings to singleton events by adding them
397          * to whatever list we are on.
398          */
399         list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
400                 if (list)
401                         list_move_tail(&sibling->group_entry, list);
402                 sibling->group_leader = sibling;
403
404                 /* Inherit group flags from the previous leader */
405                 sibling->group_flags = event->group_flags;
406         }
407 }
408
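/*
 * An event bound to a specific CPU (event->cpu != -1) only matches on that
 * CPU; cpu == -1 means "follow the task anywhere".
 */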
409 static inline int
410 event_filter_match(struct perf_event *event)
411 {
412         return event->cpu == -1 || event->cpu == smp_processor_id();
413 }
414
415 static void
416 event_sched_out(struct perf_event *event,
417                   struct perf_cpu_context *cpuctx,
418                   struct perf_event_context *ctx)
419 {
420         u64 delta;
421         /*
422          * An event which could not be activated because of
423          * filter mismatch still needs to have its timings
424          * maintained, otherwise bogus information is returned
425          * via read() for time_enabled, time_running:
426          */
427         if (event->state == PERF_EVENT_STATE_INACTIVE
428             && !event_filter_match(event)) {
429                 delta = ctx->time - event->tstamp_stopped;
430                 event->tstamp_running += delta;
431                 event->tstamp_stopped = ctx->time;
432         }
433
434         if (event->state != PERF_EVENT_STATE_ACTIVE)
435                 return;
436
437         event->state = PERF_EVENT_STATE_INACTIVE;
438         if (event->pending_disable) {
439                 event->pending_disable = 0;
440                 event->state = PERF_EVENT_STATE_OFF;
441         }
442         event->tstamp_stopped = ctx->time;
443         event->pmu->del(event, 0);
444         event->oncpu = -1;
445
446         if (!is_software_event(event))
447                 cpuctx->active_oncpu--;
448         ctx->nr_active--;
449         if (event->attr.exclusive || !cpuctx->active_oncpu)
450                 cpuctx->exclusive = 0;
451 }
452
453 static void
454 group_sched_out(struct perf_event *group_event,
455                 struct perf_cpu_context *cpuctx,
456                 struct perf_event_context *ctx)
457 {
458         struct perf_event *event;
459         int state = group_event->state;
460
461         event_sched_out(group_event, cpuctx, ctx);
462
463         /*
464          * Schedule out siblings (if any):
465          */
466         list_for_each_entry(event, &group_event->sibling_list, group_entry)
467                 event_sched_out(event, cpuctx, ctx);
468
469         if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
470                 cpuctx->exclusive = 0;
471 }
472
473 static inline struct perf_cpu_context *
474 __get_cpu_context(struct perf_event_context *ctx)
475 {
476         return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
477 }
478
479 /*
480  * Cross CPU call to remove a performance event
481  *
482  * We disable the event on the hardware level first. After that we
483  * remove it from the context list.
484  */
485 static void __perf_event_remove_from_context(void *info)
486 {
487         struct perf_event *event = info;
488         struct perf_event_context *ctx = event->ctx;
489         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
490
491         /*
492          * If this is a task context, we need to check whether it is
493          * the current task context of this cpu. If not it has been
494          * scheduled out before the smp call arrived.
495          */
496         if (ctx->task && cpuctx->task_ctx != ctx)
497                 return;
498
499         raw_spin_lock(&ctx->lock);
500
501         event_sched_out(event, cpuctx, ctx);
502
503         list_del_event(event, ctx);
504
505         raw_spin_unlock(&ctx->lock);
506 }
507
508
509 /*
510  * Remove the event from a task's (or a CPU's) list of events.
511  *
512  * Must be called with ctx->mutex held.
513  *
514  * CPU events are removed with a smp call. For task events we only
515  * call when the task is on a CPU.
516  *
517  * If event->ctx is a cloned context, callers must make sure that
518  * every task struct that event->ctx->task could possibly point to
519  * remains valid.  This is OK when called from perf_release since
520  * that only calls us on the top-level context, which can't be a clone.
521  * When called from perf_event_exit_task, it's OK because the
522  * context has been detached from its task.
523  */
524 static void perf_event_remove_from_context(struct perf_event *event)
525 {
526         struct perf_event_context *ctx = event->ctx;
527         struct task_struct *task = ctx->task;
528
529         if (!task) {
530                 /*
531                  * Per cpu events are removed via an smp call and
532                  * the removal is always successful.
533                  */
534                 smp_call_function_single(event->cpu,
535                                          __perf_event_remove_from_context,
536                                          event, 1);
537                 return;
538         }
539
540 retry:
541         task_oncpu_function_call(task, __perf_event_remove_from_context,
542                                  event);
543
544         raw_spin_lock_irq(&ctx->lock);
545         /*
546          * If the context is active we need to retry the smp call.
547          */
548         if (ctx->nr_active && !list_empty(&event->group_entry)) {
549                 raw_spin_unlock_irq(&ctx->lock);
550                 goto retry;
551         }
552
553         /*
554          * The lock prevents this context from being scheduled in, so we
555          * can remove the event safely, if the call above did not
556          * succeed.
557          */
558         if (!list_empty(&event->group_entry))
559                 list_del_event(event, ctx);
560         raw_spin_unlock_irq(&ctx->lock);
561 }
562
563 /*
564  * Cross CPU call to disable a performance event
565  */
566 static void __perf_event_disable(void *info)
567 {
568         struct perf_event *event = info;
569         struct perf_event_context *ctx = event->ctx;
570         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
571
572         /*
573          * If this is a per-task event, need to check whether this
574          * event's task is the current task on this cpu.
575          */
576         if (ctx->task && cpuctx->task_ctx != ctx)
577                 return;
578
579         raw_spin_lock(&ctx->lock);
580
581         /*
582          * If the event is on, turn it off.
583          * If it is in error state, leave it in error state.
584          */
585         if (event->state >= PERF_EVENT_STATE_INACTIVE) {
586                 update_context_time(ctx);
587                 update_group_times(event);
588                 if (event == event->group_leader)
589                         group_sched_out(event, cpuctx, ctx);
590                 else
591                         event_sched_out(event, cpuctx, ctx);
592                 event->state = PERF_EVENT_STATE_OFF;
593         }
594
595         raw_spin_unlock(&ctx->lock);
596 }
597
598 /*
599  * Disable an event.
600  *
601  * If event->ctx is a cloned context, callers must make sure that
602  * every task struct that event->ctx->task could possibly point to
603  * remains valid.  This condition is satisfied when called through
604  * perf_event_for_each_child or perf_event_for_each because they
605  * hold the top-level event's child_mutex, so any descendant that
606  * goes to exit will block in sync_child_event.
607  * When called from perf_pending_event it's OK because event->ctx
608  * is the current context on this CPU and preemption is disabled,
609  * hence we can't get into perf_event_task_sched_out for this context.
610  */
611 void perf_event_disable(struct perf_event *event)
612 {
613         struct perf_event_context *ctx = event->ctx;
614         struct task_struct *task = ctx->task;
615
616         if (!task) {
617                 /*
618                  * Disable the event on the cpu that it's on
619                  */
620                 smp_call_function_single(event->cpu, __perf_event_disable,
621                                          event, 1);
622                 return;
623         }
624
625 retry:
626         task_oncpu_function_call(task, __perf_event_disable, event);
627
628         raw_spin_lock_irq(&ctx->lock);
629         /*
630          * If the event is still active, we need to retry the cross-call.
631          */
632         if (event->state == PERF_EVENT_STATE_ACTIVE) {
633                 raw_spin_unlock_irq(&ctx->lock);
634                 goto retry;
635         }
636
637         /*
638          * Since we have the lock this context can't be scheduled
639          * in, so we can change the state safely.
640          */
641         if (event->state == PERF_EVENT_STATE_INACTIVE) {
642                 update_group_times(event);
643                 event->state = PERF_EVENT_STATE_OFF;
644         }
645
646         raw_spin_unlock_irq(&ctx->lock);
647 }
648
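/*
 * Schedule a single event onto the PMU of this CPU.  Returns 0 if the
 * event is off or was added successfully, or -EAGAIN if pmu->add() failed,
 * in which case the event is put back into the INACTIVE state.
 */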
649 static int
650 event_sched_in(struct perf_event *event,
651                  struct perf_cpu_context *cpuctx,
652                  struct perf_event_context *ctx)
653 {
654         if (event->state <= PERF_EVENT_STATE_OFF)
655                 return 0;
656
657         event->state = PERF_EVENT_STATE_ACTIVE;
658         event->oncpu = smp_processor_id();
659         /*
660          * The new state must be visible before we turn it on in the hardware:
661          */
662         smp_wmb();
663
664         if (event->pmu->add(event, PERF_EF_START)) {
665                 event->state = PERF_EVENT_STATE_INACTIVE;
666                 event->oncpu = -1;
667                 return -EAGAIN;
668         }
669
670         event->tstamp_running += ctx->time - event->tstamp_stopped;
671
672         if (!is_software_event(event))
673                 cpuctx->active_oncpu++;
674         ctx->nr_active++;
675
676         if (event->attr.exclusive)
677                 cpuctx->exclusive = 1;
678
679         return 0;
680 }
681
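/*
 * Schedule a whole group as one unit using the pmu transaction interface:
 * start_txn(), add the leader and every sibling, then commit_txn().  If
 * any member (or the commit) fails, everything scheduled so far is undone
 * and the transaction is cancelled with cancel_txn().
 */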
682 static int
683 group_sched_in(struct perf_event *group_event,
684                struct perf_cpu_context *cpuctx,
685                struct perf_event_context *ctx)
686 {
687         struct perf_event *event, *partial_group = NULL;
688         struct pmu *pmu = group_event->pmu;
689
690         if (group_event->state == PERF_EVENT_STATE_OFF)
691                 return 0;
692
693         pmu->start_txn(pmu);
694
695         if (event_sched_in(group_event, cpuctx, ctx)) {
696                 pmu->cancel_txn(pmu);
697                 return -EAGAIN;
698         }
699
700         /*
701          * Schedule in siblings as one group (if any):
702          */
703         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
704                 if (event_sched_in(event, cpuctx, ctx)) {
705                         partial_group = event;
706                         goto group_error;
707                 }
708         }
709
710         if (!pmu->commit_txn(pmu))
711                 return 0;
712
713 group_error:
714         /*
715          * Groups can be scheduled in as one unit only, so undo any
716          * partial group before returning:
717          */
718         list_for_each_entry(event, &group_event->sibling_list, group_entry) {
719                 if (event == partial_group)
720                         break;
721                 event_sched_out(event, cpuctx, ctx);
722         }
723         event_sched_out(group_event, cpuctx, ctx);
724
725         pmu->cancel_txn(pmu);
726
727         return -EAGAIN;
728 }
729
730 /*
731  * Work out whether we can put this event group on the CPU now.
732  */
733 static int group_can_go_on(struct perf_event *event,
734                            struct perf_cpu_context *cpuctx,
735                            int can_add_hw)
736 {
737         /*
738          * Groups consisting entirely of software events can always go on.
739          */
740         if (event->group_flags & PERF_GROUP_SOFTWARE)
741                 return 1;
742         /*
743          * If an exclusive group is already on, no other hardware
744          * events can go on.
745          */
746         if (cpuctx->exclusive)
747                 return 0;
748         /*
749          * If this group is exclusive and there are already
750          * events on the CPU, it can't go on.
751          */
752         if (event->attr.exclusive && cpuctx->active_oncpu)
753                 return 0;
754         /*
755          * Otherwise, try to add it if all previous groups were able
756          * to go on.
757          */
758         return can_add_hw;
759 }
760
761 static void add_event_to_ctx(struct perf_event *event,
762                                struct perf_event_context *ctx)
763 {
764         list_add_event(event, ctx);
765         perf_group_attach(event);
766         event->tstamp_enabled = ctx->time;
767         event->tstamp_running = ctx->time;
768         event->tstamp_stopped = ctx->time;
769 }
770
771 /*
772  * Cross CPU call to install and enable a performance event
773  *
774  * Must be called with ctx->mutex held
775  */
776 static void __perf_install_in_context(void *info)
777 {
778         struct perf_event *event = info;
779         struct perf_event_context *ctx = event->ctx;
780         struct perf_event *leader = event->group_leader;
781         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
782         int err;
783
784         /*
785          * If this is a task context, we need to check whether it is
786          * the current task context of this cpu. If not it has been
787          * scheduled out before the smp call arrived.
788          * Or possibly this is the right context but it isn't
789          * on this cpu because it had no events.
790          */
791         if (ctx->task && cpuctx->task_ctx != ctx) {
792                 if (cpuctx->task_ctx || ctx->task != current)
793                         return;
794                 cpuctx->task_ctx = ctx;
795         }
796
797         raw_spin_lock(&ctx->lock);
798         ctx->is_active = 1;
799         update_context_time(ctx);
800
801         add_event_to_ctx(event, ctx);
802
803         if (event->cpu != -1 && event->cpu != smp_processor_id())
804                 goto unlock;
805
806         /*
807          * Don't put the event on if it is disabled or if
808          * it is in a group and the group isn't on.
809          */
810         if (event->state != PERF_EVENT_STATE_INACTIVE ||
811             (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
812                 goto unlock;
813
814         /*
815          * An exclusive event can't go on if there are already active
816          * hardware events, and no hardware event can go on if there
817          * is already an exclusive event on.
818          */
819         if (!group_can_go_on(event, cpuctx, 1))
820                 err = -EEXIST;
821         else
822                 err = event_sched_in(event, cpuctx, ctx);
823
824         if (err) {
825                 /*
826                  * This event couldn't go on.  If it is in a group
827                  * then we have to pull the whole group off.
828                  * If the event group is pinned then put it in error state.
829                  */
830                 if (leader != event)
831                         group_sched_out(leader, cpuctx, ctx);
832                 if (leader->attr.pinned) {
833                         update_group_times(leader);
834                         leader->state = PERF_EVENT_STATE_ERROR;
835                 }
836         }
837
838 unlock:
839         raw_spin_unlock(&ctx->lock);
840 }
841
842 /*
843  * Attach a performance event to a context
844  *
845  * First we add the event to the list with the hardware enable bit
846  * in event->hw_config cleared.
847  *
848  * If the event is attached to a task which is on a CPU we use a smp
849  * call to enable it in the task context. The task might have been
850  * scheduled away, but we check this in the smp call again.
851  *
852  * Must be called with ctx->mutex held.
853  */
854 static void
855 perf_install_in_context(struct perf_event_context *ctx,
856                         struct perf_event *event,
857                         int cpu)
858 {
859         struct task_struct *task = ctx->task;
860
861         event->ctx = ctx;
862
863         if (!task) {
864                 /*
865                  * Per cpu events are installed via an smp call and
866                  * the install is always successful.
867                  */
868                 smp_call_function_single(cpu, __perf_install_in_context,
869                                          event, 1);
870                 return;
871         }
872
873 retry:
874         task_oncpu_function_call(task, __perf_install_in_context,
875                                  event);
876
877         raw_spin_lock_irq(&ctx->lock);
878         /*
879          * If the context is active we need to retry the smp call.
880          */
881         if (ctx->is_active && list_empty(&event->group_entry)) {
882                 raw_spin_unlock_irq(&ctx->lock);
883                 goto retry;
884         }
885
886         /*
887          * The lock prevents this context from being scheduled in, so we
888          * can add the event safely, if the call above did not
889          * succeed.
890          */
891         if (list_empty(&event->group_entry))
892                 add_event_to_ctx(event, ctx);
893         raw_spin_unlock_irq(&ctx->lock);
894 }
895
896 /*
897  * Put an event into inactive state and update time fields.
898  * Enabling the leader of a group effectively enables all
899  * the group members that aren't explicitly disabled, so we
900  * have to update their ->tstamp_enabled also.
901  * Note: this works for group members as well as group leaders
902  * since the non-leader members' sibling_lists will be empty.
903  */
904 static void __perf_event_mark_enabled(struct perf_event *event,
905                                         struct perf_event_context *ctx)
906 {
907         struct perf_event *sub;
908
909         event->state = PERF_EVENT_STATE_INACTIVE;
910         event->tstamp_enabled = ctx->time - event->total_time_enabled;
911         list_for_each_entry(sub, &event->sibling_list, group_entry) {
912                 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
913                         sub->tstamp_enabled =
914                                 ctx->time - sub->total_time_enabled;
915                 }
916         }
917 }
918
919 /*
920  * Cross CPU call to enable a performance event
921  */
922 static void __perf_event_enable(void *info)
923 {
924         struct perf_event *event = info;
925         struct perf_event_context *ctx = event->ctx;
926         struct perf_event *leader = event->group_leader;
927         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
928         int err;
929
930         /*
931          * If this is a per-task event, need to check whether this
932          * event's task is the current task on this cpu.
933          */
934         if (ctx->task && cpuctx->task_ctx != ctx) {
935                 if (cpuctx->task_ctx || ctx->task != current)
936                         return;
937                 cpuctx->task_ctx = ctx;
938         }
939
940         raw_spin_lock(&ctx->lock);
941         ctx->is_active = 1;
942         update_context_time(ctx);
943
944         if (event->state >= PERF_EVENT_STATE_INACTIVE)
945                 goto unlock;
946         __perf_event_mark_enabled(event, ctx);
947
948         if (event->cpu != -1 && event->cpu != smp_processor_id())
949                 goto unlock;
950
951         /*
952          * If the event is in a group and isn't the group leader,
953          * then don't put it on unless the group is on.
954          */
955         if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
956                 goto unlock;
957
958         if (!group_can_go_on(event, cpuctx, 1)) {
959                 err = -EEXIST;
960         } else {
961                 if (event == leader)
962                         err = group_sched_in(event, cpuctx, ctx);
963                 else
964                         err = event_sched_in(event, cpuctx, ctx);
965         }
966
967         if (err) {
968                 /*
969                  * If this event can't go on and it's part of a
970                  * group, then the whole group has to come off.
971                  */
972                 if (leader != event)
973                         group_sched_out(leader, cpuctx, ctx);
974                 if (leader->attr.pinned) {
975                         update_group_times(leader);
976                         leader->state = PERF_EVENT_STATE_ERROR;
977                 }
978         }
979
980 unlock:
981         raw_spin_unlock(&ctx->lock);
982 }
983
984 /*
985  * Enable an event.
986  *
987  * If event->ctx is a cloned context, callers must make sure that
988  * every task struct that event->ctx->task could possibly point to
989  * remains valid.  This condition is satisfied when called through
990  * perf_event_for_each_child or perf_event_for_each as described
991  * for perf_event_disable.
992  */
993 void perf_event_enable(struct perf_event *event)
994 {
995         struct perf_event_context *ctx = event->ctx;
996         struct task_struct *task = ctx->task;
997
998         if (!task) {
999                 /*
1000                  * Enable the event on the cpu that it's on
1001                  */
1002                 smp_call_function_single(event->cpu, __perf_event_enable,
1003                                          event, 1);
1004                 return;
1005         }
1006
1007         raw_spin_lock_irq(&ctx->lock);
1008         if (event->state >= PERF_EVENT_STATE_INACTIVE)
1009                 goto out;
1010
1011         /*
1012          * If the event is in error state, clear that first.
1013          * That way, if we see the event in error state below, we
1014          * know that it has gone back into error state, as distinct
1015          * from the task having been scheduled away before the
1016          * cross-call arrived.
1017          */
1018         if (event->state == PERF_EVENT_STATE_ERROR)
1019                 event->state = PERF_EVENT_STATE_OFF;
1020
1021 retry:
1022         raw_spin_unlock_irq(&ctx->lock);
1023         task_oncpu_function_call(task, __perf_event_enable, event);
1024
1025         raw_spin_lock_irq(&ctx->lock);
1026
1027         /*
1028          * If the context is active and the event is still off,
1029          * we need to retry the cross-call.
1030          */
1031         if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF)
1032                 goto retry;
1033
1034         /*
1035          * Since we have the lock this context can't be scheduled
1036          * in, so we can change the state safely.
1037          */
1038         if (event->state == PERF_EVENT_STATE_OFF)
1039                 __perf_event_mark_enabled(event, ctx);
1040
1041 out:
1042         raw_spin_unlock_irq(&ctx->lock);
1043 }
1044
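/*
 * Re-arm a self-disabling event: add @refresh to its remaining overflow
 * budget (event_limit) and enable it again.
 */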
1045 static int perf_event_refresh(struct perf_event *event, int refresh)
1046 {
1047         /*
1048          * not supported on inherited events
1049          */
1050         if (event->attr.inherit)
1051                 return -EINVAL;
1052
1053         atomic_add(refresh, &event->event_limit);
1054         perf_event_enable(event);
1055
1056         return 0;
1057 }
1058
1059 enum event_type_t {
1060         EVENT_FLEXIBLE = 0x1,
1061         EVENT_PINNED = 0x2,
1062         EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
1063 };
1064
1065 static void ctx_sched_out(struct perf_event_context *ctx,
1066                           struct perf_cpu_context *cpuctx,
1067                           enum event_type_t event_type)
1068 {
1069         struct perf_event *event;
1070
1071         raw_spin_lock(&ctx->lock);
1072         perf_pmu_disable(ctx->pmu);
1073         ctx->is_active = 0;
1074         if (likely(!ctx->nr_events))
1075                 goto out;
1076         update_context_time(ctx);
1077
1078         if (!ctx->nr_active)
1079                 goto out;
1080
1081         if (event_type & EVENT_PINNED) {
1082                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1083                         group_sched_out(event, cpuctx, ctx);
1084         }
1085
1086         if (event_type & EVENT_FLEXIBLE) {
1087                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1088                         group_sched_out(event, cpuctx, ctx);
1089         }
1090 out:
1091         perf_pmu_enable(ctx->pmu);
1092         raw_spin_unlock(&ctx->lock);
1093 }
1094
1095 /*
1096  * Test whether two contexts are equivalent, i.e. whether they
1097  * have both been cloned from the same version of the same context
1098  * and they both have the same number of enabled events.
1099  * If the number of enabled events is the same, then the set
1100  * of enabled events should be the same, because these are both
1101  * inherited contexts, therefore we can't access individual events
1102  * in them directly with an fd; we can only enable/disable all
1103  * events via prctl, or enable/disable all events in a family
1104  * via ioctl, which will have the same effect on both contexts.
1105  */
1106 static int context_equiv(struct perf_event_context *ctx1,
1107                          struct perf_event_context *ctx2)
1108 {
1109         return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1110                 && ctx1->parent_gen == ctx2->parent_gen
1111                 && !ctx1->pin_count && !ctx2->pin_count;
1112 }
1113
1114 static void __perf_event_sync_stat(struct perf_event *event,
1115                                      struct perf_event *next_event)
1116 {
1117         u64 value;
1118
1119         if (!event->attr.inherit_stat)
1120                 return;
1121
1122         /*
1123          * Update the event value, we cannot use perf_event_read()
1124          * because we're in the middle of a context switch and have IRQs
1125          * disabled, which upsets smp_call_function_single(), however
1126          * we know the event must be on the current CPU, therefore we
1127          * don't need to use it.
1128          */
1129         switch (event->state) {
1130         case PERF_EVENT_STATE_ACTIVE:
1131                 event->pmu->read(event);
1132                 /* fall-through */
1133
1134         case PERF_EVENT_STATE_INACTIVE:
1135                 update_event_times(event);
1136                 break;
1137
1138         default:
1139                 break;
1140         }
1141
1142         /*
1143          * In order to keep per-task stats reliable we need to flip the event
1144          * values when we flip the contexts.
1145          */
1146         value = local64_read(&next_event->count);
1147         value = local64_xchg(&event->count, value);
1148         local64_set(&next_event->count, value);
1149
1150         swap(event->total_time_enabled, next_event->total_time_enabled);
1151         swap(event->total_time_running, next_event->total_time_running);
1152
1153         /*
1154          * Since we swizzled the values, update the user visible data too.
1155          */
1156         perf_event_update_userpage(event);
1157         perf_event_update_userpage(next_event);
1158 }
1159
1160 #define list_next_entry(pos, member) \
1161         list_entry(pos->member.next, typeof(*pos), member)
1162
1163 static void perf_event_sync_stat(struct perf_event_context *ctx,
1164                                    struct perf_event_context *next_ctx)
1165 {
1166         struct perf_event *event, *next_event;
1167
1168         if (!ctx->nr_stat)
1169                 return;
1170
1171         update_context_time(ctx);
1172
1173         event = list_first_entry(&ctx->event_list,
1174                                    struct perf_event, event_entry);
1175
1176         next_event = list_first_entry(&next_ctx->event_list,
1177                                         struct perf_event, event_entry);
1178
1179         while (&event->event_entry != &ctx->event_list &&
1180                &next_event->event_entry != &next_ctx->event_list) {
1181
1182                 __perf_event_sync_stat(event, next_event);
1183
1184                 event = list_next_entry(event, event_entry);
1185                 next_event = list_next_entry(next_event, event_entry);
1186         }
1187 }
1188
1189 void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1190                                   struct task_struct *next)
1191 {
1192         struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
1193         struct perf_event_context *next_ctx;
1194         struct perf_event_context *parent;
1195         struct perf_cpu_context *cpuctx;
1196         int do_switch = 1;
1197
1198         if (likely(!ctx))
1199                 return;
1200
1201         cpuctx = __get_cpu_context(ctx);
1202         if (!cpuctx->task_ctx)
1203                 return;
1204
1205         rcu_read_lock();
1206         parent = rcu_dereference(ctx->parent_ctx);
1207         next_ctx = next->perf_event_ctxp[ctxn];
1208         if (parent && next_ctx &&
1209             rcu_dereference(next_ctx->parent_ctx) == parent) {
1210                 /*
1211                  * Looks like the two contexts are clones, so we might be
1212                  * able to optimize the context switch.  We lock both
1213                  * contexts and check that they are clones under the
1214                  * lock (including re-checking that neither has been
1215                  * uncloned in the meantime).  It doesn't matter which
1216                  * order we take the locks because no other cpu could
1217                  * be trying to lock both of these tasks.
1218                  */
1219                 raw_spin_lock(&ctx->lock);
1220                 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
1221                 if (context_equiv(ctx, next_ctx)) {
1222                         /*
1223                          * XXX do we need a memory barrier of sorts
1224                          * wrt to rcu_dereference() of perf_event_ctxp
1225                          */
1226                         task->perf_event_ctxp[ctxn] = next_ctx;
1227                         next->perf_event_ctxp[ctxn] = ctx;
1228                         ctx->task = next;
1229                         next_ctx->task = task;
1230                         do_switch = 0;
1231
1232                         perf_event_sync_stat(ctx, next_ctx);
1233                 }
1234                 raw_spin_unlock(&next_ctx->lock);
1235                 raw_spin_unlock(&ctx->lock);
1236         }
1237         rcu_read_unlock();
1238
1239         if (do_switch) {
1240                 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
1241                 cpuctx->task_ctx = NULL;
1242         }
1243 }
1244
1245 #define for_each_task_context_nr(ctxn)                                  \
1246         for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
1247
1248 /*
1249  * Called from scheduler to remove the events of the current task,
1250  * with interrupts disabled.
1251  *
1252  * We stop each event and update the event value in event->count.
1253  *
1254  * This does not protect us against NMI, but disable()
1255  * sets the disabled bit in the control field of event _before_
1256  * accessing the event control register. If a NMI hits, then it will
1257  * not restart the event.
1258  */
1259 void perf_event_task_sched_out(struct task_struct *task,
1260                                struct task_struct *next)
1261 {
1262         int ctxn;
1263
1264         perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1265
1266         for_each_task_context_nr(ctxn)
1267                 perf_event_context_sched_out(task, ctxn, next);
1268 }
1269
1270 static void task_ctx_sched_out(struct perf_event_context *ctx,
1271                                enum event_type_t event_type)
1272 {
1273         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1274
1275         if (!cpuctx->task_ctx)
1276                 return;
1277
1278         if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
1279                 return;
1280
1281         ctx_sched_out(ctx, cpuctx, event_type);
1282         cpuctx->task_ctx = NULL;
1283 }
1284
1285 /*
1286  * Called with IRQs disabled
1287  */
1288 static void __perf_event_task_sched_out(struct perf_event_context *ctx)
1289 {
1290         task_ctx_sched_out(ctx, EVENT_ALL);
1291 }
1292
1293 /*
1294  * Called with IRQs disabled
1295  */
1296 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
1297                               enum event_type_t event_type)
1298 {
1299         ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
1300 }
1301
1302 static void
1303 ctx_pinned_sched_in(struct perf_event_context *ctx,
1304                     struct perf_cpu_context *cpuctx)
1305 {
1306         struct perf_event *event;
1307
1308         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1309                 if (event->state <= PERF_EVENT_STATE_OFF)
1310                         continue;
1311                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1312                         continue;
1313
1314                 if (group_can_go_on(event, cpuctx, 1))
1315                         group_sched_in(event, cpuctx, ctx);
1316
1317                 /*
1318                  * If this pinned group hasn't been scheduled,
1319                  * put it in error state.
1320                  */
1321                 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1322                         update_group_times(event);
1323                         event->state = PERF_EVENT_STATE_ERROR;
1324                 }
1325         }
1326 }
1327
1328 static void
1329 ctx_flexible_sched_in(struct perf_event_context *ctx,
1330                       struct perf_cpu_context *cpuctx)
1331 {
1332         struct perf_event *event;
1333         int can_add_hw = 1;
1334
1335         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1336                 /* Ignore events in OFF or ERROR state */
1337                 if (event->state <= PERF_EVENT_STATE_OFF)
1338                         continue;
1339                 /*
1340                  * Listen to the 'cpu' scheduling filter constraint
1341                  * of events:
1342                  */
1343                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1344                         continue;
1345
1346                 if (group_can_go_on(event, cpuctx, can_add_hw)) {
1347                         if (group_sched_in(event, cpuctx, ctx))
1348                                 can_add_hw = 0;
1349                 }
1350         }
1351 }
1352
1353 static void
1354 ctx_sched_in(struct perf_event_context *ctx,
1355              struct perf_cpu_context *cpuctx,
1356              enum event_type_t event_type)
1357 {
1358         raw_spin_lock(&ctx->lock);
1359         ctx->is_active = 1;
1360         if (likely(!ctx->nr_events))
1361                 goto out;
1362
1363         ctx->timestamp = perf_clock();
1364
1365         /*
1366          * First go through the list and put on any pinned groups
1367          * in order to give them the best chance of going on.
1368          */
1369         if (event_type & EVENT_PINNED)
1370                 ctx_pinned_sched_in(ctx, cpuctx);
1371
1372         /* Then walk through the lower prio flexible groups */
1373         if (event_type & EVENT_FLEXIBLE)
1374                 ctx_flexible_sched_in(ctx, cpuctx);
1375
1376 out:
1377         raw_spin_unlock(&ctx->lock);
1378 }
1379
1380 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
1381                              enum event_type_t event_type)
1382 {
1383         struct perf_event_context *ctx = &cpuctx->ctx;
1384
1385         ctx_sched_in(ctx, cpuctx, event_type);
1386 }
1387
1388 static void task_ctx_sched_in(struct perf_event_context *ctx,
1389                               enum event_type_t event_type)
1390 {
1391         struct perf_cpu_context *cpuctx;
1392
1393         cpuctx = __get_cpu_context(ctx);
1394         if (cpuctx->task_ctx == ctx)
1395                 return;
1396
1397         ctx_sched_in(ctx, cpuctx, event_type);
1398         cpuctx->task_ctx = ctx;
1399 }
1400
1401 void perf_event_context_sched_in(struct perf_event_context *ctx)
1402 {
1403         struct perf_cpu_context *cpuctx;
1404
1405         cpuctx = __get_cpu_context(ctx);
1406         if (cpuctx->task_ctx == ctx)
1407                 return;
1408
1409         perf_pmu_disable(ctx->pmu);
1410         /*
1411          * We want to keep the following priority order:
1412          * cpu pinned (that don't need to move), task pinned,
1413          * cpu flexible, task flexible.
1414          */
1415         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1416
1417         ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
1418         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1419         ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
1420
1421         cpuctx->task_ctx = ctx;
1422
1423         /*
1424          * Since these rotations are per-cpu, we need to ensure the
1425          * cpu-context we got scheduled on is actually rotating.
1426          */
1427         perf_pmu_rotate_start(ctx->pmu);
1428         perf_pmu_enable(ctx->pmu);
1429 }
1430
1431 /*
1432  * Called from scheduler to add the events of the current task
1433  * with interrupts disabled.
1434  *
1435  * We restore the event value and then enable it.
1436  *
1437  * This does not protect us against NMI, but enable()
1438  * sets the enabled bit in the control field of event _before_
1439  * accessing the event control register. If a NMI hits, then it will
1440  * keep the event running.
1441  */
1442 void perf_event_task_sched_in(struct task_struct *task)
1443 {
1444         struct perf_event_context *ctx;
1445         int ctxn;
1446
1447         for_each_task_context_nr(ctxn) {
1448                 ctx = task->perf_event_ctxp[ctxn];
1449                 if (likely(!ctx))
1450                         continue;
1451
1452                 perf_event_context_sched_in(ctx);
1453         }
1454 }
1455
1456 #define MAX_INTERRUPTS (~0ULL)
1457
1458 static void perf_log_throttle(struct perf_event *event, int enable);
1459
1460 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1461 {
1462         u64 frequency = event->attr.sample_freq;
1463         u64 sec = NSEC_PER_SEC;
1464         u64 divisor, dividend;
1465
1466         int count_fls, nsec_fls, frequency_fls, sec_fls;
1467
1468         count_fls = fls64(count);
1469         nsec_fls = fls64(nsec);
1470         frequency_fls = fls64(frequency);
1471         sec_fls = 30;
1472
1473         /*
1474          * We got @count in @nsec, with a target of sample_freq HZ
1475          * the target period becomes:
1476          *
1477          *             @count * 10^9
1478          * period = -------------------
1479          *          @nsec * sample_freq
1480          *
1481          */
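        /*
         * Worked example: count = 10000 events in nsec = 10^7 ns with
         * sample_freq = 1000 Hz gives
         *   period = 10000 * 10^9 / (10^7 * 1000) = 1000 events/sample.
         */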
1482
1483         /*
1484          * Reduce accuracy by one bit such that @a and @b converge
1485          * to a similar magnitude.
1486          */
1487 #define REDUCE_FLS(a, b)                \
1488 do {                                    \
1489         if (a##_fls > b##_fls) {        \
1490                 a >>= 1;                \
1491                 a##_fls--;              \
1492         } else {                        \
1493                 b >>= 1;                \
1494                 b##_fls--;              \
1495         }                               \
1496 } while (0)
1497
1498         /*
1499          * Reduce accuracy until either term fits in a u64, then proceed with
1500          * the other, so that finally we can do a u64/u64 division.
1501          */
1502         while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
1503                 REDUCE_FLS(nsec, frequency);
1504                 REDUCE_FLS(sec, count);
1505         }
1506
1507         if (count_fls + sec_fls > 64) {
1508                 divisor = nsec * frequency;
1509
1510                 while (count_fls + sec_fls > 64) {
1511                         REDUCE_FLS(count, sec);
1512                         divisor >>= 1;
1513                 }
1514
1515                 dividend = count * sec;
1516         } else {
1517                 dividend = count * sec;
1518
1519                 while (nsec_fls + frequency_fls > 64) {
1520                         REDUCE_FLS(nsec, frequency);
1521                         dividend >>= 1;
1522                 }
1523
1524                 divisor = nsec * frequency;
1525         }
1526
1527         if (!divisor)
1528                 return dividend;
1529
1530         return div64_u64(dividend, divisor);
1531 }
1532
1533 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
1534 {
1535         struct hw_perf_event *hwc = &event->hw;
1536         s64 period, sample_period;
1537         s64 delta;
1538
1539         period = perf_calculate_period(event, nsec, count);
1540
1541         delta = (s64)(period - hwc->sample_period);
1542         delta = (delta + 7) / 8; /* low pass filter */
1543
1544         sample_period = hwc->sample_period + delta;
1545
1546         if (!sample_period)
1547                 sample_period = 1;
1548
1549         hwc->sample_period = sample_period;
1550
1551         if (local64_read(&hwc->period_left) > 8*sample_period) {
1552                 event->pmu->stop(event, PERF_EF_UPDATE);
1553                 local64_set(&hwc->period_left, 0);
1554                 event->pmu->start(event, PERF_EF_RELOAD);
1555         }
1556 }
1557
1558 static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
1559 {
1560         struct perf_event *event;
1561         struct hw_perf_event *hwc;
1562         u64 interrupts, now;
1563         s64 delta;
1564
1565         raw_spin_lock(&ctx->lock);
1566         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
1567                 if (event->state != PERF_EVENT_STATE_ACTIVE)
1568                         continue;
1569
1570                 if (event->cpu != -1 && event->cpu != smp_processor_id())
1571                         continue;
1572
1573                 hwc = &event->hw;
1574
1575                 interrupts = hwc->interrupts;
1576                 hwc->interrupts = 0;
1577
1578                 /*
1579                  * unthrottle events on the tick
1580                  */
1581                 if (interrupts == MAX_INTERRUPTS) {
1582                         perf_log_throttle(event, 1);
1583                         event->pmu->start(event, 0);
1584                 }
1585
1586                 if (!event->attr.freq || !event->attr.sample_freq)
1587                         continue;
1588
1589                 event->pmu->read(event);
1590                 now = local64_read(&event->count);
1591                 delta = now - hwc->freq_count_stamp;
1592                 hwc->freq_count_stamp = now;
1593
1594                 if (delta > 0)
1595                         perf_adjust_period(event, period, delta);
1596         }
1597         raw_spin_unlock(&ctx->lock);
1598 }
1599
1600 /*
1601  * Round-robin a context's events:
1602  */
1603 static void rotate_ctx(struct perf_event_context *ctx)
1604 {
1605         raw_spin_lock(&ctx->lock);
1606
1607         /* Rotate the first entry last of non-pinned groups */
1608         list_rotate_left(&ctx->flexible_groups);
1609
1610         raw_spin_unlock(&ctx->lock);
1611 }
1612
1613 /*
1614  * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
1615  * because they're strictly cpu affine and rotate_start is called with IRQs
1616  * disabled, while rotate_context is called from IRQ context.
1617  */
1618 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
1619 {
1620         u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
1621         struct perf_event_context *ctx = NULL;
1622         int rotate = 0, remove = 1;
1623
1624         if (cpuctx->ctx.nr_events) {
1625                 remove = 0;
1626                 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1627                         rotate = 1;
1628         }
1629
1630         ctx = cpuctx->task_ctx;
1631         if (ctx && ctx->nr_events) {
1632                 remove = 0;
1633                 if (ctx->nr_events != ctx->nr_active)
1634                         rotate = 1;
1635         }
1636
1637         perf_pmu_disable(cpuctx->ctx.pmu);
1638         perf_ctx_adjust_freq(&cpuctx->ctx, interval);
1639         if (ctx)
1640                 perf_ctx_adjust_freq(ctx, interval);
1641
1642         if (!rotate)
1643                 goto done;
1644
1645         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1646         if (ctx)
1647                 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
1648
1649         rotate_ctx(&cpuctx->ctx);
1650         if (ctx)
1651                 rotate_ctx(ctx);
1652
1653         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1654         if (ctx)
1655                 task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
1656
1657 done:
1658         if (remove)
1659                 list_del_init(&cpuctx->rotation_list);
1660
1661         perf_pmu_enable(cpuctx->ctx.pmu);
1662 }
1663
1664 void perf_event_task_tick(void)
1665 {
1666         struct list_head *head = &__get_cpu_var(rotation_list);
1667         struct perf_cpu_context *cpuctx, *tmp;
1668
1669         WARN_ON(!irqs_disabled());
1670
1671         list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
1672                 if (cpuctx->jiffies_interval == 1 ||
1673                                 !(jiffies % cpuctx->jiffies_interval))
1674                         perf_rotate_context(cpuctx);
1675         }
1676 }
1677
1678 static int event_enable_on_exec(struct perf_event *event,
1679                                 struct perf_event_context *ctx)
1680 {
1681         if (!event->attr.enable_on_exec)
1682                 return 0;
1683
1684         event->attr.enable_on_exec = 0;
1685         if (event->state >= PERF_EVENT_STATE_INACTIVE)
1686                 return 0;
1687
1688         __perf_event_mark_enabled(event, ctx);
1689
1690         return 1;
1691 }
1692
1693 /*
1694  * Enable all of a task's events that have been marked enable-on-exec.
1695  * This expects task == current.
1696  */
1697 static void perf_event_enable_on_exec(struct perf_event_context *ctx)
1698 {
1699         struct perf_event *event;
1700         unsigned long flags;
1701         int enabled = 0;
1702         int ret;
1703
1704         local_irq_save(flags);
1705         if (!ctx || !ctx->nr_events)
1706                 goto out;
1707
1708         task_ctx_sched_out(ctx, EVENT_ALL);
1709
1710         raw_spin_lock(&ctx->lock);
1711
1712         list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
1713                 ret = event_enable_on_exec(event, ctx);
1714                 if (ret)
1715                         enabled = 1;
1716         }
1717
1718         list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
1719                 ret = event_enable_on_exec(event, ctx);
1720                 if (ret)
1721                         enabled = 1;
1722         }
1723
1724         /*
1725          * Unclone this context if we enabled any event.
1726          */
1727         if (enabled)
1728                 unclone_ctx(ctx);
1729
1730         raw_spin_unlock(&ctx->lock);
1731
1732         perf_event_context_sched_in(ctx);
1733 out:
1734         local_irq_restore(flags);
1735 }
1736
1737 /*
1738  * Cross CPU call to read the hardware event
1739  */
1740 static void __perf_event_read(void *info)
1741 {
1742         struct perf_event *event = info;
1743         struct perf_event_context *ctx = event->ctx;
1744         struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1745
1746         /*
1747          * If this is a task context, we need to check whether it is
1748                  * the current task context of this cpu.  If not, it has been
1749          * scheduled out before the smp call arrived.  In that case
1750          * event->count would have been updated to a recent sample
1751          * when the event was scheduled out.
1752          */
1753         if (ctx->task && cpuctx->task_ctx != ctx)
1754                 return;
1755
1756         raw_spin_lock(&ctx->lock);
1757         update_context_time(ctx);
1758         update_event_times(event);
1759         raw_spin_unlock(&ctx->lock);
1760
1761         event->pmu->read(event);
1762 }
1763
1764 static inline u64 perf_event_count(struct perf_event *event)
1765 {
1766         return local64_read(&event->count) + atomic64_read(&event->child_count);
1767 }
1768
1769 static u64 perf_event_read(struct perf_event *event)
1770 {
1771         /*
1772          * If the event is enabled and currently active on a CPU, update the
1773          * value in the event structure:
1774          */
1775         if (event->state == PERF_EVENT_STATE_ACTIVE) {
1776                 smp_call_function_single(event->oncpu,
1777                                          __perf_event_read, event, 1);
1778         } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
1779                 struct perf_event_context *ctx = event->ctx;
1780                 unsigned long flags;
1781
1782                 raw_spin_lock_irqsave(&ctx->lock, flags);
1783                 update_context_time(ctx);
1784                 update_event_times(event);
1785                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1786         }
1787
1788         return perf_event_count(event);
1789 }
1790
1791 /*
1792  * Callchain support
1793  */
1794
1795 struct callchain_cpus_entries {
1796         struct rcu_head                 rcu_head;
1797         struct perf_callchain_entry     *cpu_entries[0];
1798 };
1799
1800 static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
1801 static atomic_t nr_callchain_events;
1802 static DEFINE_MUTEX(callchain_mutex);
1803 struct callchain_cpus_entries *callchain_cpus_entries;
1804
1805
1806 __weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
1807                                   struct pt_regs *regs)
1808 {
1809 }
1810
1811 __weak void perf_callchain_user(struct perf_callchain_entry *entry,
1812                                 struct pt_regs *regs)
1813 {
1814 }
1815
1816 static void release_callchain_buffers_rcu(struct rcu_head *head)
1817 {
1818         struct callchain_cpus_entries *entries;
1819         int cpu;
1820
1821         entries = container_of(head, struct callchain_cpus_entries, rcu_head);
1822
1823         for_each_possible_cpu(cpu)
1824                 kfree(entries->cpu_entries[cpu]);
1825
1826         kfree(entries);
1827 }
1828
1829 static void release_callchain_buffers(void)
1830 {
1831         struct callchain_cpus_entries *entries;
1832
1833         entries = callchain_cpus_entries;
1834         rcu_assign_pointer(callchain_cpus_entries, NULL);
1835         call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
1836 }
1837
1838 static int alloc_callchain_buffers(void)
1839 {
1840         int cpu;
1841         int size;
1842         struct callchain_cpus_entries *entries;
1843
1844         /*
1845          * We can't use the percpu allocation API for data that can be
1846          * accessed from NMI. Use a temporary manual per cpu allocation
1847          * until that gets sorted out.
1848          */
1849         size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
1850                 num_possible_cpus();
1851
1852         entries = kzalloc(size, GFP_KERNEL);
1853         if (!entries)
1854                 return -ENOMEM;
1855
1856         size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
1857
1858         for_each_possible_cpu(cpu) {
1859                 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
1860                                                          cpu_to_node(cpu));
1861                 if (!entries->cpu_entries[cpu])
1862                         goto fail;
1863         }
1864
1865         rcu_assign_pointer(callchain_cpus_entries, entries);
1866
1867         return 0;
1868
1869 fail:
1870         for_each_possible_cpu(cpu)
1871                 kfree(entries->cpu_entries[cpu]);
1872         kfree(entries);
1873
1874         return -ENOMEM;
1875 }
1876
1877 static int get_callchain_buffers(void)
1878 {
1879         int err = 0;
1880         int count;
1881
1882         mutex_lock(&callchain_mutex);
1883
1884         count = atomic_inc_return(&nr_callchain_events);
1885         if (WARN_ON_ONCE(count < 1)) {
1886                 err = -EINVAL;
1887                 goto exit;
1888         }
1889
1890         if (count > 1) {
1891                 /* If the allocation failed, give up */
1892                 if (!callchain_cpus_entries)
1893                         err = -ENOMEM;
1894                 goto exit;
1895         }
1896
1897         err = alloc_callchain_buffers();
1898         if (err)
1899                 release_callchain_buffers();
1900 exit:
1901         mutex_unlock(&callchain_mutex);
1902
1903         return err;
1904 }
1905
1906 static void put_callchain_buffers(void)
1907 {
1908         if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
1909                 release_callchain_buffers();
1910                 mutex_unlock(&callchain_mutex);
1911         }
1912 }
1913
1914 static int get_recursion_context(int *recursion)
1915 {
1916         int rctx;
1917
1918         if (in_nmi())
1919                 rctx = 3;
1920         else if (in_irq())
1921                 rctx = 2;
1922         else if (in_softirq())
1923                 rctx = 1;
1924         else
1925                 rctx = 0;
1926
1927         if (recursion[rctx])
1928                 return -1;
1929
1930         recursion[rctx]++;
1931         barrier();
1932
1933         return rctx;
1934 }
1935
1936 static inline void put_recursion_context(int *recursion, int rctx)
1937 {
1938         barrier();
1939         recursion[rctx]--;
1940 }
1941
1942 static struct perf_callchain_entry *get_callchain_entry(int *rctx)
1943 {
1944         int cpu;
1945         struct callchain_cpus_entries *entries;
1946
1947         *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
1948         if (*rctx == -1)
1949                 return NULL;
1950
1951         entries = rcu_dereference(callchain_cpus_entries);
1952         if (!entries)
1953                 return NULL;
1954
1955         cpu = smp_processor_id();
1956
1957         return &entries->cpu_entries[cpu][*rctx];
1958 }
1959
1960 static void
1961 put_callchain_entry(int rctx)
1962 {
1963         put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
1964 }
1965
1966 static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1967 {
1968         int rctx;
1969         struct perf_callchain_entry *entry;
1970
1971
1972         entry = get_callchain_entry(&rctx);
1973         if (rctx == -1)
1974                 return NULL;
1975
1976         if (!entry)
1977                 goto exit_put;
1978
1979         entry->nr = 0;
1980
1981         if (!user_mode(regs)) {
1982                 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
1983                 perf_callchain_kernel(entry, regs);
1984                 if (current->mm)
1985                         regs = task_pt_regs(current);
1986                 else
1987                         regs = NULL;
1988         }
1989
1990         if (regs) {
1991                 perf_callchain_store(entry, PERF_CONTEXT_USER);
1992                 perf_callchain_user(entry, regs);
1993         }
1994
1995 exit_put:
1996         put_callchain_entry(rctx);
1997
1998         return entry;
1999 }
2000
2001 /*
2002  * Initialize the perf_event context in a task_struct:
2003  */
2004 static void __perf_event_init_context(struct perf_event_context *ctx)
2005 {
2006         raw_spin_lock_init(&ctx->lock);
2007         mutex_init(&ctx->mutex);
2008         INIT_LIST_HEAD(&ctx->pinned_groups);
2009         INIT_LIST_HEAD(&ctx->flexible_groups);
2010         INIT_LIST_HEAD(&ctx->event_list);
2011         atomic_set(&ctx->refcount, 1);
2012 }
2013
2014 static struct perf_event_context *
2015 alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2016 {
2017         struct perf_event_context *ctx;
2018
2019         ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2020         if (!ctx)
2021                 return NULL;
2022
2023         __perf_event_init_context(ctx);
2024         if (task) {
2025                 ctx->task = task;
2026                 get_task_struct(task);
2027         }
2028         ctx->pmu = pmu;
2029
2030         return ctx;
2031 }
2032
2033 static struct task_struct *
2034 find_lively_task_by_vpid(pid_t vpid)
2035 {
2036         struct task_struct *task;
2037         int err;
2038
2039         rcu_read_lock();
2040         if (!vpid)
2041                 task = current;
2042         else
2043                 task = find_task_by_vpid(vpid);
2044         if (task)
2045                 get_task_struct(task);
2046         rcu_read_unlock();
2047
2048         if (!task)
2049                 return ERR_PTR(-ESRCH);
2050
2051         /*
2052          * Can't attach events to a dying task.
2053          */
2054         err = -ESRCH;
2055         if (task->flags & PF_EXITING)
2056                 goto errout;
2057
2058         /* Reuse ptrace permission checks for now. */
2059         err = -EACCES;
2060         if (!ptrace_may_access(task, PTRACE_MODE_READ))
2061                 goto errout;
2062
2063         return task;
2064 errout:
2065         put_task_struct(task);
2066         return ERR_PTR(err);
2067
2068 }
2069
2070 static struct perf_event_context *
2071 find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2072 {
2073         struct perf_event_context *ctx;
2074         struct perf_cpu_context *cpuctx;
2075         unsigned long flags;
2076         int ctxn, err;
2077
2078         if (!task && cpu != -1) {
2079                 /* Must be root to operate on a CPU event: */
2080                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2081                         return ERR_PTR(-EACCES);
2082
2083                 if (cpu < 0 || cpu >= nr_cpumask_bits)
2084                         return ERR_PTR(-EINVAL);
2085
2086                 /*
2087                  * We could be clever and allow attaching an event to an
2088                  * offline CPU and activate it when the CPU comes up, but
2089                  * that's for later.
2090                  */
2091                 if (!cpu_online(cpu))
2092                         return ERR_PTR(-ENODEV);
2093
2094                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2095                 ctx = &cpuctx->ctx;
2096                 get_ctx(ctx);
2097
2098                 return ctx;
2099         }
2100
2101         err = -EINVAL;
2102         ctxn = pmu->task_ctx_nr;
2103         if (ctxn < 0)
2104                 goto errout;
2105
2106 retry:
2107         ctx = perf_lock_task_context(task, ctxn, &flags);
2108         if (ctx) {
2109                 unclone_ctx(ctx);
2110                 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2111         }
2112
2113         if (!ctx) {
2114                 ctx = alloc_perf_context(pmu, task);
2115                 err = -ENOMEM;
2116                 if (!ctx)
2117                         goto errout;
2118
2119                 get_ctx(ctx);
2120
2121                 if (cmpxchg(&task->perf_event_ctxp[ctxn], NULL, ctx)) {
2122                         /*
2123                          * We raced with some other task; use
2124                          * the context they set.
2125                          */
2126                         put_task_struct(task);
2127                         kfree(ctx);
2128                         goto retry;
2129                 }
2130         }
2131
2132         put_task_struct(task);
2133         return ctx;
2134
2135 errout:
2136         put_task_struct(task);
2137         return ERR_PTR(err);
2138 }
2139
2140 static void perf_event_free_filter(struct perf_event *event);
2141
2142 static void free_event_rcu(struct rcu_head *head)
2143 {
2144         struct perf_event *event;
2145
2146         event = container_of(head, struct perf_event, rcu_head);
2147         if (event->ns)
2148                 put_pid_ns(event->ns);
2149         perf_event_free_filter(event);
2150         kfree(event);
2151 }
2152
2153 static void perf_pending_sync(struct perf_event *event);
2154 static void perf_buffer_put(struct perf_buffer *buffer);
2155
2156 static void free_event(struct perf_event *event)
2157 {
2158         perf_pending_sync(event);
2159
2160         if (!event->parent) {
2161                 atomic_dec(&nr_events);
2162                 if (event->attr.mmap || event->attr.mmap_data)
2163                         atomic_dec(&nr_mmap_events);
2164                 if (event->attr.comm)
2165                         atomic_dec(&nr_comm_events);
2166                 if (event->attr.task)
2167                         atomic_dec(&nr_task_events);
2168                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2169                         put_callchain_buffers();
2170         }
2171
2172         if (event->buffer) {
2173                 perf_buffer_put(event->buffer);
2174                 event->buffer = NULL;
2175         }
2176
2177         if (event->destroy)
2178                 event->destroy(event);
2179
2180         if (event->ctx)
2181                 put_ctx(event->ctx);
2182
2183         call_rcu(&event->rcu_head, free_event_rcu);
2184 }
2185
2186 int perf_event_release_kernel(struct perf_event *event)
2187 {
2188         struct perf_event_context *ctx = event->ctx;
2189
2190         /*
2191          * Remove it from the PMU; it can't get re-enabled since we got
2192          * here because the last reference went away.
2193          */
2194         perf_event_disable(event);
2195
2196         WARN_ON_ONCE(ctx->parent_ctx);
2197         /*
2198          * There are two ways this annotation is useful:
2199          *
2200          *  1) there is a lock recursion from perf_event_exit_task;
2201          *     see the comment there.
2202          *
2203          *  2) there is a lock-inversion with mmap_sem through
2204          *     perf_event_read_group(), which takes faults while
2205          *     holding ctx->mutex; however, this is called after
2206          *     the last filedesc died, so there is no possibility
2207          *     of triggering the AB-BA case.
2208          */
2209         mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2210         raw_spin_lock_irq(&ctx->lock);
2211         perf_group_detach(event);
2212         list_del_event(event, ctx);
2213         raw_spin_unlock_irq(&ctx->lock);
2214         mutex_unlock(&ctx->mutex);
2215
2216         mutex_lock(&event->owner->perf_event_mutex);
2217         list_del_init(&event->owner_entry);
2218         mutex_unlock(&event->owner->perf_event_mutex);
2219         put_task_struct(event->owner);
2220
2221         free_event(event);
2222
2223         return 0;
2224 }
2225 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2226
2227 /*
2228  * Called when the last reference to the file is gone.
2229  */
2230 static int perf_release(struct inode *inode, struct file *file)
2231 {
2232         struct perf_event *event = file->private_data;
2233
2234         file->private_data = NULL;
2235
2236         return perf_event_release_kernel(event);
2237 }
2238
2239 static int perf_event_read_size(struct perf_event *event)
2240 {
2241         int entry = sizeof(u64); /* value */
2242         int size = 0;
2243         int nr = 1;
2244
2245         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2246                 size += sizeof(u64);
2247
2248         if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2249                 size += sizeof(u64);
2250
2251         if (event->attr.read_format & PERF_FORMAT_ID)
2252                 entry += sizeof(u64);
2253
2254         if (event->attr.read_format & PERF_FORMAT_GROUP) {
2255                 nr += event->group_leader->nr_siblings;
2256                 size += sizeof(u64);
2257         }
2258
2259         size += entry * nr;
2260
2261         return size;
2262 }
2263
2264 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
2265 {
2266         struct perf_event *child;
2267         u64 total = 0;
2268
2269         *enabled = 0;
2270         *running = 0;
2271
2272         mutex_lock(&event->child_mutex);
2273         total += perf_event_read(event);
2274         *enabled += event->total_time_enabled +
2275                         atomic64_read(&event->child_total_time_enabled);
2276         *running += event->total_time_running +
2277                         atomic64_read(&event->child_total_time_running);
2278
2279         list_for_each_entry(child, &event->child_list, child_list) {
2280                 total += perf_event_read(child);
2281                 *enabled += child->total_time_enabled;
2282                 *running += child->total_time_running;
2283         }
2284         mutex_unlock(&event->child_mutex);
2285
2286         return total;
2287 }
2288 EXPORT_SYMBOL_GPL(perf_event_read_value);
2289
2290 static int perf_event_read_group(struct perf_event *event,
2291                                    u64 read_format, char __user *buf)
2292 {
2293         struct perf_event *leader = event->group_leader, *sub;
2294         int n = 0, size = 0, ret = -EFAULT;
2295         struct perf_event_context *ctx = leader->ctx;
2296         u64 values[5];
2297         u64 count, enabled, running;
2298
2299         mutex_lock(&ctx->mutex);
2300         count = perf_event_read_value(leader, &enabled, &running);
2301
2302         values[n++] = 1 + leader->nr_siblings;
2303         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2304                 values[n++] = enabled;
2305         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2306                 values[n++] = running;
2307         values[n++] = count;
2308         if (read_format & PERF_FORMAT_ID)
2309                 values[n++] = primary_event_id(leader);
2310
2311         size = n * sizeof(u64);
2312
2313         if (copy_to_user(buf, values, size))
2314                 goto unlock;
2315
2316         ret = size;
2317
2318         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2319                 n = 0;
2320
2321                 values[n++] = perf_event_read_value(sub, &enabled, &running);
2322                 if (read_format & PERF_FORMAT_ID)
2323                         values[n++] = primary_event_id(sub);
2324
2325                 size = n * sizeof(u64);
2326
2327                 if (copy_to_user(buf + ret, values, size)) {
2328                         ret = -EFAULT;
2329                         goto unlock;
2330                 }
2331
2332                 ret += size;
2333         }
2334 unlock:
2335         mutex_unlock(&ctx->mutex);
2336
2337         return ret;
2338 }
2339
2340 static int perf_event_read_one(struct perf_event *event,
2341                                  u64 read_format, char __user *buf)
2342 {
2343         u64 enabled, running;
2344         u64 values[4];
2345         int n = 0;
2346
2347         values[n++] = perf_event_read_value(event, &enabled, &running);
2348         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
2349                 values[n++] = enabled;
2350         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
2351                 values[n++] = running;
2352         if (read_format & PERF_FORMAT_ID)
2353                 values[n++] = primary_event_id(event);
2354
2355         if (copy_to_user(buf, values, n * sizeof(u64)))
2356                 return -EFAULT;
2357
2358         return n * sizeof(u64);
2359 }
2360
2361 /*
2362  * Read the performance event - simple non-blocking version for now
2363  */
2364 static ssize_t
2365 perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
2366 {
2367         u64 read_format = event->attr.read_format;
2368         int ret;
2369
2370         /*
2371          * Return end-of-file for a read on an event that is in an
2372          * error state (i.e. because it was pinned but it couldn't be
2373          * scheduled onto the CPU at some point).
2374          */
2375         if (event->state == PERF_EVENT_STATE_ERROR)
2376                 return 0;
2377
2378         if (count < perf_event_read_size(event))
2379                 return -ENOSPC;
2380
2381         WARN_ON_ONCE(event->ctx->parent_ctx);
2382         if (read_format & PERF_FORMAT_GROUP)
2383                 ret = perf_event_read_group(event, read_format, buf);
2384         else
2385                 ret = perf_event_read_one(event, read_format, buf);
2386
2387         return ret;
2388 }
2389
2390 static ssize_t
2391 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
2392 {
2393         struct perf_event *event = file->private_data;
2394
2395         return perf_read_hw(event, buf, count);
2396 }
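/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file) of how the buffer produced by perf_event_read_one() above is laid
 * out when the event was opened with read_format =
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING.  The
 * perf_event_open() setup providing perf_fd is assumed to exist elsewhere.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

struct read_one {
	uint64_t value;		/* the counter value			  */
	uint64_t time_enabled;	/* from PERF_FORMAT_TOTAL_TIME_ENABLED	  */
	uint64_t time_running;	/* from PERF_FORMAT_TOTAL_TIME_RUNNING	  */
};

static int print_scaled_count(int perf_fd)
{
	struct read_one rf;
	double scale;

	if (read(perf_fd, &rf, sizeof(rf)) != sizeof(rf))
		return -1;

	/* Compensate for multiplexing: the counter only ran part of the time. */
	scale = rf.time_running ?
		(double)rf.time_enabled / (double)rf.time_running : 1.0;

	printf("raw %llu, scaled %.0f\n",
	       (unsigned long long)rf.value, (double)rf.value * scale);
	return 0;
}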
2397
2398 static unsigned int perf_poll(struct file *file, poll_table *wait)
2399 {
2400         struct perf_event *event = file->private_data;
2401         struct perf_buffer *buffer;
2402         unsigned int events = POLLHUP;
2403
2404         rcu_read_lock();
2405         buffer = rcu_dereference(event->buffer);
2406         if (buffer)
2407                 events = atomic_xchg(&buffer->poll, 0);
2408         rcu_read_unlock();
2409
2410         poll_wait(file, &event->waitq, wait);
2411
2412         return events;
2413 }
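/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file): waiting for ring-buffer data with poll(), which lands in
 * perf_poll() above.  POLLIN is raised via perf_output_wakeup() further
 * down once the buffer crosses its watermark.  perf_fd is assumed to be a
 * perf event file descriptor whose buffer was mmap()ed elsewhere.
 */
#include <poll.h>

static int wait_for_samples(int perf_fd)
{
	struct pollfd pfd = {
		.fd	= perf_fd,
		.events	= POLLIN,
	};

	/* Block until the kernel signals readable data (or an error/hangup). */
	if (poll(&pfd, 1, -1 /* no timeout */) < 0)
		return -1;

	return pfd.revents;
}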
2414
2415 static void perf_event_reset(struct perf_event *event)
2416 {
2417         (void)perf_event_read(event);
2418         local64_set(&event->count, 0);
2419         perf_event_update_userpage(event);
2420 }
2421
2422 /*
2423  * Holding the top-level event's child_mutex means that any
2424  * descendant process that has inherited this event will block
2425  * in sync_child_event if it goes to exit, thus satisfying the
2426  * task existence requirements of perf_event_enable/disable.
2427  */
2428 static void perf_event_for_each_child(struct perf_event *event,
2429                                         void (*func)(struct perf_event *))
2430 {
2431         struct perf_event *child;
2432
2433         WARN_ON_ONCE(event->ctx->parent_ctx);
2434         mutex_lock(&event->child_mutex);
2435         func(event);
2436         list_for_each_entry(child, &event->child_list, child_list)
2437                 func(child);
2438         mutex_unlock(&event->child_mutex);
2439 }
2440
2441 static void perf_event_for_each(struct perf_event *event,
2442                                   void (*func)(struct perf_event *))
2443 {
2444         struct perf_event_context *ctx = event->ctx;
2445         struct perf_event *sibling;
2446
2447         WARN_ON_ONCE(ctx->parent_ctx);
2448         mutex_lock(&ctx->mutex);
2449         event = event->group_leader;
2450
2451         perf_event_for_each_child(event, func);
2452         func(event);
2453         list_for_each_entry(sibling, &event->sibling_list, group_entry)
2454                 perf_event_for_each_child(sibling, func);
2455         mutex_unlock(&ctx->mutex);
2456 }
2457
2458 static int perf_event_period(struct perf_event *event, u64 __user *arg)
2459 {
2460         struct perf_event_context *ctx = event->ctx;
2461         unsigned long size;
2462         int ret = 0;
2463         u64 value;
2464
2465         if (!event->attr.sample_period)
2466                 return -EINVAL;
2467
2468         size = copy_from_user(&value, arg, sizeof(value));
2469         if (size)
2470                 return -EFAULT;
2471
2472         if (!value)
2473                 return -EINVAL;
2474
2475         raw_spin_lock_irq(&ctx->lock);
2476         if (event->attr.freq) {
2477                 if (value > sysctl_perf_event_sample_rate) {
2478                         ret = -EINVAL;
2479                         goto unlock;
2480                 }
2481
2482                 event->attr.sample_freq = value;
2483         } else {
2484                 event->attr.sample_period = value;
2485                 event->hw.sample_period = value;
2486         }
2487 unlock:
2488         raw_spin_unlock_irq(&ctx->lock);
2489
2490         return ret;
2491 }
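/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file): changing the sample period of an existing event through the
 * PERF_EVENT_IOC_PERIOD ioctl handled above.  Unlike most other perf
 * ioctls, the argument is a pointer to a u64.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_sample_period(int perf_fd, uint64_t new_period)
{
	/* Fails with -EINVAL if the event was not opened as a sampling event. */
	return ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);
}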
2492
2493 static const struct file_operations perf_fops;
2494
2495 static struct perf_event *perf_fget_light(int fd, int *fput_needed)
2496 {
2497         struct file *file;
2498
2499         file = fget_light(fd, fput_needed);
2500         if (!file)
2501                 return ERR_PTR(-EBADF);
2502
2503         if (file->f_op != &perf_fops) {
2504                 fput_light(file, *fput_needed);
2505                 *fput_needed = 0;
2506                 return ERR_PTR(-EBADF);
2507         }
2508
2509         return file->private_data;
2510 }
2511
2512 static int perf_event_set_output(struct perf_event *event,
2513                                  struct perf_event *output_event);
2514 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
2515
2516 static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
2517 {
2518         struct perf_event *event = file->private_data;
2519         void (*func)(struct perf_event *);
2520         u32 flags = arg;
2521
2522         switch (cmd) {
2523         case PERF_EVENT_IOC_ENABLE:
2524                 func = perf_event_enable;
2525                 break;
2526         case PERF_EVENT_IOC_DISABLE:
2527                 func = perf_event_disable;
2528                 break;
2529         case PERF_EVENT_IOC_RESET:
2530                 func = perf_event_reset;
2531                 break;
2532
2533         case PERF_EVENT_IOC_REFRESH:
2534                 return perf_event_refresh(event, arg);
2535
2536         case PERF_EVENT_IOC_PERIOD:
2537                 return perf_event_period(event, (u64 __user *)arg);
2538
2539         case PERF_EVENT_IOC_SET_OUTPUT:
2540         {
2541                 struct perf_event *output_event = NULL;
2542                 int fput_needed = 0;
2543                 int ret;
2544
2545                 if (arg != -1) {
2546                         output_event = perf_fget_light(arg, &fput_needed);
2547                         if (IS_ERR(output_event))
2548                                 return PTR_ERR(output_event);
2549                 }
2550
2551                 ret = perf_event_set_output(event, output_event);
2552                 if (output_event)
2553                         fput_light(output_event->filp, fput_needed);
2554
2555                 return ret;
2556         }
2557
2558         case PERF_EVENT_IOC_SET_FILTER:
2559                 return perf_event_set_filter(event, (void __user *)arg);
2560
2561         default:
2562                 return -ENOTTY;
2563         }
2564
2565         if (flags & PERF_IOC_FLAG_GROUP)
2566                 perf_event_for_each(event, func);
2567         else
2568                 perf_event_for_each_child(event, func);
2569
2570         return 0;
2571 }
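/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file): passing PERF_IOC_FLAG_GROUP as the ioctl argument makes the
 * switch above apply func to the whole group via perf_event_for_each()
 * rather than just to this event and its inherited children.
 */
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void measure_with_group(int group_leader_fd)
{
	/* Reset and enable every event in the group ... */
	ioctl(group_leader_fd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
	ioctl(group_leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	/* ... run the workload being measured here ... */

	/* ... then disable the whole group again. */
	ioctl(group_leader_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}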
2572
2573 int perf_event_task_enable(void)
2574 {
2575         struct perf_event *event;
2576
2577         mutex_lock(&current->perf_event_mutex);
2578         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2579                 perf_event_for_each_child(event, perf_event_enable);
2580         mutex_unlock(&current->perf_event_mutex);
2581
2582         return 0;
2583 }
2584
2585 int perf_event_task_disable(void)
2586 {
2587         struct perf_event *event;
2588
2589         mutex_lock(&current->perf_event_mutex);
2590         list_for_each_entry(event, &current->perf_event_list, owner_entry)
2591                 perf_event_for_each_child(event, perf_event_disable);
2592         mutex_unlock(&current->perf_event_mutex);
2593
2594         return 0;
2595 }
2596
2597 #ifndef PERF_EVENT_INDEX_OFFSET
2598 # define PERF_EVENT_INDEX_OFFSET 0
2599 #endif
2600
2601 static int perf_event_index(struct perf_event *event)
2602 {
2603         if (event->hw.state & PERF_HES_STOPPED)
2604                 return 0;
2605
2606         if (event->state != PERF_EVENT_STATE_ACTIVE)
2607                 return 0;
2608
2609         return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
2610 }
2611
2612  * Callers need to ensure there can be no nesting of this function; otherwise
2613  * the seqlock logic goes bad. We cannot serialize this because the arch
2614  * the seqlock logic goes bad. We can not serialize this because the arch
2615  * code calls this from NMI context.
2616  */
2617 void perf_event_update_userpage(struct perf_event *event)
2618 {
2619         struct perf_event_mmap_page *userpg;
2620         struct perf_buffer *buffer;
2621
2622         rcu_read_lock();
2623         buffer = rcu_dereference(event->buffer);
2624         if (!buffer)
2625                 goto unlock;
2626
2627         userpg = buffer->user_page;
2628
2629         /*
2630          * Disable preemption so as not to let the corresponding user-space
2631          * reader spin too long if we get preempted.
2632          */
2633         preempt_disable();
2634         ++userpg->lock;
2635         barrier();
2636         userpg->index = perf_event_index(event);
2637         userpg->offset = perf_event_count(event);
2638         if (event->state == PERF_EVENT_STATE_ACTIVE)
2639                 userpg->offset -= local64_read(&event->hw.prev_count);
2640
2641         userpg->time_enabled = event->total_time_enabled +
2642                         atomic64_read(&event->child_total_time_enabled);
2643
2644         userpg->time_running = event->total_time_running +
2645                         atomic64_read(&event->child_total_time_running);
2646
2647         barrier();
2648         ++userpg->lock;
2649         preempt_enable();
2650 unlock:
2651         rcu_read_unlock();
2652 }
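/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file) of the reader side of the ->lock sequence count maintained above,
 * loosely following the usage comment in include/linux/perf_event.h: sample
 * the fields between two reads of pg->lock and retry if the kernel updated
 * the page in between.  "pg" is assumed to point at the first page of the
 * event's mmap()ed area; __sync_synchronize() stands in for a real barrier.
 */
#include <stdint.h>
#include <linux/perf_event.h>

static int64_t read_user_page_offset(volatile struct perf_event_mmap_page *pg)
{
	uint32_t seq;
	int64_t offset;

	do {
		seq = pg->lock;
		__sync_synchronize();

		/* index/offset are only consistent within one sequence. */
		offset = pg->offset;

		__sync_synchronize();
	} while (pg->lock != seq);

	return offset;
}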
2653
2654 static unsigned long perf_data_size(struct perf_buffer *buffer);
2655
2656 static void
2657 perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags)
2658 {
2659         long max_size = perf_data_size(buffer);
2660
2661         if (watermark)
2662                 buffer->watermark = min(max_size, watermark);
2663
2664         if (!buffer->watermark)
2665                 buffer->watermark = max_size / 2;
2666
2667         if (flags & PERF_BUFFER_WRITABLE)
2668                 buffer->writable = 1;
2669
2670         atomic_set(&buffer->refcount, 1);
2671 }
2672
2673 #ifndef CONFIG_PERF_USE_VMALLOC
2674
2675 /*
2676  * Back perf_mmap() with regular order-0 GFP_KERNEL pages.
2677  */
2678
2679 static struct page *
2680 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2681 {
2682         if (pgoff > buffer->nr_pages)
2683                 return NULL;
2684
2685         if (pgoff == 0)
2686                 return virt_to_page(buffer->user_page);
2687
2688         return virt_to_page(buffer->data_pages[pgoff - 1]);
2689 }
2690
2691 static void *perf_mmap_alloc_page(int cpu)
2692 {
2693         struct page *page;
2694         int node;
2695
2696         node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2697         page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2698         if (!page)
2699                 return NULL;
2700
2701         return page_address(page);
2702 }
2703
2704 static struct perf_buffer *
2705 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2706 {
2707         struct perf_buffer *buffer;
2708         unsigned long size;
2709         int i;
2710
2711         size = sizeof(struct perf_buffer);
2712         size += nr_pages * sizeof(void *);
2713
2714         buffer = kzalloc(size, GFP_KERNEL);
2715         if (!buffer)
2716                 goto fail;
2717
2718         buffer->user_page = perf_mmap_alloc_page(cpu);
2719         if (!buffer->user_page)
2720                 goto fail_user_page;
2721
2722         for (i = 0; i < nr_pages; i++) {
2723                 buffer->data_pages[i] = perf_mmap_alloc_page(cpu);
2724                 if (!buffer->data_pages[i])
2725                         goto fail_data_pages;
2726         }
2727
2728         buffer->nr_pages = nr_pages;
2729
2730         perf_buffer_init(buffer, watermark, flags);
2731
2732         return buffer;
2733
2734 fail_data_pages:
2735         for (i--; i >= 0; i--)
2736                 free_page((unsigned long)buffer->data_pages[i]);
2737
2738         free_page((unsigned long)buffer->user_page);
2739
2740 fail_user_page:
2741         kfree(buffer);
2742
2743 fail:
2744         return NULL;
2745 }
2746
2747 static void perf_mmap_free_page(unsigned long addr)
2748 {
2749         struct page *page = virt_to_page((void *)addr);
2750
2751         page->mapping = NULL;
2752         __free_page(page);
2753 }
2754
2755 static void perf_buffer_free(struct perf_buffer *buffer)
2756 {
2757         int i;
2758
2759         perf_mmap_free_page((unsigned long)buffer->user_page);
2760         for (i = 0; i < buffer->nr_pages; i++)
2761                 perf_mmap_free_page((unsigned long)buffer->data_pages[i]);
2762         kfree(buffer);
2763 }
2764
2765 static inline int page_order(struct perf_buffer *buffer)
2766 {
2767         return 0;
2768 }
2769
2770 #else
2771
2772 /*
2773  * Back perf_mmap() with vmalloc memory.
2774  *
2775  * Required for architectures that have d-cache aliasing issues.
2776  */
2777
2778 static inline int page_order(struct perf_buffer *buffer)
2779 {
2780         return buffer->page_order;
2781 }
2782
2783 static struct page *
2784 perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff)
2785 {
2786         if (pgoff > (1UL << page_order(buffer)))
2787                 return NULL;
2788
2789         return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE);
2790 }
2791
2792 static void perf_mmap_unmark_page(void *addr)
2793 {
2794         struct page *page = vmalloc_to_page(addr);
2795
2796         page->mapping = NULL;
2797 }
2798
2799 static void perf_buffer_free_work(struct work_struct *work)
2800 {
2801         struct perf_buffer *buffer;
2802         void *base;
2803         int i, nr;
2804
2805         buffer = container_of(work, struct perf_buffer, work);
2806         nr = 1 << page_order(buffer);
2807
2808         base = buffer->user_page;
2809         for (i = 0; i < nr + 1; i++)
2810                 perf_mmap_unmark_page(base + (i * PAGE_SIZE));
2811
2812         vfree(base);
2813         kfree(buffer);
2814 }
2815
2816 static void perf_buffer_free(struct perf_buffer *buffer)
2817 {
2818         schedule_work(&buffer->work);
2819 }
2820
2821 static struct perf_buffer *
2822 perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags)
2823 {
2824         struct perf_buffer *buffer;
2825         unsigned long size;
2826         void *all_buf;
2827
2828         size = sizeof(struct perf_buffer);
2829         size += sizeof(void *);
2830
2831         buffer = kzalloc(size, GFP_KERNEL);
2832         if (!buffer)
2833                 goto fail;
2834
2835         INIT_WORK(&buffer->work, perf_buffer_free_work);
2836
2837         all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
2838         if (!all_buf)
2839                 goto fail_all_buf;
2840
2841         buffer->user_page = all_buf;
2842         buffer->data_pages[0] = all_buf + PAGE_SIZE;
2843         buffer->page_order = ilog2(nr_pages);
2844         buffer->nr_pages = 1;
2845
2846         perf_buffer_init(buffer, watermark, flags);
2847
2848         return buffer;
2849
2850 fail_all_buf:
2851         kfree(buffer);
2852
2853 fail:
2854         return NULL;
2855 }
2856
2857 #endif
2858
2859 static unsigned long perf_data_size(struct perf_buffer *buffer)
2860 {
2861         return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer));
2862 }
2863
2864 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
2865 {
2866         struct perf_event *event = vma->vm_file->private_data;
2867         struct perf_buffer *buffer;
2868         int ret = VM_FAULT_SIGBUS;
2869
2870         if (vmf->flags & FAULT_FLAG_MKWRITE) {
2871                 if (vmf->pgoff == 0)
2872                         ret = 0;
2873                 return ret;
2874         }
2875
2876         rcu_read_lock();
2877         buffer = rcu_dereference(event->buffer);
2878         if (!buffer)
2879                 goto unlock;
2880
2881         if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
2882                 goto unlock;
2883
2884         vmf->page = perf_mmap_to_page(buffer, vmf->pgoff);
2885         if (!vmf->page)
2886                 goto unlock;
2887
2888         get_page(vmf->page);
2889         vmf->page->mapping = vma->vm_file->f_mapping;
2890         vmf->page->index   = vmf->pgoff;
2891
2892         ret = 0;
2893 unlock:
2894         rcu_read_unlock();
2895
2896         return ret;
2897 }
2898
2899 static void perf_buffer_free_rcu(struct rcu_head *rcu_head)
2900 {
2901         struct perf_buffer *buffer;
2902
2903         buffer = container_of(rcu_head, struct perf_buffer, rcu_head);
2904         perf_buffer_free(buffer);
2905 }
2906
2907 static struct perf_buffer *perf_buffer_get(struct perf_event *event)
2908 {
2909         struct perf_buffer *buffer;
2910
2911         rcu_read_lock();
2912         buffer = rcu_dereference(event->buffer);
2913         if (buffer) {
2914                 if (!atomic_inc_not_zero(&buffer->refcount))
2915                         buffer = NULL;
2916         }
2917         rcu_read_unlock();
2918
2919         return buffer;
2920 }
2921
2922 static void perf_buffer_put(struct perf_buffer *buffer)
2923 {
2924         if (!atomic_dec_and_test(&buffer->refcount))
2925                 return;
2926
2927         call_rcu(&buffer->rcu_head, perf_buffer_free_rcu);
2928 }
2929
2930 static void perf_mmap_open(struct vm_area_struct *vma)
2931 {
2932         struct perf_event *event = vma->vm_file->private_data;
2933
2934         atomic_inc(&event->mmap_count);
2935 }
2936
2937 static void perf_mmap_close(struct vm_area_struct *vma)
2938 {
2939         struct perf_event *event = vma->vm_file->private_data;
2940
2941         if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
2942                 unsigned long size = perf_data_size(event->buffer);
2943                 struct user_struct *user = event->mmap_user;
2944                 struct perf_buffer *buffer = event->buffer;
2945
2946                 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
2947                 vma->vm_mm->locked_vm -= event->mmap_locked;
2948                 rcu_assign_pointer(event->buffer, NULL);
2949                 mutex_unlock(&event->mmap_mutex);
2950
2951                 perf_buffer_put(buffer);
2952                 free_uid(user);
2953         }
2954 }
2955
2956 static const struct vm_operations_struct perf_mmap_vmops = {
2957         .open           = perf_mmap_open,
2958         .close          = perf_mmap_close,
2959         .fault          = perf_mmap_fault,
2960         .page_mkwrite   = perf_mmap_fault,
2961 };
2962
2963 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
2964 {
2965         struct perf_event *event = file->private_data;
2966         unsigned long user_locked, user_lock_limit;
2967         struct user_struct *user = current_user();
2968         unsigned long locked, lock_limit;
2969         struct perf_buffer *buffer;
2970         unsigned long vma_size;
2971         unsigned long nr_pages;
2972         long user_extra, extra;
2973         int ret = 0, flags = 0;
2974
2975         /*
2976          * Don't allow mmap() of inherited per-task counters. This would
2977          * create a performance issue due to all children writing to the
2978          * same buffer.
2979          */
2980         if (event->cpu == -1 && event->attr.inherit)
2981                 return -EINVAL;
2982
2983         if (!(vma->vm_flags & VM_SHARED))
2984                 return -EINVAL;
2985
2986         vma_size = vma->vm_end - vma->vm_start;
2987         nr_pages = (vma_size / PAGE_SIZE) - 1;
2988
2989         /*
2990          * If we have buffer pages ensure they're a power-of-two number, so we
2991          * can do bitmasks instead of modulo.
2992          */
2993         if (nr_pages != 0 && !is_power_of_2(nr_pages))
2994                 return -EINVAL;
2995
2996         if (vma_size != PAGE_SIZE * (1 + nr_pages))
2997                 return -EINVAL;
2998
2999         if (vma->vm_pgoff != 0)
3000                 return -EINVAL;
3001
3002         WARN_ON_ONCE(event->ctx->parent_ctx);
3003         mutex_lock(&event->mmap_mutex);
3004         if (event->buffer) {
3005                 if (event->buffer->nr_pages == nr_pages)
3006                         atomic_inc(&event->buffer->refcount);
3007                 else
3008                         ret = -EINVAL;
3009                 goto unlock;
3010         }
3011
3012         user_extra = nr_pages + 1;
3013         user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3014
3015         /*
3016          * Increase the limit linearly with more CPUs:
3017          */
3018         user_lock_limit *= num_online_cpus();
3019
3020         user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3021
3022         extra = 0;
3023         if (user_locked > user_lock_limit)
3024                 extra = user_locked - user_lock_limit;
3025
3026         lock_limit = rlimit(RLIMIT_MEMLOCK);
3027         lock_limit >>= PAGE_SHIFT;
3028         locked = vma->vm_mm->locked_vm + extra;
3029
3030         if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3031                 !capable(CAP_IPC_LOCK)) {
3032                 ret = -EPERM;
3033                 goto unlock;
3034         }
3035
3036         WARN_ON(event->buffer);
3037
3038         if (vma->vm_flags & VM_WRITE)
3039                 flags |= PERF_BUFFER_WRITABLE;
3040
3041         buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark,
3042                                    event->cpu, flags);
3043         if (!buffer) {
3044                 ret = -ENOMEM;
3045                 goto unlock;
3046         }
3047         rcu_assign_pointer(event->buffer, buffer);
3048
3049         atomic_long_add(user_extra, &user->locked_vm);
3050         event->mmap_locked = extra;
3051         event->mmap_user = get_current_user();
3052         vma->vm_mm->locked_vm += event->mmap_locked;
3053
3054 unlock:
3055         if (!ret)
3056                 atomic_inc(&event->mmap_count);
3057         mutex_unlock(&event->mmap_mutex);
3058
3059         vma->vm_flags |= VM_RESERVED;
3060         vma->vm_ops = &perf_mmap_vmops;
3061
3062         return ret;
3063 }
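/*
 * Editor's note -- an illustrative user-space sketch (not part of this
 * file): the mapping handled by perf_mmap() above must be MAP_SHARED,
 * start at file offset 0 and cover the metadata page plus a power-of-two
 * number of data pages, i.e. (1 + 2^n) * page_size bytes.
 */
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_perf_buffer(int perf_fd, unsigned int data_page_order)
{
	size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = (1 + ((size_t)1 << data_page_order)) * page_size;

	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_SHARED, perf_fd, 0);

	return base == MAP_FAILED ? NULL : base;
}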
3064
3065 static int perf_fasync(int fd, struct file *filp, int on)
3066 {
3067         struct inode *inode = filp->f_path.dentry->d_inode;
3068         struct perf_event *event = filp->private_data;
3069         int retval;
3070
3071         mutex_lock(&inode->i_mutex);
3072         retval = fasync_helper(fd, filp, on, &event->fasync);
3073         mutex_unlock(&inode->i_mutex);
3074
3075         if (retval < 0)
3076                 return retval;
3077
3078         return 0;
3079 }
3080
3081 static const struct file_operations perf_fops = {
3082         .llseek                 = no_llseek,
3083         .release                = perf_release,
3084         .read                   = perf_read,
3085         .poll                   = perf_poll,
3086         .unlocked_ioctl         = perf_ioctl,
3087         .compat_ioctl           = perf_ioctl,
3088         .mmap                   = perf_mmap,
3089         .fasync                 = perf_fasync,
3090 };
3091
3092 /*
3093  * Perf event wakeup
3094  *
3095  * If there's data, ensure we set the poll() state and publish everything
3096  * to user-space before waking everybody up.
3097  */
3098
3099 void perf_event_wakeup(struct perf_event *event)
3100 {
3101         wake_up_all(&event->waitq);
3102
3103         if (event->pending_kill) {
3104                 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3105                 event->pending_kill = 0;
3106         }
3107 }
3108
3109 /*
3110  * Pending wakeups
3111  *
3112  * Handle the case where we need to wake up from NMI (or rq->lock) context.
3113  *
3114  * The NMI bit means we cannot possibly take locks. Therefore, maintain a
3115  * singly-linked list and use cmpxchg() to add entries locklessly.
3116  */
3117
3118 static void perf_pending_event(struct perf_pending_entry *entry)
3119 {
3120         struct perf_event *event = container_of(entry,
3121                         struct perf_event, pending);
3122
3123         if (event->pending_disable) {
3124                 event->pending_disable = 0;
3125                 __perf_event_disable(event);
3126         }
3127
3128         if (event->pending_wakeup) {
3129                 event->pending_wakeup = 0;
3130                 perf_event_wakeup(event);
3131         }
3132 }
3133
3134 #define PENDING_TAIL ((struct perf_pending_entry *)-1UL)
3135
3136 static DEFINE_PER_CPU(struct perf_pending_entry *, perf_pending_head) = {
3137         PENDING_TAIL,
3138 };
3139
3140 static void perf_pending_queue(struct perf_pending_entry *entry,
3141                                void (*func)(struct perf_pending_entry *))
3142 {
3143         struct perf_pending_entry **head;
3144
3145         if (cmpxchg(&entry->next, NULL, PENDING_TAIL) != NULL)
3146                 return;
3147
3148         entry->func = func;
3149
3150         head = &get_cpu_var(perf_pending_head);
3151
3152         do {
3153                 entry->next = *head;
3154         } while (cmpxchg(head, entry->next, entry) != entry->next);
3155
3156         set_perf_event_pending();
3157
3158         put_cpu_var(perf_pending_head);
3159 }
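/*
 * Editor's note -- the cmpxchg() loop above is the classic lock-free
 * "push onto a singly-linked list" pattern.  A user-space analogue
 * (illustrative only, not part of this file), with a GCC builtin standing
 * in for the kernel's cmpxchg():
 */
struct llist_node_sketch {
	struct llist_node_sketch *next;
};

static void lockless_push(struct llist_node_sketch **head,
			  struct llist_node_sketch *node)
{
	struct llist_node_sketch *old;

	do {
		old = *head;
		node->next = old;
		/* Retry if someone else pushed between the load and the CAS. */
	} while (!__sync_bool_compare_and_swap(head, old, node));
}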
3160
3161 static int __perf_pending_run(void)
3162 {
3163         struct perf_pending_entry *list;
3164         int nr = 0;
3165
3166         list = xchg(&__get_cpu_var(perf_pending_head), PENDING_TAIL);
3167         while (list != PENDING_TAIL) {
3168                 void (*func)(struct perf_pending_entry *);
3169                 struct perf_pending_entry *entry = list;
3170
3171                 list = list->next;
3172
3173                 func = entry->func;
3174                 entry->next = NULL;
3175                 /*
3176                  * Ensure we observe the unqueue before we issue the wakeup,
3177                  * so that we won't be waiting forever.
3178                  * -- see perf_not_pending().
3179                  */
3180                 smp_wmb();
3181
3182                 func(entry);
3183                 nr++;
3184         }
3185
3186         return nr;
3187 }
3188
3189 static inline int perf_not_pending(struct perf_event *event)
3190 {
3191         /*
3192          * If we flush on whatever CPU we are running on, there is a chance we don't
3193          * need to wait.
3194          */
3195         get_cpu();
3196         __perf_pending_run();
3197         put_cpu();
3198
3199         /*
3200          * Ensure we see the proper queue state before going to sleep
3201          * so that we do not miss the wakeup. -- see __perf_pending_run()
3202          */
3203         smp_rmb();
3204         return event->pending.next == NULL;
3205 }
3206
3207 static void perf_pending_sync(struct perf_event *event)
3208 {
3209         wait_event(event->waitq, perf_not_pending(event));
3210 }
3211
3212 void perf_event_do_pending(void)
3213 {
3214         __perf_pending_run();
3215 }
3216
3217 /*
3218  * We assume KVM is the only user of these callbacks.
3219  * Later on, we might change it to a list if there is
3220  * another virtualization implementation supporting the callbacks.
3221  */
3222 struct perf_guest_info_callbacks *perf_guest_cbs;
3223
3224 int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3225 {
3226         perf_guest_cbs = cbs;
3227         return 0;
3228 }
3229 EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3230
3231 int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3232 {
3233         perf_guest_cbs = NULL;
3234         return 0;
3235 }
3236 EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3237
3238 /*
3239  * Output
3240  */
3241 static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail,
3242                               unsigned long offset, unsigned long head)
3243 {
3244         unsigned long mask;
3245
3246         if (!buffer->writable)
3247                 return true;
3248
3249         mask = perf_data_size(buffer) - 1;
3250
3251         offset = (offset - tail) & mask;
3252         head   = (head   - tail) & mask;
3253
3254         if ((int)(head - offset) < 0)
3255                 return false;
3256
3257         return true;
3258 }
3259
3260 static void perf_output_wakeup(struct perf_output_handle *handle)
3261 {
3262         atomic_set(&handle->buffer->poll, POLL_IN);
3263
3264         if (handle->nmi) {
3265                 handle->event->pending_wakeup = 1;
3266                 perf_pending_queue(&handle->event->pending,
3267                                    perf_pending_event);
3268         } else
3269                 perf_event_wakeup(handle->event);
3270 }
3271
3272 /*
3273  * We need to ensure a later event_id doesn't publish a head when a former
3274  * event isn't done writing. However since we need to deal with NMIs we
3275  * event isn't done writing. However, since we need to deal with NMIs, we
3276  *
3277  * We only publish the head (and generate a wakeup) when the outer-most
3278  * event completes.
3279  */
3280 static void perf_output_get_handle(struct perf_output_handle *handle)
3281 {
3282         struct perf_buffer *buffer = handle->buffer;
3283
3284         preempt_disable();
3285         local_inc(&buffer->nest);
3286         handle->wakeup = local_read(&buffer->wakeup);
3287 }
3288
3289 static void perf_output_put_handle(struct perf_output_handle *handle)
3290 {
3291         struct perf_buffer *buffer = handle->buffer;
3292         unsigned long head;
3293
3294 again:
3295         head = local_read(&buffer->head);
3296
3297         /*
3298          * IRQ/NMI can happen here, which means we can miss a head update.
3299          */
3300
3301         if (!local_dec_and_test(&buffer->nest))
3302                 goto out;
3303
3304         /*
3305          * Publish the known good head. Rely on the full barrier implied
3306          * by local_dec_and_test() to order the buffer->head read and this
3307          * write.
3308          */
3309         buffer->user_page->data_head = head;
3310
3311         /*
3312          * Now check if we missed an update; rely on the (compiler)
3313          * barrier in local_dec_and_test() to re-read buffer->head.
3314          */
3315         if (unlikely(head != local_read(&buffer->head))) {
3316                 local_inc(&buffer->nest);
3317                 goto again;
3318         }
3319
3320         if (handle->wakeup != local_read(&buffer->wakeup))
3321                 perf_output_wakeup(handle);
3322
3323 out:
3324         preempt_enable();
3325 }
3326
3327 __always_inline void perf_output_copy(struct perf_output_handle *handle,
3328                       const void *buf, unsigned int len)
3329 {
3330         do {
3331                 unsigned long size = min_t(unsigned long, handle->size, len);
3332
3333                 memcpy(handle->addr, buf, size);
3334
3335                 len -= size;
3336                 handle->addr += size;
3337                 buf += size;
3338                 handle->size -= size;
3339                 if (!handle->size) {
3340                         struct perf_buffer *buffer = handle->buffer;
3341
3342                         handle->page++;
3343                         handle->page &= buffer->nr_pages - 1;
3344                         handle->addr = buffer->data_pages[handle->page];
3345                         handle->size = PAGE_SIZE << page_order(buffer);
3346                 }
3347         } while (len);
3348 }
3349
3350 int perf_output_begin(struct perf_output_handle *handle,
3351                       struct perf_event *event, unsigned int size,
3352                       int nmi, int sample)
3353 {
3354         struct perf_buffer *buffer;
3355         unsigned long tail, offset, head;
3356         int have_lost;
3357         struct {
3358                 struct perf_event_header header;
3359                 u64                      id;
3360                 u64                      lost;
3361         } lost_event;
3362
3363         rcu_read_lock();
3364         /*
3365          * For inherited events we send all the output towards the parent.
3366          */
3367         if (event->parent)
3368                 event = event->parent;
3369
3370         buffer = rcu_dereference(event->buffer);
3371         if (!buffer)
3372                 goto out;
3373
3374         handle->buffer  = buffer;
3375         handle->event   = event;
3376         handle->nmi     = nmi;
3377         handle->sample  = sample;
3378
3379         if (!buffer->nr_pages)
3380                 goto out;
3381
3382         have_lost = local_read(&buffer->lost);
3383         if (have_lost)
3384                 size += sizeof(lost_event);
3385
3386         perf_output_get_handle(handle);
3387
3388         do {
3389                 /*
3390                  * Userspace could choose to issue an mb() before updating the
3391                  * tail pointer, so that all reads are completed before the
3392                  * write is issued.
3393                  */
3394                 tail = ACCESS_ONCE(buffer->user_page->data_tail);
3395                 smp_rmb();
3396                 offset = head = local_read(&buffer->head);
3397                 head += size;
3398                 if (unlikely(!perf_output_space(buffer, tail, offset, head)))
3399                         goto fail;
3400         } while (local_cmpxchg(&buffer->head, offset, head) != offset);
3401
3402         if (head - local_read(&buffer->wakeup) > buffer->watermark)
3403                 local_add(buffer->watermark, &buffer->wakeup);
3404
3405         handle->page = offset >> (PAGE_SHIFT + page_order(buffer));
3406         handle->page &= buffer->nr_pages - 1;
3407         handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1);
3408         handle->addr = buffer->data_pages[handle->page];
3409         handle->addr += handle->size;
3410         handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size;
3411
3412         if (have_lost) {
3413                 lost_event.header.type = PERF_RECORD_LOST;
3414                 lost_event.header.misc = 0;
3415                 lost_event.header.size = sizeof(lost_event);
3416                 lost_event.id          = event->id;
3417                 lost_event.lost        = local_xchg(&buffer->lost, 0);
3418
3419                 perf_output_put(handle, lost_event);
3420         }
3421
3422         return 0;
3423
3424 fail:
3425         local_inc(&buffer->lost);
3426         perf_output_put_handle(handle);
3427 out:
3428         rcu_read_unlock();
3429
3430         return -ENOSPC;
3431 }
3432
3433 void perf_output_end(struct perf_output_handle *handle)
3434 {
3435         struct perf_event *event = handle->event;
3436         struct perf_buffer *buffer = handle->buffer;
3437
3438         int wakeup_events = event->attr.wakeup_events;
3439
3440         if (handle->sample && wakeup_events) {
3441                 int events = local_inc_return(&buffer->events);
3442                 if (events >= wakeup_events) {
3443                         local_sub(wakeup_events, &buffer->events);
3444                         local_inc(&buffer->wakeup);
3445                 }
3446         }
3447
3448         perf_output_put_handle(handle);
3449         rcu_read_unlock();
3450 }
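
/*
 * Typical output sequence, as used by the record emitters below
 * (illustrative sketch):
 *
 *      if (!perf_output_begin(&handle, event, size, nmi, sample)) {
 *              perf_output_put(&handle, record);
 *              perf_output_end(&handle);
 *      }
 *
 * perf_output_begin() takes the RCU read lock and keeps it held on success;
 * perf_output_end() (or the failure path of perf_output_begin()) drops it.
 */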
3451
3452 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
3453 {
3454         /*
3455          * only top level events have the pid namespace they were created in
3456          */
3457         if (event->parent)
3458                 event = event->parent;
3459
3460         return task_tgid_nr_ns(p, event->ns);
3461 }
3462
3463 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
3464 {
3465         /*
3466          * only top level events have the pid namespace they were created in
3467          */
3468         if (event->parent)
3469                 event = event->parent;
3470
3471         return task_pid_nr_ns(p, event->ns);
3472 }
3473
3474 static void perf_output_read_one(struct perf_output_handle *handle,
3475                                  struct perf_event *event)
3476 {
3477         u64 read_format = event->attr.read_format;
3478         u64 values[4];
3479         int n = 0;
3480
3481         values[n++] = perf_event_count(event);
3482         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
3483                 values[n++] = event->total_time_enabled +
3484                         atomic64_read(&event->child_total_time_enabled);
3485         }
3486         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
3487                 values[n++] = event->total_time_running +
3488                         atomic64_read(&event->child_total_time_running);
3489         }
3490         if (read_format & PERF_FORMAT_ID)
3491                 values[n++] = primary_event_id(event);
3492
3493         perf_output_copy(handle, values, n * sizeof(u64));
3494 }
3495
3496 /*
3497  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3498  */
3499 static void perf_output_read_group(struct perf_output_handle *handle,
3500                             struct perf_event *event)
3501 {
3502         struct perf_event *leader = event->group_leader, *sub;
3503         u64 read_format = event->attr.read_format;
3504         u64 values[5];
3505         int n = 0;
3506
3507         values[n++] = 1 + leader->nr_siblings;
3508
3509         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3510                 values[n++] = leader->total_time_enabled;
3511
3512         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3513                 values[n++] = leader->total_time_running;
3514
3515         if (leader != event)
3516                 leader->pmu->read(leader);
3517
3518         values[n++] = perf_event_count(leader);
3519         if (read_format & PERF_FORMAT_ID)
3520                 values[n++] = primary_event_id(leader);
3521
3522         perf_output_copy(handle, values, n * sizeof(u64));
3523
3524         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3525                 n = 0;
3526
3527                 if (sub != event)
3528                         sub->pmu->read(sub);
3529
3530                 values[n++] = perf_event_count(sub);
3531                 if (read_format & PERF_FORMAT_ID)
3532                         values[n++] = primary_event_id(sub);
3533
3534                 perf_output_copy(handle, values, n * sizeof(u64));
3535         }
3536 }
3537
3538 static void perf_output_read(struct perf_output_handle *handle,
3539                              struct perf_event *event)
3540 {
3541         if (event->attr.read_format & PERF_FORMAT_GROUP)
3542                 perf_output_read_group(handle, event);
3543         else
3544                 perf_output_read_one(handle, event);
3545 }
3546
3547 void perf_output_sample(struct perf_output_handle *handle,
3548                         struct perf_event_header *header,
3549                         struct perf_sample_data *data,
3550                         struct perf_event *event)
3551 {
3552         u64 sample_type = data->type;
3553
3554         perf_output_put(handle, *header);
3555
3556         if (sample_type & PERF_SAMPLE_IP)
3557                 perf_output_put(handle, data->ip);
3558
3559         if (sample_type & PERF_SAMPLE_TID)
3560                 perf_output_put(handle, data->tid_entry);
3561
3562         if (sample_type & PERF_SAMPLE_TIME)
3563                 perf_output_put(handle, data->time);
3564
3565         if (sample_type & PERF_SAMPLE_ADDR)
3566                 perf_output_put(handle, data->addr);
3567
3568         if (sample_type & PERF_SAMPLE_ID)
3569                 perf_output_put(handle, data->id);
3570
3571         if (sample_type & PERF_SAMPLE_STREAM_ID)
3572                 perf_output_put(handle, data->stream_id);
3573
3574         if (sample_type & PERF_SAMPLE_CPU)
3575                 perf_output_put(handle, data->cpu_entry);
3576
3577         if (sample_type & PERF_SAMPLE_PERIOD)
3578                 perf_output_put(handle, data->period);
3579
3580         if (sample_type & PERF_SAMPLE_READ)
3581                 perf_output_read(handle, event);
3582
3583         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3584                 if (data->callchain) {
3585                         int size = 1;
3586
3587                         if (data->callchain)
3588                                 size += data->callchain->nr;
3589
3590                         size *= sizeof(u64);
3591
3592                         perf_output_copy(handle, data->callchain, size);
3593                 } else {
3594                         u64 nr = 0;
3595                         perf_output_put(handle, nr);
3596                 }
3597         }
3598
3599         if (sample_type & PERF_SAMPLE_RAW) {
3600                 if (data->raw) {
3601                         perf_output_put(handle, data->raw->size);
3602                         perf_output_copy(handle, data->raw->data,
3603                                          data->raw->size);
3604                 } else {
3605                         struct {
3606                                 u32     size;
3607                                 u32     data;
3608                         } raw = {
3609                                 .size = sizeof(u32),
3610                                 .data = 0,
3611                         };
3612                         perf_output_put(handle, raw);
3613                 }
3614         }
3615 }
3616
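/*
 * perf_prepare_sample() computes header->size for exactly the fields that
 * perf_output_sample() above will emit for the same sample_type, so the
 * two if-chains have to stay in sync.
 */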
3617 void perf_prepare_sample(struct perf_event_header *header,
3618                          struct perf_sample_data *data,
3619                          struct perf_event *event,
3620                          struct pt_regs *regs)
3621 {
3622         u64 sample_type = event->attr.sample_type;
3623
3624         data->type = sample_type;
3625
3626         header->type = PERF_RECORD_SAMPLE;
3627         header->size = sizeof(*header);
3628
3629         header->misc = 0;
3630         header->misc |= perf_misc_flags(regs);
3631
3632         if (sample_type & PERF_SAMPLE_IP) {
3633                 data->ip = perf_instruction_pointer(regs);
3634
3635                 header->size += sizeof(data->ip);
3636         }
3637
3638         if (sample_type & PERF_SAMPLE_TID) {
3639                 /* namespace issues */
3640                 data->tid_entry.pid = perf_event_pid(event, current);
3641                 data->tid_entry.tid = perf_event_tid(event, current);
3642
3643                 header->size += sizeof(data->tid_entry);
3644         }
3645
3646         if (sample_type & PERF_SAMPLE_TIME) {
3647                 data->time = perf_clock();
3648
3649                 header->size += sizeof(data->time);
3650         }
3651
3652         if (sample_type & PERF_SAMPLE_ADDR)
3653                 header->size += sizeof(data->addr);
3654
3655         if (sample_type & PERF_SAMPLE_ID) {
3656                 data->id = primary_event_id(event);
3657
3658                 header->size += sizeof(data->id);
3659         }
3660
3661         if (sample_type & PERF_SAMPLE_STREAM_ID) {
3662                 data->stream_id = event->id;
3663
3664                 header->size += sizeof(data->stream_id);
3665         }
3666
3667         if (sample_type & PERF_SAMPLE_CPU) {
3668                 data->cpu_entry.cpu             = raw_smp_processor_id();
3669                 data->cpu_entry.reserved        = 0;
3670
3671                 header->size += sizeof(data->cpu_entry);
3672         }
3673
3674         if (sample_type & PERF_SAMPLE_PERIOD)
3675                 header->size += sizeof(data->period);
3676
3677         if (sample_type & PERF_SAMPLE_READ)
3678                 header->size += perf_event_read_size(event);
3679
3680         if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3681                 int size = 1;
3682
3683                 data->callchain = perf_callchain(regs);
3684
3685                 if (data->callchain)
3686                         size += data->callchain->nr;
3687
3688                 header->size += size * sizeof(u64);
3689         }
3690
3691         if (sample_type & PERF_SAMPLE_RAW) {
3692                 int size = sizeof(u32);
3693
3694                 if (data->raw)
3695                         size += data->raw->size;
3696                 else
3697                         size += sizeof(u32);
3698
3699                 WARN_ON_ONCE(size & (sizeof(u64)-1));
3700                 header->size += size;
3701         }
3702 }
3703
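/*
 * Emit one PERF_RECORD_SAMPLE for @event: size the record, reserve space
 * in the ring buffer, write the sample data and publish it.
 */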
3704 static void perf_event_output(struct perf_event *event, int nmi,
3705                                 struct perf_sample_data *data,
3706                                 struct pt_regs *regs)
3707 {
3708         struct perf_output_handle handle;
3709         struct perf_event_header header;
3710
3711         /* protect the callchain buffers */
3712         rcu_read_lock();
3713
3714         perf_prepare_sample(&header, data, event, regs);
3715
3716         if (perf_output_begin(&handle, event, header.size, nmi, 1))
3717                 goto exit;
3718
3719         perf_output_sample(&handle, &header, data, event);
3720
3721         perf_output_end(&handle);
3722
3723 exit:
3724         rcu_read_unlock();
3725 }
3726
3727 /*
3728  * read event_id
3729  */
3730
3731 struct perf_read_event {
3732         struct perf_event_header        header;
3733
3734         u32                             pid;
3735         u32                             tid;
3736 };
3737
3738 static void
3739 perf_event_read_event(struct perf_event *event,
3740                         struct task_struct *task)
3741 {
3742         struct perf_output_handle handle;
3743         struct perf_read_event read_event = {
3744                 .header = {
3745                         .type = PERF_RECORD_READ,
3746                         .misc = 0,
3747                         .size = sizeof(read_event) + perf_event_read_size(event),
3748                 },
3749                 .pid = perf_event_pid(event, task),
3750                 .tid = perf_event_tid(event, task),
3751         };
3752         int ret;
3753
3754         ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
3755         if (ret)
3756                 return;
3757
3758         perf_output_put(&handle, read_event);
3759         perf_output_read(&handle, event);
3760
3761         perf_output_end(&handle);
3762 }
3763
3764 /*
3765  * task tracking -- fork/exit
3766  *
3767  * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
3768  */
3769
3770 struct perf_task_event {
3771         struct task_struct              *task;
3772         struct perf_event_context       *task_ctx;
3773
3774         struct {
3775                 struct perf_event_header        header;
3776
3777                 u32                             pid;
3778                 u32                             ppid;
3779                 u32                             tid;
3780                 u32                             ptid;
3781                 u64                             time;
3782         } event_id;
3783 };
3784
3785 static void perf_event_task_output(struct perf_event *event,
3786                                      struct perf_task_event *task_event)
3787 {
3788         struct perf_output_handle handle;
3789         struct task_struct *task = task_event->task;
3790         int size, ret;
3791
3792         size  = task_event->event_id.header.size;
3793         ret = perf_output_begin(&handle, event, size, 0, 0);
3794
3795         if (ret)
3796                 return;
3797
3798         task_event->event_id.pid = perf_event_pid(event, task);
3799         task_event->event_id.ppid = perf_event_pid(event, current);
3800
3801         task_event->event_id.tid = perf_event_tid(event, task);
3802         task_event->event_id.ptid = perf_event_tid(event, current);
3803
3804         perf_output_put(&handle, task_event->event_id);
3805
3806         perf_output_end(&handle);
3807 }
3808
3809 static int perf_event_task_match(struct perf_event *event)
3810 {
3811         if (event->state < PERF_EVENT_STATE_INACTIVE)
3812                 return 0;
3813
3814         if (event->cpu != -1 && event->cpu != smp_processor_id())
3815                 return 0;
3816
3817         if (event->attr.comm || event->attr.mmap ||
3818             event->attr.mmap_data || event->attr.task)
3819                 return 1;
3820
3821         return 0;
3822 }
3823
3824 static void perf_event_task_ctx(struct perf_event_context *ctx,
3825                                   struct perf_task_event *task_event)
3826 {
3827         struct perf_event *event;
3828
3829         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3830                 if (perf_event_task_match(event))
3831                         perf_event_task_output(event, task_event);
3832         }
3833 }
3834
3835 static void perf_event_task_event(struct perf_task_event *task_event)
3836 {
3837         struct perf_cpu_context *cpuctx;
3838         struct perf_event_context *ctx;
3839         struct pmu *pmu;
3840         int ctxn;
3841
3842         rcu_read_lock();
3843         list_for_each_entry_rcu(pmu, &pmus, entry) {
3844                 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3845                 perf_event_task_ctx(&cpuctx->ctx, task_event);
3846
3847                 ctx = task_event->task_ctx;
3848                 if (!ctx) {
3849                         ctxn = pmu->task_ctx_nr;
3850                         if (ctxn < 0)
3851                                 goto next;
3852                         ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3853                 }
3854                 if (ctx)
3855                         perf_event_task_ctx(ctx, task_event);
3856 next:
3857                 put_cpu_ptr(pmu->pmu_cpu_context);
3858         }
3859         rcu_read_unlock();
3860 }
3861
3862 static void perf_event_task(struct task_struct *task,
3863                               struct perf_event_context *task_ctx,
3864                               int new)
3865 {
3866         struct perf_task_event task_event;
3867
3868         if (!atomic_read(&nr_comm_events) &&
3869             !atomic_read(&nr_mmap_events) &&
3870             !atomic_read(&nr_task_events))
3871                 return;
3872
3873         task_event = (struct perf_task_event){
3874                 .task     = task,
3875                 .task_ctx = task_ctx,
3876                 .event_id    = {
3877                         .header = {
3878                                 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
3879                                 .misc = 0,
3880                                 .size = sizeof(task_event.event_id),
3881                         },
3882                         /* .pid  */
3883                         /* .ppid */
3884                         /* .tid  */
3885                         /* .ptid */
3886                         .time = perf_clock(),
3887                 },
3888         };
3889
3890         perf_event_task_event(&task_event);
3891 }
3892
3893 void perf_event_fork(struct task_struct *task)
3894 {
3895         perf_event_task(task, NULL, 1);
3896 }
3897
3898 /*
3899  * comm tracking
3900  */
3901
3902 struct perf_comm_event {
3903         struct task_struct      *task;
3904         char                    *comm;
3905         int                     comm_size;
3906
3907         struct {
3908                 struct perf_event_header        header;
3909
3910                 u32                             pid;
3911                 u32                             tid;
3912         } event_id;
3913 };
3914
3915 static void perf_event_comm_output(struct perf_event *event,
3916                                      struct perf_comm_event *comm_event)
3917 {
3918         struct perf_output_handle handle;
3919         int size = comm_event->event_id.header.size;
3920         int ret = perf_output_begin(&handle, event, size, 0, 0);
3921
3922         if (ret)
3923                 return;
3924
3925         comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
3926         comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
3927
3928         perf_output_put(&handle, comm_event->event_id);
3929         perf_output_copy(&handle, comm_event->comm,
3930                                    comm_event->comm_size);
3931         perf_output_end(&handle);
3932 }
3933
3934 static int perf_event_comm_match(struct perf_event *event)
3935 {
3936         if (event->state < PERF_EVENT_STATE_INACTIVE)
3937                 return 0;
3938
3939         if (event->cpu != -1 && event->cpu != smp_processor_id())
3940                 return 0;
3941
3942         if (event->attr.comm)
3943                 return 1;
3944
3945         return 0;
3946 }
3947
3948 static void perf_event_comm_ctx(struct perf_event_context *ctx,
3949                                   struct perf_comm_event *comm_event)
3950 {
3951         struct perf_event *event;
3952
3953         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
3954                 if (perf_event_comm_match(event))
3955                         perf_event_comm_output(event, comm_event);
3956         }
3957 }
3958
3959 static void perf_event_comm_event(struct perf_comm_event *comm_event)
3960 {
3961         struct perf_cpu_context *cpuctx;
3962         struct perf_event_context *ctx;
3963         char comm[TASK_COMM_LEN];
3964         unsigned int size;
3965         struct pmu *pmu;
3966         int ctxn;
3967
3968         memset(comm, 0, sizeof(comm));
3969         strlcpy(comm, comm_event->task->comm, sizeof(comm));
3970         size = ALIGN(strlen(comm)+1, sizeof(u64));
3971
3972         comm_event->comm = comm;
3973         comm_event->comm_size = size;
3974
3975         comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
3976
3977         rcu_read_lock();
3978         list_for_each_entry_rcu(pmu, &pmus, entry) {
3979                 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
3980                 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
3981
3982                 ctxn = pmu->task_ctx_nr;
3983                 if (ctxn < 0)
3984                         goto next;
3985
3986                 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
3987                 if (ctx)
3988                         perf_event_comm_ctx(ctx, comm_event);
3989 next:
3990                 put_cpu_ptr(pmu->pmu_cpu_context);
3991         }
3992         rcu_read_unlock();
3993 }
3994
3995 void perf_event_comm(struct task_struct *task)
3996 {
3997         struct perf_comm_event comm_event;
3998         struct perf_event_context *ctx;
3999         int ctxn;
4000
4001         for_each_task_context_nr(ctxn) {
4002                 ctx = task->perf_event_ctxp[ctxn];
4003                 if (!ctx)
4004                         continue;
4005
4006                 perf_event_enable_on_exec(ctx);
4007         }
4008
4009         if (!atomic_read(&nr_comm_events))
4010                 return;
4011
4012         comm_event = (struct perf_comm_event){
4013                 .task   = task,
4014                 /* .comm      */
4015                 /* .comm_size */
4016                 .event_id  = {
4017                         .header = {
4018                                 .type = PERF_RECORD_COMM,
4019                                 .misc = 0,
4020                                 /* .size */
4021                         },
4022                         /* .pid */
4023                         /* .tid */
4024                 },
4025         };
4026
4027         perf_event_comm_event(&comm_event);
4028 }
4029
4030 /*
4031  * mmap tracking
4032  */
4033
4034 struct perf_mmap_event {
4035         struct vm_area_struct   *vma;
4036
4037         const char              *file_name;
4038         int                     file_size;
4039
4040         struct {
4041                 struct perf_event_header        header;
4042
4043                 u32                             pid;
4044                 u32                             tid;
4045                 u64                             start;
4046                 u64                             len;
4047                 u64                             pgoff;
4048         } event_id;
4049 };
4050
4051 static void perf_event_mmap_output(struct perf_event *event,
4052                                      struct perf_mmap_event *mmap_event)
4053 {
4054         struct perf_output_handle handle;
4055         int size = mmap_event->event_id.header.size;
4056         int ret = perf_output_begin(&handle, event, size, 0, 0);
4057
4058         if (ret)
4059                 return;
4060
4061         mmap_event->event_id.pid = perf_event_pid(event, current);
4062         mmap_event->event_id.tid = perf_event_tid(event, current);
4063
4064         perf_output_put(&handle, mmap_event->event_id);
4065         perf_output_copy(&handle, mmap_event->file_name,
4066                                    mmap_event->file_size);
4067         perf_output_end(&handle);
4068 }
4069
4070 static int perf_event_mmap_match(struct perf_event *event,
4071                                    struct perf_mmap_event *mmap_event,
4072                                    int executable)
4073 {
4074         if (event->state < PERF_EVENT_STATE_INACTIVE)
4075                 return 0;
4076
4077         if (event->cpu != -1 && event->cpu != smp_processor_id())
4078                 return 0;
4079
4080         if ((!executable && event->attr.mmap_data) ||
4081             (executable && event->attr.mmap))
4082                 return 1;
4083
4084         return 0;
4085 }
4086
4087 static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4088                                   struct perf_mmap_event *mmap_event,
4089                                   int executable)
4090 {
4091         struct perf_event *event;
4092
4093         list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4094                 if (perf_event_mmap_match(event, mmap_event, executable))
4095                         perf_event_mmap_output(event, mmap_event);
4096         }
4097 }
4098
4099 static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
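/*
 * Resolve the vma to a printable name (the file path via d_path(), an
 * arch-provided name, or one of "[vdso]", "[heap]", "[stack]", "//anon")
 * and broadcast the PERF_RECORD_MMAP to every matching context.
 */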
4100 {
4101         struct perf_cpu_context *cpuctx;
4102         struct perf_event_context *ctx;
4103         struct vm_area_struct *vma = mmap_event->vma;
4104         struct file *file = vma->vm_file;
4105         unsigned int size;
4106         char tmp[16];
4107         char *buf = NULL;
4108         const char *name;
4109         struct pmu *pmu;
4110         int ctxn;
4111
4112         memset(tmp, 0, sizeof(tmp));
4113
4114         if (file) {
4115                 /*
4116                  * d_path works from the end of the buffer backwards, so we
4117                  * need to add enough zero bytes after the string to handle
4118                  * the 64bit alignment we do later.
4119                  */
4120                 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4121                 if (!buf) {
4122                         name = strncpy(tmp, "//enomem", sizeof(tmp));
4123                         goto got_name;
4124                 }
4125                 name = d_path(&file->f_path, buf, PATH_MAX);
4126                 if (IS_ERR(name)) {
4127                         name = strncpy(tmp, "//toolong", sizeof(tmp));
4128                         goto got_name;
4129                 }
4130         } else {
4131                 if (arch_vma_name(mmap_event->vma)) {
4132                         name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4133                                        sizeof(tmp));
4134                         goto got_name;
4135                 }
4136
4137                 if (!vma->vm_mm) {
4138                         name = strncpy(tmp, "[vdso]", sizeof(tmp));
4139                         goto got_name;
4140                 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4141                                 vma->vm_end >= vma->vm_mm->brk) {
4142                         name = strncpy(tmp, "[heap]", sizeof(tmp));
4143                         goto got_name;
4144                 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4145                                 vma->vm_end >= vma->vm_mm->start_stack) {
4146                         name = strncpy(tmp, "[stack]", sizeof(tmp));
4147                         goto got_name;
4148                 }
4149
4150                 name = strncpy(tmp, "//anon", sizeof(tmp));
4151                 goto got_name;
4152         }
4153
4154 got_name:
4155         size = ALIGN(strlen(name)+1, sizeof(u64));
4156
4157         mmap_event->file_name = name;
4158         mmap_event->file_size = size;
4159
4160         mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4161
4162         rcu_read_lock();
4163         list_for_each_entry_rcu(pmu, &pmus, entry) {
4164                 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4165                 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4166                                         vma->vm_flags & VM_EXEC);
4167
4168                 ctxn = pmu->task_ctx_nr;
4169                 if (ctxn < 0)
4170                         goto next;
4171
4172                 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4173                 if (ctx) {
4174                         perf_event_mmap_ctx(ctx, mmap_event,
4175                                         vma->vm_flags & VM_EXEC);
4176                 }
4177 next:
4178                 put_cpu_ptr(pmu->pmu_cpu_context);
4179         }
4180         rcu_read_unlock();
4181
4182         kfree(buf);
4183 }
4184
4185 void perf_event_mmap(struct vm_area_struct *vma)
4186 {
4187         struct perf_mmap_event mmap_event;
4188
4189         if (!atomic_read(&nr_mmap_events))
4190                 return;
4191
4192         mmap_event = (struct perf_mmap_event){
4193                 .vma    = vma,
4194                 /* .file_name */
4195                 /* .file_size */
4196                 .event_id  = {
4197                         .header = {
4198                                 .type = PERF_RECORD_MMAP,
4199                                 .misc = PERF_RECORD_MISC_USER,
4200                                 /* .size */
4201                         },
4202                         /* .pid */
4203                         /* .tid */
4204                         .start  = vma->vm_start,
4205                         .len    = vma->vm_end - vma->vm_start,
4206                         .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4207                 },
4208         };
4209
4210         perf_event_mmap_event(&mmap_event);
4211 }
4212
4213 /*
4214  * IRQ throttle logging
4215  */
4216
4217 static void perf_log_throttle(struct perf_event *event, int enable)
4218 {
4219         struct perf_output_handle handle;
4220         int ret;
4221
4222         struct {
4223                 struct perf_event_header        header;
4224                 u64                             time;
4225                 u64                             id;
4226                 u64                             stream_id;
4227         } throttle_event = {
4228                 .header = {
4229                         .type = PERF_RECORD_THROTTLE,
4230                         .misc = 0,
4231                         .size = sizeof(throttle_event),
4232                 },
4233                 .time           = perf_clock(),
4234                 .id             = primary_event_id(event),
4235                 .stream_id      = event->id,
4236         };
4237
4238         if (enable)
4239                 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4240
4241         ret = perf_output_begin(&handle, event, sizeof(throttle_event), 1, 0);
4242         if (ret)
4243                 return;
4244
4245         perf_output_put(&handle, throttle_event);
4246         perf_output_end(&handle);
4247 }
4248
4249 /*
4250  * Generic event overflow handling, sampling.
4251  */
4252
4253 static int __perf_event_overflow(struct perf_event *event, int nmi,
4254                                    int throttle, struct perf_sample_data *data,
4255                                    struct pt_regs *regs)
4256 {
4257         int events = atomic_read(&event->event_limit);
4258         struct hw_perf_event *hwc = &event->hw;
4259         int ret = 0;
4260
4261         if (!throttle) {
4262                 hwc->interrupts++;
4263         } else {
4264                 if (hwc->interrupts != MAX_INTERRUPTS) {
4265                         hwc->interrupts++;
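                        /*
                         * Throttle once the implied interrupt rate exceeds
                         * sysctl_perf_event_sample_rate: e.g. with HZ == 1000
                         * and a sample rate of 100000, more than 100
                         * interrupts within one tick trips the throttle.
                         */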
4266                         if (HZ * hwc->interrupts >
4267                                         (u64)sysctl_perf_event_sample_rate) {
4268                                 hwc->interrupts = MAX_INTERRUPTS;
4269                                 perf_log_throttle(event, 0);
4270                                 ret = 1;
4271                         }
4272                 } else {
4273                         /*
4274                          * Keep re-disabling the event even though we disabled it
4275                          * on the previous pass - just in case we raced with a
4276                          * sched-in and the event got enabled again:
4277                          */
4278                         ret = 1;
4279                 }
4280         }
4281
4282         if (event->attr.freq) {
4283                 u64 now = perf_clock();
4284                 s64 delta = now - hwc->freq_time_stamp;
4285
4286                 hwc->freq_time_stamp = now;
4287
4288                 if (delta > 0 && delta < 2*TICK_NSEC)
4289                         perf_adjust_period(event, delta, hwc->last_period);
4290         }
4291
4292         /*
4293          * XXX event_limit might not quite work as expected on inherited
4294          * events
4295          */
4296
4297         event->pending_kill = POLL_IN;
4298         if (events && atomic_dec_and_test(&event->event_limit)) {
4299                 ret = 1;
4300                 event->pending_kill = POLL_HUP;
4301                 if (nmi) {
4302                         event->pending_disable = 1;
4303                         perf_pending_queue(&event->pending,
4304                                            perf_pending_event);
4305                 } else
4306                         perf_event_disable(event);
4307         }
4308
4309         if (event->overflow_handler)
4310                 event->overflow_handler(event, nmi, data, regs);
4311         else
4312                 perf_event_output(event, nmi, data, regs);
4313
4314         return ret;
4315 }
4316
4317 int perf_event_overflow(struct perf_event *event, int nmi,
4318                           struct perf_sample_data *data,
4319                           struct pt_regs *regs)
4320 {
4321         return __perf_event_overflow(event, nmi, 1, data, regs);
4322 }
4323
4324 /*
4325  * Generic software event infrastructure
4326  */
4327
4328 struct swevent_htable {
4329         struct swevent_hlist            *swevent_hlist;
4330         struct mutex                    hlist_mutex;
4331         int                             hlist_refcount;
4332
4333         /* Recursion avoidance in each contexts */
4334         int                             recursion[PERF_NR_CONTEXTS];
4335 };
4336
4337 static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4338
4339 /*
4340  * We directly increment event->count and keep a second value in
4341  * event->hw.period_left to count intervals. This period counter
4342  * is kept in the range [-sample_period, 0] so that we can use the
4343  * sign as trigger.
4344  */
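/*
 * Worked example: with sample_period == 100, period_left sits at -100 after
 * perf_swevent_add(); perf_swevent_event() adds each count with
 * local64_add_negative(), and once the sum reaches zero or goes positive
 * perf_swevent_set_period() works out how many whole periods elapsed (nr),
 * rewinds period_left by nr * period and the caller reports nr overflows.
 */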
4345
4346 static u64 perf_swevent_set_period(struct perf_event *event)
4347 {
4348         struct hw_perf_event *hwc = &event->hw;
4349         u64 period = hwc->last_period;
4350         u64 nr, offset;
4351         s64 old, val;
4352
4353         hwc->last_period = hwc->sample_period;
4354
4355 again:
4356         old = val = local64_read(&hwc->period_left);
4357         if (val < 0)
4358                 return 0;
4359
4360         nr = div64_u64(period + val, period);
4361         offset = nr * period;
4362         val -= offset;
4363         if (local64_cmpxchg(&hwc->period_left, old, val) != old)
4364                 goto again;
4365
4366         return nr;
4367 }
4368
4369 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
4370                                     int nmi, struct perf_sample_data *data,
4371                                     struct pt_regs *regs)
4372 {
4373         struct hw_perf_event *hwc = &event->hw;
4374         int throttle = 0;
4375
4376         data->period = event->hw.last_period;
4377         if (!overflow)
4378                 overflow = perf_swevent_set_period(event);
4379
4380         if (hwc->interrupts == MAX_INTERRUPTS)
4381                 return;
4382
4383         for (; overflow; overflow--) {
4384                 if (__perf_event_overflow(event, nmi, throttle,
4385                                             data, regs)) {
4386                         /*
4387                          * We inhibit the overflow from happening when
4388                          * hwc->interrupts == MAX_INTERRUPTS.
4389                          */
4390                         break;
4391                 }
4392                 throttle = 1;
4393         }
4394 }
4395
4396 static void perf_swevent_event(struct perf_event *event, u64 nr,
4397                                int nmi, struct perf_sample_data *data,
4398                                struct pt_regs *regs)
4399 {
4400         struct hw_perf_event *hwc = &event->hw;
4401
4402         local64_add(nr, &event->count);
4403
4404         if (!regs)
4405                 return;
4406
4407         if (!hwc->sample_period)
4408                 return;
4409
4410         if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
4411                 return perf_swevent_overflow(event, 1, nmi, data, regs);
4412
4413         if (local64_add_negative(nr, &hwc->period_left))
4414                 return;
4415
4416         perf_swevent_overflow(event, 0, nmi, data, regs);
4417 }
4418
4419 static int perf_exclude_event(struct perf_event *event,
4420                               struct pt_regs *regs)
4421 {
4422         if (event->hw.state & PERF_HES_STOPPED)
4423                 return 0;
4424
4425         if (regs) {
4426                 if (event->attr.exclude_user && user_mode(regs))
4427                         return 1;
4428
4429                 if (event->attr.exclude_kernel && !user_mode(regs))
4430                         return 1;
4431         }
4432
4433         return 0;
4434 }
4435
4436 static int perf_swevent_match(struct perf_event *event,
4437                                 enum perf_type_id type,
4438                                 u32 event_id,
4439                                 struct perf_sample_data *data,
4440                                 struct pt_regs *regs)
4441 {
4442         if (event->attr.type != type)
4443                 return 0;
4444
4445         if (event->attr.config != event_id)
4446                 return 0;
4447
4448         if (perf_exclude_event(event, regs))
4449                 return 0;
4450
4451         return 1;
4452 }
4453
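/*
 * Software events are demultiplexed through a per-cpu hash table: the
 * (type, config) pair of an event picks an hlist bucket, and
 * do_perf_sw_event() walks that bucket every time a matching event fires.
 */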
4454 static inline u64 swevent_hash(u64 type, u32 event_id)
4455 {
4456         u64 val = event_id | (type << 32);
4457
4458         return hash_64(val, SWEVENT_HLIST_BITS);
4459 }
4460
4461 static inline struct hlist_head *
4462 __find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
4463 {
4464         u64 hash = swevent_hash(type, event_id);
4465
4466         return &hlist->heads[hash];
4467 }
4468
4469 /* For the read side: look up the hlist head when events trigger */
4470 static inline struct hlist_head *
4471 find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
4472 {
4473         struct swevent_hlist *hlist;
4474
4475         hlist = rcu_dereference(swhash->swevent_hlist);
4476         if (!hlist)
4477                 return NULL;
4478
4479         return __find_swevent_head(hlist, type, event_id);
4480 }
4481
4482 /* For the event head insertion and removal in the hlist */
4483 static inline struct hlist_head *
4484 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
4485 {
4486         struct swevent_hlist *hlist;
4487         u32 event_id = event->attr.config;
4488         u64 type = event->attr.type;
4489
4490         /*
4491          * Event scheduling is always serialized against hlist allocation
4492          * and release; the context lock guarantees that, which makes the
4493          * protected version suitable here.
4494          */
4495         hlist = rcu_dereference_protected(swhash->swevent_hlist,
4496                                           lockdep_is_held(&event->ctx->lock));
4497         if (!hlist)
4498                 return NULL;
4499
4500         return __find_swevent_head(hlist, type, event_id);
4501 }
4502
4503 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
4504                                     u64 nr, int nmi,
4505                                     struct perf_sample_data *data,
4506                                     struct pt_regs *regs)
4507 {
4508         struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4509         struct perf_event *event;
4510         struct hlist_node *node;
4511         struct hlist_head *head;
4512
4513         rcu_read_lock();
4514         head = find_swevent_head_rcu(swhash, type, event_id);
4515         if (!head)
4516                 goto end;
4517
4518         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4519                 if (perf_swevent_match(event, type, event_id, data, regs))
4520                         perf_swevent_event(event, nr, nmi, data, regs);
4521         }
4522 end:
4523         rcu_read_unlock();
4524 }
4525
4526 int perf_swevent_get_recursion_context(void)
4527 {
4528         struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4529
4530         return get_recursion_context(swhash->recursion);
4531 }
4532 EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
4533
4534 inline void perf_swevent_put_recursion_context(int rctx)
4535 {
4536         struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4537
4538         put_recursion_context(swhash->recursion, rctx);
4539 }
4540
4541 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
4542                             struct pt_regs *regs, u64 addr)
4543 {
4544         struct perf_sample_data data;
4545         int rctx;
4546
4547         preempt_disable_notrace();
4548         rctx = perf_swevent_get_recursion_context();
4549         if (rctx < 0)
4550                 goto out;
4551
4552         perf_sample_data_init(&data, addr);
4553
4554         do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
4555         perf_swevent_put_recursion_context(rctx);
4556 out:
4557         preempt_enable_notrace();
4558 }
4559
4560 static void perf_swevent_read(struct perf_event *event)
4561 {
4562 }
4563
4564 static int perf_swevent_add(struct perf_event *event, int flags)
4565 {
4566         struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
4567         struct hw_perf_event *hwc = &event->hw;
4568         struct hlist_head *head;
4569
4570         if (hwc->sample_period) {
4571                 hwc->last_period = hwc->sample_period;
4572                 perf_swevent_set_period(event);
4573         }
4574
4575         hwc->state = !(flags & PERF_EF_START);
4576
4577         head = find_swevent_head(swhash, event);
4578         if (WARN_ON_ONCE(!head))
4579                 return -EINVAL;
4580
4581         hlist_add_head_rcu(&event->hlist_entry, head);
4582
4583         return 0;
4584 }
4585
4586 static void perf_swevent_del(struct perf_event *event, int flags)
4587 {
4588         hlist_del_rcu(&event->hlist_entry);
4589 }
4590
4591 static void perf_swevent_start(struct perf_event *event, int flags)
4592 {
4593         event->hw.state = 0;
4594 }
4595
4596 static void perf_swevent_stop(struct perf_event *event, int flags)
4597 {
4598         event->hw.state = PERF_HES_STOPPED;
4599 }
4600
4601 /* Deref the hlist from the update side */
4602 static inline struct swevent_hlist *
4603 swevent_hlist_deref(struct swevent_htable *swhash)
4604 {
4605         return rcu_dereference_protected(swhash->swevent_hlist,
4606                                          lockdep_is_held(&swhash->hlist_mutex));
4607 }
4608
4609 static void swevent_hlist_release_rcu(struct rcu_head *rcu_head)
4610 {
4611         struct swevent_hlist *hlist;
4612
4613         hlist = container_of(rcu_head, struct swevent_hlist, rcu_head);
4614         kfree(hlist);
4615 }
4616
4617 static void swevent_hlist_release(struct swevent_htable *swhash)
4618 {
4619         struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
4620
4621         if (!hlist)
4622                 return;
4623
4624         rcu_assign_pointer(swhash->swevent_hlist, NULL);
4625         call_rcu(&hlist->rcu_head, swevent_hlist_release_rcu);
4626 }
4627
4628 static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4629 {
4630         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4631
4632         mutex_lock(&swhash->hlist_mutex);
4633
4634         if (!--swhash->hlist_refcount)
4635                 swevent_hlist_release(swhash);
4636
4637         mutex_unlock(&swhash->hlist_mutex);
4638 }
4639
4640 static void swevent_hlist_put(struct perf_event *event)
4641 {
4642         int cpu;
4643
4644         if (event->cpu != -1) {
4645                 swevent_hlist_put_cpu(event, event->cpu);
4646                 return;
4647         }
4648
4649         for_each_possible_cpu(cpu)
4650                 swevent_hlist_put_cpu(event, cpu);
4651 }
4652
4653 static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4654 {
4655         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4656         int err = 0;
4657
4658         mutex_lock(&swhash->hlist_mutex);
4659
4660         if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
4661                 struct swevent_hlist *hlist;
4662
4663                 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4664                 if (!hlist) {
4665                         err = -ENOMEM;
4666                         goto exit;
4667                 }
4668                 rcu_assign_pointer(swhash->swevent_hlist, hlist);
4669         }
4670         swhash->hlist_refcount++;
4671 exit:
4672         mutex_unlock(&swhash->hlist_mutex);
4673
4674         return err;
4675 }
4676
4677 static int swevent_hlist_get(struct perf_event *event)
4678 {
4679         int err;
4680         int cpu, failed_cpu;
4681
4682         if (event->cpu != -1)
4683                 return swevent_hlist_get_cpu(event, event->cpu);
4684
4685         get_online_cpus();
4686         for_each_possible_cpu(cpu) {
4687                 err = swevent_hlist_get_cpu(event, cpu);
4688                 if (err) {
4689                         failed_cpu = cpu;
4690                         goto fail;
4691                 }
4692         }
4693         put_online_cpus();
4694
4695         return 0;
4696 fail:
4697         for_each_possible_cpu(cpu) {
4698                 if (cpu == failed_cpu)
4699                         break;
4700                 swevent_hlist_put_cpu(event, cpu);
4701         }
4702
4703         put_online_cpus();
4704         return err;
4705 }
4706
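/*
 * Per event-id reference counts: bumped in perf_swevent_init() and dropped
 * in sw_perf_event_destroy(); a non-zero count means at least one software
 * event of that id currently exists.
 */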
4707 atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
4708
4709 static void sw_perf_event_destroy(struct perf_event *event)
4710 {
4711         u64 event_id = event->attr.config;
4712
4713         WARN_ON(event->parent);
4714
4715         atomic_dec(&perf_swevent_enabled[event_id]);
4716         swevent_hlist_put(event);
4717 }
4718
4719 static int perf_swevent_init(struct perf_event *event)
4720 {
4721         int event_id = event->attr.config;
4722
4723         if (event->attr.type != PERF_TYPE_SOFTWARE)
4724                 return -ENOENT;
4725
4726         switch (event_id) {
4727         case PERF_COUNT_SW_CPU_CLOCK:
4728         case PERF_COUNT_SW_TASK_CLOCK:
4729                 return -ENOENT;
4730
4731         default:
4732                 break;
4733         }
4734
4735         if (event_id >= PERF_COUNT_SW_MAX)
4736                 return -ENOENT;
4737
4738         if (!event->parent) {
4739                 int err;
4740
4741                 err = swevent_hlist_get(event);
4742                 if (err)
4743                         return err;
4744
4745                 atomic_inc(&perf_swevent_enabled[event_id]);
4746                 event->destroy = sw_perf_event_destroy;
4747         }
4748
4749         return 0;
4750 }
4751
4752 static struct pmu perf_swevent = {
4753         .task_ctx_nr    = perf_sw_context,
4754
4755         .event_init     = perf_swevent_init,
4756         .add            = perf_swevent_add,
4757         .del            = perf_swevent_del,
4758         .start          = perf_swevent_start,
4759         .stop           = perf_swevent_stop,
4760         .read           = perf_swevent_read,
4761 };
4762
4763 #ifdef CONFIG_EVENT_TRACING
4764
4765 static int perf_tp_filter_match(struct perf_event *event,
4766                                 struct perf_sample_data *data)
4767 {
4768         void *record = data->raw->data;
4769
4770         if (likely(!event->filter) || filter_match_preds(event->filter, record))
4771                 return 1;
4772         return 0;
4773 }
4774
4775 static int perf_tp_event_match(struct perf_event *event,
4776                                 struct perf_sample_data *data,
4777                                 struct pt_regs *regs)
4778 {
4779         /*
4780          * All tracepoints are from kernel-space.
4781          */
4782         if (event->attr.exclude_kernel)
4783                 return 0;
4784
4785         if (!perf_tp_filter_match(event, data))
4786                 return 0;
4787
4788         return 1;
4789 }
4790
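/*
 * Entry point used by the tracepoint side: wrap the raw record, run it past
 * every event hashed on this tracepoint's hlist and drop the recursion
 * context the caller obtained.
 */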
4791 void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
4792                    struct pt_regs *regs, struct hlist_head *head, int rctx)
4793 {
4794         struct perf_sample_data data;
4795         struct perf_event *event;
4796         struct hlist_node *node;
4797
4798         struct perf_raw_record raw = {
4799                 .size = entry_size,
4800                 .data = record,
4801         };
4802
4803         perf_sample_data_init(&data, addr);
4804         data.raw = &raw;
4805
4806         hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
4807                 if (perf_tp_event_match(event, &data, regs))
4808                         perf_swevent_event(event, count, 1, &data, regs);
4809         }
4810
4811         perf_swevent_put_recursion_context(rctx);
4812 }
4813 EXPORT_SYMBOL_GPL(perf_tp_event);
4814
4815 static void tp_perf_event_destroy(struct perf_event *event)
4816 {
4817         perf_trace_destroy(event);
4818 }
4819
4820 static int perf_tp_event_init(struct perf_event *event)
4821 {
4822         int err;
4823
4824         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4825                 return -ENOENT;
4826
4827         /*
4828          * Raw tracepoint data is a severe data leak; only allow root to
4829          * have these.
4830          */
4831         if ((event->attr.sample_type & PERF_SAMPLE_RAW) &&
4832                         perf_paranoid_tracepoint_raw() &&
4833                         !capable(CAP_SYS_ADMIN))
4834                 return -EPERM;
4835
4836         err = perf_trace_init(event);
4837         if (err)
4838                 return err;
4839
4840         event->destroy = tp_perf_event_destroy;
4841
4842         return 0;
4843 }
4844
4845 static struct pmu perf_tracepoint = {
4846         .task_ctx_nr    = perf_sw_context,
4847
4848         .event_init     = perf_tp_event_init,
4849         .add            = perf_trace_add,
4850         .del            = perf_trace_del,
4851         .start          = perf_swevent_start,
4852         .stop           = perf_swevent_stop,
4853         .read           = perf_swevent_read,
4854 };
4855
4856 static inline void perf_tp_register(void)
4857 {
4858         perf_pmu_register(&perf_tracepoint);
4859 }
4860
4861 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4862 {
4863         char *filter_str;
4864         int ret;
4865
4866         if (event->attr.type != PERF_TYPE_TRACEPOINT)
4867                 return -EINVAL;
4868
4869         filter_str = strndup_user(arg, PAGE_SIZE);
4870         if (IS_ERR(filter_str))
4871                 return PTR_ERR(filter_str);
4872
4873         ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
4874
4875         kfree(filter_str);
4876         return ret;
4877 }
4878
4879 static void perf_event_free_filter(struct perf_event *event)
4880 {
4881         ftrace_profile_free_filter(event);
4882 }
4883
4884 #else
4885
4886 static inline void perf_tp_register(void)
4887 {
4888 }
4889
4890 static int perf_event_set_filter(struct perf_event *event, void __user *arg)
4891 {
4892         return -ENOENT;
4893 }
4894
4895 static void perf_event_free_filter(struct perf_event *event)
4896 {
4897 }
4898
4899 #endif /* CONFIG_EVENT_TRACING */
4900
4901 #ifdef CONFIG_HAVE_HW_BREAKPOINT
4902 void perf_bp_event(struct perf_event *bp, void *data)
4903 {
4904         struct perf_sample_data sample;
4905         struct pt_regs *regs = data;
4906
4907         perf_sample_data_init(&sample, bp->attr.bp_addr);
4908
4909         if (!bp->hw.state && !perf_exclude_event(bp, regs))
4910                 perf_swevent_event(bp, 1, 1, &sample, regs);
4911 }
4912 #endif
4913
4914 /*
4915  * hrtimer based swevent callback
4916  */
4917
4918 static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
4919 {
4920         enum hrtimer_restart ret = HRTIMER_RESTART;
4921         struct perf_sample_data data;
4922         struct pt_regs *regs;
4923         struct perf_event *event;
4924         u64 period;
4925
4926         event = container_of(hrtimer, struct perf_event, hw.hrtimer);
4927         event->pmu->read(event);
4928
4929         perf_sample_data_init(&data, 0);
4930         data.period = event->hw.last_period;
4931         regs = get_irq_regs();
4932
4933         if (regs && !perf_exclude_event(event, regs)) {
4934                 if (!(event->attr.exclude_idle && current->pid == 0))
4935                         if (perf_event_overflow(event, 0, &data, regs))
4936                                 ret = HRTIMER_NORESTART;
4937         }
4938
4939         period = max_t(u64, 10000, event->hw.sample_period);
4940         hrtimer_forward_now(hrtimer, ns_to_ktime(period));
4941
4942         return ret;
4943 }
4944
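/*
 * Arm the hrtimer that drives a sampling software event. The period is
 * clamped to at least 10000 ns so that a tiny sample_period does not flood
 * the CPU with timer interrupts.
 */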
4945 static void perf_swevent_start_hrtimer(struct perf_event *event)
4946 {
4947         struct hw_perf_event *hwc = &event->hw;
4948
4949         hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
4950         hwc->hrtimer.function = perf_swevent_hrtimer;
4951         if (hwc->sample_period) {
4952                 s64 period = local64_read(&hwc->period_left);
4953
4954                 if (period) {
4955                         if (period < 0)
4956                                 period = 10000;
4957
4958                         local64_set(&hwc->period_left, 0);
4959                 } else {
4960                         period = max_t(u64, 10000, hwc->sample_period);
4961                 }
4962                 __hrtimer_start_range_ns(&hwc->hrtimer,
4963                                 ns_to_ktime(period), 0,
4964                                 HRTIMER_MODE_REL_PINNED, 0);
4965         }
4966 }
4967
4968 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
4969 {
4970         struct hw_perf_event *hwc = &event->hw;
4971
4972         if (hwc->sample_period) {
4973                 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
4974                 local64_set(&hwc->period_left, ktime_to_ns(remaining));
4975
4976                 hrtimer_cancel(&hwc->hrtimer);
4977         }
4978 }
4979
4980 /*
4981  * Software event: cpu wall time clock
4982  */
4983
4984 static void cpu_clock_event_update(struct perf_event *event)
4985 {
4986         s64 prev;
4987         u64 now;
4988
4989         now = local_clock();
4990         prev = local64_xchg(&event->hw.prev_count, now);
4991         local64_add(now - prev, &event->count);
4992 }
4993
4994 static void cpu_clock_event_start(struct perf_event *event, int flags)
4995 {
4996         local64_set(&event->hw.prev_count, local_clock());
4997         perf_swevent_start_hrtimer(event);
4998 }
4999
5000 static void cpu_clock_event_stop(struct perf_event *event, int flags)
5001 {
5002         perf_swevent_cancel_hrtimer(event);
5003         cpu_clock_event_update(event);
5004 }
5005
5006 static int cpu_clock_event_add(struct perf_event *event, int flags)
5007 {
5008         if (flags & PERF_EF_START)
5009                 cpu_clock_event_start(event, flags);
5010
5011         return 0;
5012 }
5013
5014 static void cpu_clock_event_del(struct perf_event *event, int flags)
5015 {
5016         cpu_clock_event_stop(event, flags);
5017 }
5018
5019 static void cpu_clock_event_read(struct perf_event *event)
5020 {
5021         cpu_clock_event_update(event);
5022 }
5023
5024 static int cpu_clock_event_init(struct perf_event *event)
5025 {
5026         if (event->attr.type != PERF_TYPE_SOFTWARE)
5027                 return -ENOENT;
5028
5029         if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5030                 return -ENOENT;
5031
5032         return 0;
5033 }
5034
5035 static struct pmu perf_cpu_clock = {
5036         .task_ctx_nr    = perf_sw_context,
5037
5038         .event_init     = cpu_clock_event_init,
5039         .add            = cpu_clock_event_add,
5040         .del            = cpu_clock_event_del,
5041         .start          = cpu_clock_event_start,
5042         .stop           = cpu_clock_event_stop,
5043         .read           = cpu_clock_event_read,
5044 };
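
/*
 * Illustrative sketch (not part of the original file): the only
 * attribute combination that cpu_clock_event_init() accepts, i.e. what
 * a caller would set up to land on this pmu:
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_SOFTWARE,
 *              .config = PERF_COUNT_SW_CPU_CLOCK,
 *      };
 *
 * Anything else makes event_init return -ENOENT, so perf_init_event()
 * keeps probing the remaining pmus.
 */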
5045
5046 /*
5047  * Software event: task time clock
5048  */
5049
5050 static void task_clock_event_update(struct perf_event *event, u64 now)
5051 {
5052         u64 prev;
5053         s64 delta;
5054
5055         prev = local64_xchg(&event->hw.prev_count, now);
5056         delta = now - prev;
5057         local64_add(delta, &event->count);
5058 }
5059
5060 static void task_clock_event_start(struct perf_event *event, int flags)
5061 {
5062         local64_set(&event->hw.prev_count, event->ctx->time);
5063         perf_swevent_start_hrtimer(event);
5064 }
5065
5066 static void task_clock_event_stop(struct perf_event *event, int flags)
5067 {
5068         perf_swevent_cancel_hrtimer(event);
5069         task_clock_event_update(event, event->ctx->time);
5070 }
5071
5072 static int task_clock_event_add(struct perf_event *event, int flags)
5073 {
5074         if (flags & PERF_EF_START)
5075                 task_clock_event_start(event, flags);
5076
5077         return 0;
5078 }
5079
5080 static void task_clock_event_del(struct perf_event *event, int flags)
5081 {
5082         task_clock_event_stop(event, PERF_EF_UPDATE);
5083 }
5084
5085 static void task_clock_event_read(struct perf_event *event)
5086 {
5087         u64 time;
5088
5089         if (!in_nmi()) {
5090                 update_context_time(event->ctx);
5091                 time = event->ctx->time;
5092         } else {
5093                 u64 now = perf_clock();
5094                 u64 delta = now - event->ctx->timestamp;
5095                 time = event->ctx->time + delta;
5096         }
5097
5098         task_clock_event_update(event, time);
5099 }
5100
5101 static int task_clock_event_init(struct perf_event *event)
5102 {
5103         if (event->attr.type != PERF_TYPE_SOFTWARE)
5104                 return -ENOENT;
5105
5106         if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5107                 return -ENOENT;
5108
5109         return 0;
5110 }
5111
5112 static struct pmu perf_task_clock = {
5113         .task_ctx_nr    = perf_sw_context,
5114
5115         .event_init     = task_clock_event_init,
5116         .add            = task_clock_event_add,
5117         .del            = task_clock_event_del,
5118         .start          = task_clock_event_start,
5119         .stop           = task_clock_event_stop,
5120         .read           = task_clock_event_read,
5121 };
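
/*
 * Note (informational, not part of the original source): unlike the
 * cpu-clock event above, which advances with local_clock() wall time,
 * the task-clock event advances with event->ctx->time, i.e. only while
 * the monitored task's context is scheduled in.
 */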
5122
5123 static void perf_pmu_nop_void(struct pmu *pmu)
5124 {
5125 }
5126
5127 static int perf_pmu_nop_int(struct pmu *pmu)
5128 {
5129         return 0;
5130 }
5131
5132 static void perf_pmu_start_txn(struct pmu *pmu)
5133 {
5134         perf_pmu_disable(pmu);
5135 }
5136
5137 static int perf_pmu_commit_txn(struct pmu *pmu)
5138 {
5139         perf_pmu_enable(pmu);
5140         return 0;
5141 }
5142
5143 static void perf_pmu_cancel_txn(struct pmu *pmu)
5144 {
5145         perf_pmu_enable(pmu);
5146 }
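
/*
 * Illustrative sketch (not part of the original file) of how the
 * transaction callbacks are used when scheduling a whole group onto a
 * pmu; the flow below is a simplified outline, not literal code:
 *
 *      pmu->start_txn(pmu);
 *      for each event in the group:
 *              if (event->pmu->add(event, PERF_EF_START))
 *                      goto cancel;
 *      if (!pmu->commit_txn(pmu))
 *              return 0;               // whole group scheduled
 * cancel:
 *      pmu->cancel_txn(pmu);           // roll back partial scheduling
 *
 * With the stubs above, a pmu that only provides pmu_enable/pmu_disable
 * still has its hardware accesses batched between start and
 * commit/cancel.
 */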
5147
5148 /*
5149  * Ensures all contexts with the same task_ctx_nr have the same
5150  * pmu_cpu_context too.
5151  */
5152 static void *find_pmu_context(int ctxn)
5153 {
5154         struct pmu *pmu;
5155
5156         if (ctxn < 0)
5157                 return NULL;
5158
5159         list_for_each_entry(pmu, &pmus, entry) {
5160                 if (pmu->task_ctx_nr == ctxn)
5161                         return pmu->pmu_cpu_context;
5162         }
5163
5164         return NULL;
5165 }
5166
5167 static void free_pmu_context(void * __percpu cpu_context)
5168 {
5169         struct pmu *pmu;
5170
5171         mutex_lock(&pmus_lock);
5172         /*
5173          * A crude refcount: only free if no registered pmu still uses it.
5174          */
5175         list_for_each_entry(pmu, &pmus, entry) {
5176                 if (pmu->pmu_cpu_context == cpu_context)
5177                         goto out;
5178         }
5179
5180         free_percpu(cpu_context);
5181 out:
5182         mutex_unlock(&pmus_lock);
5183 }
5184
5185 int perf_pmu_register(struct pmu *pmu)
5186 {
5187         int cpu, ret;
5188
5189         mutex_lock(&pmus_lock);
5190         ret = -ENOMEM;
5191         pmu->pmu_disable_count = alloc_percpu(int);
5192         if (!pmu->pmu_disable_count)
5193                 goto unlock;
5194
5195         pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5196         if (pmu->pmu_cpu_context)
5197                 goto got_cpu_context;
5198
5199         pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5200         if (!pmu->pmu_cpu_context)
5201                 goto free_pdc;
5202
5203         for_each_possible_cpu(cpu) {
5204                 struct perf_cpu_context *cpuctx;
5205
5206                 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5207                 __perf_event_init_context(&cpuctx->ctx);
5208                 cpuctx->ctx.type = cpu_context;
5209                 cpuctx->ctx.pmu = pmu;
5210                 cpuctx->jiffies_interval = 1;
5211                 INIT_LIST_HEAD(&cpuctx->rotation_list);
5212         }
5213
5214 got_cpu_context:
5215         if (!pmu->start_txn) {
5216                 if (pmu->pmu_enable) {
5217                         /*
5218                          * If we have pmu_enable/pmu_disable calls, install
5219                          * transaction stubs that use them to try to batch
5220                          * hardware accesses.
5221                          */
5222                         pmu->start_txn  = perf_pmu_start_txn;
5223                         pmu->commit_txn = perf_pmu_commit_txn;
5224                         pmu->cancel_txn = perf_pmu_cancel_txn;
5225                 } else {
5226                         pmu->start_txn  = perf_pmu_nop_void;
5227                         pmu->commit_txn = perf_pmu_nop_int;
5228                         pmu->cancel_txn = perf_pmu_nop_void;
5229                 }
5230         }
5231
5232         if (!pmu->pmu_enable) {
5233                 pmu->pmu_enable  = perf_pmu_nop_void;
5234                 pmu->pmu_disable = perf_pmu_nop_void;
5235         }
5236
5237         list_add_rcu(&pmu->entry, &pmus);
5238         ret = 0;
5239 unlock:
5240         mutex_unlock(&pmus_lock);
5241
5242         return ret;
5243
5244 free_pdc:
5245         free_percpu(pmu->pmu_disable_count);
5246         goto unlock;
5247 }
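
/*
 * Minimal usage sketch (assumption, not part of the original file): a
 * software-style pmu needs little more than the callbacks shown for the
 * clock pmus above; perf_pmu_register() supplies nop or
 * enable/disable-based transaction handlers for anything left NULL:
 *
 *      static struct pmu my_pmu = {            // "my_pmu" is hypothetical
 *              .task_ctx_nr    = perf_sw_context,
 *              .event_init     = my_event_init,
 *              .add            = my_add,
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu);             // e.g. from an __init function
 */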
5248
5249 void perf_pmu_unregister(struct pmu *pmu)
5250 {
5251         mutex_lock(&pmus_lock);
5252         list_del_rcu(&pmu->entry);
5253         mutex_unlock(&pmus_lock);
5254
5255         /*
5256          * We dereference the pmu list under both SRCU and regular RCU, so
5257          * synchronize against both of those.
5258          */
5259         synchronize_srcu(&pmus_srcu);
5260         synchronize_rcu();
5261
5262         free_percpu(pmu->pmu_disable_count);
5263         free_pmu_context(pmu->pmu_cpu_context);
5264 }
5265
5266 struct pmu *perf_init_event(struct perf_event *event)
5267 {
5268         struct pmu *pmu = NULL;
5269         int idx;
5270
5271         idx = srcu_read_lock(&pmus_srcu);
5272         list_for_each_entry_rcu(pmu, &pmus, entry) {
5273                 int ret = pmu->event_init(event);
5274                 if (!ret)
5275                         goto unlock;
5276
5277                 if (ret != -ENOENT) {
5278                         pmu = ERR_PTR(ret);
5279                         goto unlock;
5280                 }
5281         }
5282         pmu = ERR_PTR(-ENOENT);
5283 unlock:
5284         srcu_read_unlock(&pmus_srcu, idx);
5285
5286         return pmu;
5287 }
5288
5289 /*
5290  * Allocate and initialize an event structure
5291  */
5292 static struct perf_event *
5293 perf_event_alloc(struct perf_event_attr *attr, int cpu,
5294                    struct perf_event *group_leader,
5295                    struct perf_event *parent_event,
5296                    perf_overflow_handler_t overflow_handler)
5297 {
5298         struct pmu *pmu;
5299         struct perf_event *event;
5300         struct hw_perf_event *hwc;
5301         long err;
5302
5303         event = kzalloc(sizeof(*event), GFP_KERNEL);
5304         if (!event)
5305                 return ERR_PTR(-ENOMEM);
5306
5307         /*
5308          * Single events are their own group leaders, with an
5309          * empty sibling list:
5310          */
5311         if (!group_leader)
5312                 group_leader = event;
5313
5314         mutex_init(&event->child_mutex);
5315         INIT_LIST_HEAD(&event->child_list);
5316
5317         INIT_LIST_HEAD(&event->group_entry);
5318         INIT_LIST_HEAD(&event->event_entry);
5319         INIT_LIST_HEAD(&event->sibling_list);
5320         init_waitqueue_head(&event->waitq);
5321
5322         mutex_init(&event->mmap_mutex);
5323
5324         event->cpu              = cpu;
5325         event->attr             = *attr;
5326         event->group_leader     = group_leader;
5327         event->pmu              = NULL;
5328         event->oncpu            = -1;
5329
5330         event->parent           = parent_event;
5331
5332         event->ns               = get_pid_ns(current->nsproxy->pid_ns);
5333         event->id               = atomic64_inc_return(&perf_event_id);
5334
5335         event->state            = PERF_EVENT_STATE_INACTIVE;
5336
5337         if (!overflow_handler && parent_event)
5338                 overflow_handler = parent_event->overflow_handler;
5339
5340         event->overflow_handler = overflow_handler;
5341
5342         if (attr->disabled)
5343                 event->state = PERF_EVENT_STATE_OFF;
5344
5345         pmu = NULL;
5346
5347         hwc = &event->hw;
5348         hwc->sample_period = attr->sample_period;
5349         if (attr->freq && attr->sample_freq)
5350                 hwc->sample_period = 1;
5351         hwc->last_period = hwc->sample_period;
5352
5353         local64_set(&hwc->period_left, hwc->sample_period);
5354
5355         /*
5356          * we currently do not support PERF_FORMAT_GROUP on inherited events
5357          */
5358         if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5359                 goto done;
5360
5361         pmu = perf_init_event(event);
5362
5363 done:
5364         err = 0;
5365         if (!pmu)
5366                 err = -EINVAL;
5367         else if (IS_ERR(pmu))
5368                 err = PTR_ERR(pmu);
5369
5370         if (err) {
5371                 if (event->ns)
5372                         put_pid_ns(event->ns);
5373                 kfree(event);
5374                 return ERR_PTR(err);
5375         }
5376
5377         event->pmu = pmu;
5378
5379         if (!event->parent) {
5380                 atomic_inc(&nr_events);
5381                 if (event->attr.mmap || event->attr.mmap_data)
5382                         atomic_inc(&nr_mmap_events);
5383                 if (event->attr.comm)
5384                         atomic_inc(&nr_comm_events);
5385                 if (event->attr.task)
5386                         atomic_inc(&nr_task_events);
5387                 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5388                         err = get_callchain_buffers();
5389                         if (err) {
5390                                 free_event(event);
5391                                 return ERR_PTR(err);
5392                         }
5393                 }
5394         }
5395
5396         return event;
5397 }
5398
5399 static int perf_copy_attr(struct perf_event_attr __user *uattr,
5400                           struct perf_event_attr *attr)
5401 {
5402         u32 size;
5403         int ret;
5404
5405         if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5406                 return -EFAULT;
5407
5408         /*
5409          * Zero the full structure, so that a short copy leaves the rest zeroed.
5410          */
5411         memset(attr, 0, sizeof(*attr));
5412
5413         ret = get_user(size, &uattr->size);
5414         if (ret)
5415                 return ret;
5416
5417         if (size > PAGE_SIZE)   /* silly large */
5418                 goto err_size;
5419
5420         if (!size)              /* abi compat */
5421                 size = PERF_ATTR_SIZE_VER0;
5422
5423         if (size < PERF_ATTR_SIZE_VER0)
5424                 goto err_size;
5425
5426         /*
5427          * If we're handed a bigger struct than we know of,
5428          * ensure all the unknown bits are 0 - i.e. new
5429          * user-space does not rely on any kernel feature
5430          * extensions we don't know about yet.
5431          */
5432         if (size > sizeof(*attr)) {
5433                 unsigned char __user *addr;
5434                 unsigned char __user *end;
5435                 unsigned char val;
5436
5437                 addr = (void __user *)uattr + sizeof(*attr);
5438                 end  = (void __user *)uattr + size;
5439
5440                 for (; addr < end; addr++) {
5441                         ret = get_user(val, addr);
5442                         if (ret)
5443                                 return ret;
5444                         if (val)
5445                                 goto err_size;
5446                 }
5447                 size = sizeof(*attr);
5448         }
5449
5450         ret = copy_from_user(attr, uattr, size);
5451         if (ret)
5452                 return -EFAULT;
5453
5454         /*
5455          * If the type is valid, the corresponding event creation will
5456          * verify the attr->config.
5457          */
5458         if (attr->type >= PERF_TYPE_MAX)
5459                 return -EINVAL;
5460
5461         if (attr->__reserved_1)
5462                 return -EINVAL;
5463
5464         if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5465                 return -EINVAL;
5466
5467         if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5468                 return -EINVAL;
5469
5470 out:
5471         return ret;
5472
5473 err_size:
5474         put_user(sizeof(*attr), &uattr->size);
5475         ret = -E2BIG;
5476         goto out;
5477 }
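
/*
 * Worked example (informational, not part of the original source) of the
 * size handling above:
 *
 *   - old user space passes uattr->size == PERF_ATTR_SIZE_VER0: only that
 *     many bytes are copied and the rest of *attr stays zero from the
 *     memset().
 *   - new user space passes a size larger than sizeof(*attr): the copy is
 *     accepted only if every byte beyond sizeof(*attr) is zero; otherwise
 *     the size the kernel understands is written back to uattr->size and
 *     -E2BIG is returned.
 */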
5478
5479 static int
5480 perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
5481 {
5482         struct perf_buffer *buffer = NULL, *old_buffer = NULL;
5483         int ret = -EINVAL;
5484
5485         if (!output_event)
5486                 goto set;
5487
5488         /* don't allow circular references */
5489         if (event == output_event)
5490                 goto out;
5491
5492         /*
5493          * Don't allow cross-cpu buffers
5494          */
5495         if (output_event->cpu != event->cpu)
5496                 goto out;
5497
5498         /*
5499          * If it's not a per-cpu buffer, it must be the same task.
5500          */
5501         if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5502                 goto out;
5503
5504 set:
5505         mutex_lock(&event->mmap_mutex);
5506         /* Can't redirect output if we've got an active mmap() */
5507         if (atomic_read(&event->mmap_count))
5508                 goto unlock;
5509
5510         if (output_event) {
5511                 /* get the buffer we want to redirect to */
5512                 buffer = perf_buffer_get(output_event);
5513                 if (!buffer)
5514                         goto unlock;
5515         }
5516
5517         old_buffer = event->buffer;
5518         rcu_assign_pointer(event->buffer, buffer);
5519         ret = 0;
5520 unlock:
5521         mutex_unlock(&event->mmap_mutex);
5522
5523         if (old_buffer)
5524                 perf_buffer_put(old_buffer);
5525 out:
5526         return ret;
5527 }
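
/*
 * Usage sketch (assumption, not part of the original file): user space
 * reaches this path either by passing PERF_FLAG_FD_OUTPUT together with
 * a group_fd to sys_perf_event_open(), or through the
 * PERF_EVENT_IOC_SET_OUTPUT ioctl, e.g.:
 *
 *      ioctl(event_fd, PERF_EVENT_IOC_SET_OUTPUT, output_fd);
 *
 * so that several events on the same cpu/task share one mmap()ed buffer.
 */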
5528
5529 /**
5530  * sys_perf_event_open - open a performance event, associate it to a task/cpu
5531  *
5532  * @attr_uptr:  event attributes for monitoring/sampling
5533  * @pid:                target pid
5534  * @cpu:                target cpu
5535  * @group_fd:           group leader event fd
5536  */
5537 SYSCALL_DEFINE5(perf_event_open,
5538                 struct perf_event_attr __user *, attr_uptr,
5539                 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
5540 {
5541         struct perf_event *group_leader = NULL, *output_event = NULL;
5542         struct perf_event *event, *sibling;
5543         struct perf_event_attr attr;
5544         struct perf_event_context *ctx;
5545         struct file *event_file = NULL;
5546         struct file *group_file = NULL;
5547         struct task_struct *task = NULL;
5548         struct pmu *pmu;
5549         int event_fd;
5550         int move_group = 0;
5551         int fput_needed = 0;
5552         int err;
5553
5554         /* for future expandability... */
5555         if (flags & ~(PERF_FLAG_FD_NO_GROUP | PERF_FLAG_FD_OUTPUT))
5556                 return -EINVAL;
5557
5558         err = perf_copy_attr(attr_uptr, &attr);
5559         if (err)
5560                 return err;
5561
5562         if (!attr.exclude_kernel) {
5563                 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
5564                         return -EACCES;
5565         }
5566
5567         if (attr.freq) {
5568                 if (attr.sample_freq > sysctl_perf_event_sample_rate)
5569                         return -EINVAL;
5570         }
5571
5572         event_fd = get_unused_fd_flags(O_RDWR);
5573         if (event_fd < 0)
5574                 return event_fd;
5575
5576         if (group_fd != -1) {
5577                 group_leader = perf_fget_light(group_fd, &fput_needed);
5578                 if (IS_ERR(group_leader)) {
5579                         err = PTR_ERR(group_leader);
5580                         goto err_fd;
5581                 }
5582                 group_file = group_leader->filp;
5583                 if (flags & PERF_FLAG_FD_OUTPUT)
5584                         output_event = group_leader;
5585                 if (flags & PERF_FLAG_FD_NO_GROUP)
5586                         group_leader = NULL;
5587         }
5588
5589         event = perf_event_alloc(&attr, cpu, group_leader, NULL, NULL);
5590         if (IS_ERR(event)) {
5591                 err = PTR_ERR(event);
5592                 goto err_fd;
5593         }
5594
5595         /*
5596          * Special case software events and allow them to be part of
5597          * any hardware group.
5598          */
5599         pmu = event->pmu;
5600
5601         if (group_leader &&
5602             (is_software_event(event) != is_software_event(group_leader))) {
5603                 if (is_software_event(event)) {
5604                         /*
5605                          * event and group_leader are not both software events;
5606                          * since event is a software event, group_leader is not.
5607                          *
5608                          * Allow the addition of software events to !software
5609                          * groups; this is safe because software events never
5610                          * fail to schedule.
5611                          */
5612                         pmu = group_leader->pmu;
5613                 } else if (is_software_event(group_leader) &&
5614                            (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
5615                         /*
5616                          * In case the group is a pure software group, and we
5617                          * try to add a hardware event, move the whole group to
5618                          * the hardware context.
5619                          */
5620                         move_group = 1;
5621                 }
5622         }
5623
5624         if (pid != -1) {
5625                 task = find_lively_task_by_vpid(pid);
5626                 if (IS_ERR(task)) {
5627                         err = PTR_ERR(task);
5628                         goto err_group_fd;
5629                 }
5630         }
5631
5632         /*
5633          * Get the target context (task or percpu):
5634          */
5635         ctx = find_get_context(pmu, task, cpu);
5636         if (IS_ERR(ctx)) {
5637                 err = PTR_ERR(ctx);
5638                 goto err_group_fd;
5639         }
5640
5641         /*
5642          * Look up the group leader (we will attach this event to it):
5643          */
5644         if (group_leader) {
5645                 err = -EINVAL;
5646
5647                 /*
5648                  * Do not allow a recursive hierarchy (the proposed group
5649                  * leader must not itself be a sibling in another group):
5650                  */
5651                 if (group_leader->group_leader != group_leader)
5652                         goto err_context;
5653                 /*
5654                  * Do not allow to attach to a group in a different
5655                  * task or CPU context:
5656                  */
5657                 if (move_group) {
5658                         if (group_leader->ctx->type != ctx->type)
5659                                 goto err_context;
5660                 } else {
5661                         if (group_leader->ctx != ctx)
5662                                 goto err_context;
5663                 }
5664
5665                 /*
5666                  * Only a group leader can be exclusive or pinned
5667                  */
5668                 if (attr.exclusive || attr.pinned)
5669                         goto err_context;
5670         }
5671
5672         if (output_event) {
5673                 err = perf_event_set_output(event, output_event);
5674                 if (err)
5675                         goto err_context;
5676         }
5677
5678         event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
5679         if (IS_ERR(event_file)) {
5680                 err = PTR_ERR(event_file);
5681                 goto err_context;
5682         }
5683
5684         if (move_group) {
5685                 struct perf_event_context *gctx = group_leader->ctx;
5686
5687                 mutex_lock(&gctx->mutex);
5688                 perf_event_remove_from_context(group_leader);
5689                 list_for_each_entry(sibling, &group_leader->sibling_list,
5690                                     group_entry) {
5691                         perf_event_remove_from_context(sibling);
5692                         put_ctx(gctx);
5693                 }
5694                 mutex_unlock(&gctx->mutex);
5695                 put_ctx(gctx);
5696         }
5697
5698         event->filp = event_file;
5699         WARN_ON_ONCE(ctx->parent_ctx);
5700         mutex_lock(&ctx->mutex);
5701
5702         if (move_group) {
5703                 perf_install_in_context(ctx, group_leader, cpu);
5704                 get_ctx(ctx);
5705                 list_for_each_entry(sibling, &group_leader->sibling_list,
5706                                     group_entry) {
5707                         perf_install_in_context(ctx, sibling, cpu);
5708                         get_ctx(ctx);
5709                 }
5710         }
5711
5712         perf_install_in_context(ctx, event, cpu);
5713         ++ctx->generation;
5714         mutex_unlock(&ctx->mutex);
5715
5716         event->owner = current;
5717         get_task_struct(current);
5718         mutex_lock(&current->perf_event_mutex);
5719         list_add_tail(&event->owner_entry, &current->perf_event_list);
5720         mutex_unlock(&current->perf_event_mutex);
5721
5722         /*
5723          * Drop the file reference on the group leader after placing the
5724          * new event on the sibling_list. This ensures destruction
5725          * of the group leader will find the pointer to itself in
5726          * perf_group_detach().
5727          */
5728         fput_light(group_file, fput_needed);
5729         fd_install(event_fd, event_file);
5730         return event_fd;
5731
5732 err_context:
5733         put_ctx(ctx);
5734 err_group_fd:
5735         fput_light(group_file, fput_needed);
5736         free_event(event);
5737 err_fd:
5738         put_unused_fd(event_fd);
5739         return err;
5740 }
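
/*
 * User-space usage sketch (assumption, not part of the original file):
 * there is no libc wrapper for this syscall, so a caller typically goes
 * through syscall(2) directly, e.g. to count task clock for itself on
 * any cpu:
 *
 *      struct perf_event_attr attr = {
 *              .type   = PERF_TYPE_SOFTWARE,
 *              .config = PERF_COUNT_SW_TASK_CLOCK,
 *              .size   = sizeof(attr),
 *      };
 *      u64 count;
 *      int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      // pid 0 = calling task, cpu -1 = any cpu, group_fd -1 = no group
 *      read(fd, &count, sizeof(count));
 */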
5741
5742 /**
5743  * perf_event_create_kernel_counter
5744  *
5745  * @attr: attributes of the counter to create
5746  * @cpu: cpu on which the counter is bound
5747  * @task: task to profile (NULL for percpu)
5748  */
5749 struct perf_event *
5750 perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5751                                  struct task_struct *task,
5752                                  perf_overflow_handler_t overflow_handler)
5753 {
5754         struct perf_event_context *ctx;
5755         struct perf_event *event;
5756         int err;
5757
5758         /*
5759          * Get the target context (task or percpu):
5760          */
5761
5762         event = perf_event_alloc(attr, cpu, NULL, NULL, overflow_handler);
5763         if (IS_ERR(event)) {
5764                 err = PTR_ERR(event);
5765                 goto err;
5766         }
5767
5768         ctx = find_get_context(event->pmu, task, cpu);
5769         if (IS_ERR(ctx)) {
5770                 err = PTR_ERR(ctx);
5771                 goto err_free;
5772         }
5773
5774         event->filp = NULL;
5775         WARN_ON_ONCE(ctx->parent_ctx);
5776         mutex_lock(&ctx->mutex);
5777         perf_install_in_context(ctx, event, cpu);
5778         ++ctx->generation;
5779         mutex_unlock(&ctx->mutex);
5780
5781         event->owner = current;
5782         get_task_struct(current);
5783         mutex_lock(&current->perf_event_mutex);
5784         list_add_tail(&event->owner_entry, &current->perf_event_list);
5785         mutex_unlock(&current->perf_event_mutex);
5786
5787         return event;
5788
5789 err_free:
5790         free_event(event);
5791 err:
5792         return ERR_PTR(err);
5793 }
5794 EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
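
/*
 * In-kernel usage sketch (assumption, not part of the original file):
 * callers such as the hw_breakpoint layer create counters directly,
 * bypassing the syscall, e.g. a per-cpu counter with a callback:
 *
 *      event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *                                               my_overflow_handler);
 *      if (IS_ERR(event))
 *              return PTR_ERR(event);
 *
 * where "attr" and "my_overflow_handler" are set up by the caller;
 * passing NULL as the handler keeps the default overflow behaviour.
 */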
5795
5796 static void sync_child_event(struct perf_event *child_event,
5797                                struct task_struct *child)
5798 {
5799         struct perf_event *parent_event = child_event->parent;
5800         u64 child_val;
5801
5802         if (child_event->attr.inherit_stat)
5803                 perf_event_read_event(child_event, child);
5804
5805         child_val = perf_event_count(child_event);
5806
5807         /*
5808          * Add back the child's count to the parent's count:
5809          */
5810         atomic64_add(child_val, &parent_event->child_count);
5811         atomic64_add(child_event->total_time_enabled,
5812                      &parent_event->child_total_time_enabled);
5813         atomic64_add(child_event->total_time_running,
5814                      &parent_event->child_total_time_running);
5815
5816         /*
5817          * Remove this event from the parent's list
5818          */
5819         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
5820         mutex_lock(&parent_event->child_mutex);
5821         list_del_init(&child_event->child_list);
5822         mutex_unlock(&parent_event->child_mutex);
5823
5824         /*
5825          * Release the parent event, if this was the last
5826          * reference to it.
5827          */
5828         fput(parent_event->filp);
5829 }
5830
5831 static void
5832 __perf_event_exit_task(struct perf_event *child_event,
5833                          struct perf_event_context *child_ctx,
5834                          struct task_struct *child)
5835 {
5836         struct perf_event *parent_event;
5837
5838         perf_event_remove_from_context(child_event);
5839
5840         parent_event = child_event->parent;
5841         /*
5842          * It can happen that parent exits first, and has events
5843          * that are still around due to the child reference. These
5844          * events need to be zapped - but otherwise linger.
5845          */
5846         if (parent_event) {
5847                 sync_child_event(child_event, child);
5848                 free_event(child_event);
5849         }
5850 }
5851
5852 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
5853 {
5854         struct perf_event *child_event, *tmp;
5855         struct perf_event_context *child_ctx;
5856         unsigned long flags;
5857
5858         if (likely(!child->perf_event_ctxp[ctxn])) {
5859                 perf_event_task(child, NULL, 0);
5860                 return;
5861         }
5862
5863         local_irq_save(flags);
5864         /*
5865          * We can't reschedule here because interrupts are disabled,
5866          * and either child is current or it is a task that can't be
5867          * scheduled, so we are now safe from rescheduling changing
5868          * our context.
5869          */
5870         child_ctx = child->perf_event_ctxp[ctxn];
5871         __perf_event_task_sched_out(child_ctx);
5872
5873         /*
5874          * Take the context lock here so that if find_get_context is
5875          * reading child->perf_event_ctxp, we wait until it has
5876          * incremented the context's refcount before we do put_ctx below.
5877          */
5878         raw_spin_lock(&child_ctx->lock);
5879         child->perf_event_ctxp[ctxn] = NULL;
5880         /*
5881          * If this context is a clone, unclone it so it can't get
5882          * swapped to another process while we're removing all
5883          * the events from it.
5884          */
5885         unclone_ctx(child_ctx);
5886         update_context_time(child_ctx);
5887         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
5888
5889         /*
5890          * Report the task dead after unscheduling the events so that we
5891          * won't get any samples after PERF_RECORD_EXIT. We can however still
5892          * get a few PERF_RECORD_READ events.
5893          */
5894         perf_event_task(child, child_ctx, 0);
5895
5896         /*
5897          * We can recurse on the same lock type through:
5898          *
5899          *   __perf_event_exit_task()
5900          *     sync_child_event()
5901          *       fput(parent_event->filp)
5902          *         perf_release()
5903          *           mutex_lock(&ctx->mutex)
5904          *
5905          * But since it's the parent context it won't be the same instance.
5906          */
5907         mutex_lock(&child_ctx->mutex);
5908
5909 again:
5910         list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
5911                                  group_entry)
5912                 __perf_event_exit_task(child_event, child_ctx, child);
5913
5914         list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
5915                                  group_entry)
5916                 __perf_event_exit_task(child_event, child_ctx, child);
5917
5918         /*
5919          * If the last event was a group event, it will have appended all
5920          * its siblings to the list, but we obtained 'tmp' before that, so it
5921          * will still point to the list head terminating the iteration.
5922          */
5923         if (!list_empty(&child_ctx->pinned_groups) ||
5924             !list_empty(&child_ctx->flexible_groups))
5925                 goto again;
5926
5927         mutex_unlock(&child_ctx->mutex);
5928
5929         put_ctx(child_ctx);
5930 }
5931
5932 /*
5933  * When a child task exits, feed back event values to parent events.
5934  */
5935 void perf_event_exit_task(struct task_struct *child)
5936 {
5937         int ctxn;
5938
5939         for_each_task_context_nr(ctxn)
5940                 perf_event_exit_task_context(child, ctxn);
5941 }
5942
5943 static void perf_free_event(struct perf_event *event,
5944                             struct perf_event_context *ctx)
5945 {
5946         struct perf_event *parent = event->parent;
5947
5948         if (WARN_ON_ONCE(!parent))
5949                 return;
5950
5951         mutex_lock(&parent->child_mutex);
5952         list_del_init(&event->child_list);
5953         mutex_unlock(&parent->child_mutex);
5954
5955         fput(parent->filp);
5956
5957         perf_group_detach(event);
5958         list_del_event(event, ctx);
5959         free_event(event);
5960 }
5961
5962 /*
5963  * Free an unexposed, unused context as created by inheritance in
5964  * perf_event_init_task below; used by fork() in case of failure.
5965  */
5966 void perf_event_free_task(struct task_struct *task)
5967 {
5968         struct perf_event_context *ctx;
5969         struct perf_event *event, *tmp;
5970         int ctxn;
5971
5972         for_each_task_context_nr(ctxn) {
5973                 ctx = task->perf_event_ctxp[ctxn];
5974                 if (!ctx)
5975                         continue;
5976
5977                 mutex_lock(&ctx->mutex);
5978 again:
5979                 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
5980                                 group_entry)
5981                         perf_free_event(event, ctx);
5982
5983                 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
5984                                 group_entry)
5985                         perf_free_event(event, ctx);
5986
5987                 if (!list_empty(&ctx->pinned_groups) ||
5988                                 !list_empty(&ctx->flexible_groups))
5989                         goto again;
5990
5991                 mutex_unlock(&ctx->mutex);
5992
5993                 put_ctx(ctx);
5994         }
5995 }
5996
5997 void perf_event_delayed_put(struct task_struct *task)
5998 {
5999         int ctxn;
6000
6001         for_each_task_context_nr(ctxn)
6002                 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6003 }
6004
6005 /*
6006  * inherit an event from parent task to child task:
6007  */
6008 static struct perf_event *
6009 inherit_event(struct perf_event *parent_event,
6010               struct task_struct *parent,
6011               struct perf_event_context *parent_ctx,
6012               struct task_struct *child,
6013               struct perf_event *group_leader,
6014               struct perf_event_context *child_ctx)
6015 {
6016         struct perf_event *child_event;
6017         unsigned long flags;
6018
6019         /*
6020          * Instead of creating recursive hierarchies of events,
6021          * we link inherited events back to the original parent,
6022          * which is guaranteed to have a filp that we use as the
6023          * reference count:
6024          */
6025         if (parent_event->parent)
6026                 parent_event = parent_event->parent;
6027
6028         child_event = perf_event_alloc(&parent_event->attr,
6029                                            parent_event->cpu,
6030                                            group_leader, parent_event,
6031                                            NULL);
6032         if (IS_ERR(child_event))
6033                 return child_event;
6034         get_ctx(child_ctx);
6035
6036         /*
6037          * Make the child state follow the state of the parent event,
6038          * not its attr.disabled bit.  We hold the parent's mutex,
6039          * so we won't race with perf_event_{en, dis}able_family.
6040          */
6041         if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6042                 child_event->state = PERF_EVENT_STATE_INACTIVE;
6043         else
6044                 child_event->state = PERF_EVENT_STATE_OFF;
6045
6046         if (parent_event->attr.freq) {
6047                 u64 sample_period = parent_event->hw.sample_period;
6048                 struct hw_perf_event *hwc = &child_event->hw;
6049
6050                 hwc->sample_period = sample_period;
6051                 hwc->last_period   = sample_period;
6052
6053                 local64_set(&hwc->period_left, sample_period);
6054         }
6055
6056         child_event->ctx = child_ctx;
6057         child_event->overflow_handler = parent_event->overflow_handler;
6058
6059         /*
6060          * Link it up in the child's context:
6061          */
6062         raw_spin_lock_irqsave(&child_ctx->lock, flags);
6063         add_event_to_ctx(child_event, child_ctx);
6064         raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6065
6066         /*
6067          * Get a reference to the parent filp - we will fput it
6068          * when the child event exits. This is safe to do because
6069          * we are in the parent and we know that the filp still
6070          * exists and has a nonzero count:
6071          */
6072         atomic_long_inc(&parent_event->filp->f_count);
6073
6074         /*
6075          * Link this into the parent event's child list
6076          */
6077         WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6078         mutex_lock(&parent_event->child_mutex);
6079         list_add_tail(&child_event->child_list, &parent_event->child_list);
6080         mutex_unlock(&parent_event->child_mutex);
6081
6082         return child_event;
6083 }
6084
6085 static int inherit_group(struct perf_event *parent_event,
6086               struct task_struct *parent,
6087               struct perf_event_context *parent_ctx,
6088               struct task_struct *child,
6089               struct perf_event_context *child_ctx)
6090 {
6091         struct perf_event *leader;
6092         struct perf_event *sub;
6093         struct perf_event *child_ctr;
6094
6095         leader = inherit_event(parent_event, parent, parent_ctx,
6096                                  child, NULL, child_ctx);
6097         if (IS_ERR(leader))
6098                 return PTR_ERR(leader);
6099         list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6100                 child_ctr = inherit_event(sub, parent, parent_ctx,
6101                                             child, leader, child_ctx);
6102                 if (IS_ERR(child_ctr))
6103                         return PTR_ERR(child_ctr);
6104         }
6105         return 0;
6106 }
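
/*
 * Note (informational, not part of the original source): the group
 * leader is inherited first with a NULL leader, so its copy becomes its
 * own leader in the child context; each sibling is then inherited with
 * that new leader, preserving the parent's group structure.
 */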
6107
6108 static int
6109 inherit_task_group(struct perf_event *event, struct task_struct *parent,
6110                    struct perf_event_context *parent_ctx,
6111                    struct task_struct *child, int ctxn,
6112                    int *inherited_all)
6113 {
6114         int ret;
6115         struct perf_event_context *child_ctx;
6116
6117         if (!event->attr.inherit) {
6118                 *inherited_all = 0;
6119                 return 0;
6120         }
6121
6122         child_ctx = child->perf_event_ctxp[ctxn];
6123         if (!child_ctx) {
6124                 /*
6125                  * This is executed from the parent task context, so
6126                  * inherit events that have been marked for cloning.
6127                  * First allocate and initialize a context for the
6128                  * child.
6129                  */
6130
6131                 child_ctx = alloc_perf_context(event->pmu, child);
6132                 if (!child_ctx)
6133                         return -ENOMEM;
6134
6135                 child->perf_event_ctxp[ctxn] = child_ctx;
6136         }
6137
6138         ret = inherit_group(event, parent, parent_ctx,
6139                             child, child_ctx);
6140
6141         if (ret)
6142                 *inherited_all = 0;
6143
6144         return ret;
6145 }
6146
6147 /*
6148  * Initialize the perf_event context in task_struct
6149  */
6150 int perf_event_init_context(struct task_struct *child, int ctxn)
6151 {
6152         struct perf_event_context *child_ctx, *parent_ctx;
6153         struct perf_event_context *cloned_ctx;
6154         struct perf_event *event;
6155         struct task_struct *parent = current;
6156         int inherited_all = 1;
6157         int ret = 0;
6158
6159         child->perf_event_ctxp[ctxn] = NULL;
6160
6161         mutex_init(&child->perf_event_mutex);
6162         INIT_LIST_HEAD(&child->perf_event_list);
6163
6164         if (likely(!parent->perf_event_ctxp[ctxn]))
6165                 return 0;
6166
6167         /*
6168          * If the parent's context is a clone, pin it so it won't get
6169          * swapped under us.
6170          */
6171         parent_ctx = perf_pin_task_context(parent, ctxn);
6172
6173         /*
6174          * No need to check if parent_ctx != NULL here; since we saw
6175          * it non-NULL earlier, the only reason for it to become NULL
6176          * is if we exit, and since we're currently in the middle of
6177          * a fork we can't be exiting at the same time.
6178          */
6179
6180         /*
6181          * Lock the parent list. No need to lock the child - not PID
6182          * hashed yet and not running, so nobody can access it.
6183          */
6184         mutex_lock(&parent_ctx->mutex);
6185
6186         /*
6187          * We don't have to disable NMIs - we are only looking at
6188          * the list, not manipulating it:
6189          */
6190         list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
6191                 ret = inherit_task_group(event, parent, parent_ctx,
6192                                          child, ctxn, &inherited_all);
6193                 if (ret)
6194                         break;
6195         }
6196
6197         list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6198                 ret = inherit_task_group(event, parent, parent_ctx,
6199                                          child, ctxn, &inherited_all);
6200                 if (ret)
6201                         break;
6202         }
6203
6204         child_ctx = child->perf_event_ctxp[ctxn];
6205
6206         if (child_ctx && inherited_all) {
6207                 /*
6208                  * Mark the child context as a clone of the parent
6209                  * context, or of whatever the parent is a clone of.
6210                  * Note that if the parent is a clone, it could get
6211                  * uncloned at any point, but that doesn't matter
6212                  * because the list of events and the generation
6213                  * count can't have changed since we took the mutex.
6214                  */
6215                 cloned_ctx = rcu_dereference(parent_ctx->parent_ctx);
6216                 if (cloned_ctx) {
6217                         child_ctx->parent_ctx = cloned_ctx;
6218                         child_ctx->parent_gen = parent_ctx->parent_gen;
6219                 } else {
6220                         child_ctx->parent_ctx = parent_ctx;
6221                         child_ctx->parent_gen = parent_ctx->generation;
6222                 }
6223                 get_ctx(child_ctx->parent_ctx);
6224         }
6225
6226         mutex_unlock(&parent_ctx->mutex);
6227
6228         perf_unpin_context(parent_ctx);
6229
6230         return ret;
6231 }
6232
6233 /*
6234  * Initialize the perf_event contexts in task_struct
6235  */
6236 int perf_event_init_task(struct task_struct *child)
6237 {
6238         int ctxn, ret;
6239
6240         for_each_task_context_nr(ctxn) {
6241                 ret = perf_event_init_context(child, ctxn);
6242                 if (ret)
6243                         return ret;
6244         }
6245
6246         return 0;
6247 }
6248
6249 static void __init perf_event_init_all_cpus(void)
6250 {
6251         struct swevent_htable *swhash;
6252         int cpu;
6253
6254         for_each_possible_cpu(cpu) {
6255                 swhash = &per_cpu(swevent_htable, cpu);
6256                 mutex_init(&swhash->hlist_mutex);
6257                 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
6258         }
6259 }
6260
6261 static void __cpuinit perf_event_init_cpu(int cpu)
6262 {
6263         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6264
6265         mutex_lock(&swhash->hlist_mutex);
6266         if (swhash->hlist_refcount > 0) {
6267                 struct swevent_hlist *hlist;
6268
6269                 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6270                 WARN_ON(!hlist);
6271                 rcu_assign_pointer(swhash->swevent_hlist, hlist);
6272         }
6273         mutex_unlock(&swhash->hlist_mutex);
6274 }
6275
6276 #ifdef CONFIG_HOTPLUG_CPU
6277 static void perf_pmu_rotate_stop(struct pmu *pmu)
6278 {
6279         struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6280
6281         WARN_ON(!irqs_disabled());
6282
6283         list_del_init(&cpuctx->rotation_list);
6284 }
6285
6286 static void __perf_event_exit_context(void *__info)
6287 {
6288         struct perf_event_context *ctx = __info;
6289         struct perf_event *event, *tmp;
6290
6291         perf_pmu_rotate_stop(ctx->pmu);
6292
6293         list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
6294                 __perf_event_remove_from_context(event);
6295         list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
6296                 __perf_event_remove_from_context(event);
6297 }
6298
6299 static void perf_event_exit_cpu_context(int cpu)
6300 {
6301         struct perf_event_context *ctx;
6302         struct pmu *pmu;
6303         int idx;
6304
6305         idx = srcu_read_lock(&pmus_srcu);
6306         list_for_each_entry_rcu(pmu, &pmus, entry) {
6307                 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
6308
6309                 mutex_lock(&ctx->mutex);
6310                 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6311                 mutex_unlock(&ctx->mutex);
6312         }
6313         srcu_read_unlock(&pmus_srcu, idx);
6314 }
6315
6316 static void perf_event_exit_cpu(int cpu)
6317 {
6318         struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
6319
6320         mutex_lock(&swhash->hlist_mutex);
6321         swevent_hlist_release(swhash);
6322         mutex_unlock(&swhash->hlist_mutex);
6323
6324         perf_event_exit_cpu_context(cpu);
6325 }
6326 #else
6327 static inline void perf_event_exit_cpu(int cpu) { }
6328 #endif
6329
6330 static int __cpuinit
6331 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6332 {
6333         unsigned int cpu = (long)hcpu;
6334
6335         switch (action & ~CPU_TASKS_FROZEN) {
6336
6337         case CPU_UP_PREPARE:
6338         case CPU_DOWN_FAILED:
6339                 perf_event_init_cpu(cpu);
6340                 break;
6341
6342         case CPU_UP_CANCELED:
6343         case CPU_DOWN_PREPARE:
6344                 perf_event_exit_cpu(cpu);
6345                 break;
6346
6347         default:
6348                 break;
6349         }
6350
6351         return NOTIFY_OK;
6352 }
6353
6354 void __init perf_event_init(void)
6355 {
6356         perf_event_init_all_cpus();
6357         init_srcu_struct(&pmus_srcu);
6358         perf_pmu_register(&perf_swevent);
6359         perf_pmu_register(&perf_cpu_clock);
6360         perf_pmu_register(&perf_task_clock);
6361         perf_tp_register();
6362         perf_cpu_notifier(perf_cpu_notify);
6363 }