tracing: Make syscall tracepoints conditional
kernel/tracepoint.c
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/sched.h>

extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * RCU is used to delay the freeing of old probe arrays until a quiescent
 * state is reached.
 * Modifications of tracepoint entries are protected by tracepoints_mutex.
 */
struct tracepoint_entry {
        struct hlist_node hlist;
        void **funcs;
        int refcount;   /* Number of times armed. 0 if disarmed. */
        char name[0];
};

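/*
 * Container for a probe callback array. The union lets the same allocation
 * be queued either for RCU-deferred freeing (u.rcu) or on the old_probes
 * list for batched release (u.list).
 */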
struct tp_probes {
        union {
                struct rcu_head rcu;
                struct list_head list;
        } u;
        void *probes[0];
};

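/*
 * Allocate a tp_probes structure with room for @count probe pointers and
 * return its probes[] array, or NULL if the allocation fails.
 */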
static inline void *allocate_probes(int count)
{
        struct tp_probes *p = kmalloc(count * sizeof(void *)
                        + sizeof(struct tp_probes), GFP_KERNEL);
        return p == NULL ? NULL : p->probes;
}

static void rcu_free_old_probes(struct rcu_head *head)
{
        kfree(container_of(head, struct tp_probes, u.rcu));
}

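/*
 * Defer freeing of an old probe array until after an RCU-sched grace
 * period, so that callers still iterating the old array can finish.
 */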
static inline void release_probes(void *old)
{
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
                call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
        }
}

static void debug_print_probes(struct tracepoint_entry *entry)
{
        int i;

        if (!tracepoint_debug || !entry->funcs)
                return;

        for (i = 0; entry->funcs[i]; i++)
                printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
}

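/*
 * Add @probe to the entry's callback array. Returns the old callback array,
 * which the caller must hand to release_probes() once the new array is
 * visible, or an ERR_PTR value on error (-EEXIST if the probe is already
 * present, -ENOMEM on allocation failure).
 */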
static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
{
        int nr_probes = 0;
        void **old, **new;

        WARN_ON(!probe);

        debug_print_probes(entry);
        old = entry->funcs;
        if (old) {
                /* (N -> N+1), (N != 0, 1) probes */
                for (nr_probes = 0; old[nr_probes]; nr_probes++)
                        if (old[nr_probes] == probe)
                                return ERR_PTR(-EEXIST);
        }
        /* + 2 : one for new probe, one for NULL func */
        new = allocate_probes(nr_probes + 2);
        if (new == NULL)
                return ERR_PTR(-ENOMEM);
        if (old)
                memcpy(new, old, nr_probes * sizeof(void *));
        new[nr_probes] = probe;
        new[nr_probes + 1] = NULL;
        entry->refcount = nr_probes + 1;
        entry->funcs = new;
        debug_print_probes(entry);
        return old;
}

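/*
 * Remove @probe from the entry's callback array; a NULL @probe removes all
 * probes. Returns the old callback array for the caller to release, or an
 * ERR_PTR value on error.
 */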
static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
{
        int nr_probes = 0, nr_del = 0, i;
        void **old, **new;

        old = entry->funcs;

        if (!old)
                return ERR_PTR(-ENOENT);

        debug_print_probes(entry);
        /* (N -> M), (N > 1, M >= 0) probes */
        for (nr_probes = 0; old[nr_probes]; nr_probes++) {
                if ((!probe || old[nr_probes] == probe))
                        nr_del++;
        }

        if (nr_probes - nr_del == 0) {
                /* N -> 0, (N > 1) */
                entry->funcs = NULL;
                entry->refcount = 0;
                debug_print_probes(entry);
                return old;
        } else {
                int j = 0;
                /* N -> M, (N > 1, M > 0) */
                /* + 1 for NULL */
                new = allocate_probes(nr_probes - nr_del + 1);
                if (new == NULL)
                        return ERR_PTR(-ENOMEM);
                for (i = 0; old[i]; i++)
                        if ((probe && old[i] != probe))
                                new[j++] = old[i];
                new[nr_probes - nr_del] = NULL;
                entry->refcount = nr_probes - nr_del;
                entry->funcs = new;
        }
        debug_print_probes(entry);
        return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct tracepoint_entry *e;
        u32 hash = jhash(name, strlen(name), 0);

        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
        hlist_for_each_entry(e, node, head, hlist) {
                if (!strcmp(name, e->name))
                        return e;
        }
        return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct tracepoint_entry *e;
        size_t name_len = strlen(name) + 1;
        u32 hash = jhash(name, name_len-1, 0);

        head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
        hlist_for_each_entry(e, node, head, hlist) {
                if (!strcmp(name, e->name)) {
                        printk(KERN_NOTICE
                                "tracepoint %s busy\n", name);
                        return ERR_PTR(-EEXIST);        /* Already there */
                }
        }
        /*
         * Using kmalloc here to allocate a variable length element. Could
         * cause some memory fragmentation if overused.
         */
        e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
        if (!e)
                return ERR_PTR(-ENOMEM);
        memcpy(&e->name[0], name, name_len);
        e->funcs = NULL;
        e->refcount = 0;
        hlist_add_head(&e->hlist, head);
        return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
        hlist_del(&e->hlist);
        kfree(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
        struct tracepoint *elem, int active)
{
        WARN_ON(strcmp((*entry)->name, elem->name) != 0);

        /*
         * rcu_assign_pointer has a smp_wmb() which makes sure that the new
         * probe callbacks array is consistent before setting a pointer to it.
         * This array is referenced by __DO_TRACE from
         * include/linux/tracepoint.h. A matching smp_read_barrier_depends()
         * is used there.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
        elem->state = active;
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting an RCU grace period after clearing elem->funcs ensures
 * that the original callback is not used anymore. This is guaranteed by the
 * preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
        elem->state = 0;
        rcu_assign_pointer(elem->funcs, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 */
void
tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
{
        struct tracepoint *iter;
        struct tracepoint_entry *mark_entry;

        if (!begin)
                return;

        mutex_lock(&tracepoints_mutex);
        for (iter = begin; iter < end; iter++) {
                mark_entry = get_tracepoint(iter->name);
                if (mark_entry) {
                        set_tracepoint(&mark_entry, iter,
                                        !!mark_entry->refcount);
                } else {
                        disable_tracepoint(iter);
                }
        }
        mutex_unlock(&tracepoints_mutex);
}

/*
 * Update probes, removing the faulty probes.
 */
static void tracepoint_update_probes(void)
{
        /* Core kernel tracepoints */
        tracepoint_update_probe_range(__start___tracepoints,
                __stop___tracepoints);
        /* tracepoints in modules. */
        module_update_tracepoints();
}

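/*
 * Look up (or create) the hash table entry for @name and add @probe to it.
 * Must be called with tracepoints_mutex held. Returns the old probe array
 * or an ERR_PTR value.
 */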
static void *tracepoint_add_probe(const char *name, void *probe)
{
        struct tracepoint_entry *entry;
        void *old;

        entry = get_tracepoint(name);
        if (!entry) {
                entry = add_tracepoint(name);
                if (IS_ERR(entry))
                        return entry;
        }
        old = tracepoint_entry_add_probe(entry, probe);
        if (IS_ERR(old) && !entry->refcount)
                remove_tracepoint(entry);
        return old;
}

/**
 * tracepoint_probe_register -  Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe)
{
        void *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe);
        mutex_unlock(&tracepoints_mutex);
        if (IS_ERR(old))
                return PTR_ERR(old);

        tracepoint_update_probes();             /* may update entry */
        release_probes(old);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register);
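/*
 * Example usage (a sketch; "subsys_event" and probe_subsys_event are
 * hypothetical, and the probe prototype must match the tracepoint's
 * TP_PROTO declaration):
 *
 *        static void probe_subsys_event(int arg)
 *        {
 *                ...
 *        }
 *
 *        ret = tracepoint_probe_register("subsys_event", probe_subsys_event);
 *        ...
 *        tracepoint_probe_unregister("subsys_event", probe_subsys_event);
 */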

static void *tracepoint_remove_probe(const char *name, void *probe)
{
        struct tracepoint_entry *entry;
        void *old;

        entry = get_tracepoint(name);
        if (!entry)
                return ERR_PTR(-ENOENT);
        old = tracepoint_entry_remove_probe(entry, probe);
        if (IS_ERR(old))
                return old;
        if (!entry->refcount)
                remove_tracepoint(entry);
        return old;
}

/**
 * tracepoint_probe_unregister -  Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * We do not need to call synchronize_sched() to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe)
{
        void *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_remove_probe(name, probe);
        mutex_unlock(&tracepoints_mutex);
        if (IS_ERR(old))
                return PTR_ERR(old);

        tracepoint_update_probes();             /* may update entry */
        release_probes(old);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

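/*
 * Probe arrays replaced through the *_noupdate API are parked on this list
 * until tracepoint_probe_update_all() connects the new arrays and releases
 * the old ones. Both old_probes and need_update are protected by
 * tracepoints_mutex.
 */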
static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
        need_update = 1;
        if (old) {
                struct tp_probes *tp_probes = container_of(old,
                        struct tp_probes, probes[0]);
                list_add(&tp_probes->u.list, &old_probes);
        }
}

/**
 * tracepoint_probe_register_noupdate -  register a probe but do not connect it
 * @name: tracepoint name
 * @probe: probe handler
 *
 * The caller must then call tracepoint_probe_update_all() to connect the
 * registered probes.
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe)
{
        void *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_add_probe(name, probe);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_add_old_probes(old);
        mutex_unlock(&tracepoints_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate -  remove a probe but do not disconnect it
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * The caller must then call tracepoint_probe_update_all() to disconnect the
 * removed probes.
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
{
        void *old;

        mutex_lock(&tracepoints_mutex);
        old = tracepoint_remove_probe(name, probe);
        if (IS_ERR(old)) {
                mutex_unlock(&tracepoints_mutex);
                return PTR_ERR(old);
        }
        tracepoint_add_old_probes(old);
        mutex_unlock(&tracepoints_mutex);
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all -  update tracepoints
 */
void tracepoint_probe_update_all(void)
{
        LIST_HEAD(release_probes);
        struct tp_probes *pos, *next;

        mutex_lock(&tracepoints_mutex);
        if (!need_update) {
                mutex_unlock(&tracepoints_mutex);
                return;
        }
        if (!list_empty(&old_probes))
                list_replace_init(&old_probes, &release_probes);
        need_update = 0;
        mutex_unlock(&tracepoints_mutex);

        tracepoint_update_probes();
        list_for_each_entry_safe(pos, next, &release_probes, u.list) {
                list_del(&pos->u.list);
                call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
        }
}
EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
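/*
 * Typical batched usage (a sketch; the names and probes are hypothetical):
 *
 *        tracepoint_probe_register_noupdate("subsys_event_a", probe_a);
 *        tracepoint_probe_register_noupdate("subsys_event_b", probe_b);
 *        tracepoint_probe_update_all();
 *
 * This connects both probes in a single pass over the tracepoint sections
 * instead of one pass per registration.
 */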

/**
 * tracepoint_get_iter_range - Get the next tracepoint in a given range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Returns the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
int tracepoint_get_iter_range(struct tracepoint **tracepoint,
        struct tracepoint *begin, struct tracepoint *end)
{
        if (!*tracepoint && begin != end) {
                *tracepoint = begin;
                return 1;
        }
        if (*tracepoint >= begin && *tracepoint < end)
                return 1;
        return 0;
}
EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);

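/*
 * Advance @iter to the next valid tracepoint, looking first at the core
 * kernel section and then at module tracepoints; the iterator is reset
 * when no further tracepoint is found.
 */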
static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
        int found = 0;

        /* Core kernel tracepoints */
        if (!iter->module) {
                found = tracepoint_get_iter_range(&iter->tracepoint,
                                __start___tracepoints, __stop___tracepoints);
                if (found)
                        goto end;
        }
        /* tracepoints in modules. */
        found = module_get_iter_tracepoints(iter);
end:
        if (!found)
                tracepoint_iter_reset(iter);
}

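/*
 * Iterator API for walking all known tracepoints: tracepoint_iter_start()
 * positions the iterator on the first tracepoint, tracepoint_iter_next()
 * advances it, tracepoint_iter_stop() is currently a no-op and
 * tracepoint_iter_reset() rewinds the iterator.
 */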
void tracepoint_iter_start(struct tracepoint_iter *iter)
{
        tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
        iter->tracepoint++;
        /*
         * iter->tracepoint may be invalid because we blindly incremented it.
         * Make sure it is valid by walking the tracepoint sections again,
         * moving on to the tracepoints of following modules if necessary.
         */
        tracepoint_get_iter(iter);
}
EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
        iter->module = NULL;
        iter->tracepoint = NULL;
}
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);

#ifdef CONFIG_MODULES

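/*
 * Module notifier: refresh the tracepoints of a module when it is loaded
 * or about to be unloaded, so their state matches the registered probes.
 */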
int tracepoint_module_notify(struct notifier_block *self,
                             unsigned long val, void *data)
{
        struct module *mod = data;

        switch (val) {
        case MODULE_STATE_COMING:
        case MODULE_STATE_GOING:
                tracepoint_update_probe_range(mod->tracepoints,
                        mod->tracepoints + mod->num_tracepoints);
                break;
        }
        return 0;
}

struct notifier_block tracepoint_module_nb = {
        .notifier_call = tracepoint_module_notify,
        .priority = 0,
};

static int init_tracepoints(void)
{
        return register_module_notifier(&tracepoint_module_nb);
}
__initcall(init_tracepoints);

#endif /* CONFIG_MODULES */

#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS

static DEFINE_MUTEX(regfunc_mutex);
static int sys_tracepoint_refcount;

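/*
 * Called when a syscall tracepoint is enabled: on the first user, set
 * TIF_SYSCALL_TRACEPOINT on every thread so the syscall entry/exit path
 * starts calling the syscall tracepoints; further users only bump the
 * refcount.
 */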
void syscall_regfunc(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        mutex_lock(&regfunc_mutex);
        if (!sys_tracepoint_refcount) {
                read_lock_irqsave(&tasklist_lock, flags);
                do_each_thread(g, t) {
                        set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
                } while_each_thread(g, t);
                read_unlock_irqrestore(&tasklist_lock, flags);
        }
        sys_tracepoint_refcount++;
        mutex_unlock(&regfunc_mutex);
}

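/*
 * Counterpart of syscall_regfunc(): when the last syscall tracepoint user
 * goes away, clear TIF_SYSCALL_TRACEPOINT on every thread so syscalls stop
 * taking the tracing path.
 */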
void syscall_unregfunc(void)
{
        unsigned long flags;
        struct task_struct *g, *t;

        mutex_lock(&regfunc_mutex);
        sys_tracepoint_refcount--;
        if (!sys_tracepoint_refcount) {
                read_lock_irqsave(&tasklist_lock, flags);
                do_each_thread(g, t) {
                        clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
                } while_each_thread(g, t);
                read_unlock_irqrestore(&tasklist_lock, flags);
        }
        mutex_unlock(&regfunc_mutex);
}
#endif