pandora: defconfig: update
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index b1e8943..08e043b 100644
@@ -190,6 +190,12 @@ static void update_global_ops(void)
        global_ops.func = func;
 }
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static void update_function_graph_func(void);
+#else
+static inline void update_function_graph_func(void) { }
+#endif
+
 static void update_ftrace_function(void)
 {
        ftrace_func_t func;
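
The hunk above uses the standard config-stub idiom so that update_ftrace_function() and friends can call update_function_graph_func() unconditionally: the real function is declared when CONFIG_FUNCTION_GRAPH_TRACER is set, and an empty static inline takes its place otherwise. A minimal sketch of the same pattern, using a hypothetical CONFIG_FOO and foo_update():

```c
#ifdef CONFIG_FOO
static void foo_update(void);		/* real definition appears later */
#else
static inline void foo_update(void) { }	/* optimized away when off */
#endif

static void some_caller(void)
{
	foo_update();		/* no #ifdef needed at any call site */
}
```
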
@@ -237,6 +243,8 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 {
        struct ftrace_ops **p;
 
+       update_function_graph_func();
+
        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
@@ -259,9 +267,6 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-       if (ftrace_disabled)
-               return -ENODEV;
-
        if (FTRACE_WARN_ON(ops == &global_ops))
                return -EINVAL;
 
@@ -286,13 +291,21 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        return 0;
 }
 
+static void ftrace_sync(struct work_struct *work)
+{
+       /*
+        * This function is just a stub to implement a hard force
+        * of synchronize_sched(). This requires synchronizing
+        * tasks even in userspace and idle.
+        *
+        * Yes, function tracing is rude.
+        */
+}
+
 static int __unregister_ftrace_function(struct ftrace_ops *ops)
 {
        int ret;
 
-       if (ftrace_disabled)
-               return -ENODEV;
-
        if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
                return -EBUSY;
 
@@ -314,13 +327,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
        if (ftrace_enabled)
                update_ftrace_function();
 
-       /*
-        * Dynamic ops may be freed, we must make sure that all
-        * callers are done before leaving this function.
-        */
-       if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-               synchronize_sched();
-
        return 0;
 }
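
Removing the synchronize_sched() from __unregister_ftrace_function() is only safe because ftrace_shutdown() (later in this patch) re-adds the wait with a stronger barrier: it runs the empty ftrace_sync() work item on every CPU. A hedged sketch of that idiom with hypothetical names, assuming only the standard schedule_on_each_cpu() API:

```c
#include <linux/workqueue.h>

static void hard_sync_stub(struct work_struct *work)
{
	/* Intentionally empty: merely running proves this CPU scheduled. */
}

static void hard_sync_all_cpus(void)
{
	/*
	 * schedule_on_each_cpu() queues the stub on every CPU and waits
	 * for all of them to finish, so every CPU has passed through the
	 * scheduler. Unlike synchronize_sched(), this also covers code
	 * running where RCU is not watching.
	 */
	schedule_on_each_cpu(hard_sync_stub);
}
```
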
 
@@ -554,7 +560,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 
        pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
 
-       for (i = 0; i < pages; i++) {
+       for (i = 1; i < pages; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);
                if (!pg->next)
                        goto out_free;
@@ -572,7 +578,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
                free_page(tmp);
        }
 
-       free_page((unsigned long)stat->pages);
        stat->pages = NULL;
        stat->start = NULL;
 
@@ -625,7 +630,7 @@ static int ftrace_profile_init(void)
        int cpu;
        int ret = 0;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                ret = ftrace_profile_init_cpu(cpu);
                if (ret)
                        break;
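
Switching from for_each_online_cpu() to for_each_possible_cpu() closes a CPU-hotplug hole: per-CPU state sized only for the CPUs online at init time is simply missing for a CPU that comes up later. A minimal sketch of the bug class, with hypothetical names:

```c
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static void *percpu_buf[NR_CPUS];

static int init_bufs(void)
{
	int cpu;

	/* for_each_online_cpu(cpu) here would leave NULL entries for
	 * CPUs hot-plugged after initialization */
	for_each_possible_cpu(cpu) {
		percpu_buf[cpu] = (void *)get_zeroed_page(GFP_KERNEL);
		if (!percpu_buf[cpu])
			return -ENOMEM;
	}
	return 0;
}
```
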
@@ -930,6 +935,25 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int ftrace_graph_active;
+#else
+# define ftrace_graph_active 0
+#endif
+
+static loff_t
+ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
+{
+       loff_t ret;
+
+       if (file->f_mode & FMODE_READ)
+               ret = seq_lseek(file, offset, whence);
+       else
+               file->f_pos = ret = 1;
+
+       return ret;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -948,7 +972,7 @@ struct ftrace_func_probe {
 };
 
 enum {
-       FTRACE_ENABLE_CALLS             = (1 << 0),
+       FTRACE_UPDATE_CALLS             = (1 << 0),
        FTRACE_DISABLE_CALLS            = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
        FTRACE_START_FUNC_RET           = (1 << 3),
@@ -1005,6 +1029,11 @@ static struct ftrace_page        *ftrace_pages;
 
 static struct dyn_ftrace *ftrace_free_records;
 
+static bool ftrace_hash_empty(struct ftrace_hash *hash)
+{
+       return !hash || !hash->count;
+}
+
 static struct ftrace_func_entry *
 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 {
@@ -1013,7 +1042,7 @@ ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
        struct hlist_head *hhd;
        struct hlist_node *n;
 
-       if (!hash->count)
+       if (ftrace_hash_empty(hash))
                return NULL;
 
        if (hash->size_bits > 0)
@@ -1157,7 +1186,7 @@ alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
                return NULL;
 
        /* Empty hash? */
-       if (!hash || !hash->count)
+       if (ftrace_hash_empty(hash))
                return new_hash;
 
        size = 1 << hash->size_bits;
@@ -1282,9 +1311,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
        filter_hash = rcu_dereference_raw(ops->filter_hash);
        notrace_hash = rcu_dereference_raw(ops->notrace_hash);
 
-       if ((!filter_hash || !filter_hash->count ||
+       if ((ftrace_hash_empty(filter_hash) ||
             ftrace_lookup_ip(filter_hash, ip)) &&
-           (!notrace_hash || !notrace_hash->count ||
+           (ftrace_hash_empty(notrace_hash) ||
             !ftrace_lookup_ip(notrace_hash, ip)))
                ret = 1;
        else
@@ -1336,7 +1365,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
        if (filter_hash) {
                hash = ops->filter_hash;
                other_hash = ops->notrace_hash;
-               if (!hash || !hash->count)
+               if (ftrace_hash_empty(hash))
                        all = 1;
        } else {
                inc = !inc;
@@ -1346,7 +1375,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                 * If the notrace hash has no items,
                 * then there's nothing to do.
                 */
-               if (hash && !hash->count)
+               if (ftrace_hash_empty(hash))
                        return;
        }
 
@@ -1363,8 +1392,8 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
                                match = 1;
                } else {
-                       in_hash = hash && !!ftrace_lookup_ip(hash, rec->ip);
-                       in_other_hash = other_hash && !!ftrace_lookup_ip(other_hash, rec->ip);
+                       in_hash = !!ftrace_lookup_ip(hash, rec->ip);
+                       in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
 
                        /*
                         *
@@ -1372,7 +1401,7 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        if (filter_hash && in_hash && !in_other_hash)
                                match = 1;
                        else if (!filter_hash && in_hash &&
-                                (in_other_hash || !other_hash->count))
+                                (in_other_hash || ftrace_hash_empty(other_hash)))
                                match = 1;
                }
                if (!match)
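
The filter/notrace matching rules above are easier to audit when restated as a pure function. The helper below is illustrative only (hypothetical name, not part of the patch) and mirrors the two branches exactly:

```c
#include <linux/types.h>

static bool rec_needs_callback(bool updating_filter_hash, bool in_hash,
			       bool in_other_hash, bool other_hash_empty)
{
	if (updating_filter_hash)
		/* traced iff in the filter and not marked notrace */
		return in_hash && !in_other_hash;

	/* notrace update: only touch records the filter side traces */
	return in_hash && (in_other_hash || other_hash_empty);
}
```
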
@@ -1519,7 +1548,7 @@ int ftrace_text_reserved(void *start, void *end)
 
 
 static int
-__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
+__ftrace_replace_code(struct dyn_ftrace *rec, int update)
 {
        unsigned long ftrace_addr;
        unsigned long flag = 0UL;
@@ -1527,17 +1556,17 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        ftrace_addr = (unsigned long)FTRACE_ADDR;
 
        /*
-        * If we are enabling tracing:
+        * If we are updating calls:
         *
         *   If the record has a ref count, then we need to enable it
         *   because someone is using it.
         *
         *   Otherwise we make sure it's disabled.
         *
-        * If we are disabling tracing, then disable all records that
+        * If we are disabling calls, then disable all records that
         * are enabled.
         */
-       if (enable && (rec->flags & ~FTRACE_FL_MASK))
+       if (update && (rec->flags & ~FTRACE_FL_MASK))
                flag = FTRACE_FL_ENABLED;
 
        /* If the state of this record hasn't changed, then do nothing */
@@ -1553,7 +1582,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
        return ftrace_make_nop(NULL, rec, ftrace_addr);
 }
 
-static void ftrace_replace_code(int enable)
+static void ftrace_replace_code(int update)
 {
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
@@ -1567,7 +1596,7 @@ static void ftrace_replace_code(int enable)
                if (rec->flags & FTRACE_FL_FREE)
                        continue;
 
-               failed = __ftrace_replace_code(rec, enable);
+               failed = __ftrace_replace_code(rec, update);
                if (failed) {
                        ftrace_bug(failed, rec->ip);
                        /* Stop processing */
@@ -1623,7 +1652,7 @@ static int __ftrace_modify_code(void *data)
         */
        function_trace_stop++;
 
-       if (*command & FTRACE_ENABLE_CALLS)
+       if (*command & FTRACE_UPDATE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);
@@ -1686,12 +1715,17 @@ static void ftrace_startup_enable(int command)
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
        bool hash_enable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       ret = __register_ftrace_function(ops);
+       if (ret)
+               return ret;
+
        ftrace_start_up++;
-       command |= FTRACE_ENABLE_CALLS;
+       command |= FTRACE_UPDATE_CALLS;
 
        /* ops marked global share the filter hashes */
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
@@ -1711,12 +1745,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
        return 0;
 }
 
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 {
        bool hash_disable = true;
+       int ret;
 
        if (unlikely(ftrace_disabled))
-               return;
+               return -ENODEV;
+
+       ret = __unregister_ftrace_function(ops);
+       if (ret)
+               return ret;
 
        ftrace_start_up--;
        /*
@@ -1743,8 +1782,7 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
        if (ops != &global_ops || !global_start_up)
                ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
-       if (!ftrace_start_up)
-               command |= FTRACE_DISABLE_CALLS;
+       command |= FTRACE_UPDATE_CALLS;
 
        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
@@ -1752,43 +1790,119 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
        }
 
        if (!command || !ftrace_enabled)
-               return;
+               return 0;
 
        ftrace_run_update_code(command);
+
+       /*
+        * Dynamic ops may be freed, we must make sure that all
+        * callers are done before leaving this function.
+        * The same goes for freeing the per_cpu data of the control
+        * ops.
+        *
+        * Again, normal synchronize_sched() is not good enough.
+        * We need to do a hard force of sched synchronization.
+        * This is because we use preempt_disable() to do RCU, but
+        * the function tracers can be called where RCU is not watching
+        * (like before user_exit()). We can not rely on the RCU
+        * infrastructure to do the synchronization, thus we must do it
+        * ourselves.
+        */
+       if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+               schedule_on_each_cpu(ftrace_sync);
+
+       return 0;
 }
 
 static void ftrace_startup_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftrace_start_up is true if we want ftrace running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_UPDATE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_START_FUNC_RET;
+               ftrace_startup_enable(command);
+       }
 }
 
 static void ftrace_shutdown_sysctl(void)
 {
+       int command;
+
        if (unlikely(ftrace_disabled))
                return;
 
        /* ftrace_start_up is true if ftrace is running */
-       if (ftrace_start_up)
-               ftrace_run_update_code(FTRACE_DISABLE_CALLS);
+       if (ftrace_start_up) {
+               command = FTRACE_DISABLE_CALLS;
+               if (ftrace_graph_active)
+                       command |= FTRACE_STOP_FUNC_RET;
+               ftrace_run_update_code(command);
+       }
 }
 
 static cycle_t         ftrace_update_time;
 static unsigned long   ftrace_update_cnt;
 unsigned long          ftrace_update_tot_cnt;
 
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
 {
-       struct ftrace_hash *hash;
+       /*
+        * An empty filter_hash defaults to tracing all of a module's functions,
+        * but the notrace hash requires testing individual module functions.
+        */
+       return ftrace_hash_empty(ops->filter_hash) &&
+               ftrace_hash_empty(ops->notrace_hash);
+}
 
-       hash = ops->filter_hash;
-       return !!(!hash || !hash->count);
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+       /* If ops isn't enabled, ignore it */
+       if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+               return 0;
+
+       /* If ops traces all mods, we already accounted for it */
+       if (ops_traces_mod(ops))
+               return 0;
+
+       /* The function must be in the filter */
+       if (!ftrace_hash_empty(ops->filter_hash) &&
+           !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+               return 0;
+
+       /* If in notrace hash, we ignore it too */
+       if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+               return 0;
+
+       return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *ops;
+       int cnt = 0;
+
+       for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+               if (ops_references_rec(ops, rec))
+                       cnt++;
+       }
+
+       return cnt;
 }
 
 static int ftrace_update_code(struct module *mod)
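
referenced_filters() exists so that functions in a freshly loaded module get a correct initial enable count even when the enabled ops use filters. A hedged usage sketch from the registration side (hypothetical callback and filter choice, assuming this kernel generation's ops-taking ftrace_set_filter()):

```c
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/string.h>

static void my_func_hit(unsigned long ip, unsigned long parent_ip)
{
	/* runs on every traced call of a filtered function */
}

static struct ftrace_ops my_ops = {
	.func = my_func_hit,
};

static int __init my_tracer_init(void)
{
	/*
	 * With a non-empty filter, ops_traces_mod() is false for my_ops,
	 * so a new module's functions are ref-counted at load time only
	 * if referenced_filters() finds them in my_ops's filter hash.
	 */
	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
	return register_ftrace_function(&my_ops);
}
```
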
@@ -1796,6 +1910,7 @@ static int ftrace_update_code(struct module *mod)
        struct dyn_ftrace *p;
        cycle_t start, stop;
        unsigned long ref = 0;
+       bool test = false;
 
        /*
         * When adding a module, we need to check if tracers are
@@ -1808,9 +1923,12 @@ static int ftrace_update_code(struct module *mod)
 
                for (ops = ftrace_ops_list;
                     ops != &ftrace_list_end; ops = ops->next) {
-                       if (ops->flags & FTRACE_OPS_FL_ENABLED &&
-                           ops_traces_mod(ops))
-                               ref++;
+                       if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+                               if (ops_traces_mod(ops))
+                                       ref++;
+                               else
+                                       test = true;
+                       }
                }
        }
 
@@ -1818,6 +1936,7 @@ static int ftrace_update_code(struct module *mod)
        ftrace_update_cnt = 0;
 
        while (ftrace_new_addrs) {
+               int cnt = ref;
 
                /* If something went wrong, bail without enabling anything */
                if (unlikely(ftrace_disabled))
@@ -1825,7 +1944,9 @@ static int ftrace_update_code(struct module *mod)
 
                p = ftrace_new_addrs;
                ftrace_new_addrs = p->newlist;
-               p->flags = ref;
+               if (test)
+                       cnt += referenced_filters(p);
+               p->flags = cnt;
 
                /*
                 * Do the initial record conversion from mcount jump
@@ -1848,7 +1969,7 @@ static int ftrace_update_code(struct module *mod)
                 * conversion puts the module to the correct state, thus
                 * passing the ftrace_make_call check.
                 */
-               if (ftrace_start_up && ref) {
+               if (ftrace_start_up && cnt) {
                        int failed = __ftrace_replace_code(p, 1);
                        if (failed) {
                                ftrace_bug(failed, p->ip);
@@ -2075,7 +2196,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
 {
        iter->pos = 0;
        iter->func_pos = 0;
-       iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+       iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
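
The one-character change in reset_iter_read() fixes a real bug: FTRACE_ITER_PRINTALL and FTRACE_ITER_HASH occupy different bits, so ANDing them yields 0 and the old statement cleared nothing. A standalone illustration:

```c
#include <stdio.h>

#define FLAG_A (1 << 0)
#define FLAG_B (1 << 1)

int main(void)
{
	unsigned int flags = FLAG_A | FLAG_B;

	flags &= ~(FLAG_A & FLAG_B);	/* A & B == 0, ~0 is all ones: no-op */
	printf("%#x\n", flags);		/* still 0x3: both flags survive */

	flags &= ~(FLAG_A | FLAG_B);	/* the fix: clears both bits */
	printf("%#x\n", flags);		/* 0 */
	return 0;
}
```
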
@@ -2101,7 +2222,8 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
-       if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
+       if (iter->flags & FTRACE_ITER_FILTER &&
+           ftrace_hash_empty(ops->filter_hash)) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
@@ -2317,19 +2439,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
                                 inode, file);
 }
 
-static loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
-{
-       loff_t ret;
-
-       if (file->f_mode & FMODE_READ)
-               ret = seq_lseek(file, offset, origin);
-       else
-               file->f_pos = ret = 1;
-
-       return ret;
-}
-
 static int ftrace_match(char *str, char *regex, int len, int type)
 {
        int matched = 0;
@@ -2566,16 +2675,13 @@ static void __enable_ftrace_function_probe(void)
        if (i == FTRACE_FUNC_HASHSIZE)
                return;
 
-       ret = __register_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ret = ftrace_startup(&trace_probe_ops, 0);
+       ret = ftrace_startup(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 1;
 }
 
 static void __disable_ftrace_function_probe(void)
 {
-       int ret;
        int i;
 
        if (!ftrace_probe_registered)
@@ -2588,9 +2694,7 @@ static void __disable_ftrace_function_probe(void)
        }
 
        /* no more funcs left */
-       ret = __unregister_ftrace_function(&trace_probe_ops);
-       if (!ret)
-               ftrace_shutdown(&trace_probe_ops, 0);
+       ftrace_shutdown(&trace_probe_ops, 0);
 
        ftrace_probe_registered = 0;
 }
@@ -2726,8 +2830,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
                                        continue;
                        }
 
-                       hlist_del(&entry->node);
-                       call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+                       hlist_del_rcu(&entry->node);
+                       call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
                }
        }
        __disable_ftrace_function_probe();
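
The switch to hlist_del_rcu() and call_rcu_sched() matches the probe's read side, which runs under preempt_disable() rather than rcu_read_lock(); only the sched flavor of RCU is guaranteed to wait out such readers before freeing. A hedged sketch of the reader pattern being protected (hypothetical walker, written against this kernel's 4-argument hlist_for_each_entry_rcu()):

```c
static void walk_probes(struct hlist_head *hhd)
{
	struct ftrace_func_probe *entry;
	struct hlist_node *n;

	preempt_disable();	/* RCU-sched read-side critical section */
	hlist_for_each_entry_rcu(entry, n, hhd, node) {
		/* entry cannot be freed until the call_rcu_sched()
		 * grace period ends, i.e. after preempt_enable() */
	}
	preempt_enable();
}
```
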
@@ -2919,7 +3023,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
        if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
            && ftrace_enabled)
-               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+               ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 
        mutex_unlock(&ftrace_lock);
 
@@ -3107,7 +3211,7 @@ ftrace_regex_release(struct inode *inode, struct file *file)
                                       orig_hash, iter->hash);
                if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
                    && ftrace_enabled)
-                       ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+                       ftrace_run_update_code(FTRACE_UPDATE_CALLS);
 
                mutex_unlock(&ftrace_lock);
        }
@@ -3136,7 +3240,7 @@ static const struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = seq_read,
        .write = ftrace_filter_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3144,7 +3248,7 @@ static const struct file_operations ftrace_notrace_fops = {
        .open = ftrace_notrace_open,
        .read = seq_read,
        .write = ftrace_notrace_write,
-       .llseek = ftrace_regex_lseek,
+       .llseek = ftrace_filter_lseek,
        .release = ftrace_regex_release,
 };
 
@@ -3305,7 +3409,8 @@ out:
        if (fail)
                return -EINVAL;
 
-       ftrace_graph_filter_enabled = 1;
+       ftrace_graph_filter_enabled = !!(*idx);
+
        return 0;
 }
 
@@ -3352,8 +3457,8 @@ static const struct file_operations ftrace_graph_fops = {
        .open           = ftrace_graph_open,
        .read           = seq_read,
        .write          = ftrace_graph_write,
+       .llseek         = ftrace_filter_lseek,
        .release        = ftrace_graph_release,
-       .llseek         = seq_lseek,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -3455,35 +3560,34 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
+{
+       ftrace_init_module(mod, mod->ftrace_callsites,
+                          mod->ftrace_callsites +
+                          mod->num_ftrace_callsites);
+}
+
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
 {
        struct module *mod = data;
 
-       switch (val) {
-       case MODULE_STATE_COMING:
-               ftrace_init_module(mod, mod->ftrace_callsites,
-                                  mod->ftrace_callsites +
-                                  mod->num_ftrace_callsites);
-               break;
-       case MODULE_STATE_GOING:
+       if (val == MODULE_STATE_GOING)
                ftrace_release_mod(mod);
-               break;
-       }
 
        return 0;
 }
 #else
-static int ftrace_module_notify(struct notifier_block *self,
-                               unsigned long val, void *data)
+static int ftrace_module_notify_exit(struct notifier_block *self,
+                                    unsigned long val, void *data)
 {
        return 0;
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_nb = {
-       .notifier_call = ftrace_module_notify,
-       .priority = 0,
+struct notifier_block ftrace_module_exit_nb = {
+       .notifier_call = ftrace_module_notify_exit,
+       .priority = INT_MIN,    /* Run after anything that can remove kprobes */
 };
 
 extern unsigned long __start_mcount_loc[];
@@ -3517,9 +3621,9 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_nb);
+       ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
-               pr_warning("Failed to register trace ftrace module notifier\n");
+               pr_warning("Failed to register trace ftrace module exit notifier\n");
 
        set_ftrace_early_filters();
 
@@ -3544,12 +3648,15 @@ device_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
 /* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command)                  \
-       ({                                              \
-               (ops)->flags |= FTRACE_OPS_FL_ENABLED;  \
-               0;                                      \
+# define ftrace_startup(ops, command)                                  \
+       ({                                                              \
+               int ___ret = __register_ftrace_function(ops);           \
+               if (!___ret)                                            \
+                       (ops)->flags |= FTRACE_OPS_FL_ENABLED;          \
+               ___ret;                                                 \
        })
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
 # define ftrace_startup_sysctl()       do { } while (0)
 # define ftrace_shutdown_sysctl()      do { } while (0)
 
@@ -3827,7 +3934,7 @@ static const struct file_operations ftrace_pid_fops = {
        .open           = ftrace_pid_open,
        .write          = ftrace_pid_write,
        .read           = seq_read,
-       .llseek         = seq_lseek,
+       .llseek         = ftrace_filter_lseek,
        .release        = ftrace_pid_release,
 };
 
@@ -3889,15 +3996,8 @@ int register_ftrace_function(struct ftrace_ops *ops)
 
        mutex_lock(&ftrace_lock);
 
-       if (unlikely(ftrace_disabled))
-               goto out_unlock;
-
-       ret = __register_ftrace_function(ops);
-       if (!ret)
-               ret = ftrace_startup(ops, 0);
-
+       ret = ftrace_startup(ops, 0);
 
- out_unlock:
        mutex_unlock(&ftrace_lock);
        return ret;
 }
@@ -3914,9 +4014,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
        int ret;
 
        mutex_lock(&ftrace_lock);
-       ret = __unregister_ftrace_function(ops);
-       if (!ret)
-               ftrace_shutdown(ops, 0);
+       ret = ftrace_shutdown(ops, 0);
        mutex_unlock(&ftrace_lock);
 
        return ret;
@@ -3944,15 +4042,11 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
        if (ftrace_enabled) {
 
-               ftrace_startup_sysctl();
-
                /* we are starting ftrace again */
-               if (ftrace_ops_list != &ftrace_list_end) {
-                       if (ftrace_ops_list->next == &ftrace_list_end)
-                               ftrace_trace_function = ftrace_ops_list->func;
-                       else
-                               ftrace_trace_function = ftrace_ops_list_func;
-               }
+               if (ftrace_ops_list != &ftrace_list_end)
+                       update_ftrace_function();
+
+               ftrace_startup_sysctl();
 
        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
@@ -3968,7 +4062,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 
-static int ftrace_graph_active;
 static struct notifier_block ftrace_suspend_notifier;
 
 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
@@ -3980,6 +4073,7 @@ int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
 trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
+static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
 
 /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
@@ -4114,6 +4208,36 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
        return NOTIFY_DONE;
 }
 
+/* Just a placeholder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+       .func           = ftrace_stub,
+       .flags          = FTRACE_OPS_FL_GLOBAL,
+};
+
+static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
+{
+       if (!ftrace_ops_test(&global_ops, trace->func))
+               return 0;
+       return __ftrace_graph_entry(trace);
+}
+
+/*
+ * The function graph tracer should only trace the functions defined
+ * by set_ftrace_filter and set_ftrace_notrace. If another function
+ * tracer's ops is registered, the graph tracer must test each function
+ * against the global ops' filters instead of tracing every function
+ * that any registered ftrace_ops matches.
+ */
+static void update_function_graph_func(void)
+{
+       if (ftrace_ops_list == &ftrace_list_end ||
+           (ftrace_ops_list == &global_ops &&
+            global_ops.next == &ftrace_list_end))
+               ftrace_graph_entry = __ftrace_graph_entry;
+       else
+               ftrace_graph_entry = ftrace_graph_entry_test;
+}
+
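
The three pointers form a splice point: the graph entry path always jumps through ftrace_graph_entry, and update_function_graph_func() decides whether that points at the real handler or at the filtering trampoline. A standalone sketch of the interposition pattern in plain C (hypothetical names; `ip & 1` stands in for ftrace_ops_test()):

```c
#include <stdio.h>

static int real_entry(int ip)		/* plays __ftrace_graph_entry */
{
	printf("graph trace %d\n", ip);
	return 1;
}

static int (*saved_entry)(int) = real_entry;

static int entry_test(int ip)		/* plays ftrace_graph_entry_test */
{
	if (ip & 1)			/* stand-in for the filter test */
		return 0;		/* rejected, real handler never runs */
	return saved_entry(ip);
}

/*
 * The hot path always calls through one pointer; filtering is spliced
 * in or out by repointing it, never by branching in the fast path.
 */
static int (*entry)(int) = entry_test;

int main(void)
{
	entry(1);	/* filtered out */
	entry(2);	/* reaches real_entry */
	return 0;
}
```
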
 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                        trace_func_graph_ent_t entryfunc)
 {
@@ -4138,9 +4262,18 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        }
 
        ftrace_graph_return = retfunc;
-       ftrace_graph_entry = entryfunc;
 
-       ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+       /*
+        * Update the indirect function to the entryfunc, and the
+        * function that gets called to the entry_test first. Then
+        * call the update fgraph entry function to determine if
+        * the entryfunc should be called directly or not.
+        */
+       __ftrace_graph_entry = entryfunc;
+       ftrace_graph_entry = ftrace_graph_entry_test;
+       update_function_graph_func();
+
+       ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
 
 out:
        mutex_unlock(&ftrace_lock);
@@ -4157,7 +4290,8 @@ void unregister_ftrace_graph(void)
        ftrace_graph_active--;
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
-       ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+       __ftrace_graph_entry = ftrace_graph_entry_stub;
+       ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);