Merge tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
author	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 4 Aug 2014 18:50:00 +0000 (11:50 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Mon, 4 Aug 2014 18:50:00 +0000 (11:50 -0700)
Pull tracing updates from Steven Rostedt:
 "This pull request has a lot of work done.  The main thing is the
  changes to the ftrace function callback infrastructure.  It's
  introducing a way to allow different functions to call directly
  different trampolines instead of all calling the same "mcount" one.

  The only user of this for now is the function graph tracer, which
  has always had its own trampoline; previously the function tracer
  trampoline was called (and did basically nothing), and then the
  function graph tracer trampoline was called.  The difference now is
  that the function graph tracer trampoline can be called directly if
  a function is being traced only by the function graph tracer.  If
  function tracing is also happening on the same function, the old way
  is still used.
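
  In code terms, the per-record decision matches ftrace_get_addr_new()
  in the ftrace.c diff below; a minimal sketch of the idea (simplified,
  and using a hypothetical lookup_single_ops() helper):

	/* sketch: pick the address patched into a function's mcount
	 * call site -- not the exact kernel code */
	unsigned long pick_call_target(struct dyn_ftrace *rec)
	{
		/* only one callback traces this function and its
		 * ftrace_ops has its own trampoline: call it directly */
		if (rec->flags & FTRACE_FL_TRAMP)
			return lookup_single_ops(rec)->trampoline;

		/* otherwise fall back to the shared trampoline that
		 * iterates over all registered ftrace_ops */
		return (unsigned long)FTRACE_ADDR;
	}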

  The accounting for this takes up more memory when function graph
  tracing is activated, as it needs to keep track of which functions
  it uses.  I have a new way that won't take as much memory, but it's
  not ready for this merge window and will have to wait for the next
  one.

  Another big change was the removal of the ftrace_start/stop() calls
  that were used by the suspend/resume code to stop function tracing
  when entering the suspend and resume paths.  The stop of ftrace was
  done because there was some function that would crash the system if
  one called smp_processor_id()!  The stop/start was a big hammer to
  solve the issue at the time, which was when ftrace was first
  introduced into Linux.  Now ftrace has better infrastructure to
  debug such issues; I found the problem function, labeled it with
  "notrace", and function tracing can now safely be activated all the
  way down into the guts of suspend and resume.
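
  The fix itself is just the kernel's notrace function attribute (from
  <linux/compiler.h>); a sketch with a made-up function name, since the
  actual offending function is not named above:

	/* sketch: keep the mcount/ftrace hook out of a function that
	 * runs while smp_processor_id() is not yet safe to call */
	static int notrace resume_cpu_fixup(void)
	{
		return 0;
	}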

  Other changes include cleanups of the uprobe code, cleanups of the
  trace_seq() code, and various other small fixes and cleanups to
  ftrace and tracing"

* tag 'trace-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (57 commits)
  ftrace: Add warning if tramp hash does not match nr_trampolines
  ftrace: Fix trampoline hash update check on rec->flags
  ring-buffer: Use rb_page_size() instead of open coded head_page size
  ftrace: Rename ftrace_ops field from trampolines to nr_trampolines
  tracing: Convert local function_graph functions to static
  ftrace: Do not copy old hash when resetting
  tracing: let user specify tracing_thresh after selecting function_graph
  ring-buffer: Always run per-cpu ring buffer resize with schedule_work_on()
  tracing: Remove function_trace_stop and HAVE_FUNCTION_TRACE_MCOUNT_TEST
  s390/ftrace: remove check of obsolete variable function_trace_stop
  arm64, ftrace: Remove check of obsolete variable function_trace_stop
  Blackfin: ftrace: Remove check of obsolete variable function_trace_stop
  metag: ftrace: Remove check of obsolete variable function_trace_stop
  microblaze: ftrace: Remove check of obsolete variable function_trace_stop
  MIPS: ftrace: Remove check of obsolete variable function_trace_stop
  parisc: ftrace: Remove check of obsolete variable function_trace_stop
  sh: ftrace: Remove check of obsolete variable function_trace_stop
  sparc64,ftrace: Remove check of obsolete variable function_trace_stop
  tile: ftrace: Remove check of obsolete variable function_trace_stop
  ftrace: x86: Remove check of obsolete variable function_trace_stop
  ...

Documentation/kernel-parameters.txt
arch/sparc/Kconfig
arch/x86/Kconfig
arch/x86/kernel/entry_32.S
kernel/power/suspend.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_events.c

diff --combined Documentation/kernel-parameters.txt
@@@ -1097,6 -1097,12 +1097,12 @@@ bytes respectively. Such letter suffixe
                        that can be changed at run time by the
                        set_graph_function file in the debugfs tracing directory.
  
+       ftrace_graph_notrace=[function-list]
+                       [FTRACE] Do not trace from the functions specified in
+                       function-list.  This list is a comma separated list of
+                       functions that can be changed at run time by the
+                       set_graph_notrace file in the debugfs tracing directory.
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
                        leaf rcu_node structure.  Useful for very large
                        systems.
  
 +      rcutree.jiffies_till_sched_qs= [KNL]
 +                      Set required age in jiffies for a
 +                      given grace period before RCU starts
 +                      soliciting quiescent-state help from
 +                      rcu_note_context_switch().
 +
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
                        first attempt to force quiescent states.
                        the allocated input device; If set to 0, video driver
                        will only send out the event without touching backlight
                        brightness level.
 -                      default: 0
 +                      default: 1
  
        virtio_mmio.device=
                        [VMMIO] Memory mapped virtio (platform) device.
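
The ftrace_graph_notrace= parameter documented above mirrors the
set_graph_notrace debugfs file.  For example (function names picked
only for illustration):

	ftrace_graph_notrace=kfree,kmem_cache_free

on the kernel command line, or equivalently at run time:

	echo kfree > /sys/kernel/debug/tracing/set_graph_notrace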
diff --combined arch/sparc/Kconfig
@@@ -55,7 -55,6 +55,6 @@@ config SPARC6
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_RCU_TABLE_FREE if SMP
@@@ -78,7 -77,6 +77,7 @@@
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
        select HAVE_ARCH_AUDITSYSCALL
 +      select ARCH_SUPPORTS_ATOMIC_RMW
  
  config ARCH_DEFCONFIG
        string
diff --combined arch/x86/Kconfig
@@@ -54,7 -54,6 +54,6 @@@ config X8
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_SYSCALL_TRACEPOINTS
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_KVM
        select HAVE_CC_STACKPROTECTOR
        select GENERIC_CPU_AUTOPROBE
        select HAVE_ARCH_AUDITSYSCALL
 +      select ARCH_SUPPORTS_ATOMIC_RMW
  
  config INSTRUCTION_DECODER
        def_bool y
diff --combined arch/x86/kernel/entry_32.S
@@@ -425,8 -425,8 +425,8 @@@ sysenter_do_call
        cmpl $(NR_syscalls), %eax
        jae sysenter_badsys
        call *sys_call_table(,%eax,4)
 -      movl %eax,PT_EAX(%esp)
  sysenter_after_call:
 +      movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
@@@ -502,7 -502,6 +502,7 @@@ ENTRY(system_call
        jae syscall_badsys
  syscall_call:
        call *sys_call_table(,%eax,4)
 +syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
  syscall_exit:
        LOCKDEP_SYS_EXIT
@@@ -676,12 -675,12 +676,12 @@@ syscall_fault
  END(syscall_fault)
  
  syscall_badsys:
 -      movl $-ENOSYS,PT_EAX(%esp)
 -      jmp syscall_exit
 +      movl $-ENOSYS,%eax
 +      jmp syscall_after_call
  END(syscall_badsys)
  
  sysenter_badsys:
 -      movl $-ENOSYS,PT_EAX(%esp)
 +      movl $-ENOSYS,%eax
        jmp sysenter_after_call
  END(syscall_badsys)
        CFI_ENDPROC
@@@ -1059,9 -1058,6 +1059,6 @@@ ENTRY(mcount
  END(mcount)
  
  ENTRY(ftrace_caller)
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
        pushl %eax
        pushl %ecx
        pushl %edx
@@@ -1093,8 -1089,6 +1090,6 @@@ END(ftrace_caller
  
  ENTRY(ftrace_regs_caller)
        pushf   /* push flags before compare (in cs location) */
-       cmpl $0, function_trace_stop
-       jne ftrace_restore_flags
  
        /*
         * i386 does not save SS and ESP when coming from kernel.
@@@ -1153,7 -1147,6 +1148,6 @@@ GLOBAL(ftrace_regs_call
        popf                    /* Pop flags at end (no addl to corrupt flags) */
        jmp ftrace_ret
  
- ftrace_restore_flags:
        popf
        jmp  ftrace_stub
  #else /* ! CONFIG_DYNAMIC_FTRACE */
@@@ -1162,9 -1155,6 +1156,6 @@@ ENTRY(mcount
        cmpl $__PAGE_OFFSET, %esp
        jb ftrace_stub          /* Paging not enabled yet? */
  
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
diff --combined kernel/power/suspend.c
@@@ -248,7 -248,6 +248,6 @@@ static int suspend_enter(suspend_state_
                goto Platform_wake;
        }
  
-       ftrace_stop();
        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;
  
   Enable_cpus:
        enable_nonboot_cpus();
-       ftrace_start();
  
   Platform_wake:
        if (need_suspend_ops(state) && suspend_ops->wake)
@@@ -306,7 -304,7 +304,7 @@@ int suspend_devices_and_enter(suspend_s
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
 -      } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
 +      } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
                error = freeze_ops->begin();
                if (error)
                        goto Close;
   Close:
        if (need_suspend_ops(state) && suspend_ops->end)
                suspend_ops->end();
 -      else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
 +      else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
  
        return error;
diff --combined kernel/trace/ftrace.c
@@@ -80,9 -80,6 +80,6 @@@ static struct ftrace_ops ftrace_list_en
  int ftrace_enabled __read_mostly;
  static int last_ftrace_enabled;
  
- /* Quick disabling of function tracer. */
- int function_trace_stop __read_mostly;
  /* Current function tracing op */
  struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
  /* What to set function_trace_op to */
@@@ -265,12 -262,12 +262,12 @@@ static void update_ftrace_function(void
                func = ftrace_ops_list_func;
        }
  
 +      update_function_graph_func();
 +
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
  
 -      update_function_graph_func();
 -
        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
@@@ -1042,6 -1039,8 +1039,8 @@@ static struct pid * const ftrace_swappe
  
  #ifdef CONFIG_DYNAMIC_FTRACE
  
+ static struct ftrace_ops *removed_ops;
  #ifndef CONFIG_FTRACE_MCOUNT_RECORD
  # error Dynamic ftrace depends on MCOUNT_RECORD
  #endif
@@@ -1304,25 -1303,15 +1303,15 @@@ ftrace_hash_move(struct ftrace_ops *ops
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
-       int ret;
        int i;
  
-       /*
-        * Remove the current set, update the hash and add
-        * them back.
-        */
-       ftrace_hash_rec_disable(ops, enable);
        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
-               free_ftrace_hash_rcu(*dst);
-               rcu_assign_pointer(*dst, EMPTY_HASH);
-               /* still need to update the function records */
-               ret = 0;
-               goto out;
+               new_hash = EMPTY_HASH;
+               goto update;
        }
  
        /*
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;
  
-       ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
-               goto out;
+               return -ENOMEM;
  
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
                }
        }
  
+ update:
+       /*
+        * Remove the current set, update the hash and add
+        * them back.
+        */
+       ftrace_hash_rec_disable(ops, enable);
        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);
  
-       ret = 0;
-  out:
-       /*
-        * Enable regardless of ret:
-        *  On success, we enable the new hash.
-        *  On failure, we re-enable the original hash.
-        */
        ftrace_hash_rec_enable(ops, enable);
  
-       return ret;
+       return 0;
  }
  
  /*
@@@ -1492,6 -1480,53 +1480,53 @@@ int ftrace_text_reserved(const void *st
        return (int)!!ret;
  }
  
+ /* Test if ops registered to this rec needs regs */
+ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+ {
+       struct ftrace_ops *ops;
+       bool keep_regs = false;
+       for (ops = ftrace_ops_list;
+            ops != &ftrace_list_end; ops = ops->next) {
+               /* pass rec in as regs to have non-NULL val */
+               if (ftrace_ops_test(ops, rec->ip, rec)) {
+                       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               keep_regs = true;
+                               break;
+                       }
+               }
+       }
+       return  keep_regs;
+ }
+ static void ftrace_remove_tramp(struct ftrace_ops *ops,
+                               struct dyn_ftrace *rec)
+ {
+       struct ftrace_func_entry *entry;
+       entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
+       if (!entry)
+               return;
+       /*
+        * The tramp_hash entry will be removed at time
+        * of update.
+        */
+       ops->nr_trampolines--;
+       rec->flags &= ~FTRACE_FL_TRAMP;
+ }
+ static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+ {
+       struct ftrace_ops *op;
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->nr_trampolines)
+                       ftrace_remove_tramp(op, rec);
+       } while_for_each_ftrace_op(op);
+ }
  static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
  
                if (inc) {
                        rec->flags++;
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return;
+                       /*
+                        * If there's only a single callback registered to a
+                        * function, and the ops has a trampoline registered
+                        * for it, then we can call it directly.
+                        */
+                       if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+                               rec->flags |= FTRACE_FL_TRAMP;
+                               ops->nr_trampolines++;
+                       } else {
+                               /*
+                                * If we are adding another function callback
+                                * to this function, and the previous had a
+                                * trampoline used, then we need to go back to
+                                * the default trampoline.
+                                */
+                               rec->flags &= ~FTRACE_FL_TRAMP;
+                               /* remove trampolines from any ops for this rec */
+                               ftrace_clear_tramps(rec);
+                       }
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
                                return;
                        rec->flags--;
+                       if (ops->trampoline && !ftrace_rec_count(rec))
+                               ftrace_remove_tramp(ops, rec);
+                       /*
+                        * If the rec had REGS enabled and the ops that is
+                        * being removed had REGS set, then see if there is
+                        * still any ops for this record that wants regs.
+                        * If not, we can stop recording them.
+                        */
+                       if (ftrace_rec_count(rec) > 0 &&
+                           rec->flags & FTRACE_FL_REGS &&
+                           ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               if (!test_rec_ops_needs_regs(rec))
+                                       rec->flags &= ~FTRACE_FL_REGS;
+                       }
+                       /*
+                        * flags will be cleared in ftrace_check_record()
+                        * if rec count is zero.
+                        */
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
@@@ -1668,17 -1746,23 +1746,23 @@@ static int ftrace_check_record(struct d
         * If we are disabling calls, then disable all records that
         * are enabled.
         */
-       if (enable && (rec->flags & ~FTRACE_FL_MASK))
+       if (enable && ftrace_rec_count(rec))
                flag = FTRACE_FL_ENABLED;
  
        /*
-        * If enabling and the REGS flag does not match the REGS_EN, then
-        * do not ignore this record. Set flags to fail the compare against
-        * ENABLED.
+        * If enabling and the REGS flag does not match the REGS_EN, or
+        * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
+        * this record. Set flags to fail the compare against ENABLED.
         */
-       if (flag &&
-           (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
-               flag |= FTRACE_FL_REGS;
+       if (flag) {
+               if (!(rec->flags & FTRACE_FL_REGS) != 
+                   !(rec->flags & FTRACE_FL_REGS_EN))
+                       flag |= FTRACE_FL_REGS;
+               if (!(rec->flags & FTRACE_FL_TRAMP) != 
+                   !(rec->flags & FTRACE_FL_TRAMP_EN))
+                       flag |= FTRACE_FL_TRAMP;
+       }
  
        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
                                else
                                        rec->flags &= ~FTRACE_FL_REGS_EN;
                        }
+                       if (flag & FTRACE_FL_TRAMP) {
+                               if (rec->flags & FTRACE_FL_TRAMP)
+                                       rec->flags |= FTRACE_FL_TRAMP_EN;
+                               else
+                                       rec->flags &= ~FTRACE_FL_TRAMP_EN;
+                       }
                }
  
                /*
                 * Otherwise,
                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
                 *   from the save regs, to a non-save regs function or
-                *   vice versa.
+                *   vice versa, or from a trampoline call.
                 */
                if (flag & FTRACE_FL_ENABLED)
                        return FTRACE_UPDATE_MAKE_CALL;
  
        if (update) {
                /* If there's no more users, clear all flags */
-               if (!(rec->flags & ~FTRACE_FL_MASK))
+               if (!ftrace_rec_count(rec))
                        rec->flags = 0;
                else
                        /* Just disable the record (keep REGS state) */
@@@ -1751,6 -1841,43 +1841,43 @@@ int ftrace_test_record(struct dyn_ftrac
        return ftrace_check_record(rec, enable, 0);
  }
  
+ static struct ftrace_ops *
+ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
+ {
+       struct ftrace_ops *op;
+       /* Removed ops need to be tested first */
+       if (removed_ops && removed_ops->tramp_hash) {
+               if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+                       return removed_ops;
+       }
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (!op->tramp_hash)
+                       continue;
+               if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+                       return op;
+       } while_for_each_ftrace_op(op);
+       return NULL;
+ }
+ static struct ftrace_ops *
+ ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
+ {
+       struct ftrace_ops *op;
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* pass rec in as regs to have non-NULL val */
+               if (ftrace_ops_test(op, rec->ip, rec))
+                       return op;
+       } while_for_each_ftrace_op(op);
+       return NULL;
+ }
  /**
   * ftrace_get_addr_new - Get the call address to set to
   * @rec:  The ftrace record descriptor
   */
  unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
  {
+       struct ftrace_ops *ops;
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP) {
+               ops = ftrace_find_tramp_ops_new(rec);
+               if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
+                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+                                   (void *)rec->ip, (void *)rec->ip);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
        if (rec->flags & FTRACE_FL_REGS)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
   */
  unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
  {
+       struct ftrace_ops *ops;
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP_EN) {
+               ops = ftrace_find_tramp_ops_curr(rec);
+               if (FTRACE_WARN_ON(!ops)) {
+                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+                                   (void *)rec->ip, (void *)rec->ip);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
        if (rec->flags & FTRACE_FL_REGS_EN)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
@@@ -2023,6 -2178,89 +2178,89 @@@ void __weak arch_ftrace_update_code(in
        ftrace_run_stop_machine(command);
  }
  
+ static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
+ {
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int size, bits;
+       int ret;
+       size = ops->nr_trampolines;
+       bits = 0;
+       /*
+        * Make the hash size about 1/2 the # found
+        */
+       for (size /= 2; size; size >>= 1)
+               bits++;
+       ops->tramp_hash = alloc_ftrace_hash(bits);
+       /*
+        * TODO: a failed allocation is going to screw up
+        * the accounting of what needs to be modified
+        * and not. For now, we kill ftrace if we fail
+        * to allocate here. But there are ways around this,
+        * but that will take a little more work.
+        */
+       if (!ops->tramp_hash)
+               return -ENOMEM;
+       do_for_each_ftrace_rec(pg, rec) {
+               if (ftrace_rec_count(rec) == 1 &&
+                   ftrace_ops_test(ops, rec->ip, rec)) {
+                       /*
+                        * If another ops adds to a rec, the rec will
+                        * lose its trampoline and never get it back
+                        * until all ops are off of it.
+                        */
+                       if (!(rec->flags & FTRACE_FL_TRAMP))
+                               continue;
+                       /* This record had better have a trampoline */
+                       if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
+                               return -1;
+                       ret = add_hash_entry(ops->tramp_hash, rec->ip);
+                       if (ret < 0)
+                               return ret;
+               }
+       } while_for_each_ftrace_rec();
+       /* The number of recs in the hash must match nr_trampolines */
+       FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+       return 0;
+ }
+ static int ftrace_save_tramp_hashes(void)
+ {
+       struct ftrace_ops *op;
+       int ret;
+       /*
+        * Now that any trampoline is being used, we need to save the
+        * hashes for the ops that have them. This allows the mapping
+        * back from the record to the ops that has the trampoline to
+        * know what code is being replaced. Modifying code must always
+        * verify what it is changing.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* The tramp_hash is recreated each time. */
+               free_ftrace_hash(op->tramp_hash);
+               op->tramp_hash = NULL;
+               if (op->nr_trampolines) {
+                       ret = ftrace_save_ops_tramp_hash(op);
+                       if (ret)
+                               return ret;
+               }
+       } while_for_each_ftrace_op(op);
+       return 0;
+ }
  static void ftrace_run_update_code(int command)
  {
        int ret;
        FTRACE_WARN_ON(ret);
        if (ret)
                return;
-       /*
-        * Do not call function tracer while we update the code.
-        * We are in stop machine.
-        */
-       function_trace_stop++;
  
        /*
         * By default we use stop_machine() to modify the code.
         */
        arch_ftrace_update_code(command);
  
-       function_trace_stop--;
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
+       ret = ftrace_save_tramp_hashes();
+       FTRACE_WARN_ON(ret);
  }
  
  static ftrace_func_t saved_ftrace_func;
  static int ftrace_start_up;
- static int global_start_up;
  
  static void control_ops_free(struct ftrace_ops *ops)
  {
@@@ -2117,8 -2350,7 +2350,7 @@@ static int ftrace_shutdown(struct ftrac
  
        ftrace_hash_rec_disable(ops, 1);
  
-       if (!global_start_up)
-               ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+       ops->flags &= ~FTRACE_OPS_FL_ENABLED;
  
        command |= FTRACE_UPDATE_CALLS;
  
                return 0;
        }
  
+       /*
+        * If the ops uses a trampoline, then it needs to be
+        * tested first on update.
+        */
+       removed_ops = ops;
        ftrace_run_update_code(command);
  
+       removed_ops = NULL;
        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
@@@ -2398,7 -2638,8 +2638,8 @@@ ftrace_allocate_pages(unsigned long num
        return start_pg;
  
   free_pages:
-       while (start_pg) {
+       pg = start_pg;
+       while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
@@@ -2595,8 -2836,10 +2836,10 @@@ static void *t_start(struct seq_file *m
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
-       if (iter->flags & FTRACE_ITER_FILTER &&
-           ftrace_hash_empty(ops->filter_hash)) {
+       if ((iter->flags & FTRACE_ITER_FILTER &&
+            ftrace_hash_empty(ops->filter_hash)) ||
+           (iter->flags & FTRACE_ITER_NOTRACE &&
+            ftrace_hash_empty(ops->notrace_hash))) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
@@@ -2641,7 -2884,10 +2884,10 @@@ static int t_show(struct seq_file *m, v
                return t_hash_show(m, iter);
  
        if (iter->flags & FTRACE_ITER_PRINTALL) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               if (iter->flags & FTRACE_ITER_NOTRACE)
+                       seq_printf(m, "#### no functions disabled ####\n");
+               else
+                       seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }
  
                return 0;
  
        seq_printf(m, "%ps", (void *)rec->ip);
-       if (iter->flags & FTRACE_ITER_ENABLED)
+       if (iter->flags & FTRACE_ITER_ENABLED) {
                seq_printf(m, " (%ld)%s",
-                          rec->flags & ~FTRACE_FL_MASK,
-                          rec->flags & FTRACE_FL_REGS ? " R" : "");
+                          ftrace_rec_count(rec),
+                          rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+               if (rec->flags & FTRACE_FL_TRAMP_EN) {
+                       struct ftrace_ops *ops;
+                       ops = ftrace_find_tramp_ops_curr(rec);
+                       if (ops && ops->trampoline)
+                               seq_printf(m, "\ttramp: %pS",
+                                          (void *)ops->trampoline);
+                       else
+                               seq_printf(m, "\ttramp: ERROR!");
+               }
+       }       
        seq_printf(m, "\n");
  
        return 0;
@@@ -2702,13 -2960,6 +2960,6 @@@ ftrace_enabled_open(struct inode *inode
        return iter ? 0 : -ENOMEM;
  }
  
- static void ftrace_filter_reset(struct ftrace_hash *hash)
- {
-       mutex_lock(&ftrace_lock);
-       ftrace_hash_clear(hash);
-       mutex_unlock(&ftrace_lock);
- }
  /**
   * ftrace_regex_open - initialize function tracer filter files
   * @ops: The ftrace_ops that hold the hash filters
@@@ -2758,7 -3009,13 +3009,13 @@@ ftrace_regex_open(struct ftrace_ops *op
                hash = ops->filter_hash;
  
        if (file->f_mode & FMODE_WRITE) {
-               iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+               const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+               if (file->f_flags & O_TRUNC)
+                       iter->hash = alloc_ftrace_hash(size_bits);
+               else
+                       iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
                }
        }
  
-       if ((file->f_mode & FMODE_WRITE) &&
-           (file->f_flags & O_TRUNC))
-               ftrace_filter_reset(iter->hash);
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
  
@@@ -3471,14 -3724,16 +3724,16 @@@ ftrace_set_hash(struct ftrace_ops *ops
        else
                orig_hash = &ops->notrace_hash;
  
-       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+       if (reset)
+               hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       else
+               hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
        if (!hash) {
                ret = -ENOMEM;
                goto out_regex_unlock;
        }
  
-       if (reset)
-               ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
                ret = -EINVAL;
                goto out_regex_unlock;
@@@ -3630,6 -3885,7 +3885,7 @@@ __setup("ftrace_filter=", set_ftrace_fi
  
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
  static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+ static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
  static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
  
  static int __init set_graph_function(char *str)
  }
  __setup("ftrace_graph_filter=", set_graph_function);
  
- static void __init set_ftrace_early_graph(char *buf)
+ static int __init set_graph_notrace_function(char *str)
+ {
+       strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+ }
+ __setup("ftrace_graph_notrace=", set_graph_notrace_function);
+ static void __init set_ftrace_early_graph(char *buf, int enable)
  {
        int ret;
        char *func;
+       unsigned long *table = ftrace_graph_funcs;
+       int *count = &ftrace_graph_count;
+       if (!enable) {
+               table = ftrace_graph_notrace_funcs;
+               count = &ftrace_graph_notrace_count;
+       }
  
        while (buf) {
                func = strsep(&buf, ",");
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                     FTRACE_GRAPH_MAX_FUNCS, func);
+               ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
                if (ret)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
@@@ -3677,7 -3946,9 +3946,9 @@@ static void __init set_ftrace_early_fil
                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (ftrace_graph_buf[0])
-               set_ftrace_early_graph(ftrace_graph_buf);
+               set_ftrace_early_graph(ftrace_graph_buf, 1);
+       if (ftrace_graph_notrace_buf[0])
+               set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
  #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
  }
  
@@@ -3819,7 -4090,12 +4090,12 @@@ static int g_show(struct seq_file *m, v
                return 0;
  
        if (ptr == (unsigned long *)1) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               struct ftrace_graph_data *fgd = m->private;
+               if (fgd->table == ftrace_graph_funcs)
+                       seq_printf(m, "#### all functions enabled ####\n");
+               else
+                       seq_printf(m, "#### no functions disabled ####\n");
                return 0;
        }
  
@@@ -4447,9 -4723,6 +4723,6 @@@ __ftrace_ops_list_func(unsigned long ip
        struct ftrace_ops *op;
        int bit;
  
-       if (function_trace_stop)
-               return;
        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;
        preempt_disable_notrace();
        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (ftrace_ops_test(op, ip, regs)) {
-                       if (WARN_ON(!op->func)) {
-                               function_trace_stop = 1;
-                               printk("op=%p %pS\n", op, op);
+                       if (FTRACE_WARN_ON(!op->func)) {
+                               pr_warn("op=%p %pS\n", op, op);
                                goto out;
                        }
                        op->func(ip, parent_ip, op, regs);
@@@ -5084,6 -5356,12 +5356,12 @@@ int register_ftrace_graph(trace_func_gr
        /* Function graph doesn't use the .func field of global_ops */
        global_ops.flags |= FTRACE_OPS_FL_STUB;
  
+ #ifdef CONFIG_DYNAMIC_FTRACE
+       /* Optimize function graph calling (if implemented by arch) */
+       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+               global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
+ #endif
        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
  
  out:
@@@ -5104,6 -5382,10 +5382,10 @@@ void unregister_ftrace_graph(void
        __ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
        global_ops.flags &= ~FTRACE_OPS_FL_STUB;
+ #ifdef CONFIG_DYNAMIC_FTRACE
+       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+               global_ops.trampoline = 0;
+ #endif
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
  
@@@ -5183,9 -5465,4 +5465,4 @@@ void ftrace_graph_exit_task(struct task
  
        kfree(ret_stack);
  }
- void ftrace_graph_stop(void)
- {
-       ftrace_stop();
- }
  #endif
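
A note on the tramp_hash sizing in ftrace_save_ops_tramp_hash() above:
the bit loop makes the bucket count a power of two near half the number
of trampoline-using records.  For example, nr_trampolines == 57 gives
size /= 2 -> 28, then five right shifts (28, 14, 7, 3, 1) before
reaching zero, so bits == 5 and alloc_ftrace_hash() builds a 32-bucket
table.  The same computation as a standalone helper (a sketch, not the
kernel code itself):

	/* sketch: power-of-two hash sizing, mirroring the loop in
	 * ftrace_save_ops_tramp_hash() */
	static int tramp_hash_bits(int nr_trampolines)
	{
		int size = nr_trampolines / 2;
		int bits = 0;

		while (size) {
			bits++;
			size >>= 1;
		}
		return bits;
	}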
diff --combined kernel/trace/ring_buffer.c
@@@ -616,6 -616,10 +616,6 @@@ int ring_buffer_poll_wait(struct ring_b
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
  
 -      if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 -          (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
 -              return POLLIN | POLLRDNORM;
 -
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
@@@ -1689,22 -1693,14 +1689,14 @@@ int ring_buffer_resize(struct ring_buff
                        if (!cpu_buffer->nr_pages_to_update)
                                continue;
  
-                       /* The update must run on the CPU that is being updated. */
-                       preempt_disable();
-                       if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+                       /* Can't run something on an offline CPU. */
+                       if (!cpu_online(cpu)) {
                                rb_update_pages(cpu_buffer);
                                cpu_buffer->nr_pages_to_update = 0;
                        } else {
-                               /*
-                                * Can not disable preemption for schedule_work_on()
-                                * on PREEMPT_RT.
-                                */
-                               preempt_enable();
                                schedule_work_on(cpu,
                                                &cpu_buffer->update_pages_work);
-                               preempt_disable();
                        }
-                       preempt_enable();
                }
  
                /* wait for all the updates to complete */
  
                get_online_cpus();
  
-               preempt_disable();
-               /* The update must run on the CPU that is being updated. */
-               if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+               /* Can't run something on an offline CPU. */
+               if (!cpu_online(cpu_id))
                        rb_update_pages(cpu_buffer);
                else {
-                       /*
-                        * Can not disable preemption for schedule_work_on()
-                        * on PREEMPT_RT.
-                        */
-                       preempt_enable();
                        schedule_work_on(cpu_id,
                                         &cpu_buffer->update_pages_work);
                        wait_for_completion(&cpu_buffer->update_done);
-                       preempt_disable();
                }
-               preempt_enable();
  
                cpu_buffer->nr_pages_to_update = 0;
                put_online_cpus();
@@@ -3775,7 -3763,7 +3759,7 @@@ rb_iter_peek(struct ring_buffer_iter *i
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
  
-       if (iter->head >= local_read(&iter->head_page->page->commit)) {
+       if (iter->head >= rb_page_size(iter->head_page)) {
                rb_inc_iter(iter);
                goto again;
        }
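
The resize path above now always hands the update to the target CPU
with schedule_work_on() (or performs it directly when that CPU is
offline), instead of juggling preempt_disable() around a possible
local update.  Reduced to its core, the pattern is:

	/* sketch of the pattern in ring_buffer_resize(): an offline
	 * CPU's buffer can't be touched concurrently, so update it
	 * in place; otherwise run the update on that CPU and wait */
	if (!cpu_online(cpu)) {
		rb_update_pages(cpu_buffer);
	} else {
		schedule_work_on(cpu, &cpu_buffer->update_pages_work);
		wait_for_completion(&cpu_buffer->update_done);
	}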
diff --combined kernel/trace/trace.c
@@@ -466,12 -466,6 +466,12 @@@ int __trace_puts(unsigned long ip, cons
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
 +      int pc;
 +
 +      if (!(trace_flags & TRACE_ITER_PRINTK))
 +              return 0;
 +
 +      pc = preempt_count();
  
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
 -                                        irq_flags, preempt_count());
 +                                        irq_flags, pc);
        if (!event)
                return 0;
  
                entry->buf[size] = '\0';
  
        __buffer_unlock_commit(buffer, event);
 +      ftrace_trace_stack(buffer, irq_flags, 4, pc);
  
        return size;
  }
@@@ -516,12 -509,6 +516,12 @@@ int __trace_bputs(unsigned long ip, con
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
 +      int pc;
 +
 +      if (!(trace_flags & TRACE_ITER_PRINTK))
 +              return 0;
 +
 +      pc = preempt_count();
  
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
 -                                        irq_flags, preempt_count());
 +                                        irq_flags, pc);
        if (!event)
                return 0;
  
        entry->str                      = str;
  
        __buffer_unlock_commit(buffer, event);
 +      ftrace_trace_stack(buffer, irq_flags, 4, pc);
  
        return 1;
  }
@@@ -823,7 -809,7 +823,7 @@@ static struct 
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
 -      { trace_clock_jiffies,  "uptime",       1 },
 +      { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
  };
@@@ -937,30 -923,6 +937,6 @@@ out
        return ret;
  }
  
- ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
- {
-       int len;
-       int ret;
-       if (!cnt)
-               return 0;
-       if (s->len <= s->readpos)
-               return -EBUSY;
-       len = s->len - s->readpos;
-       if (cnt > len)
-               cnt = len;
-       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-       if (ret == cnt)
-               return -EFAULT;
-       cnt -= ret;
-       s->readpos += cnt;
-       return cnt;
- }
  static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  {
        int len;
@@@ -3699,6 -3661,7 +3675,7 @@@ static const char readme_msg[] 
  #endif
  #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+       "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
        "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
  #endif
  #ifdef CONFIG_TRACER_SNAPSHOT
@@@ -4238,10 -4201,9 +4215,9 @@@ tracing_set_trace_write(struct file *fi
  }
  
  static ssize_t
- tracing_max_lat_read(struct file *filp, char __user *ubuf,
-                    size_t cnt, loff_t *ppos)
+ tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
  {
-       unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;
  
  }
  
  static ssize_t
- tracing_max_lat_write(struct file *filp, const char __user *ubuf,
-                     size_t cnt, loff_t *ppos)
+ tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
  {
-       unsigned long *ptr = filp->private_data;
        unsigned long val;
        int ret;
  
        return cnt;
  }
  
+ static ssize_t
+ tracing_thresh_read(struct file *filp, char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
+ {
+       return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
+ }
+ static ssize_t
+ tracing_thresh_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+ {
+       struct trace_array *tr = filp->private_data;
+       int ret;
+       mutex_lock(&trace_types_lock);
+       ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+       if (ret < 0)
+               goto out;
+       if (tr->current_trace->update_thresh) {
+               ret = tr->current_trace->update_thresh(tr);
+               if (ret < 0)
+                       goto out;
+       }
+       ret = cnt;
+ out:
+       mutex_unlock(&trace_types_lock);
+       return ret;
+ }
+ static ssize_t
+ tracing_max_lat_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+ {
+       return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+ }
+ static ssize_t
+ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+ {
+       return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+ }
  static int tracing_open_pipe(struct inode *inode, struct file *filp)
  {
        struct trace_array *tr = inode->i_private;
@@@ -5170,6 -5177,13 +5191,13 @@@ static int snapshot_raw_open(struct ino
  #endif /* CONFIG_TRACER_SNAPSHOT */
  
  
+ static const struct file_operations tracing_thresh_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_thresh_read,
+       .write          = tracing_thresh_write,
+       .llseek         = generic_file_llseek,
+ };
  static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@@ -6107,10 -6121,8 +6135,8 @@@ destroy_trace_option_files(struct trace
        if (!topts)
                return;
  
-       for (cnt = 0; topts[cnt].opt; cnt++) {
-               if (topts[cnt].entry)
-                       debugfs_remove(topts[cnt].entry);
-       }
+       for (cnt = 0; topts[cnt].opt; cnt++)
+               debugfs_remove(topts[cnt].entry);
  
        kfree(topts);
  }
@@@ -6533,7 -6545,7 +6559,7 @@@ static __init int tracer_init_debugfs(v
        init_tracer_debugfs(&global_trace, d_tracer);
  
        trace_create_file("tracing_thresh", 0644, d_tracer,
-                       &tracing_thresh, &tracing_max_lat_fops);
+                       &global_trace, &tracing_thresh_fops);
  
        trace_create_file("README", 0444, d_tracer,
                        NULL, &tracing_readme_fops);
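
The tracing_thresh plumbing above adds an optional update_thresh
callback to struct tracer, invoked under trace_types_lock whenever the
threshold is written.  A sketch of how a tracer might supply it (the
tracer here is hypothetical; the hook and its signature come from the
diff above):

	/* sketch: react to a tracing_thresh change, e.g. by copying
	 * the new value into the tracer's own state */
	static int example_update_thresh(struct trace_array *tr)
	{
		return 0;
	}

	static struct tracer example_tracer = {
		.name		= "example",
		.update_thresh	= example_update_thresh,
	};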
diff --combined kernel/trace/trace_events.c
@@@ -8,6 -8,8 +8,8 @@@
   *
   */
  
+ #define pr_fmt(fmt) fmt
  #include <linux/workqueue.h>
  #include <linux/spinlock.h>
  #include <linux/kthread.h>
@@@ -470,7 -472,6 +472,7 @@@ static void remove_event_file_dir(struc
  
        list_del(&file->list);
        remove_subsystem(file->system);
 +      free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
  }
  
@@@ -1491,7 -1492,7 +1493,7 @@@ event_subsystem_dir(struct trace_array 
  
        dir->entry = debugfs_create_dir(name, parent);
        if (!dir->entry) {
-               pr_warning("Failed to create system directory %s\n", name);
+               pr_warn("Failed to create system directory %s\n", name);
                __put_system(system);
                goto out_free;
        }
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
-               pr_warning("Could not create debugfs '%s/filter' entry\n", name);
+               pr_warn("Could not create debugfs '%s/filter' entry\n", name);
        }
  
        trace_create_file("enable", 0644, dir->entry, dir,
   out_fail:
        /* Only print this message if failed on memory allocation */
        if (!dir || !system)
-               pr_warning("No memory to create event subsystem %s\n",
-                          name);
+               pr_warn("No memory to create event subsystem %s\n", name);
        return NULL;
  }
  
@@@ -1551,8 -1551,7 +1552,7 @@@ event_create_dir(struct dentry *parent
        name = ftrace_event_name(call);
        file->dir = debugfs_create_dir(name, d_events);
        if (!file->dir) {
-               pr_warning("Could not create debugfs '%s' directory\n",
-                          name);
+               pr_warn("Could not create debugfs '%s' directory\n", name);
                return -1;
        }
  
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
-                       pr_warning("Could not initialize trace point"
-                                  " events/%s\n", name);
+                       pr_warn("Could not initialize trace point events/%s\n",
+                               name);
                        return -1;
                }
        }
@@@ -1649,8 -1648,7 +1649,7 @@@ static int event_init(struct ftrace_eve
        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
-                       pr_warn("Could not initialize trace events/%s\n",
-                               name);
+                       pr_warn("Could not initialize trace events/%s\n", name);
        }
  
        return ret;
@@@ -1895,8 -1893,8 +1894,8 @@@ __trace_add_event_dirs(struct trace_arr
        list_for_each_entry(call, &ftrace_events, list) {
                ret = __trace_add_new_event(call, tr);
                if (ret < 0)
-                       pr_warning("Could not create directory for event %s\n",
-                                  ftrace_event_name(call));
+                       pr_warn("Could not create directory for event %s\n",
+                               ftrace_event_name(call));
        }
  }
  
@@@ -2208,8 -2206,8 +2207,8 @@@ __trace_early_add_event_dirs(struct tra
        list_for_each_entry(file, &tr->events, list) {
                ret = event_create_dir(tr->event_dir, file);
                if (ret < 0)
-                       pr_warning("Could not create directory for event %s\n",
-                                  ftrace_event_name(file->event_call));
+                       pr_warn("Could not create directory for event %s\n",
+                               ftrace_event_name(file->event_call));
        }
  }
  
@@@ -2232,8 -2230,8 +2231,8 @@@ __trace_early_add_events(struct trace_a
  
                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
-                       pr_warning("Could not create early event %s\n",
-                                  ftrace_event_name(call));
+                       pr_warn("Could not create early event %s\n",
+                               ftrace_event_name(call));
        }
  }
  
@@@ -2280,13 -2278,13 +2279,13 @@@ create_event_toplevel_files(struct dent
        entry = debugfs_create_file("set_event", 0644, parent,
                                    tr, &ftrace_set_event_fops);
        if (!entry) {
-               pr_warning("Could not create debugfs 'set_event' entry\n");
+               pr_warn("Could not create debugfs 'set_event' entry\n");
                return -ENOMEM;
        }
  
        d_events = debugfs_create_dir("events", parent);
        if (!d_events) {
-               pr_warning("Could not create debugfs 'events' directory\n");
+               pr_warn("Could not create debugfs 'events' directory\n");
                return -ENOMEM;
        }
  
@@@ -2462,11 -2460,10 +2461,10 @@@ static __init int event_trace_init(void
        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    tr, &ftrace_avail_fops);
        if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_events' entry\n");
+               pr_warn("Could not create debugfs 'available_events' entry\n");
  
        if (trace_define_common_fields())
-               pr_warning("tracing: Failed to allocate common fields");
+               pr_warn("tracing: Failed to allocate common fields");
  
        ret = early_event_add_tracer(d_tracer, tr);
        if (ret)
  #ifdef CONFIG_MODULES
        ret = register_module_notifier(&trace_module_nb);
        if (ret)
-               pr_warning("Failed to register trace events module notifier\n");
+               pr_warn("Failed to register trace events module notifier\n");
  #endif
        return 0;
  }
@@@ -2579,7 -2576,7 +2577,7 @@@ static __init void event_trace_self_tes
                 * it and the self test should not be on.
                 */
                if (file->flags & FTRACE_EVENT_FL_ENABLED) {
-                       pr_warning("Enabled event during self test!\n");
+                       pr_warn("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }
  
                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
-                       pr_warning("error enabling system %s\n",
-                                  system->name);
+                       pr_warn("error enabling system %s\n",
+                               system->name);
                        continue;
                }
  
  
                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret)) {
-                       pr_warning("error disabling system %s\n",
-                                  system->name);
+                       pr_warn("error disabling system %s\n",
+                               system->name);
                        continue;
                }
  
  
        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error enabling all events\n");
+               pr_warn("error enabling all events\n");
                return;
        }
  
        /* reset sysname */
        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error disabling all events\n");
+               pr_warn("error disabling all events\n");
                return;
        }