perf: Remove the nmi parameter from the swevent and overflow interface
Author:     Peter Zijlstra <a.p.zijlstra@chello.nl>
AuthorDate: Mon, 27 Jun 2011 12:41:57 +0000 (14:41 +0200)
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 1 Jul 2011 09:06:35 +0000 (11:06 +0200)
The nmi parameter indicated whether we could do wakeups from the current
context; if not (nmi=1), we would set some state and self-IPI, and let
the resulting interrupt do the wakeup.
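
Concretely, the wakeup path keyed off that flag; a condensed sketch of
the pattern this patch removes (taken from the kernel/events/core.c
hunk below):

	if (event->fasync && event->pending_kill) {
		if (nmi) {
			/* NMI context: defer the wakeup via irq_work */
			event->pending_wakeup = 1;
			irq_work_queue(&event->pending);
		} else
			perf_event_wakeup(event); /* safe to wake directly */
	}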

For the various event classes:

  - hardware: nmi=1; the PMI is in fact an NMI, or we run irq_work_run
    from the PMI-tail (ARM etc.)
  - tracepoint: nmi=1; since a tracepoint could fire from NMI context.
  - software: nmi=[0,1]; some, like the scheduler hooks, cannot
    perform wakeups and hence need 1.

As one can see, there is very little nmi=0 usage, and the downside of
not using it is that on some platforms some software events can have a
jiffy delay in wakeup (when arch_irq_work_raise isn't implemented).

The upside, however, is that we can remove the nmi parameter and save a
bunch of conditionals in fast paths.
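
With the parameter gone, the wakeup is deferred unconditionally via
irq_work and every caller simply drops the argument; before/after of
one call site (the page-fault hook from the hunks below):

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);	/* old */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);	/* new */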

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Michael Cree <mcree@orcon.net.nz>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Don Zickus <dzickus@redhat.com>
Link: http://lkml.kernel.org/n/tip-agjev8eu666tvknpb3iaj0fg@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
46 files changed:
arch/alpha/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/swp_emulate.c
arch/arm/mm/fault.c
arch/mips/kernel/perf_event.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/fault.c
arch/powerpc/include/asm/emulated_ops.h
arch/powerpc/kernel/perf_event.c
arch/powerpc/kernel/perf_event_fsl_emb.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/mm/fault.c
arch/s390/mm/fault.c
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/traps_32.c
arch/sh/kernel/traps_64.c
arch/sh/math-emu/math.c
arch/sh/mm/fault_32.c
arch/sh/mm/tlbflush_64.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/visemul.c
arch/sparc/math-emu/math_32.c
arch/sparc/math-emu/math_64.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_p4.c
arch/x86/kernel/kgdb.c
arch/x86/kernel/ptrace.c
arch/x86/mm/fault.c
include/linux/perf_event.h
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/sched.c
kernel/watchdog.c
samples/hw_breakpoint/data_breakpoint.c

diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 90561c4..8e47709 100644
@@ -847,7 +847,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
        data.period = event->hw.last_period;
 
        if (alpha_perf_event_set_period(event, hwc, idx)) {
-               if (perf_event_overflow(event, 1, &data, regs)) {
+               if (perf_event_overflow(event, &data, regs)) {
                        /* Interrupts coming too quickly; "throttle" the
                         * counter, i.e., disable it for a little while.
                         */
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd9..38dc4da 100644
@@ -479,7 +479,7 @@ armv6pmu_handle_irq(int irq_num,
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
-               if (perf_event_overflow(event, 0, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686..6e5f875 100644
@@ -787,7 +787,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
-               if (perf_event_overflow(event, 0, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }
 
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe..99b6b85 100644
@@ -251,7 +251,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
-               if (perf_event_overflow(event, 0, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }
 
@@ -583,7 +583,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
 
-               if (perf_event_overflow(event, 0, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 9726006..0c9b105 100644
@@ -396,7 +396,7 @@ static long ptrace_hbp_idx_to_num(int idx)
 /*
  * Handle hitting a HW-breakpoint.
  */
-static void ptrace_hbptriggered(struct perf_event *bp, int unused,
+static void ptrace_hbptriggered(struct perf_event *bp,
                                     struct perf_sample_data *data,
                                     struct pt_regs *regs)
 {
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5..5f452f8 100644
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
        unsigned int address, destreg, data, type;
        unsigned int res = 0;
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
 
        if (current->pid != previous_pid) {
                pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d8..9ea4f7d 100644
@@ -318,11 +318,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        fault = __do_page_fault(mm, addr, fsr, tsk);
        up_read(&mm->mmap_sem);
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (fault & VM_FAULT_MAJOR)
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
        else if (fault & VM_FAULT_MINOR)
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
 
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index a824485..d0deaab 100644
@@ -527,7 +527,7 @@ handle_associated_event(struct cpu_hw_events *cpuc,
        if (!mipspmu_event_set_period(event, hwc, idx))
                return;
 
-       if (perf_event_overflow(event, 0, data, regs))
+       if (perf_event_overflow(event, data, regs))
                mipspmu->disable_event(idx);
 }
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e9b3af2..b7517e3 100644
@@ -578,12 +578,12 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
        if ((opcode & OPCODE) == LL) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, 0, regs, 0);
+                               1, regs, 0);
                return simulate_ll(regs, opcode);
        }
        if ((opcode & OPCODE) == SC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, 0, regs, 0);
+                               1, regs, 0);
                return simulate_sc(regs, opcode);
        }
 
@@ -602,7 +602,7 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, 0, regs, 0);
+                               1, regs, 0);
                switch (rd) {
                case 0:         /* CPU number */
                        regs->regs[rt] = smp_processor_id();
@@ -640,7 +640,7 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                               1, 0, regs, 0);
+                               1, regs, 0);
                return 0;
        }
 
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index cfea1ad..eb319b5 100644
@@ -111,8 +111,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        unsigned long value;
        unsigned int res;
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                     1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
        /*
         * This load never faults.
@@ -517,7 +516,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
        mm_segment_t seg;
 
        perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
-                       1, 0, regs, regs->cp0_badvaddr);
+                       1, regs, regs->cp0_badvaddr);
        /*
         * Did we catch a fault trying to load an instruction?
         * Or are we running in MIPS16 mode?
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index d32cb05..dbf2f93 100644
@@ -272,8 +272,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
        }
 
       emul:
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
-                       1, 0, xcp, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, xcp, 0);
        MIPS_FPU_EMU_INC_STATS(emulated);
        switch (MIPSInst_OPCODE(ir)) {
        case ldc1_op:{
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76..937cf33 100644
@@ -145,7 +145,7 @@ good_area:
         * the fault.
         */
        fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
                BUG();
        }
        if (fault & VM_FAULT_MAJOR) {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-                               1, 0, regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
                tsk->maj_flt++;
        } else {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-                               1, 0, regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
                tsk->min_flt++;
        }
 
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 4592167..2cc41c7 100644
@@ -78,14 +78,14 @@ extern void ppc_warn_emulated_print(const char *type);
 #define PPC_WARN_EMULATED(type, regs)                                  \
        do {                                                            \
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,           \
-                       1, 0, regs, 0);                                 \
+                       1, regs, 0);                                    \
                __PPC_WARN_EMULATED(type);                              \
        } while (0)
 
 #define PPC_WARN_ALIGNMENT(type, regs)                                 \
        do {                                                            \
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,           \
-                       1, 0, regs, regs->dar);                         \
+                       1, regs, regs->dar);                            \
                __PPC_WARN_EMULATED(type);                              \
        } while (0)
 
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 822f630..14967de 100644
@@ -1207,7 +1207,7 @@ struct pmu power_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                              struct pt_regs *regs, int nmi)
+                              struct pt_regs *regs)
 {
        u64 period = event->hw.sample_period;
        s64 prev, delta, left;
@@ -1258,7 +1258,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                if (event->attr.sample_type & PERF_SAMPLE_ADDR)
                        perf_get_data_addr(regs, &data.addr);
 
-               if (perf_event_overflow(event, nmi, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        power_pmu_stop(event, 0);
        }
 }
@@ -1346,7 +1346,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                if ((int)val < 0) {
                        /* event has overflowed */
                        found = 1;
-                       record_and_restart(event, val, regs, nmi);
+                       record_and_restart(event, val, regs);
                }
        }
 
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index b0dc8f7..0a6d2a9 100644
@@ -568,7 +568,7 @@ static struct pmu fsl_emb_pmu = {
  * here so there is no possibility of being interrupted.
  */
 static void record_and_restart(struct perf_event *event, unsigned long val,
-                              struct pt_regs *regs, int nmi)
+                              struct pt_regs *regs)
 {
        u64 period = event->hw.sample_period;
        s64 prev, delta, left;
@@ -616,7 +616,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
                perf_sample_data_init(&data, 0);
                data.period = event->hw.last_period;
 
-               if (perf_event_overflow(event, nmi, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        fsl_emb_pmu_stop(event, 0);
        }
 }
@@ -644,7 +644,7 @@ static void perf_event_interrupt(struct pt_regs *regs)
                        if (event) {
                                /* event has overflowed */
                                found = 1;
-                               record_and_restart(event, val, regs, nmi);
+                               record_and_restart(event, val, regs);
                        } else {
                                /*
                                 * Disabled counter is negative,
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index cb22024..3177617 100644
@@ -882,7 +882,7 @@ void user_disable_single_step(struct task_struct *task)
 }
 
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                      struct perf_sample_data *data, struct pt_regs *regs)
 {
        struct perf_event_attr attr;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 54f4fb9..dbc4825 100644
@@ -173,7 +173,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                die("Weird page fault", regs, SIGSEGV);
        }
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
@@ -319,7 +319,7 @@ good_area:
        }
        if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                     regs, address);
 #ifdef CONFIG_PPC_SMLPAR
                if (firmware_has_feature(FW_FEATURE_CMO)) {
@@ -330,7 +330,7 @@ good_area:
 #endif
        } else {
                current->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                     regs, address);
        }
        up_read(&mm->mmap_sem);
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index fe103e8..095f782 100644
@@ -299,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access,
                goto out;
 
        address = trans_exc_code & __FAIL_ADDR_MASK;
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
@@ -345,11 +345,11 @@ retry:
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
diff --git a/arch/sh/kernel/ptrace_32.c b/arch/sh/kernel/ptrace_32.c
index 3d7b209..8051976 100644
@@ -63,7 +63,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
        return 0;
 }
 
-void ptrace_triggered(struct perf_event *bp, int nmi,
+void ptrace_triggered(struct perf_event *bp,
                      struct perf_sample_data *data, struct pt_regs *regs)
 {
        struct perf_event_attr attr;
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
index b51a171..d9006f8 100644
@@ -393,7 +393,7 @@ int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
         */
        if (!expected) {
                unaligned_fixups_notify(current, instruction, regs);
-               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
                              regs, address);
        }
 
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index 6713ca9..67110be 100644
@@ -434,7 +434,7 @@ static int misaligned_load(struct pt_regs *regs,
                return error;
        }
 
-       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
@@ -512,7 +512,7 @@ static int misaligned_store(struct pt_regs *regs,
                return error;
        }
 
-       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
 
        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
@@ -588,7 +588,7 @@ static int misaligned_fpu_load(struct pt_regs *regs,
                return error;
        }
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
@@ -665,7 +665,7 @@ static int misaligned_fpu_store(struct pt_regs *regs,
                return error;
        }
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, address);
 
        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
diff --git a/arch/sh/math-emu/math.c b/arch/sh/math-emu/math.c
index f76a509..9771952 100644
@@ -620,7 +620,7 @@ int do_fpu_inst(unsigned short inst, struct pt_regs *regs)
        struct task_struct *tsk = current;
        struct sh_fpu_soft_struct *fpu = &(tsk->thread.xstate->softfpu);
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
        if (!(task_thread_info(tsk)->status & TS_USEDFPU)) {
                /* initialize once. */
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index d4c34d7..7bebd04 100644
@@ -160,7 +160,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        if ((regs->sr & SR_IMASK) != SR_IMASK)
                local_irq_enable();
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /*
         * If we're in an interrupt, have no user context or are running
@@ -210,11 +210,11 @@ good_area:
        }
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                     regs, address);
        } else {
                tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                     regs, address);
        }
 
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 7f5810f..e3430e0 100644
@@ -116,7 +116,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
        /* Not an IO address, so reenable interrupts */
        local_irq_enable();
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /*
         * If we're in an interrupt or have no user
@@ -200,11 +200,11 @@ good_area:
 
        if (fault & VM_FAULT_MAJOR) {
                tsk->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                     regs, address);
        } else {
                tsk->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                     regs, address);
        }
 
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 2cb0e1c..0b32f2e 100644
@@ -1277,7 +1277,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                if (!sparc_perf_event_set_period(event, hwc, idx))
                        continue;
 
-               if (perf_event_overflow(event, 1, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        sparc_pmu_stop(event, 0);
        }
 
diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c
index 4491f4c..7efbb2f 100644
@@ -247,7 +247,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                unsigned long addr = compute_effective_address(regs, insn);
                int err;
 
-               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
                switch (dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
@@ -338,7 +338,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
                }
 
                addr = compute_effective_address(regs, insn);
-               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
                switch(dir) {
                case load:
                        err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index b2b019e..35cff16 100644
@@ -317,7 +317,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 
                addr = compute_effective_address(regs, insn,
                                                 ((insn >> 25) & 0x1f));
-               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr);
+               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
                switch (asi) {
                case ASI_NL:
                case ASI_AIUPL:
@@ -384,7 +384,7 @@ int handle_popc(u32 insn, struct pt_regs *regs)
        int ret, i, rd = ((insn >> 25) & 0x1f);
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
                                
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
        if (insn & 0x2000) {
                maybe_flush_windows(0, 0, rd, from_kernel);
                value = sign_extend_imm13(insn);
@@ -431,7 +431,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs)
        int asi = decode_asi(insn, regs);
        int flag = (freg < 32) ? FPRS_DL : FPRS_DU;
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
        save_and_clear_fpu();
        current_thread_info()->xfsr[0] &= ~0x1c000;
@@ -554,7 +554,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs)
        int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
        unsigned long *reg;
                                
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
        maybe_flush_windows(0, 0, rd, from_kernel);
        reg = fetch_reg_addr(rd, regs);
@@ -586,7 +586,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
        if (tstate & TSTATE_PRIV)
                die_if_kernel("lddfmna from kernel", regs);
-       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
@@ -647,7 +647,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 
        if (tstate & TSTATE_PRIV)
                die_if_kernel("stdfmna from kernel", regs);
-       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar);
+       perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 3635771..32b626c 100644
@@ -802,7 +802,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
 
        BUG_ON(regs->tstate & TSTATE_PRIV);
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
diff --git a/arch/sparc/math-emu/math_32.c b/arch/sparc/math-emu/math_32.c
index a3fccde..aa4d55b 100644
@@ -164,7 +164,7 @@ int do_mathemu(struct pt_regs *regs, struct task_struct *fpt)
        int retcode = 0;                               /* assume all succeed */
        unsigned long insn;
 
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 
 #ifdef DEBUG_MATHEMU
        printk("In do_mathemu()... pc is %08lx\n", regs->pc);
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 56d2c44..e575bd2 100644
@@ -184,7 +184,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
 
        if (tstate & TSTATE_PRIV)
                die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
-       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0);
+       perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
        if (test_thread_flag(TIF_32BIT))
                pc = (u32)pc;
        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7543ddb..aa1c1b1 100644
@@ -251,7 +251,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         if (in_atomic() || !mm)
                 goto no_context;
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        down_read(&mm->mmap_sem);
 
@@ -301,12 +301,10 @@ good_area:
        }
        if (fault & VM_FAULT_MAJOR) {
                current->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                             regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                current->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                             regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }
        up_read(&mm->mmap_sem);
        return;
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index f92ce56..504c062 100644
@@ -325,7 +325,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
        if (in_atomic() || !mm)
                goto intr_or_no_mm;
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        if (!down_read_trylock(&mm->mmap_sem)) {
                if ((regs->tstate & TSTATE_PRIV) &&
@@ -433,12 +433,10 @@ good_area:
        }
        if (fault & VM_FAULT_MAJOR) {
                current->maj_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
-                             regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
        } else {
                current->min_flt++;
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
-                             regs, address);
+               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
        }
        up_read(&mm->mmap_sem);
 
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8a57f9a..5b86ec5 100644
@@ -1339,7 +1339,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                if (!x86_perf_event_set_period(event))
                        continue;
 
-               if (perf_event_overflow(event, 1, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 41178c8..d38b002 100644
@@ -1003,7 +1003,7 @@ again:
 
                data.period = event->hw.last_period;
 
-               if (perf_event_overflow(event, 1, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }
 
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index bab491b..0941f93 100644
@@ -340,7 +340,7 @@ static int intel_pmu_drain_bts_buffer(void)
         */
        perf_prepare_sample(&header, &data, event, &regs);
 
-       if (perf_output_begin(&handle, event, header.size * (top - at), 1, 1))
+       if (perf_output_begin(&handle, event, header.size * (top - at), 1))
                return 1;
 
        for (; at < top; at++) {
@@ -616,7 +616,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
        else
                regs.flags &= ~PERF_EFLAGS_EXACT;
 
-       if (perf_event_overflow(event, 1, &data, &regs))
+       if (perf_event_overflow(event, &data, &regs))
                x86_pmu_stop(event, 0);
 }
 
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f76fddf..d6e6a67 100644
@@ -970,7 +970,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
 
                if (!x86_perf_event_set_period(event))
                        continue;
-               if (perf_event_overflow(event, 1, &data, regs))
+               if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 5f9ecff..98da6a7 100644
@@ -608,7 +608,7 @@ int kgdb_arch_init(void)
        return register_die_notifier(&kgdb_notifier);
 }
 
-static void kgdb_hw_overflow_handler(struct perf_event *event, int nmi,
+static void kgdb_hw_overflow_handler(struct perf_event *event,
                struct perf_sample_data *data, struct pt_regs *regs)
 {
        struct task_struct *tsk = current;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 807c2a2..11db2e9 100644
@@ -528,7 +528,7 @@ static int genregs_set(struct task_struct *target,
        return ret;
 }
 
-static void ptrace_triggered(struct perf_event *bp, int nmi,
+static void ptrace_triggered(struct perf_event *bp,
                             struct perf_sample_data *data,
                             struct pt_regs *regs)
 {
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 2dbf6bf..4d09df0 100644
@@ -1059,7 +1059,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);
 
-       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
        /*
         * If we're in an interrupt, have no user context or are running
@@ -1161,11 +1161,11 @@ good_area:
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                      regs, address);
                } else {
                        tsk->min_flt++;
-                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                      regs, address);
                }
                if (fault & VM_FAULT_RETRY) {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2f7b5d4..0946a8b 100644
@@ -682,7 +682,7 @@ enum perf_event_active_state {
 struct file;
 struct perf_sample_data;
 
-typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
+typedef void (*perf_overflow_handler_t)(struct perf_event *,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);
 
@@ -925,7 +925,6 @@ struct perf_output_handle {
        unsigned long                   size;
        void                            *addr;
        int                             page;
-       int                             nmi;
        int                             sample;
 };
 
@@ -993,7 +992,7 @@ extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_event *event,
                                struct pt_regs *regs);
 
-extern int perf_event_overflow(struct perf_event *event, int nmi,
+extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);
 
@@ -1012,7 +1011,7 @@ static inline int is_software_event(struct perf_event *event)
 
 extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
-extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
+extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
 #ifndef perf_arch_fetch_caller_regs
 static inline void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
@@ -1034,7 +1033,7 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs)
 }
 
 static __always_inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
        struct pt_regs hot_regs;
 
@@ -1043,7 +1042,7 @@ perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
                }
-               __perf_sw_event(event_id, nr, nmi, regs, addr);
+               __perf_sw_event(event_id, nr, regs, addr);
        }
 }
 
@@ -1057,7 +1056,7 @@ static inline void perf_event_task_sched_in(struct task_struct *task)
 
 static inline void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
 {
-       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+       perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
        __perf_event_task_sched_out(task, next);
 }
@@ -1119,7 +1118,7 @@ extern void perf_bp_event(struct perf_event *event, void *data);
 
 extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
-                            int nmi, int sample);
+                            int sample);
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
@@ -1143,8 +1142,7 @@ static inline int perf_event_task_disable(void)                           { return -EINVAL; }
 static inline int perf_event_task_enable(void)                         { return -EINVAL; }
 
 static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi,
-                    struct pt_regs *regs, u64 addr)                    { }
+perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)    { }
 static inline void
 perf_bp_event(struct perf_event *event, void *data)                    { }
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 270e32f..dbd1ca7 100644
@@ -3972,7 +3972,7 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 }
 
-static void perf_event_output(struct perf_event *event, int nmi,
+static void perf_event_output(struct perf_event *event,
                                struct perf_sample_data *data,
                                struct pt_regs *regs)
 {
@@ -3984,7 +3984,7 @@ static void perf_event_output(struct perf_event *event, int nmi,
 
        perf_prepare_sample(&header, data, event, regs);
 
-       if (perf_output_begin(&handle, event, header.size, nmi, 1))
+       if (perf_output_begin(&handle, event, header.size, 1))
                goto exit;
 
        perf_output_sample(&handle, &header, data, event);
@@ -4024,7 +4024,7 @@ perf_event_read_event(struct perf_event *event,
        int ret;
 
        perf_event_header__init_id(&read_event.header, &sample, event);
-       ret = perf_output_begin(&handle, event, read_event.header.size, 0, 0);
+       ret = perf_output_begin(&handle, event, read_event.header.size, 0);
        if (ret)
                return;
 
@@ -4067,7 +4067,7 @@ static void perf_event_task_output(struct perf_event *event,
        perf_event_header__init_id(&task_event->event_id.header, &sample, event);
 
        ret = perf_output_begin(&handle, event,
-                               task_event->event_id.header.size, 0, 0);
+                               task_event->event_id.header.size, 0);
        if (ret)
                goto out;
 
@@ -4204,7 +4204,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
        perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
        ret = perf_output_begin(&handle, event,
-                               comm_event->event_id.header.size, 0, 0);
+                               comm_event->event_id.header.size, 0);
 
        if (ret)
                goto out;
@@ -4351,7 +4351,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 
        perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
        ret = perf_output_begin(&handle, event,
-                               mmap_event->event_id.header.size, 0, 0);
+                               mmap_event->event_id.header.size, 0);
        if (ret)
                goto out;
 
@@ -4546,7 +4546,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
        perf_event_header__init_id(&throttle_event.header, &sample, event);
 
        ret = perf_output_begin(&handle, event,
-                               throttle_event.header.size, 1, 0);
+                               throttle_event.header.size, 0);
        if (ret)
                return;
 
@@ -4559,7 +4559,7 @@ static void perf_log_throttle(struct perf_event *event, int enable)
  * Generic event overflow handling, sampling.
  */
 
-static int __perf_event_overflow(struct perf_event *event, int nmi,
+static int __perf_event_overflow(struct perf_event *event,
                                   int throttle, struct perf_sample_data *data,
                                   struct pt_regs *regs)
 {
@@ -4602,34 +4602,28 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
        if (events && atomic_dec_and_test(&event->event_limit)) {
                ret = 1;
                event->pending_kill = POLL_HUP;
-               if (nmi) {
-                       event->pending_disable = 1;
-                       irq_work_queue(&event->pending);
-               } else
-                       perf_event_disable(event);
+               event->pending_disable = 1;
+               irq_work_queue(&event->pending);
        }
 
        if (event->overflow_handler)
-               event->overflow_handler(event, nmi, data, regs);
+               event->overflow_handler(event, data, regs);
        else
-               perf_event_output(event, nmi, data, regs);
+               perf_event_output(event, data, regs);
 
        if (event->fasync && event->pending_kill) {
-               if (nmi) {
-                       event->pending_wakeup = 1;
-                       irq_work_queue(&event->pending);
-               } else
-                       perf_event_wakeup(event);
+               event->pending_wakeup = 1;
+               irq_work_queue(&event->pending);
        }
 
        return ret;
 }
 
-int perf_event_overflow(struct perf_event *event, int nmi,
+int perf_event_overflow(struct perf_event *event,
                          struct perf_sample_data *data,
                          struct pt_regs *regs)
 {
-       return __perf_event_overflow(event, nmi, 1, data, regs);
+       return __perf_event_overflow(event, 1, data, regs);
 }
 
 /*
@@ -4678,7 +4672,7 @@ again:
 }
 
 static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
-                                   int nmi, struct perf_sample_data *data,
+                                   struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -4692,7 +4686,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
                return;
 
        for (; overflow; overflow--) {
-               if (__perf_event_overflow(event, nmi, throttle,
+               if (__perf_event_overflow(event, throttle,
                                            data, regs)) {
                        /*
                         * We inhibit the overflow from happening when
@@ -4705,7 +4699,7 @@ static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
 }
 
 static void perf_swevent_event(struct perf_event *event, u64 nr,
-                              int nmi, struct perf_sample_data *data,
+                              struct perf_sample_data *data,
                               struct pt_regs *regs)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -4719,12 +4713,12 @@ static void perf_swevent_event(struct perf_event *event, u64 nr,
                return;
 
        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
-               return perf_swevent_overflow(event, 1, nmi, data, regs);
+               return perf_swevent_overflow(event, 1, data, regs);
 
        if (local64_add_negative(nr, &hwc->period_left))
                return;
 
-       perf_swevent_overflow(event, 0, nmi, data, regs);
+       perf_swevent_overflow(event, 0, data, regs);
 }
 
 static int perf_exclude_event(struct perf_event *event,
@@ -4812,7 +4806,7 @@ find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
 }
 
 static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-                                   u64 nr, int nmi,
+                                   u64 nr,
                                    struct perf_sample_data *data,
                                    struct pt_regs *regs)
 {
@@ -4828,7 +4822,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_swevent_match(event, type, event_id, data, regs))
-                       perf_swevent_event(event, nr, nmi, data, regs);
+                       perf_swevent_event(event, nr, data, regs);
        }
 end:
        rcu_read_unlock();
@@ -4849,8 +4843,7 @@ inline void perf_swevent_put_recursion_context(int rctx)
        put_recursion_context(swhash->recursion, rctx);
 }
 
-void __perf_sw_event(u32 event_id, u64 nr, int nmi,
-                           struct pt_regs *regs, u64 addr)
+void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
        struct perf_sample_data data;
        int rctx;
@@ -4862,7 +4855,7 @@ void __perf_sw_event(u32 event_id, u64 nr, int nmi,
 
        perf_sample_data_init(&data, addr);
 
-       do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, nmi, &data, regs);
+       do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
 
        perf_swevent_put_recursion_context(rctx);
        preempt_enable_notrace();
@@ -5110,7 +5103,7 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
 
        hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
                if (perf_tp_event_match(event, &data, regs))
-                       perf_swevent_event(event, count, 1, &data, regs);
+                       perf_swevent_event(event, count, &data, regs);
        }
 
        perf_swevent_put_recursion_context(rctx);
@@ -5203,7 +5196,7 @@ void perf_bp_event(struct perf_event *bp, void *data)
        perf_sample_data_init(&sample, bp->attr.bp_addr);
 
        if (!bp->hw.state && !perf_exclude_event(bp, regs))
-               perf_swevent_event(bp, 1, 1, &sample, regs);
+               perf_swevent_event(bp, 1, &sample, regs);
 }
 #endif
 
@@ -5232,7 +5225,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
 
        if (regs && !perf_exclude_event(event, regs)) {
                if (!(event->attr.exclude_idle && current->pid == 0))
-                       if (perf_event_overflow(event, 0, &data, regs))
+                       if (perf_event_overflow(event, &data, regs))
                                ret = HRTIMER_NORESTART;
        }
 
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index 114f27f..09097dd 100644
@@ -27,7 +27,6 @@ struct ring_buffer {
        void                            *data_pages[0];
 };
 
-
 extern void rb_free(struct ring_buffer *rb);
 extern struct ring_buffer *
 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index fc2701c..8b3b736 100644
@@ -38,11 +38,8 @@ static void perf_output_wakeup(struct perf_output_handle *handle)
 {
        atomic_set(&handle->rb->poll, POLL_IN);
 
-       if (handle->nmi) {
-               handle->event->pending_wakeup = 1;
-               irq_work_queue(&handle->event->pending);
-       } else
-               perf_event_wakeup(handle->event);
+       handle->event->pending_wakeup = 1;
+       irq_work_queue(&handle->event->pending);
 }
 
 /*
@@ -102,7 +99,7 @@ out:
 
 int perf_output_begin(struct perf_output_handle *handle,
                      struct perf_event *event, unsigned int size,
-                     int nmi, int sample)
+                     int sample)
 {
        struct ring_buffer *rb;
        unsigned long tail, offset, head;
@@ -127,7 +124,6 @@ int perf_output_begin(struct perf_output_handle *handle,
 
        handle->rb      = rb;
        handle->event   = event;
-       handle->nmi     = nmi;
        handle->sample  = sample;
 
        if (!rb->nr_pages)
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f2e502..d08d110 100644
@@ -2220,7 +2220,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
        if (task_cpu(p) != new_cpu) {
                p->se.nr_migrations++;
-               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
+               perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
        }
 
        __set_task_cpu(p, new_cpu);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 752b75b..a6708e6 100644
@@ -211,7 +211,7 @@ static struct perf_event_attr wd_hw_attr = {
 };
 
 /* Callback function for perf event subsystem */
-static void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
 {
diff --git a/samples/hw_breakpoint/data_breakpoint.c b/samples/hw_breakpoint/data_breakpoint.c
index 0636539..7b164d3 100644
@@ -41,7 +41,7 @@ module_param_string(ksym, ksym_name, KSYM_NAME_LEN, S_IRUGO);
 MODULE_PARM_DESC(ksym, "Kernel symbol to monitor; this module will report any"
                        " write operations on the kernel symbol");
 
-static void sample_hbp_handler(struct perf_event *bp, int nmi,
+static void sample_hbp_handler(struct perf_event *bp,
                               struct perf_sample_data *data,
                               struct pt_regs *regs)
 {