X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Farm%2Fkernel%2Fperf_event.c;h=3d865f3f63639ce0b58bed2f452f9bea038d9b8b;hb=31d30f41bfc7d054bf78b18acff4aedd4e04662b;hp=88b0941ce51ec674bbf46d604fcbb46f5dc25255;hpb=2cfab8d74ebfbe06ce2947117945c4f45a5915ec;p=pandora-kernel.git

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 88b0941ce51e..3d865f3f6363 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -187,7 +192,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -200,13 +205,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -223,7 +222,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -239,7 +238,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -332,7 +331,13 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (is_software_event(event))
+		return 1;
+
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -519,7 +524,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period = armpmu->max_period >> 1;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -767,11 +778,16 @@ user_backtrace(struct frame_tail __user *tail,
 	       struct perf_callchain_entry *entry)
 {
 	struct frame_tail buftail;
+	unsigned long err;
 
-	/* Also check accessibility of one struct frame_tail beyond */
 	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
 		return NULL;
-	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
 		return NULL;
 
 	perf_callchain_store(entry, buftail.lr);
@@ -792,6 +808,11 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	struct frame_tail __user *tail;
 
 
+	perf_callchain_store(entry, regs->ARM_pc);
+
+	if (!current->mm)
+		return;
+
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
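
Note on the delta arithmetic above (an illustrative sketch, not part of the patch): the reworked armpmu_event_update() computes the elapsed count as (new_raw_count - prev_raw_count) & armpmu->max_period, which stays correct as long as the counter wraps at most once between two reads; halving sample_period in __hw_perf_event_init() makes that much more likely for non-sampling runs, per the comment added in the hunk above. The stand-alone program below demonstrates the masking. It assumes a 32-bit PMU counter, so max_period is the all-ones mask 0xffffffff; the helper name counter_delta and the sample values are made up for the demonstration.

#include <stdio.h>
#include <stdint.h>

/* Same masking as the reworked armpmu_event_update(). */
static uint64_t counter_delta(uint64_t prev_count, uint64_t new_count,
			      uint64_t max_period)
{
	return (new_count - prev_count) & max_period;
}

int main(void)
{
	const uint64_t max_period = 0xffffffffULL;	/* 32-bit counter mask */

	/* No wrap: 0x1000 -> 0x2000 elapses 0x1000 events. */
	printf("0x%llx\n", (unsigned long long)
	       counter_delta(0x1000, 0x2000, max_period));

	/* Wrap: 0xfffff000 -> 0x200 is 0x1000 up to the wrap plus 0x200, i.e. 0x1200. */
	printf("0x%llx\n", (unsigned long long)
	       counter_delta(0xfffff000, 0x200, max_period));

	return 0;
}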