X-Git-Url: https://git.openpandora.org/cgi-bin/gitweb.cgi?p=pandora-kernel.git;a=blobdiff_plain;f=arch%2Farm%2Fkernel%2Fperf_event.c;h=40dbe119f6f1b49b04d36246f6a334eb6e0dd9f0;hp=8e9c98edc0682a8aa23790737a3b63b08dea847f;hb=b0dbb52f869311b6d3e128b781d4253e03d83081;hpb=34a9d2c39afe74a941b9e88efe2762afc4d82443

diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8e9c98edc068..40dbe119f6f1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -ENOENT;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -172,8 +177,14 @@ armpmu_event_set_period(struct perf_event *event,
 		ret = 1;
 	}
 
-	if (left > (s64)armpmu->max_period)
-		left = armpmu->max_period;
+	/*
+	 * Limit the maximum period to prevent the counter value
+	 * from overtaking the one we are about to program. In
+	 * effect we are reducing max_period to account for
+	 * interrupt latency (and we are being very conservative).
+	 */
+	if (left > (armpmu->max_period >> 1))
+		left = armpmu->max_period >> 1;
 
 	local64_set(&hwc->prev_count, (u64)-left);
 
@@ -187,7 +198,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
 		    struct hw_perf_event *hwc,
-		    int idx, int overflow)
+		    int idx)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	u64 delta, prev_raw_count, new_raw_count;
@@ -200,13 +211,7 @@ again:
 			     new_raw_count) != prev_raw_count)
 		goto again;
 
-	new_raw_count &= armpmu->max_period;
-	prev_raw_count &= armpmu->max_period;
-
-	if (overflow)
-		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-	else
-		delta = new_raw_count - prev_raw_count;
+	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
 	local64_add(delta, &event->count);
 	local64_sub(delta, &hwc->period_left);
@@ -223,7 +228,7 @@ armpmu_read(struct perf_event *event)
 	if (hwc->idx < 0)
 		return;
 
-	armpmu_event_update(event, hwc, hwc->idx, 0);
+	armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -239,7 +244,7 @@ armpmu_stop(struct perf_event *event, int flags)
 	if (!(hwc->state & PERF_HES_STOPPED)) {
 		armpmu->disable(hwc, hwc->idx);
 		barrier(); /* why? */
-		armpmu_event_update(event, hwc, hwc->idx, 0);
+		armpmu_event_update(event, hwc, hwc->idx);
 		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
 	}
 }
@@ -332,7 +337,13 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct hw_perf_event fake_event = event->hw;
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
-	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+	if (is_software_event(event))
+		return 1;
+
+	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+		return 1;
+
+	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
 	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -519,7 +530,13 @@ __hw_perf_event_init(struct perf_event *event)
 	hwc->config_base	    |= (unsigned long)mapping;
 
 	if (!hwc->sample_period) {
-		hwc->sample_period  = armpmu->max_period;
+		/*
+		 * For non-sampling runs, limit the sample_period to half
+		 * of the counter width. That way, the new counter value
+		 * is far less likely to overtake the previous one unless
+		 * you have some serious IRQ latency issues.
+		 */
+		hwc->sample_period  = armpmu->max_period >> 1;
 		hwc->last_period    = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -640,6 +657,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
 
 static int __devinit armpmu_device_probe(struct platform_device *pdev)
 {
+	if (!cpu_pmu)
+		return -ENODEV;
+
 	cpu_pmu->plat_device = pdev;
 	return 0;
 }
@@ -764,11 +784,16 @@ user_backtrace(struct frame_tail __user *tail,
 	       struct perf_callchain_entry *entry)
 {
 	struct frame_tail buftail;
+	unsigned long err;
 
-	/* Also check accessibility of one struct frame_tail beyond */
 	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
 		return NULL;
-	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
 		return NULL;
 
 	perf_callchain_store(entry, buftail.lr);
@@ -789,6 +814,11 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	struct frame_tail __user *tail;
 
+	perf_callchain_store(entry, regs->ARM_pc);
+
+	if (!current->mm)
+		return;
+
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
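
A note on the new delta calculation in armpmu_event_update(): armpmu->max_period is the all-ones counter mask (e.g. 0xffffffff for a 32-bit PMU counter), so masking the raw difference yields the event count modulo the counter width and the explicit overflow argument is no longer needed. The standalone sketch below is illustrative only, not part of the patch; COUNTER_MASK stands in for armpmu->max_period and a 32-bit counter is assumed.

/*
 * Illustrative sketch only -- not taken from the patch above.
 * COUNTER_MASK plays the role of armpmu->max_period for an assumed
 * 32-bit counter.
 */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_MASK 0xffffffffULL

static uint64_t counter_delta(uint64_t prev, uint64_t curr)
{
	/* Modular subtraction: correct even when the counter wrapped. */
	return (curr - prev) & COUNTER_MASK;
}

int main(void)
{
	/* No wrap: 0x100 -> 0x180 counts 0x80 events. */
	printf("0x%llx\n", (unsigned long long)counter_delta(0x100, 0x180));

	/* Wrap past zero: 0xfffffff0 -> 0x10 still counts 0x20 events. */
	printf("0x%llx\n", (unsigned long long)counter_delta(0xfffffff0, 0x10));

	return 0;
}

This modular scheme is also why armpmu_event_set_period() now clamps the programmed period to max_period >> 1: with at most half the counter range outstanding, a delayed overflow interrupt cannot let the live counter value lap the value that is about to be programmed.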