ARM: 8255/1: perf: Prevent wraparound during overflow
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 88b0941..40dbe11 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -ENOENT;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
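
The added range check matters because config comes from the perf_event_attr supplied by userspace as a u64, and it previously indexed the fixed-size event map unchecked, so any out-of-range value caused a read past the end of the array. Below is a minimal userspace sketch of the same guard; the map contents and the value used for PERF_COUNT_HW_MAX are illustrative stand-ins, not the kernel's definitions.

    #include <stdio.h>
    #include <stdint.h>

    #define PERF_COUNT_HW_MAX   10          /* illustrative size only */
    #define HW_OP_UNSUPPORTED   0xFFFF
    #define DEMO_ENOENT         (-2)        /* stands in for -ENOENT */

    /* Stand-in for a PMU's generic event map: one entry per generic
     * hardware event, unsupported ones marked HW_OP_UNSUPPORTED. */
    static const unsigned int demo_event_map[PERF_COUNT_HW_MAX] = {
        [0] = 0x11,                 /* e.g. cycles -> made-up counter code */
        [1] = 0x08,                 /* e.g. instructions */
        [2] = HW_OP_UNSUPPORTED,    /* an event this PMU cannot count */
    };

    /* Mirrors the patched armpmu_map_event(): range-check the u64 config
     * before it is allowed to index the map. */
    static int demo_map_event(uint64_t config)
    {
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
            return DEMO_ENOENT;     /* previously an out-of-bounds read */

        mapping = demo_event_map[config];
        return mapping == HW_OP_UNSUPPORTED ? DEMO_ENOENT : mapping;
    }

    int main(void)
    {
        printf("%d\n", demo_map_event(0));            /* 17 (0x11) */
        printf("%d\n", demo_map_event(2));            /* -2: unsupported */
        printf("%d\n", demo_map_event(1ULL << 40));   /* -2: rejected by the new check */
        return 0;
    }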
 
@@ -172,8 +177,14 @@ armpmu_event_set_period(struct perf_event *event,
                ret = 1;
        }
 
-       if (left > (s64)armpmu->max_period)
-               left = armpmu->max_period;
+       /*
+        * Limit the maximum period to prevent the counter value
+        * from overtaking the one we are about to program. In
+        * effect we are reducing max_period to account for
+        * interrupt latency (and we are being very conservative).
+        */
+       if (left > (armpmu->max_period >> 1))
+               left = armpmu->max_period >> 1;
 
        local64_set(&hwc->prev_count, (u64)-left);
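
The clamp above trades half the period range for safety margin: prev_count is programmed to -left, and armpmu_event_update(), shown below, can only account for differences modulo the counter width, so if interrupt latency lets the hardware overtake the value it started from, a whole counter's worth of events silently disappears. A small arithmetic sketch of that hazard, assuming a 32-bit counter whose max_period is 0xffffffff; the same halving is applied to the default sample_period in __hw_perf_event_init() further down.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t max_period = 0xffffffffULL;   /* 32-bit counter */

        /* Full period: the counter is programmed to -left, i.e. it starts
         * just above zero with the whole range left to count through. */
        uint64_t left = max_period;
        uint32_t prev = (uint32_t)-left;             /* 0x00000001 */

        /* If the overflow interrupt is serviced late and the counter has
         * meanwhile ticked 0x100000010 times, the 32-bit register keeps
         * only the low bits, so the value read back is indistinguishable
         * from having counted just 0x10 events. */
        uint64_t counted = 0x100000010ULL;
        uint32_t now = (uint32_t)(prev + counted);
        uint64_t delta = ((uint64_t)now - prev) & max_period;
        printf("counted %llu, accounted %llu\n",
               (unsigned long long)counted, (unsigned long long)delta);

        /* Halving the period starts the counter mid-range instead,
         * leaving roughly 2^31 events of headroom before the same
         * aliasing can occur. */
        left = max_period >> 1;
        prev = (uint32_t)-left;                      /* 0x80000001 */
        printf("halved period programs the counter to 0x%08x\n", prev);
        return 0;
    }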
 
@@ -187,7 +198,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
-                   int idx, int overflow)
+                   int idx)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;
@@ -200,13 +211,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       new_raw_count &= armpmu->max_period;
-       prev_raw_count &= armpmu->max_period;
-
-       if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-       else
-               delta = new_raw_count - prev_raw_count;
+       delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
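
With the period now capped at half the counter range, a single modular subtraction covers both the wrapped and non-wrapped cases, which is why the explicit overflow argument can be dropped from armpmu_event_update() and its callers below. A quick sketch of the arithmetic, again assuming a 32-bit counter:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t max_period = 0xffffffffULL;   /* 32-bit counter mask */
        uint64_t prev_raw, new_raw, delta;

        /* Counter wrapped between the two reads: prev was near the top of
         * the range, new is small again. */
        prev_raw = 0xfffffff0ULL;
        new_raw  = 0x00000010ULL;
        delta = (new_raw - prev_raw) & max_period;
        printf("wrapped:     delta = %llu\n", (unsigned long long)delta);   /* 32 */

        /* No wrap: the same expression gives the plain difference. */
        prev_raw = 0x00001000ULL;
        new_raw  = 0x00001234ULL;
        delta = (new_raw - prev_raw) & max_period;
        printf("not wrapped: delta = %llu\n", (unsigned long long)delta);   /* 564 */
        return 0;
    }
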
@@ -223,7 +228,7 @@ armpmu_read(struct perf_event *event)
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx, 0);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -239,7 +244,7 @@ armpmu_stop(struct perf_event *event, int flags)
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx, 0);
+               armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
@@ -332,7 +337,13 @@ validate_event(struct pmu_hw_events *hw_events,
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (is_software_event(event))
+               return 1;
+
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
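
The three early returns exempt group members that should not be charged against a hardware counter while a group is validated: pure software events, events that belong to another PMU or sit in an error state, and events that are disabled without enable_on_exec. Disabled events with enable_on_exec fall through and are still charged, since they will start counting once the task execs. A compact sketch of that decision, using made-up types and state values rather than the kernel's:

    #include <stdio.h>
    #include <stdbool.h>

    enum demo_state {                   /* illustrative values only */
        DEMO_STATE_ERROR    = -2,
        DEMO_STATE_OFF      = -1,
        DEMO_STATE_INACTIVE = 0,
    };

    struct demo_event {
        bool software;                  /* would be is_software_event() */
        bool same_pmu;                  /* event->pmu == leader_pmu */
        enum demo_state state;
        bool enable_on_exec;
    };

    /* True when group validation should NOT charge a hardware counter for
     * this event, mirroring the three early returns in the patch. */
    static bool demo_skips_counter(const struct demo_event *e)
    {
        if (e->software)
            return true;                /* software events never need one */
        if (!e->same_pmu || e->state < DEMO_STATE_OFF)
            return true;                /* foreign PMU, or error state */
        if (e->state == DEMO_STATE_OFF && !e->enable_on_exec)
            return true;                /* disabled and not enabled on exec */
        return false;                   /* must fit on a real counter */
    }

    int main(void)
    {
        struct demo_event sw   = { .software = true, .same_pmu = true,
                                   .state = DEMO_STATE_INACTIVE };
        struct demo_event off  = { .same_pmu = true, .state = DEMO_STATE_OFF };
        struct demo_event exec = { .same_pmu = true, .state = DEMO_STATE_OFF,
                                   .enable_on_exec = true };

        printf("software event charged?       %d\n", !demo_skips_counter(&sw));   /* 0 */
        printf("plain disabled event charged? %d\n", !demo_skips_counter(&off));  /* 0 */
        printf("enable_on_exec event charged? %d\n", !demo_skips_counter(&exec)); /* 1 */
        return 0;
    }
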
@@ -519,7 +530,13 @@ __hw_perf_event_init(struct perf_event *event)
        hwc->config_base            |= (unsigned long)mapping;
 
        if (!hwc->sample_period) {
-               hwc->sample_period  = armpmu->max_period;
+               /*
+                * For non-sampling runs, limit the sample_period to half
+                * of the counter width. That way, the new counter value
+                * is far less likely to overtake the previous one unless
+                * you have some serious IRQ latency issues.
+                */
+               hwc->sample_period  = armpmu->max_period >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
@@ -767,11 +784,16 @@ user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
 {
        struct frame_tail buftail;
+       unsigned long err;
 
-       /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
-       if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+       pagefault_disable();
+       err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+       pagefault_enable();
+
+       if (err)
                return NULL;
 
        perf_callchain_store(entry, buftail.lr);
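
For reference, the walk that consumes these frame records starts one frame_tail below regs->ARM_fp and keeps hopping (the loop is shown in the next hunk) until the pointer goes bad, misaligns, or the chain is deep enough; each record contributes its saved lr to the callchain. The sketch below is a simplified userspace stand-in for that walk, with the seed pc stored first the way the next hunk makes perf_callchain_user() do; the frame layout, the fake copy helper and the termination rules are illustrative, not the kernel's exact code.

    #include <stdio.h>
    #include <string.h>

    #define MAX_DEPTH 16

    /* Simplified stand-in for the APCS frame record the kernel copies in. */
    struct frame_tail {
        struct frame_tail *fp;          /* caller's frame record */
        unsigned long sp;
        unsigned long lr;               /* caller's return address */
    };

    /* Stand-in for __copy_from_user_inatomic(): always succeeds here, but
     * the caller still checks the return value, as the patch now does. */
    static unsigned long fake_copy(struct frame_tail *dst,
                                   const struct frame_tail *src)
    {
        memcpy(dst, src, sizeof(*dst));
        return 0;
    }

    /* Mirrors the shape of user_backtrace(): copy one record, record its
     * lr, hand back the next frame to visit (NULL ends the walk). */
    static struct frame_tail *walk_one(struct frame_tail *tail,
                                       unsigned long *chain, unsigned int *nr)
    {
        struct frame_tail buftail;

        if (fake_copy(&buftail, tail))
            return NULL;

        chain[(*nr)++] = buftail.lr;
        return buftail.fp;
    }

    int main(void)
    {
        /* Fake three-deep call chain; frames[0] is the innermost frame. */
        struct frame_tail frames[3] = {
            { &frames[1], 0, 0x1000 },
            { &frames[2], 0, 0x2000 },
            { NULL,       0, 0x3000 },
        };
        unsigned long chain[MAX_DEPTH];
        unsigned int nr = 0;
        struct frame_tail *tail = &frames[0];

        /* Seed with the interrupted pc, stored unconditionally up front. */
        chain[nr++] = 0xbeef;

        while (nr < MAX_DEPTH && tail && !((unsigned long)tail & 0x3))
            tail = walk_one(tail, chain, &nr);

        for (unsigned int i = 0; i < nr; i++)
            printf("frame %u: 0x%lx\n", i, chain[i]);
        return 0;
    }
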
@@ -792,6 +814,11 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
        struct frame_tail __user *tail;
 
 
+       perf_callchain_store(entry, regs->ARM_pc);
+
+       if (!current->mm)
+               return;
+
        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&