ARM: 8255/1: perf: Prevent wraparound during overflow
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index ecebb89..40dbe11 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,7 +116,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-       int mapping = (*event_map)[config];
+       int mapping;
+
+       if (config >= PERF_COUNT_HW_MAX)
+               return -ENOENT;
+
+       mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
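The hunk above hardens armpmu_map_event() against out-of-range config values: the index is validated against PERF_COUNT_HW_MAX before the table lookup, and only then is the HW_OP_UNSUPPORTED sentinel checked. The user-space sketch below illustrates the same bounds-check-then-lookup pattern; the table contents, size, and event codes are invented for illustration and are not taken from the kernel.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define HW_OP_UNSUPPORTED	0xffff
#define NUM_HW_EVENTS		4

/* Illustrative event map: index = generic event id, value = hardware code. */
static const unsigned int event_map[NUM_HW_EVENTS] = {
	0x11,			/* cycles */
	0x08,			/* instructions */
	HW_OP_UNSUPPORTED,	/* cache references: not supported here */
	0x10,			/* branch misses */
};

/* Same shape as the patched armpmu_map_event(): reject an out-of-range
 * config before it can index past the end of the map. */
static int map_event(uint64_t config)
{
	unsigned int mapping;

	if (config >= NUM_HW_EVENTS)
		return -ENOENT;

	mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : (int)mapping;
}

int main(void)
{
	printf("config 1  -> %d\n", map_event(1));	/* 0x08 */
	printf("config 2  -> %d\n", map_event(2));	/* -ENOENT: unsupported */
	printf("config 99 -> %d\n", map_event(99));	/* -ENOENT: out of range */
	return 0;
}
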
@@ -172,8 +177,14 @@ armpmu_event_set_period(struct perf_event *event,
                ret = 1;
        }
 
-       if (left > (s64)armpmu->max_period)
-               left = armpmu->max_period;
+       /*
+        * Limit the maximum period to prevent the counter value
+        * from overtaking the one we are about to program. In
+        * effect we are reducing max_period to account for
+        * interrupt latency (and we are being very conservative).
+        */
+       if (left > (armpmu->max_period >> 1))
+               left = armpmu->max_period >> 1;
 
        local64_set(&hwc->prev_count, (u64)-left);
 
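The comment in the hunk above is the heart of the wraparound fix: the counter is programmed to (u64)-left so that it overflows after "left" events, and capping left at max_period >> 1 keeps roughly half the counter range between the programmed value and the overflow point, so events that arrive while the overflow interrupt is still pending cannot carry the counter past the value about to be written. The stand-alone sketch below reproduces that arithmetic for an assumed 32-bit counter with max_period = 0xffffffff; the numbers are illustrative, not quoted from the patch.

#include <stdint.h>
#include <stdio.h>

/* Assumed 32-bit PMU counter; 0xffffffff is a typical max_period for such
 * a counter, not a value taken from the patch. */
#define MAX_PERIOD	0xffffffffULL

/* Mirror of armpmu_event_set_period(): return the value written to the
 * counter so that it overflows after at most 'left' events. */
static uint32_t program_counter(uint64_t left)
{
	/* The conservative clamp added by the patch. */
	if (left > (MAX_PERIOD >> 1))
		left = MAX_PERIOD >> 1;

	return (uint32_t)-left;		/* counter counts up towards 2^32 */
}

int main(void)
{
	uint32_t start = program_counter(MAX_PERIOD);	/* pathologically large period */
	uint64_t headroom = 0x100000000ULL - start;

	/* Roughly half the counter range remains before the overflow point,
	 * absorbing any events counted during interrupt latency. */
	printf("counter programmed to 0x%08x, headroom %llu events\n",
	       start, (unsigned long long)headroom);
	return 0;
}
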
@@ -326,7 +337,13 @@ validate_event(struct pmu_hw_events *hw_events,
        struct hw_perf_event fake_event = event->hw;
        struct pmu *leader_pmu = event->group_leader->pmu;
 
-       if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+       if (is_software_event(event))
+               return 1;
+
+       if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+               return 1;
+
+       if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;
 
        return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
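
The validate_event() hunk widens the set of events that are accepted without reserving a fake counter: software events can always be scheduled alongside a hardware group, and events that are off (and will not be enabled on exec) never occupy a counter, so only the remaining events are passed to get_event_idx(). A compressed user-space restatement of that filter is sketched below; the struct fields and state values are illustrative stand-ins, not the kernel's struct perf_event.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative event states, ordered like the kernel's PERF_EVENT_STATE_*
 * constants (error < off < inactive < active). */
enum ev_state { EV_ERROR = -2, EV_OFF = -1, EV_INACTIVE = 0, EV_ACTIVE = 1 };

struct ev {
	bool		software;	/* belongs to a software PMU */
	bool		same_pmu;	/* on the same PMU as the group leader */
	enum ev_state	state;
	bool		enable_on_exec;
};

/* Restatement of the patched validate_event() filter: return true only if
 * the event actually needs a hardware counter reserved for validation. */
static bool needs_counter(const struct ev *e)
{
	if (e->software)
		return false;
	if (!e->same_pmu || e->state < EV_OFF)
		return false;
	if (e->state == EV_OFF && !e->enable_on_exec)
		return false;
	return true;
}

int main(void)
{
	struct ev sw  = { .software = true,  .same_pmu = true, .state = EV_ACTIVE };
	struct ev off = { .software = false, .same_pmu = true, .state = EV_OFF };
	struct ev hw  = { .software = false, .same_pmu = true, .state = EV_INACTIVE };

	printf("software event:      needs counter = %d\n", needs_counter(&sw));	/* 0 */
	printf("off, no exec enable: needs counter = %d\n", needs_counter(&off));	/* 0 */
	printf("inactive hw event:   needs counter = %d\n", needs_counter(&hw));	/* 1 */
	return 0;
}
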
@@ -767,11 +784,16 @@ user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
 {
        struct frame_tail buftail;
+       unsigned long err;
 
-       /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
-       if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
+
+       pagefault_disable();
+       err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+       pagefault_enable();
+
+       if (err)
                return NULL;
 
        perf_callchain_store(entry, buftail.lr);
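
The user_backtrace() hunk brackets the non-faulting copy with pagefault_disable()/pagefault_enable(): the overflow handler runs in a context that must not sleep on a page fault, so the copy has to fail immediately if the frame is not resident, and the unwind then stops cleanly at that frame. A kernel-style sketch of the pattern is shown below; the helper name is invented and the snippet is illustrative rather than part of the patch.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Sketch only: read one word from user space without ever faulting in a
 * page, in the style of the patched user_backtrace(). */
static int read_user_word_nofault(unsigned long __user *addr, unsigned long *val)
{
	unsigned long err;

	if (!access_ok(VERIFY_READ, addr, sizeof(*val)))
		return -EFAULT;

	/*
	 * The PMU interrupt may arrive in a context that must not sleep, so
	 * disable page-fault handling: a non-resident page makes the copy
	 * fail immediately instead of entering the fault handler.
	 */
	pagefault_disable();
	err = __copy_from_user_inatomic(val, addr, sizeof(*val));
	pagefault_enable();

	return err ? -EFAULT : 0;
}
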
@@ -792,6 +814,11 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
        struct frame_tail __user *tail;
 
 
+       perf_callchain_store(entry, regs->ARM_pc);
+
+       if (!current->mm)
+               return;
+
        tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&