[pandora-kernel.git] / kernel / hrtimer.c
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdd5607..6918c03 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -61,6 +61,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+       .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
        .clock_base =
        {
                {
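
Together with the final hunk below, which drops the raw_spin_lock_init() call from init_hrtimers_cpu(), this makes the per-CPU base lock statically initialized, so the lock is valid before any runtime init code has run. A minimal userspace analogue of the pattern, with pthreads standing in for raw spinlocks (names are illustrative, not from the patch):

#include <pthread.h>

struct cpu_base_sketch {
	pthread_mutex_t lock;
	int nr_events;
};

/* Static initialization: the lock is usable from the very first
 * access, even one that happens before any init function ran. */
static struct cpu_base_sketch base = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.nr_events = 0,
};

/* The runtime alternative this patch removes the need for would be
 * pthread_mutex_init(&base.lock, NULL) in some init path, which is
 * only safe once that path has actually executed. */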
@@ -231,6 +232,11 @@ again:
                        goto again;
                }
                timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
        }
        return new_base;
 }
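
The new else branch covers the case where the timer stays on its current base but that base belongs to a remote CPU: a remote clock event device cannot be reprogrammed from here, so if the timer would expire before the remote CPU's next programmed event, the code retries with this_cpu as the target. A standalone sketch of that decision, assuming a helper with roughly the semantics of hrtimer_check_target():

#include <stdbool.h>
#include <stdint.h>

struct base_sketch {
	int64_t expires_next;	/* next event programmed on that CPU */
};

/* Roughly hrtimer_check_target(): true if enqueueing 'expires' on this
 * base would make it the new leftmost timer, i.e. would require
 * reprogramming that CPU's event device. */
static bool would_need_reprogram(int64_t expires, const struct base_sketch *b)
{
	return expires <= b->expires_next;
}

static int pick_target_cpu(int this_cpu, int cpu, int64_t expires,
			   const struct base_sketch *bases)
{
again:
	/* Remote hardware cannot be reprogrammed from this CPU, so fall
	 * back to the local base and retry (the loop runs at most once). */
	if (cpu != this_cpu && would_need_reprogram(expires, &bases[cpu])) {
		cpu = this_cpu;
		goto again;
	}
	return cpu;
}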
@@ -297,6 +303,10 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);
 
+               /* Make sure nsec fits into long */
+               if (unlikely(nsec > KTIME_SEC_MAX))
+                       return (ktime_t){ .tv64 = KTIME_MAX };
+
                tmp = ktime_set((long)nsec, rem);
        }
 
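KTIME_SEC_MAX is the largest seconds value whose ktime representation still fits, so on 32-bit, where ktime_set() takes the seconds part as a long, anything larger would silently truncate; the new check saturates to KTIME_MAX instead. A standalone sketch of the same guard, collapsing ktime_t to a scalar s64 for brevity:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000LL
#define KTIME_MAX	INT64_MAX
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static int64_t ktime_add_ns_sketch(int64_t kt, uint64_t nsec)
{
	uint64_t sec = nsec / NSEC_PER_SEC;
	uint64_t rem = nsec % NSEC_PER_SEC;

	/* Same guard as above: a seconds value that does not fit is
	 * saturated rather than silently truncated. */
	if (sec > KTIME_SEC_MAX)
		return KTIME_MAX;

	return kt + (int64_t)(sec * NSEC_PER_SEC + rem);
}

int main(void)
{
	/* Saturates instead of wrapping to a bogus small value. */
	printf("%lld\n", (long long)ktime_add_ns_sketch(0, UINT64_MAX));
	return 0;
}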
@@ -562,6 +572,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
        cpu_base->expires_next.tv64 = expires_next.tv64;
 
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
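
Note the ordering: expires_next is still updated before the early return, so the software view of the next event stays correct and only the hardware reprogramming is suppressed while the hang recovery delay is armed. A compressed, standalone sketch of the resulting policy (names are stand-ins, not kernel API):

#include <stdbool.h>
#include <stdint.h>

#define KTIME_MAX_SKETCH INT64_MAX

struct cpu_base_sketch {
	int64_t expires_next;
	bool hang_detected;
};

static void program_event_hw(int64_t expires) { (void)expires; }

static void force_reprogram_sketch(struct cpu_base_sketch *cpu_base,
				   int64_t earliest_expiry)
{
	/* Always keep the software state current ... */
	cpu_base->expires_next = earliest_expiry;

	/* ... but leave the hardware alone while recovering from a hang,
	 * so the short hang delay stays armed and the 50ms/5s scenario
	 * described above cannot block all timers until T2 fires. */
	if (cpu_base->hang_detected)
		return;

	if (cpu_base->expires_next != KTIME_MAX_SKETCH)
		program_event_hw(cpu_base->expires_next);
}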
@@ -821,6 +848,9 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
        if (delta.tv64 < 0)
                return 0;
 
+       if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
+               return 0;
+
        if (interval.tv64 < timer->base->resolution.tv64)
                interval.tv64 = timer->base->resolution.tv64;
 
@@ -880,7 +910,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
  */
 static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
-                            unsigned long newstate, int reprogram)
+                            u8 newstate, int reprogram)
 {
        struct timerqueue_node *next_timer;
        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
@@ -914,7 +944,7 @@ static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
        if (hrtimer_is_queued(timer)) {
-               unsigned long state;
+               u8 state;
                int reprogram;
 
                /*
@@ -940,6 +970,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
        return 0;
 }
 
+static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
+                                           const enum hrtimer_mode mode)
+{
+#ifdef CONFIG_TIME_LOW_RES
+       /*
+        * CONFIG_TIME_LOW_RES indicates that the system has no way to return
+        * granular time values. For relative timers we add KTIME_LOW_RES
+        * (i.e. one jiffy) to prevent short timeouts.
+        */
+       timer->is_rel = mode & HRTIMER_MODE_REL;
+       if (timer->is_rel)
+               tim = ktime_add_safe(tim, KTIME_LOW_RES);
+#endif
+       return tim;
+}
+
 int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                unsigned long delta_ns, const enum hrtimer_mode mode,
                int wakeup)
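
hrtimer_update_lowres() relies on ktime_add_safe(), which saturates instead of wrapping, so padding an already-huge relative timeout by KTIME_LOW_RES cannot overflow into the past. A standalone sketch of that saturating add (scalar s64 again; upstream saturates to ktime_set(KTIME_SEC_MAX, 0), simplified here):

#include <stdint.h>

#define KTIME_MAX_SAT INT64_MAX

/* Mirrors the intent of ktime_add_safe(): a + b saturated, so the sum
 * of two forward timeouts can never wrap into a time in the past. */
static int64_t ktime_add_safe_sketch(int64_t a, int64_t b)
{
	/* Unsigned addition avoids signed-overflow UB in this sketch. */
	int64_t res = (int64_t)((uint64_t)a + (uint64_t)b);

	/* A wrapped result is negative or smaller than either operand. */
	if (res < 0 || res < a || res < b)
		return KTIME_MAX_SAT;
	return res;
}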
@@ -953,25 +999,16 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);
 
-       /* Switch the timer base, if necessary: */
-       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+       if (mode & HRTIMER_MODE_REL)
+               tim = ktime_add_safe(tim, base->get_time());
 
-       if (mode & HRTIMER_MODE_REL) {
-               tim = ktime_add_safe(tim, new_base->get_time());
-               /*
-                * CONFIG_TIME_LOW_RES is a temporary way for architectures
-                * to signal that they simply return xtime in
-                * do_gettimeoffset(). In this case we want to round up by
-                * resolution when starting a relative timer, to avoid short
-                * timeouts. This will go away with the GTOD framework.
-                */
-#ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, base->resolution);
-#endif
-       }
+       tim = hrtimer_update_lowres(timer, tim, mode);
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
+       /* Switch the timer base, if necessary: */
+       new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
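
The reordering in this hunk is load-bearing: switch_hrtimer_base() now runs only after the new expiry has been stored, because the remote-target check added in the second hunk compares the timer's expiry against the candidate CPU's expires_next; done in the old order, that check would look at stale expiry data. The resulting sequence, reduced to comments:

/* Ordering after this hunk (sketch, not the complete function):
 *
 *	ret = remove_hrtimer(timer, base);	// dequeue before migrating
 *	tim = <make absolute, apply low-res padding>;
 *	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 *	// Only now is the expiry valid for hrtimer_check_target(),
 *	// so the base switch has to come after it:
 *	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
 *	leftmost = enqueue_hrtimer(timer, new_base);
 */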
@@ -1090,19 +1127,25 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
 /**
  * hrtimer_get_remaining - get remaining time for the timer
  * @timer:     the timer to read
+ * @adjust:    adjust relative timers when CONFIG_TIME_LOW_RES=y
  */
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
 {
        unsigned long flags;
        ktime_t rem;
 
        lock_hrtimer_base(timer, &flags);
-       rem = hrtimer_expires_remaining(timer);
+#ifdef CONFIG_TIME_LOW_RES
+       if (adjust)
+               rem = hrtimer_expires_remaining_adjusted(timer);
+       else
+#endif
+               rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
        return rem;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
 
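The companion include/linux/hrtimer.h change is not part of this per-file view; upstream it keeps the old entry point as a thin wrapper, roughly as below, while callers that want the TIME_LOW_RES-adjusted view (such as the timer_list code) pass adjust=true:

/* In <linux/hrtimer.h>, not shown in this diff: */
static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	return __hrtimer_get_remaining(timer, false);
}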
 #ifdef CONFIG_NO_HZ
 /**
@@ -1218,6 +1261,15 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
        timer_stats_account_hrtimer(timer);
        fn = timer->function;
 
+       /*
+        * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
+        * timer is restarted with a period then it becomes an absolute
+        * timer. If it's not restarted it does not matter.
+        */
+#ifdef CONFIG_TIME_LOW_RES
+       timer->is_rel = false;
+#endif
+
        /*
         * Because we run timers from hardirq context, there is no chance
         * they get migrated to another cpu, therefore it's safe to unlock
@@ -1233,11 +1285,14 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
         * we do not reprogram the event hardware. Happens either in
         * hrtimer_start_range_ns() or in hrtimer_interrupt()
+        *
+        * Note: Because we dropped the cpu_base->lock above,
+        * hrtimer_start_range_ns() can have popped in and enqueued the timer
+        * for us already.
         */
-       if (restart != HRTIMER_NORESTART) {
-               BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+       if (restart != HRTIMER_NORESTART &&
+           !(timer->state & HRTIMER_STATE_ENQUEUED))
                enqueue_hrtimer(timer, base);
-       }
 
        WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
 
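The relaxed restart check exists because cpu_base->lock is dropped around the callback invocation: another CPU can call hrtimer_start() on the same timer while fn() runs, leaving it already ENQUEUED when the restart path is reached. A timeline of the race the old BUG_ON tripped over:

/*
 *  CPU0: __run_hrtimer()                 CPU1
 *  ---------------------------------    ----------------------------
 *  raw_spin_unlock(&cpu_base->lock);
 *  restart = fn(timer);                 hrtimer_start(timer, ...);
 *                                         // timer is now ENQUEUED
 *  raw_spin_lock(&cpu_base->lock);
 *  // old code: BUG_ON(state != CALLBACK) fires here
 *  // new code: restart requested but already queued -> skip enqueue
 */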
@@ -1307,6 +1362,8 @@ retry:
 
                                expires = ktime_sub(hrtimer_get_expires(timer),
                                                    base->offset);
+                               if (expires.tv64 < 0)
+                                       expires.tv64 = KTIME_MAX;
                                if (expires.tv64 < expires_next.tv64)
                                        expires_next = expires;
                                break;
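
Without the clamp, a negative offset-adjusted expiry would win the minimum and be stored as expires_next, programming the event device to a time in the past; forcing such values to KTIME_MAX keeps them out of the comparison entirely. A standalone sketch of the min-fold with the clamp applied:

#include <stdint.h>

#define KTIME_MAX_CLAMP INT64_MAX

/* Fold one timer's offset-adjusted expiry into the running minimum,
 * applying the clamp from the hunk above first. */
static int64_t fold_next_expiry(int64_t expires_next, int64_t expires)
{
	/* A value in the past must not be programmed into the hardware;
	 * push it out of the way instead. */
	if (expires < 0)
		expires = KTIME_MAX_CLAMP;

	return expires < expires_next ? expires : expires_next;
}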
@@ -1640,8 +1697,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;
 
-       raw_spin_lock_init(&cpu_base->lock);
-
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);