Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
diff --git a/kernel/timer.c b/kernel/timer.c
index 471ab87..9e49dee 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
@@ -540,6 +541,22 @@ found:
        }
        spin_unlock(&base->lock);
 
+       /*
+        * It can happen that other CPUs service timer IRQs and increment
+        * jiffies, but we have not yet got a local timer tick to process
+        * the timer wheels.  In that case, the expiry time can be before
+        * jiffies, but since the high-resolution expiry computed here is
+        * relative to jiffies (it defaults to jiffies + MAX_JIFFY_OFFSET
+        * when high-resolution timers are not active), the comparison
+        *
+        *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
+        *
+        * would falsely evaluate to true.  If that is the case, just
+        * return jiffies so that we can immediately fire the local timer.
+        */
+       if (time_before(expires, jiffies))
+               return jiffies;
+
        if (time_before(hr_expires, expires))
                return hr_expires;
 
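The comment above describes a wrap-around in the jiffies comparison helpers. As a reference, here is a small userspace sketch of that situation; the time_before()/time_after() macros and MAX_JIFFY_OFFSET are simplified reconstructions rather than the real kernel definitions, and the concrete jiffies/expires values are invented for illustration:

#include <stdio.h>

#define time_after(a, b)   ((long)((b) - (a)) < 0)
#define time_before(a, b)  time_after(b, a)
#define MAX_JIFFY_OFFSET   ((~0UL >> 1) - 1)

int main(void)
{
        unsigned long jiffies = 1000000UL;
        /* another CPU already pushed jiffies past this expiry */
        unsigned long expires = jiffies - 3;
        /* the "far ahead" default used when no hrtimer event is pending */
        unsigned long hr_expires = MAX_JIFFY_OFFSET + jiffies;

        /* (long)(hr_expires - expires) wraps negative, so this prints 1 */
        printf("time_before(hr_expires, expires) = %d\n",
               time_before(hr_expires, expires));

        /* the check added above catches the stale expiry first */
        printf("time_before(expires, jiffies)   = %d\n",
               time_before(expires, jiffies));
        return 0;
}

On the usual two's-complement targets both lines print 1: the stale expiry makes the MAX_JIFFY_OFFSET comparison look "before", which is why the early return on time_before(expires, jiffies) has to run first.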
@@ -1224,28 +1241,36 @@ static int __devinit init_timers_cpu(int cpu)
 {
        int j;
        tvec_base_t *base;
+       static char __devinitdata tvec_base_done[NR_CPUS];
 
-       base = per_cpu(tvec_bases, cpu);
-       if (!base) {
+       if (!tvec_base_done[cpu]) {
                static char boot_done;
 
-               /*
-                * Cannot do allocation in init_timers as that runs before the
-                * allocator initializes (and would waste memory if there are
-                * more possible CPUs than will ever be installed/brought up).
-                */
                if (boot_done) {
+                       /*
+                        * The APs (secondary CPUs) use this path later in boot.
+                        */
                        base = kmalloc_node(sizeof(*base), GFP_KERNEL,
                                                cpu_to_node(cpu));
                        if (!base)
                                return -ENOMEM;
                        memset(base, 0, sizeof(*base));
+                       per_cpu(tvec_bases, cpu) = base;
                } else {
-                       base = &boot_tvec_bases;
+                       /*
+                        * This is for the boot CPU - we use compile-time
+                        * static initialisation because per-cpu memory isn't
+                        * ready yet and because the memory allocators are not
+                        * initialised either.
+                        */
                        boot_done = 1;
+                       base = &boot_tvec_bases;
                }
-               per_cpu(tvec_bases, cpu) = base;
+               tvec_base_done[cpu] = 1;
+       } else {
+               base = per_cpu(tvec_bases, cpu);
        }
+
        spin_lock_init(&base->lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
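To make the flow of init_timers_cpu() above easier to follow, here is a minimal standalone sketch of the same bootstrap pattern: the boot CPU gets a statically allocated base because the memory allocators are not initialised yet, secondary CPUs allocate theirs once the allocator is up, and a CPU that has already been set up (e.g. coming back after hotplug) reuses its existing base. The names here (fake_base, per_cpu_base, base_done, init_base_cpu, NR_FAKE_CPUS) are invented for illustration, and calloc() merely stands in for kmalloc_node() plus memset():

#include <stdlib.h>

#define NR_FAKE_CPUS 4

struct fake_base {
        int initialised;
};

/* compile-time instance for the boot CPU, mirroring boot_tvec_bases */
static struct fake_base boot_base;
/* slot 0 (the boot CPU) points at the static instance from the start */
static struct fake_base *per_cpu_base[NR_FAKE_CPUS] = { &boot_base };
static char base_done[NR_FAKE_CPUS];

static int init_base_cpu(int cpu)
{
        static int boot_done;
        struct fake_base *base;

        if (!base_done[cpu]) {
                if (boot_done) {
                        /* secondary CPUs: the allocator is usable by now */
                        base = calloc(1, sizeof(*base));
                        if (!base)
                                return -1;
                        per_cpu_base[cpu] = base;
                } else {
                        /* boot CPU: allocators are not up, use static storage */
                        boot_done = 1;
                        base = &boot_base;
                }
                base_done[cpu] = 1;
        } else {
                /* CPU seen before, e.g. brought back after hotplug */
                base = per_cpu_base[cpu];
        }

        /* per-base setup that runs on every (re)initialisation */
        base->initialised = 1;
        return 0;
}

int main(void)
{
        for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
                init_base_cpu(cpu);
        return 0;
}

The same split is the reason the per-cpu pointer can be given a compile-time initialiser in the first hunk above: the boot CPU needs a usable tvec base before any allocator or per-cpu machinery is ready.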
@@ -1305,7 +1330,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit timer_cpu_notify(struct notifier_block *self, 
+static int timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
@@ -1325,7 +1350,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
        .notifier_call  = timer_cpu_notify,
 };