[PATCH] Change synchronize_kernel to _rcu and _sched
author    Paul E. McKenney <paulmck@us.ibm.com>
Sun, 1 May 2005 15:59:04 +0000 (08:59 -0700)
committer Linus Torvalds <torvalds@ppc970.osdl.org>
Sun, 1 May 2005 15:59:04 +0000 (08:59 -0700)
This patch changes calls to synchronize_kernel(), deprecated in the earlier
"Deprecate synchronize_kernel, GPL replacement" patch, to instead call the new
synchronize_rcu() and synchronize_sched() APIs.
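
For reference, a minimal sketch (not part of this patch; struct foo and
foo_hook are made-up names for illustration) of which replacement applies
where: synchronize_rcu() waits for readers inside rcu_read_lock() /
rcu_read_unlock() critical sections, while synchronize_sched() waits for
every CPU to pass through a context switch, which also covers hard
interrupt handlers and other preempt-disabled code.

  #include <linux/rcupdate.h>
  #include <linux/slab.h>

  /* Hypothetical RCU-protected structure, for illustration only. */
  struct foo {
          int data;
  };
  static struct foo *global_foo;

  /* Readers dereference global_foo under rcu_read_lock(). */
  static int foo_read(void)
  {
          struct foo *p;
          int val;

          rcu_read_lock();
          p = rcu_dereference(global_foo);
          val = p ? p->data : -1;
          rcu_read_unlock();
          return val;
  }

  /* The updater can therefore wait for those readers with
   * synchronize_rcu() before freeing the old structure. */
  static void foo_replace(struct foo *new)
  {
          struct foo *old = global_foo;

          rcu_assign_pointer(global_foo, new);
          synchronize_rcu();              /* wait for rcu_read_lock() readers */
          kfree(old);
  }

  /* Hypothetical interrupt-time hook: hard-IRQ handlers run with
   * preemption disabled but take no rcu_read_lock(), so teardown
   * waits with synchronize_sched() instead. */
  static void (*foo_hook)(void);

  static void foo_unregister_hook(void)
  {
          foo_hook = NULL;
          synchronize_sched();            /* wait for in-flight handlers */
  }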

Signed-off-by: Paul E. McKenney <paulmck@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
16 files changed:
arch/i386/oprofile/nmi_timer_int.c
arch/ppc64/kernel/HvLpEvent.c
drivers/acpi/processor_idle.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/input/keyboard/atkbd.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid6main.c
drivers/net/r8169.c
drivers/s390/cio/airq.c
kernel/module.c
kernel/profile.c
mm/slab.c
net/core/dev.c

index b2e462a..c58d0c1 100644
--- a/arch/i386/oprofile/nmi_timer_int.c
+++ b/arch/i386/oprofile/nmi_timer_int.c
@@ -36,7 +36,7 @@ static void timer_stop(void)
 {
        enable_timer_nmi_watchdog();
        unset_nmi_callback();
-       synchronize_kernel();
+       synchronize_sched();  /* Allow already-started NMIs to complete. */
 }
 
 
index 9802bee..f8f1963 100644
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ b/arch/ppc64/kernel/HvLpEvent.c
@@ -45,7 +45,7 @@ int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
                        /* We now sleep until all other CPUs have scheduled. This ensures that
                         * the deletion is seen by all other CPUs, and that the deleted handler
                         * isn't still running on another CPU when we return. */
-                       synchronize_kernel();
+                       synchronize_rcu();
                }
        }
        return rc;
index 05a1781..ff64d33 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -838,7 +838,7 @@ int acpi_processor_cst_has_changed (struct acpi_processor *pr)
 
        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
-       synchronize_kernel();
+       synchronize_sched();  /* Relies on interrupts forcing exit from idle. */
 
        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
index 29de259..44a7f13 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2199,7 +2199,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi)
        /* Wait until we know that we are out of any interrupt
           handlers might have been running before we freed the
           interrupt. */
-       synchronize_kernel();
+       synchronize_sched();
 
        if (new_smi->si_sm) {
                if (new_smi->handlers)
@@ -2312,7 +2312,7 @@ static void __exit cleanup_one_si(struct smi_info *to_clean)
        /* Wait until we know that we are out of any interrupt
           handlers might have been running before we freed the
           interrupt. */
-       synchronize_kernel();
+       synchronize_sched();
 
        /* Wait for the timer to stop.  This avoids problems with race
           conditions removing the timer here. */
index f7304f0..ff66ed4 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -678,7 +678,7 @@ static void atkbd_disconnect(struct serio *serio)
        atkbd_disable(atkbd);
 
        /* make sure we don't have a command in flight */
-       synchronize_kernel();
+       synchronize_sched();  /* Allow atkbd_interrupt()s to complete. */
        flush_scheduled_work();
 
        device_remove_file(&serio->dev, &atkbd_attr_extra);
index c9b134c..1891e49 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -355,7 +355,7 @@ static int multipath_remove_disk(mddev_t *mddev, int number)
                        goto abort;
                }
                p->rdev = NULL;
-               synchronize_kernel();
+               synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
index a389394..83380b5 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -797,7 +797,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
                        goto abort;
                }
                p->rdev = NULL;
-               synchronize_kernel();
+               synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
index b100bfe..e9dc287 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -977,7 +977,7 @@ static int raid10_remove_disk(mddev_t *mddev, int number)
                        goto abort;
                }
                p->rdev = NULL;
-               synchronize_kernel();
+               synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
index 52c3a81..e96e2a1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1873,7 +1873,7 @@ static int raid5_remove_disk(mddev_t *mddev, int number)
                        goto abort;
                }
                p->rdev = NULL;
-               synchronize_kernel();
+               synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
index 7e30ab2..8a33f35 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -2038,7 +2038,7 @@ static int raid6_remove_disk(mddev_t *mddev, int number)
                        goto abort;
                }
                p->rdev = NULL;
-               synchronize_kernel();
+               synchronize_rcu();
                if (atomic_read(&rdev->nr_pending)) {
                        /* lost the race, try later */
                        err = -EBUSY;
index 07e2df0..c59507f 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -2385,7 +2385,7 @@ core_down:
        }
 
        /* Give a racing hard_start_xmit a few cycles to complete. */
-       synchronize_kernel();
+       synchronize_sched();  /* FIXME: should this be synchronize_irq()? */
 
        /*
         * And now for the 50k$ question: are IRQ disabled or not ?
index 3720e77..83e6a06 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -45,7 +45,7 @@ s390_register_adapter_interrupt (adapter_int_handler_t handler)
        else
                ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0);
        if (!ret)
-               synchronize_kernel();
+               synchronize_sched();  /* Allow interrupts to complete. */
 
        sprintf (dbf_txt, "ret:%d", ret);
        CIO_TRACE_EVENT (4, dbf_txt);
@@ -65,7 +65,7 @@ s390_unregister_adapter_interrupt (adapter_int_handler_t handler)
                ret = -EINVAL;
        else {
                adapter_handler = NULL;
-               synchronize_kernel();
+               synchronize_sched();  /* Allow interrupts to complete. */
                ret = 0;
        }
        sprintf (dbf_txt, "ret:%d", ret);
index 2dbfa07..5734ab0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1801,7 +1801,7 @@ sys_init_module(void __user *umod,
                /* Init routine failed: abort.  Try to protect us from
                    buggy refcounters. */
                mod->state = MODULE_STATE_GOING;
-               synchronize_kernel();
+               synchronize_sched();
                if (mod->unsafe)
                        printk(KERN_ERR "%s: module is now stuck!\n",
                               mod->name);
index a66be46..0221a50 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -184,7 +184,7 @@ void unregister_timer_hook(int (*hook)(struct pt_regs *))
        WARN_ON(hook != timer_hook);
        timer_hook = NULL;
        /* make sure all CPUs see the NULL hook */
-       synchronize_kernel();
+       synchronize_sched();  /* Allow ongoing interrupts to complete. */
 }
 
 EXPORT_SYMBOL_GPL(register_timer_hook);
index 771cc09..8407426 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1666,7 +1666,7 @@ int kmem_cache_destroy(kmem_cache_t * cachep)
        }
 
        if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-               synchronize_kernel();
+               synchronize_rcu();
 
        /* no cpu_online check required here since we clear the percpu
         * array on cpu offline and set this to NULL.
index 7bd4cd4..f5f0058 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3091,7 +3091,7 @@ void free_netdev(struct net_device *dev)
 void synchronize_net(void) 
 {
        might_sleep();
-       synchronize_kernel();
+       synchronize_rcu();
 }
 
 /**